Wherever possible, it's best to avoid depending on a linear ABD.
Update the code accordingly in the following areas:
- vdev_raidz
- zio, zio_checksum
- zfs_fm
- change abd_alloc_for_io() to use abd_alloc()
Reviewed-by: David Quigley <david.quigley@intel.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Gvozden Neskovic <neskovic@gmail.com>
Closes #5668
void *lbuf, *buf;
char *s, *p, *dup, *vdev, *flagstr;
int i, error;
+ boolean_t borrowed = B_FALSE;
dup = strdup(thing);
s = strtok(dup, ":");
psize = size;
lsize = size;
- pabd = abd_alloc_linear(SPA_MAXBLOCKSIZE, B_FALSE);
+ pabd = abd_alloc_for_io(SPA_MAXBLOCKSIZE, B_FALSE);
lbuf = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
BP_ZERO(bp);
buf = lbuf;
size = lsize;
} else {
- buf = abd_to_buf(pabd);
size = psize;
+ buf = abd_borrow_buf_copy(pabd, size);
+ borrowed = B_TRUE;
}
if (flags & ZDB_FLAG_PRINT_BLKPTR)
else
zdb_dump_block(thing, buf, size, flags);
+ if (borrowed)
+ abd_return_buf_copy(pabd, buf, size);
+
out:
abd_free(pabd);
umem_free(lbuf, SPA_MAXBLOCKSIZE);
typedef struct zio_cksum_report zio_cksum_report_t;
typedef void zio_cksum_finish_f(zio_cksum_report_t *rep,
- const void *good_data);
+ const abd_t *good_data);
typedef void zio_cksum_free_f(void *cbdata, size_t size);
struct zio_bad_cksum; /* defined in zio_checksum.h */
extern void zfs_ereport_start_checksum(spa_t *spa, vdev_t *vd, struct zio *zio,
uint64_t offset, uint64_t length, void *arg, struct zio_bad_cksum *info);
extern void zfs_ereport_finish_checksum(zio_cksum_report_t *report,
- const void *good_data, const void *bad_data, boolean_t drop_if_identical);
+ const abd_t *good_data, const abd_t *bad_data, boolean_t drop_if_identical);
extern void zfs_ereport_free_checksum(zio_cksum_report_t *report);
/* If we have the good data in hand, this function can be used */
extern void zfs_ereport_post_checksum(spa_t *spa, vdev_t *vd,
struct zio *zio, uint64_t offset, uint64_t length,
- const void *good_data, const void *bad_data, struct zio_bad_cksum *info);
+ const abd_t *good_data, const abd_t *bad_data, struct zio_bad_cksum *info);
/* Called from spa_sync(), but primarily an injection handler */
extern void spa_handle_ignored_writes(spa_t *spa);
void *, uint64_t, uint64_t, zio_bad_cksum_t *);
extern void zio_checksum_compute(zio_t *, enum zio_checksum,
struct abd *, uint64_t);
-extern int zio_checksum_error_impl(spa_t *, blkptr_t *, enum zio_checksum,
+extern int zio_checksum_error_impl(spa_t *, const blkptr_t *, enum zio_checksum,
struct abd *, uint64_t, uint64_t, zio_bad_cksum_t *);
extern int zio_checksum_error(zio_t *zio, zio_bad_cksum_t *out);
extern enum zio_checksum spa_dedup_checksum(spa_t *spa);
+/*
+ * Allocate an ABD suitable for I/O.  Callers no longer require the
+ * buffer to be linear (contiguous), so delegate to abd_alloc() instead
+ * of forcing a linear allocation with abd_alloc_linear().
+ */
abd_t *
abd_alloc_for_io(size_t size, boolean_t is_metadata)
{
- return (abd_alloc_linear(size, is_metadata));
+ return (abd_alloc(size, is_metadata));
}
/*
vdev_raidz_map_free(raidz_map_t *rm)
{
int c;
- size_t size;
+ /* Parity columns own their ABDs, including any generated-parity copy. */
for (c = 0; c < rm->rm_firstdatacol; c++) {
abd_free(rm->rm_col[c].rc_abd);
if (rm->rm_col[c].rc_gdata != NULL)
- zio_buf_free(rm->rm_col[c].rc_gdata,
- rm->rm_col[c].rc_size);
+ abd_free(rm->rm_col[c].rc_gdata);
}
- size = 0;
+ /* Data columns only borrow their ABDs; drop the references. */
- for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++) {
+ for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++)
abd_put(rm->rm_col[c].rc_abd);
- size += rm->rm_col[c].rc_size;
- }
+ /* rm_abd_copy holds a snapshot of the data columns, if one was taken. */
if (rm->rm_abd_copy != NULL)
abd_free(rm->rm_abd_copy);
}
static void
-vdev_raidz_cksum_finish(zio_cksum_report_t *zcr, const void *good_data)
+vdev_raidz_cksum_finish(zio_cksum_report_t *zcr, const abd_t *good_data)
{
raidz_map_t *rm = zcr->zcr_cbdata;
- size_t c = zcr->zcr_cbinfo;
- size_t x;
+ const size_t c = zcr->zcr_cbinfo;
+ size_t x, offset;
- const char *good = NULL;
- char *bad;
+ const abd_t *good = NULL;
+ const abd_t *bad = rm->rm_col[c].rc_abd;
if (good_data == NULL) {
zfs_ereport_finish_checksum(zcr, NULL, NULL, B_FALSE);
*/
if (rm->rm_col[0].rc_gdata == NULL) {
abd_t *bad_parity[VDEV_RAIDZ_MAXPARITY];
- char *buf;
- int offset;
/*
* Set up the rm_col[]s to generate the parity for
*/
for (x = 0; x < rm->rm_firstdatacol; x++) {
bad_parity[x] = rm->rm_col[x].rc_abd;
- rm->rm_col[x].rc_gdata =
- zio_buf_alloc(rm->rm_col[x].rc_size);
rm->rm_col[x].rc_abd =
- abd_get_from_buf(rm->rm_col[x].rc_gdata,
+ rm->rm_col[x].rc_gdata =
+ abd_alloc_sametype(rm->rm_col[x].rc_abd,
rm->rm_col[x].rc_size);
}
/* fill in the data columns from good_data */
- buf = (char *)good_data;
+ offset = 0;
for (; x < rm->rm_cols; x++) {
abd_put(rm->rm_col[x].rc_abd);
- rm->rm_col[x].rc_abd = abd_get_from_buf(buf,
- rm->rm_col[x].rc_size);
- buf += rm->rm_col[x].rc_size;
+
+ rm->rm_col[x].rc_abd =
+ abd_get_offset_size((abd_t *)good_data,
+ offset, rm->rm_col[x].rc_size);
+ offset += rm->rm_col[x].rc_size;
}
/*
vdev_raidz_generate_parity(rm);
/* restore everything back to its original state */
- for (x = 0; x < rm->rm_firstdatacol; x++) {
- abd_put(rm->rm_col[x].rc_abd);
+ for (x = 0; x < rm->rm_firstdatacol; x++)
rm->rm_col[x].rc_abd = bad_parity[x];
- }
offset = 0;
for (x = rm->rm_firstdatacol; x < rm->rm_cols; x++) {
}
ASSERT3P(rm->rm_col[c].rc_gdata, !=, NULL);
- good = rm->rm_col[c].rc_gdata;
+ good = abd_get_offset_size(rm->rm_col[c].rc_gdata, 0,
+ rm->rm_col[c].rc_size);
} else {
/* adjust good_data to point at the start of our column */
- good = good_data;
-
+ offset = 0;
for (x = rm->rm_firstdatacol; x < c; x++)
- good += rm->rm_col[x].rc_size;
+ offset += rm->rm_col[x].rc_size;
+
+ good = abd_get_offset_size((abd_t *)good_data, offset,
+ rm->rm_col[c].rc_size);
}
- bad = abd_borrow_buf_copy(rm->rm_col[c].rc_abd, rm->rm_col[c].rc_size);
/* we drop the ereport if it ends up that the data was good */
zfs_ereport_finish_checksum(zcr, good, bad, B_TRUE);
- abd_return_buf(rm->rm_col[c].rc_abd, bad, rm->rm_col[c].rc_size);
+ abd_put((abd_t *)good);
}
/*
for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++)
size += rm->rm_col[c].rc_size;
- rm->rm_abd_copy =
- abd_alloc_sametype(rm->rm_col[rm->rm_firstdatacol].rc_abd, size);
+ rm->rm_abd_copy = abd_alloc_for_io(size, B_FALSE);
for (offset = 0, c = rm->rm_firstdatacol; c < rm->rm_cols; c++) {
raidz_col_t *col = &rm->rm_col[c];
col->rc_size);
abd_copy(tmp, col->rc_abd, col->rc_size);
+
abd_put(col->rc_abd);
col->rc_abd = tmp;
* Report a checksum error for a child of a RAID-Z device.
*/
static void
-raidz_checksum_error(zio_t *zio, raidz_col_t *rc, void *bad_data)
+raidz_checksum_error(zio_t *zio, raidz_col_t *rc, abd_t *bad_data)
{
- void *buf;
vdev_t *vd = zio->io_vd->vdev_child[rc->rc_devidx];
if (!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
zbc.zbc_has_cksum = 0;
zbc.zbc_injected = rm->rm_ecksuminjected;
- buf = abd_borrow_buf_copy(rc->rc_abd, rc->rc_size);
zfs_ereport_post_checksum(zio->io_spa, vd, zio,
- rc->rc_offset, rc->rc_size, buf, bad_data,
+ rc->rc_offset, rc->rc_size, rc->rc_abd, bad_data,
&zbc);
- abd_return_buf(rc->rc_abd, buf, rc->rc_size);
}
}
static int
raidz_parity_verify(zio_t *zio, raidz_map_t *rm)
{
- void *orig[VDEV_RAIDZ_MAXPARITY];
+ abd_t *orig[VDEV_RAIDZ_MAXPARITY];
int c, ret = 0;
raidz_col_t *rc;
rc = &rm->rm_col[c];
if (!rc->rc_tried || rc->rc_error != 0)
continue;
- orig[c] = zio_buf_alloc(rc->rc_size);
- abd_copy_to_buf(orig[c], rc->rc_abd, rc->rc_size);
+
+ orig[c] = abd_alloc_sametype(rc->rc_abd, rc->rc_size);
+ abd_copy(orig[c], rc->rc_abd, rc->rc_size);
}
vdev_raidz_generate_parity(rm);
rc = &rm->rm_col[c];
if (!rc->rc_tried || rc->rc_error != 0)
continue;
- if (bcmp(orig[c], abd_to_buf(rc->rc_abd), rc->rc_size) != 0) {
+ if (abd_cmp(orig[c], rc->rc_abd) != 0) {
raidz_checksum_error(zio, rc, orig[c]);
rc->rc_error = SET_ERROR(ECKSUM);
ret++;
}
- zio_buf_free(orig[c], rc->rc_size);
+ abd_free(orig[c]);
}
return (ret);
{
raidz_map_t *rm = zio->io_vsd;
raidz_col_t *rc;
- void *orig[VDEV_RAIDZ_MAXPARITY];
+ abd_t *orig[VDEV_RAIDZ_MAXPARITY];
int tstore[VDEV_RAIDZ_MAXPARITY + 2];
int *tgts = &tstore[1];
int curr, next, i, c, n;
ASSERT(orig[i] != NULL);
}
- orig[n - 1] = zio_buf_alloc(rm->rm_col[0].rc_size);
+ orig[n - 1] = abd_alloc_sametype(rm->rm_col[0].rc_abd,
+ rm->rm_col[0].rc_size);
curr = 0;
next = tgts[curr];
ASSERT3S(c, >=, 0);
ASSERT3S(c, <, rm->rm_cols);
rc = &rm->rm_col[c];
- abd_copy_to_buf(orig[i], rc->rc_abd,
- rc->rc_size);
+ abd_copy(orig[i], rc->rc_abd, rc->rc_size);
}
/*
for (i = 0; i < n; i++) {
c = tgts[i];
rc = &rm->rm_col[c];
- abd_copy_from_buf(rc->rc_abd, orig[i],
- rc->rc_size);
+ abd_copy(rc->rc_abd, orig[i], rc->rc_size);
}
do {
}
n--;
done:
- for (i = 0; i < n; i++) {
- zio_buf_free(orig[i], rm->rm_col[0].rc_size);
- }
+ for (i = 0; i < n; i++)
+ abd_free(orig[i]);
return (ret);
}
static zfs_ecksum_info_t *
annotate_ecksum(nvlist_t *ereport, zio_bad_cksum_t *info,
- const uint8_t *goodbuf, const uint8_t *badbuf, size_t size,
+ const abd_t *goodabd, const abd_t *badabd, size_t size,
boolean_t drop_if_identical)
{
- const uint64_t *good = (const uint64_t *)goodbuf;
- const uint64_t *bad = (const uint64_t *)badbuf;
+ const uint64_t *good;
+ const uint64_t *bad;
uint64_t allset = 0;
uint64_t allcleared = 0;
}
}
- if (badbuf == NULL || goodbuf == NULL)
+ if (badabd == NULL || goodabd == NULL)
return (eip);
ASSERT3U(size, ==, nui64s * sizeof (uint64_t));
ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
ASSERT3U(size, <=, UINT32_MAX);
+ good = (const uint64_t *) abd_borrow_buf_copy((abd_t *)goodabd, size);
+ bad = (const uint64_t *) abd_borrow_buf_copy((abd_t *)badabd, size);
+
/* build up the range list by comparing the two buffers. */
for (idx = 0; idx < nui64s; idx++) {
if (good[idx] == bad[idx]) {
*/
if (inline_size == 0 && drop_if_identical) {
kmem_free(eip, sizeof (*eip));
+ abd_return_buf((abd_t *)goodabd, (void *)good, size);
+ abd_return_buf((abd_t *)badabd, (void *)bad, size);
return (NULL);
}
eip->zei_ranges[range].zr_start *= sizeof (uint64_t);
eip->zei_ranges[range].zr_end *= sizeof (uint64_t);
}
+
+ abd_return_buf((abd_t *)goodabd, (void *)good, size);
+ abd_return_buf((abd_t *)badabd, (void *)bad, size);
+
eip->zei_allowed_mingap *= sizeof (uint64_t);
inline_size *= sizeof (uint64_t);
}
void
-zfs_ereport_finish_checksum(zio_cksum_report_t *report,
- const void *good_data, const void *bad_data, boolean_t drop_if_identical)
+zfs_ereport_finish_checksum(zio_cksum_report_t *report, const abd_t *good_data,
+ const abd_t *bad_data, boolean_t drop_if_identical)
{
#ifdef _KERNEL
zfs_ecksum_info_t *info;
void
zfs_ereport_post_checksum(spa_t *spa, vdev_t *vd,
struct zio *zio, uint64_t offset, uint64_t length,
- const void *good_data, const void *bad_data, zio_bad_cksum_t *zbc)
+ const abd_t *good_data, const abd_t *bad_data, zio_bad_cksum_t *zbc)
{
#ifdef _KERNEL
nvlist_t *ereport = NULL;
kmem_cache_free(zio_data_buf_cache[c], buf);
}
+/*
+ * zio_cksum_free_f-compatible wrapper: frees an ABD handed back through
+ * the void * zcr_free callback.  The size argument is unused because the
+ * ABD tracks its own size.
+ */
+static void
+zio_abd_free(void *abd, size_t size)
+{
+ abd_free((abd_t *)abd);
+}
+
/*
* ==========================================================================
* Push and pop I/O transform buffers
*/
static void
zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr,
- const void *good_buf)
+ const abd_t *good_buf)
{
/* no processing needed */
zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE);
void
zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr, void *ignored)
{
+ /*
+  * Snapshot the bad I/O data into a freshly allocated ABD of the same
+  * type so the checksum report can inspect it later.  It is released
+  * via zio_abd_free() through the void * zcr_free callback.
+  */
- void *buf = zio_buf_alloc(zio->io_size);
+ abd_t *abd = abd_alloc_sametype(zio->io_abd, zio->io_size);
- abd_copy_to_buf(buf, zio->io_abd, zio->io_size);
+ abd_copy(abd, zio->io_abd, zio->io_size);
zcr->zcr_cbinfo = zio->io_size;
- zcr->zcr_cbdata = buf;
+ zcr->zcr_cbdata = abd;
zcr->zcr_finish = zio_vsd_default_cksum_finish;
- zcr->zcr_free = zio_buf_free;
+ zcr->zcr_free = zio_abd_free;
}
static int
* Always attempt to keep stack usage minimal here since
* we can be called recurisvely up to 19 levels deep.
*/
- uint64_t psize = zio->io_size;
+ const uint64_t psize = zio->io_size;
zio_t *pio, *pio_next;
int c, w;
zio_link_t *zl = NULL;
zio_cksum_report_t *zcr = zio->io_cksum_report;
uint64_t align = zcr->zcr_align;
uint64_t asize = P2ROUNDUP(psize, align);
- char *abuf = NULL;
abd_t *adata = zio->io_abd;
if (asize != psize) {
- adata = abd_alloc_linear(asize, B_TRUE);
+ adata = abd_alloc(asize, B_TRUE);
abd_copy(adata, zio->io_abd, psize);
abd_zero_off(adata, psize, asize - psize);
}
- if (adata != NULL)
- abuf = abd_borrow_buf_copy(adata, asize);
-
zio->io_cksum_report = zcr->zcr_next;
zcr->zcr_next = NULL;
- zcr->zcr_finish(zcr, abuf);
+ zcr->zcr_finish(zcr, adata);
zfs_ereport_free_checksum(zcr);
- if (adata != NULL)
- abd_return_buf(adata, abuf, asize);
-
if (asize != psize)
abd_free(adata);
}
* a tuple which is guaranteed to be unique for the life of the pool.
*/
static void
-zio_checksum_gang_verifier(zio_cksum_t *zcp, blkptr_t *bp)
+zio_checksum_gang_verifier(zio_cksum_t *zcp, const blkptr_t *bp)
{
const dva_t *dva = BP_IDENTITY(bp);
uint64_t txg = BP_PHYSICAL_BIRTH(bp);
zio_checksum_compute(zio_t *zio, enum zio_checksum checksum,
abd_t *abd, uint64_t size)
{
+ static const uint64_t zec_magic = ZEC_MAGIC;
blkptr_t *bp = zio->io_bp;
uint64_t offset = zio->io_offset;
zio_checksum_info_t *ci = &zio_checksum_table[checksum];
zio_checksum_template_init(checksum, spa);
if (ci->ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
- zio_eck_t *eck;
- void *data = abd_to_buf(abd);
+ zio_eck_t eck;
+ size_t eck_offset;
if (checksum == ZIO_CHECKSUM_ZILOG2) {
- zil_chain_t *zilc = data;
+ zil_chain_t zilc;
+ abd_copy_to_buf(&zilc, abd, sizeof (zil_chain_t));
- size = P2ROUNDUP_TYPED(zilc->zc_nused, ZIL_MIN_BLKSZ,
+ size = P2ROUNDUP_TYPED(zilc.zc_nused, ZIL_MIN_BLKSZ,
uint64_t);
- eck = &zilc->zc_eck;
+ eck = zilc.zc_eck;
+ eck_offset = offsetof(zil_chain_t, zc_eck);
} else {
- eck = (zio_eck_t *)((char *)data + size) - 1;
+ eck_offset = size - sizeof (zio_eck_t);
+ abd_copy_to_buf_off(&eck, abd, eck_offset,
+ sizeof (zio_eck_t));
}
- if (checksum == ZIO_CHECKSUM_GANG_HEADER)
- zio_checksum_gang_verifier(&eck->zec_cksum, bp);
- else if (checksum == ZIO_CHECKSUM_LABEL)
- zio_checksum_label_verifier(&eck->zec_cksum, offset);
- else
- bp->blk_cksum = eck->zec_cksum;
- eck->zec_magic = ZEC_MAGIC;
+
+ if (checksum == ZIO_CHECKSUM_GANG_HEADER) {
+ zio_checksum_gang_verifier(&eck.zec_cksum, bp);
+ abd_copy_from_buf_off(abd, &eck.zec_cksum,
+ eck_offset + offsetof(zio_eck_t, zec_cksum),
+ sizeof (zio_cksum_t));
+ } else if (checksum == ZIO_CHECKSUM_LABEL) {
+ zio_checksum_label_verifier(&eck.zec_cksum, offset);
+ abd_copy_from_buf_off(abd, &eck.zec_cksum,
+ eck_offset + offsetof(zio_eck_t, zec_cksum),
+ sizeof (zio_cksum_t));
+ } else {
+ bp->blk_cksum = eck.zec_cksum;
+ }
+
+ abd_copy_from_buf_off(abd, &zec_magic,
+ eck_offset + offsetof(zio_eck_t, zec_magic),
+ sizeof (zec_magic));
+
ci->ci_func[0](abd, size, spa->spa_cksum_tmpls[checksum],
&cksum);
- eck->zec_cksum = cksum;
+
+ abd_copy_from_buf_off(abd, &cksum,
+ eck_offset + offsetof(zio_eck_t, zec_cksum),
+ sizeof (zio_cksum_t));
} else {
ci->ci_func[0](abd, size, spa->spa_cksum_tmpls[checksum],
&bp->blk_cksum);
}
int
-zio_checksum_error_impl(spa_t *spa, blkptr_t *bp, enum zio_checksum checksum,
- abd_t *abd, uint64_t size, uint64_t offset, zio_bad_cksum_t *info)
+zio_checksum_error_impl(spa_t *spa, const blkptr_t *bp,
+ enum zio_checksum checksum, abd_t *abd, uint64_t size, uint64_t offset,
+ zio_bad_cksum_t *info)
{
zio_checksum_info_t *ci = &zio_checksum_table[checksum];
- int byteswap;
zio_cksum_t actual_cksum, expected_cksum;
+ zio_eck_t eck;
+ int byteswap;
if (checksum >= ZIO_CHECKSUM_FUNCTIONS || ci->ci_func[0] == NULL)
return (SET_ERROR(EINVAL));
zio_checksum_template_init(checksum, spa);
if (ci->ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
- zio_eck_t *eck;
zio_cksum_t verifier;
size_t eck_offset;
- uint64_t data_size = size;
- void *data = abd_borrow_buf_copy(abd, data_size);
if (checksum == ZIO_CHECKSUM_ZILOG2) {
- zil_chain_t *zilc = data;
+ zil_chain_t zilc;
uint64_t nused;
- eck = &zilc->zc_eck;
- if (eck->zec_magic == ZEC_MAGIC) {
- nused = zilc->zc_nused;
- } else if (eck->zec_magic == BSWAP_64(ZEC_MAGIC)) {
- nused = BSWAP_64(zilc->zc_nused);
+ abd_copy_to_buf(&zilc, abd, sizeof (zil_chain_t));
+
+ eck = zilc.zc_eck;
+ eck_offset = offsetof(zil_chain_t, zc_eck) +
+ offsetof(zio_eck_t, zec_cksum);
+
+ if (eck.zec_magic == ZEC_MAGIC) {
+ nused = zilc.zc_nused;
+ } else if (eck.zec_magic == BSWAP_64(ZEC_MAGIC)) {
+ nused = BSWAP_64(zilc.zc_nused);
} else {
- abd_return_buf(abd, data, data_size);
return (SET_ERROR(ECKSUM));
}
- if (nused > data_size) {
- abd_return_buf(abd, data, data_size);
+ if (nused > size) {
return (SET_ERROR(ECKSUM));
}
size = P2ROUNDUP_TYPED(nused, ZIL_MIN_BLKSZ, uint64_t);
} else {
- eck = (zio_eck_t *)((char *)data + data_size) - 1;
+ eck_offset = size - sizeof (zio_eck_t);
+ abd_copy_to_buf_off(&eck, abd, eck_offset,
+ sizeof (zio_eck_t));
+ eck_offset += offsetof(zio_eck_t, zec_cksum);
}
if (checksum == ZIO_CHECKSUM_GANG_HEADER)
else
verifier = bp->blk_cksum;
- byteswap = (eck->zec_magic == BSWAP_64(ZEC_MAGIC));
+ byteswap = (eck.zec_magic == BSWAP_64(ZEC_MAGIC));
if (byteswap)
byteswap_uint64_array(&verifier, sizeof (zio_cksum_t));
- eck_offset = (size_t)(&eck->zec_cksum) - (size_t)data;
- expected_cksum = eck->zec_cksum;
- eck->zec_cksum = verifier;
- abd_return_buf_copy(abd, data, data_size);
+ expected_cksum = eck.zec_cksum;
+
+ abd_copy_from_buf_off(abd, &verifier, eck_offset,
+ sizeof (zio_cksum_t));
ci->ci_func[byteswap](abd, size,
spa->spa_cksum_tmpls[checksum], &actual_cksum);
- abd_copy_from_buf_off(abd, &expected_cksum,
- eck_offset, sizeof (zio_cksum_t));
+
+ abd_copy_from_buf_off(abd, &expected_cksum, eck_offset,
+ sizeof (zio_cksum_t));
if (byteswap) {
byteswap_uint64_array(&expected_cksum,