granicus.if.org Git - zfs/commitdiff
Fix 'zfs recv' of non large_dnode send streams
authorTom Caputi <tcaputi@datto.com>
Thu, 28 Jun 2018 21:55:11 +0000 (17:55 -0400)
committerTony Hutter <hutter2@llnl.gov>
Fri, 6 Jul 2018 09:46:51 +0000 (02:46 -0700)
Currently, there is a bug where older send streams without the
DMU_BACKUP_FEATURE_LARGE_DNODE flag are not handled correctly.
The code in receive_object() fails to handle cases where
drro->drr_dn_slots is set to 0, which is always the case when the
sending code does not support this feature flag. This patch fixes
the issue by ensuring that a value of 0 is treated as
DNODE_MIN_SLOTS.

Tested-by: DHE <git@dehacked.net>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Tom Caputi <tcaputi@datto.com>
Closes #7617
Closes #7662

module/zfs/dmu_object.c
module/zfs/dmu_send.c

index 1fc71d1036f9dcf9e8d3ff7ed2656bd2ec0c362b..40c25362ae4f9295453c23fa7c4afdc2aecb24e2 100644 (file)
@@ -261,6 +261,9 @@ dmu_object_reclaim_dnsize(objset_t *os, uint64_t object, dmu_object_type_t ot,
        int dn_slots = dnodesize >> DNODE_SHIFT;
        int err;
 
+       if (dn_slots == 0)
+               dn_slots = DNODE_MIN_SLOTS;
+
        if (object == DMU_META_DNODE_OBJECT)
                return (SET_ERROR(EBADF));
 
index 1de0f316f0caa8aaed6e07fe6578ffc4f8e1def6..13aae9606f342260aa698c6a879ddeaa53cd8dbe 100644 (file)
@@ -2139,6 +2139,8 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
        dmu_tx_t *tx;
        uint64_t object;
        int err;
+       uint8_t dn_slots = drro->drr_dn_slots != 0 ?
+           drro->drr_dn_slots : DNODE_MIN_SLOTS;
 
        if (drro->drr_type == DMU_OT_NONE ||
            !DMU_OT_IS_VALID(drro->drr_type) ||
@@ -2150,7 +2152,7 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
            drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) ||
            drro->drr_bonuslen >
            DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(rwa->os))) ||
-           drro->drr_dn_slots >
+           dn_slots >
            (spa_maxdnodesize(dmu_objset_spa(rwa->os)) >> DNODE_SHIFT))  {
                return (SET_ERROR(EINVAL));
        }
@@ -2177,12 +2179,31 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
 
                if (drro->drr_blksz != doi.doi_data_block_size ||
                    nblkptr < doi.doi_nblkptr ||
-                   drro->drr_dn_slots != doi.doi_dnodesize >> DNODE_SHIFT) {
+                   dn_slots != doi.doi_dnodesize >> DNODE_SHIFT) {
                        err = dmu_free_long_range(rwa->os, drro->drr_object,
                            0, DMU_OBJECT_END);
                        if (err != 0)
                                return (SET_ERROR(EINVAL));
                }
+
+               /*
+                * The dmu does not currently support decreasing nlevels
+                * on an object. For non-raw sends, this does not matter
+                * and the new object can just use the previous one's nlevels.
+                * For raw sends, however, the structure of the received dnode
+                * (including nlevels) must match that of the send side.
+                * Therefore, instead of using dmu_object_reclaim(), we must
+                * free the object completely and call dmu_object_claim_dnsize()
+                * instead.
+                */
+               if (dn_slots != doi.doi_dnodesize >> DNODE_SHIFT) {
+                       err = dmu_free_long_object(rwa->os, drro->drr_object);
+                       if (err != 0)
+                               return (SET_ERROR(EINVAL));
+
+                       txg_wait_synced(dmu_objset_pool(rwa->os), 0);
+                       object = DMU_NEW_OBJECT;
+               }
        } else if (err == EEXIST) {
                /*
                 * The object requested is currently an interior slot of a
@@ -2204,9 +2225,9 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
         * another object from the previous snapshot. We must free
         * these objects before we attempt to allocate the new dnode.
         */
-       if (drro->drr_dn_slots > 1) {
+       if (dn_slots > 1) {
                for (uint64_t slot = drro->drr_object + 1;
-                   slot < drro->drr_object + drro->drr_dn_slots;
+                   slot < drro->drr_object + dn_slots;
                    slot++) {
                        dmu_object_info_t slot_doi;
 
@@ -2238,7 +2259,7 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
                err = dmu_object_claim_dnsize(rwa->os, drro->drr_object,
                    drro->drr_type, drro->drr_blksz,
                    drro->drr_bonustype, drro->drr_bonuslen,
-                   drro->drr_dn_slots << DNODE_SHIFT, tx);
+                   dn_slots << DNODE_SHIFT, tx);
        } else if (drro->drr_type != doi.doi_type ||
            drro->drr_blksz != doi.doi_data_block_size ||
            drro->drr_bonustype != doi.doi_bonus_type ||
@@ -2247,7 +2268,7 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
                err = dmu_object_reclaim_dnsize(rwa->os, drro->drr_object,
                    drro->drr_type, drro->drr_blksz,
                    drro->drr_bonustype, drro->drr_bonuslen,
-                   drro->drr_dn_slots << DNODE_SHIFT, tx);
+                   dn_slots << DNODE_SHIFT, tx);
        }
        if (err != 0) {
                dmu_tx_commit(tx);