/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dnode.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_impl.h>
#include <sys/callb.h>
#define	SET_BOOKMARK(zb, objset, object, level, blkid)	\
{							\
	(zb)->zb_objset = objset;			\
	(zb)->zb_object = object;			\
	(zb)->zb_level = level;				\
	(zb)->zb_blkid = blkid;				\
}
struct prefetch_data {
	kmutex_t pd_mtx;
	kcondvar_t pd_cv;
	int pd_blks_max;
	int pd_blks_fetched;
	int pd_flags;
	boolean_t pd_cancel;
	boolean_t pd_exited;
};

struct traverse_data {
	spa_t *td_spa;
	uint64_t td_objset;
	blkptr_t *td_rootbp;
	uint64_t td_min_txg;
	int td_flags;
	struct prefetch_data *td_pfd;
	blkptr_cb_t *td_func;
	void *td_arg;
};

static int traverse_dnode(struct traverse_data *td, const dnode_phys_t *dnp,
    arc_buf_t *buf, uint64_t objset, uint64_t object);
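
/*
 * Overview: traverse_dataset() and traverse_pool() funnel into
 * traverse_impl(), which walks the block tree rooted at a dataset's objset
 * block via traverse_visitbp(), invoking the caller's blkptr_cb_t in
 * pre-order and/or post-order (TRAVERSE_PRE / TRAVERSE_POST).  When a
 * TRAVERSE_PREFETCH flag is set, traverse_impl() also dispatches
 * traverse_prefetch_thread(), which runs the same walk with
 * traverse_prefetcher() as its callback so reads are issued ahead of the
 * main traversal; the two threads pace each other through the prefetch_data
 * counters and condition variable.
 */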
/* ARGSUSED */
static void
traverse_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	struct traverse_data *td = arg;
	zbookmark_t zb;

	if (bp->blk_birth == 0)
		return;

	if (claim_txg == 0 && bp->blk_birth >= spa_first_txg(td->td_spa))
		return;

	zb.zb_objset = td->td_objset;
	zb.zb_object = 0;
	zb.zb_level = -1;
	zb.zb_blkid = bp->blk_cksum.zc_word[ZIL_ZC_SEQ];

	VERIFY(0 == td->td_func(td->td_spa, bp, &zb, NULL, td->td_arg));
}
/* ARGSUSED */
static void
traverse_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
{
	struct traverse_data *td = arg;

	if (lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		zbookmark_t zb;

		if (bp->blk_birth == 0)
			return;

		if (claim_txg == 0 || bp->blk_birth < claim_txg)
			return;

		zb.zb_objset = td->td_objset;
		zb.zb_object = lr->lr_foid;
		zb.zb_level = BP_GET_LEVEL(bp);
		zb.zb_blkid = lr->lr_offset / BP_GET_LSIZE(bp);

		VERIFY(0 == td->td_func(td->td_spa, bp, &zb, NULL,
		    td->td_arg));
	}
}
static void
traverse_zil(struct traverse_data *td, zil_header_t *zh)
{
	uint64_t claim_txg = zh->zh_claim_txg;
	zilog_t *zilog;

	/*
	 * We only want to visit blocks that have been claimed but not yet
	 * replayed (or, in read-only mode, blocks that *would* be claimed).
	 */
	if (claim_txg == 0 && spa_writeable(td->td_spa))
		return;

	zilog = zil_alloc(spa_get_dsl(td->td_spa)->dp_meta_objset, zh);

	(void) zil_parse(zilog, traverse_zil_block, traverse_zil_record, td,
	    claim_txg);

	zil_free(zilog);
}
static int
traverse_visitbp(struct traverse_data *td, const dnode_phys_t *dnp,
    arc_buf_t *pbuf, blkptr_t *bp, const zbookmark_t *zb)
{
	zbookmark_t czb;
	int err = 0;
	arc_buf_t *buf = NULL;
	struct prefetch_data *pd = td->td_pfd;

	if (bp->blk_birth == 0) {
		err = td->td_func(td->td_spa, NULL, zb, dnp, td->td_arg);
		return (err);
	}

	if (bp->blk_birth <= td->td_min_txg)
		return (0);
	if (pd && !pd->pd_exited &&
	    ((pd->pd_flags & TRAVERSE_PREFETCH_DATA) ||
	    BP_GET_TYPE(bp) == DMU_OT_DNODE || BP_GET_LEVEL(bp) > 0)) {
		mutex_enter(&pd->pd_mtx);
		ASSERT(pd->pd_blks_fetched >= 0);
		while (pd->pd_blks_fetched == 0 && !pd->pd_exited)
			cv_wait(&pd->pd_cv, &pd->pd_mtx);
		pd->pd_blks_fetched--;
		cv_broadcast(&pd->pd_cv);
		mutex_exit(&pd->pd_mtx);
	}

	if (td->td_flags & TRAVERSE_PRE) {
		err = td->td_func(td->td_spa, bp, zb, dnp, td->td_arg);
		if (err)
			return (err);
	}
	if (BP_GET_LEVEL(bp) > 0) {
		uint32_t flags = ARC_WAIT;
		int i;
		blkptr_t *cbp;
		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;

		err = arc_read(NULL, td->td_spa, bp, pbuf,
		    arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err)
			return (err);

		/* recursively visitbp() blocks below this */
		cbp = buf->b_data;
		for (i = 0; i < epb; i++, cbp++) {
			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			err = traverse_visitbp(td, dnp, buf, cbp, &czb);
			if (err)
				break;
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		uint32_t flags = ARC_WAIT;
		int i;
		int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;

		err = arc_read(NULL, td->td_spa, bp, pbuf,
		    arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err)
			return (err);

		/* recursively visitbp() blocks below this */
		dnp = buf->b_data;
		for (i = 0; i < epb && err == 0; i++, dnp++) {
			err = traverse_dnode(td, dnp, buf, zb->zb_objset,
			    zb->zb_blkid * epb + i);
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		uint32_t flags = ARC_WAIT;
		objset_phys_t *osp;

		err = arc_read_nolock(NULL, td->td_spa, bp,
		    arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err)
			return (err);

		osp = buf->b_data;
		traverse_zil(td, &osp->os_zil_header);

		dnp = &osp->os_meta_dnode;
		err = traverse_dnode(td, dnp, buf, zb->zb_objset, 0);
		if (err == 0 && arc_buf_size(buf) >= sizeof (objset_phys_t)) {
			dnp = &osp->os_userused_dnode;
			err = traverse_dnode(td, dnp, buf, zb->zb_objset,
			    DMU_USERUSED_OBJECT);
		}
		if (err == 0 && arc_buf_size(buf) >= sizeof (objset_phys_t)) {
			dnp = &osp->os_groupused_dnode;
			err = traverse_dnode(td, dnp, buf, zb->zb_objset,
			    DMU_GROUPUSED_OBJECT);
		}
	}

	if (buf)
		(void) arc_buf_remove_ref(buf, &buf);

	if (err == 0 && (td->td_flags & TRAVERSE_POST))
		err = td->td_func(td->td_spa, bp, zb, dnp, td->td_arg);

	return (err);
}
static int
traverse_dnode(struct traverse_data *td, const dnode_phys_t *dnp,
    arc_buf_t *buf, uint64_t objset, uint64_t object)
{
	int j, err = 0;
	zbookmark_t czb;

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
		err = traverse_visitbp(td, dnp, buf,
		    (blkptr_t *)&dnp->dn_blkptr[j], &czb);
		if (err)
			break;
	}
	return (err);
}
/* ARGSUSED */
static int
traverse_prefetcher(spa_t *spa, blkptr_t *bp, const zbookmark_t *zb,
    const dnode_phys_t *dnp, void *arg)
{
	struct prefetch_data *pfd = arg;
	uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH;

	ASSERT(pfd->pd_blks_fetched >= 0);
	if (pfd->pd_cancel)
		return (EINTR);

	if (bp == NULL || !((pfd->pd_flags & TRAVERSE_PREFETCH_DATA) ||
	    BP_GET_TYPE(bp) == DMU_OT_DNODE || BP_GET_LEVEL(bp) > 0))
		return (0);

	mutex_enter(&pfd->pd_mtx);
	while (!pfd->pd_cancel && pfd->pd_blks_fetched >= pfd->pd_blks_max)
		cv_wait(&pfd->pd_cv, &pfd->pd_mtx);
	pfd->pd_blks_fetched++;
	cv_broadcast(&pfd->pd_cv);
	mutex_exit(&pfd->pd_mtx);

	(void) arc_read_nolock(NULL, spa, bp, NULL, NULL,
	    ZIO_PRIORITY_ASYNC_READ,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
	    &aflags, zb);

	return (0);
}
static void
traverse_prefetch_thread(void *arg)
{
	struct traverse_data *td_main = arg;
	struct traverse_data td = *td_main;
	zbookmark_t czb;

	td.td_func = traverse_prefetcher;
	td.td_arg = td_main->td_pfd;
	td.td_pfd = NULL;

	SET_BOOKMARK(&czb, td.td_objset, 0, -1, 0);
	(void) traverse_visitbp(&td, NULL, NULL, td.td_rootbp, &czb);

	mutex_enter(&td_main->td_pfd->pd_mtx);
	td_main->td_pfd->pd_exited = B_TRUE;
	cv_broadcast(&td_main->td_pfd->pd_cv);
	mutex_exit(&td_main->td_pfd->pd_mtx);
}
/*
 * NB: dataset must not be changing on-disk (eg, is a snapshot or we are
 * in syncing context).
 */
static int
traverse_impl(spa_t *spa, uint64_t objset, blkptr_t *rootbp,
    uint64_t txg_start, int flags, blkptr_cb_t func, void *arg)
{
	struct traverse_data td;
	struct prefetch_data pd = { 0 };
	zbookmark_t czb;
	int err;

	td.td_spa = spa;
	td.td_objset = objset;
	td.td_rootbp = rootbp;
	td.td_min_txg = txg_start;
	td.td_func = func;
	td.td_arg = arg;
	td.td_pfd = &pd;
	td.td_flags = flags;

	pd.pd_blks_max = 100;
	pd.pd_flags = flags;
	mutex_init(&pd.pd_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&pd.pd_cv, NULL, CV_DEFAULT, NULL);

	if (!(flags & TRAVERSE_PREFETCH) ||
	    0 == taskq_dispatch(system_taskq, traverse_prefetch_thread,
	    &td, TQ_NOQUEUE))
		pd.pd_exited = B_TRUE;

	SET_BOOKMARK(&czb, objset, 0, -1, 0);
	err = traverse_visitbp(&td, NULL, NULL, rootbp, &czb);

	mutex_enter(&pd.pd_mtx);
	pd.pd_cancel = B_TRUE;
	cv_broadcast(&pd.pd_cv);
	while (!pd.pd_exited)
		cv_wait(&pd.pd_cv, &pd.pd_mtx);
	mutex_exit(&pd.pd_mtx);

	mutex_destroy(&pd.pd_mtx);
	cv_destroy(&pd.pd_cv);

	return (err);
}
/*
 * NB: dataset must not be changing on-disk (eg, is a snapshot or we are
 * in syncing context).
 */
int
traverse_dataset(dsl_dataset_t *ds, uint64_t txg_start, int flags,
    blkptr_cb_t func, void *arg)
{
	return (traverse_impl(ds->ds_dir->dd_pool->dp_spa, ds->ds_object,
	    &ds->ds_phys->ds_bp, txg_start, flags, func, arg));
}
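
/*
 * Illustrative usage sketch: a minimal blkptr_cb_t that counts the block
 * pointers visited, and a helper that walks a dataset with it.  The names
 * count_blkptr_cb and count_ds_blocks are hypothetical examples, not part
 * of the DMU interface; they only show the calling convention of
 * traverse_dataset().
 */
/* ARGSUSED */
static int
count_blkptr_cb(spa_t *spa, blkptr_t *bp, const zbookmark_t *zb,
    const dnode_phys_t *dnp, void *arg)
{
	uint64_t *countp = arg;

	/* bp is NULL for holes (blk_birth == 0); skip those */
	if (bp != NULL)
		(*countp)++;
	return (0);
}

static int
count_ds_blocks(dsl_dataset_t *ds, uint64_t *countp)
{
	*countp = 0;
	/* pre-order visit of every block born after txg 0, with prefetch */
	return (traverse_dataset(ds, 0, TRAVERSE_PRE | TRAVERSE_PREFETCH,
	    count_blkptr_cb, countp));
}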
/*
 * NB: pool must not be changing on-disk (eg, from zdb or sync context).
 */
int
traverse_pool(spa_t *spa, blkptr_cb_t func, void *arg)
{
	int err;
	uint64_t obj;
	dsl_pool_t *dp = spa_get_dsl(spa);
	objset_t *mos = dp->dp_meta_objset;

	/* visit the MOS */
	err = traverse_impl(spa, 0, spa_get_rootblkptr(spa),
	    0, TRAVERSE_PRE, func, arg);
	if (err)
		return (err);
	/* visit each dataset */
	for (obj = 1; err == 0; err = dmu_object_next(mos, &obj, FALSE, 0)) {
		dmu_object_info_t doi;

		err = dmu_object_info(mos, obj, &doi);
		if (err)
			return (err);

		if (doi.doi_type == DMU_OT_DSL_DATASET) {
			dsl_dataset_t *ds;

			rw_enter(&dp->dp_config_rwlock, RW_READER);
			err = dsl_dataset_hold_obj(dp, obj, FTAG, &ds);
			rw_exit(&dp->dp_config_rwlock);
			if (err)
				return (err);
			err = traverse_dataset(ds,
			    ds->ds_phys->ds_prev_snap_txg, TRAVERSE_PRE,
			    func, arg);
			dsl_dataset_rele(ds, FTAG);
			if (err)
				return (err);
		}
	}
	if (err == ESRCH)
		err = 0;
	return (err);
}
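
/*
 * Illustrative sketch: traverse_pool() visits the MOS and then every DSL
 * dataset from its prev_snap_txg forward, so a whole-pool walk can reuse
 * the same kind of callback shown above.  The helper name
 * count_pool_blocks is hypothetical and only shows the calling convention.
 */
static int
count_pool_blocks(spa_t *spa, uint64_t *countp)
{
	*countp = 0;
	return (traverse_pool(spa, count_blkptr_cb, countp));
}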