/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dnode.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_impl.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/callb.h>
#include <sys/zfeature.h>
int32_t zfs_pd_bytes_max = 50 * 1024 * 1024;	/* 50MB */
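
/*
 * The traversal below uses two cooperating contexts: the main thread
 * walks the block tree in logical order, while an optional prefetch
 * thread re-runs the same walk ahead of it, issuing asynchronous reads.
 * prefetch_data_t is the state shared between the two; zfs_pd_bytes_max
 * above bounds how many bytes of outstanding prefetch I/O the
 * prefetcher may run ahead of the consuming traversal.
 */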
typedef struct prefetch_data {
	kmutex_t pd_mtx;
	kcondvar_t pd_cv;
	int32_t pd_bytes_fetched;
	int pd_flags;
	boolean_t pd_cancel;
	boolean_t pd_exited;
	zbookmark_phys_t pd_resume;
} prefetch_data_t;

typedef struct traverse_data {
	spa_t *td_spa;
	uint64_t td_objset;
	blkptr_t *td_rootbp;
	uint64_t td_min_txg;
	zbookmark_phys_t *td_resume;
	int td_flags;
	prefetch_data_t *td_pfd;
	boolean_t td_paused;
	uint64_t td_hole_birth_enabled_txg;
	blkptr_cb_t *td_func;
	void *td_arg;
	boolean_t td_realloc_possible;
} traverse_data_t;

static int traverse_dnode(traverse_data_t *td, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object);
static void prefetch_dnode_metadata(traverse_data_t *td, const dnode_phys_t *,
    uint64_t objset, uint64_t object);
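
/*
 * The two ZIL callbacks below are invoked by zil_parse() for each log
 * block and log record in an intent log.  A claim_txg of zero means the
 * log has not been claimed (e.g. the pool is read-only), in which case
 * only blocks that are already stable on disk are visited.
 */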
static int
traverse_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	traverse_data_t *td = arg;
	zbookmark_phys_t zb;

	if (BP_IS_HOLE(bp))
		return (0);

	if (claim_txg == 0 && bp->blk_birth >= spa_first_txg(td->td_spa))
		return (-1);

	SET_BOOKMARK(&zb, td->td_objset, ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
	    bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	(void) td->td_func(td->td_spa, zilog, bp, &zb, NULL, td->td_arg);

	return (0);
}

static int
traverse_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
{
	traverse_data_t *td = arg;

	if (lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		zbookmark_phys_t zb;

		if (BP_IS_HOLE(bp))
			return (0);

		if (claim_txg == 0 || bp->blk_birth < claim_txg)
			return (0);

		SET_BOOKMARK(&zb, td->td_objset, lr->lr_foid,
		    ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));

		(void) td->td_func(td->td_spa, zilog, bp, &zb, NULL,
		    td->td_arg);
	}
	return (0);
}

static void
traverse_zil(traverse_data_t *td, zil_header_t *zh)
{
	uint64_t claim_txg = zh->zh_claim_txg;
	zilog_t *zilog;

	/*
	 * We only want to visit blocks that have been claimed but not yet
	 * replayed; plus, in read-only mode, blocks that are already stable.
	 */
	if (claim_txg == 0 && spa_writeable(td->td_spa))
		return;

	zilog = zil_alloc(spa_get_dsl(td->td_spa)->dp_meta_objset, zh);

	(void) zil_parse(zilog, traverse_zil_block, traverse_zil_record, td,
	    claim_txg);

	zil_free(zilog);
}

typedef enum resume_skip {
	RESUME_SKIP_ALL,
	RESUME_SKIP_NONE,
	RESUME_SKIP_CHILDREN
} resume_skip_t;

/*
 * Returns RESUME_SKIP_ALL if td indicates that we are resuming a traversal and
 * the block indicated by zb does not need to be visited at all. Returns
 * RESUME_SKIP_CHILDREN if we are resuming a post traversal and we reach the
 * resume point. This indicates that this block should be visited but not its
 * children (since they must have been visited in a previous traversal).
 * Otherwise returns RESUME_SKIP_NONE.
 */
static resume_skip_t
resume_skip_check(traverse_data_t *td, const dnode_phys_t *dnp,
    const zbookmark_phys_t *zb)
{
	if (td->td_resume != NULL && !ZB_IS_ZERO(td->td_resume)) {
		/*
		 * If we already visited this bp & everything below,
		 * don't bother doing it again.
		 */
		if (zbookmark_subtree_completed(dnp, zb, td->td_resume))
			return (RESUME_SKIP_ALL);

		/*
		 * If we found the block we're trying to resume from, zero
		 * the bookmark out to indicate that we have resumed.
		 */
		if (bcmp(zb, td->td_resume, sizeof (*zb)) == 0) {
			bzero(td->td_resume, sizeof (*zb));
			if (td->td_flags & TRAVERSE_POST)
				return (RESUME_SKIP_CHILDREN);
		}
	}
	return (RESUME_SKIP_NONE);
}

static void
traverse_prefetch_metadata(traverse_data_t *td,
    const blkptr_t *bp, const zbookmark_phys_t *zb)
{
	arc_flags_t flags = ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;

	if (!(td->td_flags & TRAVERSE_PREFETCH_METADATA))
		return;
	/*
	 * If we are in the process of resuming, don't prefetch, because
	 * some children will not be needed (and in fact may have already
	 * been freed).
	 */
	if (td->td_resume != NULL && !ZB_IS_ZERO(td->td_resume))
		return;
	if (BP_IS_HOLE(bp) || bp->blk_birth <= td->td_min_txg)
		return;
	if (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE)
		return;

	(void) arc_read(NULL, td->td_spa, bp, NULL, NULL,
	    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
}

static boolean_t
prefetch_needed(prefetch_data_t *pfd, const blkptr_t *bp)
{
	ASSERT(pfd->pd_flags & TRAVERSE_PREFETCH_DATA);
	if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp) ||
	    BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG)
		return (B_FALSE);
	return (B_TRUE);
}
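
/*
 * Visit bp and, depending on its type, recurse into its children:
 * indirect blocks contain an array of block pointers, DMU_OT_DNODE
 * blocks contain an array of dnodes (each visited via traverse_dnode()),
 * and DMU_OT_OBJSET blocks contain the meta-dnode and, when present,
 * the user/group accounting dnodes.
 */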
static int
traverse_visitbp(traverse_data_t *td, const dnode_phys_t *dnp,
    const blkptr_t *bp, const zbookmark_phys_t *zb)
{
	int err = 0;
	arc_buf_t *buf = NULL;
	prefetch_data_t *pd = td->td_pfd;

	switch (resume_skip_check(td, dnp, zb)) {
	case RESUME_SKIP_ALL:
		return (0);
	case RESUME_SKIP_CHILDREN:
		goto post;
	case RESUME_SKIP_NONE:
		break;
	default:
		ASSERT(0);
	}

	if (bp->blk_birth == 0) {
		/*
		 * Since this block has a birth time of 0 it must be one of
		 * two things: a hole created before the
		 * SPA_FEATURE_HOLE_BIRTH feature was enabled, or a hole
		 * which has always been a hole in an object.
		 *
		 * If a file is written sparsely, then the unwritten parts of
		 * the file were "always holes" -- that is, they have been
		 * holes since this object was allocated.  However, we (and
		 * our callers) can not necessarily tell when an object was
		 * allocated.  Therefore, if it's possible that this object
		 * was freed and then its object number reused, we need to
		 * visit all the holes with birth==0.
		 *
		 * If it isn't possible that the object number was reused,
		 * then if SPA_FEATURE_HOLE_BIRTH was enabled before we wrote
		 * all the blocks we will visit as part of this traversal,
		 * then this hole must have always existed, so we can skip
		 * it.  We visit blocks born after (exclusive) td_min_txg.
		 *
		 * Note that the meta-dnode cannot be reallocated.
		 */
		if ((!td->td_realloc_possible ||
		    zb->zb_object == DMU_META_DNODE_OBJECT) &&
		    td->td_hole_birth_enabled_txg <= td->td_min_txg)
			return (0);
	} else if (bp->blk_birth <= td->td_min_txg) {
		return (0);
	}
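
	/*
	 * Flow control against the prefetch thread: wait until the
	 * prefetcher has fetched at least this block's worth of data (or
	 * has exited) before consuming it, which keeps the two threads
	 * within zfs_pd_bytes_max of each other.
	 */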
	if (pd != NULL && !pd->pd_exited && prefetch_needed(pd, bp)) {
		uint64_t size = BP_GET_LSIZE(bp);
		mutex_enter(&pd->pd_mtx);
		ASSERT(pd->pd_bytes_fetched >= 0);
		while (pd->pd_bytes_fetched < size && !pd->pd_exited)
			cv_wait_sig(&pd->pd_cv, &pd->pd_mtx);
		pd->pd_bytes_fetched -= size;
		cv_broadcast(&pd->pd_cv);
		mutex_exit(&pd->pd_mtx);
	}

	if (BP_IS_HOLE(bp)) {
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp, td->td_arg);
		if (err != 0)
			goto post;
		return (0);
	}

	if (td->td_flags & TRAVERSE_PRE) {
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp,
		    td->td_arg);
		if (err == TRAVERSE_VISIT_NO_CHILDREN)
			return (0);
		if (err != 0)
			goto post;
	}

	if (BP_GET_LEVEL(bp) > 0) {
		uint32_t flags = ARC_FLAG_WAIT;
		int32_t i;
		int32_t epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
		zbookmark_phys_t *czb;

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err != 0)
			goto post;

		czb = kmem_alloc(sizeof (zbookmark_phys_t), KM_SLEEP);

		for (i = 0; i < epb; i++) {
			SET_BOOKMARK(czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			traverse_prefetch_metadata(td,
			    &((blkptr_t *)buf->b_data)[i], czb);
		}

		/* recursively visitbp() blocks below this */
		for (i = 0; i < epb; i++) {
			SET_BOOKMARK(czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			err = traverse_visitbp(td, dnp,
			    &((blkptr_t *)buf->b_data)[i], czb);
			if (err != 0)
				break;
		}

		kmem_free(czb, sizeof (zbookmark_phys_t));

	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		uint32_t flags = ARC_FLAG_WAIT;
		int32_t i;
		int32_t epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
		dnode_phys_t *child_dnp;

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err != 0)
			goto post;
		child_dnp = buf->b_data;

		for (i = 0; i < epb; i += child_dnp[i].dn_extra_slots + 1) {
			prefetch_dnode_metadata(td, &child_dnp[i],
			    zb->zb_objset, zb->zb_blkid * epb + i);
		}

		/* recursively visitbp() blocks below this */
		for (i = 0; i < epb; i += child_dnp[i].dn_extra_slots + 1) {
			err = traverse_dnode(td, &child_dnp[i],
			    zb->zb_objset, zb->zb_blkid * epb + i);
			if (err != 0)
				break;
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		objset_phys_t *osp;

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err != 0)
			goto post;

		osp = buf->b_data;
		prefetch_dnode_metadata(td, &osp->os_meta_dnode, zb->zb_objset,
		    DMU_META_DNODE_OBJECT);
		/*
		 * See the block comment above for the goal of this variable.
		 * If the maxblkid of the meta-dnode is 0, then we know that
		 * we've never had more than DNODES_PER_BLOCK objects in the
		 * dataset, which means we can't have reused any object ids.
		 */
		if (osp->os_meta_dnode.dn_maxblkid == 0)
			td->td_realloc_possible = B_FALSE;

		if (arc_buf_size(buf) >= sizeof (objset_phys_t)) {
			prefetch_dnode_metadata(td, &osp->os_groupused_dnode,
			    zb->zb_objset, DMU_GROUPUSED_OBJECT);
			prefetch_dnode_metadata(td, &osp->os_userused_dnode,
			    zb->zb_objset, DMU_USERUSED_OBJECT);
		}

		err = traverse_dnode(td, &osp->os_meta_dnode, zb->zb_objset,
		    DMU_META_DNODE_OBJECT);
		if (err == 0 && arc_buf_size(buf) >= sizeof (objset_phys_t)) {
			err = traverse_dnode(td, &osp->os_groupused_dnode,
			    zb->zb_objset, DMU_GROUPUSED_OBJECT);
		}
		if (err == 0 && arc_buf_size(buf) >= sizeof (objset_phys_t)) {
			err = traverse_dnode(td, &osp->os_userused_dnode,
			    zb->zb_objset, DMU_USERUSED_OBJECT);
		}
	}

	if (buf)
		(void) arc_buf_remove_ref(buf, &buf);

post:
	if (err == 0 && (td->td_flags & TRAVERSE_POST))
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp, td->td_arg);

	if ((td->td_flags & TRAVERSE_HARD) && (err == EIO || err == ECKSUM)) {
		/*
		 * Ignore this disk error as requested by the HARD flag,
		 * and continue traversal.
		 */
		err = 0;
	}

	/*
	 * If we are stopping here, set td_resume.
	 */
	if (td->td_resume != NULL && err != 0 && !td->td_paused) {
		td->td_resume->zb_objset = zb->zb_objset;
		td->td_resume->zb_object = zb->zb_object;
		td->td_resume->zb_level = 0;
		/*
		 * If we have stopped on an indirect block (e.g. due to
		 * i/o error), we have not visited anything below it.
		 * Set the bookmark to the first level-0 block that we need
		 * to visit.  This way, the resuming code does not need to
		 * deal with resuming from indirect blocks.
		 *
		 * Note, if zb_level <= 0, dnp may be NULL, so we don't want
		 * to dereference it.
		 */
		td->td_resume->zb_blkid = zb->zb_blkid;
		if (zb->zb_level > 0) {
			td->td_resume->zb_blkid <<= zb->zb_level *
			    (dnp->dn_indblkshift - SPA_BLKPTRSHIFT);
		}
		td->td_paused = B_TRUE;
	}

	return (err);
}

static void
prefetch_dnode_metadata(traverse_data_t *td, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object)
{
	int j;
	zbookmark_phys_t czb;

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
		traverse_prefetch_metadata(td, &dnp->dn_blkptr[j], &czb);
	}

	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		SET_BOOKMARK(&czb, objset, object, 0, DMU_SPILL_BLKID);
		traverse_prefetch_metadata(td, DN_SPILL_BLKPTR(dnp), &czb);
	}
}
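
/*
 * Visit a single dnode: issue the optional pre-order callback (at
 * ZB_DNODE_LEVEL), recurse into each of its block pointers and, if
 * present, its spill block, then issue the optional post-order callback.
 */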

static int
traverse_dnode(traverse_data_t *td, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object)
{
	int j, err = 0;
	zbookmark_phys_t czb;

	if (object != DMU_META_DNODE_OBJECT && td->td_resume != NULL &&
	    object < td->td_resume->zb_object)
		return (0);

	if (td->td_flags & TRAVERSE_PRE) {
		SET_BOOKMARK(&czb, objset, object, ZB_DNODE_LEVEL,
		    ZB_DNODE_BLKID);
		err = td->td_func(td->td_spa, NULL, NULL, &czb, dnp,
		    td->td_arg);
		if (err == TRAVERSE_VISIT_NO_CHILDREN)
			return (0);
		if (err != 0)
			return (err);
	}

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
		err = traverse_visitbp(td, dnp, &dnp->dn_blkptr[j], &czb);
		if (err != 0)
			break;
	}

	if (err == 0 && (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
		SET_BOOKMARK(&czb, objset, object, 0, DMU_SPILL_BLKID);
		err = traverse_visitbp(td, dnp, DN_SPILL_BLKPTR(dnp), &czb);
	}

	if (err == 0 && (td->td_flags & TRAVERSE_POST)) {
		SET_BOOKMARK(&czb, objset, object, ZB_DNODE_LEVEL,
		    ZB_DNODE_BLKID);
		err = td->td_func(td->td_spa, NULL, NULL, &czb, dnp,
		    td->td_arg);
		if (err == TRAVERSE_VISIT_NO_CHILDREN)
			return (0);
	}
	return (err);
}

/* ARGSUSED */
static int
traverse_prefetcher(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	prefetch_data_t *pfd = arg;
	arc_flags_t aflags = ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;

	ASSERT(pfd->pd_bytes_fetched >= 0);
	if (bp == NULL)
		return (0);
	if (pfd->pd_cancel)
		return (SET_ERROR(EINTR));

	if (!prefetch_needed(pfd, bp))
		return (0);

	mutex_enter(&pfd->pd_mtx);
	while (!pfd->pd_cancel && pfd->pd_bytes_fetched >= zfs_pd_bytes_max)
		cv_wait_sig(&pfd->pd_cv, &pfd->pd_mtx);
	pfd->pd_bytes_fetched += BP_GET_LSIZE(bp);
	cv_broadcast(&pfd->pd_cv);
	mutex_exit(&pfd->pd_mtx);

	(void) arc_read(NULL, spa, bp, NULL, NULL, ZIO_PRIORITY_ASYNC_READ,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &aflags, zb);

	return (0);
}
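
/*
 * The prefetch thread runs the same traversal as the main thread, but
 * with the callback replaced by traverse_prefetcher and with its own
 * td_pfd cleared so the clone does not throttle against itself.
 */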

static void
traverse_prefetch_thread(void *arg)
{
	traverse_data_t *td_main = arg;
	traverse_data_t td = *td_main;
	zbookmark_phys_t czb;
	fstrans_cookie_t cookie = spl_fstrans_mark();

	td.td_func = traverse_prefetcher;
	td.td_arg = td_main->td_pfd;
	td.td_pfd = NULL;
	td.td_resume = &td_main->td_pfd->pd_resume;

	SET_BOOKMARK(&czb, td.td_objset,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
	(void) traverse_visitbp(&td, NULL, td.td_rootbp, &czb);

	mutex_enter(&td_main->td_pfd->pd_mtx);
	td_main->td_pfd->pd_exited = B_TRUE;
	cv_broadcast(&td_main->td_pfd->pd_cv);
	mutex_exit(&td_main->td_pfd->pd_mtx);
	spl_fstrans_unmark(cookie);
}

/*
 * NB: dataset must not be changing on-disk (eg, is a snapshot or we are
 * in syncing context).
 */
static int
traverse_impl(spa_t *spa, dsl_dataset_t *ds, uint64_t objset, blkptr_t *rootbp,
    uint64_t txg_start, zbookmark_phys_t *resume, int flags,
    blkptr_cb_t func, void *arg)
{
	traverse_data_t *td;
	prefetch_data_t *pd;
	zbookmark_phys_t *czb;
	int err;

	ASSERT(ds == NULL || objset == ds->ds_object);
	ASSERT(!(flags & TRAVERSE_PRE) || !(flags & TRAVERSE_POST));

	td = kmem_alloc(sizeof (traverse_data_t), KM_SLEEP);
	pd = kmem_zalloc(sizeof (prefetch_data_t), KM_SLEEP);
	czb = kmem_alloc(sizeof (zbookmark_phys_t), KM_SLEEP);

	td->td_spa = spa;
	td->td_objset = objset;
	td->td_rootbp = rootbp;
	td->td_min_txg = txg_start;
	td->td_resume = resume;
	td->td_func = func;
	td->td_arg = arg;
	td->td_pfd = pd;
	td->td_flags = flags;
	td->td_paused = B_FALSE;
	td->td_realloc_possible = (txg_start == 0 ? B_FALSE : B_TRUE);

	if (spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
		VERIFY(spa_feature_enabled_txg(spa,
		    SPA_FEATURE_HOLE_BIRTH, &td->td_hole_birth_enabled_txg));
	} else {
		td->td_hole_birth_enabled_txg = UINT64_MAX;
	}

	pd->pd_flags = flags;
	if (resume != NULL)
		pd->pd_resume = *resume;
	mutex_init(&pd->pd_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&pd->pd_cv, NULL, CV_DEFAULT, NULL);

	SET_BOOKMARK(czb, td->td_objset,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);

	/* See comment on ZIL traversal in dsl_scan_visitds. */
	if (ds != NULL && !ds->ds_is_snapshot && !BP_IS_HOLE(rootbp)) {
		uint32_t flags = ARC_FLAG_WAIT;
		objset_phys_t *osp;
		arc_buf_t *buf;

		err = arc_read(NULL, td->td_spa, rootbp,
		    arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, czb);
		if (err != 0)
			return (err);

		osp = buf->b_data;
		traverse_zil(td, &osp->os_zil_header);
		(void) arc_buf_remove_ref(buf, &buf);
	}

	if (!(flags & TRAVERSE_PREFETCH_DATA) ||
	    0 == taskq_dispatch(system_taskq, traverse_prefetch_thread,
	    td, TQ_NOQUEUE))
		pd->pd_exited = B_TRUE;

	err = traverse_visitbp(td, NULL, rootbp, czb);

	mutex_enter(&pd->pd_mtx);
	pd->pd_cancel = B_TRUE;
	cv_broadcast(&pd->pd_cv);
	while (!pd->pd_exited)
		cv_wait_sig(&pd->pd_cv, &pd->pd_mtx);
	mutex_exit(&pd->pd_mtx);

	mutex_destroy(&pd->pd_mtx);
	cv_destroy(&pd->pd_cv);

	kmem_free(czb, sizeof (zbookmark_phys_t));
	kmem_free(pd, sizeof (struct prefetch_data));
	kmem_free(td, sizeof (struct traverse_data));

	return (err);
}

/*
 * NB: dataset must not be changing on-disk (eg, is a snapshot or we are
 * in syncing context).
 */
int
traverse_dataset_resume(dsl_dataset_t *ds, uint64_t txg_start,
    zbookmark_phys_t *resume,
    int flags, blkptr_cb_t func, void *arg)
{
	return (traverse_impl(ds->ds_dir->dd_pool->dp_spa, ds, ds->ds_object,
	    &dsl_dataset_phys(ds)->ds_bp, txg_start, resume, flags, func, arg));
}

int
traverse_dataset(dsl_dataset_t *ds, uint64_t txg_start,
    int flags, blkptr_cb_t func, void *arg)
{
	return (traverse_dataset_resume(ds, txg_start, NULL, flags, func, arg));
}

int
traverse_dataset_destroyed(spa_t *spa, blkptr_t *blkptr,
    uint64_t txg_start, zbookmark_phys_t *resume, int flags,
    blkptr_cb_t func, void *arg)
{
	return (traverse_impl(spa, NULL, ZB_DESTROYED_OBJSET,
	    blkptr, txg_start, resume, flags, func, arg));
}
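
/*
 * Visit every block in the pool: first the meta-objset (MOS), then each
 * dataset.  Each dataset's traversal starts at the later of txg_start
 * and the dataset's previous-snapshot txg, so blocks shared with an
 * earlier snapshot are not visited twice.
 */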

/*
 * NB: pool must not be changing on-disk (eg, from zdb or sync context).
 */
int
traverse_pool(spa_t *spa, uint64_t txg_start, int flags,
    blkptr_cb_t func, void *arg)
{
	int err;
	uint64_t obj;
	dsl_pool_t *dp = spa_get_dsl(spa);
	objset_t *mos = dp->dp_meta_objset;
	boolean_t hard = (flags & TRAVERSE_HARD);

	/* visit the MOS */
	err = traverse_impl(spa, NULL, 0, spa_get_rootblkptr(spa),
	    txg_start, NULL, flags, func, arg);
	if (err != 0)
		return (err);

	/* visit each dataset */
	for (obj = 1; err == 0;
	    err = dmu_object_next(mos, &obj, B_FALSE, txg_start)) {
		dmu_object_info_t doi;

		err = dmu_object_info(mos, obj, &doi);
		if (err != 0) {
			if (hard)
				continue;
			break;
		}

		if (doi.doi_bonus_type == DMU_OT_DSL_DATASET) {
			dsl_dataset_t *ds;
			uint64_t txg = txg_start;

			dsl_pool_config_enter(dp, FTAG);
			err = dsl_dataset_hold_obj(dp, obj, FTAG, &ds);
			dsl_pool_config_exit(dp, FTAG);
			if (err != 0) {
				if (hard)
					continue;
				break;
			}
			if (dsl_dataset_phys(ds)->ds_prev_snap_txg > txg)
				txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
			err = traverse_dataset(ds, txg, flags, func, arg);
			dsl_dataset_rele(ds, FTAG);
			if (err != 0)
				break;
		}
	}
	if (err == ESRCH)
		err = 0;
	return (err);
}

#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(traverse_dataset);
EXPORT_SYMBOL(traverse_pool);

module_param(zfs_pd_bytes_max, int, 0644);
MODULE_PARM_DESC(zfs_pd_bytes_max, "Max number of bytes to prefetch");
#endif