granicus.if.org Git - zfs/commitdiff
Perform whole-page truncation for hole-punching under a range lock
author Tim Chase <tim@chase2k.com>
Fri, 26 Sep 2014 04:40:41 +0000 (23:40 -0500)
committer Brian Behlendorf <behlendorf1@llnl.gov>
Mon, 29 Sep 2014 16:22:03 +0000 (09:22 -0700)
In an attempt to perform the page truncation more optimally, the
hole-punching support added in 223df0161fad50f53a8fa5ffeea8cc4f8137d522
performed the operation in two steps: first, sub-page "stubs" were
zeroed under the range lock in zfs_free_range() using the new
zfs_zero_partial_page() function, and then the whole pages were
truncated within zfs_freesp().  This left a window of opportunity
during which the full pages could be touched.

This patch closes the window by moving the whole-page truncation into
zfs_free_range() under the range lock.

Signed-off-by: Tim Chase <tim@chase2k.com>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #2733
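
For context, the page-boundary arithmetic the patch moves under the range
lock can be illustrated with a small standalone sketch.  This is not the
ZFS code itself: PAGE_SHIFT, the printf reporting, and the example offsets
are assumptions for illustration, and the real code uses PAGE_CACHE_SHIFT
and calls truncate_inode_pages_range()/zfs_zero_partial_page() while the
range lock is held.

/* Standalone sketch of the hole-punch page arithmetic (illustrative only). */
#include <stdio.h>
#include <inttypes.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1ULL << PAGE_SHIFT)

static void
hole_punch_pages(uint64_t off, uint64_t len)
{
	/* first possible full page in the hole (round the start up) */
	uint64_t first_page = (off + PAGE_SIZE - 1) >> PAGE_SHIFT;
	/* last page of the hole (round the end down) */
	uint64_t last_page = (off + len) >> PAGE_SHIFT;
	uint64_t first_page_offset = first_page << PAGE_SHIFT;
	uint64_t last_page_offset = last_page << PAGE_SHIFT;

	/* truncate whole pages only when at least one full page is in the hole */
	if (last_page_offset > first_page_offset)
		printf("truncate whole pages [%" PRIu64 ", %" PRIu64 ")\n",
		    first_page_offset, last_page_offset);

	if (first_page > last_page) {
		/* entire punched area lies within a single page */
		printf("zero partial page [%" PRIu64 ", %" PRIu64 ")\n",
		    off, off + len);
	} else {
		/* sub-page stubs at the head and tail of the hole */
		if (off < first_page_offset)
			printf("zero head stub [%" PRIu64 ", %" PRIu64 ")\n",
			    off, first_page_offset);
		if (off + len > last_page_offset)
			printf("zero tail stub [%" PRIu64 ", %" PRIu64 ")\n",
			    last_page_offset, off + len);
	}
}

int
main(void)
{
	hole_punch_pages(3000, 10000);	/* stubs at both ends + 2 whole pages */
	hole_punch_pages(100, 200);	/* hole entirely within one page */
	return (0);
}

With the patch, both the whole-page truncation and the stub zeroing run
back-to-back in zfs_free_range() while the range lock is held, so nothing
can re-dirty the full pages between the two steps.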

module/zfs/zfs_znode.c

index 08faf0838c680b7c88144a4501b66ab178fc4fa8..90dbfd31579f90d474c41e48af998b3dba6585b4 100644 (file)
@@ -1440,6 +1440,13 @@ zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
                /* offset of last_page */
                last_page_offset = last_page << PAGE_CACHE_SHIFT;
 
+               /* truncate whole pages */
+               if (last_page_offset > first_page_offset) {
+                       truncate_inode_pages_range(ZTOI(zp)->i_mapping,
+                           first_page_offset, last_page_offset - 1);
+               }
+
+               /* truncate sub-page ranges */
                if (first_page > last_page) {
                        /* entire punched area within a single page */
                        zfs_zero_partial_page(zp, off, len);
@@ -1607,31 +1614,10 @@ out:
        /*
         * Truncate the page cache - for file truncate operations, use
         * the purpose-built API for truncations.  For punching operations,
-        * truncate only whole pages within the region; partial pages are
-        * zeroed under a range lock in zfs_free_range().
+        * the truncation is handled under a range lock in zfs_free_range.
         */
        if (len == 0)
                truncate_setsize(ZTOI(zp), off);
-       else if (zp->z_is_mapped) {
-               loff_t first_page, last_page;
-               loff_t first_page_offset, last_page_offset;
-
-               /* first possible full page in hole */
-               first_page = (off + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-               /* last page of hole */
-               last_page = (off + len) >> PAGE_CACHE_SHIFT;
-
-               /* offset of first_page */
-               first_page_offset = first_page << PAGE_CACHE_SHIFT;
-               /* offset of last_page */
-               last_page_offset = last_page << PAGE_CACHE_SHIFT;
-
-               /* truncate whole pages */
-               if (last_page_offset > first_page_offset) {
-                       truncate_inode_pages_range(ZTOI(zp)->i_mapping,
-                           first_page_offset, last_page_offset - 1);
-               }
-       }
        return (error);
 }
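
As a usage-level illustration of the scenario the race affected, the
following hedged sketch (not part of the commit; the file path, sizes,
and offsets are assumptions, and it requires Linux fallocate(2) with
FALLOC_FL_PUNCH_HOLE support in the underlying filesystem) writes a file
through a shared mapping, punches a hole covering partial and whole
pages, and then expects the mapping to read back zeros across the
punched range.

/* Illustrative hole-punch test through a shared mapping (assumed paths/sizes). */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int
main(void)
{
	const size_t fsize = 1 << 20;	/* 1 MiB scratch file (assumption) */
	int fd = open("/tank/punchtest", O_RDWR | O_CREAT, 0644);
	if (fd < 0 || ftruncate(fd, fsize) != 0) {
		perror("open/ftruncate");
		return (1);
	}

	char *map = mmap(NULL, fsize, PROT_READ | PROT_WRITE,
	    MAP_SHARED, fd, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		return (1);
	}
	memset(map, 0xab, fsize);	/* dirty the mapped pages */
	msync(map, fsize, MS_SYNC);

	/* punch a hole spanning partial pages at both ends plus whole pages */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
	    3000, 10000) != 0) {
		perror("fallocate");
		return (1);
	}

	/* every byte of the punched range must now read back as zero */
	for (size_t i = 3000; i < 13000; i++) {
		if (map[i] != 0) {
			fprintf(stderr, "non-zero byte at offset %zu\n", i);
			return (1);
		}
	}
	puts("punched range reads back as zeros through the mapping");

	munmap(map, fsize);
	close(fd);
	return (0);
}

Before this change, another thread touching the mapping between the stub
zeroing in zfs_free_range() and the whole-page truncation in zfs_freesp()
could observe or re-dirty stale full pages in the punched region; with
both steps under the range lock, that interleaving is no longer possible.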