granicus.if.org Git - zfs/commitdiff
Add ZFS perf test for dbuf cache
author: John Wren Kennedy <jwk404@gmail.com>
Wed, 28 Feb 2018 18:38:37 +0000 (10:38 -0800)
committer: Brian Behlendorf <behlendorf1@llnl.gov>
Wed, 28 Feb 2018 18:38:37 +0000 (10:38 -0800)
This change adds a test for sequential reads out of the dbuf cache.
It's essentially a copy of sequential_reads_cached, using a smaller
data set. The sequential read tests are renamed to differentiate them.

Authored by: Dan Kimmel <dan.kimmel@delphix.com>
Reviewed by: Paul Dagnelie <pcd@delphix.com>
Reviewed by: Matt Ahrens <mahrens@delphix.com>
Reviewed by: George Wilson <george.wilson@delphix.com>
Reviewed by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: John Wren Kennedy <john.kennedy@delphix.com>
Closes #7225

tests/runfiles/perf-regression.run
tests/zfs-tests/include/commands.cfg
tests/zfs-tests/tests/perf/perf.shlib
tests/zfs-tests/tests/perf/regression/Makefile.am
tests/zfs-tests/tests/perf/regression/sequential_reads_arc_cached.ksh [moved from tests/zfs-tests/tests/perf/regression/sequential_reads_cached.ksh with 100% similarity]
tests/zfs-tests/tests/perf/regression/sequential_reads_arc_cached_clone.ksh [moved from tests/zfs-tests/tests/perf/regression/sequential_reads_cached_clone.ksh with 100% similarity]
tests/zfs-tests/tests/perf/regression/sequential_reads_dbuf_cached.ksh [new file with mode: 0755]

index 3b571dddd924b74558d078a268c53451c3647eb8..cb068e887fe8fc01d0e0d877f640b451855e8b9a 100644 (file)
@@ -25,8 +25,8 @@ outputdir = /var/tmp/test_results
 tags = ['perf']
 
 [tests/perf/regression]
-tests = ['sequential_writes', 'sequential_reads', 'sequential_reads_cached',
-    'sequential_reads_cached_clone', 'random_reads', 'random_writes',
-    'random_readwrite']
+tests = ['sequential_writes', 'sequential_reads', 'sequential_reads_arc_cached',
+    'sequential_reads_arc_cached_clone', 'sequential_reads_dbuf_cached',
+    'random_reads', 'random_writes', 'random_readwrite']
 post =
 tags = ['perf', 'regression']
index 0600fe71c979149eb0ca5504cc10ef26027851a5..ca734d39fd7246df08f7e79024df26f5fee5ad6e 100644 (file)
@@ -44,6 +44,7 @@ export SYSTEM_FILES='arp
     file
     find
     fio
+    free
     getconf
     getent
     getfacl
index 76b6651c3acba38a6c0148d57ae4ea819ee21b99..e1e845ba6568213e889569745791a4c20daf7a41 100644 (file)
@@ -188,6 +188,24 @@ function get_max_arc_size
        echo $max_arc_size
 }
 
+function get_max_dbuf_cache_size
+{
+       typeset -l max_dbuf_cache_size
+
+       if is_linux; then
+               max_dbuf_cache_size=$(get_tunable dbuf_cache_max_bytes)
+       else
+               max_dbuf_cache_size=$(dtrace -qn 'BEGIN {
+                   printf("%u\n", `dbuf_cache_max_bytes);
+                   exit(0);
+               }')
+
+               [[ $? -eq 0 ]] || log_fail "get_max_dbuf_cache_size failed"
+       fi
+
+       echo $max_dbuf_cache_size
+}
+
 # Create a file with some information about how this system is configured.
 function get_system_config
 {
index c9032a26b1a5bde66678c39e5032bec1b2ed01e0..c0419949d4cb4d2cd27352c521cdeb17aec1c6d0 100644 (file)
@@ -3,8 +3,9 @@ dist_pkgdata_SCRIPTS = \
        random_reads.ksh \
        random_readwrite.ksh \
        random_writes.ksh \
-       sequential_reads_cached_clone.ksh \
-       sequential_reads_cached.ksh \
+       sequential_reads_arc_cached_clone.ksh \
+       sequential_reads_arc_cached.ksh \
+       sequential_reads_dbuf_cached.ksh \
        sequential_reads.ksh \
        sequential_writes.ksh \
        setup.ksh
diff --git a/tests/zfs-tests/tests/perf/regression/sequential_reads_dbuf_cached.ksh b/tests/zfs-tests/tests/perf/regression/sequential_reads_dbuf_cached.ksh
new file mode 100755 (executable)
index 0000000..5d02928
--- /dev/null
@@ -0,0 +1,93 @@
+#!/bin/ksh
+
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source.  A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright (c) 2016 by Delphix. All rights reserved.
+#
+
+#
+# Description:
+# Trigger fio runs using the sequential_reads job file. The number of runs and
+# data collected is determined by the PERF_* variables. See do_fio_run for
+# details about these variables.
+#
+# The files to read from are created prior to the first fio run, and used
+# for all fio runs. The ARC is not cleared to ensure that all data is cached.
+#
+# This is basically a copy of the sequential_reads_cached test case, but with
+# a smaller dataset so that we can fit everything into the decompressed, linear
+# space in the dbuf cache.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/perf/perf.shlib
+
+function cleanup
+{
+       # kill fio and iostat
+       pkill ${fio##*/}
+       pkill ${iostat##*/}
+       log_must_busy zfs destroy $TESTFS
+       log_must_busy zpool destroy $PERFPOOL
+}
+
+trap "log_fail \"Measure IO stats during sequential read load\"" SIGTERM
+log_onexit cleanup
+
+export TESTFS=$PERFPOOL/testfs
+recreate_perfpool
+log_must zfs create $PERF_FS_OPTS $TESTFS
+
+# Ensure the working set can be cached in the dbuf cache.
+export TOTAL_SIZE=$(($(get_max_dbuf_cache_size) * 3 / 4))
+
+# Variables for use by fio.
+if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
+       export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
+       export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
+       export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 32 64'}
+       export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
+       export PERF_IOSIZES=${PERF_IOSIZES:-'8k 64k 128k'}
+elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
+       export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
+       export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
+       export PERF_NTHREADS=${PERF_NTHREADS:-'64'}
+       export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
+       export PERF_IOSIZES=${PERF_IOSIZES:-'64k'}
+fi
+
+# Layout the files to be used by the read tests. Create as many files as the
+# largest number of threads. An fio run with fewer threads will use a subset
+# of the available files.
+export NUMJOBS=$(get_max $PERF_NTHREADS)
+export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))
+log_must fio $FIO_SCRIPTS/mkfiles.fio
+
+# Set up the scripts and output files that will log performance data.
+lun_list=$(pool_to_lun_list $PERFPOOL)
+log_note "Collecting backend IO stats with lun list $lun_list"
+if is_linux; then
+       export collect_scripts=("zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
+           "$PERF_SCRIPTS/prefetch_io.sh $PERFPOOL 1" "prefetch" "vmstat 1"
+           "vmstat" "mpstat  -P ALL 1" "mpstat" "iostat -dxyz 1" "iostat")
+else
+       export collect_scripts=("kstat zfs:0 1" "kstat" "vmstat -T d 1" "vmstat"
+           "mpstat -T d 1" "mpstat" "iostat -T d -xcnz 1" "iostat"
+           "dtrace -Cs $PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
+           "dtrace -Cs $PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch"
+           "dtrace -s $PERF_SCRIPTS/profile.d" "profile")
+fi
+
+log_note "Sequential cached reads with $PERF_RUNTYPE settings"
+do_fio_run sequential_reads.fio false false
+log_pass "Measure IO stats during sequential cached read load"