/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * ZFS's 2nd and 4th order Fletcher checksums are defined by the following
 * recurrence relations:
 *
 *        a_i = a_(i-1) + f_(i-1)
 *        b_i = b_(i-1) + a_i
 *        c_i = c_(i-1) + b_i        (fletcher-4 only)
 *        d_i = d_(i-1) + c_i        (fletcher-4 only)
 *
 * Where
 *        a_0 = b_0 = c_0 = d_0 = 0
 *        f_0 .. f_(n-1) are the input data.
 *
 * Using standard techniques, these translate into the following series:
 *
 *        a_n = SUM[i=1..n] f_(n-i)
 *        b_n = SUM[i=1..n] i * f_(n-i)
 *        c_n = SUM[i=1..n] (i*(i+1)/2) * f_(n-i)
 *        d_n = SUM[i=1..n] (i*(i+1)*(i+2)/6) * f_(n-i)
 *
 * For fletcher-2, the f_is are 64-bit, and [ab]_i are 64-bit accumulators.
 * Since the additions are done mod (2^64), errors in the high bits may not
 * be noticed.  For this reason, fletcher-2 is deprecated.
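 * (Concretely: flipping the most significant bit of two input words in the
 * same lane, at positions of equal parity, leaves a unchanged mod 2^64 and
 * changes b by an even multiple of 2^63, i.e. by 0 mod 2^64, so that
 * two-bit corruption goes undetected.)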
 *
 * For fletcher-4, the f_is are 32-bit, and [abcd]_i are 64-bit accumulators.
 * A conservative estimate of how big the buffer can get before we overflow
 * can be made using f_i = 0xffffffff for all i:
 *
 * % bc
 *  f=2^32-1;d=0; for (i = 1; d<2^64; i++) { d += f*i*(i+1)*(i+2)/6 }; (i-1)*4
 * 2264
 *  quit
 * %
 *
 * So blocks of up to 2k will not overflow.  Our largest block size is
 * 128k, which has 32k 4-byte words, so we can compute the largest possible
 * accumulators, then divide by 2^64 to figure the max amount of overflow:
 *
 * % bc
 *  a=b=c=d=0; f=2^32-1; for (i=1; i<=32*1024; i++) { a+=f; b+=a; c+=b; d+=c }
 *  a/2^64;b/2^64;c/2^64;d/2^64
 * 0
 * 0
 * 1365
 * 11186858
 *  quit
 * %
 *
 * So a and b cannot overflow.  To make sure each bit of input has some
 * effect on the contents of c and d, we can look at what the factors of
 * the coefficients in the equations for c_n and d_n are.  The number of 2s
 * in the factors determines the lowest set bit in the multiplier.  Running
 * through the cases for n*(n+1)/2 reveals that the highest power of 2 is
 * 2^14, and for n*(n+1)*(n+2)/6 it is 2^15 (for example, n = 32k = 2^15
 * gives n*(n+1)/2 = 2^14 * (2^15 + 1), whose lowest set bit is 2^14).  So
 * while some data may overflow the 64-bit accumulators, every bit of every
 * f_i affects every accumulator, even for 128k blocks.
 *
 * If we wanted to make a stronger version of fletcher4 (fletcher4c?),
 * we could do our calculations mod (2^32 - 1) by adding in the carries
 * periodically, and store the number of carries in the top 32 bits.
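 *
 * A hypothetical sketch of that idea (nothing below implements it): an
 * end-around carry fold such as
 *
 *        a += f;
 *        a = (a & 0xffffffff) + (a >> 32);
 *
 * keeps the accumulator congruent to the running sum mod (2^32 - 1),
 * because 2^32 == 1 (mod 2^32 - 1), leaving the upper half of the word
 * free to count how many folds occurred.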
 *
 * --------------------
 * Checksum Performance
 * --------------------
 *
 * There are two interesting components to checksum performance: cached and
 * uncached performance.  With cached data, fletcher-2 is about four times
 * faster than fletcher-4.  With uncached data, the performance difference is
 * negligible, since the cost of a cache fill dominates the processing time.
 * Even though fletcher-4 is slower than fletcher-2, it is still a pretty
 * efficient pass over the data.
 *
 * In normal operation, the data being checksummed is in a buffer that has
 * been filled either by:
 *
 *        1. a compression step, which will be mostly cached, or
 *        2. a bcopy() or copyin(), which will be uncached (because the
 *           copy is cache-bypassing).
 *
 * For both cached and uncached data, both fletcher checksums are much faster
 * than sha-256, and slower than 'off', which doesn't touch the data at all.
 */

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/byteorder.h>
#include <sys/spa.h>
#include <sys/zfs_context.h>
#include <zfs_fletcher.h>

static void fletcher_4_scalar_init(zio_cksum_t *zcp);
static void fletcher_4_scalar(const void *buf, uint64_t size,
    zio_cksum_t *zcp);
static void fletcher_4_scalar_byteswap(const void *buf, uint64_t size,
    zio_cksum_t *zcp);
static boolean_t fletcher_4_scalar_valid(void);

static const fletcher_4_ops_t fletcher_4_scalar_ops = {
        .init = fletcher_4_scalar_init,
        .compute = fletcher_4_scalar,
        .compute_byteswap = fletcher_4_scalar_byteswap,
        .valid = fletcher_4_scalar_valid,
        .name = "scalar"
};

static const fletcher_4_ops_t *fletcher_4_algos[] = {
        &fletcher_4_scalar_ops,
#if defined(HAVE_SSE2)
        &fletcher_4_sse2_ops,
#endif
#if defined(HAVE_SSE2) && defined(HAVE_SSSE3)
        &fletcher_4_ssse3_ops,
#endif
#if defined(HAVE_AVX) && defined(HAVE_AVX2)
        &fletcher_4_avx2_ops,
#endif
};

static enum fletcher_selector {
        FLETCHER_FASTEST = 0,
        FLETCHER_SCALAR,
#if defined(HAVE_SSE2)
        FLETCHER_SSE2,
#endif
#if defined(HAVE_SSE2) && defined(HAVE_SSSE3)
        FLETCHER_SSSE3,
#endif
#if defined(HAVE_AVX) && defined(HAVE_AVX2)
        FLETCHER_AVX2,
#endif
        FLETCHER_CYCLE
} fletcher_4_impl_chosen = FLETCHER_SCALAR;

static struct fletcher_4_impl_selector {
        const char *fis_name;
        const fletcher_4_ops_t *fis_ops;
} fletcher_4_impl_selectors[] = {
        [ FLETCHER_FASTEST ] = { "fastest", NULL },
        [ FLETCHER_SCALAR ] = { "scalar", &fletcher_4_scalar_ops },
#if defined(HAVE_SSE2)
        [ FLETCHER_SSE2 ] = { "sse2", &fletcher_4_sse2_ops },
#endif
#if defined(HAVE_SSE2) && defined(HAVE_SSSE3)
        [ FLETCHER_SSSE3 ] = { "ssse3", &fletcher_4_ssse3_ops },
#endif
#if defined(HAVE_AVX) && defined(HAVE_AVX2)
        [ FLETCHER_AVX2 ] = { "avx2", &fletcher_4_avx2_ops },
#endif
#if !defined(_KERNEL)
        [ FLETCHER_CYCLE ] = { "cycle", &fletcher_4_scalar_ops }
#endif
};

static kmutex_t fletcher_4_impl_lock;

static kstat_t *fletcher_4_kstat;

static kstat_named_t fletcher_4_kstat_data[ARRAY_SIZE(fletcher_4_algos)];

void
fletcher_2_native(const void *buf, uint64_t size, zio_cksum_t *zcp)
{
        const uint64_t *ip = buf;
        const uint64_t *ipend = ip + (size / sizeof (uint64_t));
        uint64_t a0, b0, a1, b1;

        for (a0 = b0 = a1 = b1 = 0; ip < ipend; ip += 2) {
                a0 += ip[0];
                a1 += ip[1];
                b0 += a0;
                b1 += a1;
        }

        ZIO_SET_CHECKSUM(zcp, a0, a1, b0, b1);
}

void
fletcher_2_byteswap(const void *buf, uint64_t size, zio_cksum_t *zcp)
{
        const uint64_t *ip = buf;
        const uint64_t *ipend = ip + (size / sizeof (uint64_t));
        uint64_t a0, b0, a1, b1;

        for (a0 = b0 = a1 = b1 = 0; ip < ipend; ip += 2) {
                a0 += BSWAP_64(ip[0]);
                a1 += BSWAP_64(ip[1]);
                b0 += a0;
                b1 += a1;
        }

        ZIO_SET_CHECKSUM(zcp, a0, a1, b0, b1);
}

static void fletcher_4_scalar_init(zio_cksum_t *zcp)
{
        ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);
}

static void
fletcher_4_scalar(const void *buf, uint64_t size, zio_cksum_t *zcp)
{
        const uint32_t *ip = buf;
        const uint32_t *ipend = ip + (size / sizeof (uint32_t));
        uint64_t a, b, c, d;

        a = zcp->zc_word[0];
        b = zcp->zc_word[1];
        c = zcp->zc_word[2];
        d = zcp->zc_word[3];

        for (; ip < ipend; ip++) {
                a += ip[0];
                b += a;
                c += b;
                d += c;
        }

        ZIO_SET_CHECKSUM(zcp, a, b, c, d);
}

static void
fletcher_4_scalar_byteswap(const void *buf, uint64_t size, zio_cksum_t *zcp)
{
        const uint32_t *ip = buf;
        const uint32_t *ipend = ip + (size / sizeof (uint32_t));
        uint64_t a, b, c, d;

        a = zcp->zc_word[0];
        b = zcp->zc_word[1];
        c = zcp->zc_word[2];
        d = zcp->zc_word[3];

        for (; ip < ipend; ip++) {
                a += BSWAP_32(ip[0]);
                b += a;
                c += b;
                d += c;
        }

        ZIO_SET_CHECKSUM(zcp, a, b, c, d);
}

static boolean_t
fletcher_4_scalar_valid(void)
{
        return (B_TRUE);
}

int
fletcher_4_impl_set(const char *val)
{
        const fletcher_4_ops_t *ops;
        enum fletcher_selector idx;
        size_t val_len;
        unsigned i;

        val_len = strlen(val);
        while ((val_len > 0) && !!isspace(val[val_len-1])) /* trim '\n' */
                val_len--;

        for (i = 0; i < ARRAY_SIZE(fletcher_4_impl_selectors); i++) {
                const char *name = fletcher_4_impl_selectors[i].fis_name;

                if (val_len == strlen(name) &&
                    strncmp(val, name, val_len) == 0) {
                        idx = i;
                        break;
                }
        }
        if (i >= ARRAY_SIZE(fletcher_4_impl_selectors))
                return (-EINVAL);

        ops = fletcher_4_impl_selectors[idx].fis_ops;
        if (ops == NULL || !ops->valid())
                return (-ENOTSUP);

        mutex_enter(&fletcher_4_impl_lock);
        if (fletcher_4_impl_chosen != idx)
                fletcher_4_impl_chosen = idx;
        mutex_exit(&fletcher_4_impl_lock);

        return (0);
}

static inline const fletcher_4_ops_t *
fletcher_4_impl_get(void)
{
#if !defined(_KERNEL)
        if (fletcher_4_impl_chosen == FLETCHER_CYCLE) {
                static volatile unsigned int cycle_count = 0;
                const fletcher_4_ops_t *ops = NULL;
                unsigned int index;

                /* cycle through the implementations, skipping invalid ones */
                while (1) {
                        index = atomic_inc_uint_nv(&cycle_count);
                        ops = fletcher_4_algos[
                            index % ARRAY_SIZE(fletcher_4_algos)];
                        if (ops->valid())
                                break;
                }

                return (ops);
        }
#endif
        return (fletcher_4_impl_selectors[fletcher_4_impl_chosen].fis_ops);
}

void
fletcher_4_native(const void *buf, uint64_t size, zio_cksum_t *zcp)
{
        const fletcher_4_ops_t *ops;

        if (IS_P2ALIGNED(size, 4 * sizeof (uint32_t)))
                ops = fletcher_4_impl_get();
        else
                ops = &fletcher_4_scalar_ops;

        ops->init(zcp);
        ops->compute(buf, size, zcp);
        if (ops->fini != NULL)
                ops->fini(zcp);
}

void
fletcher_4_byteswap(const void *buf, uint64_t size, zio_cksum_t *zcp)
{
        const fletcher_4_ops_t *ops;

        if (IS_P2ALIGNED(size, 4 * sizeof (uint32_t)))
                ops = fletcher_4_impl_get();
        else
                ops = &fletcher_4_scalar_ops;

        ops->init(zcp);
        ops->compute_byteswap(buf, size, zcp);
        if (ops->fini != NULL)
                ops->fini(zcp);
}

void
fletcher_4_incremental_native(const void *buf, uint64_t size,
    zio_cksum_t *zcp)
{
        fletcher_4_scalar(buf, size, zcp);
}

void
fletcher_4_incremental_byteswap(const void *buf, uint64_t size,
    zio_cksum_t *zcp)
{
        fletcher_4_scalar_byteswap(buf, size, zcp);
}
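
/*
 * Illustrative use of the incremental interface (a sketch, not code from
 * elsewhere in ZFS): the caller seeds the checksum itself, then feeds the
 * buffer in chunks whose sizes are multiples of 4 bytes; the result equals
 * a single fletcher_4_native() pass over the whole buffer:
 *
 *        zio_cksum_t zc;
 *
 *        ZIO_SET_CHECKSUM(&zc, 0, 0, 0, 0);
 *        fletcher_4_incremental_native(buf, size / 2, &zc);
 *        fletcher_4_incremental_native((char *)buf + size / 2,
 *            size - size / 2, &zc);
 */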

void
fletcher_4_init(void)
{
        const uint64_t bench_ns = (50 * MICROSEC); /* 50ms */
        unsigned long best_run_count = 0;
        unsigned long best_run_index = 0;
        const unsigned data_size = 4096;
        char *databuf;
        unsigned i;

        databuf = kmem_alloc(data_size, KM_SLEEP);
        for (i = 0; i < ARRAY_SIZE(fletcher_4_algos); i++) {
                const fletcher_4_ops_t *ops = fletcher_4_algos[i];
                kstat_named_t *stat = &fletcher_4_kstat_data[i];
                unsigned long run_count = 0;
                hrtime_t start;
                zio_cksum_t zc;

                strncpy(stat->name, ops->name, sizeof (stat->name) - 1);
                stat->data_type = KSTAT_DATA_UINT64;
                stat->value.ui64 = 0;

                if (!ops->valid())
                        continue;

                kpreempt_disable();
                start = gethrtime();
                do {
                        ops->init(&zc);
                        ops->compute(databuf, data_size, &zc);
                        ops->compute_byteswap(databuf, data_size, &zc);
                        run_count++;
                } while (gethrtime() < start + bench_ns);
                if (ops->fini != NULL)
                        ops->fini(&zc);
                kpreempt_enable();

                if (run_count > best_run_count) {
                        best_run_count = run_count;
                        best_run_index = i;
                }

                /*
                 * Due to the high overhead of gethrtime(), the performance
                 * data here is inaccurate and much slower than it could be.
                 * It's fine for our use though because only relative speed
                 * is important.
                 */
                stat->value.ui64 = data_size * run_count *
                    (NANOSEC / bench_ns) >> 20; /* by MB/s */
        }
        kmem_free(databuf, data_size);

        fletcher_4_impl_selectors[FLETCHER_FASTEST].fis_ops =
            fletcher_4_algos[best_run_index];

        mutex_init(&fletcher_4_impl_lock, NULL, MUTEX_DEFAULT, NULL);
        fletcher_4_impl_set("fastest");

        fletcher_4_kstat = kstat_create("zfs", 0, "fletcher_4_bench",
            "misc", KSTAT_TYPE_NAMED, ARRAY_SIZE(fletcher_4_algos),
            KSTAT_FLAG_VIRTUAL);
        if (fletcher_4_kstat != NULL) {
                fletcher_4_kstat->ks_data = fletcher_4_kstat_data;
                kstat_install(fletcher_4_kstat);
        }
}
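
/*
 * Note: with the SPL on Linux, the per-implementation throughput figures
 * collected above are visible from user space via the kstat interface; the
 * path is assumed to be /proc/spl/kstat/zfs/fletcher_4_bench.
 */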

void
fletcher_4_fini(void)
{
        mutex_destroy(&fletcher_4_impl_lock);

        if (fletcher_4_kstat != NULL) {
                kstat_delete(fletcher_4_kstat);
                fletcher_4_kstat = NULL;
        }
}

#if defined(_KERNEL) && defined(HAVE_SPL)

static int
fletcher_4_param_get(char *buffer, struct kernel_param *unused)
{
        int i, cnt = 0;

        for (i = 0; i < ARRAY_SIZE(fletcher_4_impl_selectors); i++) {
                const fletcher_4_ops_t *ops;

                ops = fletcher_4_impl_selectors[i].fis_ops;
                if (!ops->valid())
                        continue;

                cnt += sprintf(buffer + cnt,
                    fletcher_4_impl_chosen == i ? "[%s] " : "%s ",
                    fletcher_4_impl_selectors[i].fis_name);
        }

        return (cnt);
}

static int
fletcher_4_param_set(const char *val, struct kernel_param *unused)
{
        return (fletcher_4_impl_set(val));
}

/*
 * Choose a fletcher 4 implementation in ZFS.
 * Users can choose "fastest", or "scalar" and "avx2", which compute
 * fletcher 4 with generic CPU instructions or vector instructions,
 * respectively.
 * Users can also choose "cycle" to exercise all implementations, but this
 * is for testing purposes, and therefore it can only be set in user space.
 */
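/*
 * For example (sysfs path assumed from standard Linux module-parameter
 * handling for the zfs module):
 *
 *        echo scalar > /sys/module/zfs/parameters/zfs_fletcher_4_impl
 *        cat /sys/module/zfs/parameters/zfs_fletcher_4_impl
 */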
module_param_call(zfs_fletcher_4_impl,
    fletcher_4_param_set, fletcher_4_param_get, NULL, 0644);
MODULE_PARM_DESC(zfs_fletcher_4_impl, "Select fletcher 4 algorithm");

EXPORT_SYMBOL(fletcher_4_init);
EXPORT_SYMBOL(fletcher_4_fini);
EXPORT_SYMBOL(fletcher_2_native);
EXPORT_SYMBOL(fletcher_2_byteswap);
EXPORT_SYMBOL(fletcher_4_native);
EXPORT_SYMBOL(fletcher_4_byteswap);
EXPORT_SYMBOL(fletcher_4_incremental_native);
EXPORT_SYMBOL(fletcher_4_incremental_byteswap);
#endif