4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
25 #include <sys/zfs_context.h>
26 #include <modes/modes.h>
27 #include <sys/crypto/common.h>
28 #include <sys/crypto/icp.h>
29 #include <sys/crypto/impl.h>
30 #include <sys/byteorder.h>
32 #include <modes/gcm_impl.h>
/*
 * GHASH(c, d, t, o): fold one 16-byte block into the running GHASH state.
 * XORs block `d` into c->gcm_ghash, then multiplies the result by the
 * hash subkey c->gcm_H in GF(2^128) via the selected implementation's
 * ->mul, writing the product to `t`.
 *
 * Wrapped in do { } while (0) so this multi-statement macro expands as a
 * single statement; the original two bare statements would silently
 * misbehave inside an unbraced if/else (CERT PRE10-C).  Relies on an
 * xor_block() function pointer being in scope at every call site, as
 * before.
 */
#define	GHASH(c, d, t, o)						\
	do {								\
		xor_block((uint8_t *)(d), (uint8_t *)(c)->gcm_ghash);	\
		(o)->mul((uint64_t *)(void *)(c)->gcm_ghash,		\
		    (c)->gcm_H, (uint64_t *)(void *)(t));		\
	} while (0)
/*
 * NOTE(review): this listing appears truncated — the embedded source line
 * numbers skip values, so braces and statements are missing from view.
 * Comments below describe only what is visible; do not assume the visible
 * statements are the complete function.
 */
40 * Encrypt multiple blocks of data in GCM mode. Decrypt for GCM mode
41 * is done in another function.
44 gcm_mode_encrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
45 crypto_data_t *out, size_t block_size,
46 int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
47 void (*copy_block)(uint8_t *, uint8_t *),
48 void (*xor_block)(uint8_t *, uint8_t *))
50 const gcm_impl_ops_t *gops;
51 size_t remainder = length;
53 uint8_t *datap = (uint8_t *)data;
60 size_t out_data_1_len;
/* counter mask selects the low 32 bits of the big-endian counter block */
62 uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
/* Not enough input yet for a full block: buffer it and return early. */
64 if (length + ctx->gcm_remainder_len < block_size) {
65 /* accumulate bytes here and return */
67 (uint8_t *)ctx->gcm_remainder + ctx->gcm_remainder_len,
69 ctx->gcm_remainder_len += length;
70 ctx->gcm_copy_to = datap;
71 return (CRYPTO_SUCCESS);
74 lastp = (uint8_t *)ctx->gcm_cb;
76 crypto_init_ptrs(out, &iov_or_mp, &offset);
78 gops = gcm_impl_get_ops();
80 /* Unprocessed data from last call. */
81 if (ctx->gcm_remainder_len > 0) {
82 need = block_size - ctx->gcm_remainder_len;
85 return (CRYPTO_DATA_LEN_RANGE);
87 bcopy(datap, &((uint8_t *)ctx->gcm_remainder)
88 [ctx->gcm_remainder_len], need);
90 blockp = (uint8_t *)ctx->gcm_remainder;
96 * Increment counter. Counter bits are confined
97 * to the bottom 32 bits of the counter block.
99 counter = ntohll(ctx->gcm_cb[1] & counter_mask);
100 counter = htonll(counter + 1);
101 counter &= counter_mask;
102 ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;
/* CTR encrypt: E_K(counter block) XORed into the plaintext block */
104 encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb,
105 (uint8_t *)ctx->gcm_tmp);
106 xor_block(blockp, (uint8_t *)ctx->gcm_tmp);
108 lastp = (uint8_t *)ctx->gcm_tmp;
110 ctx->gcm_processed_data_len += block_size;
113 if (ctx->gcm_remainder_len > 0) {
114 bcopy(blockp, ctx->gcm_copy_to,
115 ctx->gcm_remainder_len);
116 bcopy(blockp + ctx->gcm_remainder_len, datap,
/* scatter the ciphertext block into the (possibly split) output buffers */
120 crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
121 &out_data_1_len, &out_data_2, block_size);
123 /* copy block to where it belongs */
124 if (out_data_1_len == block_size) {
125 copy_block(lastp, out_data_1);
127 bcopy(lastp, out_data_1, out_data_1_len);
128 if (out_data_2 != NULL) {
129 bcopy(lastp + out_data_1_len,
131 block_size - out_data_1_len);
135 out->cd_offset += block_size;
138 /* add ciphertext to the hash */
139 GHASH(ctx, ctx->gcm_tmp, ctx->gcm_ghash, gops);
141 /* Update pointer to next block of data to be processed. */
142 if (ctx->gcm_remainder_len != 0) {
144 ctx->gcm_remainder_len = 0;
149 remainder = (size_t)&data[length] - (size_t)datap;
151 /* Incomplete last block. */
152 if (remainder > 0 && remainder < block_size) {
153 bcopy(datap, ctx->gcm_remainder, remainder);
154 ctx->gcm_remainder_len = remainder;
155 ctx->gcm_copy_to = datap;
158 ctx->gcm_copy_to = NULL;
160 } while (remainder > 0);
162 return (CRYPTO_SUCCESS);
/*
 * NOTE(review): listing appears truncated (embedded line numbers skip);
 * the visible statements are incomplete.
 *
 * Finalize a GCM encryption: CTR-encrypt any buffered partial block,
 * fold the AAD/ciphertext bit lengths into GHASH, compute the tag as
 * E_K(J0) XOR GHASH, and emit remainder ciphertext plus tag to `out`.
 */
167 gcm_encrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
168 int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
169 void (*copy_block)(uint8_t *, uint8_t *),
170 void (*xor_block)(uint8_t *, uint8_t *))
172 const gcm_impl_ops_t *gops;
173 uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
174 uint8_t *ghash, *macp = NULL;
/* output must hold the leftover ciphertext plus the authentication tag */
178 (ctx->gcm_remainder_len + ctx->gcm_tag_len)) {
179 return (CRYPTO_DATA_LEN_RANGE);
182 gops = gcm_impl_get_ops();
183 ghash = (uint8_t *)ctx->gcm_ghash;
185 if (ctx->gcm_remainder_len > 0) {
187 uint8_t *tmpp = (uint8_t *)ctx->gcm_tmp;
190 * Here is where we deal with data that is not a
191 * multiple of the block size.
/* bump the 32-bit counter portion of the counter block */
197 counter = ntohll(ctx->gcm_cb[1] & counter_mask);
198 counter = htonll(counter + 1);
199 counter &= counter_mask;
200 ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;
202 encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb,
203 (uint8_t *)ctx->gcm_tmp);
/* zero-pad the partial block before it is hashed */
205 macp = (uint8_t *)ctx->gcm_remainder;
206 bzero(macp + ctx->gcm_remainder_len,
207 block_size - ctx->gcm_remainder_len);
209 /* XOR with counter block */
210 for (i = 0; i < ctx->gcm_remainder_len; i++) {
214 /* add ciphertext to the hash */
215 GHASH(ctx, macp, ghash, gops);
217 ctx->gcm_processed_data_len += ctx->gcm_remainder_len;
/* final GHASH block: len(A) || len(C), both in bits */
220 ctx->gcm_len_a_len_c[1] =
221 htonll(CRYPTO_BYTES2BITS(ctx->gcm_processed_data_len));
222 GHASH(ctx, ctx->gcm_len_a_len_c, ghash, gops);
/* tag = E_K(J0) XOR GHASH, accumulated into ghash */
223 encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_J0,
224 (uint8_t *)ctx->gcm_J0);
225 xor_block((uint8_t *)ctx->gcm_J0, ghash);
227 if (ctx->gcm_remainder_len > 0) {
228 rv = crypto_put_output_data(macp, out, ctx->gcm_remainder_len);
229 if (rv != CRYPTO_SUCCESS)
232 out->cd_offset += ctx->gcm_remainder_len;
233 ctx->gcm_remainder_len = 0;
234 rv = crypto_put_output_data(ghash, out, ctx->gcm_tag_len);
235 if (rv != CRYPTO_SUCCESS)
237 out->cd_offset += ctx->gcm_tag_len;
239 return (CRYPTO_SUCCESS);
/*
 * NOTE(review): listing appears truncated (embedded line numbers skip);
 * the visible statements are incomplete.
 */
243 * This will only deal with decrypting the last block of the input that
244 * might not be a multiple of block length.
247 gcm_decrypt_incomplete_block(gcm_ctx_t *ctx, size_t block_size, size_t index,
248 int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
249 void (*xor_block)(uint8_t *, uint8_t *))
251 uint8_t *datap, *outp, *counterp;
253 uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
258 * Counter bits are confined to the bottom 32 bits
260 counter = ntohll(ctx->gcm_cb[1] & counter_mask);
261 counter = htonll(counter + 1);
262 counter &= counter_mask;
263 ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;
/* datap = buffered partial ciphertext; outp = plaintext at `index` */
265 datap = (uint8_t *)ctx->gcm_remainder;
266 outp = &((ctx->gcm_pt_buf)[index]);
267 counterp = (uint8_t *)ctx->gcm_tmp;
269 /* authentication tag */
/* zero-pad the partial ciphertext block before hashing it */
270 bzero((uint8_t *)ctx->gcm_tmp, block_size);
271 bcopy(datap, (uint8_t *)ctx->gcm_tmp, ctx->gcm_remainder_len);
273 /* add ciphertext to the hash */
274 GHASH(ctx, ctx->gcm_tmp, ctx->gcm_ghash, gcm_impl_get_ops());
276 /* decrypt remaining ciphertext */
277 encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb, counterp);
279 /* XOR with counter block */
280 for (i = 0; i < ctx->gcm_remainder_len; i++) {
281 outp[i] = datap[i] ^ counterp[i];
/*
 * NOTE(review): listing appears truncated (embedded line numbers skip);
 * e.g. the NULL check that presumably precedes the visible
 * CRYPTO_HOST_MEMORY return is not shown.  Comments describe only what
 * is visible.
 *
 * GCM decrypt update: buffers all incoming ciphertext in gcm_pt_buf;
 * actual decryption and tag verification happen in gcm_decrypt_final().
 */
287 gcm_mode_decrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
288 crypto_data_t *out, size_t block_size,
289 int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
290 void (*copy_block)(uint8_t *, uint8_t *),
291 void (*xor_block)(uint8_t *, uint8_t *))
297 * Copy contiguous ciphertext input blocks to plaintext buffer.
298 * Ciphertext will be decrypted in the final.
/* grow the accumulation buffer to hold the new chunk */
301 new_len = ctx->gcm_pt_buf_len + length;
302 new = vmem_alloc(new_len, ctx->gcm_kmflag);
303 bcopy(ctx->gcm_pt_buf, new, ctx->gcm_pt_buf_len);
304 vmem_free(ctx->gcm_pt_buf, ctx->gcm_pt_buf_len);
306 return (CRYPTO_HOST_MEMORY);
308 ctx->gcm_pt_buf = new;
309 ctx->gcm_pt_buf_len = new_len;
310 bcopy(data, &ctx->gcm_pt_buf[ctx->gcm_processed_data_len],
312 ctx->gcm_processed_data_len += length;
315 ctx->gcm_remainder_len = 0;
316 return (CRYPTO_SUCCESS);
/*
 * NOTE(review): listing appears truncated (embedded line numbers skip);
 * the visible statements are incomplete.
 *
 * GCM decrypt final: GHASH all buffered ciphertext, CTR-decrypt it in
 * place into gcm_pt_buf, recompute the tag, compare it against the tag
 * carried at the end of the buffered input, and on match emit the
 * plaintext.  Mismatch returns CRYPTO_INVALID_MAC with no plaintext.
 */
320 gcm_decrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
321 int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
322 void (*xor_block)(uint8_t *, uint8_t *))
324 const gcm_impl_ops_t *gops;
331 uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
332 int processed = 0, rv;
334 ASSERT(ctx->gcm_processed_data_len == ctx->gcm_pt_buf_len);
336 gops = gcm_impl_get_ops();
/* the last gcm_tag_len bytes of the buffer are the received tag */
337 pt_len = ctx->gcm_processed_data_len - ctx->gcm_tag_len;
338 ghash = (uint8_t *)ctx->gcm_ghash;
339 blockp = ctx->gcm_pt_buf;
341 while (remainder > 0) {
342 /* Incomplete last block */
343 if (remainder < block_size) {
344 bcopy(blockp, ctx->gcm_remainder, remainder);
345 ctx->gcm_remainder_len = remainder;
347 * not expecting anymore ciphertext, just
348 * compute plaintext for the remaining input
350 gcm_decrypt_incomplete_block(ctx, block_size,
351 processed, encrypt_block, xor_block);
352 ctx->gcm_remainder_len = 0;
355 /* add ciphertext to the hash */
356 GHASH(ctx, blockp, ghash, gops);
360 * Counter bits are confined to the bottom 32 bits
362 counter = ntohll(ctx->gcm_cb[1] & counter_mask);
363 counter = htonll(counter + 1);
364 counter &= counter_mask;
365 ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;
367 cbp = (uint8_t *)ctx->gcm_tmp;
368 encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb, cbp);
370 /* XOR with ciphertext */
/* decrypts the block in place in gcm_pt_buf */
371 xor_block(cbp, blockp);
373 processed += block_size;
374 blockp += block_size;
375 remainder -= block_size;
/* fold len(A) || len(C) (in bits) into the hash, then tag = E_K(J0) ^ GHASH */
378 ctx->gcm_len_a_len_c[1] = htonll(CRYPTO_BYTES2BITS(pt_len));
379 GHASH(ctx, ctx->gcm_len_a_len_c, ghash, gops);
380 encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_J0,
381 (uint8_t *)ctx->gcm_J0);
382 xor_block((uint8_t *)ctx->gcm_J0, ghash);
384 /* compare the input authentication tag with what we calculated */
/* NOTE(review): bcmp is not constant-time — timing-safe compare worth confirming upstream */
385 if (bcmp(&ctx->gcm_pt_buf[pt_len], ghash, ctx->gcm_tag_len)) {
386 /* They don't match */
387 return (CRYPTO_INVALID_MAC);
389 rv = crypto_put_output_data(ctx->gcm_pt_buf, out, pt_len);
390 if (rv != CRYPTO_SUCCESS)
392 out->cd_offset += pt_len;
394 return (CRYPTO_SUCCESS);
/*
 * NOTE(review): listing appears truncated (embedded line numbers skip);
 * the tag-length validation logic between lines 405 and 416 is not
 * visible — only the failure return and the IV-length check are shown.
 *
 * Validate CK_AES_GCM_PARAMS: tag length (bits) and a non-zero IV length.
 */
398 gcm_validate_args(CK_AES_GCM_PARAMS *gcm_param)
403 * Check the length of the authentication tag (in bits).
405 tag_len = gcm_param->ulTagBits;
416 return (CRYPTO_MECHANISM_PARAM_INVALID);
/* a zero-length IV is never valid for GCM */
419 if (gcm_param->ulIvLen == 0)
420 return (CRYPTO_MECHANISM_PARAM_INVALID);
422 return (CRYPTO_SUCCESS);
/*
 * NOTE(review): listing appears truncated (embedded line numbers skip);
 * the visible statements are incomplete.
 *
 * Derive the pre-counter block J0 and the initial counter block from the
 * IV.  For a non-96-bit IV (the visible path), J0 is computed by
 * GHASHing the IV padded to a block boundary, followed by a final block
 * of len(IV) in bits; J0 is saved for the tag computation in the final.
 */
426 gcm_format_initial_blocks(uchar_t *iv, ulong_t iv_len,
427 gcm_ctx_t *ctx, size_t block_size,
428 void (*copy_block)(uint8_t *, uint8_t *),
429 void (*xor_block)(uint8_t *, uint8_t *))
431 const gcm_impl_ops_t *gops;
433 ulong_t remainder = iv_len;
434 ulong_t processed = 0;
435 uint8_t *datap, *ghash;
436 uint64_t len_a_len_c[2];
438 gops = gcm_impl_get_ops();
439 ghash = (uint8_t *)ctx->gcm_ghash;
440 cb = (uint8_t *)ctx->gcm_cb;
447 /* J0 will be used again in the final */
448 copy_block(cb, (uint8_t *)ctx->gcm_J0);
/* GHASH the IV block-by-block, zero-padding a trailing partial block */
452 if (remainder < block_size) {
453 bzero(cb, block_size);
454 bcopy(&(iv[processed]), cb, remainder);
455 datap = (uint8_t *)cb;
458 datap = (uint8_t *)(&(iv[processed]));
459 processed += block_size;
460 remainder -= block_size;
462 GHASH(ctx, datap, ghash, gops);
463 } while (remainder > 0);
/* final IV-hash block carries len(IV) in bits */
466 len_a_len_c[1] = htonll(CRYPTO_BYTES2BITS(iv_len));
467 GHASH(ctx, len_a_len_c, ctx->gcm_J0, gops);
469 /* J0 will be used again in the final */
470 copy_block((uint8_t *)ctx->gcm_J0, (uint8_t *)cb);
/*
 * NOTE(review): listing appears truncated (embedded line numbers skip);
 * the visible statements are incomplete.
 */
475 * The following function is called at encrypt or decrypt init time
479 gcm_init(gcm_ctx_t *ctx, unsigned char *iv, size_t iv_len,
480 unsigned char *auth_data, size_t auth_data_len, size_t block_size,
481 int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
482 void (*copy_block)(uint8_t *, uint8_t *),
483 void (*xor_block)(uint8_t *, uint8_t *))
485 const gcm_impl_ops_t *gops;
486 uint8_t *ghash, *datap, *authp;
487 size_t remainder, processed;
489 /* encrypt zero block to get subkey H */
490 bzero(ctx->gcm_H, sizeof (ctx->gcm_H));
491 encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_H,
492 (uint8_t *)ctx->gcm_H);
/* derive J0 / initial counter block from the IV */
494 gcm_format_initial_blocks(iv, iv_len, ctx, block_size,
495 copy_block, xor_block);
497 gops = gcm_impl_get_ops();
498 authp = (uint8_t *)ctx->gcm_tmp;
499 ghash = (uint8_t *)ctx->gcm_ghash;
500 bzero(authp, block_size);
501 bzero(ghash, block_size);
/* GHASH the AAD block-by-block, zero-padding a trailing partial block */
504 remainder = auth_data_len;
506 if (remainder < block_size) {
508 * There's not a block full of data, pad rest of
511 bzero(authp, block_size);
512 bcopy(&(auth_data[processed]), authp, remainder);
513 datap = (uint8_t *)authp;
516 datap = (uint8_t *)(&(auth_data[processed]));
517 processed += block_size;
518 remainder -= block_size;
521 /* add auth data to the hash */
522 GHASH(ctx, datap, ghash, gops);
524 } while (remainder > 0);
526 return (CRYPTO_SUCCESS);
/*
 * NOTE(review): listing appears truncated (embedded line numbers skip);
 * the visible statements are incomplete.
 *
 * Initialize a gcm_ctx_t from a CK_AES_GCM_PARAMS mechanism parameter:
 * validates the params, records the tag length in bytes and the AAD
 * bit length, sets GCM_MODE, then runs gcm_init() with the IV and AAD.
 */
530 gcm_init_ctx(gcm_ctx_t *gcm_ctx, char *param, size_t block_size,
531 int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
532 void (*copy_block)(uint8_t *, uint8_t *),
533 void (*xor_block)(uint8_t *, uint8_t *))
536 CK_AES_GCM_PARAMS *gcm_param;
539 gcm_param = (CK_AES_GCM_PARAMS *)(void *)param;
541 if ((rv = gcm_validate_args(gcm_param)) != 0) {
/* ulTagBits is in bits; store the tag length in bytes */
545 gcm_ctx->gcm_tag_len = gcm_param->ulTagBits;
546 gcm_ctx->gcm_tag_len >>= 3;
547 gcm_ctx->gcm_processed_data_len = 0;
549 /* these values are in bits */
550 gcm_ctx->gcm_len_a_len_c[0]
551 = htonll(CRYPTO_BYTES2BITS(gcm_param->ulAADLen));
554 gcm_ctx->gcm_flags |= GCM_MODE;
556 rv = CRYPTO_MECHANISM_PARAM_INVALID;
560 if (gcm_init(gcm_ctx, gcm_param->pIv, gcm_param->ulIvLen,
561 gcm_param->pAAD, gcm_param->ulAADLen, block_size,
562 encrypt_block, copy_block, xor_block) != 0) {
563 rv = CRYPTO_MECHANISM_PARAM_INVALID;
/*
 * NOTE(review): listing appears truncated (embedded line numbers skip);
 * the visible statements are incomplete.
 *
 * Initialize a gcm_ctx_t for GMAC (authentication only).  Unlike
 * gcm_init_ctx(), the tag length and IV length are fixed constants
 * (AES_GMAC_TAG_BITS / AES_GMAC_IV_LEN) rather than caller-supplied.
 */
570 gmac_init_ctx(gcm_ctx_t *gcm_ctx, char *param, size_t block_size,
571 int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
572 void (*copy_block)(uint8_t *, uint8_t *),
573 void (*xor_block)(uint8_t *, uint8_t *))
576 CK_AES_GMAC_PARAMS *gmac_param;
579 gmac_param = (CK_AES_GMAC_PARAMS *)(void *)param;
581 gcm_ctx->gcm_tag_len = CRYPTO_BITS2BYTES(AES_GMAC_TAG_BITS);
582 gcm_ctx->gcm_processed_data_len = 0;
584 /* these values are in bits */
585 gcm_ctx->gcm_len_a_len_c[0]
586 = htonll(CRYPTO_BYTES2BITS(gmac_param->ulAADLen));
589 gcm_ctx->gcm_flags |= GMAC_MODE;
591 rv = CRYPTO_MECHANISM_PARAM_INVALID;
595 if (gcm_init(gcm_ctx, gmac_param->pIv, AES_GMAC_IV_LEN,
596 gmac_param->pAAD, gmac_param->ulAADLen, block_size,
597 encrypt_block, copy_block, xor_block) != 0) {
598 rv = CRYPTO_MECHANISM_PARAM_INVALID;
/*
 * NOTE(review): listing appears truncated (embedded line numbers skip).
 * Allocate and zero a gcm_ctx_t flagged GCM_MODE; returns NULL on
 * allocation failure (per the visible kmem_zalloc check).
 */
605 gcm_alloc_ctx(int kmflag)
609 if ((gcm_ctx = kmem_zalloc(sizeof (gcm_ctx_t), kmflag)) == NULL)
612 gcm_ctx->gcm_flags = GCM_MODE;
/*
 * NOTE(review): listing appears truncated (embedded line numbers skip).
 * Allocate and zero a gcm_ctx_t flagged GMAC_MODE; returns NULL on
 * allocation failure (per the visible kmem_zalloc check).
 */
617 gmac_alloc_ctx(int kmflag)
621 if ((gcm_ctx = kmem_zalloc(sizeof (gcm_ctx_t), kmflag)) == NULL)
624 gcm_ctx->gcm_flags = GMAC_MODE;
/*
 * NOTE(review): listing appears truncated (embedded line numbers skip).
 * Record the kmem allocation flag used later by
 * gcm_mode_decrypt_contiguous_blocks() when growing gcm_pt_buf.
 */
629 gcm_set_kmflag(gcm_ctx_t *ctx, int kmflag)
631 ctx->gcm_kmflag = kmflag;
/*
 * NOTE(review): listing appears truncated (embedded line numbers skip);
 * the initializers of gcm_fastest_impl and gcm_all_impl are not fully
 * visible.
 */
634 /* GCM implementation that contains the fastest methods */
635 static gcm_impl_ops_t gcm_fastest_impl = {
639 /* All compiled in implementations */
640 const gcm_impl_ops_t *gcm_all_impl[] = {
641 #if defined(__x86_64) && defined(HAVE_PCLMULQDQ)
647 /* Indicate that benchmark has been completed */
648 static boolean_t gcm_impl_initialized = B_FALSE;
650 /* Select GCM implementation */
/* sentinel selector values, distinct from any valid gcm_supp_impl index */
651 #define IMPL_FASTEST (UINT32_MAX)
652 #define IMPL_CYCLE (UINT32_MAX-1)
/* force a fresh read of the selector each time (it can change at runtime) */
654 #define GCM_IMPL_READ(i) (*(volatile uint32_t *) &(i))
656 static uint32_t icp_gcm_impl = IMPL_FASTEST;
657 static uint32_t user_sel_impl = IMPL_FASTEST;
659 /* Hold all supported implementations */
660 static size_t gcm_supp_impl_cnt = 0;
661 static gcm_impl_ops_t *gcm_supp_impl[ARRAY_SIZE(gcm_all_impl)];
/*
 * NOTE(review): listing appears truncated (embedded line numbers skip);
 * the switch/case structure dispatching on `impl` is not fully visible.
 */
664 * Returns the GCM operations for encrypt/decrypt/key setup. When a
665 * SIMD implementation is not allowed in the current context, then
666 * fallback to the fastest generic implementation.
668 const gcm_impl_ops_t *
672 return (&gcm_generic_impl);
674 const gcm_impl_ops_t *ops = NULL;
675 const uint32_t impl = GCM_IMPL_READ(icp_gcm_impl);
/* IMPL_FASTEST: use the benchmark-selected fastest implementation */
679 ASSERT(gcm_impl_initialized);
680 ops = &gcm_fastest_impl;
683 /* Cycle through supported implementations */
684 ASSERT(gcm_impl_initialized);
685 ASSERT3U(gcm_supp_impl_cnt, >, 0);
686 static size_t cycle_impl_idx = 0;
687 size_t idx = (++cycle_impl_idx) % gcm_supp_impl_cnt;
688 ops = gcm_supp_impl[idx];
/* explicit selector: index directly into the supported-impl table */
691 ASSERT3U(impl, <, gcm_supp_impl_cnt);
692 ASSERT3U(gcm_supp_impl_cnt, >, 0);
693 if (impl < ARRAY_SIZE(gcm_all_impl))
694 ops = gcm_supp_impl[impl];
698 ASSERT3P(ops, !=, NULL);
/*
 * NOTE(review): listing appears truncated (embedded line numbers skip);
 * the visible statements are incomplete.
 */
704 * Initialize all supported implementations.
709 gcm_impl_ops_t *curr_impl;
712 /* Move supported implementations into gcm_supp_impls */
713 for (i = 0, c = 0; i < ARRAY_SIZE(gcm_all_impl); i++) {
714 curr_impl = (gcm_impl_ops_t *)gcm_all_impl[i];
716 if (curr_impl->is_supported())
717 gcm_supp_impl[c++] = (gcm_impl_ops_t *)curr_impl;
719 gcm_supp_impl_cnt = c;
722 * Set the fastest implementation given the assumption that the
723 * hardware accelerated version is the fastest.
725 #if defined(__x86_64) && defined(HAVE_PCLMULQDQ)
726 if (gcm_pclmulqdq_impl.is_supported()) {
727 memcpy(&gcm_fastest_impl, &gcm_pclmulqdq_impl,
728 sizeof (gcm_fastest_impl));
/* fall back to the generic (portable) implementation */
732 memcpy(&gcm_fastest_impl, &gcm_generic_impl,
733 sizeof (gcm_fastest_impl));
736 strcpy(gcm_fastest_impl.name, "fastest");
738 /* Finish initialization */
/* apply any selection made via gcm_impl_set() before init ran */
739 atomic_swap_32(&icp_gcm_impl, user_sel_impl);
740 gcm_impl_initialized = B_TRUE;
/*
 * NOTE(review): listing appears truncated (embedded line numbers skip);
 * the struct member declarations (name/selector fields) are not visible.
 * Maps the mandatory option names to their sentinel selector values.
 */
743 static const struct {
746 } gcm_impl_opts[] = {
747 { "cycle", IMPL_CYCLE },
748 { "fastest", IMPL_FASTEST },
/*
 * NOTE(review): listing appears truncated (embedded line numbers skip);
 * the visible statements are incomplete.
 */
752 * Function sets desired gcm implementation.
754 * If we are called before init(), user preference will be saved in
755 * user_sel_impl, and applied in later init() call. This occurs when module
756 * parameter is specified on module load. Otherwise, directly update
759 * @val Name of gcm implementation to use
763 gcm_impl_set(const char *val)
766 char req_name[GCM_IMPL_NAME_MAX];
767 uint32_t impl = GCM_IMPL_READ(user_sel_impl);
/* sanitize: bound the length, copy, and strip trailing whitespace */
771 i = strnlen(val, GCM_IMPL_NAME_MAX);
772 if (i == 0 || i >= GCM_IMPL_NAME_MAX)
775 strlcpy(req_name, val, GCM_IMPL_NAME_MAX);
776 while (i > 0 && isspace(req_name[i-1]))
780 /* Check mandatory options */
781 for (i = 0; i < ARRAY_SIZE(gcm_impl_opts); i++) {
782 if (strcmp(req_name, gcm_impl_opts[i].name) == 0) {
783 impl = gcm_impl_opts[i].sel;
789 /* check all supported impl if init() was already called */
790 if (err != 0 && gcm_impl_initialized) {
791 /* check all supported implementations */
792 for (i = 0; i < gcm_supp_impl_cnt; i++) {
793 if (strcmp(req_name, gcm_supp_impl[i]->name) == 0) {
/* before init(): stash in user_sel_impl; after: apply immediately */
802 if (gcm_impl_initialized)
803 atomic_swap_32(&icp_gcm_impl, impl);
805 atomic_swap_32(&user_sel_impl, impl);
/*
 * NOTE(review): listing appears truncated (embedded line numbers skip).
 * Linux module-parameter setter: thin wrapper forwarding to
 * gcm_impl_set(); `kp` is unused.
 */
814 icp_gcm_impl_set(const char *val, zfs_kernel_param_t *kp)
816 return (gcm_impl_set(val));
/*
 * NOTE(review): listing appears truncated (embedded line numbers skip);
 * the visible statements are incomplete.
 *
 * Linux module-parameter getter: formats the available implementation
 * names into `buffer`, bracketing the currently selected one.
 */
820 icp_gcm_impl_get(char *buffer, zfs_kernel_param_t *kp)
824 const uint32_t impl = GCM_IMPL_READ(icp_gcm_impl);
826 ASSERT(gcm_impl_initialized);
828 /* list mandatory options */
829 for (i = 0; i < ARRAY_SIZE(gcm_impl_opts); i++) {
830 fmt = (impl == gcm_impl_opts[i].sel) ? "[%s] " : "%s ";
831 cnt += sprintf(buffer + cnt, fmt, gcm_impl_opts[i].name);
834 /* list all supported implementations */
835 for (i = 0; i < gcm_supp_impl_cnt; i++) {
836 fmt = (i == impl) ? "[%s] " : "%s ";
837 cnt += sprintf(buffer + cnt, fmt, gcm_supp_impl[i]->name);
/* register the icp_gcm_impl module parameter with the above accessors */
843 module_param_call(icp_gcm_impl, icp_gcm_impl_set, icp_gcm_impl_get,
845 MODULE_PARM_DESC(icp_gcm_impl, "Select gcm implementation.");