/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <modes/modes.h>
#include <sys/crypto/common.h>
#include <sys/crypto/icp.h>
#include <sys/crypto/impl.h>
#include <sys/byteorder.h>
#include <sys/simd.h>
#include <modes/gcm_impl.h>

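/*
 * GHASH(c, d, t, o) updates the running hash with one block: it XORs
 * block d into c->gcm_ghash and multiplies the result by the subkey H
 * in GF(2^128), storing the product in t (normally c->gcm_ghash, so
 * the hash state is updated in place).
 */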
#define GHASH(c, d, t, o) \
        do { \
                xor_block((uint8_t *)(d), (uint8_t *)(c)->gcm_ghash); \
                (o)->mul((uint64_t *)(void *)(c)->gcm_ghash, (c)->gcm_H, \
                    (uint64_t *)(void *)(t)); \
        } while (0)

/*
 * Encrypt multiple blocks of data in GCM mode.  GCM decryption is
 * handled by gcm_mode_decrypt_contiguous_blocks() below.
 */
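/*
 * A minimal sketch of the streaming encrypt call sequence (assuming
 * AES and its 16-byte block size; encrypt_cb/copy_cb/xor_cb stand in
 * for the block-cipher callbacks the caller supplies):
 *
 *      gcm_init_ctx(ctx, (char *)&params, 16, encrypt_cb, copy_cb,
 *          xor_cb);
 *      gcm_mode_encrypt_contiguous_blocks(ctx, data, len, out, 16,
 *          encrypt_cb, copy_cb, xor_cb);
 *      gcm_encrypt_final(ctx, out, 16, encrypt_cb, copy_cb, xor_cb);
 */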
int
gcm_mode_encrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
    crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
        const gcm_impl_ops_t *gops;
        size_t remainder = length;
        size_t need = 0;
        uint8_t *datap = (uint8_t *)data;
        uint8_t *blockp;
        uint8_t *lastp;
        void *iov_or_mp;
        offset_t offset;
        uint8_t *out_data_1;
        uint8_t *out_data_2;
        size_t out_data_1_len;
        uint64_t counter;
        uint64_t counter_mask = ntohll(0x00000000ffffffffULL);

        if (length + ctx->gcm_remainder_len < block_size) {
                /* accumulate bytes here and return */
                bcopy(datap,
                    (uint8_t *)ctx->gcm_remainder + ctx->gcm_remainder_len,
                    length);
                ctx->gcm_remainder_len += length;
                ctx->gcm_copy_to = datap;
                return (CRYPTO_SUCCESS);
        }

        lastp = (uint8_t *)ctx->gcm_cb;
        if (out != NULL)
                crypto_init_ptrs(out, &iov_or_mp, &offset);

        gops = gcm_impl_get_ops();
        do {
                /* Unprocessed data from last call. */
                if (ctx->gcm_remainder_len > 0) {
                        need = block_size - ctx->gcm_remainder_len;

                        if (need > remainder)
                                return (CRYPTO_DATA_LEN_RANGE);

                        bcopy(datap, &((uint8_t *)ctx->gcm_remainder)
                            [ctx->gcm_remainder_len], need);

                        blockp = (uint8_t *)ctx->gcm_remainder;
                } else {
                        blockp = datap;
                }

                /*
                 * Increment counter.  Counter bits are confined to the
                 * bottom 32 bits of the counter block.  gcm_cb[] is kept
                 * in big-endian (wire) order, so convert to native order,
                 * add one, convert back, and mask so that any carry out
                 * of the 32-bit counter is discarded (inc32 semantics)
                 * before splicing it back into the block.
                 */
                counter = ntohll(ctx->gcm_cb[1] & counter_mask);
                counter = htonll(counter + 1);
                counter &= counter_mask;
                ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;

                encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb,
                    (uint8_t *)ctx->gcm_tmp);
                xor_block(blockp, (uint8_t *)ctx->gcm_tmp);

                lastp = (uint8_t *)ctx->gcm_tmp;

                ctx->gcm_processed_data_len += block_size;

                if (out == NULL) {
                        if (ctx->gcm_remainder_len > 0) {
                                bcopy(blockp, ctx->gcm_copy_to,
                                    ctx->gcm_remainder_len);
                                bcopy(blockp + ctx->gcm_remainder_len, datap,
                                    need);
                        }
                } else {
                        crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
                            &out_data_1_len, &out_data_2, block_size);

                        /* copy block to where it belongs */
                        if (out_data_1_len == block_size) {
                                copy_block(lastp, out_data_1);
                        } else {
                                bcopy(lastp, out_data_1, out_data_1_len);
                                if (out_data_2 != NULL) {
                                        bcopy(lastp + out_data_1_len,
                                            out_data_2,
                                            block_size - out_data_1_len);
                                }
                        }
                        /* update offset */
                        out->cd_offset += block_size;
                }

                /* add ciphertext to the hash */
                GHASH(ctx, ctx->gcm_tmp, ctx->gcm_ghash, gops);

                /* Update pointer to next block of data to be processed. */
                if (ctx->gcm_remainder_len != 0) {
                        datap += need;
                        ctx->gcm_remainder_len = 0;
                } else {
                        datap += block_size;
                }

                remainder = (size_t)&data[length] - (size_t)datap;

                /* Incomplete last block. */
                if (remainder > 0 && remainder < block_size) {
                        bcopy(datap, ctx->gcm_remainder, remainder);
                        ctx->gcm_remainder_len = remainder;
                        ctx->gcm_copy_to = datap;
                        goto out;
                }
                ctx->gcm_copy_to = NULL;

        } while (remainder > 0);
out:
        return (CRYPTO_SUCCESS);
}

/* ARGSUSED */
int
gcm_encrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
        const gcm_impl_ops_t *gops;
        uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
        uint8_t *ghash, *macp = NULL;
        int i, rv;

        if (out->cd_length <
            (ctx->gcm_remainder_len + ctx->gcm_tag_len)) {
                return (CRYPTO_DATA_LEN_RANGE);
        }

        gops = gcm_impl_get_ops();
        ghash = (uint8_t *)ctx->gcm_ghash;

        if (ctx->gcm_remainder_len > 0) {
                uint64_t counter;
                uint8_t *tmpp = (uint8_t *)ctx->gcm_tmp;

                /*
                 * Here is where we deal with data that is not a
                 * multiple of the block size.
                 */

                /*
                 * Increment counter.
                 */
                counter = ntohll(ctx->gcm_cb[1] & counter_mask);
                counter = htonll(counter + 1);
                counter &= counter_mask;
                ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;

                encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb,
                    (uint8_t *)ctx->gcm_tmp);

                macp = (uint8_t *)ctx->gcm_remainder;
                bzero(macp + ctx->gcm_remainder_len,
                    block_size - ctx->gcm_remainder_len);

                /* XOR with counter block */
                for (i = 0; i < ctx->gcm_remainder_len; i++) {
                        macp[i] ^= tmpp[i];
                }

                /* add ciphertext to the hash */
                GHASH(ctx, macp, ghash, gops);

                ctx->gcm_processed_data_len += ctx->gcm_remainder_len;
        }

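        /*
         * Finish GHASH with len(A) || len(C), then compute the tag,
         * T = E(K, J0) ^ GHASH(H, A, C); the first gcm_tag_len bytes
         * of ghash are emitted as the tag below.
         */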
        ctx->gcm_len_a_len_c[1] =
            htonll(CRYPTO_BYTES2BITS(ctx->gcm_processed_data_len));
        GHASH(ctx, ctx->gcm_len_a_len_c, ghash, gops);
        encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_J0,
            (uint8_t *)ctx->gcm_J0);
        xor_block((uint8_t *)ctx->gcm_J0, ghash);

        if (ctx->gcm_remainder_len > 0) {
                rv = crypto_put_output_data(macp, out, ctx->gcm_remainder_len);
                if (rv != CRYPTO_SUCCESS)
                        return (rv);
        }
        out->cd_offset += ctx->gcm_remainder_len;
        ctx->gcm_remainder_len = 0;
        rv = crypto_put_output_data(ghash, out, ctx->gcm_tag_len);
        if (rv != CRYPTO_SUCCESS)
                return (rv);
        out->cd_offset += ctx->gcm_tag_len;

        return (CRYPTO_SUCCESS);
}

/*
 * Decrypt the final block of input, which may be shorter than the
 * block size (complete blocks are handled in gcm_decrypt_final()).
 */
static void
gcm_decrypt_incomplete_block(gcm_ctx_t *ctx, size_t block_size, size_t index,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
        uint8_t *datap, *outp, *counterp;
        uint64_t counter;
        uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
        int i;

        /*
         * Increment counter.
         * Counter bits are confined to the bottom 32 bits
         */
        counter = ntohll(ctx->gcm_cb[1] & counter_mask);
        counter = htonll(counter + 1);
        counter &= counter_mask;
        ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;

        datap = (uint8_t *)ctx->gcm_remainder;
        outp = &((ctx->gcm_pt_buf)[index]);
        counterp = (uint8_t *)ctx->gcm_tmp;

        /* pad the partial ciphertext block with zeros for GHASH */
        bzero((uint8_t *)ctx->gcm_tmp, block_size);
        bcopy(datap, (uint8_t *)ctx->gcm_tmp, ctx->gcm_remainder_len);

        /* add ciphertext to the hash */
        GHASH(ctx, ctx->gcm_tmp, ctx->gcm_ghash, gcm_impl_get_ops());

        /* decrypt remaining ciphertext */
        encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb, counterp);

        /* XOR with counter block */
        for (i = 0; i < ctx->gcm_remainder_len; i++) {
                outp[i] = datap[i] ^ counterp[i];
        }
}

/* ARGSUSED */
int
gcm_mode_decrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
    crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
        size_t new_len;
        uint8_t *new;

        /*
         * Copy contiguous ciphertext input blocks to the plaintext buffer.
         * The ciphertext is decrypted in gcm_decrypt_final(), once the
         * whole message and its tag are available, so no plaintext is
         * released before the tag has been verified.
         */
        if (length > 0) {
                new_len = ctx->gcm_pt_buf_len + length;
                new = vmem_alloc(new_len, ctx->gcm_kmflag);
                if (new == NULL)
                        return (CRYPTO_HOST_MEMORY);
                bcopy(ctx->gcm_pt_buf, new, ctx->gcm_pt_buf_len);
                vmem_free(ctx->gcm_pt_buf, ctx->gcm_pt_buf_len);

                ctx->gcm_pt_buf = new;
                ctx->gcm_pt_buf_len = new_len;
                bcopy(data, &ctx->gcm_pt_buf[ctx->gcm_processed_data_len],
                    length);
                ctx->gcm_processed_data_len += length;
        }

        ctx->gcm_remainder_len = 0;
        return (CRYPTO_SUCCESS);
}

int
gcm_decrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
        const gcm_impl_ops_t *gops;
        size_t pt_len;
        size_t remainder;
        uint8_t *ghash;
        uint8_t *blockp;
        uint8_t *cbp;
        uint64_t counter;
        uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
        int processed = 0, rv;

        ASSERT(ctx->gcm_processed_data_len == ctx->gcm_pt_buf_len);

        gops = gcm_impl_get_ops();
        pt_len = ctx->gcm_processed_data_len - ctx->gcm_tag_len;
        ghash = (uint8_t *)ctx->gcm_ghash;
        blockp = ctx->gcm_pt_buf;
        remainder = pt_len;
        while (remainder > 0) {
                /* Incomplete last block */
                if (remainder < block_size) {
                        bcopy(blockp, ctx->gcm_remainder, remainder);
                        ctx->gcm_remainder_len = remainder;
                        /*
                         * Not expecting any more ciphertext; just
                         * compute the plaintext for the remaining input.
                         */
                        gcm_decrypt_incomplete_block(ctx, block_size,
                            processed, encrypt_block, xor_block);
                        ctx->gcm_remainder_len = 0;
                        goto out;
                }
                /* add ciphertext to the hash */
                GHASH(ctx, blockp, ghash, gops);

                /*
                 * Increment counter.
                 * Counter bits are confined to the bottom 32 bits
                 */
                counter = ntohll(ctx->gcm_cb[1] & counter_mask);
                counter = htonll(counter + 1);
                counter &= counter_mask;
                ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;

                cbp = (uint8_t *)ctx->gcm_tmp;
                encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb, cbp);

                /* XOR with ciphertext */
                xor_block(cbp, blockp);

                processed += block_size;
                blockp += block_size;
                remainder -= block_size;
        }
out:
        ctx->gcm_len_a_len_c[1] = htonll(CRYPTO_BYTES2BITS(pt_len));
        GHASH(ctx, ctx->gcm_len_a_len_c, ghash, gops);
        encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_J0,
            (uint8_t *)ctx->gcm_J0);
        xor_block((uint8_t *)ctx->gcm_J0, ghash);

        /* compare the input authentication tag with what we calculated */
        if (bcmp(&ctx->gcm_pt_buf[pt_len], ghash, ctx->gcm_tag_len)) {
                /* They don't match */
                return (CRYPTO_INVALID_MAC);
        } else {
                rv = crypto_put_output_data(ctx->gcm_pt_buf, out, pt_len);
                if (rv != CRYPTO_SUCCESS)
                        return (rv);
                out->cd_offset += pt_len;
        }
        return (CRYPTO_SUCCESS);
}
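
/*
 * A sketch of the streaming decrypt path, mirroring the encrypt sketch
 * above: gcm_init_ctx(), then one or more calls to
 * gcm_mode_decrypt_contiguous_blocks() that buffer ciphertext + tag,
 * and finally gcm_decrypt_final(), which verifies the tag before any
 * plaintext is returned.
 */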

static int
gcm_validate_args(CK_AES_GCM_PARAMS *gcm_param)
{
        size_t tag_len;

        /*
         * Check the length of the authentication tag (in bits); the
         * values accepted below are those permitted by NIST SP 800-38D.
         */
        tag_len = gcm_param->ulTagBits;
        switch (tag_len) {
        case 32:
        case 64:
        case 96:
        case 104:
        case 112:
        case 120:
        case 128:
                break;
        default:
                return (CRYPTO_MECHANISM_PARAM_INVALID);
        }

        if (gcm_param->ulIvLen == 0)
                return (CRYPTO_MECHANISM_PARAM_INVALID);

        return (CRYPTO_SUCCESS);
}

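/*
 * Compute the pre-counter block J0 from the IV per the GCM spec: a
 * 96-bit IV is used directly as IV || 0^31 || 1, while any other IV
 * length is run through GHASH along with its encoded bit length.
 */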
static void
gcm_format_initial_blocks(uchar_t *iv, ulong_t iv_len,
    gcm_ctx_t *ctx, size_t block_size,
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
        const gcm_impl_ops_t *gops;
        uint8_t *cb;
        ulong_t remainder = iv_len;
        ulong_t processed = 0;
        uint8_t *datap, *ghash;
        uint64_t len_a_len_c[2];

        gops = gcm_impl_get_ops();
        ghash = (uint8_t *)ctx->gcm_ghash;
        cb = (uint8_t *)ctx->gcm_cb;
        if (iv_len == 12) {
                bcopy(iv, cb, 12);
                cb[12] = 0;
                cb[13] = 0;
                cb[14] = 0;
                cb[15] = 1;
                /* J0 will be used again in the final */
                copy_block(cb, (uint8_t *)ctx->gcm_J0);
        } else {
                /* GHASH the IV */
                do {
                        if (remainder < block_size) {
                                bzero(cb, block_size);
                                bcopy(&(iv[processed]), cb, remainder);
                                datap = (uint8_t *)cb;
                                remainder = 0;
                        } else {
                                datap = (uint8_t *)(&(iv[processed]));
                                processed += block_size;
                                remainder -= block_size;
                        }
                        GHASH(ctx, datap, ghash, gops);
                } while (remainder > 0);

                len_a_len_c[0] = 0;
                len_a_len_c[1] = htonll(CRYPTO_BYTES2BITS(iv_len));
                GHASH(ctx, len_a_len_c, ctx->gcm_J0, gops);

                /* J0 will be used again in the final */
                copy_block((uint8_t *)ctx->gcm_J0, (uint8_t *)cb);
        }
}

/*
 * The following function is called at encrypt or decrypt init time
 * for AES GCM mode.
 */
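/*
 * It computes the hash subkey H = E(K, 0^128), derives J0 from the IV,
 * and runs the AAD through GHASH, leaving the context ready for the
 * data blocks.
 */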
int
gcm_init(gcm_ctx_t *ctx, unsigned char *iv, size_t iv_len,
    unsigned char *auth_data, size_t auth_data_len, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
        const gcm_impl_ops_t *gops;
        uint8_t *ghash, *datap, *authp;
        size_t remainder, processed;

        /* encrypt zero block to get subkey H */
        bzero(ctx->gcm_H, sizeof (ctx->gcm_H));
        encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_H,
            (uint8_t *)ctx->gcm_H);

        gcm_format_initial_blocks(iv, iv_len, ctx, block_size,
            copy_block, xor_block);

        gops = gcm_impl_get_ops();
        authp = (uint8_t *)ctx->gcm_tmp;
        ghash = (uint8_t *)ctx->gcm_ghash;
        bzero(authp, block_size);
        bzero(ghash, block_size);

        processed = 0;
        remainder = auth_data_len;
        do {
                if (remainder < block_size) {
                        /*
                         * Not a full block of data; pad the rest of
                         * the buffer with zeros.
                         */
                        bzero(authp, block_size);
                        bcopy(&(auth_data[processed]), authp, remainder);
                        datap = (uint8_t *)authp;
                        remainder = 0;
                } else {
                        datap = (uint8_t *)(&(auth_data[processed]));
                        processed += block_size;
                        remainder -= block_size;
                }

                /* add auth data to the hash */
                GHASH(ctx, datap, ghash, gops);

        } while (remainder > 0);

        return (CRYPTO_SUCCESS);
}

int
gcm_init_ctx(gcm_ctx_t *gcm_ctx, char *param, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
        int rv;
        CK_AES_GCM_PARAMS *gcm_param;

        if (param != NULL) {
                gcm_param = (CK_AES_GCM_PARAMS *)(void *)param;

                if ((rv = gcm_validate_args(gcm_param)) != 0) {
                        return (rv);
                }

                gcm_ctx->gcm_tag_len = gcm_param->ulTagBits;
                gcm_ctx->gcm_tag_len >>= 3;
                gcm_ctx->gcm_processed_data_len = 0;

                /* these values are in bits */
                gcm_ctx->gcm_len_a_len_c[0] =
                    htonll(CRYPTO_BYTES2BITS(gcm_param->ulAADLen));

                rv = CRYPTO_SUCCESS;
                gcm_ctx->gcm_flags |= GCM_MODE;
        } else {
                rv = CRYPTO_MECHANISM_PARAM_INVALID;
                goto out;
        }

        if (gcm_init(gcm_ctx, gcm_param->pIv, gcm_param->ulIvLen,
            gcm_param->pAAD, gcm_param->ulAADLen, block_size,
            encrypt_block, copy_block, xor_block) != 0) {
                rv = CRYPTO_MECHANISM_PARAM_INVALID;
        }
out:
        return (rv);
}

int
gmac_init_ctx(gcm_ctx_t *gcm_ctx, char *param, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
        int rv;
        CK_AES_GMAC_PARAMS *gmac_param;

        if (param != NULL) {
                gmac_param = (CK_AES_GMAC_PARAMS *)(void *)param;

                gcm_ctx->gcm_tag_len = CRYPTO_BITS2BYTES(AES_GMAC_TAG_BITS);
                gcm_ctx->gcm_processed_data_len = 0;

                /* these values are in bits */
                gcm_ctx->gcm_len_a_len_c[0] =
                    htonll(CRYPTO_BYTES2BITS(gmac_param->ulAADLen));

                rv = CRYPTO_SUCCESS;
                gcm_ctx->gcm_flags |= GMAC_MODE;
        } else {
                rv = CRYPTO_MECHANISM_PARAM_INVALID;
                goto out;
        }

        if (gcm_init(gcm_ctx, gmac_param->pIv, AES_GMAC_IV_LEN,
            gmac_param->pAAD, gmac_param->ulAADLen, block_size,
            encrypt_block, copy_block, xor_block) != 0) {
                rv = CRYPTO_MECHANISM_PARAM_INVALID;
        }
out:
        return (rv);
}

void *
gcm_alloc_ctx(int kmflag)
{
        gcm_ctx_t *gcm_ctx;

        if ((gcm_ctx = kmem_zalloc(sizeof (gcm_ctx_t), kmflag)) == NULL)
                return (NULL);

        gcm_ctx->gcm_flags = GCM_MODE;
        return (gcm_ctx);
}

void *
gmac_alloc_ctx(int kmflag)
{
        gcm_ctx_t *gcm_ctx;

        if ((gcm_ctx = kmem_zalloc(sizeof (gcm_ctx_t), kmflag)) == NULL)
                return (NULL);

        gcm_ctx->gcm_flags = GMAC_MODE;
        return (gcm_ctx);
}

void
gcm_set_kmflag(gcm_ctx_t *ctx, int kmflag)
{
        ctx->gcm_kmflag = kmflag;
}
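
/*
 * The kmflag set here (e.g. KM_SLEEP or KM_NOSLEEP) is the allocation
 * flag gcm_mode_decrypt_contiguous_blocks() passes to vmem_alloc()
 * when growing gcm_pt_buf.
 */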

/* GCM implementation that contains the fastest methods */
static gcm_impl_ops_t gcm_fastest_impl = {
        .name = "fastest"
};

/* All compiled in implementations */
const gcm_impl_ops_t *gcm_all_impl[] = {
        &gcm_generic_impl,
#if defined(__x86_64) && defined(HAVE_PCLMULQDQ)
        &gcm_pclmulqdq_impl,
#endif
};
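
/*
 * Implementations are probed at gcm_impl_init() time via their
 * is_supported() callback; only those that pass are copied into
 * gcm_supp_impl[] below.
 */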

/* Indicate that benchmark has been completed */
static boolean_t gcm_impl_initialized = B_FALSE;

/* Select GCM implementation */
#define IMPL_FASTEST    (UINT32_MAX)
#define IMPL_CYCLE      (UINT32_MAX-1)
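
/*
 * icp_gcm_impl either holds an index into gcm_supp_impl[] or one of
 * the two sentinels above: IMPL_FASTEST selects gcm_fastest_impl, and
 * IMPL_CYCLE rotates through every supported implementation, which is
 * mainly useful for testing.
 */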

#define GCM_IMPL_READ(i) (*(volatile uint32_t *) &(i))

static uint32_t icp_gcm_impl = IMPL_FASTEST;
static uint32_t user_sel_impl = IMPL_FASTEST;

/* Hold all supported implementations */
static size_t gcm_supp_impl_cnt = 0;
static gcm_impl_ops_t *gcm_supp_impl[ARRAY_SIZE(gcm_all_impl)];

/*
 * Returns the GCM operations for encrypt/decrypt/key setup.  When a
 * SIMD implementation is not allowed in the current context, fall
 * back to the generic implementation.
 */
const gcm_impl_ops_t *
gcm_impl_get_ops(void)
{
        if (!kfpu_allowed())
                return (&gcm_generic_impl);

        const gcm_impl_ops_t *ops = NULL;
        const uint32_t impl = GCM_IMPL_READ(icp_gcm_impl);

        switch (impl) {
        case IMPL_FASTEST:
                ASSERT(gcm_impl_initialized);
                ops = &gcm_fastest_impl;
                break;
        case IMPL_CYCLE:
                /* Cycle through supported implementations */
                ASSERT(gcm_impl_initialized);
                ASSERT3U(gcm_supp_impl_cnt, >, 0);
                static size_t cycle_impl_idx = 0;
                size_t idx = (++cycle_impl_idx) % gcm_supp_impl_cnt;
                ops = gcm_supp_impl[idx];
                break;
        default:
                ASSERT3U(impl, <, gcm_supp_impl_cnt);
                ASSERT3U(gcm_supp_impl_cnt, >, 0);
                if (impl < ARRAY_SIZE(gcm_all_impl))
                        ops = gcm_supp_impl[impl];
                break;
        }

        ASSERT3P(ops, !=, NULL);

        return (ops);
}

/*
 * Initialize all supported implementations.
 */
void
gcm_impl_init(void)
{
        gcm_impl_ops_t *curr_impl;
        int i, c;

        /* Move supported implementations into gcm_supp_impl */
        for (i = 0, c = 0; i < ARRAY_SIZE(gcm_all_impl); i++) {
                curr_impl = (gcm_impl_ops_t *)gcm_all_impl[i];

                if (curr_impl->is_supported())
                        gcm_supp_impl[c++] = (gcm_impl_ops_t *)curr_impl;
        }
        gcm_supp_impl_cnt = c;

        /*
         * Set the fastest implementation given the assumption that the
         * hardware accelerated version is the fastest.
         */
#if defined(__x86_64) && defined(HAVE_PCLMULQDQ)
        if (gcm_pclmulqdq_impl.is_supported()) {
                memcpy(&gcm_fastest_impl, &gcm_pclmulqdq_impl,
                    sizeof (gcm_fastest_impl));
        } else
#endif
        {
                memcpy(&gcm_fastest_impl, &gcm_generic_impl,
                    sizeof (gcm_fastest_impl));
        }

        strcpy(gcm_fastest_impl.name, "fastest");

        /* Finish initialization */
        atomic_swap_32(&icp_gcm_impl, user_sel_impl);
        gcm_impl_initialized = B_TRUE;
}

static const struct {
        char *name;
        uint32_t sel;
} gcm_impl_opts[] = {
                { "cycle",      IMPL_CYCLE },
                { "fastest",    IMPL_FASTEST },
};

/*
 * Set the desired gcm implementation.
 *
 * If we are called before init(), the user preference will be saved in
 * user_sel_impl and applied in a later init() call. This occurs when the
 * module parameter is specified on module load. Otherwise, icp_gcm_impl
 * is updated directly.
 *
 * @val         Name of the gcm implementation to use
 */
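/*
 * For example, at runtime (assuming the module parameter registered
 * at the bottom of this file):
 *      echo fastest > /sys/module/icp/parameters/icp_gcm_impl
 */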
int
gcm_impl_set(const char *val)
{
        int err = -EINVAL;
        char req_name[GCM_IMPL_NAME_MAX];
        uint32_t impl = GCM_IMPL_READ(user_sel_impl);
        size_t i;

        /* sanitize input */
        i = strnlen(val, GCM_IMPL_NAME_MAX);
        if (i == 0 || i >= GCM_IMPL_NAME_MAX)
                return (err);

        strlcpy(req_name, val, GCM_IMPL_NAME_MAX);
        while (i > 0 && isspace(req_name[i-1]))
                i--;
        req_name[i] = '\0';

        /* Check mandatory options */
        for (i = 0; i < ARRAY_SIZE(gcm_impl_opts); i++) {
                if (strcmp(req_name, gcm_impl_opts[i].name) == 0) {
                        impl = gcm_impl_opts[i].sel;
                        err = 0;
                        break;
                }
        }

        /* check all supported impl if init() was already called */
        if (err != 0 && gcm_impl_initialized) {
                /* check all supported implementations */
                for (i = 0; i < gcm_supp_impl_cnt; i++) {
                        if (strcmp(req_name, gcm_supp_impl[i]->name) == 0) {
                                impl = i;
                                err = 0;
                                break;
                        }
                }
        }

        if (err == 0) {
                if (gcm_impl_initialized)
                        atomic_swap_32(&icp_gcm_impl, impl);
                else
                        atomic_swap_32(&user_sel_impl, impl);
        }

        return (err);
}

#if defined(_KERNEL)

static int
icp_gcm_impl_set(const char *val, zfs_kernel_param_t *kp)
{
        return (gcm_impl_set(val));
}

static int
icp_gcm_impl_get(char *buffer, zfs_kernel_param_t *kp)
{
        int i, cnt = 0;
        char *fmt;
        const uint32_t impl = GCM_IMPL_READ(icp_gcm_impl);

        ASSERT(gcm_impl_initialized);

        /* list mandatory options */
        for (i = 0; i < ARRAY_SIZE(gcm_impl_opts); i++) {
                fmt = (impl == gcm_impl_opts[i].sel) ? "[%s] " : "%s ";
                cnt += sprintf(buffer + cnt, fmt, gcm_impl_opts[i].name);
        }

        /* list all supported implementations */
        for (i = 0; i < gcm_supp_impl_cnt; i++) {
                fmt = (i == impl) ? "[%s] " : "%s ";
                cnt += sprintf(buffer + cnt, fmt, gcm_supp_impl[i]->name);
        }

        return (cnt);
}

module_param_call(icp_gcm_impl, icp_gcm_impl_set, icp_gcm_impl_get,
    NULL, 0644);
MODULE_PARM_DESC(icp_gcm_impl, "Select gcm implementation.");
#endif