module/icp/algs/aes/aes_impl.c
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/crypto/icp.h>
#include <sys/crypto/spi.h>
#include <sys/simd.h>
#include <modes/modes.h>
#include <aes/aes_impl.h>

/*
 * Initialize AES encryption and decryption key schedules.
 *
 * Parameters:
 * cipherKey    User key
 * keyBits      AES key size (128, 192, or 256 bits)
 * keysched     AES key schedule to be initialized, of type aes_key_t.
 *              Allocated by aes_alloc_keysched().
 */
void
aes_init_keysched(const uint8_t *cipherKey, uint_t keyBits, void *keysched)
{
        const aes_impl_ops_t *ops = aes_impl_get_ops();
        aes_key_t *newbie = keysched;
        uint_t keysize, i, j;
        union {
                uint64_t        ka64[4];
                uint32_t        ka32[8];
                } keyarr;

        switch (keyBits) {
        case 128:
                newbie->nr = 10;
                break;

        case 192:
                newbie->nr = 12;
                break;

        case 256:
                newbie->nr = 14;
                break;

        default:
                /* should never get here */
                return;
        }
        keysize = CRYPTO_BITS2BYTES(keyBits);

        /*
         * The generic C implementation requires a byteswap on little-endian
         * machines; the various accelerated implementations for particular
         * architectures may not.
         */
        if (!ops->needs_byteswap) {
                /* no byteswap needed */
                if (IS_P2ALIGNED(cipherKey, sizeof (uint64_t))) {
                        for (i = 0, j = 0; j < keysize; i++, j += 8) {
                                /* LINTED: pointer alignment */
                                keyarr.ka64[i] = *((uint64_t *)&cipherKey[j]);
                        }
                } else {
                        bcopy(cipherKey, keyarr.ka32, keysize);
                }
        } else {
                /* byte swap */
                for (i = 0, j = 0; j < keysize; i++, j += 4) {
                        keyarr.ka32[i] =
                            htonl(*(uint32_t *)(void *)&cipherKey[j]);
                }
        }

        ops->generate(newbie, keyarr.ka32, keyBits);
        newbie->ops = ops;

        /*
         * Note: if there are systems that need the AES_64BIT_KS type in the
         * future, move setting key schedule type to individual implementations
         */
        newbie->type = AES_32BIT_KS;
}


/*
 * Encrypt one block using AES.
 * Align and byte-swap if needed.
 *
 * Parameters:
 * ks   Key schedule, of type aes_key_t
 * pt   Input block (plain text)
 * ct   Output block (crypto text).  Can overlap with pt
 */
int
aes_encrypt_block(const void *ks, const uint8_t *pt, uint8_t *ct)
{
        aes_key_t       *ksch = (aes_key_t *)ks;
        const aes_impl_ops_t    *ops = ksch->ops;

        if (IS_P2ALIGNED2(pt, ct, sizeof (uint32_t)) && !ops->needs_byteswap) {
                /* LINTED:  pointer alignment */
                ops->encrypt(&ksch->encr_ks.ks32[0], ksch->nr,
                    /* LINTED:  pointer alignment */
                    (uint32_t *)pt, (uint32_t *)ct);
        } else {
                uint32_t buffer[AES_BLOCK_LEN / sizeof (uint32_t)];

                /* Copy input block into buffer */
                if (ops->needs_byteswap) {
                        buffer[0] = htonl(*(uint32_t *)(void *)&pt[0]);
                        buffer[1] = htonl(*(uint32_t *)(void *)&pt[4]);
                        buffer[2] = htonl(*(uint32_t *)(void *)&pt[8]);
                        buffer[3] = htonl(*(uint32_t *)(void *)&pt[12]);
                } else
                        bcopy(pt, &buffer, AES_BLOCK_LEN);

                ops->encrypt(&ksch->encr_ks.ks32[0], ksch->nr, buffer, buffer);

                /* Copy result from buffer to output block */
                if (ops->needs_byteswap) {
                        *(uint32_t *)(void *)&ct[0] = htonl(buffer[0]);
                        *(uint32_t *)(void *)&ct[4] = htonl(buffer[1]);
                        *(uint32_t *)(void *)&ct[8] = htonl(buffer[2]);
                        *(uint32_t *)(void *)&ct[12] = htonl(buffer[3]);
                } else
                        bcopy(&buffer, ct, AES_BLOCK_LEN);
        }
        return (CRYPTO_SUCCESS);
}


/*
 * Decrypt one block using AES.
 * Align and byte-swap if needed.
 *
 * Parameters:
 * ks   Key schedule, of type aes_key_t
 * ct   Input block (crypto text)
 * pt   Output block (plain text).  Can overlap with ct
 */
int
aes_decrypt_block(const void *ks, const uint8_t *ct, uint8_t *pt)
{
        aes_key_t       *ksch = (aes_key_t *)ks;
        const aes_impl_ops_t    *ops = ksch->ops;

        if (IS_P2ALIGNED2(ct, pt, sizeof (uint32_t)) && !ops->needs_byteswap) {
                /* LINTED:  pointer alignment */
                ops->decrypt(&ksch->decr_ks.ks32[0], ksch->nr,
                    /* LINTED:  pointer alignment */
                    (uint32_t *)ct, (uint32_t *)pt);
        } else {
                uint32_t buffer[AES_BLOCK_LEN / sizeof (uint32_t)];

                /* Copy input block into buffer */
                if (ops->needs_byteswap) {
                        buffer[0] = htonl(*(uint32_t *)(void *)&ct[0]);
                        buffer[1] = htonl(*(uint32_t *)(void *)&ct[4]);
                        buffer[2] = htonl(*(uint32_t *)(void *)&ct[8]);
                        buffer[3] = htonl(*(uint32_t *)(void *)&ct[12]);
                } else
                        bcopy(ct, &buffer, AES_BLOCK_LEN);

                ops->decrypt(&ksch->decr_ks.ks32[0], ksch->nr, buffer, buffer);

                /* Copy result from buffer to output block */
                if (ops->needs_byteswap) {
                        *(uint32_t *)(void *)&pt[0] = htonl(buffer[0]);
                        *(uint32_t *)(void *)&pt[4] = htonl(buffer[1]);
                        *(uint32_t *)(void *)&pt[8] = htonl(buffer[2]);
                        *(uint32_t *)(void *)&pt[12] = htonl(buffer[3]);
                } else
                        bcopy(&buffer, pt, AES_BLOCK_LEN);
        }
        return (CRYPTO_SUCCESS);
}


/*
 * Allocate key schedule for AES.
 *
 * Return the pointer and set size to the number of bytes allocated.
 * Memory allocated must be freed by the caller when done.
 *
 * Parameters:
 * size         Size of key schedule allocated, in bytes
 * kmflag       Flag passed to kmem_alloc(9F); ignored in userland.
 */
/* ARGSUSED */
void *
aes_alloc_keysched(size_t *size, int kmflag)
{
        aes_key_t *keysched;

        keysched = (aes_key_t *)kmem_alloc(sizeof (aes_key_t), kmflag);
        if (keysched != NULL) {
                *size = sizeof (aes_key_t);
                return (keysched);
        }
        return (NULL);
}
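
/*
 * Illustrative usage sketch: allocate a key schedule, expand a 256-bit key,
 * and push one block through encrypt and decrypt.  The function name and the
 * caller-supplied buffers are hypothetical examples and not part of the ICP
 * API; error handling is kept to a minimum.
 */
#if 0
static int
aes_block_roundtrip_example(const uint8_t key[32], const uint8_t in[16],
    uint8_t tmp[16], uint8_t out[16])
{
        size_t size;
        void *ks = aes_alloc_keysched(&size, KM_SLEEP);

        if (ks == NULL)
                return (CRYPTO_HOST_MEMORY);

        aes_init_keysched(key, 256, ks);        /* 256-bit key => 14 rounds */
        (void) aes_encrypt_block(ks, in, tmp);  /* tmp = AES(key, in) */
        (void) aes_decrypt_block(ks, tmp, out); /* out matches in afterwards */

        bzero(ks, size);                        /* scrub the key schedule */
        kmem_free(ks, size);
        return (CRYPTO_SUCCESS);
}
#endif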

/* AES implementation that contains the fastest methods */
static aes_impl_ops_t aes_fastest_impl = {
        .name = "fastest"
};

/* All compiled in implementations */
const aes_impl_ops_t *aes_all_impl[] = {
        &aes_generic_impl,
#if defined(__x86_64)
        &aes_x86_64_impl,
#endif
#if defined(__x86_64) && defined(HAVE_AES)
        &aes_aesni_impl,
#endif
};

/* Indicate that benchmark has been completed */
static boolean_t aes_impl_initialized = B_FALSE;

/* Select aes implementation */
#define IMPL_FASTEST    (UINT32_MAX)
#define IMPL_CYCLE      (UINT32_MAX-1)

#define AES_IMPL_READ(i) (*(volatile uint32_t *) &(i))

static uint32_t icp_aes_impl = IMPL_FASTEST;
static uint32_t user_sel_impl = IMPL_FASTEST;

/* Hold all supported implementations */
static size_t aes_supp_impl_cnt = 0;
static aes_impl_ops_t *aes_supp_impl[ARRAY_SIZE(aes_all_impl)];

/*
 * Returns the AES operations for encrypt/decrypt/key setup.  When a
 * SIMD implementation is not allowed in the current context, fall back
 * to the generic implementation.
 */
const aes_impl_ops_t *
aes_impl_get_ops(void)
{
        if (!kfpu_allowed())
                return (&aes_generic_impl);

        const aes_impl_ops_t *ops = NULL;
        const uint32_t impl = AES_IMPL_READ(icp_aes_impl);

        switch (impl) {
        case IMPL_FASTEST:
                ASSERT(aes_impl_initialized);
                ops = &aes_fastest_impl;
                break;
        case IMPL_CYCLE:
                /* Cycle through supported implementations */
                ASSERT(aes_impl_initialized);
                ASSERT3U(aes_supp_impl_cnt, >, 0);
                static size_t cycle_impl_idx = 0;
                size_t idx = (++cycle_impl_idx) % aes_supp_impl_cnt;
                ops = aes_supp_impl[idx];
                break;
        default:
                ASSERT3U(impl, <, aes_supp_impl_cnt);
                ASSERT3U(aes_supp_impl_cnt, >, 0);
                if (impl < ARRAY_SIZE(aes_all_impl))
                        ops = aes_supp_impl[impl];
                break;
        }

        ASSERT3P(ops, !=, NULL);

        return (ops);
}

/*
 * Initialize all supported implementations.
 */
void
aes_impl_init(void)
{
        aes_impl_ops_t *curr_impl;
        int i, c;

        /* Move supported implementations into aes_supp_impls */
        for (i = 0, c = 0; i < ARRAY_SIZE(aes_all_impl); i++) {
                curr_impl = (aes_impl_ops_t *)aes_all_impl[i];

                if (curr_impl->is_supported())
                        aes_supp_impl[c++] = (aes_impl_ops_t *)curr_impl;
        }
        aes_supp_impl_cnt = c;

        /*
         * Set the fastest implementation given the assumption that the
         * hardware accelerated version is the fastest.
         */
#if defined(__x86_64)
#if defined(HAVE_AES)
        if (aes_aesni_impl.is_supported()) {
                memcpy(&aes_fastest_impl, &aes_aesni_impl,
                    sizeof (aes_fastest_impl));
        } else
#endif
        {
                memcpy(&aes_fastest_impl, &aes_x86_64_impl,
                    sizeof (aes_fastest_impl));
        }
#else
        memcpy(&aes_fastest_impl, &aes_generic_impl,
            sizeof (aes_fastest_impl));
#endif

        strcpy(aes_fastest_impl.name, "fastest");

        /* Finish initialization */
        atomic_swap_32(&icp_aes_impl, user_sel_impl);
        aes_impl_initialized = B_TRUE;
}

static const struct {
        char *name;
        uint32_t sel;
} aes_impl_opts[] = {
                { "cycle",      IMPL_CYCLE },
                { "fastest",    IMPL_FASTEST },
};

/*
 * Set the desired AES implementation.
 *
 * If we are called before init(), the user preference is saved in
 * user_sel_impl and applied in a later init() call.  This happens when the
 * module parameter is specified at module load time.  Otherwise, icp_aes_impl
 * is updated directly.
 *
 * @val         Name of the AES implementation to use
 */
int
aes_impl_set(const char *val)
{
        int err = -EINVAL;
        char req_name[AES_IMPL_NAME_MAX];
        uint32_t impl = AES_IMPL_READ(user_sel_impl);
        size_t i;

        /* sanitize input */
        i = strnlen(val, AES_IMPL_NAME_MAX);
        if (i == 0 || i >= AES_IMPL_NAME_MAX)
                return (err);

        strlcpy(req_name, val, AES_IMPL_NAME_MAX);
        while (i > 0 && isspace(req_name[i-1]))
                i--;
        req_name[i] = '\0';

        /* Check mandatory options */
        for (i = 0; i < ARRAY_SIZE(aes_impl_opts); i++) {
                if (strcmp(req_name, aes_impl_opts[i].name) == 0) {
                        impl = aes_impl_opts[i].sel;
                        err = 0;
                        break;
                }
        }

        /* check all supported impl if init() was already called */
        if (err != 0 && aes_impl_initialized) {
                /* check all supported implementations */
                for (i = 0; i < aes_supp_impl_cnt; i++) {
                        if (strcmp(req_name, aes_supp_impl[i]->name) == 0) {
                                impl = i;
                                err = 0;
                                break;
                        }
                }
        }

        if (err == 0) {
                if (aes_impl_initialized)
                        atomic_swap_32(&icp_aes_impl, impl);
                else
                        atomic_swap_32(&user_sel_impl, impl);
        }

        return (err);
}
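
/*
 * Illustrative example of a hypothetical caller selecting an implementation
 * by name.  Before aes_impl_init() runs, only the mandatory options ("cycle"
 * and "fastest") are accepted and the choice is remembered in user_sel_impl;
 * a specific name such as "aesni" can only match once the supported list has
 * been populated.
 */
#if 0
static void
aes_impl_set_example(void)
{
        VERIFY0(aes_impl_set("fastest"));       /* always accepted */

        if (aes_impl_set("aesni") != 0) {
                /* not initialized yet, or unsupported on this CPU */
        }
}
#endif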

#if defined(_KERNEL)

static int
icp_aes_impl_set(const char *val, zfs_kernel_param_t *kp)
{
        return (aes_impl_set(val));
}

static int
icp_aes_impl_get(char *buffer, zfs_kernel_param_t *kp)
{
        int i, cnt = 0;
        char *fmt;
        const uint32_t impl = AES_IMPL_READ(icp_aes_impl);

        ASSERT(aes_impl_initialized);

        /* list mandatory options */
        for (i = 0; i < ARRAY_SIZE(aes_impl_opts); i++) {
                fmt = (impl == aes_impl_opts[i].sel) ? "[%s] " : "%s ";
                cnt += sprintf(buffer + cnt, fmt, aes_impl_opts[i].name);
        }

        /* list all supported implementations */
        for (i = 0; i < aes_supp_impl_cnt; i++) {
                fmt = (i == impl) ? "[%s] " : "%s ";
                cnt += sprintf(buffer + cnt, fmt, aes_supp_impl[i]->name);
        }

        return (cnt);
}

module_param_call(icp_aes_impl, icp_aes_impl_set, icp_aes_impl_get,
    NULL, 0644);
MODULE_PARM_DESC(icp_aes_impl, "Select aes implementation.");
#endif
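
/*
 * With the icp module loaded, the parameter can be inspected and changed at
 * runtime through the Linux module parameter interface; the sysfs path below
 * is assumed from the module name:
 *
 *      cat /sys/module/icp/parameters/icp_aes_impl
 *      echo fastest > /sys/module/icp/parameters/icp_aes_impl
 */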