const union {
long one;
char little;
- } is_endian = {
- 1
- };
+ } is_endian = { 1 };
static const size_t rem_8bit[256] = {
PACK(0x0000), PACK(0x01C2), PACK(0x0384), PACK(0x0246),
PACK(0x0708), PACK(0x06CA), PACK(0x048C), PACK(0x054E),
const union {
long one;
char little;
- } is_endian = {
- 1
- };
+ } is_endian = { 1 };
if (is_endian.little)
for (j = 0; j < 16; ++j) {
const union {
long one;
char little;
- } is_endian = {
- 1
- };
+ } is_endian = { 1 };
nlo = ((const u8 *)Xi)[15];
nhi = nlo >> 4;
const union {
long one;
char little;
- } is_endian = {
- 1
- };
+ } is_endian = { 1 };
# if 1
do {
const union {
long one;
char little;
- } is_endian = {
- 1
- };
+ } is_endian = { 1 };
V.hi = H[0]; /* H is in host byte order, no byte swapping */
V.lo = H[1];
const union {
long one;
char little;
- } is_endian = {
- 1
- };
+ } is_endian = { 1 };
memset(ctx, 0, sizeof(*ctx));
ctx->block = block;
#if TABLE_BITS==8
gcm_init_8bit(ctx->Htable, ctx->H.u);
#elif TABLE_BITS==4
+# if defined(GHASH)
+#  define CTX__GHASH(f) (ctx->ghash = (f))
+# else
+#  define CTX__GHASH(f) (ctx->ghash = NULL)
+# endif
# if defined(GHASH_ASM_X86_OR_64)
# if !defined(GHASH_ASM_X86) || defined(OPENSSL_IA32_SSE2)
if (OPENSSL_ia32cap_P[0] & (1 << 24) && /* check FXSR bit */
if (((OPENSSL_ia32cap_P[1] >> 22) & 0x41) == 0x41) { /* AVX+MOVBE */
gcm_init_avx(ctx->Htable, ctx->H.u);
ctx->gmult = gcm_gmult_avx;
- ctx->ghash = gcm_ghash_avx;
+ CTX__GHASH(gcm_ghash_avx);
} else {
gcm_init_clmul(ctx->Htable, ctx->H.u);
ctx->gmult = gcm_gmult_clmul;
- ctx->ghash = gcm_ghash_clmul;
+ CTX__GHASH(gcm_ghash_clmul);
}
return;
}
if (OPENSSL_ia32cap_P[0] & (1 << 23)) { /* check MMX bit */
# endif
ctx->gmult = gcm_gmult_4bit_mmx;
- ctx->ghash = gcm_ghash_4bit_mmx;
+ CTX__GHASH(gcm_ghash_4bit_mmx);
} else {
ctx->gmult = gcm_gmult_4bit_x86;
- ctx->ghash = gcm_ghash_4bit_x86;
+ CTX__GHASH(gcm_ghash_4bit_x86);
}
# else
ctx->gmult = gcm_gmult_4bit;
- ctx->ghash = gcm_ghash_4bit;
+ CTX__GHASH(gcm_ghash_4bit);
# endif
# elif defined(GHASH_ASM_ARM)
# ifdef PMULL_CAPABLE
if (PMULL_CAPABLE) {
gcm_init_v8(ctx->Htable, ctx->H.u);
ctx->gmult = gcm_gmult_v8;
- ctx->ghash = gcm_ghash_v8;
+ CTX__GHASH(gcm_ghash_v8);
} else
# endif
# ifdef NEON_CAPABLE
if (NEON_CAPABLE) {
gcm_init_neon(ctx->Htable, ctx->H.u);
ctx->gmult = gcm_gmult_neon;
- ctx->ghash = gcm_ghash_neon;
+ CTX__GHASH(gcm_ghash_neon);
} else
# endif
{
gcm_init_4bit(ctx->Htable, ctx->H.u);
ctx->gmult = gcm_gmult_4bit;
-# if defined(GHASH)
- ctx->ghash = gcm_ghash_4bit;
-# else
- ctx->ghash = NULL;
-# endif
+ CTX__GHASH(gcm_ghash_4bit);
}
# elif defined(GHASH_ASM_SPARC)
if (OPENSSL_sparcv9cap_P[0] & SPARCV9_VIS3) {
gcm_init_vis3(ctx->Htable, ctx->H.u);
ctx->gmult = gcm_gmult_vis3;
- ctx->ghash = gcm_ghash_vis3;
+ CTX__GHASH(gcm_ghash_vis3);
} else {
gcm_init_4bit(ctx->Htable, ctx->H.u);
ctx->gmult = gcm_gmult_4bit;
- ctx->ghash = gcm_ghash_4bit;
+ CTX__GHASH(gcm_ghash_4bit);
}
# elif defined(GHASH_ASM_PPC)
if (OPENSSL_ppccap_P & PPC_CRYPTO207) {
gcm_init_p8(ctx->Htable, ctx->H.u);
ctx->gmult = gcm_gmult_p8;
- ctx->ghash = gcm_ghash_p8;
+ CTX__GHASH(gcm_ghash_p8);
} else {
gcm_init_4bit(ctx->Htable, ctx->H.u);
ctx->gmult = gcm_gmult_4bit;
-# if defined(GHASH)
- ctx->ghash = gcm_ghash_4bit;
-# else
- ctx->ghash = NULL;
-# endif
+ CTX__GHASH(gcm_ghash_4bit);
}
# else
gcm_init_4bit(ctx->Htable, ctx->H.u);
# endif
+# undef CTX__GHASH
#endif
}
const union {
long one;
char little;
- } is_endian = {
- 1
- };
+ } is_endian = { 1 };
unsigned int ctr;
#ifdef GCM_FUNCREF_4BIT
void (*gcm_gmult_p) (u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
const union {
long one;
char little;
- } is_endian = {
- 1
- };
+ } is_endian = { 1 };
unsigned int n, ctr;
size_t i;
u64 mlen = ctx->len.u[1];
void *key = ctx->key;
#ifdef GCM_FUNCREF_4BIT
void (*gcm_gmult_p) (u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
-# ifdef GHASH
+# if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
void (*gcm_ghash_p) (u64 Xi[2], const u128 Htable[16],
const u8 *inp, size_t len) = ctx->ghash;
# endif
if (((size_t)in | (size_t)out) % sizeof(size_t) != 0)
break;
# endif
-# if defined(GHASH) && defined(GHASH_CHUNK)
+# if defined(GHASH)
+#  if defined(GHASH_CHUNK)
while (len >= GHASH_CHUNK) {
size_t j = GHASH_CHUNK;
(*block) (ctx->Yi.c, ctx->EKi.c, key);
++ctr;
if (is_endian.little)
-# ifdef BSWAP4
+#   ifdef BSWAP4
ctx->Yi.d[3] = BSWAP4(ctr);
-# else
+#   else
PUTU32(ctx->Yi.c + 12, ctr);
-# endif
+#   endif
else
ctx->Yi.d[3] = ctr;
for (i = 0; i < 16 / sizeof(size_t); ++i)
GHASH(ctx, out - GHASH_CHUNK, GHASH_CHUNK);
len -= GHASH_CHUNK;
}
+#  endif
if ((i = (len & (size_t)-16))) {
size_t j = i;
const union {
long one;
char little;
- } is_endian = {
- 1
- };
+ } is_endian = { 1 };
unsigned int n, ctr;
size_t i;
u64 mlen = ctx->len.u[1];
void *key = ctx->key;
#ifdef GCM_FUNCREF_4BIT
void (*gcm_gmult_p) (u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
-# ifdef GHASH
+# if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
void (*gcm_ghash_p) (u64 Xi[2], const u128 Htable[16],
const u8 *inp, size_t len) = ctx->ghash;
# endif
if (((size_t)in | (size_t)out) % sizeof(size_t) != 0)
break;
# endif
-# if defined(GHASH) && defined(GHASH_CHUNK)
+# if defined(GHASH)
+#  if defined(GHASH_CHUNK)
while (len >= GHASH_CHUNK) {
size_t j = GHASH_CHUNK;
(*block) (ctx->Yi.c, ctx->EKi.c, key);
++ctr;
if (is_endian.little)
-# ifdef BSWAP4
+#   ifdef BSWAP4
ctx->Yi.d[3] = BSWAP4(ctr);
-# else
+#   else
PUTU32(ctx->Yi.c + 12, ctr);
-# endif
+#   endif
else
ctx->Yi.d[3] = ctr;
for (i = 0; i < 16 / sizeof(size_t); ++i)
}
len -= GHASH_CHUNK;
}
+#  endif
if ((i = (len & (size_t)-16))) {
GHASH(ctx, in, i);
while (len >= 16) {
const unsigned char *in, unsigned char *out,
size_t len, ctr128_f stream)
{
+#if defined(OPENSSL_SMALL_FOOTPRINT)
+ return CRYPTO_gcm128_encrypt(ctx, in, out, len);
+#else
const union {
long one;
char little;
- } is_endian = {
- 1
- };
+ } is_endian = { 1 };
unsigned int n, ctr;
size_t i;
u64 mlen = ctx->len.u[1];
void *key = ctx->key;
-#ifdef GCM_FUNCREF_4BIT
+# ifdef GCM_FUNCREF_4BIT
void (*gcm_gmult_p) (u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
-# ifdef GHASH
+#  ifdef GHASH
void (*gcm_ghash_p) (u64 Xi[2], const u128 Htable[16],
const u8 *inp, size_t len) = ctx->ghash;
+#  endif
# endif
-#endif
mlen += len;
if (mlen > ((U64(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len))
}
if (is_endian.little)
-#ifdef BSWAP4
+# ifdef BSWAP4
ctr = BSWAP4(ctx->Yi.d[3]);
-#else
+# else
ctr = GETU32(ctx->Yi.c + 12);
-#endif
+# endif
else
ctr = ctx->Yi.d[3];
return 0;
}
}
-#if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
+# if defined(GHASH) && defined(GHASH_CHUNK)
while (len >= GHASH_CHUNK) {
(*stream) (in, out, GHASH_CHUNK / 16, key, ctx->Yi.c);
ctr += GHASH_CHUNK / 16;
if (is_endian.little)
-# ifdef BSWAP4
+#  ifdef BSWAP4
ctx->Yi.d[3] = BSWAP4(ctr);
-# else
+#  else
PUTU32(ctx->Yi.c + 12, ctr);
-# endif
+#  endif
else
ctx->Yi.d[3] = ctr;
GHASH(ctx, out, GHASH_CHUNK);
in += GHASH_CHUNK;
len -= GHASH_CHUNK;
}
-#endif
+# endif
if ((i = (len & (size_t)-16))) {
size_t j = i / 16;
(*stream) (in, out, j, key, ctx->Yi.c);
ctr += (unsigned int)j;
if (is_endian.little)
-#ifdef BSWAP4
+# ifdef BSWAP4
ctx->Yi.d[3] = BSWAP4(ctr);
-#else
+# else
PUTU32(ctx->Yi.c + 12, ctr);
-#endif
+# endif
else
ctx->Yi.d[3] = ctr;
in += i;
len -= i;
-#if defined(GHASH)
+# if defined(GHASH)
GHASH(ctx, out, i);
out += i;
-#else
+# else
while (j--) {
for (i = 0; i < 16; ++i)
ctx->Xi.c[i] ^= out[i];
GCM_MUL(ctx, Xi);
out += 16;
}
-#endif
+# endif
}
if (len) {
(*ctx->block) (ctx->Yi.c, ctx->EKi.c, key);
++ctr;
if (is_endian.little)
-#ifdef BSWAP4
+# ifdef BSWAP4
ctx->Yi.d[3] = BSWAP4(ctr);
-#else
+# else
PUTU32(ctx->Yi.c + 12, ctr);
-#endif
+# endif
else
ctx->Yi.d[3] = ctr;
while (len--) {
ctx->mres = n;
return 0;
+#endif
}
int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx,
const unsigned char *in, unsigned char *out,
size_t len, ctr128_f stream)
{
+#if defined(OPENSSL_SMALL_FOOTPRINT)
+ return CRYPTO_gcm128_decrypt(ctx, in, out, len);
+#else
const union {
long one;
char little;
- } is_endian = {
- 1
- };
+ } is_endian = { 1 };
unsigned int n, ctr;
size_t i;
u64 mlen = ctx->len.u[1];
void *key = ctx->key;
-#ifdef GCM_FUNCREF_4BIT
+# ifdef GCM_FUNCREF_4BIT
void (*gcm_gmult_p) (u64 Xi[2], const u128 Htable[16]) = ctx->gmult;
-# ifdef GHASH
+#  ifdef GHASH
void (*gcm_ghash_p) (u64 Xi[2], const u128 Htable[16],
const u8 *inp, size_t len) = ctx->ghash;
+#  endif
# endif
-#endif
mlen += len;
if (mlen > ((U64(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len))
}
if (is_endian.little)
-#ifdef BSWAP4
+# ifdef BSWAP4
ctr = BSWAP4(ctx->Yi.d[3]);
-#else
+# else
ctr = GETU32(ctx->Yi.c + 12);
-#endif
+# endif
else
ctr = ctx->Yi.d[3];
return 0;
}
}
-#if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
+# if defined(GHASH) && defined(GHASH_CHUNK)
while (len >= GHASH_CHUNK) {
GHASH(ctx, in, GHASH_CHUNK);
(*stream) (in, out, GHASH_CHUNK / 16, key, ctx->Yi.c);
ctr += GHASH_CHUNK / 16;
if (is_endian.little)
-# ifdef BSWAP4
+#  ifdef BSWAP4
ctx->Yi.d[3] = BSWAP4(ctr);
-# else
+#  else
PUTU32(ctx->Yi.c + 12, ctr);
-# endif
+#  endif
else
ctx->Yi.d[3] = ctr;
out += GHASH_CHUNK;
in += GHASH_CHUNK;
len -= GHASH_CHUNK;
}
-#endif
+# endif
if ((i = (len & (size_t)-16))) {
size_t j = i / 16;
-#if defined(GHASH)
+# if defined(GHASH)
GHASH(ctx, in, i);
-#else
+# else
while (j--) {
size_t k;
for (k = 0; k < 16; ++k)
}
j = i / 16;
in -= i;
-#endif
+# endif
(*stream) (in, out, j, key, ctx->Yi.c);
ctr += (unsigned int)j;
if (is_endian.little)
-#ifdef BSWAP4
+# ifdef BSWAP4
ctx->Yi.d[3] = BSWAP4(ctr);
-#else
+# else
PUTU32(ctx->Yi.c + 12, ctr);
-#endif
+# endif
else
ctx->Yi.d[3] = ctr;
out += i;
(*ctx->block) (ctx->Yi.c, ctx->EKi.c, key);
++ctr;
if (is_endian.little)
-#ifdef BSWAP4
+# ifdef BSWAP4
ctx->Yi.d[3] = BSWAP4(ctr);
-#else
+# else
PUTU32(ctx->Yi.c + 12, ctr);
-#endif
+# endif
else
ctx->Yi.d[3] = ctr;
while (len--) {
ctx->mres = n;
return 0;
+#endif
}
int CRYPTO_gcm128_finish(GCM128_CONTEXT *ctx, const unsigned char *tag,
const union {
long one;
char little;
- } is_endian = {
- 1
- };
+ } is_endian = { 1 };
u64 alen = ctx->len.u[0] << 3;
u64 clen = ctx->len.u[1] << 3;
#ifdef GCM_FUNCREF_4BIT