#if defined(__clang__)
# undef CRYPTOPP_X86_ASM_AVAILABLE
# undef CRYPTOPP_X32_ASM_AVAILABLE
# undef CRYPTOPP_X64_ASM_AVAILABLE
# undef CRYPTOPP_SSE2_ASM_AVAILABLE
#endif

#if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x5140)
# undef CRYPTOPP_CLMUL_AVAILABLE
#endif

#if !(defined(__ARM_FEATURE_CRYPTO) || defined(_MSC_VER))
# undef CRYPTOPP_ARM_PMULL_AVAILABLE
#endif

#if (CRYPTOPP_SSE2_INTRIN_AVAILABLE)
# include <emmintrin.h>
#endif

#if (CRYPTOPP_CLMUL_AVAILABLE)
# include <tmmintrin.h>
# include <wmmintrin.h>
#endif

#if (CRYPTOPP_ARM_NEON_AVAILABLE)
# include <arm_neon.h>
#endif

#if defined(CRYPTOPP_ARM_ACLE_AVAILABLE)
# include <arm_acle.h>
#endif

// Needed by the SIGILL-based feature probe below.
#ifdef CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY
# include <signal.h>
# include <setjmp.h>
#endif

#ifndef EXCEPTION_EXECUTE_HANDLER
# define EXCEPTION_EXECUTE_HANDLER 1
#endif

// Cast helpers that route pointer conversions through void* to avoid
// type-punning warnings.
#define M128_CAST(x) ((__m128i *)(void *)(x))
#define CONST_M128_CAST(x) ((const __m128i *)(const void *)(x))

#define UINT64X2_CAST(x) ((uint64x2_t *)(void *)(x))
#define CONST_UINT64X2_CAST(x) ((const uint64x2_t *)(const void *)(x))

ANONYMOUS_NAMESPACE_BEGIN
#if (CRYPTOPP_ARM_PMULL_AVAILABLE)

// Some GCC 4.x releases lack the vmull_p64/vmull_high_p64 intrinsics,
// so fall back to the AArch64 crypto builtins on those compilers.
# if (CRYPTOPP_GCC_VERSION >= 40800) && (CRYPTOPP_GCC_VERSION < 49000)
inline poly128_t VMULL_P64(poly64_t a, poly64_t b)
{
    return __builtin_aarch64_crypto_pmulldi_ppp (a, b);
}

inline poly128_t VMULL_HIGH_P64(poly64x2_t a, poly64x2_t b)
{
    return __builtin_aarch64_crypto_pmullv2di_ppp (a, b);
}
# else
inline poly128_t VMULL_P64(poly64_t a, poly64_t b)
{
    return vmull_p64(a, b);
}

inline poly128_t VMULL_HIGH_P64(poly64x2_t a, poly64x2_t b)
{
    return vmull_high_p64(a, b);
}
# endif
#endif // CRYPTOPP_ARM_PMULL_AVAILABLE
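// VMULL_P64 and VMULL_HIGH_P64 wrap the AArch64 64x64 -> 128-bit carry-less
// multiply (PMULL/PMULL2); in this translation unit they are exercised by the
// runtime probe CPU_ProbePMULL further below. The GHASH code itself uses the
// PMULL_00/01/10/11 helpers that follow, where the two digits name the 64-bit
// halves of the first and second operand: PMULL_00 = low(a)*low(b),
// PMULL_01 = low(a)*high(b), PMULL_10 = high(a)*low(b), PMULL_11 = high(a)*high(b).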
#if CRYPTOPP_ARM_PMULL_AVAILABLE

#if defined(__GNUC__)
// Inline assembly wrappers around PMULL/PMULL2 and EXT. The "w" constraint
// places each operand in a NEON register.
inline uint64x2_t PMULL_00(const uint64x2_t a, const uint64x2_t b)
{
    uint64x2_t r;
    __asm __volatile("pmull %0.1q, %1.1d, %2.1d \n\t"
        :"=w" (r) : "w" (a), "w" (b) );
    return r;
}
inline uint64x2_t PMULL_01(const uint64x2_t a, const uint64x2_t b)
{
    uint64x2_t r;
    __asm __volatile("pmull %0.1q, %1.1d, %2.1d \n\t"
        :"=w" (r) : "w" (a), "w" (vget_high_u64(b)) );
    return r;
}
inline uint64x2_t PMULL_10(const uint64x2_t a, const uint64x2_t b)
{
    uint64x2_t r;
    __asm __volatile("pmull %0.1q, %1.1d, %2.1d \n\t"
        :"=w" (r) : "w" (vget_high_u64(a)), "w" (b) );
    return r;
}
inline uint64x2_t PMULL_11(const uint64x2_t a, const uint64x2_t b)
{
    uint64x2_t r;
    __asm __volatile("pmull2 %0.1q, %1.2d, %2.2d \n\t"
        :"=w" (r) : "w" (a), "w" (b) );
    return r;
}
inline uint64x2_t VEXT_U8(uint64x2_t a, uint64x2_t b, unsigned int c)
{
    uint64x2_t r;
    __asm __volatile("ext %0.16b, %1.16b, %2.16b, %3 \n\t"
        :"=w" (r) : "w" (a), "w" (b), "I" (c) );
    return r;
}
template <unsigned int C>
inline uint64x2_t VEXT_U8(uint64x2_t a, uint64x2_t b)
{
    uint64x2_t r;
    __asm __volatile("ext %0.16b, %1.16b, %2.16b, %3 \n\t"
        :"=w" (r) : "w" (a), "w" (b), "I" (C) );
    return r;
}
#endif // GCC and compatibles

#if defined(_MSC_VER)
inline uint64x2_t PMULL_00(const uint64x2_t a, const uint64x2_t b)
{
    return (uint64x2_t)(vmull_p64(vgetq_lane_u64(vreinterpretq_u64_u8(a),0),
                                  vgetq_lane_u64(vreinterpretq_u64_u8(b),0)));
}
inline uint64x2_t PMULL_01(const uint64x2_t a, const uint64x2_t b)
{
    return (uint64x2_t)(vmull_p64(vgetq_lane_u64(vreinterpretq_u64_u8(a),0),
                                  vgetq_lane_u64(vreinterpretq_u64_u8(b),1)));
}
inline uint64x2_t PMULL_10(const uint64x2_t a, const uint64x2_t b)
{
    return (uint64x2_t)(vmull_p64(vgetq_lane_u64(vreinterpretq_u64_u8(a),1),
                                  vgetq_lane_u64(vreinterpretq_u64_u8(b),0)));
}
inline uint64x2_t PMULL_11(const uint64x2_t a, const uint64x2_t b)
{
    return (uint64x2_t)(vmull_p64(vgetq_lane_u64(vreinterpretq_u64_u8(a),1),
                                  vgetq_lane_u64(vreinterpretq_u64_u8(b),1)));
}
inline uint64x2_t VEXT_U8(uint64x2_t a, uint64x2_t b, unsigned int c)
{
    return (uint64x2_t)vextq_u8(vreinterpretq_u8_u64(a), vreinterpretq_u8_u64(b), c);
}
template <unsigned int C>
inline uint64x2_t VEXT_U8(uint64x2_t a, uint64x2_t b)
{
    return (uint64x2_t)vextq_u8(vreinterpretq_u8_u64(a), vreinterpretq_u8_u64(b), C);
}
#endif // Microsoft and compatibles
#endif // CRYPTOPP_ARM_PMULL_AVAILABLE

ANONYMOUS_NAMESPACE_END

NAMESPACE_BEGIN(CryptoPP)
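// Runtime probe for the PMULL instruction. A single carry-less multiply is
// executed under a guard (SEH __try/__except with MS-style assembly, otherwise
// a temporary SIGILL handler with setjmp/longjmp), and the probe reports
// whether the instruction ran and produced the expected product.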
#ifdef CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY

typedef void (*SigHandler)(int);

static jmp_buf s_jmpSIGILL;
static void SigIllHandler(int)
{
    longjmp(s_jmpSIGILL, 1);
}

#endif // Not CRYPTOPP_MS_STYLE_INLINE_ASSEMBLY

#if (CRYPTOPP_BOOL_ARM32 || CRYPTOPP_BOOL_ARM64)
bool CPU_ProbePMULL()
{
#if defined(CRYPTOPP_NO_CPU_FEATURE_PROBES)
    return false;
#elif (CRYPTOPP_ARM_PMULL_AVAILABLE)
# if defined(CRYPTOPP_MS_STYLE_INLINE_ASSEMBLY)
    volatile bool result = true;
    __try
    {
        const poly64_t a1={0x9090909090909090}, b1={0xb0b0b0b0b0b0b0b0};
        const poly8x16_t a2={0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0},
                         b2={0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0};

        const poly128_t r1 = vmull_p64(a1, b1);
        const poly128_t r2 = vmull_high_p64((poly64x2_t)(a2), (poly64x2_t)(b2));

        const uint64x2_t& t1 = (uint64x2_t)(r1);
        const uint64x2_t& t2 = (uint64x2_t)(r2);

        result = !!(vgetq_lane_u64(t1,0) == 0x5300530053005300 && vgetq_lane_u64(t1,1) == 0x5300530053005300 &&
                    vgetq_lane_u64(t2,0) == 0x6c006c006c006c00 && vgetq_lane_u64(t2,1) == 0x6c006c006c006c00);
    }
    __except (EXCEPTION_EXECUTE_HANDLER)
    {
        return false;
    }
    return result;
# else

# if defined(__APPLE__)
    // No SIGILL probes on Apple platforms.
    return false;
# endif

    // longjmp and clobber warnings. Volatile is required.
    volatile bool result = true;

    volatile SigHandler oldHandler = signal(SIGILL, SigIllHandler);
    if (oldHandler == SIG_ERR)
        return false;

    volatile sigset_t oldMask;
    if (sigprocmask(0, NULLPTR, (sigset_t*)&oldMask))
        return false;

    if (setjmp(s_jmpSIGILL))
        result = false;
    else
    {
        const poly64_t a1={0x9090909090909090}, b1={0xb0b0b0b0b0b0b0b0};
        const poly8x16_t a2={0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0},
                         b2={0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0};

        const poly128_t r1 = VMULL_P64(a1, b1);
        const poly128_t r2 = VMULL_HIGH_P64((poly64x2_t)(a2), (poly64x2_t)(b2));

        const uint64x2_t& t1 = (uint64x2_t)(r1);
        const uint64x2_t& t2 = (uint64x2_t)(r2);

        result = !!(vgetq_lane_u64(t1,0) == 0x5300530053005300 && vgetq_lane_u64(t1,1) == 0x5300530053005300 &&
                    vgetq_lane_u64(t2,0) == 0x6c006c006c006c00 && vgetq_lane_u64(t2,1) == 0x6c006c006c006c00);
    }

    sigprocmask(SIG_SETMASK, (sigset_t*)&oldMask, NULLPTR);
    signal(SIGILL, oldHandler);
    return result;
# endif

#else
    return false;
#endif // CRYPTOPP_ARM_PMULL_AVAILABLE
}
#endif // ARM32 or ARM64

#if CRYPTOPP_ARM_NEON_AVAILABLE
void GCM_Xor16_NEON(byte *a, const byte *b, const byte *c)
{
    CRYPTOPP_ASSERT(IsAlignedOn(a,GetAlignmentOf<uint64x2_t>()));
    CRYPTOPP_ASSERT(IsAlignedOn(b,GetAlignmentOf<uint64x2_t>()));
    CRYPTOPP_ASSERT(IsAlignedOn(c,GetAlignmentOf<uint64x2_t>()));
    *UINT64X2_CAST(a) = veorq_u64(*CONST_UINT64X2_CAST(b), *CONST_UINT64X2_CAST(c));
}
#endif // CRYPTOPP_ARM_NEON_AVAILABLE
#if CRYPTOPP_ARM_PMULL_AVAILABLE

ANONYMOUS_NAMESPACE_BEGIN

// s_clmulConstants[0] is the GHASH reduction constant used by GCM_Reduce_PMULL.
CRYPTOPP_ALIGN_DATA(16)
const word64 s_clmulConstants64[] = {
    W64LIT(0xe100000000000000), W64LIT(0xc200000000000000),
    W64LIT(0x08090a0b0c0d0e0f), W64LIT(0x0001020304050607),
    W64LIT(0x0001020304050607), W64LIT(0x08090a0b0c0d0e0f)
};

const uint64x2_t *s_clmulConstants = (const uint64x2_t *)s_clmulConstants64;
const unsigned int s_clmulTableSizeInBlocks = 8;

ANONYMOUS_NAMESPACE_END
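// GHASH arithmetic takes place in GF(2^128) modulo x^128 + x^7 + x^2 + x + 1.
// In effect, the double-width carry-less product is accumulated as
// c0*x^128 + c1*x^64 + c2 (three overlapping 128-bit words), and
// GCM_Reduce_PMULL folds the upper words back into 128 bits using the
// precomputed constant r = s_clmulConstants[0], exploiting
// x^128 = x^7 + x^2 + x + 1 (mod the GCM polynomial).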
uint64x2_t GCM_Reduce_PMULL(uint64x2_t c0, uint64x2_t c1, uint64x2_t c2, const uint64x2_t &r)
{
    c1 = veorq_u64(c1, VEXT_U8<8>(vdupq_n_u64(0), c0));
    c1 = veorq_u64(c1, PMULL_01(c0, r));
    c0 = VEXT_U8<8>(c0, vdupq_n_u64(0));
    c0 = vshlq_n_u64(veorq_u64(c0, c1), 1);
    c0 = PMULL_00(c0, r);
    c2 = veorq_u64(c2, c0);
    c2 = veorq_u64(c2, VEXT_U8<8>(c1, vdupq_n_u64(0)));
    c1 = vshrq_n_u64(vcombine_u64(vget_low_u64(c1), vget_low_u64(c2)), 63);
    c2 = vshlq_n_u64(c2, 1);

    return veorq_u64(c2, c1);
}
uint64x2_t GCM_Multiply_PMULL(const uint64x2_t &x, const uint64x2_t &h, const uint64x2_t &r)
{
    const uint64x2_t c0 = PMULL_00(x, h);
    const uint64x2_t c1 = veorq_u64(PMULL_10(x, h), PMULL_01(x, h));
    const uint64x2_t c2 = PMULL_11(x, h);

    return GCM_Reduce_PMULL(c0, c1, c2, r);
}
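// GCM_Multiply_PMULL is a 2x2-limb schoolbook multiplication over GF(2)[x]:
// the four carry-less partial products are PMULL_00 (low*low), PMULL_10 and
// PMULL_01 (the two cross terms, XORed together into c1) and PMULL_11
// (high*high). The double-width result (c0, c1, c2) is then folded back to
// 128 bits by GCM_Reduce_PMULL.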
void GCM_SetKeyWithoutResync_PMULL(const byte *hashKey, byte *mulTable, unsigned int tableSize)
{
    const uint64x2_t r = s_clmulConstants[0];
    const uint64x2_t t = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(hashKey)));
    const uint64x2_t h0 = vextq_u64(t, t, 1);

    uint64x2_t h = h0; unsigned int i;
    for (i=0; i<tableSize-32; i+=32)
    {
        const uint64x2_t h1 = GCM_Multiply_PMULL(h, h0, r);
        vst1_u64((uint64_t *)(mulTable+i), vget_low_u64(h));
        vst1q_u64((uint64_t *)(mulTable+i+16), h1);
        vst1q_u64((uint64_t *)(mulTable+i+8), h);
        vst1_u64((uint64_t *)(mulTable+i+8), vget_low_u64(h1));
        h = GCM_Multiply_PMULL(h1, h0, r);
    }

    const uint64x2_t h1 = GCM_Multiply_PMULL(h, h0, r);
    vst1_u64((uint64_t *)(mulTable+i), vget_low_u64(h));
    vst1q_u64((uint64_t *)(mulTable+i+16), h1);
    vst1q_u64((uint64_t *)(mulTable+i+8), h);
    vst1_u64((uint64_t *)(mulTable+i+8), vget_low_u64(h1));
}
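// The mulTable built above holds successive powers of the hash key H
// (H, H^2, H^3, ...) computed with GCM_Multiply_PMULL, written in pairs with
// overlapping 8-byte stores. GCM_AuthenticateBlocks_PMULL later loads two
// precomputed powers per iteration so several data blocks can share one
// reduction.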
size_t GCM_AuthenticateBlocks_PMULL(const byte *data, size_t len, const byte *mtable, byte *hbuffer)
{
    const uint64x2_t* table = reinterpret_cast<const uint64x2_t*>(mtable);
    uint64x2_t x = vreinterpretq_u64_u8(vld1q_u8(hbuffer));
    const uint64x2_t r = s_clmulConstants[0];

    const size_t BLOCKSIZE = 16;
    while (len >= BLOCKSIZE)
    {
        size_t s = UnsignedMin(len/BLOCKSIZE, s_clmulTableSizeInBlocks), i=0;
        uint64x2_t d1, d2 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data+(s-1)*BLOCKSIZE)));
        uint64x2_t c0 = vdupq_n_u64(0);
        uint64x2_t c1 = vdupq_n_u64(0);
        uint64x2_t c2 = vdupq_n_u64(0);

        while (true)
        {
            const uint64x2_t h0 = vld1q_u64((const uint64_t*)(table+i));
            const uint64x2_t h1 = vld1q_u64((const uint64_t*)(table+i+1));
            const uint64x2_t h2 = veorq_u64(h0, h1);

            if (++i == s)
            {
                const uint64x2_t t1 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data)));
                d1 = veorq_u64(vextq_u64(t1, t1, 1), x);
                c0 = veorq_u64(c0, PMULL_00(d1, h0));
                c2 = veorq_u64(c2, PMULL_10(d1, h1));
                d1 = veorq_u64(d1, (uint64x2_t)vcombine_u32(vget_high_u32(vreinterpretq_u32_u64(d1)),
                    vget_low_u32(vreinterpretq_u32_u64(d1))));
                c1 = veorq_u64(c1, PMULL_00(d1, h2));

                break;
            }

            d1 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data+(s-i)*16-8)));
            c0 = veorq_u64(c0, PMULL_10(d2, h0));
            c2 = veorq_u64(c2, PMULL_10(d1, h1));
            d2 = veorq_u64(d2, d1);
            c1 = veorq_u64(c1, PMULL_10(d2, h2));

            if (++i == s)
            {
                const uint64x2_t t2 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data)));
                d1 = veorq_u64(vextq_u64(t2, t2, 1), x);
                c0 = veorq_u64(c0, PMULL_01(d1, h0));
                c2 = veorq_u64(c2, PMULL_11(d1, h1));
                d1 = veorq_u64(d1, (uint64x2_t)vcombine_u32(vget_high_u32(vreinterpretq_u32_u64(d1)),
                    vget_low_u32(vreinterpretq_u32_u64(d1))));
                c1 = veorq_u64(c1, PMULL_01(d1, h2));

                break;
            }

            const uint64x2_t t3 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data+(s-i)*16-8)));
            d2 = vextq_u64(t3, t3, 1);
            c0 = veorq_u64(c0, PMULL_01(d1, h0));
            c2 = veorq_u64(c2, PMULL_01(d2, h1));
            d1 = veorq_u64(d1, d2);
            c1 = veorq_u64(c1, PMULL_01(d1, h2));
        }
        data += s*16;
        len -= s*16;

        c1 = veorq_u64(veorq_u64(c1, c0), c2);
        x = GCM_Reduce_PMULL(c0, c1, c2, r);
    }

    vst1q_u64(reinterpret_cast<uint64_t *>(hbuffer), x);
    return len;
}
void GCM_ReverseHashBufferIfNeeded_PMULL(byte *hashBuffer)
{
    if (GetNativeByteOrder() != BIG_ENDIAN_ORDER)
    {
        const uint8x16_t x = vrev64q_u8(vld1q_u8(hashBuffer));
        vst1q_u8(hashBuffer, vextq_u8(x, x, 8));
    }
}
#endif // CRYPTOPP_ARM_PMULL_AVAILABLE
#if CRYPTOPP_SSE2_INTRIN_AVAILABLE || CRYPTOPP_SSE2_ASM_AVAILABLE
void GCM_Xor16_SSE2(byte *a, const byte *b, const byte *c)
{
# if CRYPTOPP_SSE2_ASM_AVAILABLE && defined(__GNUC__)
    asm ("movdqa %1, %%xmm0; pxor %2, %%xmm0; movdqa %%xmm0, %0;"
        : "=m" (a[0]) : "m"(b[0]), "m"(c[0]));
# else  // CRYPTOPP_SSE2_INTRIN_AVAILABLE
    _mm_store_si128(M128_CAST(a), _mm_xor_si128(
        _mm_load_si128(CONST_M128_CAST(b)),
        _mm_load_si128(CONST_M128_CAST(c))));
# endif
}
#endif // CRYPTOPP_SSE2_INTRIN_AVAILABLE || CRYPTOPP_SSE2_ASM_AVAILABLE
#if CRYPTOPP_CLMUL_AVAILABLE

ANONYMOUS_NAMESPACE_BEGIN

// [0] is the GHASH reduction constant r; [1] is a full 16-byte byte-reversal
// shuffle mask and [2] swaps the bytes within each 64-bit half (mask1 and
// mask2 in GCM_AuthenticateBlocks_CLMUL).
CRYPTOPP_ALIGN_DATA(16)
const word64 s_clmulConstants64[] = {
    W64LIT(0xe100000000000000), W64LIT(0xc200000000000000),
    W64LIT(0x08090a0b0c0d0e0f), W64LIT(0x0001020304050607),
    W64LIT(0x0001020304050607), W64LIT(0x08090a0b0c0d0e0f)};

const __m128i *s_clmulConstants = CONST_M128_CAST(s_clmulConstants64);
const unsigned int s_cltableSizeInBlocks = 8;

ANONYMOUS_NAMESPACE_END
#if 0
// Bit-level reference implementations, preserved for testing.
void gcm_gf_mult(const unsigned char *a, const unsigned char *b, unsigned char *c)
{
    word64 Z0=0, Z1=0, V0, V1;

    typedef BlockGetAndPut<word64, BigEndian> Block;
    Block::Get(a)(V0)(V1);

    for (int i=0; i<16; i++)
    {
        for (int j=0x80; j!=0; j>>=1)
        {
            int x = b[i] & j;
            Z0 ^= x ? V0 : 0;
            Z1 ^= x ? V1 : 0;
            x = (int)V1 & 1;
            V1 = (V1>>1) | (V0<<63);
            V0 = (V0>>1) ^ (x ? W64LIT(0xe1) << 56 : 0);
        }
    }
    Block::Put(NULLPTR, c)(Z0)(Z1);
}
__m128i _mm_clmulepi64_si128(const __m128i &a, const __m128i &b, int i)
{
    word64 A[1] = {ByteReverse(((word64*)&a)[i&1])};
    word64 B[1] = {ByteReverse(((word64*)&b)[i>>4])};

    PolynomialMod2 pa((byte *)A, 8);
    PolynomialMod2 pb((byte *)B, 8);
    PolynomialMod2 c = pa*pb;

    __m128i output;
    for (int i=0; i<16; i++)
        ((byte *)&output)[i] = c.GetByte(i);
    return output;
}
#endif
__m128i GCM_Reduce_CLMUL(__m128i c0, __m128i c1, __m128i c2, const __m128i &r)
{
    c1 = _mm_xor_si128(c1, _mm_slli_si128(c0, 8));
    c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(c0, r, 0x10));
    c0 = _mm_srli_si128(c0, 8);
    c0 = _mm_xor_si128(c0, c1);
    c0 = _mm_slli_epi64(c0, 1);
    c0 = _mm_clmulepi64_si128(c0, r, 0);
    c2 = _mm_xor_si128(c2, c0);
    c2 = _mm_xor_si128(c2, _mm_srli_si128(c1, 8));
    c1 = _mm_unpacklo_epi64(c1, c2);
    c1 = _mm_srli_epi64(c1, 63);
    c2 = _mm_slli_epi64(c2, 1);
    return _mm_xor_si128(c2, c1);
}
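// Reminder on the PCLMULQDQ immediate: bit 0 selects the 64-bit half of the
// first operand and bit 4 selects the half of the second (compare the
// bit-level reference above, which indexes with i&1 and i>>4). The immediates
// 0, 1, 0x10 and 0x11 used below therefore cover all four partial products.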
__m128i GCM_Multiply_CLMUL(const __m128i &x, const __m128i &h, const __m128i &r)
{
    const __m128i c0 = _mm_clmulepi64_si128(x,h,0);
    const __m128i c1 = _mm_xor_si128(_mm_clmulepi64_si128(x,h,1), _mm_clmulepi64_si128(x,h,0x10));
    const __m128i c2 = _mm_clmulepi64_si128(x,h,0x11);

    return GCM_Reduce_CLMUL(c0, c1, c2, r);
}
void GCM_SetKeyWithoutResync_CLMUL(const byte *hashKey, byte *mulTable, unsigned int tableSize)
{
    const __m128i r = s_clmulConstants[0];
    const __m128i h0 = _mm_shuffle_epi8(_mm_load_si128(CONST_M128_CAST(hashKey)), s_clmulConstants[1]);

    __m128i h = h0; unsigned int i;
    for (i=0; i<tableSize-32; i+=32)
    {
        const __m128i h1 = GCM_Multiply_CLMUL(h, h0, r);
        _mm_storel_epi64(M128_CAST(mulTable+i), h);
        _mm_storeu_si128(M128_CAST(mulTable+i+16), h1);
        _mm_storeu_si128(M128_CAST(mulTable+i+8), h);
        _mm_storel_epi64(M128_CAST(mulTable+i+8), h1);
        h = GCM_Multiply_CLMUL(h1, h0, r);
    }

    const __m128i h1 = GCM_Multiply_CLMUL(h, h0, r);
    _mm_storel_epi64(M128_CAST(mulTable+i), h);
    _mm_storeu_si128(M128_CAST(mulTable+i+16), h1);
    _mm_storeu_si128(M128_CAST(mulTable+i+8), h);
    _mm_storel_epi64(M128_CAST(mulTable+i+8), h1);
}
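// GCM_AuthenticateBlocks_CLMUL uses aggregated GHASH: up to
// s_cltableSizeInBlocks (8) data blocks are multiplied by matching precomputed
// powers of H from mtable and XOR-accumulated into c0/c1/c2, so a single
// GCM_Reduce_CLMUL is performed per group of blocks rather than per block.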
size_t GCM_AuthenticateBlocks_CLMUL(const byte *data, size_t len, const byte *mtable, byte *hbuffer)
{
    const __m128i *table = CONST_M128_CAST(mtable);
    __m128i x = _mm_load_si128(M128_CAST(hbuffer));
    const __m128i r = s_clmulConstants[0], mask1 = s_clmulConstants[1], mask2 = s_clmulConstants[2];

    while (len >= 16)
    {
        size_t s = UnsignedMin(len/16, s_cltableSizeInBlocks), i=0;
        __m128i d1, d2 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data+(s-1)*16)), mask2);
        __m128i c0 = _mm_setzero_si128();
        __m128i c1 = _mm_setzero_si128();
        __m128i c2 = _mm_setzero_si128();

        while (true)
        {
            const __m128i h0 = _mm_load_si128(table+i);
            const __m128i h1 = _mm_load_si128(table+i+1);
            const __m128i h2 = _mm_xor_si128(h0, h1);

            if (++i == s)
            {
                d1 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data)), mask1);
                d1 = _mm_xor_si128(d1, x);
                c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d1, h0, 0));
                c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d1, h1, 1));
                d1 = _mm_xor_si128(d1, _mm_shuffle_epi32(d1, _MM_SHUFFLE(1, 0, 3, 2)));
                c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d1, h2, 0));

                break;
            }

            d1 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data+(s-i)*16-8)), mask2);
            c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d2, h0, 1));
            c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d1, h1, 1));
            d2 = _mm_xor_si128(d2, d1);
            c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d2, h2, 1));

            if (++i == s)
            {
                d1 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data)), mask1);
                d1 = _mm_xor_si128(d1, x);
                c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d1, h0, 0x10));
                c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d1, h1, 0x11));
                d1 = _mm_xor_si128(d1, _mm_shuffle_epi32(d1, _MM_SHUFFLE(1, 0, 3, 2)));
                c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d1, h2, 0x10));

                break;
            }

            d2 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data+(s-i)*16-8)), mask1);
            c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d1, h0, 0x10));
            c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d2, h1, 0x10));
            d1 = _mm_xor_si128(d1, d2);
            c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d1, h2, 0x10));
        }
        data += s*16;
        len -= s*16;

        c1 = _mm_xor_si128(_mm_xor_si128(c1, c0), c2);
        x = GCM_Reduce_CLMUL(c0, c1, c2, r);
    }

    _mm_store_si128(M128_CAST(hbuffer), x);
    return len;
}
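// Note: GCM_AuthenticateBlocks_CLMUL returns the count of unprocessed trailing
// bytes (less than one 16-byte block) and leaves the updated GHASH state in
// hbuffer; the PMULL variant above follows the same contract.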
void GCM_ReverseHashBufferIfNeeded_CLMUL(byte *hashBuffer)
{
    // The SSSE3 byte shuffle is only reached on CLMUL-capable processors.
    __m128i &x = *M128_CAST(hashBuffer);
    x = _mm_shuffle_epi8(x, s_clmulConstants[1]);
}
#endif // CRYPTOPP_CLMUL_AVAILABLE

NAMESPACE_END