#if (CRYPTOPP_SSSE3_AVAILABLE)
# include "adv_simd.h"
# include <pmmintrin.h>
# include <tmmintrin.h>
#endif

#if defined(__XOP__)
# include <ammintrin.h>
#endif

#if defined(__AVX512F__)
# define CRYPTOPP_AVX512_ROTATE 1
# include <immintrin.h>
#endif

#if (CRYPTOPP_ARM_NEON_AVAILABLE)
# include "adv_simd.h"
# include <arm_neon.h>
#endif

#if (CRYPTOPP_ARM_ACLE_AVAILABLE)
# include <arm_acle.h>
#endif

#if defined(CRYPTOPP_POWER8_AVAILABLE)
# include "adv_simd.h"
# include "ppc_simd.h"
#endif

// Squash MS LNK4221 and libtool warnings
extern const char SIMON128_SIMD_FNAME[] = __FILE__;
ANONYMOUS_NAMESPACE_BEGIN

using CryptoPP::byte;
using CryptoPP::word32;
using CryptoPP::word64;
using CryptoPP::vec_swap;  // SunCC
// *************************** ARM NEON ************************** //

#if (CRYPTOPP_ARM_NEON_AVAILABLE)

// Missing from Microsoft's ARM A-32 implementations
#if defined(_MSC_VER) && !defined(_M_ARM64)
inline uint64x2_t vld1q_dup_u64(const uint64_t* ptr)
{
    return vmovq_n_u64(*ptr);
}
#endif
template <class T>
inline T UnpackHigh64(const T& a, const T& b)
{
    const uint64x1_t x(vget_high_u64((uint64x2_t)a));
    const uint64x1_t y(vget_high_u64((uint64x2_t)b));
    return (T)vcombine_u64(x, y);
}
template <class T>
inline T UnpackLow64(const T& a, const T& b)
{
    const uint64x1_t x(vget_low_u64((uint64x2_t)a));
    const uint64x1_t y(vget_low_u64((uint64x2_t)b));
    return (T)vcombine_u64(x, y);
}
template <unsigned int R>
inline uint64x2_t RotateLeft64(const uint64x2_t& val)
{
    const uint64x2_t a(vshlq_n_u64(val, R));
    const uint64x2_t b(vshrq_n_u64(val, 64 - R));
    return vorrq_u64(a, b);
}
template <unsigned int R>
inline uint64x2_t RotateRight64(const uint64x2_t& val)
{
    const uint64x2_t a(vshlq_n_u64(val, 64 - R));
    const uint64x2_t b(vshrq_n_u64(val, R));
    return vorrq_u64(a, b);
}
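// A scalar reference for the rotation identity used above; handy for
// cross-checking the vector paths, and illustrative only (not part of
// the library):
//
//   inline uint64_t RotL64(uint64_t v, unsigned int r)
//   {
//       // valid for 0 < r < 64, the only values instantiated here
//       return (v << r) | (v >> (64 - r));
//   }
//
// vshlq_n_u64/vshrq_n_u64 apply the two shifts to both 64-bit lanes at
// once and vorrq_u64 merges them, so each lane ends up as RotL64(lane, R).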
#if defined(__aarch32__) || defined(__aarch64__)
// Faster than two shifts and an OR on ARMv8
template <>
inline uint64x2_t RotateLeft64<8>(const uint64x2_t& val)
{
#if (CRYPTOPP_BIG_ENDIAN)
    const uint8_t maskb[16] = { 14,13,12,11, 10,9,8,15, 6,5,4,3, 2,1,0,7 };
    const uint8x16_t mask = vld1q_u8(maskb);
#else
    const uint8_t maskb[16] = { 7,0,1,2, 3,4,5,6, 15,8,9,10, 11,12,13,14 };
    const uint8x16_t mask = vld1q_u8(maskb);
#endif

    return vreinterpretq_u64_u8(
        vqtbl1q_u8(vreinterpretq_u8_u64(val), mask));
}
template <>
inline uint64x2_t RotateRight64<8>(const uint64x2_t& val)
{
#if (CRYPTOPP_BIG_ENDIAN)
    const uint8_t maskb[16] = { 8,15,14,13, 12,11,10,9, 0,7,6,5, 4,3,2,1 };
    const uint8x16_t mask = vld1q_u8(maskb);
#else
    const uint8_t maskb[16] = { 1,2,3,4, 5,6,7,0, 9,10,11,12, 13,14,15,8 };
    const uint8x16_t mask = vld1q_u8(maskb);
#endif

    return vreinterpretq_u64_u8(
        vqtbl1q_u8(vreinterpretq_u8_u64(val), mask));
}
#endif  // __aarch32__ or __aarch64__
inline uint64x2_t SIMON128_f(const uint64x2_t& val)
{
    return veorq_u64(RotateLeft64<2>(val),
        vandq_u64(RotateLeft64<1>(val), RotateLeft64<8>(val)));
}
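// SIMON128_f is the Simon round function f(x) = ((x <<< 1) & (x <<< 8)) ^ (x <<< 2),
// here evaluated on two blocks at once. A scalar sketch of the round pair
// used by the loops below (illustrative; k1, k2 are consecutive subkeys):
//
//   y ^= f(x) ^ k1;   // round i
//   x ^= f(y) ^ k2;   // round i+1
//
// Unrolling two rounds per iteration avoids the per-round x/y swap of the
// textbook Feistel formulation.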
inline void SIMON128_Enc_Block(uint64x2_t &block0, uint64x2_t &block1,
    const word64 *subkeys, unsigned int rounds)
{
    // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
    uint64x2_t x1 = UnpackHigh64(block0, block1);
    uint64x2_t y1 = UnpackLow64(block0, block1);

    for (int i = 0; i < static_cast<int>(rounds & ~1)-1; i += 2)
    {
        const uint64x2_t rk1 = vld1q_dup_u64(subkeys+i);
        y1 = veorq_u64(veorq_u64(y1, SIMON128_f(x1)), rk1);

        const uint64x2_t rk2 = vld1q_dup_u64(subkeys+i+1);
        x1 = veorq_u64(veorq_u64(x1, SIMON128_f(y1)), rk2);
    }

    if (rounds & 1)
    {
        const uint64x2_t rk = vld1q_dup_u64(subkeys+rounds-1);

        y1 = veorq_u64(veorq_u64(y1, SIMON128_f(x1)), rk);
        std::swap(x1, y1);
    }

    // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
    block0 = UnpackLow64(y1, x1);
    block1 = UnpackHigh64(y1, x1);
}
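// Layout sketch for the Unpack* calls above (illustrative). With inputs
// [A1 A2] and [B1 B2], where each element is a 64-bit word:
//
//   x1 = UnpackHigh64([A1 A2], [B1 B2]) = [A2 B2]
//   y1 = UnpackLow64 ([A1 A2], [B1 B2]) = [A1 B1]
//
// The upper halves of both blocks travel in one vector and the lower
// halves in another, so each round advances two blocks. The final
// UnpackLow64/UnpackHigh64 pair inverts the transform.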
inline void SIMON128_Enc_6_Blocks(uint64x2_t &block0, uint64x2_t &block1,
    uint64x2_t &block2, uint64x2_t &block3, uint64x2_t &block4, uint64x2_t &block5,
    const word64 *subkeys, unsigned int rounds)
{
    // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
    uint64x2_t x1 = UnpackHigh64(block0, block1);
    uint64x2_t y1 = UnpackLow64(block0, block1);
    uint64x2_t x2 = UnpackHigh64(block2, block3);
    uint64x2_t y2 = UnpackLow64(block2, block3);
    uint64x2_t x3 = UnpackHigh64(block4, block5);
    uint64x2_t y3 = UnpackLow64(block4, block5);

    for (int i = 0; i < static_cast<int>(rounds & ~1) - 1; i += 2)
    {
        const uint64x2_t rk1 = vld1q_dup_u64(subkeys+i);
        y1 = veorq_u64(veorq_u64(y1, SIMON128_f(x1)), rk1);
        y2 = veorq_u64(veorq_u64(y2, SIMON128_f(x2)), rk1);
        y3 = veorq_u64(veorq_u64(y3, SIMON128_f(x3)), rk1);

        const uint64x2_t rk2 = vld1q_dup_u64(subkeys+i+1);
        x1 = veorq_u64(veorq_u64(x1, SIMON128_f(y1)), rk2);
        x2 = veorq_u64(veorq_u64(x2, SIMON128_f(y2)), rk2);
        x3 = veorq_u64(veorq_u64(x3, SIMON128_f(y3)), rk2);
    }

    if (rounds & 1)
    {
        const uint64x2_t rk = vld1q_dup_u64(subkeys + rounds - 1);

        y1 = veorq_u64(veorq_u64(y1, SIMON128_f(x1)), rk);
        y2 = veorq_u64(veorq_u64(y2, SIMON128_f(x2)), rk);
        y3 = veorq_u64(veorq_u64(y3, SIMON128_f(x3)), rk);
        std::swap(x1, y1); std::swap(x2, y2); std::swap(x3, y3);
    }

    // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
    block0 = UnpackLow64(y1, x1);
    block1 = UnpackHigh64(y1, x1);
    block2 = UnpackLow64(y2, x2);
    block3 = UnpackHigh64(y2, x2);
    block4 = UnpackLow64(y3, x3);
    block5 = UnpackHigh64(y3, x3);
}
inline void SIMON128_Dec_Block(uint64x2_t &block0, uint64x2_t &block1,
    const word64 *subkeys, unsigned int rounds)
{
    // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
    uint64x2_t x1 = UnpackHigh64(block0, block1);
    uint64x2_t y1 = UnpackLow64(block0, block1);

    if (rounds & 1)
    {
        std::swap(x1, y1);
        const uint64x2_t rk = vld1q_dup_u64(subkeys + rounds - 1);

        y1 = veorq_u64(veorq_u64(y1, rk), SIMON128_f(x1));
        rounds--;
    }

    for (int i = static_cast<int>(rounds-2); i >= 0; i -= 2)
    {
        const uint64x2_t rk1 = vld1q_dup_u64(subkeys+i+1);
        x1 = veorq_u64(veorq_u64(x1, SIMON128_f(y1)), rk1);

        const uint64x2_t rk2 = vld1q_dup_u64(subkeys+i);
        y1 = veorq_u64(veorq_u64(y1, SIMON128_f(x1)), rk2);
    }

    // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
    block0 = UnpackLow64(y1, x1);
    block1 = UnpackHigh64(y1, x1);
}
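// Decryption replays the rounds with the subkey order reversed. A scalar
// sketch of the inverse round pair (illustrative only):
//
//   x ^= f(y) ^ k[i+1];   // undoes encryption round i+1
//   y ^= f(x) ^ k[i];     // undoes encryption round i
//
// The (rounds & 1) prologue absorbs a trailing odd round first so the
// loop can consume two subkeys per iteration.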
inline void SIMON128_Dec_6_Blocks(uint64x2_t &block0, uint64x2_t &block1,
    uint64x2_t &block2, uint64x2_t &block3, uint64x2_t &block4, uint64x2_t &block5,
    const word64 *subkeys, unsigned int rounds)
{
    // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
    uint64x2_t x1 = UnpackHigh64(block0, block1);
    uint64x2_t y1 = UnpackLow64(block0, block1);
    uint64x2_t x2 = UnpackHigh64(block2, block3);
    uint64x2_t y2 = UnpackLow64(block2, block3);
    uint64x2_t x3 = UnpackHigh64(block4, block5);
    uint64x2_t y3 = UnpackLow64(block4, block5);

    if (rounds & 1)
    {
        std::swap(x1, y1); std::swap(x2, y2); std::swap(x3, y3);
        const uint64x2_t rk = vld1q_dup_u64(subkeys + rounds - 1);

        y1 = veorq_u64(veorq_u64(y1, rk), SIMON128_f(x1));
        y2 = veorq_u64(veorq_u64(y2, rk), SIMON128_f(x2));
        y3 = veorq_u64(veorq_u64(y3, rk), SIMON128_f(x3));
        rounds--;
    }

    for (int i = static_cast<int>(rounds-2); i >= 0; i -= 2)
    {
        const uint64x2_t rk1 = vld1q_dup_u64(subkeys + i + 1);
        x1 = veorq_u64(veorq_u64(x1, SIMON128_f(y1)), rk1);
        x2 = veorq_u64(veorq_u64(x2, SIMON128_f(y2)), rk1);
        x3 = veorq_u64(veorq_u64(x3, SIMON128_f(y3)), rk1);

        const uint64x2_t rk2 = vld1q_dup_u64(subkeys + i);
        y1 = veorq_u64(veorq_u64(y1, SIMON128_f(x1)), rk2);
        y2 = veorq_u64(veorq_u64(y2, SIMON128_f(x2)), rk2);
        y3 = veorq_u64(veorq_u64(y3, SIMON128_f(x3)), rk2);
    }

    // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
    block0 = UnpackLow64(y1, x1);
    block1 = UnpackHigh64(y1, x1);
    block2 = UnpackLow64(y2, x2);
    block3 = UnpackHigh64(y2, x2);
    block4 = UnpackLow64(y3, x3);
    block5 = UnpackHigh64(y3, x3);
}
#endif  // CRYPTOPP_ARM_NEON_AVAILABLE

// ***************************** IA-32 ***************************** //

#if defined(CRYPTOPP_SSSE3_AVAILABLE)

// Clang intrinsic casts
#ifndef M128_CAST
# define M128_CAST(x) ((__m128i *)(void *)(x))
#endif
#ifndef CONST_M128_CAST
# define CONST_M128_CAST(x) ((const __m128i *)(const void *)(x))
#endif
#ifndef DOUBLE_CAST
# define DOUBLE_CAST(x) ((double *)(void *)(x))
#endif
#ifndef CONST_DOUBLE_CAST
# define CONST_DOUBLE_CAST(x) ((const double *)(const void *)(x))
#endif

inline void Swap128(__m128i& a, __m128i& b)
{
#if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x5120)
    // __m128i is an unsigned long long[2]; SunCC 12.1 - 12.3 fail to
    // consume std::swap on it, so use the library's vec_swap instead.
    vec_swap(a, b);
#else
    std::swap(a, b);
#endif
}

template <unsigned int R>
inline __m128i RotateLeft64(const __m128i& val)
{
#if defined(CRYPTOPP_AVX512_ROTATE)
    return _mm_rol_epi64(val, R);
#elif defined(__XOP__)
    return _mm_roti_epi64(val, R);
#else
    return _mm_or_si128(
        _mm_slli_epi64(val, R), _mm_srli_epi64(val, 64-R));
#endif
}
template <unsigned int R>
inline __m128i RotateRight64(const __m128i& val)
{
#if defined(CRYPTOPP_AVX512_ROTATE)
    return _mm_ror_epi64(val, R);
#elif defined(__XOP__)
    return _mm_roti_epi64(val, 64-R);
#else
    return _mm_or_si128(
        _mm_slli_epi64(val, 64-R), _mm_srli_epi64(val, R));
#endif
}
// Faster than two shifts and an OR
template <>
__m128i RotateLeft64<8>(const __m128i& val)
{
#if defined(__XOP__)
    return _mm_roti_epi64(val, 8);
#else
    const __m128i mask = _mm_set_epi8(14,13,12,11, 10,9,8,15, 6,5,4,3, 2,1,0,7);
    return _mm_shuffle_epi8(val, mask);
#endif
}
template <>
__m128i RotateRight64<8>(const __m128i& val)
{
#if defined(__XOP__)
    return _mm_roti_epi64(val, 64-8);
#else
    const __m128i mask = _mm_set_epi8(8,15,14,13, 12,11,10,9, 0,7,6,5, 4,3,2,1);
    return _mm_shuffle_epi8(val, mask);
#endif
}
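// How the byte shuffle realizes the rotate (worked example, not library
// code): _mm_set_epi8 lists bytes from most- to least-significant, so the
// RotateLeft64<8> mask maps, within each 64-bit lane,
//
//   dst[0] = src[7], dst[1] = src[0], ..., dst[7] = src[6]
//
// which is exactly an 8-bit left rotate of the lane. A single pshufb is
// cheaper than the shift/shift/or sequence of the generic template.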
inline __m128i SIMON128_f(const __m128i& v)
{
    return _mm_xor_si128(RotateLeft64<2>(v),
        _mm_and_si128(RotateLeft64<1>(v), RotateLeft64<8>(v)));
}
inline void SIMON128_Enc_Block(__m128i &block0, __m128i &block1,
    const word64 *subkeys, unsigned int rounds)
{
    // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
    __m128i x1 = _mm_unpackhi_epi64(block0, block1);
    __m128i y1 = _mm_unpacklo_epi64(block0, block1);

    for (int i = 0; i < static_cast<int>(rounds & ~1)-1; i += 2)
    {
        const __m128i rk1 = _mm_castpd_si128(
            _mm_loaddup_pd(CONST_DOUBLE_CAST(subkeys+i)));
        y1 = _mm_xor_si128(_mm_xor_si128(y1, SIMON128_f(x1)), rk1);

        const __m128i rk2 = _mm_castpd_si128(
            _mm_loaddup_pd(CONST_DOUBLE_CAST(subkeys+i+1)));
        x1 = _mm_xor_si128(_mm_xor_si128(x1, SIMON128_f(y1)), rk2);
    }

    if (rounds & 1)
    {
        const __m128i rk = _mm_castpd_si128(
            _mm_loaddup_pd(CONST_DOUBLE_CAST(subkeys+rounds-1)));

        y1 = _mm_xor_si128(_mm_xor_si128(y1, SIMON128_f(x1)), rk);
        Swap128(x1, y1);
    }

    // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
    block0 = _mm_unpacklo_epi64(y1, x1);
    block1 = _mm_unpackhi_epi64(y1, x1);
}
inline void SIMON128_Enc_6_Blocks(__m128i &block0, __m128i &block1,
    __m128i &block2, __m128i &block3, __m128i &block4, __m128i &block5,
    const word64 *subkeys, unsigned int rounds)
{
    // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
    __m128i x1 = _mm_unpackhi_epi64(block0, block1);
    __m128i y1 = _mm_unpacklo_epi64(block0, block1);
    __m128i x2 = _mm_unpackhi_epi64(block2, block3);
    __m128i y2 = _mm_unpacklo_epi64(block2, block3);
    __m128i x3 = _mm_unpackhi_epi64(block4, block5);
    __m128i y3 = _mm_unpacklo_epi64(block4, block5);

    for (int i = 0; i < static_cast<int>(rounds & ~1) - 1; i += 2)
    {
        const __m128i rk1 = _mm_castpd_si128(
            _mm_loaddup_pd(CONST_DOUBLE_CAST(subkeys + i)));
        y1 = _mm_xor_si128(_mm_xor_si128(y1, SIMON128_f(x1)), rk1);
        y2 = _mm_xor_si128(_mm_xor_si128(y2, SIMON128_f(x2)), rk1);
        y3 = _mm_xor_si128(_mm_xor_si128(y3, SIMON128_f(x3)), rk1);

        const __m128i rk2 = _mm_castpd_si128(
            _mm_loaddup_pd(CONST_DOUBLE_CAST(subkeys + i + 1)));
        x1 = _mm_xor_si128(_mm_xor_si128(x1, SIMON128_f(y1)), rk2);
        x2 = _mm_xor_si128(_mm_xor_si128(x2, SIMON128_f(y2)), rk2);
        x3 = _mm_xor_si128(_mm_xor_si128(x3, SIMON128_f(y3)), rk2);
    }

    if (rounds & 1)
    {
        const __m128i rk = _mm_castpd_si128(
            _mm_loaddup_pd(CONST_DOUBLE_CAST(subkeys + rounds - 1)));
        y1 = _mm_xor_si128(_mm_xor_si128(y1, SIMON128_f(x1)), rk);
        y2 = _mm_xor_si128(_mm_xor_si128(y2, SIMON128_f(x2)), rk);
        y3 = _mm_xor_si128(_mm_xor_si128(y3, SIMON128_f(x3)), rk);
        Swap128(x1, y1); Swap128(x2, y2); Swap128(x3, y3);
    }

    // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
    block0 = _mm_unpacklo_epi64(y1, x1);
    block1 = _mm_unpackhi_epi64(y1, x1);
    block2 = _mm_unpacklo_epi64(y2, x2);
    block3 = _mm_unpackhi_epi64(y2, x2);
    block4 = _mm_unpacklo_epi64(y3, x3);
    block5 = _mm_unpackhi_epi64(y3, x3);
}
inline void SIMON128_Dec_Block(__m128i &block0, __m128i &block1,
    const word64 *subkeys, unsigned int rounds)
{
    // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
    __m128i x1 = _mm_unpackhi_epi64(block0, block1);
    __m128i y1 = _mm_unpacklo_epi64(block0, block1);

    if (rounds & 1)
    {
        const __m128i rk = _mm_castpd_si128(
            _mm_loaddup_pd(CONST_DOUBLE_CAST(subkeys + rounds - 1)));

        Swap128(x1, y1);
        y1 = _mm_xor_si128(_mm_xor_si128(y1, rk), SIMON128_f(x1));
        rounds--;
    }

    for (int i = static_cast<int>(rounds-2); i >= 0; i -= 2)
    {
        const __m128i rk1 = _mm_castpd_si128(
            _mm_loaddup_pd(CONST_DOUBLE_CAST(subkeys+i+1)));
        x1 = _mm_xor_si128(_mm_xor_si128(x1, SIMON128_f(y1)), rk1);

        const __m128i rk2 = _mm_castpd_si128(
            _mm_loaddup_pd(CONST_DOUBLE_CAST(subkeys+i)));
        y1 = _mm_xor_si128(_mm_xor_si128(y1, SIMON128_f(x1)), rk2);
    }

    // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
    block0 = _mm_unpacklo_epi64(y1, x1);
    block1 = _mm_unpackhi_epi64(y1, x1);
}
inline void SIMON128_Dec_6_Blocks(__m128i &block0, __m128i &block1,
    __m128i &block2, __m128i &block3, __m128i &block4, __m128i &block5,
    const word64 *subkeys, unsigned int rounds)
{
    // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
    __m128i x1 = _mm_unpackhi_epi64(block0, block1);
    __m128i y1 = _mm_unpacklo_epi64(block0, block1);
    __m128i x2 = _mm_unpackhi_epi64(block2, block3);
    __m128i y2 = _mm_unpacklo_epi64(block2, block3);
    __m128i x3 = _mm_unpackhi_epi64(block4, block5);
    __m128i y3 = _mm_unpacklo_epi64(block4, block5);

    if (rounds & 1)
    {
        const __m128i rk = _mm_castpd_si128(
            _mm_loaddup_pd(CONST_DOUBLE_CAST(subkeys + rounds - 1)));

        Swap128(x1, y1); Swap128(x2, y2); Swap128(x3, y3);
        y1 = _mm_xor_si128(_mm_xor_si128(y1, rk), SIMON128_f(x1));
        y2 = _mm_xor_si128(_mm_xor_si128(y2, rk), SIMON128_f(x2));
        y3 = _mm_xor_si128(_mm_xor_si128(y3, rk), SIMON128_f(x3));
        rounds--;
    }

    for (int i = static_cast<int>(rounds-2); i >= 0; i -= 2)
    {
        const __m128i rk1 = _mm_castpd_si128(
            _mm_loaddup_pd(CONST_DOUBLE_CAST(subkeys + i + 1)));
        x1 = _mm_xor_si128(_mm_xor_si128(x1, SIMON128_f(y1)), rk1);
        x2 = _mm_xor_si128(_mm_xor_si128(x2, SIMON128_f(y2)), rk1);
        x3 = _mm_xor_si128(_mm_xor_si128(x3, SIMON128_f(y3)), rk1);

        const __m128i rk2 = _mm_castpd_si128(
            _mm_loaddup_pd(CONST_DOUBLE_CAST(subkeys + i)));
        y1 = _mm_xor_si128(_mm_xor_si128(y1, SIMON128_f(x1)), rk2);
        y2 = _mm_xor_si128(_mm_xor_si128(y2, SIMON128_f(x2)), rk2);
        y3 = _mm_xor_si128(_mm_xor_si128(y3, SIMON128_f(x3)), rk2);
    }

    // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
    block0 = _mm_unpacklo_epi64(y1, x1);
    block1 = _mm_unpackhi_epi64(y1, x1);
    block2 = _mm_unpacklo_epi64(y2, x2);
    block3 = _mm_unpackhi_epi64(y2, x2);
    block4 = _mm_unpacklo_epi64(y3, x3);
    block5 = _mm_unpackhi_epi64(y3, x3);
}
#endif  // CRYPTOPP_SSSE3_AVAILABLE

// ***************************** Power8 ***************************** //

#if defined(CRYPTOPP_POWER8_AVAILABLE)

using CryptoPP::uint8x16_p;
using CryptoPP::uint32x4_p;
using CryptoPP::uint64x2_p;

using CryptoPP::VecAnd;
using CryptoPP::VecXor;
using CryptoPP::VecPermute;

// Rotate each 64-bit element left by C bits
template<unsigned int C>
inline uint64x2_p RotateLeft64(const uint64x2_p val)
{
    const uint64x2_p m = {C, C};
    return vec_rl(val, m);
}

// Rotate each 64-bit element right by C bits
template<unsigned int C>
inline uint64x2_p RotateRight64(const uint64x2_p val)
{
    const uint64x2_p m = {64-C, 64-C};
    return vec_rl(val, m);
}
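// vec_rl rotates each 64-bit element left by the amount held in the
// corresponding element of m, so both helpers funnel through the same
// instruction using the identity (illustrative, for 0 < C < 64):
//
//   rotr64(v, C) == rotl64(v, 64 - C)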
inline uint64x2_p SIMON128_f(const uint64x2_p val)
{
    return VecXor(RotateLeft64<2>(val),
        VecAnd(RotateLeft64<1>(val), RotateLeft64<8>(val)));
}
inline void SIMON128_Enc_Block(uint32x4_p &block,
    const word64 *subkeys, unsigned int rounds)
{
#if (CRYPTOPP_BIG_ENDIAN)
    const uint8x16_p m1 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
    const uint8x16_p m2 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
#else
    const uint8x16_p m1 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
    const uint8x16_p m2 = {15,14,13,12,11,10,9,8, 31,30,29,28,27,26,25,24};
#endif

    // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
    uint64x2_p x1 = (uint64x2_p)VecPermute(block, block, m1);
    uint64x2_p y1 = (uint64x2_p)VecPermute(block, block, m2);

    for (int i = 0; i < static_cast<int>(rounds & ~1)-1; i += 2)
    {
        const uint64x2_p rk1 = vec_splats((unsigned long long)subkeys[i]);
        y1 = VecXor(VecXor(y1, SIMON128_f(x1)), rk1);

        const uint64x2_p rk2 = vec_splats((unsigned long long)subkeys[i+1]);
        x1 = VecXor(VecXor(x1, SIMON128_f(y1)), rk2);
    }

    if (rounds & 1)
    {
        const uint64x2_p rk = vec_splats((unsigned long long)subkeys[rounds-1]);

        y1 = VecXor(VecXor(y1, SIMON128_f(x1)), rk);
        std::swap(x1, y1);
    }

#if (CRYPTOPP_BIG_ENDIAN)
    const uint8x16_p m3 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
#else
    const uint8x16_p m3 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
#endif

    // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
    block = (uint32x4_p)VecPermute(x1, y1, m3);
}
inline void SIMON128_Dec_Block(uint32x4_p &block,
    const word64 *subkeys, unsigned int rounds)
{
#if (CRYPTOPP_BIG_ENDIAN)
    const uint8x16_p m1 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
    const uint8x16_p m2 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
#else
    const uint8x16_p m1 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
    const uint8x16_p m2 = {15,14,13,12,11,10,9,8, 31,30,29,28,27,26,25,24};
#endif

    // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
    uint64x2_p x1 = (uint64x2_p)VecPermute(block, block, m1);
    uint64x2_p y1 = (uint64x2_p)VecPermute(block, block, m2);

    if (rounds & 1)
    {
        std::swap(x1, y1);
        const uint64x2_p rk = vec_splats((unsigned long long)subkeys[rounds-1]);

        y1 = VecXor(VecXor(y1, rk), SIMON128_f(x1));
        rounds--;
    }

    for (int i = static_cast<int>(rounds-2); i >= 0; i -= 2)
    {
        const uint64x2_p rk1 = vec_splats((unsigned long long)subkeys[i+1]);
        x1 = VecXor(VecXor(x1, SIMON128_f(y1)), rk1);

        const uint64x2_p rk2 = vec_splats((unsigned long long)subkeys[i]);
        y1 = VecXor(VecXor(y1, SIMON128_f(x1)), rk2);
    }

#if (CRYPTOPP_BIG_ENDIAN)
    const uint8x16_p m3 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
#else
    const uint8x16_p m3 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
#endif

    // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
    block = (uint32x4_p)VecPermute(x1, y1, m3);
}
inline void SIMON128_Enc_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
    uint32x4_p &block2, uint32x4_p &block3, uint32x4_p &block4,
    uint32x4_p &block5, const word64 *subkeys, unsigned int rounds)
{
#if (CRYPTOPP_BIG_ENDIAN)
    const uint8x16_p m1 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
    const uint8x16_p m2 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
#else
    const uint8x16_p m1 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
    const uint8x16_p m2 = {15,14,13,12,11,10,9,8, 31,30,29,28,27,26,25,24};
#endif

    // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
    uint64x2_p x1 = (uint64x2_p)VecPermute(block0, block1, m1);
    uint64x2_p y1 = (uint64x2_p)VecPermute(block0, block1, m2);
    uint64x2_p x2 = (uint64x2_p)VecPermute(block2, block3, m1);
    uint64x2_p y2 = (uint64x2_p)VecPermute(block2, block3, m2);
    uint64x2_p x3 = (uint64x2_p)VecPermute(block4, block5, m1);
    uint64x2_p y3 = (uint64x2_p)VecPermute(block4, block5, m2);

    for (int i = 0; i < static_cast<int>(rounds & ~1)-1; i += 2)
    {
        const uint64x2_p rk1 = vec_splats((unsigned long long)subkeys[i]);
        y1 = VecXor(VecXor(y1, SIMON128_f(x1)), rk1);
        y2 = VecXor(VecXor(y2, SIMON128_f(x2)), rk1);
        y3 = VecXor(VecXor(y3, SIMON128_f(x3)), rk1);

        const uint64x2_p rk2 = vec_splats((unsigned long long)subkeys[i+1]);
        x1 = VecXor(VecXor(x1, SIMON128_f(y1)), rk2);
        x2 = VecXor(VecXor(x2, SIMON128_f(y2)), rk2);
        x3 = VecXor(VecXor(x3, SIMON128_f(y3)), rk2);
    }

    if (rounds & 1)
    {
        const uint64x2_p rk = vec_splats((unsigned long long)subkeys[rounds-1]);

        y1 = VecXor(VecXor(y1, SIMON128_f(x1)), rk);
        y2 = VecXor(VecXor(y2, SIMON128_f(x2)), rk);
        y3 = VecXor(VecXor(y3, SIMON128_f(x3)), rk);
        std::swap(x1, y1); std::swap(x2, y2); std::swap(x3, y3);
    }

#if (CRYPTOPP_BIG_ENDIAN)
    const uint8x16_p m3 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
    const uint8x16_p m4 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
#else
    const uint8x16_p m3 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
    const uint8x16_p m4 = {15,14,13,12,11,10,9,8, 31,30,29,28,27,26,25,24};
#endif

    // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
    block0 = (uint32x4_p)VecPermute(x1, y1, m3);
    block1 = (uint32x4_p)VecPermute(x1, y1, m4);
    block2 = (uint32x4_p)VecPermute(x2, y2, m3);
    block3 = (uint32x4_p)VecPermute(x2, y2, m4);
    block4 = (uint32x4_p)VecPermute(x3, y3, m3);
    block5 = (uint32x4_p)VecPermute(x3, y3, m4);
}
inline void SIMON128_Dec_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
    uint32x4_p &block2, uint32x4_p &block3, uint32x4_p &block4,
    uint32x4_p &block5, const word64 *subkeys, unsigned int rounds)
{
#if (CRYPTOPP_BIG_ENDIAN)
    const uint8x16_p m1 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
    const uint8x16_p m2 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
#else
    const uint8x16_p m1 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
    const uint8x16_p m2 = {15,14,13,12,11,10,9,8, 31,30,29,28,27,26,25,24};
#endif

    // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
    uint64x2_p x1 = (uint64x2_p)VecPermute(block0, block1, m1);
    uint64x2_p y1 = (uint64x2_p)VecPermute(block0, block1, m2);
    uint64x2_p x2 = (uint64x2_p)VecPermute(block2, block3, m1);
    uint64x2_p y2 = (uint64x2_p)VecPermute(block2, block3, m2);
    uint64x2_p x3 = (uint64x2_p)VecPermute(block4, block5, m1);
    uint64x2_p y3 = (uint64x2_p)VecPermute(block4, block5, m2);

    if (rounds & 1)
    {
        std::swap(x1, y1); std::swap(x2, y2); std::swap(x3, y3);
        const uint64x2_p rk = vec_splats((unsigned long long)subkeys[rounds-1]);

        y1 = VecXor(VecXor(y1, rk), SIMON128_f(x1));
        y2 = VecXor(VecXor(y2, rk), SIMON128_f(x2));
        y3 = VecXor(VecXor(y3, rk), SIMON128_f(x3));
        rounds--;
    }

    for (int i = static_cast<int>(rounds-2); i >= 0; i -= 2)
    {
        const uint64x2_p rk1 = vec_splats((unsigned long long)subkeys[i+1]);
        x1 = VecXor(VecXor(x1, SIMON128_f(y1)), rk1);
        x2 = VecXor(VecXor(x2, SIMON128_f(y2)), rk1);
        x3 = VecXor(VecXor(x3, SIMON128_f(y3)), rk1);

        const uint64x2_p rk2 = vec_splats((unsigned long long)subkeys[i]);
        y1 = VecXor(VecXor(y1, SIMON128_f(x1)), rk2);
        y2 = VecXor(VecXor(y2, SIMON128_f(x2)), rk2);
        y3 = VecXor(VecXor(y3, SIMON128_f(x3)), rk2);
    }

#if (CRYPTOPP_BIG_ENDIAN)
    const uint8x16_p m3 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
    const uint8x16_p m4 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
#else
    const uint8x16_p m3 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
    const uint8x16_p m4 = {15,14,13,12,11,10,9,8, 31,30,29,28,27,26,25,24};
#endif

    // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
    block0 = (uint32x4_p)VecPermute(x1, y1, m3);
    block1 = (uint32x4_p)VecPermute(x1, y1, m4);
    block2 = (uint32x4_p)VecPermute(x2, y2, m3);
    block3 = (uint32x4_p)VecPermute(x2, y2, m4);
    block4 = (uint32x4_p)VecPermute(x3, y3, m3);
    block5 = (uint32x4_p)VecPermute(x3, y3, m4);
}
#endif  // CRYPTOPP_POWER8_AVAILABLE

ANONYMOUS_NAMESPACE_END

NAMESPACE_BEGIN(CryptoPP)
#if (CRYPTOPP_ARM_NEON_AVAILABLE)
size_t SIMON128_Enc_AdvancedProcessBlocks_NEON(const word64* subKeys, size_t rounds,
    const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    return AdvancedProcessBlocks128_6x2_NEON(SIMON128_Enc_Block, SIMON128_Enc_6_Blocks,
        subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
}

size_t SIMON128_Dec_AdvancedProcessBlocks_NEON(const word64* subKeys, size_t rounds,
    const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    return AdvancedProcessBlocks128_6x2_NEON(SIMON128_Dec_Block, SIMON128_Dec_6_Blocks,
        subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
}
#endif  // CRYPTOPP_ARM_NEON_AVAILABLE

#if defined(CRYPTOPP_SSSE3_AVAILABLE)
size_t SIMON128_Enc_AdvancedProcessBlocks_SSSE3(const word64* subKeys, size_t rounds,
    const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    return AdvancedProcessBlocks128_6x2_SSE(SIMON128_Enc_Block, SIMON128_Enc_6_Blocks,
        subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
}

size_t SIMON128_Dec_AdvancedProcessBlocks_SSSE3(const word64* subKeys, size_t rounds,
    const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    return AdvancedProcessBlocks128_6x2_SSE(SIMON128_Dec_Block, SIMON128_Dec_6_Blocks,
        subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
}
#endif  // CRYPTOPP_SSSE3_AVAILABLE

#if defined(CRYPTOPP_POWER8_AVAILABLE)
size_t SIMON128_Enc_AdvancedProcessBlocks_POWER8(const word64* subKeys, size_t rounds,
    const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    return AdvancedProcessBlocks128_6x1_ALTIVEC(SIMON128_Enc_Block, SIMON128_Enc_6_Blocks,
        subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
}

size_t SIMON128_Dec_AdvancedProcessBlocks_POWER8(const word64* subKeys, size_t rounds,
    const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    return AdvancedProcessBlocks128_6x1_ALTIVEC(SIMON128_Dec_Block, SIMON128_Dec_6_Blocks,
        subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
}
#endif  // CRYPTOPP_POWER8_AVAILABLE

NAMESPACE_END
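// These dispatchers are not called directly by applications; simon.cpp
// selects them at runtime from CPU feature probes. A minimal sketch of the
// public API that eventually routes here (assuming the standard Crypto++
// headers and ECB for brevity; illustrative only):
//
//   #include "simon.h"
//   #include "modes.h"
//   #include "osrng.h"
//
//   CryptoPP::AutoSeededRandomPool prng;
//   CryptoPP::SecByteBlock key(CryptoPP::SIMON128::DEFAULT_KEYLENGTH);
//   prng.GenerateBlock(key, key.size());
//
//   CryptoPP::ECB_Mode<CryptoPP::SIMON128>::Encryption enc;
//   enc.SetKey(key, key.size());
//
//   CryptoPP::byte pt[16] = {0}, ct[16];
//   enc.ProcessData(ct, pt, 16);   // one 128-bit block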