Crypto++ 5.6.4
Free C++ class library of cryptographic schemes
sha.cpp
// sha.cpp - modified by Wei Dai from Steve Reid's public domain sha1.c

// Steve Reid implemented SHA-1. Wei Dai implemented SHA-2.
// Both are in the public domain.

// use "cl /EP /P /DCRYPTOPP_GENERATE_X64_MASM sha.cpp" to generate MASM code
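// (Editorial aside, not part of the original file: with MSVC, /P runs the
// preprocessor only and writes its output to a file, and /EP suppresses #line
// directives in that output; the preprocessed text is then assembled as MASM.
// A follow-on command such as "ml64.exe /c /Fo sha.obj <preprocessed file>" is
// shown for illustration only and is not a documented build step of this library.)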

#include "pch.h"
#include "config.h"

#if CRYPTOPP_MSC_VERSION
# pragma warning(disable: 4100 4731)
#endif

#ifndef CRYPTOPP_IMPORTS
#ifndef CRYPTOPP_GENERATE_X64_MASM

#include "secblock.h"
#include "sha.h"
#include "misc.h"
#include "cpu.h"

#if defined(CRYPTOPP_DISABLE_SHA_ASM)
# undef CRYPTOPP_X86_ASM_AVAILABLE
# undef CRYPTOPP_X32_ASM_AVAILABLE
# undef CRYPTOPP_X64_ASM_AVAILABLE
# undef CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE
#endif

NAMESPACE_BEGIN(CryptoPP)

// start of Steve Reid's code

#define blk0(i) (W[i] = data[i])
#define blk1(i) (W[i&15] = rotlFixed(W[(i+13)&15]^W[(i+8)&15]^W[(i+2)&15]^W[i&15],1))

void SHA1::InitState(HashWordType *state)
{
    state[0] = 0x67452301L;
    state[1] = 0xEFCDAB89L;
    state[2] = 0x98BADCFEL;
    state[3] = 0x10325476L;
    state[4] = 0xC3D2E1F0L;
}

#define f1(x,y,z) (z^(x&(y^z)))
#define f2(x,y,z) (x^y^z)
#define f3(x,y,z) ((x&y)|(z&(x|y)))
#define f4(x,y,z) (x^y^z)
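// (Editorial aside: f1 is SHA-1's "choice" function Ch(x,y,z) = (x&y)|(~x&z)
// rewritten as z^(x&(y^z)) to save an operation -- with x all-ones it yields
// z^(y^z) = y, and with x zero it yields z. f3 is the "majority" function, and
// f2/f4 are plain parity, matching the f_t definitions in FIPS 180-4.)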

/* (R0+R1), R2, R3, R4 are the different operations used in SHA1 */
#define R0(v,w,x,y,z,i) z+=f1(w,x,y)+blk0(i)+0x5A827999+rotlFixed(v,5);w=rotlFixed(w,30);
#define R1(v,w,x,y,z,i) z+=f1(w,x,y)+blk1(i)+0x5A827999+rotlFixed(v,5);w=rotlFixed(w,30);
#define R2(v,w,x,y,z,i) z+=f2(w,x,y)+blk1(i)+0x6ED9EBA1+rotlFixed(v,5);w=rotlFixed(w,30);
#define R3(v,w,x,y,z,i) z+=f3(w,x,y)+blk1(i)+0x8F1BBCDC+rotlFixed(v,5);w=rotlFixed(w,30);
#define R4(v,w,x,y,z,i) z+=f4(w,x,y)+blk1(i)+0xCA62C1D6+rotlFixed(v,5);w=rotlFixed(w,30);
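// (Editorial sketch, not original code: each R-macro is one SHA-1 round. Written
// as a plain function -- names hypothetical -- a round would read:
//
//   static inline void Round(word32 v, word32 &w, word32 &z,
//                            word32 f_wxy, word32 k, word32 wt)
//   {
//       z += f_wxy + k + wt + rotlFixed(v,5); // e += f(b,c,d) + K + W[t] + ROTL5(a)
//       w = rotlFixed(w,30);                  // b = ROTL30(b)
//   }
//
// Transform below gets the remaining variable rotation for free by shifting
// the argument order of a..e from one macro call to the next.)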

void SHA1::Transform(word32 *state, const word32 *data)
{
    word32 W[16];
    /* Copy context->state[] to working vars */
    word32 a = state[0];
    word32 b = state[1];
    word32 c = state[2];
    word32 d = state[3];
    word32 e = state[4];
    /* 4 rounds of 20 operations each. Loop unrolled. */
    R0(a,b,c,d,e, 0); R0(e,a,b,c,d, 1); R0(d,e,a,b,c, 2); R0(c,d,e,a,b, 3);
    R0(b,c,d,e,a, 4); R0(a,b,c,d,e, 5); R0(e,a,b,c,d, 6); R0(d,e,a,b,c, 7);
    R0(c,d,e,a,b, 8); R0(b,c,d,e,a, 9); R0(a,b,c,d,e,10); R0(e,a,b,c,d,11);
    R0(d,e,a,b,c,12); R0(c,d,e,a,b,13); R0(b,c,d,e,a,14); R0(a,b,c,d,e,15);
    R1(e,a,b,c,d,16); R1(d,e,a,b,c,17); R1(c,d,e,a,b,18); R1(b,c,d,e,a,19);
    R2(a,b,c,d,e,20); R2(e,a,b,c,d,21); R2(d,e,a,b,c,22); R2(c,d,e,a,b,23);
    R2(b,c,d,e,a,24); R2(a,b,c,d,e,25); R2(e,a,b,c,d,26); R2(d,e,a,b,c,27);
    R2(c,d,e,a,b,28); R2(b,c,d,e,a,29); R2(a,b,c,d,e,30); R2(e,a,b,c,d,31);
    R2(d,e,a,b,c,32); R2(c,d,e,a,b,33); R2(b,c,d,e,a,34); R2(a,b,c,d,e,35);
    R2(e,a,b,c,d,36); R2(d,e,a,b,c,37); R2(c,d,e,a,b,38); R2(b,c,d,e,a,39);
    R3(a,b,c,d,e,40); R3(e,a,b,c,d,41); R3(d,e,a,b,c,42); R3(c,d,e,a,b,43);
    R3(b,c,d,e,a,44); R3(a,b,c,d,e,45); R3(e,a,b,c,d,46); R3(d,e,a,b,c,47);
    R3(c,d,e,a,b,48); R3(b,c,d,e,a,49); R3(a,b,c,d,e,50); R3(e,a,b,c,d,51);
    R3(d,e,a,b,c,52); R3(c,d,e,a,b,53); R3(b,c,d,e,a,54); R3(a,b,c,d,e,55);
    R3(e,a,b,c,d,56); R3(d,e,a,b,c,57); R3(c,d,e,a,b,58); R3(b,c,d,e,a,59);
    R4(a,b,c,d,e,60); R4(e,a,b,c,d,61); R4(d,e,a,b,c,62); R4(c,d,e,a,b,63);
    R4(b,c,d,e,a,64); R4(a,b,c,d,e,65); R4(e,a,b,c,d,66); R4(d,e,a,b,c,67);
    R4(c,d,e,a,b,68); R4(b,c,d,e,a,69); R4(a,b,c,d,e,70); R4(e,a,b,c,d,71);
    R4(d,e,a,b,c,72); R4(c,d,e,a,b,73); R4(b,c,d,e,a,74); R4(a,b,c,d,e,75);
    R4(e,a,b,c,d,76); R4(d,e,a,b,c,77); R4(c,d,e,a,b,78); R4(b,c,d,e,a,79);
    /* Add the working vars back into context.state[] */
    state[0] += a;
    state[1] += b;
    state[2] += c;
    state[3] += d;
    state[4] += e;
}

// end of Steve Reid's code

// *************************************************************

void SHA224::InitState(HashWordType *state)
{
    static const word32 s[8] = {0xc1059ed8, 0x367cd507, 0x3070dd17, 0xf70e5939, 0xffc00b31, 0x68581511, 0x64f98fa7, 0xbefa4fa4};
    memcpy(state, s, sizeof(s));
}

void SHA256::InitState(HashWordType *state)
{
    static const word32 s[8] = {0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19};
    memcpy(state, s, sizeof(s));
}
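// (Editorial aside: per FIPS 180-4, the SHA-256 initial values above are the
// first 32 bits of the fractional parts of the square roots of the first eight
// primes, and SHA-224's are the second 32 bits for the ninth through sixteenth
// primes. Quick check of the first SHA-256 word:
//   (word32)((sqrt(2.0) - 1.0) * 4294967296.0) == 0x6a09e667)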

#if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE
CRYPTOPP_ALIGN_DATA(16) extern const word32 SHA256_K[64] CRYPTOPP_SECTION_ALIGN16 = {
#else
extern const word32 SHA256_K[64] = {
#endif
    0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
    0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
    0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
    0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
    0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
    0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
    0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
    0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
    0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
    0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
    0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
    0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
    0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
    0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
    0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
    0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
};
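// (Editorial aside: SHA256_K holds the first 32 bits of the fractional parts
// of the cube roots of the first 64 primes, per FIPS 180-4. For example,
// cbrt(2) = 1.2599..., and 0.2599... * 2^32 truncates to 0x428a2f98, the
// first entry above.)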

#endif // #ifndef CRYPTOPP_GENERATE_X64_MASM

#if (defined(CRYPTOPP_X86_ASM_AVAILABLE) || defined(CRYPTOPP_X32_ASM_AVAILABLE) || defined(CRYPTOPP_GENERATE_X64_MASM))

static void CRYPTOPP_FASTCALL X86_SHA256_HashBlocks(word32 *state, const word32 *data, size_t len
#if defined(_MSC_VER) && (_MSC_VER == 1200)
    , ... // VC60 workaround: prevent VC 6 from inlining this function
#endif
    )
{
#if defined(_MSC_VER) && (_MSC_VER == 1200)
    AS2( mov ecx, [state])
    AS2( mov edx, [data])
#endif

    #define LOCALS_SIZE 8*4 + 16*4 + 4*WORD_SZ
    #define H(i) [BASE+ASM_MOD(1024+7-(i),8)*4]
    #define G(i) H(i+1)
    #define F(i) H(i+2)
    #define E(i) H(i+3)
    #define D(i) H(i+4)
    #define C(i) H(i+5)
    #define B(i) H(i+6)
    #define A(i) H(i+7)
    #define Wt(i) BASE+8*4+ASM_MOD(1024+15-(i),16)*4
    #define Wt_2(i) Wt((i)-2)
    #define Wt_15(i) Wt((i)-15)
    #define Wt_7(i) Wt((i)-7)
    #define K_END [BASE+8*4+16*4+0*WORD_SZ]
    #define STATE_SAVE [BASE+8*4+16*4+1*WORD_SZ]
    #define DATA_SAVE [BASE+8*4+16*4+2*WORD_SZ]
    #define DATA_END [BASE+8*4+16*4+3*WORD_SZ]
    #define Kt(i) WORD_REG(si)+(i)*4
#if CRYPTOPP_BOOL_X32
    #define BASE esp+8
#elif CRYPTOPP_BOOL_X86
    #define BASE esp+4
#elif defined(__GNUC__)
    #define BASE r8
#else
    #define BASE rsp
#endif

#define RA0(i, edx, edi) \
    AS2( add edx, [Kt(i)] )\
    AS2( add edx, [Wt(i)] )\
    AS2( add edx, H(i) )\

#define RA1(i, edx, edi)

#define RB0(i, edx, edi)

#define RB1(i, edx, edi) \
    AS2( mov AS_REG_7d, [Wt_2(i)] )\
    AS2( mov edi, [Wt_15(i)])\
    AS2( mov ebx, AS_REG_7d )\
    AS2( shr AS_REG_7d, 10 )\
    AS2( ror ebx, 17 )\
    AS2( xor AS_REG_7d, ebx )\
    AS2( ror ebx, 2 )\
    AS2( xor ebx, AS_REG_7d )/* s1(W_t-2) */\
    AS2( add ebx, [Wt_7(i)])\
    AS2( mov AS_REG_7d, edi )\
    AS2( shr AS_REG_7d, 3 )\
    AS2( ror edi, 7 )\
    AS2( add ebx, [Wt(i)])/* s1(W_t-2) + W_t-7 + W_t-16 */\
    AS2( xor AS_REG_7d, edi )\
    AS2( add edx, [Kt(i)])\
    AS2( ror edi, 11 )\
    AS2( add edx, H(i) )\
    AS2( xor AS_REG_7d, edi )/* s0(W_t-15) */\
    AS2( add AS_REG_7d, ebx )/* W_t = s1(W_t-2) + W_t-7 + s0(W_t-15) + W_t-16 */\
    AS2( mov [Wt(i)], AS_REG_7d)\
    AS2( add edx, AS_REG_7d )\

#define ROUND(i, r, eax, ecx, edi, edx)\
    /* in: edi = E */\
    /* unused: eax, ecx, temp: ebx, AS_REG_7d, out: edx = T1 */\
    AS2( mov edx, F(i) )\
    AS2( xor edx, G(i) )\
    AS2( and edx, edi )\
    AS2( xor edx, G(i) )/* Ch(E,F,G) = (G^(E&(F^G))) */\
    AS2( mov AS_REG_7d, edi )\
    AS2( ror edi, 6 )\
    AS2( ror AS_REG_7d, 25 )\
    RA##r(i, edx, edi )/* H + Wt + Kt + Ch(E,F,G) */\
    AS2( xor AS_REG_7d, edi )\
    AS2( ror edi, 5 )\
    AS2( xor AS_REG_7d, edi )/* S1(E) */\
    AS2( add edx, AS_REG_7d )/* T1 = S1(E) + Ch(E,F,G) + H + Wt + Kt */\
    RB##r(i, edx, edi )/* H + Wt + Kt + Ch(E,F,G) */\
    /* in: ecx = A, eax = B^C, edx = T1 */\
    /* unused: edx, temp: ebx, AS_REG_7d, out: eax = A, ecx = B^C, edx = E */\
    AS2( mov ebx, ecx )\
    AS2( xor ecx, B(i) )/* A^B */\
    AS2( and eax, ecx )\
    AS2( xor eax, B(i) )/* Maj(A,B,C) = B^((A^B)&(B^C)) */\
    AS2( mov AS_REG_7d, ebx )\
    AS2( ror ebx, 2 )\
    AS2( add eax, edx )/* T1 + Maj(A,B,C) */\
    AS2( add edx, D(i) )\
    AS2( mov D(i), edx )\
    AS2( ror AS_REG_7d, 22 )\
    AS2( xor AS_REG_7d, ebx )\
    AS2( ror ebx, 11 )\
    AS2( xor AS_REG_7d, ebx )\
    AS2( add eax, AS_REG_7d )/* T1 + S0(A) + Maj(A,B,C) */\
    AS2( mov H(i), eax )\

// Unroll the use of CRYPTOPP_BOOL_X64 in assembler math. The GAS assembler on X32 (version 2.25)
// complains "Error: invalid operands (*ABS* and *UND* sections) for `*` and `-`"
#if CRYPTOPP_BOOL_X64
#define SWAP_COPY(i) \
    AS2( mov WORD_REG(bx), [WORD_REG(dx)+i*WORD_SZ])\
    AS1( bswap WORD_REG(bx))\
    AS2( mov [Wt(i*2+1)], WORD_REG(bx))
#else // X86 and X32
#define SWAP_COPY(i) \
    AS2( mov WORD_REG(bx), [WORD_REG(dx)+i*WORD_SZ])\
    AS1( bswap WORD_REG(bx))\
    AS2( mov [Wt(i)], WORD_REG(bx))
#endif

#if defined(__GNUC__)
    #if CRYPTOPP_BOOL_X64
        FixedSizeAlignedSecBlock<byte, LOCALS_SIZE> workspace;
    #endif
    __asm__ __volatile__
    (
    #if CRYPTOPP_BOOL_X64
        "lea %4, %%r8;"
    #endif
    INTEL_NOPREFIX
#elif defined(CRYPTOPP_GENERATE_X64_MASM)
    ALIGN 8
    X86_SHA256_HashBlocks PROC FRAME
    rex_push_reg rsi
    push_reg rdi
    push_reg rbx
    push_reg rbp
    alloc_stack(LOCALS_SIZE+8)
    .endprolog
    mov rdi, r8
    lea rsi, [?SHA256_K@CryptoPP@@3QBIB + 48*4]
#endif

#if CRYPTOPP_BOOL_X86 || CRYPTOPP_BOOL_X32
    #ifndef __GNUC__
    AS2( mov edi, [len])
    AS2( lea WORD_REG(si), [SHA256_K+48*4])
    #endif
    #if !defined(_MSC_VER) || (_MSC_VER < 1400)
    AS_PUSH_IF86(bx)
    #endif

    AS_PUSH_IF86(bp)
    AS2( mov ebx, esp)
    AS2( and esp, -16)
    AS2( sub WORD_REG(sp), LOCALS_SIZE)
    AS_PUSH_IF86(bx)
#endif
    AS2( mov STATE_SAVE, WORD_REG(cx))
    AS2( mov DATA_SAVE, WORD_REG(dx))
    AS2( lea WORD_REG(ax), [WORD_REG(di) + WORD_REG(dx)])
    AS2( mov DATA_END, WORD_REG(ax))
    AS2( mov K_END, WORD_REG(si))

#if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE
#if CRYPTOPP_BOOL_X86 || CRYPTOPP_BOOL_X32
    AS2( test edi, 1)
    ASJ( jnz, 2, f)
    AS1( dec DWORD PTR K_END)
#endif
    AS2( movdqa xmm0, XMMWORD_PTR [WORD_REG(cx)+0*16])
    AS2( movdqa xmm1, XMMWORD_PTR [WORD_REG(cx)+1*16])
#endif

#if CRYPTOPP_BOOL_X86 || CRYPTOPP_BOOL_X32
#if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE
    ASJ( jmp, 0, f)
#endif
    ASL(2) // non-SSE2
    AS2( mov esi, ecx)
    AS2( lea edi, A(0))
    AS2( mov ecx, 8)
ATT_NOPREFIX
    AS1( rep movsd)
INTEL_NOPREFIX
    AS2( mov esi, K_END)
    ASJ( jmp, 3, f)
#endif

#if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE
    ASL(0)
    AS2( movdqa E(0), xmm1)
    AS2( movdqa A(0), xmm0)
#endif
#if CRYPTOPP_BOOL_X86 || CRYPTOPP_BOOL_X32
    ASL(3)
#endif
    AS2( sub WORD_REG(si), 48*4)
    SWAP_COPY(0) SWAP_COPY(1) SWAP_COPY(2) SWAP_COPY(3)
    SWAP_COPY(4) SWAP_COPY(5) SWAP_COPY(6) SWAP_COPY(7)
#if CRYPTOPP_BOOL_X86 || CRYPTOPP_BOOL_X32
    SWAP_COPY(8) SWAP_COPY(9) SWAP_COPY(10) SWAP_COPY(11)
    SWAP_COPY(12) SWAP_COPY(13) SWAP_COPY(14) SWAP_COPY(15)
#endif
    AS2( mov edi, E(0)) // E
    AS2( mov eax, B(0)) // B
    AS2( xor eax, C(0)) // B^C
    AS2( mov ecx, A(0)) // A

    ROUND(0, 0, eax, ecx, edi, edx)
    ROUND(1, 0, ecx, eax, edx, edi)
    ROUND(2, 0, eax, ecx, edi, edx)
    ROUND(3, 0, ecx, eax, edx, edi)
    ROUND(4, 0, eax, ecx, edi, edx)
    ROUND(5, 0, ecx, eax, edx, edi)
    ROUND(6, 0, eax, ecx, edi, edx)
    ROUND(7, 0, ecx, eax, edx, edi)
    ROUND(8, 0, eax, ecx, edi, edx)
    ROUND(9, 0, ecx, eax, edx, edi)
    ROUND(10, 0, eax, ecx, edi, edx)
    ROUND(11, 0, ecx, eax, edx, edi)
    ROUND(12, 0, eax, ecx, edi, edx)
    ROUND(13, 0, ecx, eax, edx, edi)
    ROUND(14, 0, eax, ecx, edi, edx)
    ROUND(15, 0, ecx, eax, edx, edi)

    ASL(1)
    AS2(add WORD_REG(si), 4*16)
    ROUND(0, 1, eax, ecx, edi, edx)
    ROUND(1, 1, ecx, eax, edx, edi)
    ROUND(2, 1, eax, ecx, edi, edx)
    ROUND(3, 1, ecx, eax, edx, edi)
    ROUND(4, 1, eax, ecx, edi, edx)
    ROUND(5, 1, ecx, eax, edx, edi)
    ROUND(6, 1, eax, ecx, edi, edx)
    ROUND(7, 1, ecx, eax, edx, edi)
    ROUND(8, 1, eax, ecx, edi, edx)
    ROUND(9, 1, ecx, eax, edx, edi)
    ROUND(10, 1, eax, ecx, edi, edx)
    ROUND(11, 1, ecx, eax, edx, edi)
    ROUND(12, 1, eax, ecx, edi, edx)
    ROUND(13, 1, ecx, eax, edx, edi)
    ROUND(14, 1, eax, ecx, edi, edx)
    ROUND(15, 1, ecx, eax, edx, edi)
    AS2( cmp WORD_REG(si), K_END)
    ATT_NOPREFIX
    ASJ( jb, 1, b)
    INTEL_NOPREFIX

    AS2( mov WORD_REG(dx), DATA_SAVE)
    AS2( add WORD_REG(dx), 64)
    AS2( mov AS_REG_7, STATE_SAVE)
    AS2( mov DATA_SAVE, WORD_REG(dx))

#if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE
#if CRYPTOPP_BOOL_X86 || CRYPTOPP_BOOL_X32
    AS2( test DWORD PTR K_END, 1)
    ASJ( jz, 4, f)
#endif
    AS2( movdqa xmm1, XMMWORD_PTR [AS_REG_7+1*16])
    AS2( movdqa xmm0, XMMWORD_PTR [AS_REG_7+0*16])
    AS2( paddd xmm1, E(0))
    AS2( paddd xmm0, A(0))
    AS2( movdqa [AS_REG_7+1*16], xmm1)
    AS2( movdqa [AS_REG_7+0*16], xmm0)
    AS2( cmp WORD_REG(dx), DATA_END)
    ATT_NOPREFIX
    ASJ( jb, 0, b)
    INTEL_NOPREFIX
#endif

#if CRYPTOPP_BOOL_X86 || CRYPTOPP_BOOL_X32
#if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE
    ASJ( jmp, 5, f)
    ASL(4) // non-SSE2
#endif
    AS2( add [AS_REG_7+0*4], ecx) // A
    AS2( add [AS_REG_7+4*4], edi) // E
    AS2( mov eax, B(0))
    AS2( mov ebx, C(0))
    AS2( mov ecx, D(0))
    AS2( add [AS_REG_7+1*4], eax)
    AS2( add [AS_REG_7+2*4], ebx)
    AS2( add [AS_REG_7+3*4], ecx)
    AS2( mov eax, F(0))
    AS2( mov ebx, G(0))
    AS2( mov ecx, H(0))
    AS2( add [AS_REG_7+5*4], eax)
    AS2( add [AS_REG_7+6*4], ebx)
    AS2( add [AS_REG_7+7*4], ecx)
    AS2( mov ecx, AS_REG_7d)
    AS2( cmp WORD_REG(dx), DATA_END)
    ASJ( jb, 2, b)
#if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE
    ASL(5)
#endif
#endif

    AS_POP_IF86(sp)
    AS_POP_IF86(bp)
    #if !defined(_MSC_VER) || (_MSC_VER < 1400)
    AS_POP_IF86(bx)
    #endif

#ifdef CRYPTOPP_GENERATE_X64_MASM
    add rsp, LOCALS_SIZE+8
    pop rbp
    pop rbx
    pop rdi
    pop rsi
    ret
    X86_SHA256_HashBlocks ENDP
#endif

#ifdef __GNUC__
    ATT_PREFIX
    :
    : "c" (state), "d" (data), "S" (SHA256_K+48), "D" (len)
    #if CRYPTOPP_BOOL_X64
    , "m" (workspace[0])
    #endif
    : "memory", "cc", "%eax"
    #if CRYPTOPP_BOOL_X64
    , "%rbx", "%r8", "%r10"
    #endif
    );
#endif
}

#endif // (defined(CRYPTOPP_X86_ASM_AVAILABLE) || defined(CRYPTOPP_GENERATE_X64_MASM))

#ifndef CRYPTOPP_GENERATE_X64_MASM

#ifdef CRYPTOPP_X64_MASM_AVAILABLE
extern "C" {
void CRYPTOPP_FASTCALL X86_SHA256_HashBlocks(word32 *state, const word32 *data, size_t len);
}
#endif

#if (defined(CRYPTOPP_X86_ASM_AVAILABLE) || defined(CRYPTOPP_X32_ASM_AVAILABLE) || defined(CRYPTOPP_X64_MASM_AVAILABLE)) && !defined(CRYPTOPP_DISABLE_SHA_ASM)

size_t SHA256::HashMultipleBlocks(const word32 *input, size_t length)
{
    X86_SHA256_HashBlocks(m_state, input, (length&(size_t(0)-BLOCKSIZE)) - !HasSSE2());
    return length % BLOCKSIZE;
}

size_t SHA224::HashMultipleBlocks(const word32 *input, size_t length)
{
    X86_SHA256_HashBlocks(m_state, input, (length&(size_t(0)-BLOCKSIZE)) - !HasSSE2());
    return length % BLOCKSIZE;
}

#endif
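// (Editorial aside: length & (size_t(0)-BLOCKSIZE) rounds length down to a
// whole number of 64-byte blocks, so the value passed as len is always even;
// the "- !HasSSE2()" then makes it odd exactly when SSE2 is absent, which is
// the bit that X86_SHA256_HashBlocks's "test edi, 1" checks to select its
// non-SSE2 code path.)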

#define blk2(i) (W[i&15]+=s1(W[(i-2)&15])+W[(i-7)&15]+s0(W[(i-15)&15]))

#define Ch(x,y,z) (z^(x&(y^z)))
#define Maj(x,y,z) (y^((x^y)&(y^z)))

#define a(i) T[(0-i)&7]
#define b(i) T[(1-i)&7]
#define c(i) T[(2-i)&7]
#define d(i) T[(3-i)&7]
#define e(i) T[(4-i)&7]
#define f(i) T[(5-i)&7]
#define g(i) T[(6-i)&7]
#define h(i) T[(7-i)&7]
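// (Editorial aside: a(i)..h(i) index the single array T[8] with an offset that
// rotates with the round number, so the eight working variables are renamed
// rather than moved: what a(i) denotes in round i is exactly b(i+1) in round
// i+1, replacing the textbook h=g; g=f; ... a=T1+T2 shuffle.)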

#define R(i) h(i)+=S1(e(i))+Ch(e(i),f(i),g(i))+SHA256_K[i+j]+(j?blk2(i):blk0(i));\
    d(i)+=h(i);h(i)+=S0(a(i))+Maj(a(i),b(i),c(i))

// for SHA256
#define S0(x) (rotrFixed(x,2)^rotrFixed(x,13)^rotrFixed(x,22))
#define S1(x) (rotrFixed(x,6)^rotrFixed(x,11)^rotrFixed(x,25))
#define s0(x) (rotrFixed(x,7)^rotrFixed(x,18)^(x>>3))
#define s1(x) (rotrFixed(x,17)^rotrFixed(x,19)^(x>>10))
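// (Editorial sketch: with the s0/s1 macros above, the SHA-256 message schedule
// in its textbook form would be
//
//   for (unsigned int i = 16; i < 64; i++)
//       W[i] = s1(W[i-2]) + W[i-7] + s0(W[i-15]) + W[i-16];
//
// blk2() computes the same recurrence inside a rolling 16-word window, which
// is why it indexes W with "& 15".)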

// Smaller but slower
#if defined(__OPTIMIZE_SIZE__)
void SHA256::Transform(word32 *state, const word32 *data)
{
    word32 T[20];
    word32 W[32];
    unsigned int i = 0, j = 0;
    word32 *t = T+8;

    memcpy(t, state, 8*4);
    word32 e = t[4], a = t[0];

    do
    {
        word32 w = data[j];
        W[j] = w;
        w += SHA256_K[j];
        w += t[7];
        w += S1(e);
        w += Ch(e, t[5], t[6]);
        e = t[3] + w;
        t[3] = t[3+8] = e;
        w += S0(t[0]);
        a = w + Maj(a, t[1], t[2]);
        t[-1] = t[7] = a;
        --t;
        ++j;
        if (j%8 == 0)
            t += 8;
    } while (j<16);

    do
    {
        i = j&0xf;
        word32 w = s1(W[i+16-2]) + s0(W[i+16-15]) + W[i] + W[i+16-7];
        W[i+16] = W[i] = w;
        w += SHA256_K[j];
        w += t[7];
        w += S1(e);
        w += Ch(e, t[5], t[6]);
        e = t[3] + w;
        t[3] = t[3+8] = e;
        w += S0(t[0]);
        a = w + Maj(a, t[1], t[2]);
        t[-1] = t[7] = a;

        w = s1(W[(i+1)+16-2]) + s0(W[(i+1)+16-15]) + W[(i+1)] + W[(i+1)+16-7];
        W[(i+1)+16] = W[(i+1)] = w;
        w += SHA256_K[j+1];
        w += (t-1)[7];
        w += S1(e);
        w += Ch(e, (t-1)[5], (t-1)[6]);
        e = (t-1)[3] + w;
        (t-1)[3] = (t-1)[3+8] = e;
        w += S0((t-1)[0]);
        a = w + Maj(a, (t-1)[1], (t-1)[2]);
        (t-1)[-1] = (t-1)[7] = a;

        t-=2;
        j+=2;
        if (j%8 == 0)
            t += 8;
    } while (j<64);

    state[0] += a;
    state[1] += t[1];
    state[2] += t[2];
    state[3] += t[3];
    state[4] += e;
    state[5] += t[5];
    state[6] += t[6];
    state[7] += t[7];
}
#else
void SHA256::Transform(word32 *state, const word32 *data)
{
    word32 W[16];
#if (defined(CRYPTOPP_X86_ASM_AVAILABLE) || defined(CRYPTOPP_X32_ASM_AVAILABLE) || defined(CRYPTOPP_X64_MASM_AVAILABLE)) && !defined(CRYPTOPP_DISABLE_SHA_ASM)
    // this byte reverse is a waste of time, but this function is only called by MDC
    ByteReverse(W, data, BLOCKSIZE);
    X86_SHA256_HashBlocks(state, W, BLOCKSIZE - !HasSSE2());
#else
    word32 T[8];
    /* Copy context->state[] to working vars */
    memcpy(T, state, sizeof(T));
    /* 64 operations, partially loop unrolled */
    for (unsigned int j=0; j<64; j+=16)
    {
        R( 0); R( 1); R( 2); R( 3);
        R( 4); R( 5); R( 6); R( 7);
        R( 8); R( 9); R(10); R(11);
        R(12); R(13); R(14); R(15);
    }
    /* Add the working vars back into context.state[] */
    state[0] += a(0);
    state[1] += b(0);
    state[2] += c(0);
    state[3] += d(0);
    state[4] += e(0);
    state[5] += f(0);
    state[6] += g(0);
    state[7] += h(0);
#endif
}
#endif

#undef S0
#undef S1
#undef s0
#undef s1
#undef R

// *************************************************************

void SHA384::InitState(HashWordType *state)
{
    static const word64 s[8] = {
        W64LIT(0xcbbb9d5dc1059ed8), W64LIT(0x629a292a367cd507),
        W64LIT(0x9159015a3070dd17), W64LIT(0x152fecd8f70e5939),
        W64LIT(0x67332667ffc00b31), W64LIT(0x8eb44a8768581511),
        W64LIT(0xdb0c2e0d64f98fa7), W64LIT(0x47b5481dbefa4fa4)};
    memcpy(state, s, sizeof(s));
}

void SHA512::InitState(HashWordType *state)
{
    static const word64 s[8] = {
        W64LIT(0x6a09e667f3bcc908), W64LIT(0xbb67ae8584caa73b),
        W64LIT(0x3c6ef372fe94f82b), W64LIT(0xa54ff53a5f1d36f1),
        W64LIT(0x510e527fade682d1), W64LIT(0x9b05688c2b3e6c1f),
        W64LIT(0x1f83d9abfb41bd6b), W64LIT(0x5be0cd19137e2179)};
    memcpy(state, s, sizeof(s));
}

CRYPTOPP_ALIGN_DATA(16) static const word64 SHA512_K[80] CRYPTOPP_SECTION_ALIGN16 = {
    W64LIT(0x428a2f98d728ae22), W64LIT(0x7137449123ef65cd),
    W64LIT(0xb5c0fbcfec4d3b2f), W64LIT(0xe9b5dba58189dbbc),
    W64LIT(0x3956c25bf348b538), W64LIT(0x59f111f1b605d019),
    W64LIT(0x923f82a4af194f9b), W64LIT(0xab1c5ed5da6d8118),
    W64LIT(0xd807aa98a3030242), W64LIT(0x12835b0145706fbe),
    W64LIT(0x243185be4ee4b28c), W64LIT(0x550c7dc3d5ffb4e2),
    W64LIT(0x72be5d74f27b896f), W64LIT(0x80deb1fe3b1696b1),
    W64LIT(0x9bdc06a725c71235), W64LIT(0xc19bf174cf692694),
    W64LIT(0xe49b69c19ef14ad2), W64LIT(0xefbe4786384f25e3),
    W64LIT(0x0fc19dc68b8cd5b5), W64LIT(0x240ca1cc77ac9c65),
    W64LIT(0x2de92c6f592b0275), W64LIT(0x4a7484aa6ea6e483),
    W64LIT(0x5cb0a9dcbd41fbd4), W64LIT(0x76f988da831153b5),
    W64LIT(0x983e5152ee66dfab), W64LIT(0xa831c66d2db43210),
    W64LIT(0xb00327c898fb213f), W64LIT(0xbf597fc7beef0ee4),
    W64LIT(0xc6e00bf33da88fc2), W64LIT(0xd5a79147930aa725),
    W64LIT(0x06ca6351e003826f), W64LIT(0x142929670a0e6e70),
    W64LIT(0x27b70a8546d22ffc), W64LIT(0x2e1b21385c26c926),
    W64LIT(0x4d2c6dfc5ac42aed), W64LIT(0x53380d139d95b3df),
    W64LIT(0x650a73548baf63de), W64LIT(0x766a0abb3c77b2a8),
    W64LIT(0x81c2c92e47edaee6), W64LIT(0x92722c851482353b),
    W64LIT(0xa2bfe8a14cf10364), W64LIT(0xa81a664bbc423001),
    W64LIT(0xc24b8b70d0f89791), W64LIT(0xc76c51a30654be30),
    W64LIT(0xd192e819d6ef5218), W64LIT(0xd69906245565a910),
    W64LIT(0xf40e35855771202a), W64LIT(0x106aa07032bbd1b8),
    W64LIT(0x19a4c116b8d2d0c8), W64LIT(0x1e376c085141ab53),
    W64LIT(0x2748774cdf8eeb99), W64LIT(0x34b0bcb5e19b48a8),
    W64LIT(0x391c0cb3c5c95a63), W64LIT(0x4ed8aa4ae3418acb),
    W64LIT(0x5b9cca4f7763e373), W64LIT(0x682e6ff3d6b2b8a3),
    W64LIT(0x748f82ee5defb2fc), W64LIT(0x78a5636f43172f60),
    W64LIT(0x84c87814a1f0ab72), W64LIT(0x8cc702081a6439ec),
    W64LIT(0x90befffa23631e28), W64LIT(0xa4506cebde82bde9),
    W64LIT(0xbef9a3f7b2c67915), W64LIT(0xc67178f2e372532b),
    W64LIT(0xca273eceea26619c), W64LIT(0xd186b8c721c0c207),
    W64LIT(0xeada7dd6cde0eb1e), W64LIT(0xf57d4f7fee6ed178),
    W64LIT(0x06f067aa72176fba), W64LIT(0x0a637dc5a2c898a6),
    W64LIT(0x113f9804bef90dae), W64LIT(0x1b710b35131c471b),
    W64LIT(0x28db77f523047d84), W64LIT(0x32caab7b40c72493),
    W64LIT(0x3c9ebe0a15c9bebc), W64LIT(0x431d67c49c100d4c),
    W64LIT(0x4cc5d4becb3e42b6), W64LIT(0x597f299cfc657e2a),
    W64LIT(0x5fcb6fab3ad6faec), W64LIT(0x6c44198c4a475817)
};

#if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE && (CRYPTOPP_BOOL_X86 || CRYPTOPP_BOOL_X32)
// put assembly version in separate function, otherwise MSVC 2005 SP1 doesn't generate correct code for the non-assembly version
CRYPTOPP_NAKED static void CRYPTOPP_FASTCALL SHA512_SSE2_Transform(word64 *state, const word64 *data)
{
#ifdef __GNUC__
    __asm__ __volatile__
    (
    INTEL_NOPREFIX
    AS_PUSH_IF86( bx)
    AS2( mov ebx, eax)
#else
    AS1( push ebx)
    AS1( push esi)
    AS1( push edi)
    AS2( lea ebx, SHA512_K)
#endif

    AS2( mov eax, esp)
    AS2( and esp, 0xfffffff0)
    AS2( sub esp, 27*16) // 17*16 for expanded data, 20*8 for state
    AS_PUSH_IF86( ax)
    AS2( xor eax, eax)

#if CRYPTOPP_BOOL_X32
    AS2( lea edi, [esp+8+8*8]) // start at middle of state buffer. will decrement pointer each round to avoid copying
    AS2( lea esi, [esp+8+20*8+8]) // 16-byte alignment, then add 8
#else
    AS2( lea edi, [esp+4+8*8]) // start at middle of state buffer. will decrement pointer each round to avoid copying
    AS2( lea esi, [esp+4+20*8+8]) // 16-byte alignment, then add 8
#endif

    AS2( movdqa xmm0, [ecx+0*16])
    AS2( movdq2q mm4, xmm0)
    AS2( movdqa [edi+0*16], xmm0)
    AS2( movdqa xmm0, [ecx+1*16])
    AS2( movdqa [edi+1*16], xmm0)
    AS2( movdqa xmm0, [ecx+2*16])
    AS2( movdq2q mm5, xmm0)
    AS2( movdqa [edi+2*16], xmm0)
    AS2( movdqa xmm0, [ecx+3*16])
    AS2( movdqa [edi+3*16], xmm0)
    ASJ( jmp, 0, f)

#define SSE2_S0_S1(r, a, b, c) \
    AS2( movq mm6, r)\
    AS2( psrlq r, a)\
    AS2( movq mm7, r)\
    AS2( psllq mm6, 64-c)\
    AS2( pxor mm7, mm6)\
    AS2( psrlq r, b-a)\
    AS2( pxor mm7, r)\
    AS2( psllq mm6, c-b)\
    AS2( pxor mm7, mm6)\
    AS2( psrlq r, c-b)\
    AS2( pxor r, mm7)\
    AS2( psllq mm6, b-a)\
    AS2( pxor r, mm6)
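// (Editorial aside: MMX/SSE2 provide 64-bit shifts (psllq/psrlq) but no 64-bit
// rotate, so SSE2_S0_S1 synthesizes the three rotations of S0/S1 from shifts
// and xors, staging the shift counts as a, b-a and c-b so each intermediate
// result can be reused for the next rotation.)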

#define SSE2_s0(r, a, b, c) \
    AS2( movdqa xmm6, r)\
    AS2( psrlq r, a)\
    AS2( movdqa xmm7, r)\
    AS2( psllq xmm6, 64-c)\
    AS2( pxor xmm7, xmm6)\
    AS2( psrlq r, b-a)\
    AS2( pxor xmm7, r)\
    AS2( psrlq r, c-b)\
    AS2( pxor r, xmm7)\
    AS2( psllq xmm6, c-a)\
    AS2( pxor r, xmm6)

#define SSE2_s1(r, a, b, c) \
    AS2( movdqa xmm6, r)\
    AS2( psrlq r, a)\
    AS2( movdqa xmm7, r)\
    AS2( psllq xmm6, 64-c)\
    AS2( pxor xmm7, xmm6)\
    AS2( psrlq r, b-a)\
    AS2( pxor xmm7, r)\
    AS2( psllq xmm6, c-b)\
    AS2( pxor xmm7, xmm6)\
    AS2( psrlq r, c-b)\
    AS2( pxor r, xmm7)

    ASL(SHA512_Round)
    // k + w is in mm0, a is in mm4, e is in mm5
    AS2( paddq mm0, [edi+7*8]) // h
    AS2( movq mm2, [edi+5*8]) // f
    AS2( movq mm3, [edi+6*8]) // g
    AS2( pxor mm2, mm3)
    AS2( pand mm2, mm5)
    SSE2_S0_S1(mm5,14,18,41)
    AS2( pxor mm2, mm3)
    AS2( paddq mm0, mm2) // h += Ch(e,f,g)
    AS2( paddq mm5, mm0) // h += S1(e)
    AS2( movq mm2, [edi+1*8]) // b
    AS2( movq mm1, mm2)
    AS2( por mm2, mm4)
    AS2( pand mm2, [edi+2*8]) // c
    AS2( pand mm1, mm4)
    AS2( por mm1, mm2)
    AS2( paddq mm1, mm5) // temp = h + Maj(a,b,c)
    AS2( paddq mm5, [edi+3*8]) // e = d + h
    AS2( movq [edi+3*8], mm5)
    AS2( movq [edi+11*8], mm5)
    SSE2_S0_S1(mm4,28,34,39) // S0(a)
    AS2( paddq mm4, mm1) // a = temp + S0(a)
    AS2( movq [edi-8], mm4)
    AS2( movq [edi+7*8], mm4)
    AS1( ret)

    // first 16 rounds
    ASL(0)
    AS2( movq mm0, [edx+eax*8])
    AS2( movq [esi+eax*8], mm0)
    AS2( movq [esi+eax*8+16*8], mm0)
    AS2( paddq mm0, [ebx+eax*8])
    ASC( call, SHA512_Round)
    AS1( inc eax)
    AS2( sub edi, 8)
    AS2( test eax, 7)
    ASJ( jnz, 0, b)
    AS2( add edi, 8*8)
    AS2( cmp eax, 16)
    ASJ( jne, 0, b)

    // rest of the rounds
    AS2( movdqu xmm0, [esi+(16-2)*8])
    ASL(1)
    // data expansion, W[i-2] already in xmm0
    AS2( movdqu xmm3, [esi])
    AS2( paddq xmm3, [esi+(16-7)*8])
    AS2( movdqa xmm2, [esi+(16-15)*8])
    SSE2_s1(xmm0, 6, 19, 61)
    AS2( paddq xmm0, xmm3)
    SSE2_s0(xmm2, 1, 7, 8)
    AS2( paddq xmm0, xmm2)
    AS2( movdq2q mm0, xmm0)
    AS2( movhlps xmm1, xmm0)
    AS2( paddq mm0, [ebx+eax*8])
    AS2( movlps [esi], xmm0)
    AS2( movlps [esi+8], xmm1)
    AS2( movlps [esi+8*16], xmm0)
    AS2( movlps [esi+8*17], xmm1)
    // 2 rounds
    ASC( call, SHA512_Round)
    AS2( sub edi, 8)
    AS2( movdq2q mm0, xmm1)
    AS2( paddq mm0, [ebx+eax*8+8])
    ASC( call, SHA512_Round)
    // update indices and loop
    AS2( add esi, 16)
    AS2( add eax, 2)
    AS2( sub edi, 8)
    AS2( test eax, 7)
    ASJ( jnz, 1, b)
    // do housekeeping every 8 rounds
    AS2( mov esi, 0xf)
    AS2( and esi, eax)
#if CRYPTOPP_BOOL_X32
    AS2( lea esi, [esp+8+20*8+8+esi*8])
#else
    AS2( lea esi, [esp+4+20*8+8+esi*8])
#endif
    AS2( add edi, 8*8)
    AS2( cmp eax, 80)
    ASJ( jne, 1, b)

#define SSE2_CombineState(i) \
    AS2( movdqa xmm0, [edi+i*16])\
    AS2( paddq xmm0, [ecx+i*16])\
    AS2( movdqa [ecx+i*16], xmm0)

    SSE2_CombineState(0)
    SSE2_CombineState(1)
    SSE2_CombineState(2)
    SSE2_CombineState(3)

    AS_POP_IF86( sp)
    AS1( emms)

#if defined(__GNUC__)
    AS_POP_IF86( bx)
    ATT_PREFIX
    :
    : "a" (SHA512_K), "c" (state), "d" (data)
    : "%esi", "%edi", "memory", "cc"
    );
#else
    AS1( pop edi)
    AS1( pop esi)
    AS1( pop ebx)
    AS1( ret)
#endif
}
#endif // #if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE

void SHA512::Transform(word64 *state, const word64 *data)
{
    CRYPTOPP_ASSERT(IsAlignedOn(state, GetAlignmentOf<word64>()));
    CRYPTOPP_ASSERT(IsAlignedOn(data, GetAlignmentOf<word64>()));

#if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE && (CRYPTOPP_BOOL_X86 || CRYPTOPP_BOOL_X32)
    if (HasSSE2())
    {
        SHA512_SSE2_Transform(state, data);
        return;
    }
#endif

#define S0(x) (rotrFixed(x,28)^rotrFixed(x,34)^rotrFixed(x,39))
#define S1(x) (rotrFixed(x,14)^rotrFixed(x,18)^rotrFixed(x,41))
#define s0(x) (rotrFixed(x,1)^rotrFixed(x,8)^(x>>7))
#define s1(x) (rotrFixed(x,19)^rotrFixed(x,61)^(x>>6))

#define R(i) h(i)+=S1(e(i))+Ch(e(i),f(i),g(i))+SHA512_K[i+j]+(j?blk2(i):blk0(i));\
    d(i)+=h(i);h(i)+=S0(a(i))+Maj(a(i),b(i),c(i))

    word64 W[16];
    word64 T[8];
    /* Copy context->state[] to working vars */
    memcpy(T, state, sizeof(T));
    /* 80 operations, partially loop unrolled */
    for (unsigned int j=0; j<80; j+=16)
    {
        R( 0); R( 1); R( 2); R( 3);
        R( 4); R( 5); R( 6); R( 7);
        R( 8); R( 9); R(10); R(11);
        R(12); R(13); R(14); R(15);
    }
    /* Add the working vars back into context.state[] */
    state[0] += a(0);
    state[1] += b(0);
    state[2] += c(0);
    state[3] += d(0);
    state[4] += e(0);
    state[5] += f(0);
    state[6] += g(0);
    state[7] += h(0);
}

NAMESPACE_END

#endif // #ifndef CRYPTOPP_GENERATE_X64_MASM
#endif // #ifndef CRYPTOPP_IMPORTS