Mercurial > dropbear
annotate sha1-arm.S @ 911:367205a2c1c4 asm

Include license information for Cryptogams routines

author   Matt Johnston <matt@ucc.asn.au>
date     Thu, 27 Feb 2014 22:30:56 +0800
parents  3ca7113936c1
@ Generated by sha1-armv4-large.pl
@ ====================================================================
@ Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
@ project. The module is, however, dual licensed under OpenSSL and
@ CRYPTOGAMS licenses depending on where you obtain it. For further
@ details see http://www.openssl.org/~appro/cryptogams/.
@ ====================================================================
@
#define __ARM_ARCH__ 6
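@ editor's note: __ARM_ARCH__ chooses the input-load path below. With
@ __ARM_ARCH__<7 each word is assembled from four ldrb byte loads (old
@ cores cannot do unaligned word loads); on ARMv7+ a single ldr handles
@ unaligned input, byte-swapped with rev on little-endian (__ARMEL__).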

        .text

        .global sha1_block_data_order
        .type sha1_block_data_order,%function

        .align 2
sha1_block_data_order:
        stmdb sp!,{r4-r12,lr}
        add r2,r1,r2,lsl#6 @ r2 to point at the end of r1
        ldmia r0,{r3,r4,r5,r6,r7}
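@ editor's note on the register map: r0 = pointer to the five-word
@ state h0..h4, r1 = input, r2 = input end; the working variables
@ a..e live in r3..r7, r8 holds the round constant K, r9..r12 are
@ scratch, and r14 walks downward through the X[] area on the stack.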
.Lloop:
        ldr r8,.LK_00_19
        mov r14,sp
        sub sp,sp,#15*4
        mov r5,r5,ror#30
        mov r6,r6,ror#30
        mov r7,r7,ror#30 @ [6]
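@ editor's note: the ror#30 above keeps the c, d and e values rotated
@ left by 2; register renaming then makes each round's ROTL(B,30)
@ free, and every use below recovers the true value with ,ror#2
@ (while A,ror#27 supplies the ROTL(A,5) of the round formula).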
.L_00_15:
#if __ARM_ARCH__<7
        ldrb r10,[r1,#2]
        ldrb r9,[r1,#3]
        ldrb r11,[r1,#1]
        add r7,r8,r7,ror#2 @ E+=K_00_19
        ldrb r12,[r1],#4
        orr r9,r9,r10,lsl#8
        eor r10,r5,r6 @ F_xx_xx
        orr r9,r9,r11,lsl#16
        add r7,r7,r3,ror#27 @ E+=ROR(A,27)
        orr r9,r9,r12,lsl#24
#else
        ldr r9,[r1],#4 @ handles unaligned
        add r7,r8,r7,ror#2 @ E+=K_00_19
        eor r10,r5,r6 @ F_xx_xx
        add r7,r7,r3,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
        rev r9,r9 @ byte swap
#endif
#endif
        and r10,r4,r10,ror#2
        add r7,r7,r9 @ E+=X[i]
        eor r10,r10,r6,ror#2 @ F_00_19(B,C,D)
        str r9,[r14,#-4]!
        add r7,r7,r10 @ E+=F_00_19(B,C,D)
#if __ARM_ARCH__<7
        ldrb r10,[r1,#2]
        ldrb r9,[r1,#3]
        ldrb r11,[r1,#1]
        add r6,r8,r6,ror#2 @ E+=K_00_19
        ldrb r12,[r1],#4
        orr r9,r9,r10,lsl#8
        eor r10,r4,r5 @ F_xx_xx
        orr r9,r9,r11,lsl#16
        add r6,r6,r7,ror#27 @ E+=ROR(A,27)
        orr r9,r9,r12,lsl#24
#else
        ldr r9,[r1],#4 @ handles unaligned
        add r6,r8,r6,ror#2 @ E+=K_00_19
        eor r10,r4,r5 @ F_xx_xx
        add r6,r6,r7,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
        rev r9,r9 @ byte swap
#endif
#endif
        and r10,r3,r10,ror#2
        add r6,r6,r9 @ E+=X[i]
        eor r10,r10,r5,ror#2 @ F_00_19(B,C,D)
        str r9,[r14,#-4]!
        add r6,r6,r10 @ E+=F_00_19(B,C,D)
#if __ARM_ARCH__<7
        ldrb r10,[r1,#2]
        ldrb r9,[r1,#3]
        ldrb r11,[r1,#1]
        add r5,r8,r5,ror#2 @ E+=K_00_19
        ldrb r12,[r1],#4
        orr r9,r9,r10,lsl#8
        eor r10,r3,r4 @ F_xx_xx
        orr r9,r9,r11,lsl#16
        add r5,r5,r6,ror#27 @ E+=ROR(A,27)
        orr r9,r9,r12,lsl#24
#else
        ldr r9,[r1],#4 @ handles unaligned
        add r5,r8,r5,ror#2 @ E+=K_00_19
        eor r10,r3,r4 @ F_xx_xx
        add r5,r5,r6,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
        rev r9,r9 @ byte swap
#endif
#endif
        and r10,r7,r10,ror#2
        add r5,r5,r9 @ E+=X[i]
        eor r10,r10,r4,ror#2 @ F_00_19(B,C,D)
        str r9,[r14,#-4]!
        add r5,r5,r10 @ E+=F_00_19(B,C,D)
#if __ARM_ARCH__<7
        ldrb r10,[r1,#2]
        ldrb r9,[r1,#3]
        ldrb r11,[r1,#1]
        add r4,r8,r4,ror#2 @ E+=K_00_19
        ldrb r12,[r1],#4
        orr r9,r9,r10,lsl#8
        eor r10,r7,r3 @ F_xx_xx
        orr r9,r9,r11,lsl#16
        add r4,r4,r5,ror#27 @ E+=ROR(A,27)
        orr r9,r9,r12,lsl#24
#else
        ldr r9,[r1],#4 @ handles unaligned
        add r4,r8,r4,ror#2 @ E+=K_00_19
        eor r10,r7,r3 @ F_xx_xx
        add r4,r4,r5,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
        rev r9,r9 @ byte swap
#endif
#endif
        and r10,r6,r10,ror#2
        add r4,r4,r9 @ E+=X[i]
        eor r10,r10,r3,ror#2 @ F_00_19(B,C,D)
        str r9,[r14,#-4]!
        add r4,r4,r10 @ E+=F_00_19(B,C,D)
#if __ARM_ARCH__<7
        ldrb r10,[r1,#2]
        ldrb r9,[r1,#3]
        ldrb r11,[r1,#1]
        add r3,r8,r3,ror#2 @ E+=K_00_19
        ldrb r12,[r1],#4
        orr r9,r9,r10,lsl#8
        eor r10,r6,r7 @ F_xx_xx
        orr r9,r9,r11,lsl#16
        add r3,r3,r4,ror#27 @ E+=ROR(A,27)
        orr r9,r9,r12,lsl#24
#else
        ldr r9,[r1],#4 @ handles unaligned
        add r3,r8,r3,ror#2 @ E+=K_00_19
        eor r10,r6,r7 @ F_xx_xx
        add r3,r3,r4,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
        rev r9,r9 @ byte swap
#endif
#endif
        and r10,r5,r10,ror#2
        add r3,r3,r9 @ E+=X[i]
        eor r10,r10,r7,ror#2 @ F_00_19(B,C,D)
        str r9,[r14,#-4]!
        add r3,r3,r10 @ E+=F_00_19(B,C,D)
        teq r14,sp
        bne .L_00_15 @ [((11+4)*5+2)*3]
        sub sp,sp,#25*4
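@ editor's note: round 15 below consumes the last direct input word;
@ rounds 16..19 then switch to the message schedule
@ X[i] = ROL(X[i-3]^X[i-8]^X[i-14]^X[i-16],1), the ROL(.,1) appearing
@ as the two ,ror#31 steps in each round.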
#if __ARM_ARCH__<7
        ldrb r10,[r1,#2]
        ldrb r9,[r1,#3]
        ldrb r11,[r1,#1]
        add r7,r8,r7,ror#2 @ E+=K_00_19
        ldrb r12,[r1],#4
        orr r9,r9,r10,lsl#8
        eor r10,r5,r6 @ F_xx_xx
        orr r9,r9,r11,lsl#16
        add r7,r7,r3,ror#27 @ E+=ROR(A,27)
        orr r9,r9,r12,lsl#24
#else
        ldr r9,[r1],#4 @ handles unaligned
        add r7,r8,r7,ror#2 @ E+=K_00_19
        eor r10,r5,r6 @ F_xx_xx
        add r7,r7,r3,ror#27 @ E+=ROR(A,27)
#ifdef __ARMEL__
        rev r9,r9 @ byte swap
#endif
#endif
        and r10,r4,r10,ror#2
        add r7,r7,r9 @ E+=X[i]
        eor r10,r10,r6,ror#2 @ F_00_19(B,C,D)
        str r9,[r14,#-4]!
        add r7,r7,r10 @ E+=F_00_19(B,C,D)
        ldr r9,[r14,#15*4]
        ldr r10,[r14,#13*4]
        ldr r11,[r14,#7*4]
        add r6,r8,r6,ror#2 @ E+=K_xx_xx
        ldr r12,[r14,#2*4]
        eor r9,r9,r10
        eor r11,r11,r12 @ 1 cycle stall
        eor r10,r4,r5 @ F_xx_xx
        mov r9,r9,ror#31
        add r6,r6,r7,ror#27 @ E+=ROR(A,27)
        eor r9,r9,r11,ror#31
        str r9,[r14,#-4]!
        and r10,r3,r10,ror#2 @ F_xx_xx
        @ F_xx_xx
        add r6,r6,r9 @ E+=X[i]
        eor r10,r10,r5,ror#2 @ F_00_19(B,C,D)
        add r6,r6,r10 @ E+=F_00_19(B,C,D)
        ldr r9,[r14,#15*4]
        ldr r10,[r14,#13*4]
        ldr r11,[r14,#7*4]
        add r5,r8,r5,ror#2 @ E+=K_xx_xx
        ldr r12,[r14,#2*4]
        eor r9,r9,r10
        eor r11,r11,r12 @ 1 cycle stall
        eor r10,r3,r4 @ F_xx_xx
        mov r9,r9,ror#31
        add r5,r5,r6,ror#27 @ E+=ROR(A,27)
        eor r9,r9,r11,ror#31
        str r9,[r14,#-4]!
        and r10,r7,r10,ror#2 @ F_xx_xx
        @ F_xx_xx
        add r5,r5,r9 @ E+=X[i]
        eor r10,r10,r4,ror#2 @ F_00_19(B,C,D)
        add r5,r5,r10 @ E+=F_00_19(B,C,D)
        ldr r9,[r14,#15*4]
        ldr r10,[r14,#13*4]
        ldr r11,[r14,#7*4]
        add r4,r8,r4,ror#2 @ E+=K_xx_xx
        ldr r12,[r14,#2*4]
        eor r9,r9,r10
        eor r11,r11,r12 @ 1 cycle stall
        eor r10,r7,r3 @ F_xx_xx
        mov r9,r9,ror#31
        add r4,r4,r5,ror#27 @ E+=ROR(A,27)
        eor r9,r9,r11,ror#31
        str r9,[r14,#-4]!
        and r10,r6,r10,ror#2 @ F_xx_xx
        @ F_xx_xx
        add r4,r4,r9 @ E+=X[i]
        eor r10,r10,r3,ror#2 @ F_00_19(B,C,D)
        add r4,r4,r10 @ E+=F_00_19(B,C,D)
        ldr r9,[r14,#15*4]
        ldr r10,[r14,#13*4]
        ldr r11,[r14,#7*4]
        add r3,r8,r3,ror#2 @ E+=K_xx_xx
        ldr r12,[r14,#2*4]
        eor r9,r9,r10
        eor r11,r11,r12 @ 1 cycle stall
        eor r10,r6,r7 @ F_xx_xx
        mov r9,r9,ror#31
        add r3,r3,r4,ror#27 @ E+=ROR(A,27)
        eor r9,r9,r11,ror#31
        str r9,[r14,#-4]!
        and r10,r5,r10,ror#2 @ F_xx_xx
        @ F_xx_xx
        add r3,r3,r9 @ E+=X[i]
        eor r10,r10,r7,ror#2 @ F_00_19(B,C,D)
        add r3,r3,r10 @ E+=F_00_19(B,C,D)

        ldr r8,.LK_20_39 @ [+15+16*4]
        cmn sp,#0 @ [+3], clear carry to denote 20_39
.L_20_39_or_60_79:
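@ editor's note: rounds 20..39 and 60..79 share F(B,C,D) = B^C^D, so
@ one loop body serves both passes. The carry flag records which pass
@ this is (cleared by the cmn above for 20_39, set by the cmp before
@ the second entry for 60_79); teq below preserves it, and bcs exits
@ once the 60..79 pass is done.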
        ldr r9,[r14,#15*4]
        ldr r10,[r14,#13*4]
        ldr r11,[r14,#7*4]
        add r7,r8,r7,ror#2 @ E+=K_xx_xx
        ldr r12,[r14,#2*4]
        eor r9,r9,r10
        eor r11,r11,r12 @ 1 cycle stall
        eor r10,r5,r6 @ F_xx_xx
        mov r9,r9,ror#31
        add r7,r7,r3,ror#27 @ E+=ROR(A,27)
        eor r9,r9,r11,ror#31
        str r9,[r14,#-4]!
        eor r10,r4,r10,ror#2 @ F_xx_xx
        @ F_xx_xx
        add r7,r7,r9 @ E+=X[i]
        add r7,r7,r10 @ E+=F_20_39(B,C,D)
        ldr r9,[r14,#15*4]
        ldr r10,[r14,#13*4]
        ldr r11,[r14,#7*4]
        add r6,r8,r6,ror#2 @ E+=K_xx_xx
        ldr r12,[r14,#2*4]
        eor r9,r9,r10
        eor r11,r11,r12 @ 1 cycle stall
        eor r10,r4,r5 @ F_xx_xx
        mov r9,r9,ror#31
        add r6,r6,r7,ror#27 @ E+=ROR(A,27)
        eor r9,r9,r11,ror#31
        str r9,[r14,#-4]!
        eor r10,r3,r10,ror#2 @ F_xx_xx
        @ F_xx_xx
        add r6,r6,r9 @ E+=X[i]
        add r6,r6,r10 @ E+=F_20_39(B,C,D)
        ldr r9,[r14,#15*4]
        ldr r10,[r14,#13*4]
        ldr r11,[r14,#7*4]
        add r5,r8,r5,ror#2 @ E+=K_xx_xx
        ldr r12,[r14,#2*4]
        eor r9,r9,r10
        eor r11,r11,r12 @ 1 cycle stall
        eor r10,r3,r4 @ F_xx_xx
        mov r9,r9,ror#31
        add r5,r5,r6,ror#27 @ E+=ROR(A,27)
        eor r9,r9,r11,ror#31
        str r9,[r14,#-4]!
        eor r10,r7,r10,ror#2 @ F_xx_xx
        @ F_xx_xx
        add r5,r5,r9 @ E+=X[i]
        add r5,r5,r10 @ E+=F_20_39(B,C,D)
        ldr r9,[r14,#15*4]
        ldr r10,[r14,#13*4]
        ldr r11,[r14,#7*4]
        add r4,r8,r4,ror#2 @ E+=K_xx_xx
        ldr r12,[r14,#2*4]
        eor r9,r9,r10
        eor r11,r11,r12 @ 1 cycle stall
        eor r10,r7,r3 @ F_xx_xx
        mov r9,r9,ror#31
        add r4,r4,r5,ror#27 @ E+=ROR(A,27)
        eor r9,r9,r11,ror#31
        str r9,[r14,#-4]!
        eor r10,r6,r10,ror#2 @ F_xx_xx
        @ F_xx_xx
        add r4,r4,r9 @ E+=X[i]
        add r4,r4,r10 @ E+=F_20_39(B,C,D)
        ldr r9,[r14,#15*4]
        ldr r10,[r14,#13*4]
        ldr r11,[r14,#7*4]
        add r3,r8,r3,ror#2 @ E+=K_xx_xx
        ldr r12,[r14,#2*4]
        eor r9,r9,r10
        eor r11,r11,r12 @ 1 cycle stall
        eor r10,r6,r7 @ F_xx_xx
        mov r9,r9,ror#31
        add r3,r3,r4,ror#27 @ E+=ROR(A,27)
        eor r9,r9,r11,ror#31
        str r9,[r14,#-4]!
        eor r10,r5,r10,ror#2 @ F_xx_xx
        @ F_xx_xx
        add r3,r3,r9 @ E+=X[i]
        add r3,r3,r10 @ E+=F_20_39(B,C,D)
        teq r14,sp @ preserve carry
        bne .L_20_39_or_60_79 @ [+((12+3)*5+2)*4]
        bcs .L_done @ [+((12+3)*5+2)*4], spare 300 bytes

        ldr r8,.LK_40_59
        sub sp,sp,#20*4 @ [+2]
.L_40_59:
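@ editor's note: rounds 40..59 use the majority function, computed
@ here as (B & (C^D)) + (C & D); the two terms cover disjoint cases,
@ so the add is equivalent to the or in Maj = (B&C)|(B&D)|(C&D).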
        ldr r9,[r14,#15*4]
        ldr r10,[r14,#13*4]
        ldr r11,[r14,#7*4]
        add r7,r8,r7,ror#2 @ E+=K_xx_xx
        ldr r12,[r14,#2*4]
        eor r9,r9,r10
        eor r11,r11,r12 @ 1 cycle stall
        eor r10,r5,r6 @ F_xx_xx
        mov r9,r9,ror#31
        add r7,r7,r3,ror#27 @ E+=ROR(A,27)
        eor r9,r9,r11,ror#31
        str r9,[r14,#-4]!
        and r10,r4,r10,ror#2 @ F_xx_xx
        and r11,r5,r6 @ F_xx_xx
        add r7,r7,r9 @ E+=X[i]
        add r7,r7,r10 @ E+=F_40_59(B,C,D)
        add r7,r7,r11,ror#2
        ldr r9,[r14,#15*4]
        ldr r10,[r14,#13*4]
        ldr r11,[r14,#7*4]
        add r6,r8,r6,ror#2 @ E+=K_xx_xx
        ldr r12,[r14,#2*4]
        eor r9,r9,r10
        eor r11,r11,r12 @ 1 cycle stall
        eor r10,r4,r5 @ F_xx_xx
        mov r9,r9,ror#31
        add r6,r6,r7,ror#27 @ E+=ROR(A,27)
        eor r9,r9,r11,ror#31
        str r9,[r14,#-4]!
        and r10,r3,r10,ror#2 @ F_xx_xx
        and r11,r4,r5 @ F_xx_xx
        add r6,r6,r9 @ E+=X[i]
        add r6,r6,r10 @ E+=F_40_59(B,C,D)
        add r6,r6,r11,ror#2
        ldr r9,[r14,#15*4]
        ldr r10,[r14,#13*4]
        ldr r11,[r14,#7*4]
        add r5,r8,r5,ror#2 @ E+=K_xx_xx
        ldr r12,[r14,#2*4]
        eor r9,r9,r10
        eor r11,r11,r12 @ 1 cycle stall
        eor r10,r3,r4 @ F_xx_xx
        mov r9,r9,ror#31
        add r5,r5,r6,ror#27 @ E+=ROR(A,27)
        eor r9,r9,r11,ror#31
        str r9,[r14,#-4]!
        and r10,r7,r10,ror#2 @ F_xx_xx
        and r11,r3,r4 @ F_xx_xx
        add r5,r5,r9 @ E+=X[i]
        add r5,r5,r10 @ E+=F_40_59(B,C,D)
        add r5,r5,r11,ror#2
        ldr r9,[r14,#15*4]
        ldr r10,[r14,#13*4]
        ldr r11,[r14,#7*4]
        add r4,r8,r4,ror#2 @ E+=K_xx_xx
        ldr r12,[r14,#2*4]
        eor r9,r9,r10
        eor r11,r11,r12 @ 1 cycle stall
        eor r10,r7,r3 @ F_xx_xx
        mov r9,r9,ror#31
        add r4,r4,r5,ror#27 @ E+=ROR(A,27)
        eor r9,r9,r11,ror#31
        str r9,[r14,#-4]!
        and r10,r6,r10,ror#2 @ F_xx_xx
        and r11,r7,r3 @ F_xx_xx
        add r4,r4,r9 @ E+=X[i]
        add r4,r4,r10 @ E+=F_40_59(B,C,D)
        add r4,r4,r11,ror#2
        ldr r9,[r14,#15*4]
        ldr r10,[r14,#13*4]
        ldr r11,[r14,#7*4]
        add r3,r8,r3,ror#2 @ E+=K_xx_xx
        ldr r12,[r14,#2*4]
        eor r9,r9,r10
        eor r11,r11,r12 @ 1 cycle stall
        eor r10,r6,r7 @ F_xx_xx
        mov r9,r9,ror#31
        add r3,r3,r4,ror#27 @ E+=ROR(A,27)
        eor r9,r9,r11,ror#31
        str r9,[r14,#-4]!
        and r10,r5,r10,ror#2 @ F_xx_xx
        and r11,r6,r7 @ F_xx_xx
        add r3,r3,r9 @ E+=X[i]
        add r3,r3,r10 @ E+=F_40_59(B,C,D)
        add r3,r3,r11,ror#2
        teq r14,sp
        bne .L_40_59 @ [+((12+5)*5+2)*4]

        ldr r8,.LK_60_79
        sub sp,sp,#20*4
        cmp sp,#0 @ set carry to denote 60_79
        b .L_20_39_or_60_79 @ [+4], spare 300 bytes
.L_done:
        add sp,sp,#80*4 @ "deallocate" stack frame
        ldmia r0,{r8,r9,r10,r11,r12}
        add r3,r8,r3
        add r4,r9,r4
        add r5,r10,r5,ror#2
        add r6,r11,r6,ror#2
        add r7,r12,r7,ror#2
        stmia r0,{r3,r4,r5,r6,r7}
        teq r1,r2
        bne .Lloop @ [+18], total 1307

#if __ARM_ARCH__>=5
        ldmia sp!,{r4-r12,pc}
#else
        ldmia sp!,{r4-r12,lr}
        tst lr,#1
        moveq pc,lr @ be binary compatible with V4, yet
        .word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
        .align 2
.LK_00_19: .word 0x5a827999
.LK_20_39: .word 0x6ed9eba1
.LK_40_59: .word 0x8f1bbcdc
.LK_60_79: .word 0xca62c1d6
        .size sha1_block_data_order,.-sha1_block_data_order
        .asciz "SHA1 block transform for ARMv4, CRYPTOGAMS by <appro@openssl.org>"
        .align 2
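
Editor's note: for reference, a minimal C caller sketch, not part of the
file. The interface is inferred from the prologue: r0 points at the
five-word chaining state, r1 at the input, and r2 holds the number of
64-byte blocks (shifted left by 6 to form the end pointer). Only the
sha1_block_data_order symbol comes from the source; the wrapper name
sha1_hash_blocks and the uint32_t[5] state type are illustrative
assumptions.

    #include <stdint.h>
    #include <stddef.h>

    /* Implemented in sha1-arm.S: folds 'num' 64-byte blocks at 'data'
       into the five-word SHA-1 chaining state. */
    extern void sha1_block_data_order(uint32_t state[5], const void *data,
                                      size_t num);

    /* Hypothetical wrapper: start from the standard SHA-1 IV and hash a
       buffer of whole blocks (final padding is out of scope here). */
    static void sha1_hash_blocks(uint32_t state[5], const uint8_t *buf,
                                 size_t nblocks)
    {
        state[0] = 0x67452301;
        state[1] = 0xefcdab89;
        state[2] = 0x98badcfe;
        state[3] = 0x10325476;
        state[4] = 0xc3d2e1f0;
        sha1_block_data_order(state, buf, nblocks);
    }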