/* Do not modify. This file is auto-generated from ghashv8-armx.pl. */
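@ GHASH for ARMv8 Crypto Extensions, 32-bit flavour: gcm_init_v8
@ precomputes the Htable of hash-key powers, gcm_gmult_v8 multiplies a
@ single Xi by H, and gcm_ghash_v8 hashes a whole buffer. PMULL/PMULL2
@ are emitted as .byte sequences, presumably so the file assembles even
@ where the assembler lacks crypto-extension support.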
#if __ARM_MAX_ARCH__>=7
.type gcm_init_v8,%function
vld1.64 {q9},[r1] @ load input H
vshl.i64 q11,q11,#57 @ 0xc2.0
vext.8 q8,q10,q11,#8 @ t0=0xc2....01
vshr.s32 q9,q9,#31 @ broadcast carry bit
vorr q3,q3,q10 @ H<<<=1
veor q12,q3,q8 @ twisted H
vst1.64 {q12},[r0]! @ store Htable[0]
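@ Htable[0] holds the "twisted" hash key: H shifted left by one bit
@ modulo the GHASH polynomial, kept in the rotated layout that the
@ multiplications below expect.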
vext.8 q8,q12,q12,#8 @ Karatsuba pre-processing
.byte 0xa8,0x0e,0xa8,0xf2 @ pmull q0,q12,q12
.byte 0xa9,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q12
.byte 0xa0,0x2e,0xa0,0xf2 @ pmull q1,q8,q8
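@ The three pmulls above square the twisted H via one Karatsuba
@ multiplication; once reduced, the result is H^2 for the
@ two-blocks-per-iteration loop in gcm_ghash_v8.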
vext.8 q9,q0,q2,#8 @ Karatsuba post-processing
.byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase
vmov d4,d3 @ Xh|Xm - 256-bit result
vmov d3,d0 @ Xm is rotated Xl
vext.8 q10,q0,q0,#8 @ 2nd phase
.byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11
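@ Reduction folds the 256-bit product back to 128 bits modulo the GHASH
@ polynomial x^128+x^7+x^2+x+1; in this bit-reflected representation
@ each of the two phases is a single pmull of the low half by the
@ 0xc2...0 constant held in q11.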
vext.8 q9,q14,q14,#8 @ Karatsuba pre-processing
vext.8 q13,q8,q9,#8 @ pack Karatsuba pre-processed
vst1.64 {q13,q14},[r0]! @ store Htable[1..2]
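@ Htable layout: [0] twisted H, [1] the xor-folded halves of H and H^2
@ packed together for Karatsuba, [2] twisted H^2.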
.size gcm_init_v8,.-gcm_init_v8
.type gcm_gmult_v8,%function
vld1.64 {q9},[r0] @ load Xi
vld1.64 {q12,q13},[r1] @ load twisted H, ...
.byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo
veor q9,q9,q3 @ Karatsuba pre-processing
.byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi
.byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
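@ Karatsuba: the full 128-bit carry-less product takes only three 64-bit
@ pmulls - lo·lo, hi·hi and (lo^hi)·(lo^hi) - with the middle term
@ recovered by xors in the post-processing below.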
vext.8 q9,q0,q2,#8 @ Karatsuba post-processing
.byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction
vmov d4,d3 @ Xh|Xm - 256-bit result
vmov d3,d0 @ Xm is rotated Xl
vext.8 q10,q0,q0,#8 @ 2nd phase of reduction
.byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11
vst1.64 {q0},[r0] @ write out Xi
.size gcm_gmult_v8,.-gcm_gmult_v8
.type gcm_ghash_v8,%function
vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ 32-bit ABI says so
vld1.64 {q0},[r0] @ load [rotated] Xi
@ "[rotated]" means that
@ the loaded value would have
@ to be rotated in order to
@ appear as in the
@ algorithm specification
subs r3,r3,#32 @ see if r3 is 32 or larger
mov r12,#16 @ r12 is used as the post-
@ increment for the input pointer;
@ as the loop is modulo-scheduled,
@ r12 is zeroed just in time
@ to preclude overstepping
@ inp[len], which means that
@ the last block[s] are actually
@ loaded twice, but the last
@ copy is not processed
vld1.64 {q12,q13},[r1]! @ load twisted H, ..., H^2
moveq r12,#0 @ is it time to zero r12?
vext.8 q0,q0,q0,#8 @ rotate Xi
vld1.64 {q8},[r2]! @ load [rotated] I[0]
vshl.u64 q11,q11,#57 @ compose 0xc2.0 constant
vext.8 q3,q8,q8,#8 @ rotate I[0]
blo .Lodd_tail_v8 @ r3 was less than 32
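@ Main loop: two blocks per iteration. Ii+1 is multiplied by H while
@ (Xi^Ii) is multiplied by H^2; the products are accumulated so that a
@ single reduction serves the pair.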
vld1.64 {q9},[r2],r12 @ load [rotated] I[1]
veor q3,q3,q0 @ I[i]^=Xi
.byte 0x8e,0x8e,0xa8,0xf2 @ pmull q4,q12,q7 @ H·Ii+1
veor q9,q9,q7 @ Karatsuba pre-processing
.byte 0x8f,0xce,0xa9,0xf2 @ pmull2 q6,q12,q7
subs r3,r3,#32 @ is there more data?
.byte 0x86,0x0e,0xac,0xf2 @ pmull q0,q14,q3 @ H^2.lo·Xi.lo
movlo r12,#0 @ is it time to zero r12?
.byte 0xa2,0xae,0xaa,0xf2 @ pmull q5,q13,q9
veor q10,q10,q3 @ Karatsuba pre-processing
.byte 0x87,0x4e,0xad,0xf2 @ pmull2 q2,q14,q3 @ H^2.hi·Xi.hi
veor q0,q0,q4 @ accumulate
.byte 0xa5,0x2e,0xab,0xf2 @ pmull2 q1,q13,q10 @ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
vld1.64 {q8},[r2],r12 @ load [rotated] I[i+2]
moveq r12,#0 @ is it time to zero r12?
vext.8 q9,q0,q2,#8 @ Karatsuba post-processing
vld1.64 {q9},[r2],r12 @ load [rotated] I[i+3]
.byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction
vmov d4,d3 @ Xh|Xm - 256-bit result
vmov d3,d0 @ Xm is rotated Xl
.byte 0x8e,0x8e,0xa8,0xf2 @ pmull q4,q12,q7 @ H·Ii+1
veor q3,q3,q2 @ accumulate q3 early
vext.8 q10,q0,q0,#8 @ 2nd phase of reduction
.byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11
veor q9,q9,q7 @ Karatsuba pre-processing
.byte 0x8f,0xce,0xa9,0xf2 @ pmull2 q6,q12,q7
bhs .Loop_mod2x_v8 @ there were at least 32 more bytes
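@ Being modulo-scheduled, the loop issues the H·Ii+1 products for the
@ next pair (q4, q6) above, before the backward branch, hiding multiply
@ latency behind the reduction.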
vext.8 q3,q8,q8,#8 @ re-construct q3
adds r3,r3,#32 @ re-construct r3
veor q0,q0,q2 @ re-construct q0
beq .Ldone_v8 @ is r3 zero?
veor q3,q3,q0 @ inp^=Xi
veor q9,q8,q10 @ q9 is rotated inp^Xi
.byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo
veor q9,q9,q3 @ Karatsuba pre-processing
.byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi
.byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
vext.8 q9,q0,q2,#8 @ Karatsuba post-processing
.byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction
vmov d4,d3 @ Xh|Xm - 256-bit result
vmov d3,d0 @ Xm is rotated Xl
vext.8 q10,q0,q0,#8 @ 2nd phase of reduction
.byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11
vst1.64 {q0},[r0] @ write out Xi
vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ 32-bit ABI says so
.size gcm_ghash_v8,.-gcm_ghash_v8
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
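@ the .byte string above reads "GHASH for ARMv8, CRYPTOGAMS by
@ <appro@openssl.org>"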