author     Nikos Mavrogiannopoulos <nmav@redhat.com>    2016-09-13 08:38:36 +0200
committer  Nikos Mavrogiannopoulos <nmav@redhat.com>    2016-09-13 09:07:38 +0200
commit     5afe97a600be5fc3ba98fd161f01b41f3bfa0c84 (patch)
tree       94dbc48b45a4d385813e60090c2ccb054c128a51 /lib/accelerated
parent     2dc84c06534d71890737bcdea5380a9dd810a681 (diff)
download   gnutls-5afe97a600be5fc3ba98fd161f01b41f3bfa0c84.tar.gz
openssl asm: reverted AESNI-x86 code to the gnutls 3.4.x code

The newer code was generating position-dependent code.
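For context, a minimal sketch (not part of the gnutls sources) of what separates position-dependent from position-independent 32-bit x86 assembly. The reverted code referenced the global __gnutls_x86_cpuid_s by its absolute address (leal __gnutls_x86_cpuid_s,%ebp); on i386 ELF such an absolute reference forces a text relocation when the object is linked into a shared library, which is presumably the position-dependence this revert removes. The my_data/get_addr_* names below are hypothetical, and a truly external global would additionally be fetched through the GOT rather than by a plain offset.

	.data
my_data:
	.long	42

	.text
# Position-dependent: the absolute address of my_data is baked into the
# instruction, so a shared object containing this needs a text relocation.
get_addr_abs:
	leal	my_data, %eax
	ret

# Position-independent (for a local/hidden symbol): recover the current PC
# with a call/pop thunk, then add the link-time constant offset to it.
get_addr_pic:
	call	1f
1:	popl	%ecx				# %ecx = runtime address of label 1
	leal	my_data-1b(%ecx), %eax		# constant offset + runtime PC
	ret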
Diffstat (limited to 'lib/accelerated')
-rw-r--r--  lib/accelerated/x86/coff/aesni-x86.s    1157
-rw-r--r--  lib/accelerated/x86/elf/aesni-x86.s     1162
-rw-r--r--  lib/accelerated/x86/macosx/aesni-x86.s  1157
3 files changed, 1283 insertions, 2193 deletions
diff --git a/lib/accelerated/x86/coff/aesni-x86.s b/lib/accelerated/x86/coff/aesni-x86.s index 2c535e0917..502be77883 100644 --- a/lib/accelerated/x86/coff/aesni-x86.s +++ b/lib/accelerated/x86/coff/aesni-x86.s @@ -60,10 +60,7 @@ _aesni_encrypt: leal 16(%edx),%edx jnz .L000enc1_loop_1 .byte 102,15,56,221,209 - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 movups %xmm2,(%eax) - pxor %xmm2,%xmm2 ret .globl _aesni_decrypt .def _aesni_decrypt; .scl 2; .type 32; .endef @@ -86,87 +83,31 @@ _aesni_decrypt: leal 16(%edx),%edx jnz .L001dec1_loop_2 .byte 102,15,56,223,209 - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 movups %xmm2,(%eax) - pxor %xmm2,%xmm2 - ret -.def __aesni_encrypt2; .scl 3; .type 32; .endef -.align 16 -__aesni_encrypt2: - movups (%edx),%xmm0 - shll $4,%ecx - movups 16(%edx),%xmm1 - xorps %xmm0,%xmm2 - pxor %xmm0,%xmm3 - movups 32(%edx),%xmm0 - leal 32(%edx,%ecx,1),%edx - negl %ecx - addl $16,%ecx -.L002enc2_loop: -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx -.byte 102,15,56,220,208 -.byte 102,15,56,220,216 - movups -16(%edx,%ecx,1),%xmm0 - jnz .L002enc2_loop -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,221,208 -.byte 102,15,56,221,216 - ret -.def __aesni_decrypt2; .scl 3; .type 32; .endef -.align 16 -__aesni_decrypt2: - movups (%edx),%xmm0 - shll $4,%ecx - movups 16(%edx),%xmm1 - xorps %xmm0,%xmm2 - pxor %xmm0,%xmm3 - movups 32(%edx),%xmm0 - leal 32(%edx,%ecx,1),%edx - negl %ecx - addl $16,%ecx -.L003dec2_loop: -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx -.byte 102,15,56,222,208 -.byte 102,15,56,222,216 - movups -16(%edx,%ecx,1),%xmm0 - jnz .L003dec2_loop -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,223,208 -.byte 102,15,56,223,216 ret .def __aesni_encrypt3; .scl 3; .type 32; .endef .align 16 __aesni_encrypt3: movups (%edx),%xmm0 - shll $4,%ecx + shrl $1,%ecx movups 16(%edx),%xmm1 + leal 32(%edx),%edx xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 - movups 32(%edx),%xmm0 - leal 32(%edx,%ecx,1),%edx - negl %ecx - addl $16,%ecx -.L004enc3_loop: + movups (%edx),%xmm0 +.L002enc3_loop: .byte 102,15,56,220,209 .byte 102,15,56,220,217 + decl %ecx .byte 102,15,56,220,225 - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx + movups 16(%edx),%xmm1 .byte 102,15,56,220,208 .byte 102,15,56,220,216 + leal 32(%edx),%edx .byte 102,15,56,220,224 - movups -16(%edx,%ecx,1),%xmm0 - jnz .L004enc3_loop + movups (%edx),%xmm0 + jnz .L002enc3_loop .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 @@ -178,26 +119,25 @@ __aesni_encrypt3: .align 16 __aesni_decrypt3: movups (%edx),%xmm0 - shll $4,%ecx + shrl $1,%ecx movups 16(%edx),%xmm1 + leal 32(%edx),%edx xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 - movups 32(%edx),%xmm0 - leal 32(%edx,%ecx,1),%edx - negl %ecx - addl $16,%ecx -.L005dec3_loop: + movups (%edx),%xmm0 +.L003dec3_loop: .byte 102,15,56,222,209 .byte 102,15,56,222,217 + decl %ecx .byte 102,15,56,222,225 - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx + movups 16(%edx),%xmm1 .byte 102,15,56,222,208 .byte 102,15,56,222,216 + leal 32(%edx),%edx .byte 102,15,56,222,224 - movups -16(%edx,%ecx,1),%xmm0 - jnz .L005dec3_loop + movups (%edx),%xmm0 + jnz .L003dec3_loop .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 @@ -210,29 +150,27 @@ __aesni_decrypt3: __aesni_encrypt4: movups (%edx),%xmm0 movups 16(%edx),%xmm1 - shll $4,%ecx + shrl $1,%ecx + leal 32(%edx),%edx xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 pxor %xmm0,%xmm5 
- movups 32(%edx),%xmm0 - leal 32(%edx,%ecx,1),%edx - negl %ecx -.byte 15,31,64,0 - addl $16,%ecx -.L006enc4_loop: + movups (%edx),%xmm0 +.L004enc4_loop: .byte 102,15,56,220,209 .byte 102,15,56,220,217 + decl %ecx .byte 102,15,56,220,225 .byte 102,15,56,220,233 - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx + movups 16(%edx),%xmm1 .byte 102,15,56,220,208 .byte 102,15,56,220,216 + leal 32(%edx),%edx .byte 102,15,56,220,224 .byte 102,15,56,220,232 - movups -16(%edx,%ecx,1),%xmm0 - jnz .L006enc4_loop + movups (%edx),%xmm0 + jnz .L004enc4_loop .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 @@ -247,29 +185,27 @@ __aesni_encrypt4: __aesni_decrypt4: movups (%edx),%xmm0 movups 16(%edx),%xmm1 - shll $4,%ecx + shrl $1,%ecx + leal 32(%edx),%edx xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 pxor %xmm0,%xmm5 - movups 32(%edx),%xmm0 - leal 32(%edx,%ecx,1),%edx - negl %ecx -.byte 15,31,64,0 - addl $16,%ecx -.L007dec4_loop: + movups (%edx),%xmm0 +.L005dec4_loop: .byte 102,15,56,222,209 .byte 102,15,56,222,217 + decl %ecx .byte 102,15,56,222,225 .byte 102,15,56,222,233 - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx + movups 16(%edx),%xmm1 .byte 102,15,56,222,208 .byte 102,15,56,222,216 + leal 32(%edx),%edx .byte 102,15,56,222,224 .byte 102,15,56,222,232 - movups -16(%edx,%ecx,1),%xmm0 - jnz .L007dec4_loop + movups (%edx),%xmm0 + jnz .L005dec4_loop .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 @@ -283,42 +219,45 @@ __aesni_decrypt4: .align 16 __aesni_encrypt6: movups (%edx),%xmm0 - shll $4,%ecx + shrl $1,%ecx movups 16(%edx),%xmm1 + leal 32(%edx),%edx xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 - pxor %xmm0,%xmm4 .byte 102,15,56,220,209 - pxor %xmm0,%xmm5 - pxor %xmm0,%xmm6 + pxor %xmm0,%xmm4 .byte 102,15,56,220,217 - leal 32(%edx,%ecx,1),%edx - negl %ecx + pxor %xmm0,%xmm5 + decl %ecx .byte 102,15,56,220,225 + pxor %xmm0,%xmm6 +.byte 102,15,56,220,233 pxor %xmm0,%xmm7 - movups (%edx,%ecx,1),%xmm0 - addl $16,%ecx - jmp .L008_aesni_encrypt6_inner +.byte 102,15,56,220,241 + movups (%edx),%xmm0 +.byte 102,15,56,220,249 + jmp .L_aesni_encrypt6_enter .align 16 -.L009enc6_loop: +.L006enc6_loop: .byte 102,15,56,220,209 .byte 102,15,56,220,217 + decl %ecx .byte 102,15,56,220,225 -.L008_aesni_encrypt6_inner: .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 +.align 16 .L_aesni_encrypt6_enter: - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx + movups 16(%edx),%xmm1 .byte 102,15,56,220,208 .byte 102,15,56,220,216 + leal 32(%edx),%edx .byte 102,15,56,220,224 .byte 102,15,56,220,232 .byte 102,15,56,220,240 .byte 102,15,56,220,248 - movups -16(%edx,%ecx,1),%xmm0 - jnz .L009enc6_loop + movups (%edx),%xmm0 + jnz .L006enc6_loop .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 @@ -336,42 +275,45 @@ __aesni_encrypt6: .align 16 __aesni_decrypt6: movups (%edx),%xmm0 - shll $4,%ecx + shrl $1,%ecx movups 16(%edx),%xmm1 + leal 32(%edx),%edx xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 - pxor %xmm0,%xmm4 .byte 102,15,56,222,209 - pxor %xmm0,%xmm5 - pxor %xmm0,%xmm6 + pxor %xmm0,%xmm4 .byte 102,15,56,222,217 - leal 32(%edx,%ecx,1),%edx - negl %ecx + pxor %xmm0,%xmm5 + decl %ecx .byte 102,15,56,222,225 + pxor %xmm0,%xmm6 +.byte 102,15,56,222,233 pxor %xmm0,%xmm7 - movups (%edx,%ecx,1),%xmm0 - addl $16,%ecx - jmp .L010_aesni_decrypt6_inner +.byte 102,15,56,222,241 + movups (%edx),%xmm0 +.byte 102,15,56,222,249 + jmp .L_aesni_decrypt6_enter .align 16 -.L011dec6_loop: +.L007dec6_loop: .byte 102,15,56,222,209 .byte 102,15,56,222,217 + decl %ecx .byte 
102,15,56,222,225 -.L010_aesni_decrypt6_inner: .byte 102,15,56,222,233 .byte 102,15,56,222,241 .byte 102,15,56,222,249 +.align 16 .L_aesni_decrypt6_enter: - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx + movups 16(%edx),%xmm1 .byte 102,15,56,222,208 .byte 102,15,56,222,216 + leal 32(%edx),%edx .byte 102,15,56,222,224 .byte 102,15,56,222,232 .byte 102,15,56,222,240 .byte 102,15,56,222,248 - movups -16(%edx,%ecx,1),%xmm0 - jnz .L011dec6_loop + movups (%edx),%xmm0 + jnz .L007dec6_loop .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 @@ -400,14 +342,14 @@ _aesni_ecb_encrypt: movl 32(%esp),%edx movl 36(%esp),%ebx andl $-16,%eax - jz .L012ecb_ret + jz .L008ecb_ret movl 240(%edx),%ecx testl %ebx,%ebx - jz .L013ecb_decrypt + jz .L009ecb_decrypt movl %edx,%ebp movl %ecx,%ebx cmpl $96,%eax - jb .L014ecb_enc_tail + jb .L010ecb_enc_tail movdqu (%esi),%xmm2 movdqu 16(%esi),%xmm3 movdqu 32(%esi),%xmm4 @@ -416,9 +358,9 @@ _aesni_ecb_encrypt: movdqu 80(%esi),%xmm7 leal 96(%esi),%esi subl $96,%eax - jmp .L015ecb_enc_loop6_enter + jmp .L011ecb_enc_loop6_enter .align 16 -.L016ecb_enc_loop6: +.L012ecb_enc_loop6: movups %xmm2,(%edi) movdqu (%esi),%xmm2 movups %xmm3,16(%edi) @@ -433,12 +375,12 @@ _aesni_ecb_encrypt: leal 96(%edi),%edi movdqu 80(%esi),%xmm7 leal 96(%esi),%esi -.L015ecb_enc_loop6_enter: +.L011ecb_enc_loop6_enter: call __aesni_encrypt6 movl %ebp,%edx movl %ebx,%ecx subl $96,%eax - jnc .L016ecb_enc_loop6 + jnc .L012ecb_enc_loop6 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) @@ -447,18 +389,18 @@ _aesni_ecb_encrypt: movups %xmm7,80(%edi) leal 96(%edi),%edi addl $96,%eax - jz .L012ecb_ret -.L014ecb_enc_tail: + jz .L008ecb_ret +.L010ecb_enc_tail: movups (%esi),%xmm2 cmpl $32,%eax - jb .L017ecb_enc_one + jb .L013ecb_enc_one movups 16(%esi),%xmm3 - je .L018ecb_enc_two + je .L014ecb_enc_two movups 32(%esi),%xmm4 cmpl $64,%eax - jb .L019ecb_enc_three + jb .L015ecb_enc_three movups 48(%esi),%xmm5 - je .L020ecb_enc_four + je .L016ecb_enc_four movups 64(%esi),%xmm6 xorps %xmm7,%xmm7 call __aesni_encrypt6 @@ -467,49 +409,50 @@ _aesni_ecb_encrypt: movups %xmm4,32(%edi) movups %xmm5,48(%edi) movups %xmm6,64(%edi) - jmp .L012ecb_ret + jmp .L008ecb_ret .align 16 -.L017ecb_enc_one: +.L013ecb_enc_one: movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 -.L021enc1_loop_3: +.L017enc1_loop_3: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz .L021enc1_loop_3 + jnz .L017enc1_loop_3 .byte 102,15,56,221,209 movups %xmm2,(%edi) - jmp .L012ecb_ret + jmp .L008ecb_ret .align 16 -.L018ecb_enc_two: - call __aesni_encrypt2 +.L014ecb_enc_two: + xorps %xmm4,%xmm4 + call __aesni_encrypt3 movups %xmm2,(%edi) movups %xmm3,16(%edi) - jmp .L012ecb_ret + jmp .L008ecb_ret .align 16 -.L019ecb_enc_three: +.L015ecb_enc_three: call __aesni_encrypt3 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) - jmp .L012ecb_ret + jmp .L008ecb_ret .align 16 -.L020ecb_enc_four: +.L016ecb_enc_four: call __aesni_encrypt4 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups %xmm5,48(%edi) - jmp .L012ecb_ret + jmp .L008ecb_ret .align 16 -.L013ecb_decrypt: +.L009ecb_decrypt: movl %edx,%ebp movl %ecx,%ebx cmpl $96,%eax - jb .L022ecb_dec_tail + jb .L018ecb_dec_tail movdqu (%esi),%xmm2 movdqu 16(%esi),%xmm3 movdqu 32(%esi),%xmm4 @@ -518,9 +461,9 @@ _aesni_ecb_encrypt: movdqu 80(%esi),%xmm7 leal 96(%esi),%esi subl $96,%eax - jmp .L023ecb_dec_loop6_enter + jmp .L019ecb_dec_loop6_enter .align 16 -.L024ecb_dec_loop6: +.L020ecb_dec_loop6: 
movups %xmm2,(%edi) movdqu (%esi),%xmm2 movups %xmm3,16(%edi) @@ -535,12 +478,12 @@ _aesni_ecb_encrypt: leal 96(%edi),%edi movdqu 80(%esi),%xmm7 leal 96(%esi),%esi -.L023ecb_dec_loop6_enter: +.L019ecb_dec_loop6_enter: call __aesni_decrypt6 movl %ebp,%edx movl %ebx,%ecx subl $96,%eax - jnc .L024ecb_dec_loop6 + jnc .L020ecb_dec_loop6 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) @@ -549,18 +492,18 @@ _aesni_ecb_encrypt: movups %xmm7,80(%edi) leal 96(%edi),%edi addl $96,%eax - jz .L012ecb_ret -.L022ecb_dec_tail: + jz .L008ecb_ret +.L018ecb_dec_tail: movups (%esi),%xmm2 cmpl $32,%eax - jb .L025ecb_dec_one + jb .L021ecb_dec_one movups 16(%esi),%xmm3 - je .L026ecb_dec_two + je .L022ecb_dec_two movups 32(%esi),%xmm4 cmpl $64,%eax - jb .L027ecb_dec_three + jb .L023ecb_dec_three movups 48(%esi),%xmm5 - je .L028ecb_dec_four + je .L024ecb_dec_four movups 64(%esi),%xmm6 xorps %xmm7,%xmm7 call __aesni_decrypt6 @@ -569,51 +512,44 @@ _aesni_ecb_encrypt: movups %xmm4,32(%edi) movups %xmm5,48(%edi) movups %xmm6,64(%edi) - jmp .L012ecb_ret + jmp .L008ecb_ret .align 16 -.L025ecb_dec_one: +.L021ecb_dec_one: movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 -.L029dec1_loop_4: +.L025dec1_loop_4: .byte 102,15,56,222,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz .L029dec1_loop_4 + jnz .L025dec1_loop_4 .byte 102,15,56,223,209 movups %xmm2,(%edi) - jmp .L012ecb_ret + jmp .L008ecb_ret .align 16 -.L026ecb_dec_two: - call __aesni_decrypt2 +.L022ecb_dec_two: + xorps %xmm4,%xmm4 + call __aesni_decrypt3 movups %xmm2,(%edi) movups %xmm3,16(%edi) - jmp .L012ecb_ret + jmp .L008ecb_ret .align 16 -.L027ecb_dec_three: +.L023ecb_dec_three: call __aesni_decrypt3 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) - jmp .L012ecb_ret + jmp .L008ecb_ret .align 16 -.L028ecb_dec_four: +.L024ecb_dec_four: call __aesni_decrypt4 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups %xmm5,48(%edi) -.L012ecb_ret: - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - pxor %xmm6,%xmm6 - pxor %xmm7,%xmm7 +.L008ecb_ret: popl %edi popl %esi popl %ebx @@ -651,56 +587,48 @@ _aesni_ccm64_encrypt_blocks: movl %ebp,20(%esp) movl %ebp,24(%esp) movl %ebp,28(%esp) - shll $4,%ecx - movl $16,%ebx + shrl $1,%ecx leal (%edx),%ebp movdqa (%esp),%xmm5 movdqa %xmm7,%xmm2 - leal 32(%edx,%ecx,1),%edx - subl %ecx,%ebx + movl %ecx,%ebx .byte 102,15,56,0,253 -.L030ccm64_enc_outer: +.L026ccm64_enc_outer: movups (%ebp),%xmm0 movl %ebx,%ecx movups (%esi),%xmm6 xorps %xmm0,%xmm2 movups 16(%ebp),%xmm1 xorps %xmm6,%xmm0 + leal 32(%ebp),%edx xorps %xmm0,%xmm3 - movups 32(%ebp),%xmm0 -.L031ccm64_enc2_loop: + movups (%edx),%xmm0 +.L027ccm64_enc2_loop: .byte 102,15,56,220,209 + decl %ecx .byte 102,15,56,220,217 - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx + movups 16(%edx),%xmm1 .byte 102,15,56,220,208 + leal 32(%edx),%edx .byte 102,15,56,220,216 - movups -16(%edx,%ecx,1),%xmm0 - jnz .L031ccm64_enc2_loop + movups (%edx),%xmm0 + jnz .L027ccm64_enc2_loop .byte 102,15,56,220,209 .byte 102,15,56,220,217 paddq 16(%esp),%xmm7 - decl %eax .byte 102,15,56,221,208 .byte 102,15,56,221,216 + decl %eax leal 16(%esi),%esi xorps %xmm2,%xmm6 movdqa %xmm7,%xmm2 movups %xmm6,(%edi) -.byte 102,15,56,0,213 leal 16(%edi),%edi - jnz .L030ccm64_enc_outer +.byte 102,15,56,0,213 + jnz .L026ccm64_enc_outer movl 48(%esp),%esp movl 40(%esp),%edi movups %xmm3,(%edi) - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor 
%xmm4,%xmm4 - pxor %xmm5,%xmm5 - pxor %xmm6,%xmm6 - pxor %xmm7,%xmm7 popl %edi popl %esi popl %ebx @@ -747,82 +675,71 @@ _aesni_ccm64_decrypt_blocks: movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 -.L032enc1_loop_5: +.L028enc1_loop_5: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz .L032enc1_loop_5 + jnz .L028enc1_loop_5 .byte 102,15,56,221,209 - shll $4,%ebx - movl $16,%ecx movups (%esi),%xmm6 paddq 16(%esp),%xmm7 leal 16(%esi),%esi - subl %ebx,%ecx - leal 32(%ebp,%ebx,1),%edx - movl %ecx,%ebx - jmp .L033ccm64_dec_outer + jmp .L029ccm64_dec_outer .align 16 -.L033ccm64_dec_outer: +.L029ccm64_dec_outer: xorps %xmm2,%xmm6 movdqa %xmm7,%xmm2 + movl %ebx,%ecx movups %xmm6,(%edi) leal 16(%edi),%edi .byte 102,15,56,0,213 subl $1,%eax - jz .L034ccm64_dec_break + jz .L030ccm64_dec_break movups (%ebp),%xmm0 - movl %ebx,%ecx + shrl $1,%ecx movups 16(%ebp),%xmm1 xorps %xmm0,%xmm6 + leal 32(%ebp),%edx xorps %xmm0,%xmm2 xorps %xmm6,%xmm3 - movups 32(%ebp),%xmm0 -.L035ccm64_dec2_loop: + movups (%edx),%xmm0 +.L031ccm64_dec2_loop: .byte 102,15,56,220,209 + decl %ecx .byte 102,15,56,220,217 - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx + movups 16(%edx),%xmm1 .byte 102,15,56,220,208 + leal 32(%edx),%edx .byte 102,15,56,220,216 - movups -16(%edx,%ecx,1),%xmm0 - jnz .L035ccm64_dec2_loop + movups (%edx),%xmm0 + jnz .L031ccm64_dec2_loop movups (%esi),%xmm6 paddq 16(%esp),%xmm7 .byte 102,15,56,220,209 .byte 102,15,56,220,217 + leal 16(%esi),%esi .byte 102,15,56,221,208 .byte 102,15,56,221,216 - leal 16(%esi),%esi - jmp .L033ccm64_dec_outer + jmp .L029ccm64_dec_outer .align 16 -.L034ccm64_dec_break: - movl 240(%ebp),%ecx +.L030ccm64_dec_break: movl %ebp,%edx movups (%edx),%xmm0 movups 16(%edx),%xmm1 xorps %xmm0,%xmm6 leal 32(%edx),%edx xorps %xmm6,%xmm3 -.L036enc1_loop_6: +.L032enc1_loop_6: .byte 102,15,56,220,217 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz .L036enc1_loop_6 + jnz .L032enc1_loop_6 .byte 102,15,56,221,217 movl 48(%esp),%esp movl 40(%esp),%edi movups %xmm3,(%edi) - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - pxor %xmm6,%xmm6 - pxor %xmm7,%xmm7 popl %edi popl %esi popl %ebx @@ -847,7 +764,7 @@ _aesni_ctr32_encrypt_blocks: andl $-16,%esp movl %ebp,80(%esp) cmpl $1,%eax - je .L037ctr32_one_shortcut + je .L033ctr32_one_shortcut movdqu (%ebx),%xmm7 movl $202182159,(%esp) movl $134810123,4(%esp) @@ -863,59 +780,63 @@ _aesni_ctr32_encrypt_blocks: .byte 102,15,58,34,253,3 movl 240(%edx),%ecx bswap %ebx - pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 + pxor %xmm0,%xmm0 movdqa (%esp),%xmm2 -.byte 102,15,58,34,195,0 +.byte 102,15,58,34,203,0 leal 3(%ebx),%ebp -.byte 102,15,58,34,205,0 +.byte 102,15,58,34,197,0 incl %ebx -.byte 102,15,58,34,195,1 +.byte 102,15,58,34,203,1 incl %ebp -.byte 102,15,58,34,205,1 +.byte 102,15,58,34,197,1 incl %ebx -.byte 102,15,58,34,195,2 +.byte 102,15,58,34,203,2 incl %ebp -.byte 102,15,58,34,205,2 - movdqa %xmm0,48(%esp) -.byte 102,15,56,0,194 - movdqu (%edx),%xmm6 - movdqa %xmm1,64(%esp) +.byte 102,15,58,34,197,2 + movdqa %xmm1,48(%esp) .byte 102,15,56,0,202 - pshufd $192,%xmm0,%xmm2 - pshufd $128,%xmm0,%xmm3 + movdqa %xmm0,64(%esp) +.byte 102,15,56,0,194 + pshufd $192,%xmm1,%xmm2 + pshufd $128,%xmm1,%xmm3 cmpl $6,%eax - jb .L038ctr32_tail - pxor %xmm6,%xmm7 - shll $4,%ecx - movl $16,%ebx + jb .L034ctr32_tail movdqa %xmm7,32(%esp) + shrl $1,%ecx movl %edx,%ebp - subl %ecx,%ebx - leal 32(%edx,%ecx,1),%edx + movl %ecx,%ebx subl $6,%eax - jmp .L039ctr32_loop6 -.align 16 
-.L039ctr32_loop6: - pshufd $64,%xmm0,%xmm4 - movdqa 32(%esp),%xmm0 - pshufd $192,%xmm1,%xmm5 + jmp .L035ctr32_loop6 +.align 16 +.L035ctr32_loop6: + pshufd $64,%xmm1,%xmm4 + movdqa 32(%esp),%xmm1 + pshufd $192,%xmm0,%xmm5 + por %xmm1,%xmm2 + pshufd $128,%xmm0,%xmm6 + por %xmm1,%xmm3 + pshufd $64,%xmm0,%xmm7 + por %xmm1,%xmm4 + por %xmm1,%xmm5 + por %xmm1,%xmm6 + por %xmm1,%xmm7 + movups (%ebp),%xmm0 + movups 16(%ebp),%xmm1 + leal 32(%ebp),%edx + decl %ecx pxor %xmm0,%xmm2 - pshufd $128,%xmm1,%xmm6 pxor %xmm0,%xmm3 - pshufd $64,%xmm1,%xmm7 - movups 16(%ebp),%xmm1 - pxor %xmm0,%xmm4 - pxor %xmm0,%xmm5 .byte 102,15,56,220,209 - pxor %xmm0,%xmm6 - pxor %xmm0,%xmm7 + pxor %xmm0,%xmm4 .byte 102,15,56,220,217 - movups 32(%ebp),%xmm0 - movl %ebx,%ecx + pxor %xmm0,%xmm5 .byte 102,15,56,220,225 + pxor %xmm0,%xmm6 .byte 102,15,56,220,233 + pxor %xmm0,%xmm7 .byte 102,15,56,220,241 + movups (%edx),%xmm0 .byte 102,15,56,220,249 call .L_aesni_encrypt6_enter movups (%esi),%xmm1 @@ -926,51 +847,51 @@ _aesni_ctr32_encrypt_blocks: movups %xmm2,(%edi) movdqa 16(%esp),%xmm0 xorps %xmm1,%xmm4 - movdqa 64(%esp),%xmm1 + movdqa 48(%esp),%xmm1 movups %xmm3,16(%edi) movups %xmm4,32(%edi) paddd %xmm0,%xmm1 - paddd 48(%esp),%xmm0 + paddd 64(%esp),%xmm0 movdqa (%esp),%xmm2 movups 48(%esi),%xmm3 movups 64(%esi),%xmm4 xorps %xmm3,%xmm5 movups 80(%esi),%xmm3 leal 96(%esi),%esi - movdqa %xmm0,48(%esp) -.byte 102,15,56,0,194 + movdqa %xmm1,48(%esp) +.byte 102,15,56,0,202 xorps %xmm4,%xmm6 movups %xmm5,48(%edi) xorps %xmm3,%xmm7 - movdqa %xmm1,64(%esp) -.byte 102,15,56,0,202 + movdqa %xmm0,64(%esp) +.byte 102,15,56,0,194 movups %xmm6,64(%edi) - pshufd $192,%xmm0,%xmm2 + pshufd $192,%xmm1,%xmm2 movups %xmm7,80(%edi) leal 96(%edi),%edi - pshufd $128,%xmm0,%xmm3 + movl %ebx,%ecx + pshufd $128,%xmm1,%xmm3 subl $6,%eax - jnc .L039ctr32_loop6 + jnc .L035ctr32_loop6 addl $6,%eax - jz .L040ctr32_ret - movdqu (%ebp),%xmm7 + jz .L036ctr32_ret movl %ebp,%edx - pxor 32(%esp),%xmm7 - movl 240(%ebp),%ecx -.L038ctr32_tail: + leal 1(,%ecx,2),%ecx + movdqa 32(%esp),%xmm7 +.L034ctr32_tail: por %xmm7,%xmm2 cmpl $2,%eax - jb .L041ctr32_one - pshufd $64,%xmm0,%xmm4 + jb .L037ctr32_one + pshufd $64,%xmm1,%xmm4 por %xmm7,%xmm3 - je .L042ctr32_two - pshufd $192,%xmm1,%xmm5 + je .L038ctr32_two + pshufd $192,%xmm0,%xmm5 por %xmm7,%xmm4 cmpl $4,%eax - jb .L043ctr32_three - pshufd $128,%xmm1,%xmm6 + jb .L039ctr32_three + pshufd $128,%xmm0,%xmm6 por %xmm7,%xmm5 - je .L044ctr32_four + je .L040ctr32_four por %xmm7,%xmm6 call __aesni_encrypt6 movups (%esi),%xmm1 @@ -988,39 +909,39 @@ _aesni_ctr32_encrypt_blocks: movups %xmm4,32(%edi) movups %xmm5,48(%edi) movups %xmm6,64(%edi) - jmp .L040ctr32_ret + jmp .L036ctr32_ret .align 16 -.L037ctr32_one_shortcut: +.L033ctr32_one_shortcut: movups (%ebx),%xmm2 movl 240(%edx),%ecx -.L041ctr32_one: +.L037ctr32_one: movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 -.L045enc1_loop_7: +.L041enc1_loop_7: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz .L045enc1_loop_7 + jnz .L041enc1_loop_7 .byte 102,15,56,221,209 movups (%esi),%xmm6 xorps %xmm2,%xmm6 movups %xmm6,(%edi) - jmp .L040ctr32_ret + jmp .L036ctr32_ret .align 16 -.L042ctr32_two: - call __aesni_encrypt2 +.L038ctr32_two: + call __aesni_encrypt3 movups (%esi),%xmm5 movups 16(%esi),%xmm6 xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 movups %xmm2,(%edi) movups %xmm3,16(%edi) - jmp .L040ctr32_ret + jmp .L036ctr32_ret .align 16 -.L043ctr32_three: +.L039ctr32_three: call __aesni_encrypt3 movups (%esi),%xmm5 movups 
16(%esi),%xmm6 @@ -1031,9 +952,9 @@ _aesni_ctr32_encrypt_blocks: xorps %xmm7,%xmm4 movups %xmm3,16(%edi) movups %xmm4,32(%edi) - jmp .L040ctr32_ret + jmp .L036ctr32_ret .align 16 -.L044ctr32_four: +.L040ctr32_four: call __aesni_encrypt4 movups (%esi),%xmm6 movups 16(%esi),%xmm7 @@ -1047,18 +968,7 @@ _aesni_ctr32_encrypt_blocks: xorps %xmm0,%xmm5 movups %xmm4,32(%edi) movups %xmm5,48(%edi) -.L040ctr32_ret: - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - movdqa %xmm0,32(%esp) - pxor %xmm5,%xmm5 - movdqa %xmm0,48(%esp) - pxor %xmm6,%xmm6 - movdqa %xmm0,64(%esp) - pxor %xmm7,%xmm7 +.L036ctr32_ret: movl 80(%esp),%esp popl %edi popl %esi @@ -1082,12 +992,12 @@ _aesni_xts_encrypt: movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 -.L046enc1_loop_8: +.L042enc1_loop_8: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz .L046enc1_loop_8 + jnz .L042enc1_loop_8 .byte 102,15,56,221,209 movl 20(%esp),%esi movl 24(%esp),%edi @@ -1111,14 +1021,12 @@ _aesni_xts_encrypt: movl %edx,%ebp movl %ecx,%ebx subl $96,%eax - jc .L047xts_enc_short - shll $4,%ecx - movl $16,%ebx - subl %ecx,%ebx - leal 32(%edx,%ecx,1),%edx - jmp .L048xts_enc_loop6 + jc .L043xts_enc_short + shrl $1,%ecx + movl %ecx,%ebx + jmp .L044xts_enc_loop6 .align 16 -.L048xts_enc_loop6: +.L044xts_enc_loop6: pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,(%esp) @@ -1154,7 +1062,6 @@ _aesni_xts_encrypt: pand %xmm3,%xmm7 movups (%esi),%xmm2 pxor %xmm1,%xmm7 - movl %ebx,%ecx movdqu 16(%esi),%xmm3 xorps %xmm0,%xmm2 movdqu 32(%esi),%xmm4 @@ -1170,17 +1077,19 @@ _aesni_xts_encrypt: movdqa %xmm7,80(%esp) pxor %xmm1,%xmm7 movups 16(%ebp),%xmm1 + leal 32(%ebp),%edx pxor 16(%esp),%xmm3 - pxor 32(%esp),%xmm4 .byte 102,15,56,220,209 - pxor 48(%esp),%xmm5 - pxor 64(%esp),%xmm6 + pxor 32(%esp),%xmm4 .byte 102,15,56,220,217 - pxor %xmm0,%xmm7 - movups 32(%ebp),%xmm0 + pxor 48(%esp),%xmm5 + decl %ecx .byte 102,15,56,220,225 + pxor 64(%esp),%xmm6 .byte 102,15,56,220,233 + pxor %xmm0,%xmm7 .byte 102,15,56,220,241 + movups (%edx),%xmm0 .byte 102,15,56,220,249 call .L_aesni_encrypt6_enter movdqa 80(%esp),%xmm1 @@ -1205,25 +1114,26 @@ _aesni_xts_encrypt: paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 + movl %ebx,%ecx pxor %xmm2,%xmm1 subl $96,%eax - jnc .L048xts_enc_loop6 - movl 240(%ebp),%ecx + jnc .L044xts_enc_loop6 + leal 1(,%ecx,2),%ecx movl %ebp,%edx movl %ecx,%ebx -.L047xts_enc_short: +.L043xts_enc_short: addl $96,%eax - jz .L049xts_enc_done6x + jz .L045xts_enc_done6x movdqa %xmm1,%xmm5 cmpl $32,%eax - jb .L050xts_enc_one + jb .L046xts_enc_one pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 - je .L051xts_enc_two + je .L047xts_enc_two pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,%xmm6 @@ -1232,7 +1142,7 @@ _aesni_xts_encrypt: pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 cmpl $64,%eax - jb .L052xts_enc_three + jb .L048xts_enc_three pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,%xmm7 @@ -1242,7 +1152,7 @@ _aesni_xts_encrypt: pxor %xmm2,%xmm1 movdqa %xmm5,(%esp) movdqa %xmm6,16(%esp) - je .L053xts_enc_four + je .L049xts_enc_four movdqa %xmm7,32(%esp) pshufd $19,%xmm0,%xmm7 movdqa %xmm1,48(%esp) @@ -1274,9 +1184,9 @@ _aesni_xts_encrypt: movups %xmm5,48(%edi) movups %xmm6,64(%edi) leal 80(%edi),%edi - jmp .L054xts_enc_done + jmp .L050xts_enc_done .align 16 -.L050xts_enc_one: +.L046xts_enc_one: movups (%esi),%xmm2 leal 16(%esi),%esi xorps %xmm5,%xmm2 @@ -1284,36 +1194,37 @@ _aesni_xts_encrypt: movups 
16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 -.L055enc1_loop_9: +.L051enc1_loop_9: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz .L055enc1_loop_9 + jnz .L051enc1_loop_9 .byte 102,15,56,221,209 xorps %xmm5,%xmm2 movups %xmm2,(%edi) leal 16(%edi),%edi movdqa %xmm5,%xmm1 - jmp .L054xts_enc_done + jmp .L050xts_enc_done .align 16 -.L051xts_enc_two: +.L047xts_enc_two: movaps %xmm1,%xmm6 movups (%esi),%xmm2 movups 16(%esi),%xmm3 leal 32(%esi),%esi xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 - call __aesni_encrypt2 + xorps %xmm4,%xmm4 + call __aesni_encrypt3 xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 movups %xmm2,(%edi) movups %xmm3,16(%edi) leal 32(%edi),%edi movdqa %xmm6,%xmm1 - jmp .L054xts_enc_done + jmp .L050xts_enc_done .align 16 -.L052xts_enc_three: +.L048xts_enc_three: movaps %xmm1,%xmm7 movups (%esi),%xmm2 movups 16(%esi),%xmm3 @@ -1331,9 +1242,9 @@ _aesni_xts_encrypt: movups %xmm4,32(%edi) leal 48(%edi),%edi movdqa %xmm7,%xmm1 - jmp .L054xts_enc_done + jmp .L050xts_enc_done .align 16 -.L053xts_enc_four: +.L049xts_enc_four: movaps %xmm1,%xmm6 movups (%esi),%xmm2 movups 16(%esi),%xmm3 @@ -1355,28 +1266,28 @@ _aesni_xts_encrypt: movups %xmm5,48(%edi) leal 64(%edi),%edi movdqa %xmm6,%xmm1 - jmp .L054xts_enc_done + jmp .L050xts_enc_done .align 16 -.L049xts_enc_done6x: +.L045xts_enc_done6x: movl 112(%esp),%eax andl $15,%eax - jz .L056xts_enc_ret + jz .L052xts_enc_ret movdqa %xmm1,%xmm5 movl %eax,112(%esp) - jmp .L057xts_enc_steal + jmp .L053xts_enc_steal .align 16 -.L054xts_enc_done: +.L050xts_enc_done: movl 112(%esp),%eax pxor %xmm0,%xmm0 andl $15,%eax - jz .L056xts_enc_ret + jz .L052xts_enc_ret pcmpgtd %xmm1,%xmm0 movl %eax,112(%esp) pshufd $19,%xmm0,%xmm5 paddq %xmm1,%xmm1 pand 96(%esp),%xmm5 pxor %xmm1,%xmm5 -.L057xts_enc_steal: +.L053xts_enc_steal: movzbl (%esi),%ecx movzbl -16(%edi),%edx leal 1(%esi),%esi @@ -1384,7 +1295,7 @@ _aesni_xts_encrypt: movb %dl,(%edi) leal 1(%edi),%edi subl $1,%eax - jnz .L057xts_enc_steal + jnz .L053xts_enc_steal subl 112(%esp),%edi movl %ebp,%edx movl %ebx,%ecx @@ -1394,30 +1305,16 @@ _aesni_xts_encrypt: movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 -.L058enc1_loop_10: +.L054enc1_loop_10: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz .L058enc1_loop_10 + jnz .L054enc1_loop_10 .byte 102,15,56,221,209 xorps %xmm5,%xmm2 movups %xmm2,-16(%edi) -.L056xts_enc_ret: - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - movdqa %xmm0,(%esp) - pxor %xmm3,%xmm3 - movdqa %xmm0,16(%esp) - pxor %xmm4,%xmm4 - movdqa %xmm0,32(%esp) - pxor %xmm5,%xmm5 - movdqa %xmm0,48(%esp) - pxor %xmm6,%xmm6 - movdqa %xmm0,64(%esp) - pxor %xmm7,%xmm7 - movdqa %xmm0,80(%esp) +.L052xts_enc_ret: movl 116(%esp),%esp popl %edi popl %esi @@ -1441,12 +1338,12 @@ _aesni_xts_decrypt: movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 -.L059enc1_loop_11: +.L055enc1_loop_11: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz .L059enc1_loop_11 + jnz .L055enc1_loop_11 .byte 102,15,56,221,209 movl 20(%esp),%esi movl 24(%esp),%edi @@ -1475,14 +1372,12 @@ _aesni_xts_decrypt: pcmpgtd %xmm1,%xmm0 andl $-16,%eax subl $96,%eax - jc .L060xts_dec_short - shll $4,%ecx - movl $16,%ebx - subl %ecx,%ebx - leal 32(%edx,%ecx,1),%edx - jmp .L061xts_dec_loop6 + jc .L056xts_dec_short + shrl $1,%ecx + movl %ecx,%ebx + jmp .L057xts_dec_loop6 .align 16 -.L061xts_dec_loop6: +.L057xts_dec_loop6: pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,(%esp) @@ -1518,7 +1413,6 @@ _aesni_xts_decrypt: pand %xmm3,%xmm7 
movups (%esi),%xmm2 pxor %xmm1,%xmm7 - movl %ebx,%ecx movdqu 16(%esi),%xmm3 xorps %xmm0,%xmm2 movdqu 32(%esi),%xmm4 @@ -1534,17 +1428,19 @@ _aesni_xts_decrypt: movdqa %xmm7,80(%esp) pxor %xmm1,%xmm7 movups 16(%ebp),%xmm1 + leal 32(%ebp),%edx pxor 16(%esp),%xmm3 - pxor 32(%esp),%xmm4 .byte 102,15,56,222,209 - pxor 48(%esp),%xmm5 - pxor 64(%esp),%xmm6 + pxor 32(%esp),%xmm4 .byte 102,15,56,222,217 - pxor %xmm0,%xmm7 - movups 32(%ebp),%xmm0 + pxor 48(%esp),%xmm5 + decl %ecx .byte 102,15,56,222,225 + pxor 64(%esp),%xmm6 .byte 102,15,56,222,233 + pxor %xmm0,%xmm7 .byte 102,15,56,222,241 + movups (%edx),%xmm0 .byte 102,15,56,222,249 call .L_aesni_decrypt6_enter movdqa 80(%esp),%xmm1 @@ -1569,25 +1465,26 @@ _aesni_xts_decrypt: paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 + movl %ebx,%ecx pxor %xmm2,%xmm1 subl $96,%eax - jnc .L061xts_dec_loop6 - movl 240(%ebp),%ecx + jnc .L057xts_dec_loop6 + leal 1(,%ecx,2),%ecx movl %ebp,%edx movl %ecx,%ebx -.L060xts_dec_short: +.L056xts_dec_short: addl $96,%eax - jz .L062xts_dec_done6x + jz .L058xts_dec_done6x movdqa %xmm1,%xmm5 cmpl $32,%eax - jb .L063xts_dec_one + jb .L059xts_dec_one pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 - je .L064xts_dec_two + je .L060xts_dec_two pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,%xmm6 @@ -1596,7 +1493,7 @@ _aesni_xts_decrypt: pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 cmpl $64,%eax - jb .L065xts_dec_three + jb .L061xts_dec_three pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,%xmm7 @@ -1606,7 +1503,7 @@ _aesni_xts_decrypt: pxor %xmm2,%xmm1 movdqa %xmm5,(%esp) movdqa %xmm6,16(%esp) - je .L066xts_dec_four + je .L062xts_dec_four movdqa %xmm7,32(%esp) pshufd $19,%xmm0,%xmm7 movdqa %xmm1,48(%esp) @@ -1638,9 +1535,9 @@ _aesni_xts_decrypt: movups %xmm5,48(%edi) movups %xmm6,64(%edi) leal 80(%edi),%edi - jmp .L067xts_dec_done + jmp .L063xts_dec_done .align 16 -.L063xts_dec_one: +.L059xts_dec_one: movups (%esi),%xmm2 leal 16(%esi),%esi xorps %xmm5,%xmm2 @@ -1648,36 +1545,36 @@ _aesni_xts_decrypt: movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 -.L068dec1_loop_12: +.L064dec1_loop_12: .byte 102,15,56,222,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz .L068dec1_loop_12 + jnz .L064dec1_loop_12 .byte 102,15,56,223,209 xorps %xmm5,%xmm2 movups %xmm2,(%edi) leal 16(%edi),%edi movdqa %xmm5,%xmm1 - jmp .L067xts_dec_done + jmp .L063xts_dec_done .align 16 -.L064xts_dec_two: +.L060xts_dec_two: movaps %xmm1,%xmm6 movups (%esi),%xmm2 movups 16(%esi),%xmm3 leal 32(%esi),%esi xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 - call __aesni_decrypt2 + call __aesni_decrypt3 xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 movups %xmm2,(%edi) movups %xmm3,16(%edi) leal 32(%edi),%edi movdqa %xmm6,%xmm1 - jmp .L067xts_dec_done + jmp .L063xts_dec_done .align 16 -.L065xts_dec_three: +.L061xts_dec_three: movaps %xmm1,%xmm7 movups (%esi),%xmm2 movups 16(%esi),%xmm3 @@ -1695,9 +1592,9 @@ _aesni_xts_decrypt: movups %xmm4,32(%edi) leal 48(%edi),%edi movdqa %xmm7,%xmm1 - jmp .L067xts_dec_done + jmp .L063xts_dec_done .align 16 -.L066xts_dec_four: +.L062xts_dec_four: movaps %xmm1,%xmm6 movups (%esi),%xmm2 movups 16(%esi),%xmm3 @@ -1719,20 +1616,20 @@ _aesni_xts_decrypt: movups %xmm5,48(%edi) leal 64(%edi),%edi movdqa %xmm6,%xmm1 - jmp .L067xts_dec_done + jmp .L063xts_dec_done .align 16 -.L062xts_dec_done6x: +.L058xts_dec_done6x: movl 112(%esp),%eax andl $15,%eax - jz .L069xts_dec_ret + jz .L065xts_dec_ret movl %eax,112(%esp) - jmp .L070xts_dec_only_one_more + jmp 
.L066xts_dec_only_one_more .align 16 -.L067xts_dec_done: +.L063xts_dec_done: movl 112(%esp),%eax pxor %xmm0,%xmm0 andl $15,%eax - jz .L069xts_dec_ret + jz .L065xts_dec_ret pcmpgtd %xmm1,%xmm0 movl %eax,112(%esp) pshufd $19,%xmm0,%xmm2 @@ -1742,7 +1639,7 @@ _aesni_xts_decrypt: pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 -.L070xts_dec_only_one_more: +.L066xts_dec_only_one_more: pshufd $19,%xmm0,%xmm5 movdqa %xmm1,%xmm6 paddq %xmm1,%xmm1 @@ -1756,16 +1653,16 @@ _aesni_xts_decrypt: movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 -.L071dec1_loop_13: +.L067dec1_loop_13: .byte 102,15,56,222,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz .L071dec1_loop_13 + jnz .L067dec1_loop_13 .byte 102,15,56,223,209 xorps %xmm5,%xmm2 movups %xmm2,(%edi) -.L072xts_dec_steal: +.L068xts_dec_steal: movzbl 16(%esi),%ecx movzbl (%edi),%edx leal 1(%esi),%esi @@ -1773,7 +1670,7 @@ _aesni_xts_decrypt: movb %dl,16(%edi) leal 1(%edi),%edi subl $1,%eax - jnz .L072xts_dec_steal + jnz .L068xts_dec_steal subl 112(%esp),%edi movl %ebp,%edx movl %ebx,%ecx @@ -1783,30 +1680,16 @@ _aesni_xts_decrypt: movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 -.L073dec1_loop_14: +.L069dec1_loop_14: .byte 102,15,56,222,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz .L073dec1_loop_14 + jnz .L069dec1_loop_14 .byte 102,15,56,223,209 xorps %xmm6,%xmm2 movups %xmm2,(%edi) -.L069xts_dec_ret: - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - movdqa %xmm0,(%esp) - pxor %xmm3,%xmm3 - movdqa %xmm0,16(%esp) - pxor %xmm4,%xmm4 - movdqa %xmm0,32(%esp) - pxor %xmm5,%xmm5 - movdqa %xmm0,48(%esp) - pxor %xmm6,%xmm6 - movdqa %xmm0,64(%esp) - pxor %xmm7,%xmm7 - movdqa %xmm0,80(%esp) +.L065xts_dec_ret: movl 116(%esp),%esp popl %edi popl %esi @@ -1831,7 +1714,7 @@ _aesni_cbc_encrypt: movl 32(%esp),%edx movl 36(%esp),%ebp testl %eax,%eax - jz .L074cbc_abort + jz .L070cbc_abort cmpl $0,40(%esp) xchgl %esp,%ebx movups (%ebp),%xmm7 @@ -1839,14 +1722,14 @@ _aesni_cbc_encrypt: movl %edx,%ebp movl %ebx,16(%esp) movl %ecx,%ebx - je .L075cbc_decrypt + je .L071cbc_decrypt movaps %xmm7,%xmm2 cmpl $16,%eax - jb .L076cbc_enc_tail + jb .L072cbc_enc_tail subl $16,%eax - jmp .L077cbc_enc_loop + jmp .L073cbc_enc_loop .align 16 -.L077cbc_enc_loop: +.L073cbc_enc_loop: movups (%esi),%xmm7 leal 16(%esi),%esi movups (%edx),%xmm0 @@ -1854,25 +1737,24 @@ _aesni_cbc_encrypt: xorps %xmm0,%xmm7 leal 32(%edx),%edx xorps %xmm7,%xmm2 -.L078enc1_loop_15: +.L074enc1_loop_15: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz .L078enc1_loop_15 + jnz .L074enc1_loop_15 .byte 102,15,56,221,209 movl %ebx,%ecx movl %ebp,%edx movups %xmm2,(%edi) leal 16(%edi),%edi subl $16,%eax - jnc .L077cbc_enc_loop + jnc .L073cbc_enc_loop addl $16,%eax - jnz .L076cbc_enc_tail + jnz .L072cbc_enc_tail movaps %xmm2,%xmm7 - pxor %xmm2,%xmm2 - jmp .L079cbc_ret -.L076cbc_enc_tail: + jmp .L075cbc_ret +.L072cbc_enc_tail: movl %eax,%ecx .long 2767451785 movl $16,%ecx @@ -1883,20 +1765,20 @@ _aesni_cbc_encrypt: movl %ebx,%ecx movl %edi,%esi movl %ebp,%edx - jmp .L077cbc_enc_loop + jmp .L073cbc_enc_loop .align 16 -.L075cbc_decrypt: +.L071cbc_decrypt: cmpl $80,%eax - jbe .L080cbc_dec_tail + jbe .L076cbc_dec_tail movaps %xmm7,(%esp) subl $80,%eax - jmp .L081cbc_dec_loop6_enter + jmp .L077cbc_dec_loop6_enter .align 16 -.L082cbc_dec_loop6: +.L078cbc_dec_loop6: movaps %xmm0,(%esp) movups %xmm7,(%edi) leal 16(%edi),%edi -.L081cbc_dec_loop6_enter: +.L077cbc_dec_loop6_enter: movdqu (%esi),%xmm2 movdqu 16(%esi),%xmm3 movdqu 32(%esi),%xmm4 @@ 
-1926,28 +1808,28 @@ _aesni_cbc_encrypt: movups %xmm6,64(%edi) leal 80(%edi),%edi subl $96,%eax - ja .L082cbc_dec_loop6 + ja .L078cbc_dec_loop6 movaps %xmm7,%xmm2 movaps %xmm0,%xmm7 addl $80,%eax - jle .L083cbc_dec_clear_tail_collected + jle .L079cbc_dec_tail_collected movups %xmm2,(%edi) leal 16(%edi),%edi -.L080cbc_dec_tail: +.L076cbc_dec_tail: movups (%esi),%xmm2 movaps %xmm2,%xmm6 cmpl $16,%eax - jbe .L084cbc_dec_one + jbe .L080cbc_dec_one movups 16(%esi),%xmm3 movaps %xmm3,%xmm5 cmpl $32,%eax - jbe .L085cbc_dec_two + jbe .L081cbc_dec_two movups 32(%esi),%xmm4 cmpl $48,%eax - jbe .L086cbc_dec_three + jbe .L082cbc_dec_three movups 48(%esi),%xmm5 cmpl $64,%eax - jbe .L087cbc_dec_four + jbe .L083cbc_dec_four movups 64(%esi),%xmm6 movaps %xmm7,(%esp) movups (%esi),%xmm2 @@ -1965,62 +1847,56 @@ _aesni_cbc_encrypt: xorps %xmm0,%xmm6 movups %xmm2,(%edi) movups %xmm3,16(%edi) - pxor %xmm3,%xmm3 movups %xmm4,32(%edi) - pxor %xmm4,%xmm4 movups %xmm5,48(%edi) - pxor %xmm5,%xmm5 leal 64(%edi),%edi movaps %xmm6,%xmm2 - pxor %xmm6,%xmm6 subl $80,%eax - jmp .L088cbc_dec_tail_collected + jmp .L079cbc_dec_tail_collected .align 16 -.L084cbc_dec_one: +.L080cbc_dec_one: movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 -.L089dec1_loop_16: +.L084dec1_loop_16: .byte 102,15,56,222,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz .L089dec1_loop_16 + jnz .L084dec1_loop_16 .byte 102,15,56,223,209 xorps %xmm7,%xmm2 movaps %xmm6,%xmm7 subl $16,%eax - jmp .L088cbc_dec_tail_collected + jmp .L079cbc_dec_tail_collected .align 16 -.L085cbc_dec_two: - call __aesni_decrypt2 +.L081cbc_dec_two: + xorps %xmm4,%xmm4 + call __aesni_decrypt3 xorps %xmm7,%xmm2 xorps %xmm6,%xmm3 movups %xmm2,(%edi) movaps %xmm3,%xmm2 - pxor %xmm3,%xmm3 leal 16(%edi),%edi movaps %xmm5,%xmm7 subl $32,%eax - jmp .L088cbc_dec_tail_collected + jmp .L079cbc_dec_tail_collected .align 16 -.L086cbc_dec_three: +.L082cbc_dec_three: call __aesni_decrypt3 xorps %xmm7,%xmm2 xorps %xmm6,%xmm3 xorps %xmm5,%xmm4 movups %xmm2,(%edi) movaps %xmm4,%xmm2 - pxor %xmm4,%xmm4 movups %xmm3,16(%edi) - pxor %xmm3,%xmm3 leal 32(%edi),%edi movups 32(%esi),%xmm7 subl $48,%eax - jmp .L088cbc_dec_tail_collected + jmp .L079cbc_dec_tail_collected .align 16 -.L087cbc_dec_four: +.L083cbc_dec_four: call __aesni_decrypt4 movups 16(%esi),%xmm1 movups 32(%esi),%xmm0 @@ -2030,44 +1906,28 @@ _aesni_cbc_encrypt: movups %xmm2,(%edi) xorps %xmm1,%xmm4 movups %xmm3,16(%edi) - pxor %xmm3,%xmm3 xorps %xmm0,%xmm5 movups %xmm4,32(%edi) - pxor %xmm4,%xmm4 leal 48(%edi),%edi movaps %xmm5,%xmm2 - pxor %xmm5,%xmm5 subl $64,%eax - jmp .L088cbc_dec_tail_collected -.align 16 -.L083cbc_dec_clear_tail_collected: - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - pxor %xmm6,%xmm6 -.L088cbc_dec_tail_collected: +.L079cbc_dec_tail_collected: andl $15,%eax - jnz .L090cbc_dec_tail_partial + jnz .L085cbc_dec_tail_partial movups %xmm2,(%edi) - pxor %xmm0,%xmm0 - jmp .L079cbc_ret + jmp .L075cbc_ret .align 16 -.L090cbc_dec_tail_partial: +.L085cbc_dec_tail_partial: movaps %xmm2,(%esp) - pxor %xmm0,%xmm0 movl $16,%ecx movl %esp,%esi subl %eax,%ecx .long 2767451785 - movdqa %xmm2,(%esp) -.L079cbc_ret: +.L075cbc_ret: movl 16(%esp),%esp movl 36(%esp),%ebp - pxor %xmm2,%xmm2 - pxor %xmm1,%xmm1 movups %xmm7,(%ebp) - pxor %xmm7,%xmm7 -.L074cbc_abort: +.L070cbc_abort: popl %edi popl %esi popl %ebx @@ -2076,62 +1936,52 @@ _aesni_cbc_encrypt: .def __aesni_set_encrypt_key; .scl 3; .type 32; .endef .align 16 __aesni_set_encrypt_key: - pushl %ebp - pushl %ebx testl %eax,%eax - jz 
.L091bad_pointer + jz .L086bad_pointer testl %edx,%edx - jz .L091bad_pointer - call .L092pic -.L092pic: - popl %ebx - leal .Lkey_const-.L092pic(%ebx),%ebx - leal __gnutls_x86_cpuid_s,%ebp + jz .L086bad_pointer movups (%eax),%xmm0 xorps %xmm4,%xmm4 - movl 4(%ebp),%ebp leal 16(%edx),%edx - andl $268437504,%ebp cmpl $256,%ecx - je .L09314rounds + je .L08714rounds cmpl $192,%ecx - je .L09412rounds + je .L08812rounds cmpl $128,%ecx - jne .L095bad_keybits + jne .L089bad_keybits .align 16 -.L09610rounds: - cmpl $268435456,%ebp - je .L09710rounds_alt +.L09010rounds: movl $9,%ecx movups %xmm0,-16(%edx) .byte 102,15,58,223,200,1 - call .L098key_128_cold + call .L091key_128_cold .byte 102,15,58,223,200,2 - call .L099key_128 + call .L092key_128 .byte 102,15,58,223,200,4 - call .L099key_128 + call .L092key_128 .byte 102,15,58,223,200,8 - call .L099key_128 + call .L092key_128 .byte 102,15,58,223,200,16 - call .L099key_128 + call .L092key_128 .byte 102,15,58,223,200,32 - call .L099key_128 + call .L092key_128 .byte 102,15,58,223,200,64 - call .L099key_128 + call .L092key_128 .byte 102,15,58,223,200,128 - call .L099key_128 + call .L092key_128 .byte 102,15,58,223,200,27 - call .L099key_128 + call .L092key_128 .byte 102,15,58,223,200,54 - call .L099key_128 + call .L092key_128 movups %xmm0,(%edx) movl %ecx,80(%edx) - jmp .L100good_key + xorl %eax,%eax + ret .align 16 -.L099key_128: +.L092key_128: movups %xmm0,(%edx) leal 16(%edx),%edx -.L098key_128_cold: +.L091key_128_cold: shufps $16,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $140,%xmm0,%xmm4 @@ -2140,91 +1990,38 @@ __aesni_set_encrypt_key: xorps %xmm1,%xmm0 ret .align 16 -.L09710rounds_alt: - movdqa (%ebx),%xmm5 - movl $8,%ecx - movdqa 32(%ebx),%xmm4 - movdqa %xmm0,%xmm2 - movdqu %xmm0,-16(%edx) -.L101loop_key128: -.byte 102,15,56,0,197 -.byte 102,15,56,221,196 - pslld $1,%xmm4 - leal 16(%edx),%edx - movdqa %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm3,%xmm2 - pxor %xmm2,%xmm0 - movdqu %xmm0,-16(%edx) - movdqa %xmm0,%xmm2 - decl %ecx - jnz .L101loop_key128 - movdqa 48(%ebx),%xmm4 -.byte 102,15,56,0,197 -.byte 102,15,56,221,196 - pslld $1,%xmm4 - movdqa %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm3,%xmm2 - pxor %xmm2,%xmm0 - movdqu %xmm0,(%edx) - movdqa %xmm0,%xmm2 -.byte 102,15,56,0,197 -.byte 102,15,56,221,196 - movdqa %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm3,%xmm2 - pxor %xmm2,%xmm0 - movdqu %xmm0,16(%edx) - movl $9,%ecx - movl %ecx,96(%edx) - jmp .L100good_key -.align 16 -.L09412rounds: +.L08812rounds: movq 16(%eax),%xmm2 - cmpl $268435456,%ebp - je .L10212rounds_alt movl $11,%ecx movups %xmm0,-16(%edx) .byte 102,15,58,223,202,1 - call .L103key_192a_cold + call .L093key_192a_cold .byte 102,15,58,223,202,2 - call .L104key_192b + call .L094key_192b .byte 102,15,58,223,202,4 - call .L105key_192a + call .L095key_192a .byte 102,15,58,223,202,8 - call .L104key_192b + call .L094key_192b .byte 102,15,58,223,202,16 - call .L105key_192a + call .L095key_192a .byte 102,15,58,223,202,32 - call .L104key_192b + call .L094key_192b .byte 102,15,58,223,202,64 - call .L105key_192a + call .L095key_192a .byte 102,15,58,223,202,128 - call .L104key_192b + call .L094key_192b movups %xmm0,(%edx) movl %ecx,48(%edx) - jmp .L100good_key + xorl %eax,%eax + ret .align 16 -.L105key_192a: +.L095key_192a: movups %xmm0,(%edx) leal 16(%edx),%edx .align 16 -.L103key_192a_cold: 
+.L093key_192a_cold: movaps %xmm2,%xmm5 -.L106key_192b_warm: +.L096key_192b_warm: shufps $16,%xmm0,%xmm4 movdqa %xmm2,%xmm3 xorps %xmm4,%xmm0 @@ -2238,90 +2035,56 @@ __aesni_set_encrypt_key: pxor %xmm3,%xmm2 ret .align 16 -.L104key_192b: +.L094key_192b: movaps %xmm0,%xmm3 shufps $68,%xmm0,%xmm5 movups %xmm5,(%edx) shufps $78,%xmm2,%xmm3 movups %xmm3,16(%edx) leal 32(%edx),%edx - jmp .L106key_192b_warm -.align 16 -.L10212rounds_alt: - movdqa 16(%ebx),%xmm5 - movdqa 32(%ebx),%xmm4 - movl $8,%ecx - movdqu %xmm0,-16(%edx) -.L107loop_key192: - movq %xmm2,(%edx) - movdqa %xmm2,%xmm1 -.byte 102,15,56,0,213 -.byte 102,15,56,221,212 - pslld $1,%xmm4 - leal 24(%edx),%edx - movdqa %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm3,%xmm0 - pshufd $255,%xmm0,%xmm3 - pxor %xmm1,%xmm3 - pslldq $4,%xmm1 - pxor %xmm1,%xmm3 - pxor %xmm2,%xmm0 - pxor %xmm3,%xmm2 - movdqu %xmm0,-16(%edx) - decl %ecx - jnz .L107loop_key192 - movl $11,%ecx - movl %ecx,32(%edx) - jmp .L100good_key + jmp .L096key_192b_warm .align 16 -.L09314rounds: +.L08714rounds: movups 16(%eax),%xmm2 - leal 16(%edx),%edx - cmpl $268435456,%ebp - je .L10814rounds_alt movl $13,%ecx + leal 16(%edx),%edx movups %xmm0,-32(%edx) movups %xmm2,-16(%edx) .byte 102,15,58,223,202,1 - call .L109key_256a_cold + call .L097key_256a_cold .byte 102,15,58,223,200,1 - call .L110key_256b + call .L098key_256b .byte 102,15,58,223,202,2 - call .L111key_256a + call .L099key_256a .byte 102,15,58,223,200,2 - call .L110key_256b + call .L098key_256b .byte 102,15,58,223,202,4 - call .L111key_256a + call .L099key_256a .byte 102,15,58,223,200,4 - call .L110key_256b + call .L098key_256b .byte 102,15,58,223,202,8 - call .L111key_256a + call .L099key_256a .byte 102,15,58,223,200,8 - call .L110key_256b + call .L098key_256b .byte 102,15,58,223,202,16 - call .L111key_256a + call .L099key_256a .byte 102,15,58,223,200,16 - call .L110key_256b + call .L098key_256b .byte 102,15,58,223,202,32 - call .L111key_256a + call .L099key_256a .byte 102,15,58,223,200,32 - call .L110key_256b + call .L098key_256b .byte 102,15,58,223,202,64 - call .L111key_256a + call .L099key_256a movups %xmm0,(%edx) movl %ecx,16(%edx) xorl %eax,%eax - jmp .L100good_key + ret .align 16 -.L111key_256a: +.L099key_256a: movups %xmm2,(%edx) leal 16(%edx),%edx -.L109key_256a_cold: +.L097key_256a_cold: shufps $16,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $140,%xmm0,%xmm4 @@ -2330,7 +2093,7 @@ __aesni_set_encrypt_key: xorps %xmm1,%xmm0 ret .align 16 -.L110key_256b: +.L098key_256b: movups %xmm0,(%edx) leal 16(%edx),%edx shufps $16,%xmm2,%xmm4 @@ -2340,70 +2103,13 @@ __aesni_set_encrypt_key: shufps $170,%xmm1,%xmm1 xorps %xmm1,%xmm2 ret -.align 16 -.L10814rounds_alt: - movdqa (%ebx),%xmm5 - movdqa 32(%ebx),%xmm4 - movl $7,%ecx - movdqu %xmm0,-32(%edx) - movdqa %xmm2,%xmm1 - movdqu %xmm2,-16(%edx) -.L112loop_key256: -.byte 102,15,56,0,213 -.byte 102,15,56,221,212 - movdqa %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm3,%xmm0 - pslld $1,%xmm4 - pxor %xmm2,%xmm0 - movdqu %xmm0,(%edx) - decl %ecx - jz .L113done_key256 - pshufd $255,%xmm0,%xmm2 - pxor %xmm3,%xmm3 -.byte 102,15,56,221,211 - movdqa %xmm1,%xmm3 - pslldq $4,%xmm1 - pxor %xmm1,%xmm3 - pslldq $4,%xmm1 - pxor %xmm1,%xmm3 - pslldq $4,%xmm1 - pxor %xmm3,%xmm1 - pxor %xmm1,%xmm2 - movdqu %xmm2,16(%edx) - leal 32(%edx),%edx - movdqa %xmm2,%xmm1 - jmp .L112loop_key256 -.L113done_key256: - movl $13,%ecx - movl %ecx,16(%edx) -.L100good_key: - pxor 
%xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - xorl %eax,%eax - popl %ebx - popl %ebp - ret .align 4 -.L091bad_pointer: +.L086bad_pointer: movl $-1,%eax - popl %ebx - popl %ebp ret .align 4 -.L095bad_keybits: - pxor %xmm0,%xmm0 +.L089bad_keybits: movl $-2,%eax - popl %ebx - popl %ebp ret .globl _aesni_set_encrypt_key .def _aesni_set_encrypt_key; .scl 2; .type 32; .endef @@ -2427,7 +2133,7 @@ _aesni_set_decrypt_key: movl 12(%esp),%edx shll $4,%ecx testl %eax,%eax - jnz .L114dec_key_ret + jnz .L100dec_key_ret leal 16(%edx,%ecx,1),%eax movups (%edx),%xmm0 movups (%eax),%xmm1 @@ -2435,7 +2141,7 @@ _aesni_set_decrypt_key: movups %xmm1,(%edx) leal 16(%edx),%edx leal -16(%eax),%eax -.L115dec_key_inverse: +.L101dec_key_inverse: movups (%edx),%xmm0 movups (%eax),%xmm1 .byte 102,15,56,219,192 @@ -2445,24 +2151,15 @@ _aesni_set_decrypt_key: movups %xmm0,16(%eax) movups %xmm1,-16(%edx) cmpl %edx,%eax - ja .L115dec_key_inverse + ja .L101dec_key_inverse movups (%edx),%xmm0 .byte 102,15,56,219,192 movups %xmm0,(%edx) - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 xorl %eax,%eax -.L114dec_key_ret: +.L100dec_key_ret: ret -.align 64 -.Lkey_const: -.long 202313229,202313229,202313229,202313229 -.long 67569157,67569157,67569157,67569157 -.long 1,1,1,1 -.long 27,27,27,27 .byte 65,69,83,32,102,111,114,32,73,110,116,101,108,32,65,69 .byte 83,45,78,73,44,32,67,82,89,80,84,79,71,65,77,83 .byte 32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115 .byte 115,108,46,111,114,103,62,0 -.comm __gnutls_x86_cpuid_s,16 diff --git a/lib/accelerated/x86/elf/aesni-x86.s b/lib/accelerated/x86/elf/aesni-x86.s index 73d623cbda..5d70f2568f 100644 --- a/lib/accelerated/x86/elf/aesni-x86.s +++ b/lib/accelerated/x86/elf/aesni-x86.s @@ -60,10 +60,7 @@ aesni_encrypt: leal 16(%edx),%edx jnz .L000enc1_loop_1 .byte 102,15,56,221,209 - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 movups %xmm2,(%eax) - pxor %xmm2,%xmm2 ret .size aesni_encrypt,.-.L_aesni_encrypt_begin .globl aesni_decrypt @@ -87,90 +84,32 @@ aesni_decrypt: leal 16(%edx),%edx jnz .L001dec1_loop_2 .byte 102,15,56,223,209 - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 movups %xmm2,(%eax) - pxor %xmm2,%xmm2 ret .size aesni_decrypt,.-.L_aesni_decrypt_begin -.type _aesni_encrypt2,@function -.align 16 -_aesni_encrypt2: - movups (%edx),%xmm0 - shll $4,%ecx - movups 16(%edx),%xmm1 - xorps %xmm0,%xmm2 - pxor %xmm0,%xmm3 - movups 32(%edx),%xmm0 - leal 32(%edx,%ecx,1),%edx - negl %ecx - addl $16,%ecx -.L002enc2_loop: -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx -.byte 102,15,56,220,208 -.byte 102,15,56,220,216 - movups -16(%edx,%ecx,1),%xmm0 - jnz .L002enc2_loop -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,221,208 -.byte 102,15,56,221,216 - ret -.size _aesni_encrypt2,.-_aesni_encrypt2 -.type _aesni_decrypt2,@function -.align 16 -_aesni_decrypt2: - movups (%edx),%xmm0 - shll $4,%ecx - movups 16(%edx),%xmm1 - xorps %xmm0,%xmm2 - pxor %xmm0,%xmm3 - movups 32(%edx),%xmm0 - leal 32(%edx,%ecx,1),%edx - negl %ecx - addl $16,%ecx -.L003dec2_loop: -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx -.byte 102,15,56,222,208 -.byte 102,15,56,222,216 - movups -16(%edx,%ecx,1),%xmm0 - jnz .L003dec2_loop -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,223,208 -.byte 102,15,56,223,216 - ret -.size _aesni_decrypt2,.-_aesni_decrypt2 .type _aesni_encrypt3,@function .align 16 _aesni_encrypt3: movups (%edx),%xmm0 - shll $4,%ecx + shrl 
$1,%ecx movups 16(%edx),%xmm1 + leal 32(%edx),%edx xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 - movups 32(%edx),%xmm0 - leal 32(%edx,%ecx,1),%edx - negl %ecx - addl $16,%ecx -.L004enc3_loop: + movups (%edx),%xmm0 +.L002enc3_loop: .byte 102,15,56,220,209 .byte 102,15,56,220,217 + decl %ecx .byte 102,15,56,220,225 - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx + movups 16(%edx),%xmm1 .byte 102,15,56,220,208 .byte 102,15,56,220,216 + leal 32(%edx),%edx .byte 102,15,56,220,224 - movups -16(%edx,%ecx,1),%xmm0 - jnz .L004enc3_loop + movups (%edx),%xmm0 + jnz .L002enc3_loop .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 @@ -183,26 +122,25 @@ _aesni_encrypt3: .align 16 _aesni_decrypt3: movups (%edx),%xmm0 - shll $4,%ecx + shrl $1,%ecx movups 16(%edx),%xmm1 + leal 32(%edx),%edx xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 - movups 32(%edx),%xmm0 - leal 32(%edx,%ecx,1),%edx - negl %ecx - addl $16,%ecx -.L005dec3_loop: + movups (%edx),%xmm0 +.L003dec3_loop: .byte 102,15,56,222,209 .byte 102,15,56,222,217 + decl %ecx .byte 102,15,56,222,225 - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx + movups 16(%edx),%xmm1 .byte 102,15,56,222,208 .byte 102,15,56,222,216 + leal 32(%edx),%edx .byte 102,15,56,222,224 - movups -16(%edx,%ecx,1),%xmm0 - jnz .L005dec3_loop + movups (%edx),%xmm0 + jnz .L003dec3_loop .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 @@ -216,29 +154,27 @@ _aesni_decrypt3: _aesni_encrypt4: movups (%edx),%xmm0 movups 16(%edx),%xmm1 - shll $4,%ecx + shrl $1,%ecx + leal 32(%edx),%edx xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 pxor %xmm0,%xmm5 - movups 32(%edx),%xmm0 - leal 32(%edx,%ecx,1),%edx - negl %ecx -.byte 15,31,64,0 - addl $16,%ecx -.L006enc4_loop: + movups (%edx),%xmm0 +.L004enc4_loop: .byte 102,15,56,220,209 .byte 102,15,56,220,217 + decl %ecx .byte 102,15,56,220,225 .byte 102,15,56,220,233 - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx + movups 16(%edx),%xmm1 .byte 102,15,56,220,208 .byte 102,15,56,220,216 + leal 32(%edx),%edx .byte 102,15,56,220,224 .byte 102,15,56,220,232 - movups -16(%edx,%ecx,1),%xmm0 - jnz .L006enc4_loop + movups (%edx),%xmm0 + jnz .L004enc4_loop .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 @@ -254,29 +190,27 @@ _aesni_encrypt4: _aesni_decrypt4: movups (%edx),%xmm0 movups 16(%edx),%xmm1 - shll $4,%ecx + shrl $1,%ecx + leal 32(%edx),%edx xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 pxor %xmm0,%xmm5 - movups 32(%edx),%xmm0 - leal 32(%edx,%ecx,1),%edx - negl %ecx -.byte 15,31,64,0 - addl $16,%ecx -.L007dec4_loop: + movups (%edx),%xmm0 +.L005dec4_loop: .byte 102,15,56,222,209 .byte 102,15,56,222,217 + decl %ecx .byte 102,15,56,222,225 .byte 102,15,56,222,233 - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx + movups 16(%edx),%xmm1 .byte 102,15,56,222,208 .byte 102,15,56,222,216 + leal 32(%edx),%edx .byte 102,15,56,222,224 .byte 102,15,56,222,232 - movups -16(%edx,%ecx,1),%xmm0 - jnz .L007dec4_loop + movups (%edx),%xmm0 + jnz .L005dec4_loop .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 @@ -291,42 +225,45 @@ _aesni_decrypt4: .align 16 _aesni_encrypt6: movups (%edx),%xmm0 - shll $4,%ecx + shrl $1,%ecx movups 16(%edx),%xmm1 + leal 32(%edx),%edx xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 - pxor %xmm0,%xmm4 .byte 102,15,56,220,209 - pxor %xmm0,%xmm5 - pxor %xmm0,%xmm6 + pxor %xmm0,%xmm4 .byte 102,15,56,220,217 - leal 32(%edx,%ecx,1),%edx - negl %ecx + pxor %xmm0,%xmm5 + decl %ecx .byte 102,15,56,220,225 + pxor %xmm0,%xmm6 +.byte 102,15,56,220,233 pxor 
%xmm0,%xmm7 - movups (%edx,%ecx,1),%xmm0 - addl $16,%ecx - jmp .L008_aesni_encrypt6_inner +.byte 102,15,56,220,241 + movups (%edx),%xmm0 +.byte 102,15,56,220,249 + jmp .L_aesni_encrypt6_enter .align 16 -.L009enc6_loop: +.L006enc6_loop: .byte 102,15,56,220,209 .byte 102,15,56,220,217 + decl %ecx .byte 102,15,56,220,225 -.L008_aesni_encrypt6_inner: .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 +.align 16 .L_aesni_encrypt6_enter: - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx + movups 16(%edx),%xmm1 .byte 102,15,56,220,208 .byte 102,15,56,220,216 + leal 32(%edx),%edx .byte 102,15,56,220,224 .byte 102,15,56,220,232 .byte 102,15,56,220,240 .byte 102,15,56,220,248 - movups -16(%edx,%ecx,1),%xmm0 - jnz .L009enc6_loop + movups (%edx),%xmm0 + jnz .L006enc6_loop .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 @@ -345,42 +282,45 @@ _aesni_encrypt6: .align 16 _aesni_decrypt6: movups (%edx),%xmm0 - shll $4,%ecx + shrl $1,%ecx movups 16(%edx),%xmm1 + leal 32(%edx),%edx xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 - pxor %xmm0,%xmm4 .byte 102,15,56,222,209 - pxor %xmm0,%xmm5 - pxor %xmm0,%xmm6 + pxor %xmm0,%xmm4 .byte 102,15,56,222,217 - leal 32(%edx,%ecx,1),%edx - negl %ecx + pxor %xmm0,%xmm5 + decl %ecx .byte 102,15,56,222,225 + pxor %xmm0,%xmm6 +.byte 102,15,56,222,233 pxor %xmm0,%xmm7 - movups (%edx,%ecx,1),%xmm0 - addl $16,%ecx - jmp .L010_aesni_decrypt6_inner +.byte 102,15,56,222,241 + movups (%edx),%xmm0 +.byte 102,15,56,222,249 + jmp .L_aesni_decrypt6_enter .align 16 -.L011dec6_loop: +.L007dec6_loop: .byte 102,15,56,222,209 .byte 102,15,56,222,217 + decl %ecx .byte 102,15,56,222,225 -.L010_aesni_decrypt6_inner: .byte 102,15,56,222,233 .byte 102,15,56,222,241 .byte 102,15,56,222,249 +.align 16 .L_aesni_decrypt6_enter: - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx + movups 16(%edx),%xmm1 .byte 102,15,56,222,208 .byte 102,15,56,222,216 + leal 32(%edx),%edx .byte 102,15,56,222,224 .byte 102,15,56,222,232 .byte 102,15,56,222,240 .byte 102,15,56,222,248 - movups -16(%edx,%ecx,1),%xmm0 - jnz .L011dec6_loop + movups (%edx),%xmm0 + jnz .L007dec6_loop .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 @@ -410,14 +350,14 @@ aesni_ecb_encrypt: movl 32(%esp),%edx movl 36(%esp),%ebx andl $-16,%eax - jz .L012ecb_ret + jz .L008ecb_ret movl 240(%edx),%ecx testl %ebx,%ebx - jz .L013ecb_decrypt + jz .L009ecb_decrypt movl %edx,%ebp movl %ecx,%ebx cmpl $96,%eax - jb .L014ecb_enc_tail + jb .L010ecb_enc_tail movdqu (%esi),%xmm2 movdqu 16(%esi),%xmm3 movdqu 32(%esi),%xmm4 @@ -426,9 +366,9 @@ aesni_ecb_encrypt: movdqu 80(%esi),%xmm7 leal 96(%esi),%esi subl $96,%eax - jmp .L015ecb_enc_loop6_enter + jmp .L011ecb_enc_loop6_enter .align 16 -.L016ecb_enc_loop6: +.L012ecb_enc_loop6: movups %xmm2,(%edi) movdqu (%esi),%xmm2 movups %xmm3,16(%edi) @@ -443,12 +383,12 @@ aesni_ecb_encrypt: leal 96(%edi),%edi movdqu 80(%esi),%xmm7 leal 96(%esi),%esi -.L015ecb_enc_loop6_enter: +.L011ecb_enc_loop6_enter: call _aesni_encrypt6 movl %ebp,%edx movl %ebx,%ecx subl $96,%eax - jnc .L016ecb_enc_loop6 + jnc .L012ecb_enc_loop6 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) @@ -457,18 +397,18 @@ aesni_ecb_encrypt: movups %xmm7,80(%edi) leal 96(%edi),%edi addl $96,%eax - jz .L012ecb_ret -.L014ecb_enc_tail: + jz .L008ecb_ret +.L010ecb_enc_tail: movups (%esi),%xmm2 cmpl $32,%eax - jb .L017ecb_enc_one + jb .L013ecb_enc_one movups 16(%esi),%xmm3 - je .L018ecb_enc_two + je .L014ecb_enc_two movups 32(%esi),%xmm4 cmpl $64,%eax - jb .L019ecb_enc_three + jb .L015ecb_enc_three movups 
48(%esi),%xmm5 - je .L020ecb_enc_four + je .L016ecb_enc_four movups 64(%esi),%xmm6 xorps %xmm7,%xmm7 call _aesni_encrypt6 @@ -477,49 +417,50 @@ aesni_ecb_encrypt: movups %xmm4,32(%edi) movups %xmm5,48(%edi) movups %xmm6,64(%edi) - jmp .L012ecb_ret + jmp .L008ecb_ret .align 16 -.L017ecb_enc_one: +.L013ecb_enc_one: movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 -.L021enc1_loop_3: +.L017enc1_loop_3: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz .L021enc1_loop_3 + jnz .L017enc1_loop_3 .byte 102,15,56,221,209 movups %xmm2,(%edi) - jmp .L012ecb_ret + jmp .L008ecb_ret .align 16 -.L018ecb_enc_two: - call _aesni_encrypt2 +.L014ecb_enc_two: + xorps %xmm4,%xmm4 + call _aesni_encrypt3 movups %xmm2,(%edi) movups %xmm3,16(%edi) - jmp .L012ecb_ret + jmp .L008ecb_ret .align 16 -.L019ecb_enc_three: +.L015ecb_enc_three: call _aesni_encrypt3 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) - jmp .L012ecb_ret + jmp .L008ecb_ret .align 16 -.L020ecb_enc_four: +.L016ecb_enc_four: call _aesni_encrypt4 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups %xmm5,48(%edi) - jmp .L012ecb_ret + jmp .L008ecb_ret .align 16 -.L013ecb_decrypt: +.L009ecb_decrypt: movl %edx,%ebp movl %ecx,%ebx cmpl $96,%eax - jb .L022ecb_dec_tail + jb .L018ecb_dec_tail movdqu (%esi),%xmm2 movdqu 16(%esi),%xmm3 movdqu 32(%esi),%xmm4 @@ -528,9 +469,9 @@ aesni_ecb_encrypt: movdqu 80(%esi),%xmm7 leal 96(%esi),%esi subl $96,%eax - jmp .L023ecb_dec_loop6_enter + jmp .L019ecb_dec_loop6_enter .align 16 -.L024ecb_dec_loop6: +.L020ecb_dec_loop6: movups %xmm2,(%edi) movdqu (%esi),%xmm2 movups %xmm3,16(%edi) @@ -545,12 +486,12 @@ aesni_ecb_encrypt: leal 96(%edi),%edi movdqu 80(%esi),%xmm7 leal 96(%esi),%esi -.L023ecb_dec_loop6_enter: +.L019ecb_dec_loop6_enter: call _aesni_decrypt6 movl %ebp,%edx movl %ebx,%ecx subl $96,%eax - jnc .L024ecb_dec_loop6 + jnc .L020ecb_dec_loop6 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) @@ -559,18 +500,18 @@ aesni_ecb_encrypt: movups %xmm7,80(%edi) leal 96(%edi),%edi addl $96,%eax - jz .L012ecb_ret -.L022ecb_dec_tail: + jz .L008ecb_ret +.L018ecb_dec_tail: movups (%esi),%xmm2 cmpl $32,%eax - jb .L025ecb_dec_one + jb .L021ecb_dec_one movups 16(%esi),%xmm3 - je .L026ecb_dec_two + je .L022ecb_dec_two movups 32(%esi),%xmm4 cmpl $64,%eax - jb .L027ecb_dec_three + jb .L023ecb_dec_three movups 48(%esi),%xmm5 - je .L028ecb_dec_four + je .L024ecb_dec_four movups 64(%esi),%xmm6 xorps %xmm7,%xmm7 call _aesni_decrypt6 @@ -579,51 +520,44 @@ aesni_ecb_encrypt: movups %xmm4,32(%edi) movups %xmm5,48(%edi) movups %xmm6,64(%edi) - jmp .L012ecb_ret + jmp .L008ecb_ret .align 16 -.L025ecb_dec_one: +.L021ecb_dec_one: movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 -.L029dec1_loop_4: +.L025dec1_loop_4: .byte 102,15,56,222,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz .L029dec1_loop_4 + jnz .L025dec1_loop_4 .byte 102,15,56,223,209 movups %xmm2,(%edi) - jmp .L012ecb_ret + jmp .L008ecb_ret .align 16 -.L026ecb_dec_two: - call _aesni_decrypt2 +.L022ecb_dec_two: + xorps %xmm4,%xmm4 + call _aesni_decrypt3 movups %xmm2,(%edi) movups %xmm3,16(%edi) - jmp .L012ecb_ret + jmp .L008ecb_ret .align 16 -.L027ecb_dec_three: +.L023ecb_dec_three: call _aesni_decrypt3 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) - jmp .L012ecb_ret + jmp .L008ecb_ret .align 16 -.L028ecb_dec_four: +.L024ecb_dec_four: call _aesni_decrypt4 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups 
%xmm5,48(%edi) -.L012ecb_ret: - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - pxor %xmm6,%xmm6 - pxor %xmm7,%xmm7 +.L008ecb_ret: popl %edi popl %esi popl %ebx @@ -662,56 +596,48 @@ aesni_ccm64_encrypt_blocks: movl %ebp,20(%esp) movl %ebp,24(%esp) movl %ebp,28(%esp) - shll $4,%ecx - movl $16,%ebx + shrl $1,%ecx leal (%edx),%ebp movdqa (%esp),%xmm5 movdqa %xmm7,%xmm2 - leal 32(%edx,%ecx,1),%edx - subl %ecx,%ebx + movl %ecx,%ebx .byte 102,15,56,0,253 -.L030ccm64_enc_outer: +.L026ccm64_enc_outer: movups (%ebp),%xmm0 movl %ebx,%ecx movups (%esi),%xmm6 xorps %xmm0,%xmm2 movups 16(%ebp),%xmm1 xorps %xmm6,%xmm0 + leal 32(%ebp),%edx xorps %xmm0,%xmm3 - movups 32(%ebp),%xmm0 -.L031ccm64_enc2_loop: + movups (%edx),%xmm0 +.L027ccm64_enc2_loop: .byte 102,15,56,220,209 + decl %ecx .byte 102,15,56,220,217 - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx + movups 16(%edx),%xmm1 .byte 102,15,56,220,208 + leal 32(%edx),%edx .byte 102,15,56,220,216 - movups -16(%edx,%ecx,1),%xmm0 - jnz .L031ccm64_enc2_loop + movups (%edx),%xmm0 + jnz .L027ccm64_enc2_loop .byte 102,15,56,220,209 .byte 102,15,56,220,217 paddq 16(%esp),%xmm7 - decl %eax .byte 102,15,56,221,208 .byte 102,15,56,221,216 + decl %eax leal 16(%esi),%esi xorps %xmm2,%xmm6 movdqa %xmm7,%xmm2 movups %xmm6,(%edi) -.byte 102,15,56,0,213 leal 16(%edi),%edi - jnz .L030ccm64_enc_outer +.byte 102,15,56,0,213 + jnz .L026ccm64_enc_outer movl 48(%esp),%esp movl 40(%esp),%edi movups %xmm3,(%edi) - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - pxor %xmm6,%xmm6 - pxor %xmm7,%xmm7 popl %edi popl %esi popl %ebx @@ -759,82 +685,71 @@ aesni_ccm64_decrypt_blocks: movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 -.L032enc1_loop_5: +.L028enc1_loop_5: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz .L032enc1_loop_5 + jnz .L028enc1_loop_5 .byte 102,15,56,221,209 - shll $4,%ebx - movl $16,%ecx movups (%esi),%xmm6 paddq 16(%esp),%xmm7 leal 16(%esi),%esi - subl %ebx,%ecx - leal 32(%ebp,%ebx,1),%edx - movl %ecx,%ebx - jmp .L033ccm64_dec_outer + jmp .L029ccm64_dec_outer .align 16 -.L033ccm64_dec_outer: +.L029ccm64_dec_outer: xorps %xmm2,%xmm6 movdqa %xmm7,%xmm2 + movl %ebx,%ecx movups %xmm6,(%edi) leal 16(%edi),%edi .byte 102,15,56,0,213 subl $1,%eax - jz .L034ccm64_dec_break + jz .L030ccm64_dec_break movups (%ebp),%xmm0 - movl %ebx,%ecx + shrl $1,%ecx movups 16(%ebp),%xmm1 xorps %xmm0,%xmm6 + leal 32(%ebp),%edx xorps %xmm0,%xmm2 xorps %xmm6,%xmm3 - movups 32(%ebp),%xmm0 -.L035ccm64_dec2_loop: + movups (%edx),%xmm0 +.L031ccm64_dec2_loop: .byte 102,15,56,220,209 + decl %ecx .byte 102,15,56,220,217 - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx + movups 16(%edx),%xmm1 .byte 102,15,56,220,208 + leal 32(%edx),%edx .byte 102,15,56,220,216 - movups -16(%edx,%ecx,1),%xmm0 - jnz .L035ccm64_dec2_loop + movups (%edx),%xmm0 + jnz .L031ccm64_dec2_loop movups (%esi),%xmm6 paddq 16(%esp),%xmm7 .byte 102,15,56,220,209 .byte 102,15,56,220,217 + leal 16(%esi),%esi .byte 102,15,56,221,208 .byte 102,15,56,221,216 - leal 16(%esi),%esi - jmp .L033ccm64_dec_outer + jmp .L029ccm64_dec_outer .align 16 -.L034ccm64_dec_break: - movl 240(%ebp),%ecx +.L030ccm64_dec_break: movl %ebp,%edx movups (%edx),%xmm0 movups 16(%edx),%xmm1 xorps %xmm0,%xmm6 leal 32(%edx),%edx xorps %xmm6,%xmm3 -.L036enc1_loop_6: +.L032enc1_loop_6: .byte 102,15,56,220,217 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz .L036enc1_loop_6 + jnz .L032enc1_loop_6 
.byte 102,15,56,221,217 movl 48(%esp),%esp movl 40(%esp),%edi movups %xmm3,(%edi) - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - pxor %xmm6,%xmm6 - pxor %xmm7,%xmm7 popl %edi popl %esi popl %ebx @@ -860,7 +775,7 @@ aesni_ctr32_encrypt_blocks: andl $-16,%esp movl %ebp,80(%esp) cmpl $1,%eax - je .L037ctr32_one_shortcut + je .L033ctr32_one_shortcut movdqu (%ebx),%xmm7 movl $202182159,(%esp) movl $134810123,4(%esp) @@ -876,59 +791,63 @@ aesni_ctr32_encrypt_blocks: .byte 102,15,58,34,253,3 movl 240(%edx),%ecx bswap %ebx - pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 + pxor %xmm0,%xmm0 movdqa (%esp),%xmm2 -.byte 102,15,58,34,195,0 +.byte 102,15,58,34,203,0 leal 3(%ebx),%ebp -.byte 102,15,58,34,205,0 +.byte 102,15,58,34,197,0 incl %ebx -.byte 102,15,58,34,195,1 +.byte 102,15,58,34,203,1 incl %ebp -.byte 102,15,58,34,205,1 +.byte 102,15,58,34,197,1 incl %ebx -.byte 102,15,58,34,195,2 +.byte 102,15,58,34,203,2 incl %ebp -.byte 102,15,58,34,205,2 - movdqa %xmm0,48(%esp) -.byte 102,15,56,0,194 - movdqu (%edx),%xmm6 - movdqa %xmm1,64(%esp) +.byte 102,15,58,34,197,2 + movdqa %xmm1,48(%esp) .byte 102,15,56,0,202 - pshufd $192,%xmm0,%xmm2 - pshufd $128,%xmm0,%xmm3 + movdqa %xmm0,64(%esp) +.byte 102,15,56,0,194 + pshufd $192,%xmm1,%xmm2 + pshufd $128,%xmm1,%xmm3 cmpl $6,%eax - jb .L038ctr32_tail - pxor %xmm6,%xmm7 - shll $4,%ecx - movl $16,%ebx + jb .L034ctr32_tail movdqa %xmm7,32(%esp) + shrl $1,%ecx movl %edx,%ebp - subl %ecx,%ebx - leal 32(%edx,%ecx,1),%edx + movl %ecx,%ebx subl $6,%eax - jmp .L039ctr32_loop6 -.align 16 -.L039ctr32_loop6: - pshufd $64,%xmm0,%xmm4 - movdqa 32(%esp),%xmm0 - pshufd $192,%xmm1,%xmm5 + jmp .L035ctr32_loop6 +.align 16 +.L035ctr32_loop6: + pshufd $64,%xmm1,%xmm4 + movdqa 32(%esp),%xmm1 + pshufd $192,%xmm0,%xmm5 + por %xmm1,%xmm2 + pshufd $128,%xmm0,%xmm6 + por %xmm1,%xmm3 + pshufd $64,%xmm0,%xmm7 + por %xmm1,%xmm4 + por %xmm1,%xmm5 + por %xmm1,%xmm6 + por %xmm1,%xmm7 + movups (%ebp),%xmm0 + movups 16(%ebp),%xmm1 + leal 32(%ebp),%edx + decl %ecx pxor %xmm0,%xmm2 - pshufd $128,%xmm1,%xmm6 pxor %xmm0,%xmm3 - pshufd $64,%xmm1,%xmm7 - movups 16(%ebp),%xmm1 - pxor %xmm0,%xmm4 - pxor %xmm0,%xmm5 .byte 102,15,56,220,209 - pxor %xmm0,%xmm6 - pxor %xmm0,%xmm7 + pxor %xmm0,%xmm4 .byte 102,15,56,220,217 - movups 32(%ebp),%xmm0 - movl %ebx,%ecx + pxor %xmm0,%xmm5 .byte 102,15,56,220,225 + pxor %xmm0,%xmm6 .byte 102,15,56,220,233 + pxor %xmm0,%xmm7 .byte 102,15,56,220,241 + movups (%edx),%xmm0 .byte 102,15,56,220,249 call .L_aesni_encrypt6_enter movups (%esi),%xmm1 @@ -939,51 +858,51 @@ aesni_ctr32_encrypt_blocks: movups %xmm2,(%edi) movdqa 16(%esp),%xmm0 xorps %xmm1,%xmm4 - movdqa 64(%esp),%xmm1 + movdqa 48(%esp),%xmm1 movups %xmm3,16(%edi) movups %xmm4,32(%edi) paddd %xmm0,%xmm1 - paddd 48(%esp),%xmm0 + paddd 64(%esp),%xmm0 movdqa (%esp),%xmm2 movups 48(%esi),%xmm3 movups 64(%esi),%xmm4 xorps %xmm3,%xmm5 movups 80(%esi),%xmm3 leal 96(%esi),%esi - movdqa %xmm0,48(%esp) -.byte 102,15,56,0,194 + movdqa %xmm1,48(%esp) +.byte 102,15,56,0,202 xorps %xmm4,%xmm6 movups %xmm5,48(%edi) xorps %xmm3,%xmm7 - movdqa %xmm1,64(%esp) -.byte 102,15,56,0,202 + movdqa %xmm0,64(%esp) +.byte 102,15,56,0,194 movups %xmm6,64(%edi) - pshufd $192,%xmm0,%xmm2 + pshufd $192,%xmm1,%xmm2 movups %xmm7,80(%edi) leal 96(%edi),%edi - pshufd $128,%xmm0,%xmm3 + movl %ebx,%ecx + pshufd $128,%xmm1,%xmm3 subl $6,%eax - jnc .L039ctr32_loop6 + jnc .L035ctr32_loop6 addl $6,%eax - jz .L040ctr32_ret - movdqu (%ebp),%xmm7 + jz .L036ctr32_ret movl %ebp,%edx - pxor 
32(%esp),%xmm7 - movl 240(%ebp),%ecx -.L038ctr32_tail: + leal 1(,%ecx,2),%ecx + movdqa 32(%esp),%xmm7 +.L034ctr32_tail: por %xmm7,%xmm2 cmpl $2,%eax - jb .L041ctr32_one - pshufd $64,%xmm0,%xmm4 + jb .L037ctr32_one + pshufd $64,%xmm1,%xmm4 por %xmm7,%xmm3 - je .L042ctr32_two - pshufd $192,%xmm1,%xmm5 + je .L038ctr32_two + pshufd $192,%xmm0,%xmm5 por %xmm7,%xmm4 cmpl $4,%eax - jb .L043ctr32_three - pshufd $128,%xmm1,%xmm6 + jb .L039ctr32_three + pshufd $128,%xmm0,%xmm6 por %xmm7,%xmm5 - je .L044ctr32_four + je .L040ctr32_four por %xmm7,%xmm6 call _aesni_encrypt6 movups (%esi),%xmm1 @@ -1001,39 +920,39 @@ aesni_ctr32_encrypt_blocks: movups %xmm4,32(%edi) movups %xmm5,48(%edi) movups %xmm6,64(%edi) - jmp .L040ctr32_ret + jmp .L036ctr32_ret .align 16 -.L037ctr32_one_shortcut: +.L033ctr32_one_shortcut: movups (%ebx),%xmm2 movl 240(%edx),%ecx -.L041ctr32_one: +.L037ctr32_one: movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 -.L045enc1_loop_7: +.L041enc1_loop_7: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz .L045enc1_loop_7 + jnz .L041enc1_loop_7 .byte 102,15,56,221,209 movups (%esi),%xmm6 xorps %xmm2,%xmm6 movups %xmm6,(%edi) - jmp .L040ctr32_ret + jmp .L036ctr32_ret .align 16 -.L042ctr32_two: - call _aesni_encrypt2 +.L038ctr32_two: + call _aesni_encrypt3 movups (%esi),%xmm5 movups 16(%esi),%xmm6 xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 movups %xmm2,(%edi) movups %xmm3,16(%edi) - jmp .L040ctr32_ret + jmp .L036ctr32_ret .align 16 -.L043ctr32_three: +.L039ctr32_three: call _aesni_encrypt3 movups (%esi),%xmm5 movups 16(%esi),%xmm6 @@ -1044,9 +963,9 @@ aesni_ctr32_encrypt_blocks: xorps %xmm7,%xmm4 movups %xmm3,16(%edi) movups %xmm4,32(%edi) - jmp .L040ctr32_ret + jmp .L036ctr32_ret .align 16 -.L044ctr32_four: +.L040ctr32_four: call _aesni_encrypt4 movups (%esi),%xmm6 movups 16(%esi),%xmm7 @@ -1060,18 +979,7 @@ aesni_ctr32_encrypt_blocks: xorps %xmm0,%xmm5 movups %xmm4,32(%edi) movups %xmm5,48(%edi) -.L040ctr32_ret: - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - movdqa %xmm0,32(%esp) - pxor %xmm5,%xmm5 - movdqa %xmm0,48(%esp) - pxor %xmm6,%xmm6 - movdqa %xmm0,64(%esp) - pxor %xmm7,%xmm7 +.L036ctr32_ret: movl 80(%esp),%esp popl %edi popl %esi @@ -1096,12 +1004,12 @@ aesni_xts_encrypt: movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 -.L046enc1_loop_8: +.L042enc1_loop_8: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz .L046enc1_loop_8 + jnz .L042enc1_loop_8 .byte 102,15,56,221,209 movl 20(%esp),%esi movl 24(%esp),%edi @@ -1125,14 +1033,12 @@ aesni_xts_encrypt: movl %edx,%ebp movl %ecx,%ebx subl $96,%eax - jc .L047xts_enc_short - shll $4,%ecx - movl $16,%ebx - subl %ecx,%ebx - leal 32(%edx,%ecx,1),%edx - jmp .L048xts_enc_loop6 + jc .L043xts_enc_short + shrl $1,%ecx + movl %ecx,%ebx + jmp .L044xts_enc_loop6 .align 16 -.L048xts_enc_loop6: +.L044xts_enc_loop6: pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,(%esp) @@ -1168,7 +1074,6 @@ aesni_xts_encrypt: pand %xmm3,%xmm7 movups (%esi),%xmm2 pxor %xmm1,%xmm7 - movl %ebx,%ecx movdqu 16(%esi),%xmm3 xorps %xmm0,%xmm2 movdqu 32(%esi),%xmm4 @@ -1184,17 +1089,19 @@ aesni_xts_encrypt: movdqa %xmm7,80(%esp) pxor %xmm1,%xmm7 movups 16(%ebp),%xmm1 + leal 32(%ebp),%edx pxor 16(%esp),%xmm3 - pxor 32(%esp),%xmm4 .byte 102,15,56,220,209 - pxor 48(%esp),%xmm5 - pxor 64(%esp),%xmm6 + pxor 32(%esp),%xmm4 .byte 102,15,56,220,217 - pxor %xmm0,%xmm7 - movups 32(%ebp),%xmm0 + pxor 48(%esp),%xmm5 + decl %ecx .byte 
102,15,56,220,225 + pxor 64(%esp),%xmm6 .byte 102,15,56,220,233 + pxor %xmm0,%xmm7 .byte 102,15,56,220,241 + movups (%edx),%xmm0 .byte 102,15,56,220,249 call .L_aesni_encrypt6_enter movdqa 80(%esp),%xmm1 @@ -1219,25 +1126,26 @@ aesni_xts_encrypt: paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 + movl %ebx,%ecx pxor %xmm2,%xmm1 subl $96,%eax - jnc .L048xts_enc_loop6 - movl 240(%ebp),%ecx + jnc .L044xts_enc_loop6 + leal 1(,%ecx,2),%ecx movl %ebp,%edx movl %ecx,%ebx -.L047xts_enc_short: +.L043xts_enc_short: addl $96,%eax - jz .L049xts_enc_done6x + jz .L045xts_enc_done6x movdqa %xmm1,%xmm5 cmpl $32,%eax - jb .L050xts_enc_one + jb .L046xts_enc_one pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 - je .L051xts_enc_two + je .L047xts_enc_two pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,%xmm6 @@ -1246,7 +1154,7 @@ aesni_xts_encrypt: pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 cmpl $64,%eax - jb .L052xts_enc_three + jb .L048xts_enc_three pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,%xmm7 @@ -1256,7 +1164,7 @@ aesni_xts_encrypt: pxor %xmm2,%xmm1 movdqa %xmm5,(%esp) movdqa %xmm6,16(%esp) - je .L053xts_enc_four + je .L049xts_enc_four movdqa %xmm7,32(%esp) pshufd $19,%xmm0,%xmm7 movdqa %xmm1,48(%esp) @@ -1288,9 +1196,9 @@ aesni_xts_encrypt: movups %xmm5,48(%edi) movups %xmm6,64(%edi) leal 80(%edi),%edi - jmp .L054xts_enc_done + jmp .L050xts_enc_done .align 16 -.L050xts_enc_one: +.L046xts_enc_one: movups (%esi),%xmm2 leal 16(%esi),%esi xorps %xmm5,%xmm2 @@ -1298,36 +1206,37 @@ aesni_xts_encrypt: movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 -.L055enc1_loop_9: +.L051enc1_loop_9: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz .L055enc1_loop_9 + jnz .L051enc1_loop_9 .byte 102,15,56,221,209 xorps %xmm5,%xmm2 movups %xmm2,(%edi) leal 16(%edi),%edi movdqa %xmm5,%xmm1 - jmp .L054xts_enc_done + jmp .L050xts_enc_done .align 16 -.L051xts_enc_two: +.L047xts_enc_two: movaps %xmm1,%xmm6 movups (%esi),%xmm2 movups 16(%esi),%xmm3 leal 32(%esi),%esi xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 - call _aesni_encrypt2 + xorps %xmm4,%xmm4 + call _aesni_encrypt3 xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 movups %xmm2,(%edi) movups %xmm3,16(%edi) leal 32(%edi),%edi movdqa %xmm6,%xmm1 - jmp .L054xts_enc_done + jmp .L050xts_enc_done .align 16 -.L052xts_enc_three: +.L048xts_enc_three: movaps %xmm1,%xmm7 movups (%esi),%xmm2 movups 16(%esi),%xmm3 @@ -1345,9 +1254,9 @@ aesni_xts_encrypt: movups %xmm4,32(%edi) leal 48(%edi),%edi movdqa %xmm7,%xmm1 - jmp .L054xts_enc_done + jmp .L050xts_enc_done .align 16 -.L053xts_enc_four: +.L049xts_enc_four: movaps %xmm1,%xmm6 movups (%esi),%xmm2 movups 16(%esi),%xmm3 @@ -1369,28 +1278,28 @@ aesni_xts_encrypt: movups %xmm5,48(%edi) leal 64(%edi),%edi movdqa %xmm6,%xmm1 - jmp .L054xts_enc_done + jmp .L050xts_enc_done .align 16 -.L049xts_enc_done6x: +.L045xts_enc_done6x: movl 112(%esp),%eax andl $15,%eax - jz .L056xts_enc_ret + jz .L052xts_enc_ret movdqa %xmm1,%xmm5 movl %eax,112(%esp) - jmp .L057xts_enc_steal + jmp .L053xts_enc_steal .align 16 -.L054xts_enc_done: +.L050xts_enc_done: movl 112(%esp),%eax pxor %xmm0,%xmm0 andl $15,%eax - jz .L056xts_enc_ret + jz .L052xts_enc_ret pcmpgtd %xmm1,%xmm0 movl %eax,112(%esp) pshufd $19,%xmm0,%xmm5 paddq %xmm1,%xmm1 pand 96(%esp),%xmm5 pxor %xmm1,%xmm5 -.L057xts_enc_steal: +.L053xts_enc_steal: movzbl (%esi),%ecx movzbl -16(%edi),%edx leal 1(%esi),%esi @@ -1398,7 +1307,7 @@ aesni_xts_encrypt: movb %dl,(%edi) leal 1(%edi),%edi subl $1,%eax - jnz 
.L057xts_enc_steal + jnz .L053xts_enc_steal subl 112(%esp),%edi movl %ebp,%edx movl %ebx,%ecx @@ -1408,30 +1317,16 @@ aesni_xts_encrypt: movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 -.L058enc1_loop_10: +.L054enc1_loop_10: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz .L058enc1_loop_10 + jnz .L054enc1_loop_10 .byte 102,15,56,221,209 xorps %xmm5,%xmm2 movups %xmm2,-16(%edi) -.L056xts_enc_ret: - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - movdqa %xmm0,(%esp) - pxor %xmm3,%xmm3 - movdqa %xmm0,16(%esp) - pxor %xmm4,%xmm4 - movdqa %xmm0,32(%esp) - pxor %xmm5,%xmm5 - movdqa %xmm0,48(%esp) - pxor %xmm6,%xmm6 - movdqa %xmm0,64(%esp) - pxor %xmm7,%xmm7 - movdqa %xmm0,80(%esp) +.L052xts_enc_ret: movl 116(%esp),%esp popl %edi popl %esi @@ -1456,12 +1351,12 @@ aesni_xts_decrypt: movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 -.L059enc1_loop_11: +.L055enc1_loop_11: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz .L059enc1_loop_11 + jnz .L055enc1_loop_11 .byte 102,15,56,221,209 movl 20(%esp),%esi movl 24(%esp),%edi @@ -1490,14 +1385,12 @@ aesni_xts_decrypt: pcmpgtd %xmm1,%xmm0 andl $-16,%eax subl $96,%eax - jc .L060xts_dec_short - shll $4,%ecx - movl $16,%ebx - subl %ecx,%ebx - leal 32(%edx,%ecx,1),%edx - jmp .L061xts_dec_loop6 + jc .L056xts_dec_short + shrl $1,%ecx + movl %ecx,%ebx + jmp .L057xts_dec_loop6 .align 16 -.L061xts_dec_loop6: +.L057xts_dec_loop6: pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,(%esp) @@ -1533,7 +1426,6 @@ aesni_xts_decrypt: pand %xmm3,%xmm7 movups (%esi),%xmm2 pxor %xmm1,%xmm7 - movl %ebx,%ecx movdqu 16(%esi),%xmm3 xorps %xmm0,%xmm2 movdqu 32(%esi),%xmm4 @@ -1549,17 +1441,19 @@ aesni_xts_decrypt: movdqa %xmm7,80(%esp) pxor %xmm1,%xmm7 movups 16(%ebp),%xmm1 + leal 32(%ebp),%edx pxor 16(%esp),%xmm3 - pxor 32(%esp),%xmm4 .byte 102,15,56,222,209 - pxor 48(%esp),%xmm5 - pxor 64(%esp),%xmm6 + pxor 32(%esp),%xmm4 .byte 102,15,56,222,217 - pxor %xmm0,%xmm7 - movups 32(%ebp),%xmm0 + pxor 48(%esp),%xmm5 + decl %ecx .byte 102,15,56,222,225 + pxor 64(%esp),%xmm6 .byte 102,15,56,222,233 + pxor %xmm0,%xmm7 .byte 102,15,56,222,241 + movups (%edx),%xmm0 .byte 102,15,56,222,249 call .L_aesni_decrypt6_enter movdqa 80(%esp),%xmm1 @@ -1584,25 +1478,26 @@ aesni_xts_decrypt: paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 + movl %ebx,%ecx pxor %xmm2,%xmm1 subl $96,%eax - jnc .L061xts_dec_loop6 - movl 240(%ebp),%ecx + jnc .L057xts_dec_loop6 + leal 1(,%ecx,2),%ecx movl %ebp,%edx movl %ecx,%ebx -.L060xts_dec_short: +.L056xts_dec_short: addl $96,%eax - jz .L062xts_dec_done6x + jz .L058xts_dec_done6x movdqa %xmm1,%xmm5 cmpl $32,%eax - jb .L063xts_dec_one + jb .L059xts_dec_one pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 - je .L064xts_dec_two + je .L060xts_dec_two pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,%xmm6 @@ -1611,7 +1506,7 @@ aesni_xts_decrypt: pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 cmpl $64,%eax - jb .L065xts_dec_three + jb .L061xts_dec_three pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,%xmm7 @@ -1621,7 +1516,7 @@ aesni_xts_decrypt: pxor %xmm2,%xmm1 movdqa %xmm5,(%esp) movdqa %xmm6,16(%esp) - je .L066xts_dec_four + je .L062xts_dec_four movdqa %xmm7,32(%esp) pshufd $19,%xmm0,%xmm7 movdqa %xmm1,48(%esp) @@ -1653,9 +1548,9 @@ aesni_xts_decrypt: movups %xmm5,48(%edi) movups %xmm6,64(%edi) leal 80(%edi),%edi - jmp .L067xts_dec_done + jmp .L063xts_dec_done .align 16 -.L063xts_dec_one: +.L059xts_dec_one: 
movups (%esi),%xmm2 leal 16(%esi),%esi xorps %xmm5,%xmm2 @@ -1663,36 +1558,36 @@ aesni_xts_decrypt: movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 -.L068dec1_loop_12: +.L064dec1_loop_12: .byte 102,15,56,222,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz .L068dec1_loop_12 + jnz .L064dec1_loop_12 .byte 102,15,56,223,209 xorps %xmm5,%xmm2 movups %xmm2,(%edi) leal 16(%edi),%edi movdqa %xmm5,%xmm1 - jmp .L067xts_dec_done + jmp .L063xts_dec_done .align 16 -.L064xts_dec_two: +.L060xts_dec_two: movaps %xmm1,%xmm6 movups (%esi),%xmm2 movups 16(%esi),%xmm3 leal 32(%esi),%esi xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 - call _aesni_decrypt2 + call _aesni_decrypt3 xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 movups %xmm2,(%edi) movups %xmm3,16(%edi) leal 32(%edi),%edi movdqa %xmm6,%xmm1 - jmp .L067xts_dec_done + jmp .L063xts_dec_done .align 16 -.L065xts_dec_three: +.L061xts_dec_three: movaps %xmm1,%xmm7 movups (%esi),%xmm2 movups 16(%esi),%xmm3 @@ -1710,9 +1605,9 @@ aesni_xts_decrypt: movups %xmm4,32(%edi) leal 48(%edi),%edi movdqa %xmm7,%xmm1 - jmp .L067xts_dec_done + jmp .L063xts_dec_done .align 16 -.L066xts_dec_four: +.L062xts_dec_four: movaps %xmm1,%xmm6 movups (%esi),%xmm2 movups 16(%esi),%xmm3 @@ -1734,20 +1629,20 @@ aesni_xts_decrypt: movups %xmm5,48(%edi) leal 64(%edi),%edi movdqa %xmm6,%xmm1 - jmp .L067xts_dec_done + jmp .L063xts_dec_done .align 16 -.L062xts_dec_done6x: +.L058xts_dec_done6x: movl 112(%esp),%eax andl $15,%eax - jz .L069xts_dec_ret + jz .L065xts_dec_ret movl %eax,112(%esp) - jmp .L070xts_dec_only_one_more + jmp .L066xts_dec_only_one_more .align 16 -.L067xts_dec_done: +.L063xts_dec_done: movl 112(%esp),%eax pxor %xmm0,%xmm0 andl $15,%eax - jz .L069xts_dec_ret + jz .L065xts_dec_ret pcmpgtd %xmm1,%xmm0 movl %eax,112(%esp) pshufd $19,%xmm0,%xmm2 @@ -1757,7 +1652,7 @@ aesni_xts_decrypt: pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 -.L070xts_dec_only_one_more: +.L066xts_dec_only_one_more: pshufd $19,%xmm0,%xmm5 movdqa %xmm1,%xmm6 paddq %xmm1,%xmm1 @@ -1771,16 +1666,16 @@ aesni_xts_decrypt: movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 -.L071dec1_loop_13: +.L067dec1_loop_13: .byte 102,15,56,222,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz .L071dec1_loop_13 + jnz .L067dec1_loop_13 .byte 102,15,56,223,209 xorps %xmm5,%xmm2 movups %xmm2,(%edi) -.L072xts_dec_steal: +.L068xts_dec_steal: movzbl 16(%esi),%ecx movzbl (%edi),%edx leal 1(%esi),%esi @@ -1788,7 +1683,7 @@ aesni_xts_decrypt: movb %dl,16(%edi) leal 1(%edi),%edi subl $1,%eax - jnz .L072xts_dec_steal + jnz .L068xts_dec_steal subl 112(%esp),%edi movl %ebp,%edx movl %ebx,%ecx @@ -1798,30 +1693,16 @@ aesni_xts_decrypt: movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 -.L073dec1_loop_14: +.L069dec1_loop_14: .byte 102,15,56,222,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz .L073dec1_loop_14 + jnz .L069dec1_loop_14 .byte 102,15,56,223,209 xorps %xmm6,%xmm2 movups %xmm2,(%edi) -.L069xts_dec_ret: - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - movdqa %xmm0,(%esp) - pxor %xmm3,%xmm3 - movdqa %xmm0,16(%esp) - pxor %xmm4,%xmm4 - movdqa %xmm0,32(%esp) - pxor %xmm5,%xmm5 - movdqa %xmm0,48(%esp) - pxor %xmm6,%xmm6 - movdqa %xmm0,64(%esp) - pxor %xmm7,%xmm7 - movdqa %xmm0,80(%esp) +.L065xts_dec_ret: movl 116(%esp),%esp popl %edi popl %esi @@ -1847,7 +1728,7 @@ aesni_cbc_encrypt: movl 32(%esp),%edx movl 36(%esp),%ebp testl %eax,%eax - jz .L074cbc_abort + jz .L070cbc_abort cmpl $0,40(%esp) xchgl %esp,%ebx movups (%ebp),%xmm7 @@ -1855,14 +1736,14 @@ aesni_cbc_encrypt: movl 
%edx,%ebp movl %ebx,16(%esp) movl %ecx,%ebx - je .L075cbc_decrypt + je .L071cbc_decrypt movaps %xmm7,%xmm2 cmpl $16,%eax - jb .L076cbc_enc_tail + jb .L072cbc_enc_tail subl $16,%eax - jmp .L077cbc_enc_loop + jmp .L073cbc_enc_loop .align 16 -.L077cbc_enc_loop: +.L073cbc_enc_loop: movups (%esi),%xmm7 leal 16(%esi),%esi movups (%edx),%xmm0 @@ -1870,25 +1751,24 @@ aesni_cbc_encrypt: xorps %xmm0,%xmm7 leal 32(%edx),%edx xorps %xmm7,%xmm2 -.L078enc1_loop_15: +.L074enc1_loop_15: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz .L078enc1_loop_15 + jnz .L074enc1_loop_15 .byte 102,15,56,221,209 movl %ebx,%ecx movl %ebp,%edx movups %xmm2,(%edi) leal 16(%edi),%edi subl $16,%eax - jnc .L077cbc_enc_loop + jnc .L073cbc_enc_loop addl $16,%eax - jnz .L076cbc_enc_tail + jnz .L072cbc_enc_tail movaps %xmm2,%xmm7 - pxor %xmm2,%xmm2 - jmp .L079cbc_ret -.L076cbc_enc_tail: + jmp .L075cbc_ret +.L072cbc_enc_tail: movl %eax,%ecx .long 2767451785 movl $16,%ecx @@ -1899,20 +1779,20 @@ aesni_cbc_encrypt: movl %ebx,%ecx movl %edi,%esi movl %ebp,%edx - jmp .L077cbc_enc_loop + jmp .L073cbc_enc_loop .align 16 -.L075cbc_decrypt: +.L071cbc_decrypt: cmpl $80,%eax - jbe .L080cbc_dec_tail + jbe .L076cbc_dec_tail movaps %xmm7,(%esp) subl $80,%eax - jmp .L081cbc_dec_loop6_enter + jmp .L077cbc_dec_loop6_enter .align 16 -.L082cbc_dec_loop6: +.L078cbc_dec_loop6: movaps %xmm0,(%esp) movups %xmm7,(%edi) leal 16(%edi),%edi -.L081cbc_dec_loop6_enter: +.L077cbc_dec_loop6_enter: movdqu (%esi),%xmm2 movdqu 16(%esi),%xmm3 movdqu 32(%esi),%xmm4 @@ -1942,28 +1822,28 @@ aesni_cbc_encrypt: movups %xmm6,64(%edi) leal 80(%edi),%edi subl $96,%eax - ja .L082cbc_dec_loop6 + ja .L078cbc_dec_loop6 movaps %xmm7,%xmm2 movaps %xmm0,%xmm7 addl $80,%eax - jle .L083cbc_dec_clear_tail_collected + jle .L079cbc_dec_tail_collected movups %xmm2,(%edi) leal 16(%edi),%edi -.L080cbc_dec_tail: +.L076cbc_dec_tail: movups (%esi),%xmm2 movaps %xmm2,%xmm6 cmpl $16,%eax - jbe .L084cbc_dec_one + jbe .L080cbc_dec_one movups 16(%esi),%xmm3 movaps %xmm3,%xmm5 cmpl $32,%eax - jbe .L085cbc_dec_two + jbe .L081cbc_dec_two movups 32(%esi),%xmm4 cmpl $48,%eax - jbe .L086cbc_dec_three + jbe .L082cbc_dec_three movups 48(%esi),%xmm5 cmpl $64,%eax - jbe .L087cbc_dec_four + jbe .L083cbc_dec_four movups 64(%esi),%xmm6 movaps %xmm7,(%esp) movups (%esi),%xmm2 @@ -1981,62 +1861,56 @@ aesni_cbc_encrypt: xorps %xmm0,%xmm6 movups %xmm2,(%edi) movups %xmm3,16(%edi) - pxor %xmm3,%xmm3 movups %xmm4,32(%edi) - pxor %xmm4,%xmm4 movups %xmm5,48(%edi) - pxor %xmm5,%xmm5 leal 64(%edi),%edi movaps %xmm6,%xmm2 - pxor %xmm6,%xmm6 subl $80,%eax - jmp .L088cbc_dec_tail_collected + jmp .L079cbc_dec_tail_collected .align 16 -.L084cbc_dec_one: +.L080cbc_dec_one: movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 -.L089dec1_loop_16: +.L084dec1_loop_16: .byte 102,15,56,222,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz .L089dec1_loop_16 + jnz .L084dec1_loop_16 .byte 102,15,56,223,209 xorps %xmm7,%xmm2 movaps %xmm6,%xmm7 subl $16,%eax - jmp .L088cbc_dec_tail_collected + jmp .L079cbc_dec_tail_collected .align 16 -.L085cbc_dec_two: - call _aesni_decrypt2 +.L081cbc_dec_two: + xorps %xmm4,%xmm4 + call _aesni_decrypt3 xorps %xmm7,%xmm2 xorps %xmm6,%xmm3 movups %xmm2,(%edi) movaps %xmm3,%xmm2 - pxor %xmm3,%xmm3 leal 16(%edi),%edi movaps %xmm5,%xmm7 subl $32,%eax - jmp .L088cbc_dec_tail_collected + jmp .L079cbc_dec_tail_collected .align 16 -.L086cbc_dec_three: +.L082cbc_dec_three: call _aesni_decrypt3 xorps %xmm7,%xmm2 xorps %xmm6,%xmm3 xorps 
%xmm5,%xmm4 movups %xmm2,(%edi) movaps %xmm4,%xmm2 - pxor %xmm4,%xmm4 movups %xmm3,16(%edi) - pxor %xmm3,%xmm3 leal 32(%edi),%edi movups 32(%esi),%xmm7 subl $48,%eax - jmp .L088cbc_dec_tail_collected + jmp .L079cbc_dec_tail_collected .align 16 -.L087cbc_dec_four: +.L083cbc_dec_four: call _aesni_decrypt4 movups 16(%esi),%xmm1 movups 32(%esi),%xmm0 @@ -2046,44 +1920,28 @@ aesni_cbc_encrypt: movups %xmm2,(%edi) xorps %xmm1,%xmm4 movups %xmm3,16(%edi) - pxor %xmm3,%xmm3 xorps %xmm0,%xmm5 movups %xmm4,32(%edi) - pxor %xmm4,%xmm4 leal 48(%edi),%edi movaps %xmm5,%xmm2 - pxor %xmm5,%xmm5 subl $64,%eax - jmp .L088cbc_dec_tail_collected -.align 16 -.L083cbc_dec_clear_tail_collected: - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - pxor %xmm6,%xmm6 -.L088cbc_dec_tail_collected: +.L079cbc_dec_tail_collected: andl $15,%eax - jnz .L090cbc_dec_tail_partial + jnz .L085cbc_dec_tail_partial movups %xmm2,(%edi) - pxor %xmm0,%xmm0 - jmp .L079cbc_ret + jmp .L075cbc_ret .align 16 -.L090cbc_dec_tail_partial: +.L085cbc_dec_tail_partial: movaps %xmm2,(%esp) - pxor %xmm0,%xmm0 movl $16,%ecx movl %esp,%esi subl %eax,%ecx .long 2767451785 - movdqa %xmm2,(%esp) -.L079cbc_ret: +.L075cbc_ret: movl 16(%esp),%esp movl 36(%esp),%ebp - pxor %xmm2,%xmm2 - pxor %xmm1,%xmm1 movups %xmm7,(%ebp) - pxor %xmm7,%xmm7 -.L074cbc_abort: +.L070cbc_abort: popl %edi popl %esi popl %ebx @@ -2093,62 +1951,52 @@ aesni_cbc_encrypt: .type _aesni_set_encrypt_key,@function .align 16 _aesni_set_encrypt_key: - pushl %ebp - pushl %ebx testl %eax,%eax - jz .L091bad_pointer + jz .L086bad_pointer testl %edx,%edx - jz .L091bad_pointer - call .L092pic -.L092pic: - popl %ebx - leal .Lkey_const-.L092pic(%ebx),%ebx - leal _gnutls_x86_cpuid_s,%ebp + jz .L086bad_pointer movups (%eax),%xmm0 xorps %xmm4,%xmm4 - movl 4(%ebp),%ebp leal 16(%edx),%edx - andl $268437504,%ebp cmpl $256,%ecx - je .L09314rounds + je .L08714rounds cmpl $192,%ecx - je .L09412rounds + je .L08812rounds cmpl $128,%ecx - jne .L095bad_keybits + jne .L089bad_keybits .align 16 -.L09610rounds: - cmpl $268435456,%ebp - je .L09710rounds_alt +.L09010rounds: movl $9,%ecx movups %xmm0,-16(%edx) .byte 102,15,58,223,200,1 - call .L098key_128_cold + call .L091key_128_cold .byte 102,15,58,223,200,2 - call .L099key_128 + call .L092key_128 .byte 102,15,58,223,200,4 - call .L099key_128 + call .L092key_128 .byte 102,15,58,223,200,8 - call .L099key_128 + call .L092key_128 .byte 102,15,58,223,200,16 - call .L099key_128 + call .L092key_128 .byte 102,15,58,223,200,32 - call .L099key_128 + call .L092key_128 .byte 102,15,58,223,200,64 - call .L099key_128 + call .L092key_128 .byte 102,15,58,223,200,128 - call .L099key_128 + call .L092key_128 .byte 102,15,58,223,200,27 - call .L099key_128 + call .L092key_128 .byte 102,15,58,223,200,54 - call .L099key_128 + call .L092key_128 movups %xmm0,(%edx) movl %ecx,80(%edx) - jmp .L100good_key + xorl %eax,%eax + ret .align 16 -.L099key_128: +.L092key_128: movups %xmm0,(%edx) leal 16(%edx),%edx -.L098key_128_cold: +.L091key_128_cold: shufps $16,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $140,%xmm0,%xmm4 @@ -2157,91 +2005,38 @@ _aesni_set_encrypt_key: xorps %xmm1,%xmm0 ret .align 16 -.L09710rounds_alt: - movdqa (%ebx),%xmm5 - movl $8,%ecx - movdqa 32(%ebx),%xmm4 - movdqa %xmm0,%xmm2 - movdqu %xmm0,-16(%edx) -.L101loop_key128: -.byte 102,15,56,0,197 -.byte 102,15,56,221,196 - pslld $1,%xmm4 - leal 16(%edx),%edx - movdqa %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm3,%xmm2 - pxor %xmm2,%xmm0 - movdqu 
%xmm0,-16(%edx) - movdqa %xmm0,%xmm2 - decl %ecx - jnz .L101loop_key128 - movdqa 48(%ebx),%xmm4 -.byte 102,15,56,0,197 -.byte 102,15,56,221,196 - pslld $1,%xmm4 - movdqa %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm3,%xmm2 - pxor %xmm2,%xmm0 - movdqu %xmm0,(%edx) - movdqa %xmm0,%xmm2 -.byte 102,15,56,0,197 -.byte 102,15,56,221,196 - movdqa %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm3,%xmm2 - pxor %xmm2,%xmm0 - movdqu %xmm0,16(%edx) - movl $9,%ecx - movl %ecx,96(%edx) - jmp .L100good_key -.align 16 -.L09412rounds: +.L08812rounds: movq 16(%eax),%xmm2 - cmpl $268435456,%ebp - je .L10212rounds_alt movl $11,%ecx movups %xmm0,-16(%edx) .byte 102,15,58,223,202,1 - call .L103key_192a_cold + call .L093key_192a_cold .byte 102,15,58,223,202,2 - call .L104key_192b + call .L094key_192b .byte 102,15,58,223,202,4 - call .L105key_192a + call .L095key_192a .byte 102,15,58,223,202,8 - call .L104key_192b + call .L094key_192b .byte 102,15,58,223,202,16 - call .L105key_192a + call .L095key_192a .byte 102,15,58,223,202,32 - call .L104key_192b + call .L094key_192b .byte 102,15,58,223,202,64 - call .L105key_192a + call .L095key_192a .byte 102,15,58,223,202,128 - call .L104key_192b + call .L094key_192b movups %xmm0,(%edx) movl %ecx,48(%edx) - jmp .L100good_key + xorl %eax,%eax + ret .align 16 -.L105key_192a: +.L095key_192a: movups %xmm0,(%edx) leal 16(%edx),%edx .align 16 -.L103key_192a_cold: +.L093key_192a_cold: movaps %xmm2,%xmm5 -.L106key_192b_warm: +.L096key_192b_warm: shufps $16,%xmm0,%xmm4 movdqa %xmm2,%xmm3 xorps %xmm4,%xmm0 @@ -2255,90 +2050,56 @@ _aesni_set_encrypt_key: pxor %xmm3,%xmm2 ret .align 16 -.L104key_192b: +.L094key_192b: movaps %xmm0,%xmm3 shufps $68,%xmm0,%xmm5 movups %xmm5,(%edx) shufps $78,%xmm2,%xmm3 movups %xmm3,16(%edx) leal 32(%edx),%edx - jmp .L106key_192b_warm -.align 16 -.L10212rounds_alt: - movdqa 16(%ebx),%xmm5 - movdqa 32(%ebx),%xmm4 - movl $8,%ecx - movdqu %xmm0,-16(%edx) -.L107loop_key192: - movq %xmm2,(%edx) - movdqa %xmm2,%xmm1 -.byte 102,15,56,0,213 -.byte 102,15,56,221,212 - pslld $1,%xmm4 - leal 24(%edx),%edx - movdqa %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm3,%xmm0 - pshufd $255,%xmm0,%xmm3 - pxor %xmm1,%xmm3 - pslldq $4,%xmm1 - pxor %xmm1,%xmm3 - pxor %xmm2,%xmm0 - pxor %xmm3,%xmm2 - movdqu %xmm0,-16(%edx) - decl %ecx - jnz .L107loop_key192 - movl $11,%ecx - movl %ecx,32(%edx) - jmp .L100good_key + jmp .L096key_192b_warm .align 16 -.L09314rounds: +.L08714rounds: movups 16(%eax),%xmm2 - leal 16(%edx),%edx - cmpl $268435456,%ebp - je .L10814rounds_alt movl $13,%ecx + leal 16(%edx),%edx movups %xmm0,-32(%edx) movups %xmm2,-16(%edx) .byte 102,15,58,223,202,1 - call .L109key_256a_cold + call .L097key_256a_cold .byte 102,15,58,223,200,1 - call .L110key_256b + call .L098key_256b .byte 102,15,58,223,202,2 - call .L111key_256a + call .L099key_256a .byte 102,15,58,223,200,2 - call .L110key_256b + call .L098key_256b .byte 102,15,58,223,202,4 - call .L111key_256a + call .L099key_256a .byte 102,15,58,223,200,4 - call .L110key_256b + call .L098key_256b .byte 102,15,58,223,202,8 - call .L111key_256a + call .L099key_256a .byte 102,15,58,223,200,8 - call .L110key_256b + call .L098key_256b .byte 102,15,58,223,202,16 - call .L111key_256a + call .L099key_256a .byte 102,15,58,223,200,16 - call .L110key_256b + call .L098key_256b .byte 102,15,58,223,202,32 - call .L111key_256a + call 
.L099key_256a .byte 102,15,58,223,200,32 - call .L110key_256b + call .L098key_256b .byte 102,15,58,223,202,64 - call .L111key_256a + call .L099key_256a movups %xmm0,(%edx) movl %ecx,16(%edx) xorl %eax,%eax - jmp .L100good_key + ret .align 16 -.L111key_256a: +.L099key_256a: movups %xmm2,(%edx) leal 16(%edx),%edx -.L109key_256a_cold: +.L097key_256a_cold: shufps $16,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $140,%xmm0,%xmm4 @@ -2347,7 +2108,7 @@ _aesni_set_encrypt_key: xorps %xmm1,%xmm0 ret .align 16 -.L110key_256b: +.L098key_256b: movups %xmm0,(%edx) leal 16(%edx),%edx shufps $16,%xmm2,%xmm4 @@ -2357,70 +2118,13 @@ _aesni_set_encrypt_key: shufps $170,%xmm1,%xmm1 xorps %xmm1,%xmm2 ret -.align 16 -.L10814rounds_alt: - movdqa (%ebx),%xmm5 - movdqa 32(%ebx),%xmm4 - movl $7,%ecx - movdqu %xmm0,-32(%edx) - movdqa %xmm2,%xmm1 - movdqu %xmm2,-16(%edx) -.L112loop_key256: -.byte 102,15,56,0,213 -.byte 102,15,56,221,212 - movdqa %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm3,%xmm0 - pslld $1,%xmm4 - pxor %xmm2,%xmm0 - movdqu %xmm0,(%edx) - decl %ecx - jz .L113done_key256 - pshufd $255,%xmm0,%xmm2 - pxor %xmm3,%xmm3 -.byte 102,15,56,221,211 - movdqa %xmm1,%xmm3 - pslldq $4,%xmm1 - pxor %xmm1,%xmm3 - pslldq $4,%xmm1 - pxor %xmm1,%xmm3 - pslldq $4,%xmm1 - pxor %xmm3,%xmm1 - pxor %xmm1,%xmm2 - movdqu %xmm2,16(%edx) - leal 32(%edx),%edx - movdqa %xmm2,%xmm1 - jmp .L112loop_key256 -.L113done_key256: - movl $13,%ecx - movl %ecx,16(%edx) -.L100good_key: - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - xorl %eax,%eax - popl %ebx - popl %ebp - ret .align 4 -.L091bad_pointer: +.L086bad_pointer: movl $-1,%eax - popl %ebx - popl %ebp ret .align 4 -.L095bad_keybits: - pxor %xmm0,%xmm0 +.L089bad_keybits: movl $-2,%eax - popl %ebx - popl %ebp ret .size _aesni_set_encrypt_key,.-_aesni_set_encrypt_key .globl aesni_set_encrypt_key @@ -2446,7 +2150,7 @@ aesni_set_decrypt_key: movl 12(%esp),%edx shll $4,%ecx testl %eax,%eax - jnz .L114dec_key_ret + jnz .L100dec_key_ret leal 16(%edx,%ecx,1),%eax movups (%edx),%xmm0 movups (%eax),%xmm1 @@ -2454,7 +2158,7 @@ aesni_set_decrypt_key: movups %xmm1,(%edx) leal 16(%edx),%edx leal -16(%eax),%eax -.L115dec_key_inverse: +.L101dec_key_inverse: movups (%edx),%xmm0 movups (%eax),%xmm1 .byte 102,15,56,219,192 @@ -2464,26 +2168,20 @@ aesni_set_decrypt_key: movups %xmm0,16(%eax) movups %xmm1,-16(%edx) cmpl %edx,%eax - ja .L115dec_key_inverse + ja .L101dec_key_inverse movups (%edx),%xmm0 .byte 102,15,56,219,192 movups %xmm0,(%edx) - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 xorl %eax,%eax -.L114dec_key_ret: +.L100dec_key_ret: ret .size aesni_set_decrypt_key,.-.L_aesni_set_decrypt_key_begin -.align 64 -.Lkey_const: -.long 202313229,202313229,202313229,202313229 -.long 67569157,67569157,67569157,67569157 -.long 1,1,1,1 -.long 27,27,27,27 .byte 65,69,83,32,102,111,114,32,73,110,116,101,108,32,65,69 .byte 83,45,78,73,44,32,67,82,89,80,84,79,71,65,77,83 .byte 32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115 .byte 115,108,46,111,114,103,62,0 -.comm _gnutls_x86_cpuid_s,16,4 + .section .note.GNU-stack,"",%progbits + + diff --git a/lib/accelerated/x86/macosx/aesni-x86.s b/lib/accelerated/x86/macosx/aesni-x86.s index 275ab58ec5..09ca1cbc5c 100644 --- a/lib/accelerated/x86/macosx/aesni-x86.s +++ b/lib/accelerated/x86/macosx/aesni-x86.s @@ -59,10 +59,7 @@ L000enc1_loop_1: leal 16(%edx),%edx jnz L000enc1_loop_1 .byte 102,15,56,221,209 - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 
movups %xmm2,(%eax) - pxor %xmm2,%xmm2 ret .globl _aesni_decrypt .align 4 @@ -84,84 +81,30 @@ L001dec1_loop_2: leal 16(%edx),%edx jnz L001dec1_loop_2 .byte 102,15,56,223,209 - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 movups %xmm2,(%eax) - pxor %xmm2,%xmm2 - ret -.align 4 -__aesni_encrypt2: - movups (%edx),%xmm0 - shll $4,%ecx - movups 16(%edx),%xmm1 - xorps %xmm0,%xmm2 - pxor %xmm0,%xmm3 - movups 32(%edx),%xmm0 - leal 32(%edx,%ecx,1),%edx - negl %ecx - addl $16,%ecx -L002enc2_loop: -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx -.byte 102,15,56,220,208 -.byte 102,15,56,220,216 - movups -16(%edx,%ecx,1),%xmm0 - jnz L002enc2_loop -.byte 102,15,56,220,209 -.byte 102,15,56,220,217 -.byte 102,15,56,221,208 -.byte 102,15,56,221,216 - ret -.align 4 -__aesni_decrypt2: - movups (%edx),%xmm0 - shll $4,%ecx - movups 16(%edx),%xmm1 - xorps %xmm0,%xmm2 - pxor %xmm0,%xmm3 - movups 32(%edx),%xmm0 - leal 32(%edx,%ecx,1),%edx - negl %ecx - addl $16,%ecx -L003dec2_loop: -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx -.byte 102,15,56,222,208 -.byte 102,15,56,222,216 - movups -16(%edx,%ecx,1),%xmm0 - jnz L003dec2_loop -.byte 102,15,56,222,209 -.byte 102,15,56,222,217 -.byte 102,15,56,223,208 -.byte 102,15,56,223,216 ret .align 4 __aesni_encrypt3: movups (%edx),%xmm0 - shll $4,%ecx + shrl $1,%ecx movups 16(%edx),%xmm1 + leal 32(%edx),%edx xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 - movups 32(%edx),%xmm0 - leal 32(%edx,%ecx,1),%edx - negl %ecx - addl $16,%ecx -L004enc3_loop: + movups (%edx),%xmm0 +L002enc3_loop: .byte 102,15,56,220,209 .byte 102,15,56,220,217 + decl %ecx .byte 102,15,56,220,225 - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx + movups 16(%edx),%xmm1 .byte 102,15,56,220,208 .byte 102,15,56,220,216 + leal 32(%edx),%edx .byte 102,15,56,220,224 - movups -16(%edx,%ecx,1),%xmm0 - jnz L004enc3_loop + movups (%edx),%xmm0 + jnz L002enc3_loop .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 @@ -172,26 +115,25 @@ L004enc3_loop: .align 4 __aesni_decrypt3: movups (%edx),%xmm0 - shll $4,%ecx + shrl $1,%ecx movups 16(%edx),%xmm1 + leal 32(%edx),%edx xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 - movups 32(%edx),%xmm0 - leal 32(%edx,%ecx,1),%edx - negl %ecx - addl $16,%ecx -L005dec3_loop: + movups (%edx),%xmm0 +L003dec3_loop: .byte 102,15,56,222,209 .byte 102,15,56,222,217 + decl %ecx .byte 102,15,56,222,225 - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx + movups 16(%edx),%xmm1 .byte 102,15,56,222,208 .byte 102,15,56,222,216 + leal 32(%edx),%edx .byte 102,15,56,222,224 - movups -16(%edx,%ecx,1),%xmm0 - jnz L005dec3_loop + movups (%edx),%xmm0 + jnz L003dec3_loop .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 @@ -203,29 +145,27 @@ L005dec3_loop: __aesni_encrypt4: movups (%edx),%xmm0 movups 16(%edx),%xmm1 - shll $4,%ecx + shrl $1,%ecx + leal 32(%edx),%edx xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 pxor %xmm0,%xmm5 - movups 32(%edx),%xmm0 - leal 32(%edx,%ecx,1),%edx - negl %ecx -.byte 15,31,64,0 - addl $16,%ecx -L006enc4_loop: + movups (%edx),%xmm0 +L004enc4_loop: .byte 102,15,56,220,209 .byte 102,15,56,220,217 + decl %ecx .byte 102,15,56,220,225 .byte 102,15,56,220,233 - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx + movups 16(%edx),%xmm1 .byte 102,15,56,220,208 .byte 102,15,56,220,216 + leal 32(%edx),%edx .byte 102,15,56,220,224 .byte 102,15,56,220,232 - movups -16(%edx,%ecx,1),%xmm0 - jnz L006enc4_loop + movups (%edx),%xmm0 + jnz L004enc4_loop .byte 
102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 @@ -239,29 +179,27 @@ L006enc4_loop: __aesni_decrypt4: movups (%edx),%xmm0 movups 16(%edx),%xmm1 - shll $4,%ecx + shrl $1,%ecx + leal 32(%edx),%edx xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 pxor %xmm0,%xmm5 - movups 32(%edx),%xmm0 - leal 32(%edx,%ecx,1),%edx - negl %ecx -.byte 15,31,64,0 - addl $16,%ecx -L007dec4_loop: + movups (%edx),%xmm0 +L005dec4_loop: .byte 102,15,56,222,209 .byte 102,15,56,222,217 + decl %ecx .byte 102,15,56,222,225 .byte 102,15,56,222,233 - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx + movups 16(%edx),%xmm1 .byte 102,15,56,222,208 .byte 102,15,56,222,216 + leal 32(%edx),%edx .byte 102,15,56,222,224 .byte 102,15,56,222,232 - movups -16(%edx,%ecx,1),%xmm0 - jnz L007dec4_loop + movups (%edx),%xmm0 + jnz L005dec4_loop .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 @@ -274,42 +212,45 @@ L007dec4_loop: .align 4 __aesni_encrypt6: movups (%edx),%xmm0 - shll $4,%ecx + shrl $1,%ecx movups 16(%edx),%xmm1 + leal 32(%edx),%edx xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 - pxor %xmm0,%xmm4 .byte 102,15,56,220,209 - pxor %xmm0,%xmm5 - pxor %xmm0,%xmm6 + pxor %xmm0,%xmm4 .byte 102,15,56,220,217 - leal 32(%edx,%ecx,1),%edx - negl %ecx + pxor %xmm0,%xmm5 + decl %ecx .byte 102,15,56,220,225 + pxor %xmm0,%xmm6 +.byte 102,15,56,220,233 pxor %xmm0,%xmm7 - movups (%edx,%ecx,1),%xmm0 - addl $16,%ecx - jmp L008_aesni_encrypt6_inner +.byte 102,15,56,220,241 + movups (%edx),%xmm0 +.byte 102,15,56,220,249 + jmp L_aesni_encrypt6_enter .align 4,0x90 -L009enc6_loop: +L006enc6_loop: .byte 102,15,56,220,209 .byte 102,15,56,220,217 + decl %ecx .byte 102,15,56,220,225 -L008_aesni_encrypt6_inner: .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 +.align 4,0x90 L_aesni_encrypt6_enter: - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx + movups 16(%edx),%xmm1 .byte 102,15,56,220,208 .byte 102,15,56,220,216 + leal 32(%edx),%edx .byte 102,15,56,220,224 .byte 102,15,56,220,232 .byte 102,15,56,220,240 .byte 102,15,56,220,248 - movups -16(%edx,%ecx,1),%xmm0 - jnz L009enc6_loop + movups (%edx),%xmm0 + jnz L006enc6_loop .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 @@ -326,42 +267,45 @@ L_aesni_encrypt6_enter: .align 4 __aesni_decrypt6: movups (%edx),%xmm0 - shll $4,%ecx + shrl $1,%ecx movups 16(%edx),%xmm1 + leal 32(%edx),%edx xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 - pxor %xmm0,%xmm4 .byte 102,15,56,222,209 - pxor %xmm0,%xmm5 - pxor %xmm0,%xmm6 + pxor %xmm0,%xmm4 .byte 102,15,56,222,217 - leal 32(%edx,%ecx,1),%edx - negl %ecx + pxor %xmm0,%xmm5 + decl %ecx .byte 102,15,56,222,225 + pxor %xmm0,%xmm6 +.byte 102,15,56,222,233 pxor %xmm0,%xmm7 - movups (%edx,%ecx,1),%xmm0 - addl $16,%ecx - jmp L010_aesni_decrypt6_inner +.byte 102,15,56,222,241 + movups (%edx),%xmm0 +.byte 102,15,56,222,249 + jmp L_aesni_decrypt6_enter .align 4,0x90 -L011dec6_loop: +L007dec6_loop: .byte 102,15,56,222,209 .byte 102,15,56,222,217 + decl %ecx .byte 102,15,56,222,225 -L010_aesni_decrypt6_inner: .byte 102,15,56,222,233 .byte 102,15,56,222,241 .byte 102,15,56,222,249 +.align 4,0x90 L_aesni_decrypt6_enter: - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx + movups 16(%edx),%xmm1 .byte 102,15,56,222,208 .byte 102,15,56,222,216 + leal 32(%edx),%edx .byte 102,15,56,222,224 .byte 102,15,56,222,232 .byte 102,15,56,222,240 .byte 102,15,56,222,248 - movups -16(%edx,%ecx,1),%xmm0 - jnz L011dec6_loop + movups (%edx),%xmm0 + jnz L007dec6_loop .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 @@ -389,14 
+333,14 @@ L_aesni_ecb_encrypt_begin: movl 32(%esp),%edx movl 36(%esp),%ebx andl $-16,%eax - jz L012ecb_ret + jz L008ecb_ret movl 240(%edx),%ecx testl %ebx,%ebx - jz L013ecb_decrypt + jz L009ecb_decrypt movl %edx,%ebp movl %ecx,%ebx cmpl $96,%eax - jb L014ecb_enc_tail + jb L010ecb_enc_tail movdqu (%esi),%xmm2 movdqu 16(%esi),%xmm3 movdqu 32(%esi),%xmm4 @@ -405,9 +349,9 @@ L_aesni_ecb_encrypt_begin: movdqu 80(%esi),%xmm7 leal 96(%esi),%esi subl $96,%eax - jmp L015ecb_enc_loop6_enter + jmp L011ecb_enc_loop6_enter .align 4,0x90 -L016ecb_enc_loop6: +L012ecb_enc_loop6: movups %xmm2,(%edi) movdqu (%esi),%xmm2 movups %xmm3,16(%edi) @@ -422,12 +366,12 @@ L016ecb_enc_loop6: leal 96(%edi),%edi movdqu 80(%esi),%xmm7 leal 96(%esi),%esi -L015ecb_enc_loop6_enter: +L011ecb_enc_loop6_enter: call __aesni_encrypt6 movl %ebp,%edx movl %ebx,%ecx subl $96,%eax - jnc L016ecb_enc_loop6 + jnc L012ecb_enc_loop6 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) @@ -436,18 +380,18 @@ L015ecb_enc_loop6_enter: movups %xmm7,80(%edi) leal 96(%edi),%edi addl $96,%eax - jz L012ecb_ret -L014ecb_enc_tail: + jz L008ecb_ret +L010ecb_enc_tail: movups (%esi),%xmm2 cmpl $32,%eax - jb L017ecb_enc_one + jb L013ecb_enc_one movups 16(%esi),%xmm3 - je L018ecb_enc_two + je L014ecb_enc_two movups 32(%esi),%xmm4 cmpl $64,%eax - jb L019ecb_enc_three + jb L015ecb_enc_three movups 48(%esi),%xmm5 - je L020ecb_enc_four + je L016ecb_enc_four movups 64(%esi),%xmm6 xorps %xmm7,%xmm7 call __aesni_encrypt6 @@ -456,49 +400,50 @@ L014ecb_enc_tail: movups %xmm4,32(%edi) movups %xmm5,48(%edi) movups %xmm6,64(%edi) - jmp L012ecb_ret + jmp L008ecb_ret .align 4,0x90 -L017ecb_enc_one: +L013ecb_enc_one: movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 -L021enc1_loop_3: +L017enc1_loop_3: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz L021enc1_loop_3 + jnz L017enc1_loop_3 .byte 102,15,56,221,209 movups %xmm2,(%edi) - jmp L012ecb_ret + jmp L008ecb_ret .align 4,0x90 -L018ecb_enc_two: - call __aesni_encrypt2 +L014ecb_enc_two: + xorps %xmm4,%xmm4 + call __aesni_encrypt3 movups %xmm2,(%edi) movups %xmm3,16(%edi) - jmp L012ecb_ret + jmp L008ecb_ret .align 4,0x90 -L019ecb_enc_three: +L015ecb_enc_three: call __aesni_encrypt3 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) - jmp L012ecb_ret + jmp L008ecb_ret .align 4,0x90 -L020ecb_enc_four: +L016ecb_enc_four: call __aesni_encrypt4 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups %xmm5,48(%edi) - jmp L012ecb_ret + jmp L008ecb_ret .align 4,0x90 -L013ecb_decrypt: +L009ecb_decrypt: movl %edx,%ebp movl %ecx,%ebx cmpl $96,%eax - jb L022ecb_dec_tail + jb L018ecb_dec_tail movdqu (%esi),%xmm2 movdqu 16(%esi),%xmm3 movdqu 32(%esi),%xmm4 @@ -507,9 +452,9 @@ L013ecb_decrypt: movdqu 80(%esi),%xmm7 leal 96(%esi),%esi subl $96,%eax - jmp L023ecb_dec_loop6_enter + jmp L019ecb_dec_loop6_enter .align 4,0x90 -L024ecb_dec_loop6: +L020ecb_dec_loop6: movups %xmm2,(%edi) movdqu (%esi),%xmm2 movups %xmm3,16(%edi) @@ -524,12 +469,12 @@ L024ecb_dec_loop6: leal 96(%edi),%edi movdqu 80(%esi),%xmm7 leal 96(%esi),%esi -L023ecb_dec_loop6_enter: +L019ecb_dec_loop6_enter: call __aesni_decrypt6 movl %ebp,%edx movl %ebx,%ecx subl $96,%eax - jnc L024ecb_dec_loop6 + jnc L020ecb_dec_loop6 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) @@ -538,18 +483,18 @@ L023ecb_dec_loop6_enter: movups %xmm7,80(%edi) leal 96(%edi),%edi addl $96,%eax - jz L012ecb_ret -L022ecb_dec_tail: + jz L008ecb_ret +L018ecb_dec_tail: movups (%esi),%xmm2 
cmpl $32,%eax - jb L025ecb_dec_one + jb L021ecb_dec_one movups 16(%esi),%xmm3 - je L026ecb_dec_two + je L022ecb_dec_two movups 32(%esi),%xmm4 cmpl $64,%eax - jb L027ecb_dec_three + jb L023ecb_dec_three movups 48(%esi),%xmm5 - je L028ecb_dec_four + je L024ecb_dec_four movups 64(%esi),%xmm6 xorps %xmm7,%xmm7 call __aesni_decrypt6 @@ -558,51 +503,44 @@ L022ecb_dec_tail: movups %xmm4,32(%edi) movups %xmm5,48(%edi) movups %xmm6,64(%edi) - jmp L012ecb_ret + jmp L008ecb_ret .align 4,0x90 -L025ecb_dec_one: +L021ecb_dec_one: movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 -L029dec1_loop_4: +L025dec1_loop_4: .byte 102,15,56,222,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz L029dec1_loop_4 + jnz L025dec1_loop_4 .byte 102,15,56,223,209 movups %xmm2,(%edi) - jmp L012ecb_ret + jmp L008ecb_ret .align 4,0x90 -L026ecb_dec_two: - call __aesni_decrypt2 +L022ecb_dec_two: + xorps %xmm4,%xmm4 + call __aesni_decrypt3 movups %xmm2,(%edi) movups %xmm3,16(%edi) - jmp L012ecb_ret + jmp L008ecb_ret .align 4,0x90 -L027ecb_dec_three: +L023ecb_dec_three: call __aesni_decrypt3 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) - jmp L012ecb_ret + jmp L008ecb_ret .align 4,0x90 -L028ecb_dec_four: +L024ecb_dec_four: call __aesni_decrypt4 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups %xmm5,48(%edi) -L012ecb_ret: - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - pxor %xmm6,%xmm6 - pxor %xmm7,%xmm7 +L008ecb_ret: popl %edi popl %esi popl %ebx @@ -639,56 +577,48 @@ L_aesni_ccm64_encrypt_blocks_begin: movl %ebp,20(%esp) movl %ebp,24(%esp) movl %ebp,28(%esp) - shll $4,%ecx - movl $16,%ebx + shrl $1,%ecx leal (%edx),%ebp movdqa (%esp),%xmm5 movdqa %xmm7,%xmm2 - leal 32(%edx,%ecx,1),%edx - subl %ecx,%ebx + movl %ecx,%ebx .byte 102,15,56,0,253 -L030ccm64_enc_outer: +L026ccm64_enc_outer: movups (%ebp),%xmm0 movl %ebx,%ecx movups (%esi),%xmm6 xorps %xmm0,%xmm2 movups 16(%ebp),%xmm1 xorps %xmm6,%xmm0 + leal 32(%ebp),%edx xorps %xmm0,%xmm3 - movups 32(%ebp),%xmm0 -L031ccm64_enc2_loop: + movups (%edx),%xmm0 +L027ccm64_enc2_loop: .byte 102,15,56,220,209 + decl %ecx .byte 102,15,56,220,217 - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx + movups 16(%edx),%xmm1 .byte 102,15,56,220,208 + leal 32(%edx),%edx .byte 102,15,56,220,216 - movups -16(%edx,%ecx,1),%xmm0 - jnz L031ccm64_enc2_loop + movups (%edx),%xmm0 + jnz L027ccm64_enc2_loop .byte 102,15,56,220,209 .byte 102,15,56,220,217 paddq 16(%esp),%xmm7 - decl %eax .byte 102,15,56,221,208 .byte 102,15,56,221,216 + decl %eax leal 16(%esi),%esi xorps %xmm2,%xmm6 movdqa %xmm7,%xmm2 movups %xmm6,(%edi) -.byte 102,15,56,0,213 leal 16(%edi),%edi - jnz L030ccm64_enc_outer +.byte 102,15,56,0,213 + jnz L026ccm64_enc_outer movl 48(%esp),%esp movl 40(%esp),%edi movups %xmm3,(%edi) - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - pxor %xmm6,%xmm6 - pxor %xmm7,%xmm7 popl %edi popl %esi popl %ebx @@ -734,82 +664,71 @@ L_aesni_ccm64_decrypt_blocks_begin: movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 -L032enc1_loop_5: +L028enc1_loop_5: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz L032enc1_loop_5 + jnz L028enc1_loop_5 .byte 102,15,56,221,209 - shll $4,%ebx - movl $16,%ecx movups (%esi),%xmm6 paddq 16(%esp),%xmm7 leal 16(%esi),%esi - subl %ebx,%ecx - leal 32(%ebp,%ebx,1),%edx - movl %ecx,%ebx - jmp L033ccm64_dec_outer + jmp L029ccm64_dec_outer .align 4,0x90 
-L033ccm64_dec_outer: +L029ccm64_dec_outer: xorps %xmm2,%xmm6 movdqa %xmm7,%xmm2 + movl %ebx,%ecx movups %xmm6,(%edi) leal 16(%edi),%edi .byte 102,15,56,0,213 subl $1,%eax - jz L034ccm64_dec_break + jz L030ccm64_dec_break movups (%ebp),%xmm0 - movl %ebx,%ecx + shrl $1,%ecx movups 16(%ebp),%xmm1 xorps %xmm0,%xmm6 + leal 32(%ebp),%edx xorps %xmm0,%xmm2 xorps %xmm6,%xmm3 - movups 32(%ebp),%xmm0 -L035ccm64_dec2_loop: + movups (%edx),%xmm0 +L031ccm64_dec2_loop: .byte 102,15,56,220,209 + decl %ecx .byte 102,15,56,220,217 - movups (%edx,%ecx,1),%xmm1 - addl $32,%ecx + movups 16(%edx),%xmm1 .byte 102,15,56,220,208 + leal 32(%edx),%edx .byte 102,15,56,220,216 - movups -16(%edx,%ecx,1),%xmm0 - jnz L035ccm64_dec2_loop + movups (%edx),%xmm0 + jnz L031ccm64_dec2_loop movups (%esi),%xmm6 paddq 16(%esp),%xmm7 .byte 102,15,56,220,209 .byte 102,15,56,220,217 + leal 16(%esi),%esi .byte 102,15,56,221,208 .byte 102,15,56,221,216 - leal 16(%esi),%esi - jmp L033ccm64_dec_outer + jmp L029ccm64_dec_outer .align 4,0x90 -L034ccm64_dec_break: - movl 240(%ebp),%ecx +L030ccm64_dec_break: movl %ebp,%edx movups (%edx),%xmm0 movups 16(%edx),%xmm1 xorps %xmm0,%xmm6 leal 32(%edx),%edx xorps %xmm6,%xmm3 -L036enc1_loop_6: +L032enc1_loop_6: .byte 102,15,56,220,217 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz L036enc1_loop_6 + jnz L032enc1_loop_6 .byte 102,15,56,221,217 movl 48(%esp),%esp movl 40(%esp),%edi movups %xmm3,(%edi) - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - pxor %xmm6,%xmm6 - pxor %xmm7,%xmm7 popl %edi popl %esi popl %ebx @@ -833,7 +752,7 @@ L_aesni_ctr32_encrypt_blocks_begin: andl $-16,%esp movl %ebp,80(%esp) cmpl $1,%eax - je L037ctr32_one_shortcut + je L033ctr32_one_shortcut movdqu (%ebx),%xmm7 movl $202182159,(%esp) movl $134810123,4(%esp) @@ -849,59 +768,63 @@ L_aesni_ctr32_encrypt_blocks_begin: .byte 102,15,58,34,253,3 movl 240(%edx),%ecx bswap %ebx - pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 + pxor %xmm0,%xmm0 movdqa (%esp),%xmm2 -.byte 102,15,58,34,195,0 +.byte 102,15,58,34,203,0 leal 3(%ebx),%ebp -.byte 102,15,58,34,205,0 +.byte 102,15,58,34,197,0 incl %ebx -.byte 102,15,58,34,195,1 +.byte 102,15,58,34,203,1 incl %ebp -.byte 102,15,58,34,205,1 +.byte 102,15,58,34,197,1 incl %ebx -.byte 102,15,58,34,195,2 +.byte 102,15,58,34,203,2 incl %ebp -.byte 102,15,58,34,205,2 - movdqa %xmm0,48(%esp) -.byte 102,15,56,0,194 - movdqu (%edx),%xmm6 - movdqa %xmm1,64(%esp) +.byte 102,15,58,34,197,2 + movdqa %xmm1,48(%esp) .byte 102,15,56,0,202 - pshufd $192,%xmm0,%xmm2 - pshufd $128,%xmm0,%xmm3 + movdqa %xmm0,64(%esp) +.byte 102,15,56,0,194 + pshufd $192,%xmm1,%xmm2 + pshufd $128,%xmm1,%xmm3 cmpl $6,%eax - jb L038ctr32_tail - pxor %xmm6,%xmm7 - shll $4,%ecx - movl $16,%ebx + jb L034ctr32_tail movdqa %xmm7,32(%esp) + shrl $1,%ecx movl %edx,%ebp - subl %ecx,%ebx - leal 32(%edx,%ecx,1),%edx + movl %ecx,%ebx subl $6,%eax - jmp L039ctr32_loop6 + jmp L035ctr32_loop6 .align 4,0x90 -L039ctr32_loop6: - pshufd $64,%xmm0,%xmm4 - movdqa 32(%esp),%xmm0 - pshufd $192,%xmm1,%xmm5 +L035ctr32_loop6: + pshufd $64,%xmm1,%xmm4 + movdqa 32(%esp),%xmm1 + pshufd $192,%xmm0,%xmm5 + por %xmm1,%xmm2 + pshufd $128,%xmm0,%xmm6 + por %xmm1,%xmm3 + pshufd $64,%xmm0,%xmm7 + por %xmm1,%xmm4 + por %xmm1,%xmm5 + por %xmm1,%xmm6 + por %xmm1,%xmm7 + movups (%ebp),%xmm0 + movups 16(%ebp),%xmm1 + leal 32(%ebp),%edx + decl %ecx pxor %xmm0,%xmm2 - pshufd $128,%xmm1,%xmm6 pxor %xmm0,%xmm3 - pshufd $64,%xmm1,%xmm7 - movups 16(%ebp),%xmm1 - pxor %xmm0,%xmm4 - pxor %xmm0,%xmm5 .byte 
102,15,56,220,209 - pxor %xmm0,%xmm6 - pxor %xmm0,%xmm7 + pxor %xmm0,%xmm4 .byte 102,15,56,220,217 - movups 32(%ebp),%xmm0 - movl %ebx,%ecx + pxor %xmm0,%xmm5 .byte 102,15,56,220,225 + pxor %xmm0,%xmm6 .byte 102,15,56,220,233 + pxor %xmm0,%xmm7 .byte 102,15,56,220,241 + movups (%edx),%xmm0 .byte 102,15,56,220,249 call L_aesni_encrypt6_enter movups (%esi),%xmm1 @@ -912,51 +835,51 @@ L039ctr32_loop6: movups %xmm2,(%edi) movdqa 16(%esp),%xmm0 xorps %xmm1,%xmm4 - movdqa 64(%esp),%xmm1 + movdqa 48(%esp),%xmm1 movups %xmm3,16(%edi) movups %xmm4,32(%edi) paddd %xmm0,%xmm1 - paddd 48(%esp),%xmm0 + paddd 64(%esp),%xmm0 movdqa (%esp),%xmm2 movups 48(%esi),%xmm3 movups 64(%esi),%xmm4 xorps %xmm3,%xmm5 movups 80(%esi),%xmm3 leal 96(%esi),%esi - movdqa %xmm0,48(%esp) -.byte 102,15,56,0,194 + movdqa %xmm1,48(%esp) +.byte 102,15,56,0,202 xorps %xmm4,%xmm6 movups %xmm5,48(%edi) xorps %xmm3,%xmm7 - movdqa %xmm1,64(%esp) -.byte 102,15,56,0,202 + movdqa %xmm0,64(%esp) +.byte 102,15,56,0,194 movups %xmm6,64(%edi) - pshufd $192,%xmm0,%xmm2 + pshufd $192,%xmm1,%xmm2 movups %xmm7,80(%edi) leal 96(%edi),%edi - pshufd $128,%xmm0,%xmm3 + movl %ebx,%ecx + pshufd $128,%xmm1,%xmm3 subl $6,%eax - jnc L039ctr32_loop6 + jnc L035ctr32_loop6 addl $6,%eax - jz L040ctr32_ret - movdqu (%ebp),%xmm7 + jz L036ctr32_ret movl %ebp,%edx - pxor 32(%esp),%xmm7 - movl 240(%ebp),%ecx -L038ctr32_tail: + leal 1(,%ecx,2),%ecx + movdqa 32(%esp),%xmm7 +L034ctr32_tail: por %xmm7,%xmm2 cmpl $2,%eax - jb L041ctr32_one - pshufd $64,%xmm0,%xmm4 + jb L037ctr32_one + pshufd $64,%xmm1,%xmm4 por %xmm7,%xmm3 - je L042ctr32_two - pshufd $192,%xmm1,%xmm5 + je L038ctr32_two + pshufd $192,%xmm0,%xmm5 por %xmm7,%xmm4 cmpl $4,%eax - jb L043ctr32_three - pshufd $128,%xmm1,%xmm6 + jb L039ctr32_three + pshufd $128,%xmm0,%xmm6 por %xmm7,%xmm5 - je L044ctr32_four + je L040ctr32_four por %xmm7,%xmm6 call __aesni_encrypt6 movups (%esi),%xmm1 @@ -974,39 +897,39 @@ L038ctr32_tail: movups %xmm4,32(%edi) movups %xmm5,48(%edi) movups %xmm6,64(%edi) - jmp L040ctr32_ret + jmp L036ctr32_ret .align 4,0x90 -L037ctr32_one_shortcut: +L033ctr32_one_shortcut: movups (%ebx),%xmm2 movl 240(%edx),%ecx -L041ctr32_one: +L037ctr32_one: movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 -L045enc1_loop_7: +L041enc1_loop_7: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz L045enc1_loop_7 + jnz L041enc1_loop_7 .byte 102,15,56,221,209 movups (%esi),%xmm6 xorps %xmm2,%xmm6 movups %xmm6,(%edi) - jmp L040ctr32_ret + jmp L036ctr32_ret .align 4,0x90 -L042ctr32_two: - call __aesni_encrypt2 +L038ctr32_two: + call __aesni_encrypt3 movups (%esi),%xmm5 movups 16(%esi),%xmm6 xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 movups %xmm2,(%edi) movups %xmm3,16(%edi) - jmp L040ctr32_ret + jmp L036ctr32_ret .align 4,0x90 -L043ctr32_three: +L039ctr32_three: call __aesni_encrypt3 movups (%esi),%xmm5 movups 16(%esi),%xmm6 @@ -1017,9 +940,9 @@ L043ctr32_three: xorps %xmm7,%xmm4 movups %xmm3,16(%edi) movups %xmm4,32(%edi) - jmp L040ctr32_ret + jmp L036ctr32_ret .align 4,0x90 -L044ctr32_four: +L040ctr32_four: call __aesni_encrypt4 movups (%esi),%xmm6 movups 16(%esi),%xmm7 @@ -1033,18 +956,7 @@ L044ctr32_four: xorps %xmm0,%xmm5 movups %xmm4,32(%edi) movups %xmm5,48(%edi) -L040ctr32_ret: - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - movdqa %xmm0,32(%esp) - pxor %xmm5,%xmm5 - movdqa %xmm0,48(%esp) - pxor %xmm6,%xmm6 - movdqa %xmm0,64(%esp) - pxor %xmm7,%xmm7 +L036ctr32_ret: movl 80(%esp),%esp popl %edi popl %esi @@ 
-1067,12 +979,12 @@ L_aesni_xts_encrypt_begin: movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 -L046enc1_loop_8: +L042enc1_loop_8: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz L046enc1_loop_8 + jnz L042enc1_loop_8 .byte 102,15,56,221,209 movl 20(%esp),%esi movl 24(%esp),%edi @@ -1096,14 +1008,12 @@ L046enc1_loop_8: movl %edx,%ebp movl %ecx,%ebx subl $96,%eax - jc L047xts_enc_short - shll $4,%ecx - movl $16,%ebx - subl %ecx,%ebx - leal 32(%edx,%ecx,1),%edx - jmp L048xts_enc_loop6 + jc L043xts_enc_short + shrl $1,%ecx + movl %ecx,%ebx + jmp L044xts_enc_loop6 .align 4,0x90 -L048xts_enc_loop6: +L044xts_enc_loop6: pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,(%esp) @@ -1139,7 +1049,6 @@ L048xts_enc_loop6: pand %xmm3,%xmm7 movups (%esi),%xmm2 pxor %xmm1,%xmm7 - movl %ebx,%ecx movdqu 16(%esi),%xmm3 xorps %xmm0,%xmm2 movdqu 32(%esi),%xmm4 @@ -1155,17 +1064,19 @@ L048xts_enc_loop6: movdqa %xmm7,80(%esp) pxor %xmm1,%xmm7 movups 16(%ebp),%xmm1 + leal 32(%ebp),%edx pxor 16(%esp),%xmm3 - pxor 32(%esp),%xmm4 .byte 102,15,56,220,209 - pxor 48(%esp),%xmm5 - pxor 64(%esp),%xmm6 + pxor 32(%esp),%xmm4 .byte 102,15,56,220,217 - pxor %xmm0,%xmm7 - movups 32(%ebp),%xmm0 + pxor 48(%esp),%xmm5 + decl %ecx .byte 102,15,56,220,225 + pxor 64(%esp),%xmm6 .byte 102,15,56,220,233 + pxor %xmm0,%xmm7 .byte 102,15,56,220,241 + movups (%edx),%xmm0 .byte 102,15,56,220,249 call L_aesni_encrypt6_enter movdqa 80(%esp),%xmm1 @@ -1190,25 +1101,26 @@ L048xts_enc_loop6: paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 + movl %ebx,%ecx pxor %xmm2,%xmm1 subl $96,%eax - jnc L048xts_enc_loop6 - movl 240(%ebp),%ecx + jnc L044xts_enc_loop6 + leal 1(,%ecx,2),%ecx movl %ebp,%edx movl %ecx,%ebx -L047xts_enc_short: +L043xts_enc_short: addl $96,%eax - jz L049xts_enc_done6x + jz L045xts_enc_done6x movdqa %xmm1,%xmm5 cmpl $32,%eax - jb L050xts_enc_one + jb L046xts_enc_one pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 - je L051xts_enc_two + je L047xts_enc_two pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,%xmm6 @@ -1217,7 +1129,7 @@ L047xts_enc_short: pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 cmpl $64,%eax - jb L052xts_enc_three + jb L048xts_enc_three pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,%xmm7 @@ -1227,7 +1139,7 @@ L047xts_enc_short: pxor %xmm2,%xmm1 movdqa %xmm5,(%esp) movdqa %xmm6,16(%esp) - je L053xts_enc_four + je L049xts_enc_four movdqa %xmm7,32(%esp) pshufd $19,%xmm0,%xmm7 movdqa %xmm1,48(%esp) @@ -1259,9 +1171,9 @@ L047xts_enc_short: movups %xmm5,48(%edi) movups %xmm6,64(%edi) leal 80(%edi),%edi - jmp L054xts_enc_done + jmp L050xts_enc_done .align 4,0x90 -L050xts_enc_one: +L046xts_enc_one: movups (%esi),%xmm2 leal 16(%esi),%esi xorps %xmm5,%xmm2 @@ -1269,36 +1181,37 @@ L050xts_enc_one: movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 -L055enc1_loop_9: +L051enc1_loop_9: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz L055enc1_loop_9 + jnz L051enc1_loop_9 .byte 102,15,56,221,209 xorps %xmm5,%xmm2 movups %xmm2,(%edi) leal 16(%edi),%edi movdqa %xmm5,%xmm1 - jmp L054xts_enc_done + jmp L050xts_enc_done .align 4,0x90 -L051xts_enc_two: +L047xts_enc_two: movaps %xmm1,%xmm6 movups (%esi),%xmm2 movups 16(%esi),%xmm3 leal 32(%esi),%esi xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 - call __aesni_encrypt2 + xorps %xmm4,%xmm4 + call __aesni_encrypt3 xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 movups %xmm2,(%edi) movups %xmm3,16(%edi) leal 32(%edi),%edi movdqa %xmm6,%xmm1 - jmp 
L054xts_enc_done + jmp L050xts_enc_done .align 4,0x90 -L052xts_enc_three: +L048xts_enc_three: movaps %xmm1,%xmm7 movups (%esi),%xmm2 movups 16(%esi),%xmm3 @@ -1316,9 +1229,9 @@ L052xts_enc_three: movups %xmm4,32(%edi) leal 48(%edi),%edi movdqa %xmm7,%xmm1 - jmp L054xts_enc_done + jmp L050xts_enc_done .align 4,0x90 -L053xts_enc_four: +L049xts_enc_four: movaps %xmm1,%xmm6 movups (%esi),%xmm2 movups 16(%esi),%xmm3 @@ -1340,28 +1253,28 @@ L053xts_enc_four: movups %xmm5,48(%edi) leal 64(%edi),%edi movdqa %xmm6,%xmm1 - jmp L054xts_enc_done + jmp L050xts_enc_done .align 4,0x90 -L049xts_enc_done6x: +L045xts_enc_done6x: movl 112(%esp),%eax andl $15,%eax - jz L056xts_enc_ret + jz L052xts_enc_ret movdqa %xmm1,%xmm5 movl %eax,112(%esp) - jmp L057xts_enc_steal + jmp L053xts_enc_steal .align 4,0x90 -L054xts_enc_done: +L050xts_enc_done: movl 112(%esp),%eax pxor %xmm0,%xmm0 andl $15,%eax - jz L056xts_enc_ret + jz L052xts_enc_ret pcmpgtd %xmm1,%xmm0 movl %eax,112(%esp) pshufd $19,%xmm0,%xmm5 paddq %xmm1,%xmm1 pand 96(%esp),%xmm5 pxor %xmm1,%xmm5 -L057xts_enc_steal: +L053xts_enc_steal: movzbl (%esi),%ecx movzbl -16(%edi),%edx leal 1(%esi),%esi @@ -1369,7 +1282,7 @@ L057xts_enc_steal: movb %dl,(%edi) leal 1(%edi),%edi subl $1,%eax - jnz L057xts_enc_steal + jnz L053xts_enc_steal subl 112(%esp),%edi movl %ebp,%edx movl %ebx,%ecx @@ -1379,30 +1292,16 @@ L057xts_enc_steal: movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 -L058enc1_loop_10: +L054enc1_loop_10: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz L058enc1_loop_10 + jnz L054enc1_loop_10 .byte 102,15,56,221,209 xorps %xmm5,%xmm2 movups %xmm2,-16(%edi) -L056xts_enc_ret: - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - movdqa %xmm0,(%esp) - pxor %xmm3,%xmm3 - movdqa %xmm0,16(%esp) - pxor %xmm4,%xmm4 - movdqa %xmm0,32(%esp) - pxor %xmm5,%xmm5 - movdqa %xmm0,48(%esp) - pxor %xmm6,%xmm6 - movdqa %xmm0,64(%esp) - pxor %xmm7,%xmm7 - movdqa %xmm0,80(%esp) +L052xts_enc_ret: movl 116(%esp),%esp popl %edi popl %esi @@ -1425,12 +1324,12 @@ L_aesni_xts_decrypt_begin: movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 -L059enc1_loop_11: +L055enc1_loop_11: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz L059enc1_loop_11 + jnz L055enc1_loop_11 .byte 102,15,56,221,209 movl 20(%esp),%esi movl 24(%esp),%edi @@ -1459,14 +1358,12 @@ L059enc1_loop_11: pcmpgtd %xmm1,%xmm0 andl $-16,%eax subl $96,%eax - jc L060xts_dec_short - shll $4,%ecx - movl $16,%ebx - subl %ecx,%ebx - leal 32(%edx,%ecx,1),%edx - jmp L061xts_dec_loop6 + jc L056xts_dec_short + shrl $1,%ecx + movl %ecx,%ebx + jmp L057xts_dec_loop6 .align 4,0x90 -L061xts_dec_loop6: +L057xts_dec_loop6: pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,(%esp) @@ -1502,7 +1399,6 @@ L061xts_dec_loop6: pand %xmm3,%xmm7 movups (%esi),%xmm2 pxor %xmm1,%xmm7 - movl %ebx,%ecx movdqu 16(%esi),%xmm3 xorps %xmm0,%xmm2 movdqu 32(%esi),%xmm4 @@ -1518,17 +1414,19 @@ L061xts_dec_loop6: movdqa %xmm7,80(%esp) pxor %xmm1,%xmm7 movups 16(%ebp),%xmm1 + leal 32(%ebp),%edx pxor 16(%esp),%xmm3 - pxor 32(%esp),%xmm4 .byte 102,15,56,222,209 - pxor 48(%esp),%xmm5 - pxor 64(%esp),%xmm6 + pxor 32(%esp),%xmm4 .byte 102,15,56,222,217 - pxor %xmm0,%xmm7 - movups 32(%ebp),%xmm0 + pxor 48(%esp),%xmm5 + decl %ecx .byte 102,15,56,222,225 + pxor 64(%esp),%xmm6 .byte 102,15,56,222,233 + pxor %xmm0,%xmm7 .byte 102,15,56,222,241 + movups (%edx),%xmm0 .byte 102,15,56,222,249 call L_aesni_decrypt6_enter movdqa 80(%esp),%xmm1 @@ -1553,25 +1451,26 @@ L061xts_dec_loop6: paddq 
%xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 + movl %ebx,%ecx pxor %xmm2,%xmm1 subl $96,%eax - jnc L061xts_dec_loop6 - movl 240(%ebp),%ecx + jnc L057xts_dec_loop6 + leal 1(,%ecx,2),%ecx movl %ebp,%edx movl %ecx,%ebx -L060xts_dec_short: +L056xts_dec_short: addl $96,%eax - jz L062xts_dec_done6x + jz L058xts_dec_done6x movdqa %xmm1,%xmm5 cmpl $32,%eax - jb L063xts_dec_one + jb L059xts_dec_one pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 - je L064xts_dec_two + je L060xts_dec_two pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,%xmm6 @@ -1580,7 +1479,7 @@ L060xts_dec_short: pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 cmpl $64,%eax - jb L065xts_dec_three + jb L061xts_dec_three pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,%xmm7 @@ -1590,7 +1489,7 @@ L060xts_dec_short: pxor %xmm2,%xmm1 movdqa %xmm5,(%esp) movdqa %xmm6,16(%esp) - je L066xts_dec_four + je L062xts_dec_four movdqa %xmm7,32(%esp) pshufd $19,%xmm0,%xmm7 movdqa %xmm1,48(%esp) @@ -1622,9 +1521,9 @@ L060xts_dec_short: movups %xmm5,48(%edi) movups %xmm6,64(%edi) leal 80(%edi),%edi - jmp L067xts_dec_done + jmp L063xts_dec_done .align 4,0x90 -L063xts_dec_one: +L059xts_dec_one: movups (%esi),%xmm2 leal 16(%esi),%esi xorps %xmm5,%xmm2 @@ -1632,36 +1531,36 @@ L063xts_dec_one: movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 -L068dec1_loop_12: +L064dec1_loop_12: .byte 102,15,56,222,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz L068dec1_loop_12 + jnz L064dec1_loop_12 .byte 102,15,56,223,209 xorps %xmm5,%xmm2 movups %xmm2,(%edi) leal 16(%edi),%edi movdqa %xmm5,%xmm1 - jmp L067xts_dec_done + jmp L063xts_dec_done .align 4,0x90 -L064xts_dec_two: +L060xts_dec_two: movaps %xmm1,%xmm6 movups (%esi),%xmm2 movups 16(%esi),%xmm3 leal 32(%esi),%esi xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 - call __aesni_decrypt2 + call __aesni_decrypt3 xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 movups %xmm2,(%edi) movups %xmm3,16(%edi) leal 32(%edi),%edi movdqa %xmm6,%xmm1 - jmp L067xts_dec_done + jmp L063xts_dec_done .align 4,0x90 -L065xts_dec_three: +L061xts_dec_three: movaps %xmm1,%xmm7 movups (%esi),%xmm2 movups 16(%esi),%xmm3 @@ -1679,9 +1578,9 @@ L065xts_dec_three: movups %xmm4,32(%edi) leal 48(%edi),%edi movdqa %xmm7,%xmm1 - jmp L067xts_dec_done + jmp L063xts_dec_done .align 4,0x90 -L066xts_dec_four: +L062xts_dec_four: movaps %xmm1,%xmm6 movups (%esi),%xmm2 movups 16(%esi),%xmm3 @@ -1703,20 +1602,20 @@ L066xts_dec_four: movups %xmm5,48(%edi) leal 64(%edi),%edi movdqa %xmm6,%xmm1 - jmp L067xts_dec_done + jmp L063xts_dec_done .align 4,0x90 -L062xts_dec_done6x: +L058xts_dec_done6x: movl 112(%esp),%eax andl $15,%eax - jz L069xts_dec_ret + jz L065xts_dec_ret movl %eax,112(%esp) - jmp L070xts_dec_only_one_more + jmp L066xts_dec_only_one_more .align 4,0x90 -L067xts_dec_done: +L063xts_dec_done: movl 112(%esp),%eax pxor %xmm0,%xmm0 andl $15,%eax - jz L069xts_dec_ret + jz L065xts_dec_ret pcmpgtd %xmm1,%xmm0 movl %eax,112(%esp) pshufd $19,%xmm0,%xmm2 @@ -1726,7 +1625,7 @@ L067xts_dec_done: pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 -L070xts_dec_only_one_more: +L066xts_dec_only_one_more: pshufd $19,%xmm0,%xmm5 movdqa %xmm1,%xmm6 paddq %xmm1,%xmm1 @@ -1740,16 +1639,16 @@ L070xts_dec_only_one_more: movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 -L071dec1_loop_13: +L067dec1_loop_13: .byte 102,15,56,222,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz L071dec1_loop_13 + jnz L067dec1_loop_13 .byte 102,15,56,223,209 xorps %xmm5,%xmm2 movups %xmm2,(%edi) 
-L072xts_dec_steal: +L068xts_dec_steal: movzbl 16(%esi),%ecx movzbl (%edi),%edx leal 1(%esi),%esi @@ -1757,7 +1656,7 @@ L072xts_dec_steal: movb %dl,16(%edi) leal 1(%edi),%edi subl $1,%eax - jnz L072xts_dec_steal + jnz L068xts_dec_steal subl 112(%esp),%edi movl %ebp,%edx movl %ebx,%ecx @@ -1767,30 +1666,16 @@ L072xts_dec_steal: movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 -L073dec1_loop_14: +L069dec1_loop_14: .byte 102,15,56,222,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz L073dec1_loop_14 + jnz L069dec1_loop_14 .byte 102,15,56,223,209 xorps %xmm6,%xmm2 movups %xmm2,(%edi) -L069xts_dec_ret: - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - movdqa %xmm0,(%esp) - pxor %xmm3,%xmm3 - movdqa %xmm0,16(%esp) - pxor %xmm4,%xmm4 - movdqa %xmm0,32(%esp) - pxor %xmm5,%xmm5 - movdqa %xmm0,48(%esp) - pxor %xmm6,%xmm6 - movdqa %xmm0,64(%esp) - pxor %xmm7,%xmm7 - movdqa %xmm0,80(%esp) +L065xts_dec_ret: movl 116(%esp),%esp popl %edi popl %esi @@ -1814,7 +1699,7 @@ L_aesni_cbc_encrypt_begin: movl 32(%esp),%edx movl 36(%esp),%ebp testl %eax,%eax - jz L074cbc_abort + jz L070cbc_abort cmpl $0,40(%esp) xchgl %esp,%ebx movups (%ebp),%xmm7 @@ -1822,14 +1707,14 @@ L_aesni_cbc_encrypt_begin: movl %edx,%ebp movl %ebx,16(%esp) movl %ecx,%ebx - je L075cbc_decrypt + je L071cbc_decrypt movaps %xmm7,%xmm2 cmpl $16,%eax - jb L076cbc_enc_tail + jb L072cbc_enc_tail subl $16,%eax - jmp L077cbc_enc_loop + jmp L073cbc_enc_loop .align 4,0x90 -L077cbc_enc_loop: +L073cbc_enc_loop: movups (%esi),%xmm7 leal 16(%esi),%esi movups (%edx),%xmm0 @@ -1837,25 +1722,24 @@ L077cbc_enc_loop: xorps %xmm0,%xmm7 leal 32(%edx),%edx xorps %xmm7,%xmm2 -L078enc1_loop_15: +L074enc1_loop_15: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz L078enc1_loop_15 + jnz L074enc1_loop_15 .byte 102,15,56,221,209 movl %ebx,%ecx movl %ebp,%edx movups %xmm2,(%edi) leal 16(%edi),%edi subl $16,%eax - jnc L077cbc_enc_loop + jnc L073cbc_enc_loop addl $16,%eax - jnz L076cbc_enc_tail + jnz L072cbc_enc_tail movaps %xmm2,%xmm7 - pxor %xmm2,%xmm2 - jmp L079cbc_ret -L076cbc_enc_tail: + jmp L075cbc_ret +L072cbc_enc_tail: movl %eax,%ecx .long 2767451785 movl $16,%ecx @@ -1866,20 +1750,20 @@ L076cbc_enc_tail: movl %ebx,%ecx movl %edi,%esi movl %ebp,%edx - jmp L077cbc_enc_loop + jmp L073cbc_enc_loop .align 4,0x90 -L075cbc_decrypt: +L071cbc_decrypt: cmpl $80,%eax - jbe L080cbc_dec_tail + jbe L076cbc_dec_tail movaps %xmm7,(%esp) subl $80,%eax - jmp L081cbc_dec_loop6_enter + jmp L077cbc_dec_loop6_enter .align 4,0x90 -L082cbc_dec_loop6: +L078cbc_dec_loop6: movaps %xmm0,(%esp) movups %xmm7,(%edi) leal 16(%edi),%edi -L081cbc_dec_loop6_enter: +L077cbc_dec_loop6_enter: movdqu (%esi),%xmm2 movdqu 16(%esi),%xmm3 movdqu 32(%esi),%xmm4 @@ -1909,28 +1793,28 @@ L081cbc_dec_loop6_enter: movups %xmm6,64(%edi) leal 80(%edi),%edi subl $96,%eax - ja L082cbc_dec_loop6 + ja L078cbc_dec_loop6 movaps %xmm7,%xmm2 movaps %xmm0,%xmm7 addl $80,%eax - jle L083cbc_dec_clear_tail_collected + jle L079cbc_dec_tail_collected movups %xmm2,(%edi) leal 16(%edi),%edi -L080cbc_dec_tail: +L076cbc_dec_tail: movups (%esi),%xmm2 movaps %xmm2,%xmm6 cmpl $16,%eax - jbe L084cbc_dec_one + jbe L080cbc_dec_one movups 16(%esi),%xmm3 movaps %xmm3,%xmm5 cmpl $32,%eax - jbe L085cbc_dec_two + jbe L081cbc_dec_two movups 32(%esi),%xmm4 cmpl $48,%eax - jbe L086cbc_dec_three + jbe L082cbc_dec_three movups 48(%esi),%xmm5 cmpl $64,%eax - jbe L087cbc_dec_four + jbe L083cbc_dec_four movups 64(%esi),%xmm6 movaps %xmm7,(%esp) movups (%esi),%xmm2 @@ -1948,62 +1832,56 @@ 
L080cbc_dec_tail: xorps %xmm0,%xmm6 movups %xmm2,(%edi) movups %xmm3,16(%edi) - pxor %xmm3,%xmm3 movups %xmm4,32(%edi) - pxor %xmm4,%xmm4 movups %xmm5,48(%edi) - pxor %xmm5,%xmm5 leal 64(%edi),%edi movaps %xmm6,%xmm2 - pxor %xmm6,%xmm6 subl $80,%eax - jmp L088cbc_dec_tail_collected + jmp L079cbc_dec_tail_collected .align 4,0x90 -L084cbc_dec_one: +L080cbc_dec_one: movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 -L089dec1_loop_16: +L084dec1_loop_16: .byte 102,15,56,222,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx - jnz L089dec1_loop_16 + jnz L084dec1_loop_16 .byte 102,15,56,223,209 xorps %xmm7,%xmm2 movaps %xmm6,%xmm7 subl $16,%eax - jmp L088cbc_dec_tail_collected + jmp L079cbc_dec_tail_collected .align 4,0x90 -L085cbc_dec_two: - call __aesni_decrypt2 +L081cbc_dec_two: + xorps %xmm4,%xmm4 + call __aesni_decrypt3 xorps %xmm7,%xmm2 xorps %xmm6,%xmm3 movups %xmm2,(%edi) movaps %xmm3,%xmm2 - pxor %xmm3,%xmm3 leal 16(%edi),%edi movaps %xmm5,%xmm7 subl $32,%eax - jmp L088cbc_dec_tail_collected + jmp L079cbc_dec_tail_collected .align 4,0x90 -L086cbc_dec_three: +L082cbc_dec_three: call __aesni_decrypt3 xorps %xmm7,%xmm2 xorps %xmm6,%xmm3 xorps %xmm5,%xmm4 movups %xmm2,(%edi) movaps %xmm4,%xmm2 - pxor %xmm4,%xmm4 movups %xmm3,16(%edi) - pxor %xmm3,%xmm3 leal 32(%edi),%edi movups 32(%esi),%xmm7 subl $48,%eax - jmp L088cbc_dec_tail_collected + jmp L079cbc_dec_tail_collected .align 4,0x90 -L087cbc_dec_four: +L083cbc_dec_four: call __aesni_decrypt4 movups 16(%esi),%xmm1 movups 32(%esi),%xmm0 @@ -2013,44 +1891,28 @@ L087cbc_dec_four: movups %xmm2,(%edi) xorps %xmm1,%xmm4 movups %xmm3,16(%edi) - pxor %xmm3,%xmm3 xorps %xmm0,%xmm5 movups %xmm4,32(%edi) - pxor %xmm4,%xmm4 leal 48(%edi),%edi movaps %xmm5,%xmm2 - pxor %xmm5,%xmm5 subl $64,%eax - jmp L088cbc_dec_tail_collected -.align 4,0x90 -L083cbc_dec_clear_tail_collected: - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - pxor %xmm6,%xmm6 -L088cbc_dec_tail_collected: +L079cbc_dec_tail_collected: andl $15,%eax - jnz L090cbc_dec_tail_partial + jnz L085cbc_dec_tail_partial movups %xmm2,(%edi) - pxor %xmm0,%xmm0 - jmp L079cbc_ret + jmp L075cbc_ret .align 4,0x90 -L090cbc_dec_tail_partial: +L085cbc_dec_tail_partial: movaps %xmm2,(%esp) - pxor %xmm0,%xmm0 movl $16,%ecx movl %esp,%esi subl %eax,%ecx .long 2767451785 - movdqa %xmm2,(%esp) -L079cbc_ret: +L075cbc_ret: movl 16(%esp),%esp movl 36(%esp),%ebp - pxor %xmm2,%xmm2 - pxor %xmm1,%xmm1 movups %xmm7,(%ebp) - pxor %xmm7,%xmm7 -L074cbc_abort: +L070cbc_abort: popl %edi popl %esi popl %ebx @@ -2058,62 +1920,52 @@ L074cbc_abort: ret .align 4 __aesni_set_encrypt_key: - pushl %ebp - pushl %ebx testl %eax,%eax - jz L091bad_pointer + jz L086bad_pointer testl %edx,%edx - jz L091bad_pointer - call L092pic -L092pic: - popl %ebx - leal Lkey_const-L092pic(%ebx),%ebx - movl L__gnutls_x86_cpuid_s$non_lazy_ptr-Lkey_const(%ebx),%ebp + jz L086bad_pointer movups (%eax),%xmm0 xorps %xmm4,%xmm4 - movl 4(%ebp),%ebp leal 16(%edx),%edx - andl $268437504,%ebp cmpl $256,%ecx - je L09314rounds + je L08714rounds cmpl $192,%ecx - je L09412rounds + je L08812rounds cmpl $128,%ecx - jne L095bad_keybits + jne L089bad_keybits .align 4,0x90 -L09610rounds: - cmpl $268435456,%ebp - je L09710rounds_alt +L09010rounds: movl $9,%ecx movups %xmm0,-16(%edx) .byte 102,15,58,223,200,1 - call L098key_128_cold + call L091key_128_cold .byte 102,15,58,223,200,2 - call L099key_128 + call L092key_128 .byte 102,15,58,223,200,4 - call L099key_128 + call L092key_128 .byte 102,15,58,223,200,8 - call L099key_128 + call 
L092key_128 .byte 102,15,58,223,200,16 - call L099key_128 + call L092key_128 .byte 102,15,58,223,200,32 - call L099key_128 + call L092key_128 .byte 102,15,58,223,200,64 - call L099key_128 + call L092key_128 .byte 102,15,58,223,200,128 - call L099key_128 + call L092key_128 .byte 102,15,58,223,200,27 - call L099key_128 + call L092key_128 .byte 102,15,58,223,200,54 - call L099key_128 + call L092key_128 movups %xmm0,(%edx) movl %ecx,80(%edx) - jmp L100good_key + xorl %eax,%eax + ret .align 4,0x90 -L099key_128: +L092key_128: movups %xmm0,(%edx) leal 16(%edx),%edx -L098key_128_cold: +L091key_128_cold: shufps $16,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $140,%xmm0,%xmm4 @@ -2122,91 +1974,38 @@ L098key_128_cold: xorps %xmm1,%xmm0 ret .align 4,0x90 -L09710rounds_alt: - movdqa (%ebx),%xmm5 - movl $8,%ecx - movdqa 32(%ebx),%xmm4 - movdqa %xmm0,%xmm2 - movdqu %xmm0,-16(%edx) -L101loop_key128: -.byte 102,15,56,0,197 -.byte 102,15,56,221,196 - pslld $1,%xmm4 - leal 16(%edx),%edx - movdqa %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm3,%xmm2 - pxor %xmm2,%xmm0 - movdqu %xmm0,-16(%edx) - movdqa %xmm0,%xmm2 - decl %ecx - jnz L101loop_key128 - movdqa 48(%ebx),%xmm4 -.byte 102,15,56,0,197 -.byte 102,15,56,221,196 - pslld $1,%xmm4 - movdqa %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm3,%xmm2 - pxor %xmm2,%xmm0 - movdqu %xmm0,(%edx) - movdqa %xmm0,%xmm2 -.byte 102,15,56,0,197 -.byte 102,15,56,221,196 - movdqa %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm2,%xmm3 - pslldq $4,%xmm2 - pxor %xmm3,%xmm2 - pxor %xmm2,%xmm0 - movdqu %xmm0,16(%edx) - movl $9,%ecx - movl %ecx,96(%edx) - jmp L100good_key -.align 4,0x90 -L09412rounds: +L08812rounds: movq 16(%eax),%xmm2 - cmpl $268435456,%ebp - je L10212rounds_alt movl $11,%ecx movups %xmm0,-16(%edx) .byte 102,15,58,223,202,1 - call L103key_192a_cold + call L093key_192a_cold .byte 102,15,58,223,202,2 - call L104key_192b + call L094key_192b .byte 102,15,58,223,202,4 - call L105key_192a + call L095key_192a .byte 102,15,58,223,202,8 - call L104key_192b + call L094key_192b .byte 102,15,58,223,202,16 - call L105key_192a + call L095key_192a .byte 102,15,58,223,202,32 - call L104key_192b + call L094key_192b .byte 102,15,58,223,202,64 - call L105key_192a + call L095key_192a .byte 102,15,58,223,202,128 - call L104key_192b + call L094key_192b movups %xmm0,(%edx) movl %ecx,48(%edx) - jmp L100good_key + xorl %eax,%eax + ret .align 4,0x90 -L105key_192a: +L095key_192a: movups %xmm0,(%edx) leal 16(%edx),%edx .align 4,0x90 -L103key_192a_cold: +L093key_192a_cold: movaps %xmm2,%xmm5 -L106key_192b_warm: +L096key_192b_warm: shufps $16,%xmm0,%xmm4 movdqa %xmm2,%xmm3 xorps %xmm4,%xmm0 @@ -2220,90 +2019,56 @@ L106key_192b_warm: pxor %xmm3,%xmm2 ret .align 4,0x90 -L104key_192b: +L094key_192b: movaps %xmm0,%xmm3 shufps $68,%xmm0,%xmm5 movups %xmm5,(%edx) shufps $78,%xmm2,%xmm3 movups %xmm3,16(%edx) leal 32(%edx),%edx - jmp L106key_192b_warm + jmp L096key_192b_warm .align 4,0x90 -L10212rounds_alt: - movdqa 16(%ebx),%xmm5 - movdqa 32(%ebx),%xmm4 - movl $8,%ecx - movdqu %xmm0,-16(%edx) -L107loop_key192: - movq %xmm2,(%edx) - movdqa %xmm2,%xmm1 -.byte 102,15,56,0,213 -.byte 102,15,56,221,212 - pslld $1,%xmm4 - leal 24(%edx),%edx - movdqa %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm3,%xmm0 - pshufd $255,%xmm0,%xmm3 - pxor %xmm1,%xmm3 - pslldq $4,%xmm1 - pxor 
%xmm1,%xmm3 - pxor %xmm2,%xmm0 - pxor %xmm3,%xmm2 - movdqu %xmm0,-16(%edx) - decl %ecx - jnz L107loop_key192 - movl $11,%ecx - movl %ecx,32(%edx) - jmp L100good_key -.align 4,0x90 -L09314rounds: +L08714rounds: movups 16(%eax),%xmm2 - leal 16(%edx),%edx - cmpl $268435456,%ebp - je L10814rounds_alt movl $13,%ecx + leal 16(%edx),%edx movups %xmm0,-32(%edx) movups %xmm2,-16(%edx) .byte 102,15,58,223,202,1 - call L109key_256a_cold + call L097key_256a_cold .byte 102,15,58,223,200,1 - call L110key_256b + call L098key_256b .byte 102,15,58,223,202,2 - call L111key_256a + call L099key_256a .byte 102,15,58,223,200,2 - call L110key_256b + call L098key_256b .byte 102,15,58,223,202,4 - call L111key_256a + call L099key_256a .byte 102,15,58,223,200,4 - call L110key_256b + call L098key_256b .byte 102,15,58,223,202,8 - call L111key_256a + call L099key_256a .byte 102,15,58,223,200,8 - call L110key_256b + call L098key_256b .byte 102,15,58,223,202,16 - call L111key_256a + call L099key_256a .byte 102,15,58,223,200,16 - call L110key_256b + call L098key_256b .byte 102,15,58,223,202,32 - call L111key_256a + call L099key_256a .byte 102,15,58,223,200,32 - call L110key_256b + call L098key_256b .byte 102,15,58,223,202,64 - call L111key_256a + call L099key_256a movups %xmm0,(%edx) movl %ecx,16(%edx) xorl %eax,%eax - jmp L100good_key + ret .align 4,0x90 -L111key_256a: +L099key_256a: movups %xmm2,(%edx) leal 16(%edx),%edx -L109key_256a_cold: +L097key_256a_cold: shufps $16,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $140,%xmm0,%xmm4 @@ -2312,7 +2077,7 @@ L109key_256a_cold: xorps %xmm1,%xmm0 ret .align 4,0x90 -L110key_256b: +L098key_256b: movups %xmm0,(%edx) leal 16(%edx),%edx shufps $16,%xmm2,%xmm4 @@ -2322,70 +2087,13 @@ L110key_256b: shufps $170,%xmm1,%xmm1 xorps %xmm1,%xmm2 ret -.align 4,0x90 -L10814rounds_alt: - movdqa (%ebx),%xmm5 - movdqa 32(%ebx),%xmm4 - movl $7,%ecx - movdqu %xmm0,-32(%edx) - movdqa %xmm2,%xmm1 - movdqu %xmm2,-16(%edx) -L112loop_key256: -.byte 102,15,56,0,213 -.byte 102,15,56,221,212 - movdqa %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm0,%xmm3 - pslldq $4,%xmm0 - pxor %xmm3,%xmm0 - pslld $1,%xmm4 - pxor %xmm2,%xmm0 - movdqu %xmm0,(%edx) - decl %ecx - jz L113done_key256 - pshufd $255,%xmm0,%xmm2 - pxor %xmm3,%xmm3 -.byte 102,15,56,221,211 - movdqa %xmm1,%xmm3 - pslldq $4,%xmm1 - pxor %xmm1,%xmm3 - pslldq $4,%xmm1 - pxor %xmm1,%xmm3 - pslldq $4,%xmm1 - pxor %xmm3,%xmm1 - pxor %xmm1,%xmm2 - movdqu %xmm2,16(%edx) - leal 32(%edx),%edx - movdqa %xmm2,%xmm1 - jmp L112loop_key256 -L113done_key256: - movl $13,%ecx - movl %ecx,16(%edx) -L100good_key: - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 - pxor %xmm2,%xmm2 - pxor %xmm3,%xmm3 - pxor %xmm4,%xmm4 - pxor %xmm5,%xmm5 - xorl %eax,%eax - popl %ebx - popl %ebp - ret .align 2,0x90 -L091bad_pointer: +L086bad_pointer: movl $-1,%eax - popl %ebx - popl %ebp ret .align 2,0x90 -L095bad_keybits: - pxor %xmm0,%xmm0 +L089bad_keybits: movl $-2,%eax - popl %ebx - popl %ebp ret .globl _aesni_set_encrypt_key .align 4 @@ -2407,7 +2115,7 @@ L_aesni_set_decrypt_key_begin: movl 12(%esp),%edx shll $4,%ecx testl %eax,%eax - jnz L114dec_key_ret + jnz L100dec_key_ret leal 16(%edx,%ecx,1),%eax movups (%edx),%xmm0 movups (%eax),%xmm1 @@ -2415,7 +2123,7 @@ L_aesni_set_decrypt_key_begin: movups %xmm1,(%edx) leal 16(%edx),%edx leal -16(%eax),%eax -L115dec_key_inverse: +L101dec_key_inverse: movups (%edx),%xmm0 movups (%eax),%xmm1 .byte 102,15,56,219,192 @@ -2425,28 +2133,15 @@ L115dec_key_inverse: movups %xmm0,16(%eax) movups %xmm1,-16(%edx) cmpl %edx,%eax - ja 
L115dec_key_inverse + ja L101dec_key_inverse movups (%edx),%xmm0 .byte 102,15,56,219,192 movups %xmm0,(%edx) - pxor %xmm0,%xmm0 - pxor %xmm1,%xmm1 xorl %eax,%eax -L114dec_key_ret: +L100dec_key_ret: ret -.align 6,0x90 -Lkey_const: -.long 202313229,202313229,202313229,202313229 -.long 67569157,67569157,67569157,67569157 -.long 1,1,1,1 -.long 27,27,27,27 .byte 65,69,83,32,102,111,114,32,73,110,116,101,108,32,65,69 .byte 83,45,78,73,44,32,67,82,89,80,84,79,71,65,77,83 .byte 32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115 .byte 115,108,46,111,114,103,62,0 -.section __IMPORT,__pointers,non_lazy_symbol_pointers -L__gnutls_x86_cpuid_s$non_lazy_ptr: -.indirect_symbol __gnutls_x86_cpuid_s -.long 0 -.comm __gnutls_x86_cpuid_s,16,2