svn commit: r338667 - in projects/openssl111/secure/lib/libcrypto: . amd64
Jung-uk Kim
jkim at FreeBSD.org
Thu Sep 13 21:07:11 UTC 2018
Author: jkim
Date: Thu Sep 13 21:07:09 2018
New Revision: 338667
URL: https://svnweb.freebsd.org/changeset/base/338667
Log:
Regen amd64 assembly files for OpenSSL 1.1.1.
Added:
projects/openssl111/secure/lib/libcrypto/amd64/chacha-x86_64.S (contents, props changed)
projects/openssl111/secure/lib/libcrypto/amd64/e_padlock-x86_64.S (contents, props changed)
projects/openssl111/secure/lib/libcrypto/amd64/ecp_nistz256-avx2.S (contents, props changed)
projects/openssl111/secure/lib/libcrypto/amd64/keccak1600-avx2.S (contents, props changed)
projects/openssl111/secure/lib/libcrypto/amd64/keccak1600-avx512.S (contents, props changed)
projects/openssl111/secure/lib/libcrypto/amd64/keccak1600-avx512vl.S (contents, props changed)
projects/openssl111/secure/lib/libcrypto/amd64/keccak1600-x86_64.S (contents, props changed)
projects/openssl111/secure/lib/libcrypto/amd64/poly1305-x86_64.S (contents, props changed)
projects/openssl111/secure/lib/libcrypto/amd64/x25519-x86_64.S (contents, props changed)
Modified:
projects/openssl111/secure/lib/libcrypto/Makefile.asm
projects/openssl111/secure/lib/libcrypto/amd64/aes-x86_64.S
projects/openssl111/secure/lib/libcrypto/amd64/aesni-gcm-x86_64.S
projects/openssl111/secure/lib/libcrypto/amd64/aesni-mb-x86_64.S
projects/openssl111/secure/lib/libcrypto/amd64/aesni-sha1-x86_64.S
projects/openssl111/secure/lib/libcrypto/amd64/aesni-sha256-x86_64.S
projects/openssl111/secure/lib/libcrypto/amd64/aesni-x86_64.S
projects/openssl111/secure/lib/libcrypto/amd64/bsaes-x86_64.S
projects/openssl111/secure/lib/libcrypto/amd64/cmll-x86_64.S
projects/openssl111/secure/lib/libcrypto/amd64/ecp_nistz256-x86_64.S
projects/openssl111/secure/lib/libcrypto/amd64/ghash-x86_64.S
projects/openssl111/secure/lib/libcrypto/amd64/md5-x86_64.S
projects/openssl111/secure/lib/libcrypto/amd64/rc4-md5-x86_64.S
projects/openssl111/secure/lib/libcrypto/amd64/rc4-x86_64.S
projects/openssl111/secure/lib/libcrypto/amd64/rsaz-avx2.S
projects/openssl111/secure/lib/libcrypto/amd64/rsaz-x86_64.S
projects/openssl111/secure/lib/libcrypto/amd64/sha1-mb-x86_64.S
projects/openssl111/secure/lib/libcrypto/amd64/sha1-x86_64.S
projects/openssl111/secure/lib/libcrypto/amd64/sha256-mb-x86_64.S
projects/openssl111/secure/lib/libcrypto/amd64/sha256-x86_64.S
projects/openssl111/secure/lib/libcrypto/amd64/sha512-x86_64.S
projects/openssl111/secure/lib/libcrypto/amd64/wp-x86_64.S
projects/openssl111/secure/lib/libcrypto/amd64/x86_64-gf2m.S
projects/openssl111/secure/lib/libcrypto/amd64/x86_64-mont.S
projects/openssl111/secure/lib/libcrypto/amd64/x86_64-mont5.S
projects/openssl111/secure/lib/libcrypto/amd64/x86_64cpuid.S
Modified: projects/openssl111/secure/lib/libcrypto/Makefile.asm
==============================================================================
--- projects/openssl111/secure/lib/libcrypto/Makefile.asm Thu Sep 13 21:00:17 2018 (r338666)
+++ projects/openssl111/secure/lib/libcrypto/Makefile.asm Thu Sep 13 21:07:09 2018 (r338667)
@@ -49,12 +49,15 @@ sha256-armv8.S: sha512-armv8.pl
${LCRYPTO_SRC}/crypto/aes/asm \
${LCRYPTO_SRC}/crypto/bn/asm \
${LCRYPTO_SRC}/crypto/camellia/asm \
+ ${LCRYPTO_SRC}/crypto/chacha/asm \
${LCRYPTO_SRC}/crypto/ec/asm \
${LCRYPTO_SRC}/crypto/md5/asm \
${LCRYPTO_SRC}/crypto/modes/asm \
+ ${LCRYPTO_SRC}/crypto/poly1305/asm \
${LCRYPTO_SRC}/crypto/rc4/asm \
${LCRYPTO_SRC}/crypto/sha/asm \
- ${LCRYPTO_SRC}/crypto/whrlpool/asm
+ ${LCRYPTO_SRC}/crypto/whrlpool/asm \
+ ${LCRYPTO_SRC}/engines/asm
# aes
SRCS= aes-x86_64.pl aesni-mb-x86_64.pl aesni-sha1-x86_64.pl \
@@ -68,20 +71,31 @@ SRCS+= rsaz-avx2.pl rsaz-x86_64.pl x86_64-gf2m.pl x86_
# camellia
SRCS+= cmll-x86_64.pl
+# chacha
+SRCS+= chacha-x86_64.pl
+
# ec
-SRCS+= ecp_nistz256-x86_64.pl
+SRCS+= ecp_nistz256-avx2.pl ecp_nistz256-x86_64.pl x25519-x86_64.pl
+# engines
+SRCS+= e_padlock-x86_64.pl
+
# md5
SRCS+= md5-x86_64.pl
# modes
SRCS+= aesni-gcm-x86_64.pl ghash-x86_64.pl
+# poly1305
+SRCS+= poly1305-x86_64.pl
+
# rc4
SRCS+= rc4-md5-x86_64.pl rc4-x86_64.pl
# sha
-SRCS+= sha1-mb-x86_64.pl sha1-x86_64.pl sha256-mb-x86_64.pl
+SRCS+= keccak1600-avx2.pl keccak1600-avx512.pl keccak1600-avx512vl.pl \
+ keccak1600-x86_64.pl sha1-mb-x86_64.pl sha1-x86_64.pl \
+ sha256-mb-x86_64.pl
# whrlpool
SRCS+= wp-x86_64.pl
Modified: projects/openssl111/secure/lib/libcrypto/amd64/aes-x86_64.S
==============================================================================
--- projects/openssl111/secure/lib/libcrypto/amd64/aes-x86_64.S Thu Sep 13 21:00:17 2018 (r338666)
+++ projects/openssl111/secure/lib/libcrypto/amd64/aes-x86_64.S Thu Sep 13 21:07:09 2018 (r338667)
@@ -334,15 +334,23 @@ _x86_64_AES_encrypt_compact:
.hidden asm_AES_encrypt
asm_AES_encrypt:
AES_encrypt:
+.cfi_startproc
+ movq %rsp,%rax
+.cfi_def_cfa_register %rax
pushq %rbx
+.cfi_offset %rbx,-16
pushq %rbp
+.cfi_offset %rbp,-24
pushq %r12
+.cfi_offset %r12,-32
pushq %r13
+.cfi_offset %r13,-40
pushq %r14
+.cfi_offset %r14,-48
pushq %r15
+.cfi_offset %r15,-56
- movq %rsp,%r10
leaq -63(%rdx),%rcx
andq $-64,%rsp
subq %rsp,%rcx
@@ -352,7 +360,8 @@ AES_encrypt:
subq $32,%rsp
movq %rsi,16(%rsp)
- movq %r10,24(%rsp)
+ movq %rax,24(%rsp)
+.cfi_escape 0x0f,0x05,0x77,0x18,0x06,0x23,0x08
.Lenc_prologue:
movq %rdx,%r15
@@ -379,20 +388,29 @@ AES_encrypt:
movq 16(%rsp),%r9
movq 24(%rsp),%rsi
+.cfi_def_cfa %rsi,8
movl %eax,0(%r9)
movl %ebx,4(%r9)
movl %ecx,8(%r9)
movl %edx,12(%r9)
- movq (%rsi),%r15
- movq 8(%rsi),%r14
- movq 16(%rsi),%r13
- movq 24(%rsi),%r12
- movq 32(%rsi),%rbp
- movq 40(%rsi),%rbx
- leaq 48(%rsi),%rsp
+ movq -48(%rsi),%r15
+.cfi_restore %r15
+ movq -40(%rsi),%r14
+.cfi_restore %r14
+ movq -32(%rsi),%r13
+.cfi_restore %r13
+ movq -24(%rsi),%r12
+.cfi_restore %r12
+ movq -16(%rsi),%rbp
+.cfi_restore %rbp
+ movq -8(%rsi),%rbx
+.cfi_restore %rbx
+ leaq (%rsi),%rsp
+.cfi_def_cfa_register %rsp
.Lenc_epilogue:
.byte 0xf3,0xc3
+.cfi_endproc
.size AES_encrypt,.-AES_encrypt
.type _x86_64_AES_decrypt,@function
.align 16
@@ -781,15 +799,23 @@ _x86_64_AES_decrypt_compact:
.hidden asm_AES_decrypt
asm_AES_decrypt:
AES_decrypt:
+.cfi_startproc
+ movq %rsp,%rax
+.cfi_def_cfa_register %rax
pushq %rbx
+.cfi_offset %rbx,-16
pushq %rbp
+.cfi_offset %rbp,-24
pushq %r12
+.cfi_offset %r12,-32
pushq %r13
+.cfi_offset %r13,-40
pushq %r14
+.cfi_offset %r14,-48
pushq %r15
+.cfi_offset %r15,-56
- movq %rsp,%r10
leaq -63(%rdx),%rcx
andq $-64,%rsp
subq %rsp,%rcx
@@ -799,7 +825,8 @@ AES_decrypt:
subq $32,%rsp
movq %rsi,16(%rsp)
- movq %r10,24(%rsp)
+ movq %rax,24(%rsp)
+.cfi_escape 0x0f,0x05,0x77,0x18,0x06,0x23,0x08
.Ldec_prologue:
movq %rdx,%r15
@@ -828,42 +855,69 @@ AES_decrypt:
movq 16(%rsp),%r9
movq 24(%rsp),%rsi
+.cfi_def_cfa %rsi,8
movl %eax,0(%r9)
movl %ebx,4(%r9)
movl %ecx,8(%r9)
movl %edx,12(%r9)
- movq (%rsi),%r15
- movq 8(%rsi),%r14
- movq 16(%rsi),%r13
- movq 24(%rsi),%r12
- movq 32(%rsi),%rbp
- movq 40(%rsi),%rbx
- leaq 48(%rsi),%rsp
+ movq -48(%rsi),%r15
+.cfi_restore %r15
+ movq -40(%rsi),%r14
+.cfi_restore %r14
+ movq -32(%rsi),%r13
+.cfi_restore %r13
+ movq -24(%rsi),%r12
+.cfi_restore %r12
+ movq -16(%rsi),%rbp
+.cfi_restore %rbp
+ movq -8(%rsi),%rbx
+.cfi_restore %rbx
+ leaq (%rsi),%rsp
+.cfi_def_cfa_register %rsp
.Ldec_epilogue:
.byte 0xf3,0xc3
+.cfi_endproc
.size AES_decrypt,.-AES_decrypt
-.globl private_AES_set_encrypt_key
-.type private_AES_set_encrypt_key,@function
+.globl AES_set_encrypt_key
+.type AES_set_encrypt_key,@function
.align 16
-private_AES_set_encrypt_key:
+AES_set_encrypt_key:
+.cfi_startproc
pushq %rbx
+.cfi_adjust_cfa_offset 8
+.cfi_offset %rbx,-16
pushq %rbp
+.cfi_adjust_cfa_offset 8
+.cfi_offset %rbp,-24
pushq %r12
+.cfi_adjust_cfa_offset 8
+.cfi_offset %r12,-32
pushq %r13
+.cfi_adjust_cfa_offset 8
+.cfi_offset %r13,-40
pushq %r14
+.cfi_adjust_cfa_offset 8
+.cfi_offset %r14,-48
pushq %r15
+.cfi_adjust_cfa_offset 8
+.cfi_offset %r15,-56
subq $8,%rsp
+.cfi_adjust_cfa_offset 8
.Lenc_key_prologue:
call _x86_64_AES_set_encrypt_key
movq 40(%rsp),%rbp
+.cfi_restore %rbp
movq 48(%rsp),%rbx
+.cfi_restore %rbx
addq $56,%rsp
+.cfi_adjust_cfa_offset -56
.Lenc_key_epilogue:
.byte 0xf3,0xc3
-.size private_AES_set_encrypt_key,.-private_AES_set_encrypt_key
+.cfi_endproc
+.size AES_set_encrypt_key,.-AES_set_encrypt_key
.type _x86_64_AES_set_encrypt_key,@function
.align 16
@@ -1104,17 +1158,31 @@ _x86_64_AES_set_encrypt_key:
.Lexit:
.byte 0xf3,0xc3
.size _x86_64_AES_set_encrypt_key,.-_x86_64_AES_set_encrypt_key
-.globl private_AES_set_decrypt_key
-.type private_AES_set_decrypt_key,@function
+.globl AES_set_decrypt_key
+.type AES_set_decrypt_key,@function
.align 16
-private_AES_set_decrypt_key:
+AES_set_decrypt_key:
+.cfi_startproc
pushq %rbx
+.cfi_adjust_cfa_offset 8
+.cfi_offset %rbx,-16
pushq %rbp
+.cfi_adjust_cfa_offset 8
+.cfi_offset %rbp,-24
pushq %r12
+.cfi_adjust_cfa_offset 8
+.cfi_offset %r12,-32
pushq %r13
+.cfi_adjust_cfa_offset 8
+.cfi_offset %r13,-40
pushq %r14
+.cfi_adjust_cfa_offset 8
+.cfi_offset %r14,-48
pushq %r15
+.cfi_adjust_cfa_offset 8
+.cfi_offset %r15,-56
pushq %rdx
+.cfi_adjust_cfa_offset 8
.Ldec_key_prologue:
call _x86_64_AES_set_encrypt_key
@@ -1282,15 +1350,23 @@ private_AES_set_decrypt_key:
xorq %rax,%rax
.Labort:
movq 8(%rsp),%r15
+.cfi_restore %r15
movq 16(%rsp),%r14
+.cfi_restore %r14
movq 24(%rsp),%r13
+.cfi_restore %r13
movq 32(%rsp),%r12
+.cfi_restore %r12
movq 40(%rsp),%rbp
+.cfi_restore %rbp
movq 48(%rsp),%rbx
+.cfi_restore %rbx
addq $56,%rsp
+.cfi_adjust_cfa_offset -56
.Ldec_key_epilogue:
.byte 0xf3,0xc3
-.size private_AES_set_decrypt_key,.-private_AES_set_decrypt_key
+.cfi_endproc
+.size AES_set_decrypt_key,.-AES_set_decrypt_key
.globl AES_cbc_encrypt
.type AES_cbc_encrypt,@function
.align 16
@@ -1299,25 +1375,39 @@ private_AES_set_decrypt_key:
.hidden asm_AES_cbc_encrypt
asm_AES_cbc_encrypt:
AES_cbc_encrypt:
+.cfi_startproc
cmpq $0,%rdx
je .Lcbc_epilogue
pushfq
+.cfi_adjust_cfa_offset 8
+.cfi_offset 49,-16
pushq %rbx
+.cfi_adjust_cfa_offset 8
+.cfi_offset %rbx,-24
pushq %rbp
+.cfi_adjust_cfa_offset 8
+.cfi_offset %rbp,-32
pushq %r12
+.cfi_adjust_cfa_offset 8
+.cfi_offset %r12,-40
pushq %r13
+.cfi_adjust_cfa_offset 8
+.cfi_offset %r13,-48
pushq %r14
+.cfi_adjust_cfa_offset 8
+.cfi_offset %r14,-56
pushq %r15
+.cfi_adjust_cfa_offset 8
+.cfi_offset %r15,-64
.Lcbc_prologue:
cld
movl %r9d,%r9d
leaq .LAES_Te(%rip),%r14
+ leaq .LAES_Td(%rip),%r10
cmpq $0,%r9
- jne .Lcbc_picked_te
- leaq .LAES_Td(%rip),%r14
-.Lcbc_picked_te:
+ cmoveq %r10,%r14
movl OPENSSL_ia32cap_P(%rip),%r10d
cmpq $512,%rdx
@@ -1353,8 +1443,10 @@ AES_cbc_encrypt:
.Lcbc_te_ok:
xchgq %rsp,%r15
+.cfi_def_cfa_register %r15
movq %r15,16(%rsp)
+.cfi_escape 0x0f,0x05,0x77,0x10,0x06,0x23,0x40
.Lcbc_fast_body:
movq %rdi,24(%rsp)
movq %rsi,32(%rsp)
@@ -1736,17 +1828,28 @@ AES_cbc_encrypt:
.align 16
.Lcbc_exit:
movq 16(%rsp),%rsi
+.cfi_def_cfa %rsi,64
movq (%rsi),%r15
+.cfi_restore %r15
movq 8(%rsi),%r14
+.cfi_restore %r14
movq 16(%rsi),%r13
+.cfi_restore %r13
movq 24(%rsi),%r12
+.cfi_restore %r12
movq 32(%rsi),%rbp
+.cfi_restore %rbp
movq 40(%rsi),%rbx
+.cfi_restore %rbx
leaq 48(%rsi),%rsp
+.cfi_def_cfa %rsp,16
.Lcbc_popfq:
popfq
+.cfi_adjust_cfa_offset -8
+.cfi_restore 49
.Lcbc_epilogue:
.byte 0xf3,0xc3
+.cfi_endproc
.size AES_cbc_encrypt,.-AES_cbc_encrypt
.align 64
.LAES_Te:
Modified: projects/openssl111/secure/lib/libcrypto/amd64/aesni-gcm-x86_64.S
==============================================================================
--- projects/openssl111/secure/lib/libcrypto/amd64/aesni-gcm-x86_64.S Thu Sep 13 21:00:17 2018 (r338666)
+++ projects/openssl111/secure/lib/libcrypto/amd64/aesni-gcm-x86_64.S Thu Sep 13 21:07:09 2018 (r338667)
@@ -317,17 +317,25 @@ _aesni_ctr32_ghash_6x:
.type aesni_gcm_decrypt,@function
.align 32
aesni_gcm_decrypt:
+.cfi_startproc
xorq %r10,%r10
cmpq $0x60,%rdx
jb .Lgcm_dec_abort
leaq (%rsp),%rax
+.cfi_def_cfa_register %rax
pushq %rbx
+.cfi_offset %rbx,-16
pushq %rbp
+.cfi_offset %rbp,-24
pushq %r12
+.cfi_offset %r12,-32
pushq %r13
+.cfi_offset %r13,-40
pushq %r14
+.cfi_offset %r14,-48
pushq %r15
+.cfi_offset %r15,-56
vzeroupper
vmovdqu (%r8),%xmm1
@@ -389,15 +397,23 @@ aesni_gcm_decrypt:
vzeroupper
movq -48(%rax),%r15
+.cfi_restore %r15
movq -40(%rax),%r14
+.cfi_restore %r14
movq -32(%rax),%r13
+.cfi_restore %r13
movq -24(%rax),%r12
+.cfi_restore %r12
movq -16(%rax),%rbp
+.cfi_restore %rbp
movq -8(%rax),%rbx
+.cfi_restore %rbx
leaq (%rax),%rsp
+.cfi_def_cfa_register %rsp
.Lgcm_dec_abort:
movq %r10,%rax
.byte 0xf3,0xc3
+.cfi_endproc
.size aesni_gcm_decrypt,.-aesni_gcm_decrypt
.type _aesni_ctr32_6x,@function
.align 32
@@ -494,17 +510,25 @@ _aesni_ctr32_6x:
.type aesni_gcm_encrypt,@function
.align 32
aesni_gcm_encrypt:
+.cfi_startproc
xorq %r10,%r10
cmpq $288,%rdx
jb .Lgcm_enc_abort
leaq (%rsp),%rax
+.cfi_def_cfa_register %rax
pushq %rbx
+.cfi_offset %rbx,-16
pushq %rbp
+.cfi_offset %rbp,-24
pushq %r12
+.cfi_offset %r12,-32
pushq %r13
+.cfi_offset %r13,-40
pushq %r14
+.cfi_offset %r14,-48
pushq %r15
+.cfi_offset %r15,-56
vzeroupper
vmovdqu (%r8),%xmm1
@@ -730,15 +754,23 @@ aesni_gcm_encrypt:
vzeroupper
movq -48(%rax),%r15
+.cfi_restore %r15
movq -40(%rax),%r14
+.cfi_restore %r14
movq -32(%rax),%r13
+.cfi_restore %r13
movq -24(%rax),%r12
+.cfi_restore %r12
movq -16(%rax),%rbp
+.cfi_restore %rbp
movq -8(%rax),%rbx
+.cfi_restore %rbx
leaq (%rax),%rsp
+.cfi_def_cfa_register %rsp
.Lgcm_enc_abort:
movq %r10,%rax
.byte 0xf3,0xc3
+.cfi_endproc
.size aesni_gcm_encrypt,.-aesni_gcm_encrypt
.align 64
.Lbswap_mask:
Modified: projects/openssl111/secure/lib/libcrypto/amd64/aesni-mb-x86_64.S
==============================================================================
--- projects/openssl111/secure/lib/libcrypto/amd64/aesni-mb-x86_64.S Thu Sep 13 21:00:17 2018 (r338666)
+++ projects/openssl111/secure/lib/libcrypto/amd64/aesni-mb-x86_64.S Thu Sep 13 21:07:09 2018 (r338667)
@@ -8,6 +8,7 @@
.type aesni_multi_cbc_encrypt,@function
.align 32
aesni_multi_cbc_encrypt:
+.cfi_startproc
cmpl $2,%edx
jb .Lenc_non_avx
movl OPENSSL_ia32cap_P+4(%rip),%ecx
@@ -17,12 +18,19 @@ aesni_multi_cbc_encrypt:
.align 16
.Lenc_non_avx:
movq %rsp,%rax
+.cfi_def_cfa_register %rax
pushq %rbx
+.cfi_offset %rbx,-16
pushq %rbp
+.cfi_offset %rbp,-24
pushq %r12
+.cfi_offset %r12,-32
pushq %r13
+.cfi_offset %r13,-40
pushq %r14
+.cfi_offset %r14,-48
pushq %r15
+.cfi_offset %r15,-56
@@ -32,6 +40,7 @@ aesni_multi_cbc_encrypt:
subq $48,%rsp
andq $-64,%rsp
movq %rax,16(%rsp)
+.cfi_escape 0x0f,0x05,0x77,0x10,0x06,0x23,0x08
.Lenc4x_body:
movdqu (%rsi),%xmm12
@@ -241,6 +250,7 @@ aesni_multi_cbc_encrypt:
jnz .Loop_enc4x
movq 16(%rsp),%rax
+.cfi_def_cfa %rax,8
movl 24(%rsp),%edx
@@ -258,20 +268,29 @@ aesni_multi_cbc_encrypt:
.Lenc4x_done:
movq -48(%rax),%r15
+.cfi_restore %r15
movq -40(%rax),%r14
+.cfi_restore %r14
movq -32(%rax),%r13
+.cfi_restore %r13
movq -24(%rax),%r12
+.cfi_restore %r12
movq -16(%rax),%rbp
+.cfi_restore %rbp
movq -8(%rax),%rbx
+.cfi_restore %rbx
leaq (%rax),%rsp
+.cfi_def_cfa_register %rsp
.Lenc4x_epilogue:
.byte 0xf3,0xc3
+.cfi_endproc
.size aesni_multi_cbc_encrypt,.-aesni_multi_cbc_encrypt
.globl aesni_multi_cbc_decrypt
.type aesni_multi_cbc_decrypt,@function
.align 32
aesni_multi_cbc_decrypt:
+.cfi_startproc
cmpl $2,%edx
jb .Ldec_non_avx
movl OPENSSL_ia32cap_P+4(%rip),%ecx
@@ -281,12 +300,19 @@ aesni_multi_cbc_decrypt:
.align 16
.Ldec_non_avx:
movq %rsp,%rax
+.cfi_def_cfa_register %rax
pushq %rbx
+.cfi_offset %rbx,-16
pushq %rbp
+.cfi_offset %rbp,-24
pushq %r12
+.cfi_offset %r12,-32
pushq %r13
+.cfi_offset %r13,-40
pushq %r14
+.cfi_offset %r14,-48
pushq %r15
+.cfi_offset %r15,-56
@@ -296,6 +322,7 @@ aesni_multi_cbc_decrypt:
subq $48,%rsp
andq $-64,%rsp
movq %rax,16(%rsp)
+.cfi_escape 0x0f,0x05,0x77,0x10,0x06,0x23,0x08
.Ldec4x_body:
movdqu (%rsi),%xmm12
@@ -505,6 +532,7 @@ aesni_multi_cbc_decrypt:
jnz .Loop_dec4x
movq 16(%rsp),%rax
+.cfi_def_cfa %rax,8
movl 24(%rsp),%edx
leaq 160(%rdi),%rdi
@@ -513,26 +541,42 @@ aesni_multi_cbc_decrypt:
.Ldec4x_done:
movq -48(%rax),%r15
+.cfi_restore %r15
movq -40(%rax),%r14
+.cfi_restore %r14
movq -32(%rax),%r13
+.cfi_restore %r13
movq -24(%rax),%r12
+.cfi_restore %r12
movq -16(%rax),%rbp
+.cfi_restore %rbp
movq -8(%rax),%rbx
+.cfi_restore %rbx
leaq (%rax),%rsp
+.cfi_def_cfa_register %rsp
.Ldec4x_epilogue:
.byte 0xf3,0xc3
+.cfi_endproc
.size aesni_multi_cbc_decrypt,.-aesni_multi_cbc_decrypt
.type aesni_multi_cbc_encrypt_avx,@function
.align 32
aesni_multi_cbc_encrypt_avx:
+.cfi_startproc
_avx_cbc_enc_shortcut:
movq %rsp,%rax
+.cfi_def_cfa_register %rax
pushq %rbx
+.cfi_offset %rbx,-16
pushq %rbp
+.cfi_offset %rbp,-24
pushq %r12
+.cfi_offset %r12,-32
pushq %r13
+.cfi_offset %r13,-40
pushq %r14
+.cfi_offset %r14,-48
pushq %r15
+.cfi_offset %r15,-56
@@ -544,6 +588,7 @@ _avx_cbc_enc_shortcut:
subq $192,%rsp
andq $-128,%rsp
movq %rax,16(%rsp)
+.cfi_escape 0x0f,0x05,0x77,0x10,0x06,0x23,0x08
.Lenc8x_body:
vzeroupper
@@ -941,6 +986,7 @@ _avx_cbc_enc_shortcut:
jnz .Loop_enc8x
movq 16(%rsp),%rax
+.cfi_def_cfa %rax,8
@@ -949,27 +995,43 @@ _avx_cbc_enc_shortcut:
.Lenc8x_done:
vzeroupper
movq -48(%rax),%r15
+.cfi_restore %r15
movq -40(%rax),%r14
+.cfi_restore %r14
movq -32(%rax),%r13
+.cfi_restore %r13
movq -24(%rax),%r12
+.cfi_restore %r12
movq -16(%rax),%rbp
+.cfi_restore %rbp
movq -8(%rax),%rbx
+.cfi_restore %rbx
leaq (%rax),%rsp
+.cfi_def_cfa_register %rsp
.Lenc8x_epilogue:
.byte 0xf3,0xc3
+.cfi_endproc
.size aesni_multi_cbc_encrypt_avx,.-aesni_multi_cbc_encrypt_avx
.type aesni_multi_cbc_decrypt_avx,@function
.align 32
aesni_multi_cbc_decrypt_avx:
+.cfi_startproc
_avx_cbc_dec_shortcut:
movq %rsp,%rax
+.cfi_def_cfa_register %rax
pushq %rbx
+.cfi_offset %rbx,-16
pushq %rbp
+.cfi_offset %rbp,-24
pushq %r12
+.cfi_offset %r12,-32
pushq %r13
+.cfi_offset %r13,-40
pushq %r14
+.cfi_offset %r14,-48
pushq %r15
+.cfi_offset %r15,-56
@@ -983,6 +1045,7 @@ _avx_cbc_dec_shortcut:
andq $-256,%rsp
subq $192,%rsp
movq %rax,16(%rsp)
+.cfi_escape 0x0f,0x05,0x77,0x10,0x06,0x23,0x08
.Ldec8x_body:
vzeroupper
@@ -1418,6 +1481,7 @@ _avx_cbc_dec_shortcut:
jnz .Loop_dec8x
movq 16(%rsp),%rax
+.cfi_def_cfa %rax,8
@@ -1426,12 +1490,20 @@ _avx_cbc_dec_shortcut:
.Ldec8x_done:
vzeroupper
movq -48(%rax),%r15
+.cfi_restore %r15
movq -40(%rax),%r14
+.cfi_restore %r14
movq -32(%rax),%r13
+.cfi_restore %r13
movq -24(%rax),%r12
+.cfi_restore %r12
movq -16(%rax),%rbp
+.cfi_restore %rbp
movq -8(%rax),%rbx
+.cfi_restore %rbx
leaq (%rax),%rsp
+.cfi_def_cfa_register %rsp
.Ldec8x_epilogue:
.byte 0xf3,0xc3
+.cfi_endproc
.size aesni_multi_cbc_decrypt_avx,.-aesni_multi_cbc_decrypt_avx
Modified: projects/openssl111/secure/lib/libcrypto/amd64/aesni-sha1-x86_64.S
==============================================================================
--- projects/openssl111/secure/lib/libcrypto/amd64/aesni-sha1-x86_64.S Thu Sep 13 21:00:17 2018 (r338666)
+++ projects/openssl111/secure/lib/libcrypto/amd64/aesni-sha1-x86_64.S Thu Sep 13 21:07:09 2018 (r338667)
@@ -23,16 +23,30 @@ aesni_cbc_sha1_enc:
.type aesni_cbc_sha1_enc_ssse3,@function
.align 32
aesni_cbc_sha1_enc_ssse3:
+.cfi_startproc
movq 8(%rsp),%r10
pushq %rbx
+.cfi_adjust_cfa_offset 8
+.cfi_offset %rbx,-16
pushq %rbp
+.cfi_adjust_cfa_offset 8
+.cfi_offset %rbp,-24
pushq %r12
+.cfi_adjust_cfa_offset 8
+.cfi_offset %r12,-32
pushq %r13
+.cfi_adjust_cfa_offset 8
+.cfi_offset %r13,-40
pushq %r14
+.cfi_adjust_cfa_offset 8
+.cfi_offset %r14,-48
pushq %r15
+.cfi_adjust_cfa_offset 8
+.cfi_offset %r15,-56
leaq -104(%rsp),%rsp
+.cfi_adjust_cfa_offset 104
movq %rdi,%r12
@@ -1364,29 +1378,52 @@ aesni_cbc_sha1_enc_ssse3:
movl %ebp,16(%r9)
movups %xmm2,(%r8)
leaq 104(%rsp),%rsi
+.cfi_def_cfa %rsi,56
movq 0(%rsi),%r15
+.cfi_restore %r15
movq 8(%rsi),%r14
+.cfi_restore %r14
movq 16(%rsi),%r13
+.cfi_restore %r13
movq 24(%rsi),%r12
+.cfi_restore %r12
movq 32(%rsi),%rbp
+.cfi_restore %rbp
movq 40(%rsi),%rbx
+.cfi_restore %rbx
leaq 48(%rsi),%rsp
+.cfi_def_cfa %rsp,8
.Lepilogue_ssse3:
.byte 0xf3,0xc3
+.cfi_endproc
.size aesni_cbc_sha1_enc_ssse3,.-aesni_cbc_sha1_enc_ssse3
.type aesni_cbc_sha1_enc_avx,@function
.align 32
aesni_cbc_sha1_enc_avx:
+.cfi_startproc
movq 8(%rsp),%r10
pushq %rbx
+.cfi_adjust_cfa_offset 8
+.cfi_offset %rbx,-16
pushq %rbp
+.cfi_adjust_cfa_offset 8
+.cfi_offset %rbp,-24
pushq %r12
+.cfi_adjust_cfa_offset 8
+.cfi_offset %r12,-32
pushq %r13
+.cfi_adjust_cfa_offset 8
+.cfi_offset %r13,-40
pushq %r14
+.cfi_adjust_cfa_offset 8
+.cfi_offset %r14,-48
pushq %r15
+.cfi_adjust_cfa_offset 8
+.cfi_offset %r15,-56
leaq -104(%rsp),%rsp
+.cfi_adjust_cfa_offset 104
vzeroall
@@ -2662,15 +2699,24 @@ aesni_cbc_sha1_enc_avx:
vmovups %xmm12,(%r8)
vzeroall
leaq 104(%rsp),%rsi
+.cfi_def_cfa %rsi,56
movq 0(%rsi),%r15
+.cfi_restore %r15
movq 8(%rsi),%r14
+.cfi_restore %r14
movq 16(%rsi),%r13
+.cfi_restore %r13
movq 24(%rsi),%r12
+.cfi_restore %r12
movq 32(%rsi),%rbp
+.cfi_restore %rbp
movq 40(%rsi),%rbx
+.cfi_restore %rbx
leaq 48(%rsi),%rsp
+.cfi_def_cfa %rsp,8
.Lepilogue_avx:
.byte 0xf3,0xc3
+.cfi_endproc
.size aesni_cbc_sha1_enc_avx,.-aesni_cbc_sha1_enc_avx
.align 64
K_XX_XX:
Modified: projects/openssl111/secure/lib/libcrypto/amd64/aesni-sha256-x86_64.S
==============================================================================
--- projects/openssl111/secure/lib/libcrypto/amd64/aesni-sha256-x86_64.S Thu Sep 13 21:00:17 2018 (r338666)
+++ projects/openssl111/secure/lib/libcrypto/amd64/aesni-sha256-x86_64.S Thu Sep 13 21:07:09 2018 (r338667)
@@ -79,15 +79,23 @@ K256:
.type aesni_cbc_sha256_enc_xop,@function
.align 64
aesni_cbc_sha256_enc_xop:
+.cfi_startproc
.Lxop_shortcut:
movq 8(%rsp),%r10
+ movq %rsp,%rax
+.cfi_def_cfa_register %rax
pushq %rbx
+.cfi_offset %rbx,-16
pushq %rbp
+.cfi_offset %rbp,-24
pushq %r12
+.cfi_offset %r12,-32
pushq %r13
+.cfi_offset %r13,-40
pushq %r14
+.cfi_offset %r14,-48
pushq %r15
- movq %rsp,%r11
+.cfi_offset %r15,-56
subq $128,%rsp
andq $-64,%rsp
@@ -103,7 +111,8 @@ aesni_cbc_sha256_enc_xop:
movq %r8,64+32(%rsp)
movq %r9,64+40(%rsp)
movq %r10,64+48(%rsp)
- movq %r11,64+56(%rsp)
+ movq %rax,120(%rsp)
+.cfi_escape 0x0f,0x06,0x77,0xf8,0x00,0x06,0x23,0x08
.Lprologue_xop:
vzeroall
@@ -1209,31 +1218,48 @@ aesni_cbc_sha256_enc_xop:
jb .Lloop_xop
movq 64+32(%rsp),%r8
- movq 64+56(%rsp),%rsi
+ movq 120(%rsp),%rsi
+.cfi_def_cfa %rsi,8
vmovdqu %xmm8,(%r8)
vzeroall
- movq (%rsi),%r15
- movq 8(%rsi),%r14
- movq 16(%rsi),%r13
- movq 24(%rsi),%r12
- movq 32(%rsi),%rbp
- movq 40(%rsi),%rbx
- leaq 48(%rsi),%rsp
+ movq -48(%rsi),%r15
+.cfi_restore %r15
+ movq -40(%rsi),%r14
+.cfi_restore %r14
+ movq -32(%rsi),%r13
+.cfi_restore %r13
+ movq -24(%rsi),%r12
+.cfi_restore %r12
+ movq -16(%rsi),%rbp
+.cfi_restore %rbp
+ movq -8(%rsi),%rbx
+.cfi_restore %rbx
+ leaq (%rsi),%rsp
+.cfi_def_cfa_register %rsp
.Lepilogue_xop:
.byte 0xf3,0xc3
+.cfi_endproc
.size aesni_cbc_sha256_enc_xop,.-aesni_cbc_sha256_enc_xop
.type aesni_cbc_sha256_enc_avx,@function
.align 64
aesni_cbc_sha256_enc_avx:
+.cfi_startproc
.Lavx_shortcut:
movq 8(%rsp),%r10
+ movq %rsp,%rax
+.cfi_def_cfa_register %rax
pushq %rbx
+.cfi_offset %rbx,-16
pushq %rbp
+.cfi_offset %rbp,-24
pushq %r12
+.cfi_offset %r12,-32
pushq %r13
+.cfi_offset %r13,-40
pushq %r14
+.cfi_offset %r14,-48
pushq %r15
- movq %rsp,%r11
+.cfi_offset %r15,-56
subq $128,%rsp
andq $-64,%rsp
@@ -1249,7 +1275,8 @@ aesni_cbc_sha256_enc_avx:
movq %r8,64+32(%rsp)
movq %r9,64+40(%rsp)
movq %r10,64+48(%rsp)
- movq %r11,64+56(%rsp)
+ movq %rax,120(%rsp)
+.cfi_escape 0x0f,0x06,0x77,0xf8,0x00,0x06,0x23,0x08
.Lprologue_avx:
vzeroall
@@ -2386,31 +2413,48 @@ aesni_cbc_sha256_enc_avx:
jb .Lloop_avx
movq 64+32(%rsp),%r8
- movq 64+56(%rsp),%rsi
+ movq 120(%rsp),%rsi
+.cfi_def_cfa %rsi,8
vmovdqu %xmm8,(%r8)
vzeroall
- movq (%rsi),%r15
- movq 8(%rsi),%r14
- movq 16(%rsi),%r13
- movq 24(%rsi),%r12
- movq 32(%rsi),%rbp
- movq 40(%rsi),%rbx
- leaq 48(%rsi),%rsp
+ movq -48(%rsi),%r15
+.cfi_restore %r15
+ movq -40(%rsi),%r14
+.cfi_restore %r14
+ movq -32(%rsi),%r13
+.cfi_restore %r13
+ movq -24(%rsi),%r12
+.cfi_restore %r12
+ movq -16(%rsi),%rbp
+.cfi_restore %rbp
+ movq -8(%rsi),%rbx
+.cfi_restore %rbx
+ leaq (%rsi),%rsp
+.cfi_def_cfa_register %rsp
.Lepilogue_avx:
.byte 0xf3,0xc3
+.cfi_endproc
.size aesni_cbc_sha256_enc_avx,.-aesni_cbc_sha256_enc_avx
.type aesni_cbc_sha256_enc_avx2,@function
.align 64
aesni_cbc_sha256_enc_avx2:
+.cfi_startproc
.Lavx2_shortcut:
movq 8(%rsp),%r10
+ movq %rsp,%rax
+.cfi_def_cfa_register %rax
pushq %rbx
+.cfi_offset %rbx,-16
*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
More information about the svn-src-projects
mailing list