| repo_id (string, 5–115 chars) | size (int64, 590–5.01M) | file_path (string, 4–212 chars) | content (string, 590–5.01M chars) |
|---|---|---|---|
usenix-security-verdict/verdict
| 210,728
|
deps/libcrux/sys/hacl/c/vale/src/aesgcm-x86_64-mingw.S
|
.text
.global aes128_key_expansion
aes128_key_expansion:
movdqu 0(%rcx), %xmm1
movdqu %xmm1, 0(%rdx)
aeskeygenassist $1, %xmm1, %xmm2
pshufd $255, %xmm2, %xmm2
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
pxor %xmm2, %xmm1
movdqu %xmm1, 16(%rdx)
aeskeygenassist $2, %xmm1, %xmm2
pshufd $255, %xmm2, %xmm2
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
pxor %xmm2, %xmm1
movdqu %xmm1, 32(%rdx)
aeskeygenassist $4, %xmm1, %xmm2
pshufd $255, %xmm2, %xmm2
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
pxor %xmm2, %xmm1
movdqu %xmm1, 48(%rdx)
aeskeygenassist $8, %xmm1, %xmm2
pshufd $255, %xmm2, %xmm2
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
pxor %xmm2, %xmm1
movdqu %xmm1, 64(%rdx)
aeskeygenassist $16, %xmm1, %xmm2
pshufd $255, %xmm2, %xmm2
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
pxor %xmm2, %xmm1
movdqu %xmm1, 80(%rdx)
aeskeygenassist $32, %xmm1, %xmm2
pshufd $255, %xmm2, %xmm2
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
pxor %xmm2, %xmm1
movdqu %xmm1, 96(%rdx)
aeskeygenassist $64, %xmm1, %xmm2
pshufd $255, %xmm2, %xmm2
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
pxor %xmm2, %xmm1
movdqu %xmm1, 112(%rdx)
aeskeygenassist $128, %xmm1, %xmm2
pshufd $255, %xmm2, %xmm2
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
pxor %xmm2, %xmm1
movdqu %xmm1, 128(%rdx)
aeskeygenassist $27, %xmm1, %xmm2
pshufd $255, %xmm2, %xmm2
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
pxor %xmm2, %xmm1
movdqu %xmm1, 144(%rdx)
aeskeygenassist $54, %xmm1, %xmm2
pshufd $255, %xmm2, %xmm2
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
pxor %xmm2, %xmm1
movdqu %xmm1, 160(%rdx)
pxor %xmm1, %xmm1
pxor %xmm2, %xmm2
pxor %xmm3, %xmm3
ret
.global aes128_keyhash_init
aes128_keyhash_init:
mov $579005069656919567, %r8
pinsrq $0, %r8, %xmm4
mov $283686952306183, %r8
pinsrq $1, %r8, %xmm4
pxor %xmm0, %xmm0
movdqu %xmm0, 80(%rdx)
mov %rcx, %r8
movdqu 0(%r8), %xmm2
pxor %xmm2, %xmm0
movdqu 16(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 32(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 48(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 64(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 80(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 96(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 112(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 128(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 144(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 160(%r8), %xmm2
aesenclast %xmm2, %xmm0
pxor %xmm2, %xmm2
pshufb %xmm4, %xmm0
mov %rdx, %rcx
movdqu %xmm0, 32(%rcx)
movdqu %xmm6, %xmm0
mov %r12, %rax
movdqu 32(%rcx), %xmm1
movdqu %xmm1, %xmm6
movdqu %xmm1, %xmm3
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
mov $3254779904, %r12
pinsrd $3, %r12d, %xmm4
mov $1, %r12
pinsrd $0, %r12d, %xmm4
mov $2147483648, %r12
pinsrd $3, %r12d, %xmm5
movdqu %xmm3, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pand %xmm5, %xmm3
pcmpeqd %xmm5, %xmm3
pshufd $255, %xmm3, %xmm3
pand %xmm4, %xmm3
vpxor %xmm3, %xmm1, %xmm1
movdqu %xmm1, 0(%rcx)
movdqu %xmm6, %xmm1
movdqu %xmm6, %xmm2
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm6
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm5
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
pclmulqdq $17, %xmm2, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pxor %xmm5, %xmm1
pxor %xmm6, %xmm1
movdqu %xmm1, %xmm6
movdqu %xmm1, %xmm3
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
mov $3254779904, %r12
pinsrd $3, %r12d, %xmm4
mov $1, %r12
pinsrd $0, %r12d, %xmm4
mov $2147483648, %r12
pinsrd $3, %r12d, %xmm5
movdqu %xmm3, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pand %xmm5, %xmm3
pcmpeqd %xmm5, %xmm3
pshufd $255, %xmm3, %xmm3
pand %xmm4, %xmm3
vpxor %xmm3, %xmm1, %xmm1
movdqu %xmm1, 16(%rcx)
movdqu %xmm6, %xmm2
movdqu 32(%rcx), %xmm1
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm6
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm5
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
pclmulqdq $17, %xmm2, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pxor %xmm5, %xmm1
pxor %xmm6, %xmm1
movdqu %xmm1, %xmm6
movdqu %xmm1, %xmm3
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
mov $3254779904, %r12
pinsrd $3, %r12d, %xmm4
mov $1, %r12
pinsrd $0, %r12d, %xmm4
mov $2147483648, %r12
pinsrd $3, %r12d, %xmm5
movdqu %xmm3, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pand %xmm5, %xmm3
pcmpeqd %xmm5, %xmm3
pshufd $255, %xmm3, %xmm3
pand %xmm4, %xmm3
vpxor %xmm3, %xmm1, %xmm1
movdqu %xmm1, 48(%rcx)
movdqu %xmm6, %xmm2
movdqu 32(%rcx), %xmm1
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm6
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm5
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
pclmulqdq $17, %xmm2, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pxor %xmm5, %xmm1
pxor %xmm6, %xmm1
movdqu %xmm1, %xmm6
movdqu %xmm1, %xmm3
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
mov $3254779904, %r12
pinsrd $3, %r12d, %xmm4
mov $1, %r12
pinsrd $0, %r12d, %xmm4
mov $2147483648, %r12
pinsrd $3, %r12d, %xmm5
movdqu %xmm3, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pand %xmm5, %xmm3
pcmpeqd %xmm5, %xmm3
pshufd $255, %xmm3, %xmm3
pand %xmm4, %xmm3
vpxor %xmm3, %xmm1, %xmm1
movdqu %xmm1, 64(%rcx)
movdqu %xmm6, %xmm2
movdqu 32(%rcx), %xmm1
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm6
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm5
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
pclmulqdq $17, %xmm2, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pxor %xmm5, %xmm1
pxor %xmm6, %xmm1
movdqu %xmm1, %xmm6
movdqu %xmm1, %xmm3
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
mov $3254779904, %r12
pinsrd $3, %r12d, %xmm4
mov $1, %r12
pinsrd $0, %r12d, %xmm4
mov $2147483648, %r12
pinsrd $3, %r12d, %xmm5
movdqu %xmm3, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pand %xmm5, %xmm3
pcmpeqd %xmm5, %xmm3
pshufd $255, %xmm3, %xmm3
pand %xmm4, %xmm3
vpxor %xmm3, %xmm1, %xmm1
movdqu %xmm1, 96(%rcx)
movdqu %xmm6, %xmm2
movdqu 32(%rcx), %xmm1
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm6
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm5
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
pclmulqdq $17, %xmm2, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pxor %xmm5, %xmm1
pxor %xmm6, %xmm1
movdqu %xmm1, %xmm6
movdqu %xmm1, %xmm3
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
mov $3254779904, %r12
pinsrd $3, %r12d, %xmm4
mov $1, %r12
pinsrd $0, %r12d, %xmm4
mov $2147483648, %r12
pinsrd $3, %r12d, %xmm5
movdqu %xmm3, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pand %xmm5, %xmm3
pcmpeqd %xmm5, %xmm3
pshufd $255, %xmm3, %xmm3
pand %xmm4, %xmm3
vpxor %xmm3, %xmm1, %xmm1
movdqu %xmm1, 112(%rcx)
movdqu %xmm0, %xmm6
mov %rax, %r12
ret
.global aes256_key_expansion
aes256_key_expansion:
movdqu 0(%rcx), %xmm1
movdqu 16(%rcx), %xmm3
movdqu %xmm1, 0(%rdx)
movdqu %xmm3, 16(%rdx)
aeskeygenassist $1, %xmm3, %xmm2
pshufd $255, %xmm2, %xmm2
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
pxor %xmm2, %xmm1
movdqu %xmm1, 32(%rdx)
aeskeygenassist $0, %xmm1, %xmm2
pshufd $170, %xmm2, %xmm2
vpslldq $4, %xmm3, %xmm4
pxor %xmm4, %xmm3
vpslldq $4, %xmm3, %xmm4
pxor %xmm4, %xmm3
vpslldq $4, %xmm3, %xmm4
pxor %xmm4, %xmm3
pxor %xmm2, %xmm3
movdqu %xmm3, 48(%rdx)
aeskeygenassist $2, %xmm3, %xmm2
pshufd $255, %xmm2, %xmm2
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
pxor %xmm2, %xmm1
movdqu %xmm1, 64(%rdx)
aeskeygenassist $0, %xmm1, %xmm2
pshufd $170, %xmm2, %xmm2
vpslldq $4, %xmm3, %xmm4
pxor %xmm4, %xmm3
vpslldq $4, %xmm3, %xmm4
pxor %xmm4, %xmm3
vpslldq $4, %xmm3, %xmm4
pxor %xmm4, %xmm3
pxor %xmm2, %xmm3
movdqu %xmm3, 80(%rdx)
aeskeygenassist $4, %xmm3, %xmm2
pshufd $255, %xmm2, %xmm2
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
pxor %xmm2, %xmm1
movdqu %xmm1, 96(%rdx)
aeskeygenassist $0, %xmm1, %xmm2
pshufd $170, %xmm2, %xmm2
vpslldq $4, %xmm3, %xmm4
pxor %xmm4, %xmm3
vpslldq $4, %xmm3, %xmm4
pxor %xmm4, %xmm3
vpslldq $4, %xmm3, %xmm4
pxor %xmm4, %xmm3
pxor %xmm2, %xmm3
movdqu %xmm3, 112(%rdx)
aeskeygenassist $8, %xmm3, %xmm2
pshufd $255, %xmm2, %xmm2
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
pxor %xmm2, %xmm1
movdqu %xmm1, 128(%rdx)
aeskeygenassist $0, %xmm1, %xmm2
pshufd $170, %xmm2, %xmm2
vpslldq $4, %xmm3, %xmm4
pxor %xmm4, %xmm3
vpslldq $4, %xmm3, %xmm4
pxor %xmm4, %xmm3
vpslldq $4, %xmm3, %xmm4
pxor %xmm4, %xmm3
pxor %xmm2, %xmm3
movdqu %xmm3, 144(%rdx)
aeskeygenassist $16, %xmm3, %xmm2
pshufd $255, %xmm2, %xmm2
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
pxor %xmm2, %xmm1
movdqu %xmm1, 160(%rdx)
aeskeygenassist $0, %xmm1, %xmm2
pshufd $170, %xmm2, %xmm2
vpslldq $4, %xmm3, %xmm4
pxor %xmm4, %xmm3
vpslldq $4, %xmm3, %xmm4
pxor %xmm4, %xmm3
vpslldq $4, %xmm3, %xmm4
pxor %xmm4, %xmm3
pxor %xmm2, %xmm3
movdqu %xmm3, 176(%rdx)
aeskeygenassist $32, %xmm3, %xmm2
pshufd $255, %xmm2, %xmm2
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
pxor %xmm2, %xmm1
movdqu %xmm1, 192(%rdx)
aeskeygenassist $0, %xmm1, %xmm2
pshufd $170, %xmm2, %xmm2
vpslldq $4, %xmm3, %xmm4
pxor %xmm4, %xmm3
vpslldq $4, %xmm3, %xmm4
pxor %xmm4, %xmm3
vpslldq $4, %xmm3, %xmm4
pxor %xmm4, %xmm3
pxor %xmm2, %xmm3
movdqu %xmm3, 208(%rdx)
aeskeygenassist $64, %xmm3, %xmm2
pshufd $255, %xmm2, %xmm2
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
pxor %xmm2, %xmm1
movdqu %xmm1, 224(%rdx)
pxor %xmm1, %xmm1
pxor %xmm2, %xmm2
pxor %xmm3, %xmm3
pxor %xmm4, %xmm4
ret
.global aes256_keyhash_init
aes256_keyhash_init:
mov $579005069656919567, %r8
pinsrq $0, %r8, %xmm4
mov $283686952306183, %r8
pinsrq $1, %r8, %xmm4
pxor %xmm0, %xmm0
movdqu %xmm0, 80(%rdx)
mov %rcx, %r8
movdqu 0(%r8), %xmm2
pxor %xmm2, %xmm0
movdqu 16(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 32(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 48(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 64(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 80(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 96(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 112(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 128(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 144(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 160(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 176(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 192(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 208(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 224(%r8), %xmm2
aesenclast %xmm2, %xmm0
pxor %xmm2, %xmm2
pshufb %xmm4, %xmm0
mov %rdx, %rcx
movdqu %xmm0, 32(%rcx)
movdqu %xmm6, %xmm0
mov %r12, %rax
movdqu 32(%rcx), %xmm1
movdqu %xmm1, %xmm6
movdqu %xmm1, %xmm3
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
mov $3254779904, %r12
pinsrd $3, %r12d, %xmm4
mov $1, %r12
pinsrd $0, %r12d, %xmm4
mov $2147483648, %r12
pinsrd $3, %r12d, %xmm5
movdqu %xmm3, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pand %xmm5, %xmm3
pcmpeqd %xmm5, %xmm3
pshufd $255, %xmm3, %xmm3
pand %xmm4, %xmm3
vpxor %xmm3, %xmm1, %xmm1
movdqu %xmm1, 0(%rcx)
movdqu %xmm6, %xmm1
movdqu %xmm6, %xmm2
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm6
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm5
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
pclmulqdq $17, %xmm2, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pxor %xmm5, %xmm1
pxor %xmm6, %xmm1
movdqu %xmm1, %xmm6
movdqu %xmm1, %xmm3
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
mov $3254779904, %r12
pinsrd $3, %r12d, %xmm4
mov $1, %r12
pinsrd $0, %r12d, %xmm4
mov $2147483648, %r12
pinsrd $3, %r12d, %xmm5
movdqu %xmm3, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pand %xmm5, %xmm3
pcmpeqd %xmm5, %xmm3
pshufd $255, %xmm3, %xmm3
pand %xmm4, %xmm3
vpxor %xmm3, %xmm1, %xmm1
movdqu %xmm1, 16(%rcx)
movdqu %xmm6, %xmm2
movdqu 32(%rcx), %xmm1
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm6
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm5
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
pclmulqdq $17, %xmm2, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pxor %xmm5, %xmm1
pxor %xmm6, %xmm1
movdqu %xmm1, %xmm6
movdqu %xmm1, %xmm3
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
mov $3254779904, %r12
pinsrd $3, %r12d, %xmm4
mov $1, %r12
pinsrd $0, %r12d, %xmm4
mov $2147483648, %r12
pinsrd $3, %r12d, %xmm5
movdqu %xmm3, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pand %xmm5, %xmm3
pcmpeqd %xmm5, %xmm3
pshufd $255, %xmm3, %xmm3
pand %xmm4, %xmm3
vpxor %xmm3, %xmm1, %xmm1
movdqu %xmm1, 48(%rcx)
movdqu %xmm6, %xmm2
movdqu 32(%rcx), %xmm1
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm6
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm5
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
pclmulqdq $17, %xmm2, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pxor %xmm5, %xmm1
pxor %xmm6, %xmm1
movdqu %xmm1, %xmm6
movdqu %xmm1, %xmm3
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
mov $3254779904, %r12
pinsrd $3, %r12d, %xmm4
mov $1, %r12
pinsrd $0, %r12d, %xmm4
mov $2147483648, %r12
pinsrd $3, %r12d, %xmm5
movdqu %xmm3, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pand %xmm5, %xmm3
pcmpeqd %xmm5, %xmm3
pshufd $255, %xmm3, %xmm3
pand %xmm4, %xmm3
vpxor %xmm3, %xmm1, %xmm1
movdqu %xmm1, 64(%rcx)
movdqu %xmm6, %xmm2
movdqu 32(%rcx), %xmm1
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm6
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm5
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
pclmulqdq $17, %xmm2, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pxor %xmm5, %xmm1
pxor %xmm6, %xmm1
movdqu %xmm1, %xmm6
movdqu %xmm1, %xmm3
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
mov $3254779904, %r12
pinsrd $3, %r12d, %xmm4
mov $1, %r12
pinsrd $0, %r12d, %xmm4
mov $2147483648, %r12
pinsrd $3, %r12d, %xmm5
movdqu %xmm3, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pand %xmm5, %xmm3
pcmpeqd %xmm5, %xmm3
pshufd $255, %xmm3, %xmm3
pand %xmm4, %xmm3
vpxor %xmm3, %xmm1, %xmm1
movdqu %xmm1, 96(%rcx)
movdqu %xmm6, %xmm2
movdqu 32(%rcx), %xmm1
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm6
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm5
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
pclmulqdq $17, %xmm2, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pxor %xmm5, %xmm1
pxor %xmm6, %xmm1
movdqu %xmm1, %xmm6
movdqu %xmm1, %xmm3
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
mov $3254779904, %r12
pinsrd $3, %r12d, %xmm4
mov $1, %r12
pinsrd $0, %r12d, %xmm4
mov $2147483648, %r12
pinsrd $3, %r12d, %xmm5
movdqu %xmm3, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pand %xmm5, %xmm3
pcmpeqd %xmm5, %xmm3
pshufd $255, %xmm3, %xmm3
pand %xmm4, %xmm3
vpxor %xmm3, %xmm1, %xmm1
movdqu %xmm1, 112(%rcx)
movdqu %xmm0, %xmm6
mov %rax, %r12
ret
#-----------------------------------------------------------------------
# gctr128_bytes — AES-128 CTR-mode keystream/encryption over a buffer.
# Vale-generated, formally verified code; do not hand-edit instructions.
# ABI:  Microsoft x64 (MinGW). Register args: rcx, rdx, r8, r9;
#       further args on the stack (read below at 264/272/280(%rsp)
#       after 8 GPR pushes + 20 qword xmm spills + return address).
# In:   rcx = input buffer ptr, rdx = byte count, r8 = output buffer ptr,
#       r9  = ptr used for the final partial block (read/modified in place)
#       264(%rsp) = AES-128 expanded key (11 round keys, 176 bytes)
#       272(%rsp) = ptr to the initial counter block (ICB)
#       280(%rsp) = number of whole 16-byte blocks
#       (exact Vale parameter names not visible here — TODO confirm
#       against the Vale gctr_bytes_stdcall signature)
# Clobb: restores xmm6-xmm15 and all pushed GPRs before returning.
#-----------------------------------------------------------------------
.global gctr128_bytes
gctr128_bytes:
# Save Windows callee-saved GPRs.
push %r15
push %r14
push %r13
push %r12
push %rsi
push %rdi
push %rbp
push %rbx
# Spill Windows callee-saved xmm6-xmm15 to the stack, one qword at a time.
pextrq $0, %xmm15, %rax
push %rax
pextrq $1, %xmm15, %rax
push %rax
pextrq $0, %xmm14, %rax
push %rax
pextrq $1, %xmm14, %rax
push %rax
pextrq $0, %xmm13, %rax
push %rax
pextrq $1, %xmm13, %rax
push %rax
pextrq $0, %xmm12, %rax
push %rax
pextrq $1, %xmm12, %rax
push %rax
pextrq $0, %xmm11, %rax
push %rax
pextrq $1, %xmm11, %rax
push %rax
pextrq $0, %xmm10, %rax
push %rax
pextrq $1, %xmm10, %rax
push %rax
pextrq $0, %xmm9, %rax
push %rax
pextrq $1, %xmm9, %rax
push %rax
pextrq $0, %xmm8, %rax
push %rax
pextrq $1, %xmm8, %rax
push %rax
pextrq $0, %xmm7, %rax
push %rax
pextrq $1, %xmm7, %rax
push %rax
pextrq $0, %xmm6, %rax
push %rax
pextrq $1, %xmm6, %rax
push %rax
# Load stack arguments (offsets account for the 29 qwords pushed above).
mov 272(%rsp), %rax
movdqu 0(%rax), %xmm7
mov %rcx, %rax
mov %r8, %rbx
mov %rdx, %rsi
mov %r9, %r13
mov 264(%rsp), %r8
mov 280(%rsp), %rcx
mov %rcx, %rbp
imul $16, %rbp
# xmm8 = {0x08090A0B0C0D0E0F, 0x0001020304050607}: full 16-byte
# reversal mask for pshufb (big-endian <-> little-endian lane swap).
mov $579005069656919567, %r12
pinsrq $0, %r12, %xmm8
mov $283686952306183, %r12
pinsrq $1, %r12, %xmm8
# rdx = block count / 4 (iterations of the 4-wide loop),
# rcx = block count mod 4 (iterations of the 1-wide tail loop).
mov %rcx, %rdx
shr $2, %rdx
and $3, %rcx
cmp $0, %rdx
jbe L0
# Set up counters for the 4-blocks-per-iteration loop: xmm9/xmm10 hold
# two counter pairs, xmm15 is the per-iteration increment (+4,+4).
mov %rax, %r9
mov %rbx, %r10
pshufb %xmm8, %xmm7
movdqu %xmm7, %xmm9
mov $579005069656919567, %rax
pinsrq $0, %rax, %xmm0
mov $579005069656919567, %rax
pinsrq $1, %rax, %xmm0
pshufb %xmm0, %xmm9
movdqu %xmm9, %xmm10
pxor %xmm3, %xmm3
mov $1, %rax
pinsrd $2, %eax, %xmm3
paddd %xmm3, %xmm9
mov $3, %rax
pinsrd $2, %eax, %xmm3
mov $2, %rax
pinsrd $0, %eax, %xmm3
paddd %xmm3, %xmm10
pshufb %xmm8, %xmm9
pshufb %xmm8, %xmm10
pextrq $0, %xmm7, %rdi
mov $283686952306183, %rax
pinsrq $0, %rax, %xmm0
mov $579005069656919567, %rax
pinsrq $1, %rax, %xmm0
pxor %xmm15, %xmm15
mov $4, %rax
pinsrd $0, %eax, %xmm15
mov $4, %rax
pinsrd $2, %eax, %xmm15
jmp L3
.balign 16
# L2: main loop — encrypt 4 counter blocks per iteration with AES-128
# (10 aesenc rounds + aesenclast; last round key at 160(%r8)),
# XOR keystream into 64 bytes of input, advance pointers.
L2:
pinsrq $0, %rdi, %xmm2
pinsrq $0, %rdi, %xmm12
pinsrq $0, %rdi, %xmm13
pinsrq $0, %rdi, %xmm14
shufpd $2, %xmm9, %xmm2
shufpd $0, %xmm9, %xmm12
shufpd $2, %xmm10, %xmm13
shufpd $0, %xmm10, %xmm14
pshufb %xmm0, %xmm9
pshufb %xmm0, %xmm10
movdqu 0(%r8), %xmm3
movdqu 16(%r8), %xmm4
movdqu 32(%r8), %xmm5
movdqu 48(%r8), %xmm6
paddd %xmm15, %xmm9
paddd %xmm15, %xmm10
pxor %xmm3, %xmm2
pxor %xmm3, %xmm12
pxor %xmm3, %xmm13
pxor %xmm3, %xmm14
pshufb %xmm0, %xmm9
pshufb %xmm0, %xmm10
aesenc %xmm4, %xmm2
aesenc %xmm4, %xmm12
aesenc %xmm4, %xmm13
aesenc %xmm4, %xmm14
aesenc %xmm5, %xmm2
aesenc %xmm5, %xmm12
aesenc %xmm5, %xmm13
aesenc %xmm5, %xmm14
aesenc %xmm6, %xmm2
aesenc %xmm6, %xmm12
aesenc %xmm6, %xmm13
aesenc %xmm6, %xmm14
movdqu 64(%r8), %xmm3
movdqu 80(%r8), %xmm4
movdqu 96(%r8), %xmm5
movdqu 112(%r8), %xmm6
aesenc %xmm3, %xmm2
aesenc %xmm3, %xmm12
aesenc %xmm3, %xmm13
aesenc %xmm3, %xmm14
aesenc %xmm4, %xmm2
aesenc %xmm4, %xmm12
aesenc %xmm4, %xmm13
aesenc %xmm4, %xmm14
aesenc %xmm5, %xmm2
aesenc %xmm5, %xmm12
aesenc %xmm5, %xmm13
aesenc %xmm5, %xmm14
aesenc %xmm6, %xmm2
aesenc %xmm6, %xmm12
aesenc %xmm6, %xmm13
aesenc %xmm6, %xmm14
movdqu 128(%r8), %xmm3
movdqu 144(%r8), %xmm4
movdqu 160(%r8), %xmm5
aesenc %xmm3, %xmm2
aesenc %xmm3, %xmm12
aesenc %xmm3, %xmm13
aesenc %xmm3, %xmm14
aesenc %xmm4, %xmm2
aesenc %xmm4, %xmm12
aesenc %xmm4, %xmm13
aesenc %xmm4, %xmm14
aesenclast %xmm5, %xmm2
aesenclast %xmm5, %xmm12
aesenclast %xmm5, %xmm13
aesenclast %xmm5, %xmm14
# XOR keystream with 4 input blocks, store 4 output blocks.
movdqu 0(%r9), %xmm7
pxor %xmm7, %xmm2
movdqu 16(%r9), %xmm7
pxor %xmm7, %xmm12
movdqu 32(%r9), %xmm7
pxor %xmm7, %xmm13
movdqu 48(%r9), %xmm7
pxor %xmm7, %xmm14
movdqu %xmm2, 0(%r10)
movdqu %xmm12, 16(%r10)
movdqu %xmm13, 32(%r10)
movdqu %xmm14, 48(%r10)
sub $1, %rdx
add $64, %r9
add $64, %r10
.balign 16
L3:
cmp $0, %rdx
ja L2
# Rebuild the current counter block in xmm7 from the loop state.
movdqu %xmm9, %xmm7
pinsrq $0, %rdi, %xmm7
pshufb %xmm8, %xmm7
mov %r9, %rax
mov %r10, %rbx
jmp L1
L0:
L1:
# L4/L5: tail loop — remaining (block count mod 4) blocks, one at a time.
# xmm4 = counter increment of 1.
mov $0, %rdx
mov %rax, %r9
mov %rbx, %r10
pxor %xmm4, %xmm4
mov $1, %r12
pinsrd $0, %r12d, %xmm4
jmp L5
.balign 16
L4:
movdqu %xmm7, %xmm0
pshufb %xmm8, %xmm0
movdqu 0(%r8), %xmm2
pxor %xmm2, %xmm0
movdqu 16(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 32(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 48(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 64(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 80(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 96(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 112(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 128(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 144(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 160(%r8), %xmm2
aesenclast %xmm2, %xmm0
pxor %xmm2, %xmm2
movdqu 0(%r9), %xmm2
pxor %xmm0, %xmm2
movdqu %xmm2, 0(%r10)
add $1, %rdx
add $16, %r9
add $16, %r10
paddd %xmm4, %xmm7
.balign 16
L5:
cmp %rcx, %rdx
jne L4
# If total byte count exceeds 16*blocks, encrypt the final partial
# block in place at 0(%r13) with one more keystream block.
cmp %rbp, %rsi
jbe L6
movdqu 0(%r13), %xmm1
movdqu %xmm7, %xmm0
mov $579005069656919567, %r12
pinsrq $0, %r12, %xmm2
mov $283686952306183, %r12
pinsrq $1, %r12, %xmm2
pshufb %xmm2, %xmm0
movdqu 0(%r8), %xmm2
pxor %xmm2, %xmm0
movdqu 16(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 32(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 48(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 64(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 80(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 96(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 112(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 128(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 144(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 160(%r8), %xmm2
aesenclast %xmm2, %xmm0
pxor %xmm2, %xmm2
pxor %xmm0, %xmm1
movdqu %xmm1, 0(%r13)
jmp L7
L6:
L7:
# Restore xmm6-xmm15 and callee-saved GPRs (reverse of prologue).
pop %rax
pinsrq $1, %rax, %xmm6
pop %rax
pinsrq $0, %rax, %xmm6
pop %rax
pinsrq $1, %rax, %xmm7
pop %rax
pinsrq $0, %rax, %xmm7
pop %rax
pinsrq $1, %rax, %xmm8
pop %rax
pinsrq $0, %rax, %xmm8
pop %rax
pinsrq $1, %rax, %xmm9
pop %rax
pinsrq $0, %rax, %xmm9
pop %rax
pinsrq $1, %rax, %xmm10
pop %rax
pinsrq $0, %rax, %xmm10
pop %rax
pinsrq $1, %rax, %xmm11
pop %rax
pinsrq $0, %rax, %xmm11
pop %rax
pinsrq $1, %rax, %xmm12
pop %rax
pinsrq $0, %rax, %xmm12
pop %rax
pinsrq $1, %rax, %xmm13
pop %rax
pinsrq $0, %rax, %xmm13
pop %rax
pinsrq $1, %rax, %xmm14
pop %rax
pinsrq $0, %rax, %xmm14
pop %rax
pinsrq $1, %rax, %xmm15
pop %rax
pinsrq $0, %rax, %xmm15
pop %rbx
pop %rbp
pop %rdi
pop %rsi
pop %r12
pop %r13
pop %r14
pop %r15
ret
#-----------------------------------------------------------------------
# gctr256_bytes — AES-256 CTR-mode keystream/encryption over a buffer.
# Identical structure to gctr128_bytes above, but with an AES-256 key
# schedule: 14 aesenc rounds + aesenclast, last round key at 224(%r8)
# (15 round keys, 240 bytes). Vale-generated, formally verified code;
# do not hand-edit instructions.
# ABI:  Microsoft x64 (MinGW). Register args: rcx, rdx, r8, r9;
#       stack args read at 264/272/280(%rsp) after the prologue spills.
# In:   rcx = input buffer ptr, rdx = byte count, r8 = output buffer ptr,
#       r9  = ptr used for the final partial block (read/modified in place)
#       264(%rsp) = AES-256 expanded key, 272(%rsp) = initial counter
#       block ptr, 280(%rsp) = number of whole 16-byte blocks
#       (exact Vale parameter names not visible here — TODO confirm)
# Clobb: restores xmm6-xmm15 and all pushed GPRs before returning.
#-----------------------------------------------------------------------
.global gctr256_bytes
gctr256_bytes:
# Save Windows callee-saved GPRs and xmm6-xmm15.
push %r15
push %r14
push %r13
push %r12
push %rsi
push %rdi
push %rbp
push %rbx
pextrq $0, %xmm15, %rax
push %rax
pextrq $1, %xmm15, %rax
push %rax
pextrq $0, %xmm14, %rax
push %rax
pextrq $1, %xmm14, %rax
push %rax
pextrq $0, %xmm13, %rax
push %rax
pextrq $1, %xmm13, %rax
push %rax
pextrq $0, %xmm12, %rax
push %rax
pextrq $1, %xmm12, %rax
push %rax
pextrq $0, %xmm11, %rax
push %rax
pextrq $1, %xmm11, %rax
push %rax
pextrq $0, %xmm10, %rax
push %rax
pextrq $1, %xmm10, %rax
push %rax
pextrq $0, %xmm9, %rax
push %rax
pextrq $1, %xmm9, %rax
push %rax
pextrq $0, %xmm8, %rax
push %rax
pextrq $1, %xmm8, %rax
push %rax
pextrq $0, %xmm7, %rax
push %rax
pextrq $1, %xmm7, %rax
push %rax
pextrq $0, %xmm6, %rax
push %rax
pextrq $1, %xmm6, %rax
push %rax
# Load stack arguments; xmm8 = 16-byte-reversal pshufb mask
# ({0x08090A0B0C0D0E0F, 0x0001020304050607}).
mov 272(%rsp), %rax
movdqu 0(%rax), %xmm7
mov %rcx, %rax
mov %r8, %rbx
mov %rdx, %rsi
mov %r9, %r13
mov 264(%rsp), %r8
mov 280(%rsp), %rcx
mov %rcx, %rbp
imul $16, %rbp
mov $579005069656919567, %r12
pinsrq $0, %r12, %xmm8
mov $283686952306183, %r12
pinsrq $1, %r12, %xmm8
# rdx = blocks/4 (4-wide loop count), rcx = blocks mod 4 (tail loop).
mov %rcx, %rdx
shr $2, %rdx
and $3, %rcx
cmp $0, %rdx
jbe L8
mov %rax, %r9
mov %rbx, %r10
pshufb %xmm8, %xmm7
movdqu %xmm7, %xmm9
mov $579005069656919567, %rax
pinsrq $0, %rax, %xmm0
mov $579005069656919567, %rax
pinsrq $1, %rax, %xmm0
pshufb %xmm0, %xmm9
movdqu %xmm9, %xmm10
pxor %xmm3, %xmm3
mov $1, %rax
pinsrd $2, %eax, %xmm3
paddd %xmm3, %xmm9
mov $3, %rax
pinsrd $2, %eax, %xmm3
mov $2, %rax
pinsrd $0, %eax, %xmm3
paddd %xmm3, %xmm10
pshufb %xmm8, %xmm9
pshufb %xmm8, %xmm10
pextrq $0, %xmm7, %rdi
mov $283686952306183, %rax
pinsrq $0, %rax, %xmm0
mov $579005069656919567, %rax
pinsrq $1, %rax, %xmm0
pxor %xmm15, %xmm15
mov $4, %rax
pinsrd $0, %eax, %xmm15
mov $4, %rax
pinsrd $2, %eax, %xmm15
jmp L11
.balign 16
# L10: main loop — encrypt 4 counter blocks per iteration with AES-256.
L10:
pinsrq $0, %rdi, %xmm2
pinsrq $0, %rdi, %xmm12
pinsrq $0, %rdi, %xmm13
pinsrq $0, %rdi, %xmm14
shufpd $2, %xmm9, %xmm2
shufpd $0, %xmm9, %xmm12
shufpd $2, %xmm10, %xmm13
shufpd $0, %xmm10, %xmm14
pshufb %xmm0, %xmm9
pshufb %xmm0, %xmm10
movdqu 0(%r8), %xmm3
movdqu 16(%r8), %xmm4
movdqu 32(%r8), %xmm5
movdqu 48(%r8), %xmm6
paddd %xmm15, %xmm9
paddd %xmm15, %xmm10
pxor %xmm3, %xmm2
pxor %xmm3, %xmm12
pxor %xmm3, %xmm13
pxor %xmm3, %xmm14
pshufb %xmm0, %xmm9
pshufb %xmm0, %xmm10
aesenc %xmm4, %xmm2
aesenc %xmm4, %xmm12
aesenc %xmm4, %xmm13
aesenc %xmm4, %xmm14
aesenc %xmm5, %xmm2
aesenc %xmm5, %xmm12
aesenc %xmm5, %xmm13
aesenc %xmm5, %xmm14
aesenc %xmm6, %xmm2
aesenc %xmm6, %xmm12
aesenc %xmm6, %xmm13
aesenc %xmm6, %xmm14
movdqu 64(%r8), %xmm3
movdqu 80(%r8), %xmm4
movdqu 96(%r8), %xmm5
movdqu 112(%r8), %xmm6
aesenc %xmm3, %xmm2
aesenc %xmm3, %xmm12
aesenc %xmm3, %xmm13
aesenc %xmm3, %xmm14
aesenc %xmm4, %xmm2
aesenc %xmm4, %xmm12
aesenc %xmm4, %xmm13
aesenc %xmm4, %xmm14
aesenc %xmm5, %xmm2
aesenc %xmm5, %xmm12
aesenc %xmm5, %xmm13
aesenc %xmm5, %xmm14
aesenc %xmm6, %xmm2
aesenc %xmm6, %xmm12
aesenc %xmm6, %xmm13
aesenc %xmm6, %xmm14
movdqu 128(%r8), %xmm3
movdqu 144(%r8), %xmm4
movdqu 160(%r8), %xmm5
aesenc %xmm3, %xmm2
aesenc %xmm3, %xmm12
aesenc %xmm3, %xmm13
aesenc %xmm3, %xmm14
aesenc %xmm4, %xmm2
aesenc %xmm4, %xmm12
aesenc %xmm4, %xmm13
aesenc %xmm4, %xmm14
# Extra AES-256 rounds: round keys at 160..208(%r8), last key at 224.
movdqu %xmm5, %xmm3
movdqu 176(%r8), %xmm4
movdqu 192(%r8), %xmm5
movdqu 208(%r8), %xmm6
aesenc %xmm3, %xmm2
aesenc %xmm3, %xmm12
aesenc %xmm3, %xmm13
aesenc %xmm3, %xmm14
aesenc %xmm4, %xmm2
aesenc %xmm4, %xmm12
aesenc %xmm4, %xmm13
aesenc %xmm4, %xmm14
aesenc %xmm5, %xmm2
aesenc %xmm5, %xmm12
aesenc %xmm5, %xmm13
aesenc %xmm5, %xmm14
aesenc %xmm6, %xmm2
aesenc %xmm6, %xmm12
aesenc %xmm6, %xmm13
aesenc %xmm6, %xmm14
movdqu 224(%r8), %xmm5
aesenclast %xmm5, %xmm2
aesenclast %xmm5, %xmm12
aesenclast %xmm5, %xmm13
aesenclast %xmm5, %xmm14
# XOR keystream with 4 input blocks, store 4 output blocks.
movdqu 0(%r9), %xmm7
pxor %xmm7, %xmm2
movdqu 16(%r9), %xmm7
pxor %xmm7, %xmm12
movdqu 32(%r9), %xmm7
pxor %xmm7, %xmm13
movdqu 48(%r9), %xmm7
pxor %xmm7, %xmm14
movdqu %xmm2, 0(%r10)
movdqu %xmm12, 16(%r10)
movdqu %xmm13, 32(%r10)
movdqu %xmm14, 48(%r10)
sub $1, %rdx
add $64, %r9
add $64, %r10
.balign 16
L11:
cmp $0, %rdx
ja L10
movdqu %xmm9, %xmm7
pinsrq $0, %rdi, %xmm7
pshufb %xmm8, %xmm7
mov %r9, %rax
mov %r10, %rbx
jmp L9
L8:
L9:
# L12/L13: tail loop — remaining blocks one at a time (AES-256).
mov $0, %rdx
mov %rax, %r9
mov %rbx, %r10
pxor %xmm4, %xmm4
mov $1, %r12
pinsrd $0, %r12d, %xmm4
jmp L13
.balign 16
L12:
movdqu %xmm7, %xmm0
pshufb %xmm8, %xmm0
movdqu 0(%r8), %xmm2
pxor %xmm2, %xmm0
movdqu 16(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 32(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 48(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 64(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 80(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 96(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 112(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 128(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 144(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 160(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 176(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 192(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 208(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 224(%r8), %xmm2
aesenclast %xmm2, %xmm0
pxor %xmm2, %xmm2
movdqu 0(%r9), %xmm2
pxor %xmm0, %xmm2
movdqu %xmm2, 0(%r10)
add $1, %rdx
add $16, %r9
add $16, %r10
paddd %xmm4, %xmm7
.balign 16
L13:
cmp %rcx, %rdx
jne L12
# Final partial block (in place at 0(%r13)) if byte count > 16*blocks.
cmp %rbp, %rsi
jbe L14
movdqu 0(%r13), %xmm1
movdqu %xmm7, %xmm0
mov $579005069656919567, %r12
pinsrq $0, %r12, %xmm2
mov $283686952306183, %r12
pinsrq $1, %r12, %xmm2
pshufb %xmm2, %xmm0
movdqu 0(%r8), %xmm2
pxor %xmm2, %xmm0
movdqu 16(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 32(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 48(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 64(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 80(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 96(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 112(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 128(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 144(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 160(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 176(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 192(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 208(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 224(%r8), %xmm2
aesenclast %xmm2, %xmm0
pxor %xmm2, %xmm2
pxor %xmm0, %xmm1
movdqu %xmm1, 0(%r13)
jmp L15
L14:
L15:
# Restore xmm6-xmm15 and callee-saved GPRs (reverse of prologue).
pop %rax
pinsrq $1, %rax, %xmm6
pop %rax
pinsrq $0, %rax, %xmm6
pop %rax
pinsrq $1, %rax, %xmm7
pop %rax
pinsrq $0, %rax, %xmm7
pop %rax
pinsrq $1, %rax, %xmm8
pop %rax
pinsrq $0, %rax, %xmm8
pop %rax
pinsrq $1, %rax, %xmm9
pop %rax
pinsrq $0, %rax, %xmm9
pop %rax
pinsrq $1, %rax, %xmm10
pop %rax
pinsrq $0, %rax, %xmm10
pop %rax
pinsrq $1, %rax, %xmm11
pop %rax
pinsrq $0, %rax, %xmm11
pop %rax
pinsrq $1, %rax, %xmm12
pop %rax
pinsrq $0, %rax, %xmm12
pop %rax
pinsrq $1, %rax, %xmm13
pop %rax
pinsrq $0, %rax, %xmm13
pop %rax
pinsrq $1, %rax, %xmm14
pop %rax
pinsrq $0, %rax, %xmm14
pop %rax
pinsrq $1, %rax, %xmm15
pop %rax
pinsrq $0, %rax, %xmm15
pop %rbx
pop %rbp
pop %rdi
pop %rsi
pop %r12
pop %r13
pop %r14
pop %r15
ret
#-----------------------------------------------------------------------
# compute_iv_stdcall — derive the GCM pre-counter block J0 from the IV.
# Vale-generated, formally verified code; do not hand-edit instructions.
# ABI:  Microsoft x64 (MinGW). Register args: rcx, rdx, r8, r9 plus
#       stack args (offsets differ between the two paths below because
#       they save different amounts of state).
# Two paths, selected on the IV byte length (rdx):
#   - len == 12: fast path — J0 = IV || 0^31 || 1 (built via a byte
#     reversal and pinsrd of the constant 1; see NIST GCM spec).
#   - len != 12: J0 = GHASH over the padded IV followed by a final
#     length block, using precomputed powers of the hash key H that the
#     caller passes via the hashed-keys pointer (blocks read at
#     -32/-16/16/32/64/80 around r9 after `add $32, %r9` —
#     presumed H^1..H^6; TODO confirm against Vale's keyhash layout).
# The GHASH reduction uses the constant 0xC2000000 (3254779904) in the
# top dword, the standard GF(2^128) reduction constant for GCM.
#-----------------------------------------------------------------------
.global compute_iv_stdcall
compute_iv_stdcall:
cmp $12, %rdx
jne L16
# ---- 96-bit-IV path: minimal register save, remap Win64 args into
# ---- the rdi/rsi/rdx/rcx/r8/r9 positions used by the inlined body.
push %rdi
push %rsi
mov %rcx, %rdi
mov %rdx, %rsi
mov %r8, %rdx
mov %r9, %rcx
mov 56(%rsp), %r8
mov 64(%rsp), %r9
cmp $12, %rsi
jne L18
# J0 = IV || 0^31 || 1: load IV block, byte-reverse, set counter
# dword to 1, store to the output block at 0(%rcx).
movdqu 0(%r8), %xmm0
mov $579005069656919567, %rax
pinsrq $0, %rax, %xmm1
mov $283686952306183, %rax
pinsrq $1, %rax, %xmm1
pshufb %xmm1, %xmm0
mov $1, %rax
pinsrd $0, %eax, %xmm0
movdqu %xmm0, 0(%rcx)
jmp L19
L18:
# General GHASH path (dead on this branch since rsi==12 here, but
# kept by the generator; identical in structure to L36 below).
mov %rcx, %rax
add $32, %r9
mov %r8, %rbx
mov %rdx, %rcx
imul $16, %rcx
mov $579005069656919567, %r10
pinsrq $0, %r10, %xmm9
mov $283686952306183, %r10
pinsrq $1, %r10, %xmm9
pxor %xmm8, %xmm8
mov %rdi, %r11
jmp L21
.balign 16
# L20: GHASH 6 input blocks per iteration against six precomputed
# hash-key blocks, accumulating the four 128-bit carry-less products
# (low / middle / high) in xmm4/xmm6/xmm7, then reduce into xmm8.
L20:
add $80, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 80(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
# Fold the running hash (xmm8) into the first block of this group.
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
# Reduce the 256-bit product modulo the GHASH polynomial
# (two folds with the 0xC2000000 constant); result -> xmm8.
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
add $96, %r11
sub $6, %rdx
.balign 16
L21:
cmp $6, %rdx
jae L20
# 1..5 leftover whole blocks: unrolled chain with early exits at
# the matching block count (L24/L26/L28/L30).
cmp $0, %rdx
jbe L22
mov %rdx, %r10
sub $1, %r10
imul $16, %r10
add %r10, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
cmp $1, %rdx
jne L24
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
jmp L25
L24:
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
cmp $2, %rdx
je L26
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $3, %rdx
je L28
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $4, %rdx
je L30
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
jmp L31
L30:
L31:
jmp L29
L28:
L29:
jmp L27
L26:
L27:
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
L25:
# Reduction (same fold as in the main loop).
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
jmp L23
L22:
L23:
# Final partial block (total bytes > 16*blocks): mask the trailing
# block down to its valid bytes, then hash it.
mov %rsi, %r15
cmp %rcx, %rsi
jbe L32
movdqu 0(%rbx), %xmm0
mov %rsi, %r10
and $15, %r10
cmp $8, %r10
jae L34
# < 8 valid bytes: clear high qword, mask low qword to r10 bytes.
mov $0, %rcx
pinsrq $1, %rcx, %xmm0
mov %r10, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $0, %xmm0, %rcx
and %r11, %rcx
pinsrq $0, %rcx, %xmm0
jmp L35
L34:
# >= 8 valid bytes: keep low qword, mask high qword to r10-8 bytes.
mov %r10, %rcx
sub $8, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $1, %xmm0, %rcx
and %r11, %rcx
pinsrq $1, %rcx, %xmm0
L35:
# Hash the masked block (single GHASH step + reduction).
pshufb %xmm9, %xmm0
movdqu -32(%r9), %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r11
pinsrd $3, %r11d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
jmp L33
L32:
L33:
# Hash the final length block (bit length of the IV in the low
# qword; high qword zero), then store J0 to the output pointer.
mov %rax, %rcx
mov $0, %r11
mov %rsi, %r13
pxor %xmm0, %xmm0
mov %r11, %rax
imul $8, %rax
pinsrq $1, %rax, %xmm0
mov %r13, %rax
imul $8, %rax
pinsrq $0, %rax, %xmm0
movdqu -32(%r9), %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r11
pinsrd $3, %r11d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
movdqu %xmm8, 0(%rcx)
L19:
pop %rsi
pop %rdi
jmp L17
L16:
# ---- General-IV path: full save of callee-saved GPRs and xmm6-xmm15
# ---- (this path clobbers more state), then the same GHASH body.
push %r15
push %r14
push %r13
push %r12
push %rsi
push %rdi
push %rbp
push %rbx
pextrq $0, %xmm15, %rax
push %rax
pextrq $1, %xmm15, %rax
push %rax
pextrq $0, %xmm14, %rax
push %rax
pextrq $1, %xmm14, %rax
push %rax
pextrq $0, %xmm13, %rax
push %rax
pextrq $1, %xmm13, %rax
push %rax
pextrq $0, %xmm12, %rax
push %rax
pextrq $1, %xmm12, %rax
push %rax
pextrq $0, %xmm11, %rax
push %rax
pextrq $1, %xmm11, %rax
push %rax
pextrq $0, %xmm10, %rax
push %rax
pextrq $1, %xmm10, %rax
push %rax
pextrq $0, %xmm9, %rax
push %rax
pextrq $1, %xmm9, %rax
push %rax
pextrq $0, %xmm8, %rax
push %rax
pextrq $1, %xmm8, %rax
push %rax
pextrq $0, %xmm7, %rax
push %rax
pextrq $1, %xmm7, %rax
push %rax
pextrq $0, %xmm6, %rax
push %rax
pextrq $1, %xmm6, %rax
push %rax
mov %rcx, %rdi
mov %rdx, %rsi
mov %r8, %rdx
mov %r9, %rcx
mov 264(%rsp), %r8
mov 272(%rsp), %r9
cmp $12, %rsi
jne L36
# (Dead on this branch — rsi != 12 here — kept by the generator.)
movdqu 0(%r8), %xmm0
mov $579005069656919567, %rax
pinsrq $0, %rax, %xmm1
mov $283686952306183, %rax
pinsrq $1, %rax, %xmm1
pshufb %xmm1, %xmm0
mov $1, %rax
pinsrd $0, %eax, %xmm0
movdqu %xmm0, 0(%rcx)
jmp L37
L36:
# GHASH of the IV — identical structure to L18..L33 above.
mov %rcx, %rax
add $32, %r9
mov %r8, %rbx
mov %rdx, %rcx
imul $16, %rcx
mov $579005069656919567, %r10
pinsrq $0, %r10, %xmm9
mov $283686952306183, %r10
pinsrq $1, %r10, %xmm9
pxor %xmm8, %xmm8
mov %rdi, %r11
jmp L39
.balign 16
# L38: 6-blocks-per-iteration GHASH loop (see L20 for commentary).
L38:
add $80, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 80(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
add $96, %r11
sub $6, %rdx
.balign 16
L39:
cmp $6, %rdx
jae L38
# 1..5 leftover whole blocks (see L22..L31 for commentary).
cmp $0, %rdx
jbe L40
mov %rdx, %r10
sub $1, %r10
imul $16, %r10
add %r10, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
cmp $1, %rdx
jne L42
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
jmp L43
L42:
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
cmp $2, %rdx
je L44
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $3, %rdx
je L46
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $4, %rdx
je L48
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
jmp L49
L48:
L49:
jmp L47
L46:
L47:
jmp L45
L44:
L45:
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
L43:
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
jmp L41
L40:
L41:
# Final partial block: mask to valid bytes, then hash (see L32..L35).
mov %rsi, %r15
cmp %rcx, %rsi
jbe L50
movdqu 0(%rbx), %xmm0
mov %rsi, %r10
and $15, %r10
cmp $8, %r10
jae L52
mov $0, %rcx
pinsrq $1, %rcx, %xmm0
mov %r10, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $0, %xmm0, %rcx
and %r11, %rcx
pinsrq $0, %rcx, %xmm0
jmp L53
L52:
mov %r10, %rcx
sub $8, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $1, %xmm0, %rcx
and %r11, %rcx
pinsrq $1, %rcx, %xmm0
L53:
pshufb %xmm9, %xmm0
movdqu -32(%r9), %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r11
pinsrd $3, %r11d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
jmp L51
L50:
L51:
# Final length block, then store J0 (see L33 commentary).
mov %rax, %rcx
mov $0, %r11
mov %rsi, %r13
pxor %xmm0, %xmm0
mov %r11, %rax
imul $8, %rax
pinsrq $1, %rax, %xmm0
mov %r13, %rax
imul $8, %rax
pinsrq $0, %rax, %xmm0
movdqu -32(%r9), %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r11
pinsrd $3, %r11d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
movdqu %xmm8, 0(%rcx)
L37:
# Restore xmm6-xmm15 and callee-saved GPRs for the general path.
pop %rax
pinsrq $1, %rax, %xmm6
pop %rax
pinsrq $0, %rax, %xmm6
pop %rax
pinsrq $1, %rax, %xmm7
pop %rax
pinsrq $0, %rax, %xmm7
pop %rax
pinsrq $1, %rax, %xmm8
pop %rax
pinsrq $0, %rax, %xmm8
pop %rax
pinsrq $1, %rax, %xmm9
pop %rax
pinsrq $0, %rax, %xmm9
pop %rax
pinsrq $1, %rax, %xmm10
pop %rax
pinsrq $0, %rax, %xmm10
pop %rax
pinsrq $1, %rax, %xmm11
pop %rax
pinsrq $0, %rax, %xmm11
pop %rax
pinsrq $1, %rax, %xmm12
pop %rax
pinsrq $0, %rax, %xmm12
pop %rax
pinsrq $1, %rax, %xmm13
pop %rax
pinsrq $0, %rax, %xmm13
pop %rax
pinsrq $1, %rax, %xmm14
pop %rax
pinsrq $0, %rax, %xmm14
pop %rax
pinsrq $1, %rax, %xmm15
pop %rax
pinsrq $0, %rax, %xmm15
pop %rbx
pop %rbp
pop %rdi
pop %rsi
pop %r12
pop %r13
pop %r14
pop %r15
L17:
ret
.global gcm128_encrypt_opt
gcm128_encrypt_opt:
push %r15
push %r14
push %r13
push %r12
push %rsi
push %rdi
push %rbp
push %rbx
pextrq $0, %xmm15, %rax
push %rax
pextrq $1, %xmm15, %rax
push %rax
pextrq $0, %xmm14, %rax
push %rax
pextrq $1, %xmm14, %rax
push %rax
pextrq $0, %xmm13, %rax
push %rax
pextrq $1, %xmm13, %rax
push %rax
pextrq $0, %xmm12, %rax
push %rax
pextrq $1, %xmm12, %rax
push %rax
pextrq $0, %xmm11, %rax
push %rax
pextrq $1, %xmm11, %rax
push %rax
pextrq $0, %xmm10, %rax
push %rax
pextrq $1, %xmm10, %rax
push %rax
pextrq $0, %xmm9, %rax
push %rax
pextrq $1, %xmm9, %rax
push %rax
pextrq $0, %xmm8, %rax
push %rax
pextrq $1, %xmm8, %rax
push %rax
pextrq $0, %xmm7, %rax
push %rax
pextrq $1, %xmm7, %rax
push %rax
pextrq $0, %xmm6, %rax
push %rax
pextrq $1, %xmm6, %rax
push %rax
mov %rcx, %rdi
mov %rdx, %rsi
mov %r8, %rdx
mov %r9, %rcx
mov 264(%rsp), %r8
mov 272(%rsp), %r9
mov 352(%rsp), %rbp
mov %rcx, %r13
lea 32(%r9), %r9
mov 280(%rsp), %rbx
mov %rdx, %rcx
imul $16, %rcx
mov $579005069656919567, %r10
pinsrq $0, %r10, %xmm9
mov $283686952306183, %r10
pinsrq $1, %r10, %xmm9
pxor %xmm8, %xmm8
mov %rdi, %r11
jmp L55
.balign 16
L54:
add $80, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 80(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
add $96, %r11
sub $6, %rdx
.balign 16
L55:
cmp $6, %rdx
jae L54
cmp $0, %rdx
jbe L56
mov %rdx, %r10
sub $1, %r10
imul $16, %r10
add %r10, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
cmp $1, %rdx
jne L58
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
jmp L59
L58:
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
cmp $2, %rdx
je L60
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $3, %rdx
je L62
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $4, %rdx
je L64
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
jmp L65
L64:
L65:
jmp L63
L62:
L63:
jmp L61
L60:
L61:
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
L59:
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
jmp L57
L56:
L57:
mov %rsi, %r15
cmp %rcx, %rsi
jbe L66
movdqu 0(%rbx), %xmm0
mov %rsi, %r10
and $15, %r10
cmp $8, %r10
jae L68
mov $0, %rcx
pinsrq $1, %rcx, %xmm0
mov %r10, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $0, %xmm0, %rcx
and %r11, %rcx
pinsrq $0, %rcx, %xmm0
jmp L69
L68:
mov %r10, %rcx
sub $8, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $1, %xmm0, %rcx
and %r11, %rcx
pinsrq $1, %rcx, %xmm0
L69:
pshufb %xmm9, %xmm0
movdqu -32(%r9), %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r11
pinsrd $3, %r11d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
jmp L67
L66:
L67:
mov 288(%rsp), %rdi
mov 296(%rsp), %rsi
mov 304(%rsp), %rdx
mov %r13, %rcx
movdqu %xmm9, %xmm0
movdqu 0(%r8), %xmm1
movdqu %xmm1, 0(%rbp)
pxor %xmm10, %xmm10
mov $1, %r11
pinsrq $0, %r11, %xmm10
vpaddd %xmm10, %xmm1, %xmm1
cmp $0, %rdx
jne L70
vpshufb %xmm0, %xmm1, %xmm1
movdqu %xmm1, 32(%rbp)
jmp L71
L70:
movdqu %xmm8, 32(%rbp)
add $128, %rcx
pextrq $0, %xmm1, %rbx
and $255, %rbx
vpshufb %xmm0, %xmm1, %xmm1
lea 96(%rsi), %r14
movdqu -128(%rcx), %xmm4
pxor %xmm2, %xmm2
mov $72057594037927936, %r11
pinsrq $1, %r11, %xmm2
movdqu -112(%rcx), %xmm15
mov %rcx, %r12
sub $96, %r12
vpxor %xmm4, %xmm1, %xmm9
add $6, %rbx
cmp $256, %rbx
jae L72
vpaddd %xmm2, %xmm1, %xmm10
vpaddd %xmm2, %xmm10, %xmm11
vpxor %xmm4, %xmm10, %xmm10
vpaddd %xmm2, %xmm11, %xmm12
vpxor %xmm4, %xmm11, %xmm11
vpaddd %xmm2, %xmm12, %xmm13
vpxor %xmm4, %xmm12, %xmm12
vpaddd %xmm2, %xmm13, %xmm14
vpxor %xmm4, %xmm13, %xmm13
vpaddd %xmm2, %xmm14, %xmm1
vpxor %xmm4, %xmm14, %xmm14
jmp L73
L72:
sub $256, %rbx
vpshufb %xmm0, %xmm1, %xmm6
pxor %xmm5, %xmm5
mov $1, %r11
pinsrq $0, %r11, %xmm5
vpaddd %xmm5, %xmm6, %xmm10
pxor %xmm5, %xmm5
mov $2, %r11
pinsrq $0, %r11, %xmm5
vpaddd %xmm5, %xmm6, %xmm11
vpaddd %xmm5, %xmm10, %xmm12
vpshufb %xmm0, %xmm10, %xmm10
vpaddd %xmm5, %xmm11, %xmm13
vpshufb %xmm0, %xmm11, %xmm11
vpxor %xmm4, %xmm10, %xmm10
vpaddd %xmm5, %xmm12, %xmm14
vpshufb %xmm0, %xmm12, %xmm12
vpxor %xmm4, %xmm11, %xmm11
vpaddd %xmm5, %xmm13, %xmm1
vpshufb %xmm0, %xmm13, %xmm13
vpxor %xmm4, %xmm12, %xmm12
vpshufb %xmm0, %xmm14, %xmm14
vpxor %xmm4, %xmm13, %xmm13
vpshufb %xmm0, %xmm1, %xmm1
vpxor %xmm4, %xmm14, %xmm14
L73:
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -96(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -80(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -64(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -48(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -32(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -16(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 0(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 16(%rcx), %xmm15
movdqu 32(%rcx), %xmm3
vaesenc %xmm15, %xmm9, %xmm9
vpxor 0(%rdi), %xmm3, %xmm4
vaesenc %xmm15, %xmm10, %xmm10
vpxor 16(%rdi), %xmm3, %xmm5
vaesenc %xmm15, %xmm11, %xmm11
vpxor 32(%rdi), %xmm3, %xmm6
vaesenc %xmm15, %xmm12, %xmm12
vpxor 48(%rdi), %xmm3, %xmm8
vaesenc %xmm15, %xmm13, %xmm13
vpxor 64(%rdi), %xmm3, %xmm2
vaesenc %xmm15, %xmm14, %xmm14
vpxor 80(%rdi), %xmm3, %xmm3
lea 96(%rdi), %rdi
vaesenclast %xmm4, %xmm9, %xmm9
vaesenclast %xmm5, %xmm10, %xmm10
vaesenclast %xmm6, %xmm11, %xmm11
vaesenclast %xmm8, %xmm12, %xmm12
vaesenclast %xmm2, %xmm13, %xmm13
vaesenclast %xmm3, %xmm14, %xmm14
movdqu %xmm9, 0(%rsi)
movdqu %xmm10, 16(%rsi)
movdqu %xmm11, 32(%rsi)
movdqu %xmm12, 48(%rsi)
movdqu %xmm13, 64(%rsi)
movdqu %xmm14, 80(%rsi)
lea 96(%rsi), %rsi
vpshufb %xmm0, %xmm9, %xmm8
vpshufb %xmm0, %xmm10, %xmm2
movdqu %xmm8, 112(%rbp)
vpshufb %xmm0, %xmm11, %xmm4
movdqu %xmm2, 96(%rbp)
vpshufb %xmm0, %xmm12, %xmm5
movdqu %xmm4, 80(%rbp)
vpshufb %xmm0, %xmm13, %xmm6
movdqu %xmm5, 64(%rbp)
vpshufb %xmm0, %xmm14, %xmm7
movdqu %xmm6, 48(%rbp)
movdqu -128(%rcx), %xmm4
pxor %xmm2, %xmm2
mov $72057594037927936, %r11
pinsrq $1, %r11, %xmm2
movdqu -112(%rcx), %xmm15
mov %rcx, %r12
sub $96, %r12
vpxor %xmm4, %xmm1, %xmm9
add $6, %rbx
cmp $256, %rbx
jae L74
vpaddd %xmm2, %xmm1, %xmm10
vpaddd %xmm2, %xmm10, %xmm11
vpxor %xmm4, %xmm10, %xmm10
vpaddd %xmm2, %xmm11, %xmm12
vpxor %xmm4, %xmm11, %xmm11
vpaddd %xmm2, %xmm12, %xmm13
vpxor %xmm4, %xmm12, %xmm12
vpaddd %xmm2, %xmm13, %xmm14
vpxor %xmm4, %xmm13, %xmm13
vpaddd %xmm2, %xmm14, %xmm1
vpxor %xmm4, %xmm14, %xmm14
jmp L75
L74:
sub $256, %rbx
vpshufb %xmm0, %xmm1, %xmm6
pxor %xmm5, %xmm5
mov $1, %r11
pinsrq $0, %r11, %xmm5
vpaddd %xmm5, %xmm6, %xmm10
pxor %xmm5, %xmm5
mov $2, %r11
pinsrq $0, %r11, %xmm5
vpaddd %xmm5, %xmm6, %xmm11
vpaddd %xmm5, %xmm10, %xmm12
vpshufb %xmm0, %xmm10, %xmm10
vpaddd %xmm5, %xmm11, %xmm13
vpshufb %xmm0, %xmm11, %xmm11
vpxor %xmm4, %xmm10, %xmm10
vpaddd %xmm5, %xmm12, %xmm14
vpshufb %xmm0, %xmm12, %xmm12
vpxor %xmm4, %xmm11, %xmm11
vpaddd %xmm5, %xmm13, %xmm1
vpshufb %xmm0, %xmm13, %xmm13
vpxor %xmm4, %xmm12, %xmm12
vpshufb %xmm0, %xmm14, %xmm14
vpxor %xmm4, %xmm13, %xmm13
vpshufb %xmm0, %xmm1, %xmm1
vpxor %xmm4, %xmm14, %xmm14
L75:
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -96(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -80(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -64(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -48(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -32(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -16(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 0(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 16(%rcx), %xmm15
movdqu 32(%rcx), %xmm3
vaesenc %xmm15, %xmm9, %xmm9
vpxor 0(%rdi), %xmm3, %xmm4
vaesenc %xmm15, %xmm10, %xmm10
vpxor 16(%rdi), %xmm3, %xmm5
vaesenc %xmm15, %xmm11, %xmm11
vpxor 32(%rdi), %xmm3, %xmm6
vaesenc %xmm15, %xmm12, %xmm12
vpxor 48(%rdi), %xmm3, %xmm8
vaesenc %xmm15, %xmm13, %xmm13
vpxor 64(%rdi), %xmm3, %xmm2
vaesenc %xmm15, %xmm14, %xmm14
vpxor 80(%rdi), %xmm3, %xmm3
lea 96(%rdi), %rdi
vaesenclast %xmm4, %xmm9, %xmm9
vaesenclast %xmm5, %xmm10, %xmm10
vaesenclast %xmm6, %xmm11, %xmm11
vaesenclast %xmm8, %xmm12, %xmm12
vaesenclast %xmm2, %xmm13, %xmm13
vaesenclast %xmm3, %xmm14, %xmm14
movdqu %xmm9, 0(%rsi)
movdqu %xmm10, 16(%rsi)
movdqu %xmm11, 32(%rsi)
movdqu %xmm12, 48(%rsi)
movdqu %xmm13, 64(%rsi)
movdqu %xmm14, 80(%rsi)
lea 96(%rsi), %rsi
sub $12, %rdx
movdqu 32(%rbp), %xmm8
pxor %xmm2, %xmm2
mov $72057594037927936, %r11
pinsrq $1, %r11, %xmm2
vpxor %xmm4, %xmm4, %xmm4
movdqu -128(%rcx), %xmm15
vpaddd %xmm2, %xmm1, %xmm10
vpaddd %xmm2, %xmm10, %xmm11
vpaddd %xmm2, %xmm11, %xmm12
vpaddd %xmm2, %xmm12, %xmm13
vpaddd %xmm2, %xmm13, %xmm14
vpxor %xmm15, %xmm1, %xmm9
movdqu %xmm4, 16(%rbp)
jmp L77
.balign 16
L76:
add $6, %rbx
cmp $256, %rbx
jb L78
mov $579005069656919567, %r11
pinsrq $0, %r11, %xmm0
mov $283686952306183, %r11
pinsrq $1, %r11, %xmm0
vpshufb %xmm0, %xmm1, %xmm6
pxor %xmm5, %xmm5
mov $1, %r11
pinsrq $0, %r11, %xmm5
vpaddd %xmm5, %xmm6, %xmm10
pxor %xmm5, %xmm5
mov $2, %r11
pinsrq $0, %r11, %xmm5
vpaddd %xmm5, %xmm6, %xmm11
movdqu -32(%r9), %xmm3
vpaddd %xmm5, %xmm10, %xmm12
vpshufb %xmm0, %xmm10, %xmm10
vpaddd %xmm5, %xmm11, %xmm13
vpshufb %xmm0, %xmm11, %xmm11
vpxor %xmm15, %xmm10, %xmm10
vpaddd %xmm5, %xmm12, %xmm14
vpshufb %xmm0, %xmm12, %xmm12
vpxor %xmm15, %xmm11, %xmm11
vpaddd %xmm5, %xmm13, %xmm1
vpshufb %xmm0, %xmm13, %xmm13
vpshufb %xmm0, %xmm14, %xmm14
vpshufb %xmm0, %xmm1, %xmm1
sub $256, %rbx
jmp L79
L78:
movdqu -32(%r9), %xmm3
vpaddd %xmm14, %xmm2, %xmm1
vpxor %xmm15, %xmm10, %xmm10
vpxor %xmm15, %xmm11, %xmm11
L79:
movdqu %xmm1, 128(%rbp)
vpclmulqdq $16, %xmm3, %xmm7, %xmm5
vpxor %xmm15, %xmm12, %xmm12
movdqu -112(%rcx), %xmm2
vpclmulqdq $1, %xmm3, %xmm7, %xmm6
vaesenc %xmm2, %xmm9, %xmm9
movdqu 48(%rbp), %xmm0
vpxor %xmm15, %xmm13, %xmm13
vpclmulqdq $0, %xmm3, %xmm7, %xmm1
vaesenc %xmm2, %xmm10, %xmm10
vpxor %xmm15, %xmm14, %xmm14
vpclmulqdq $17, %xmm3, %xmm7, %xmm7
vaesenc %xmm2, %xmm11, %xmm11
movdqu -16(%r9), %xmm3
vaesenc %xmm2, %xmm12, %xmm12
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $0, %xmm3, %xmm0, %xmm5
vpxor %xmm4, %xmm8, %xmm8
vaesenc %xmm2, %xmm13, %xmm13
vpxor %xmm5, %xmm1, %xmm4
vpclmulqdq $16, %xmm3, %xmm0, %xmm1
vaesenc %xmm2, %xmm14, %xmm14
movdqu -96(%rcx), %xmm15
vpclmulqdq $1, %xmm3, %xmm0, %xmm2
vaesenc %xmm15, %xmm9, %xmm9
vpxor 16(%rbp), %xmm8, %xmm8
vpclmulqdq $17, %xmm3, %xmm0, %xmm3
movdqu 64(%rbp), %xmm0
vaesenc %xmm15, %xmm10, %xmm10
movbeq 88(%r14), %r13
vaesenc %xmm15, %xmm11, %xmm11
movbeq 80(%r14), %r12
vaesenc %xmm15, %xmm12, %xmm12
movq %r13, 32(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
movq %r12, 40(%rbp)
movdqu 16(%r9), %xmm5
vaesenc %xmm15, %xmm14, %xmm14
movdqu -80(%rcx), %xmm15
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm2, %xmm6, %xmm6
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vaesenc %xmm15, %xmm10, %xmm10
vpxor %xmm3, %xmm7, %xmm7
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vaesenc %xmm15, %xmm11, %xmm11
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 80(%rbp), %xmm0
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vaesenc %xmm15, %xmm14, %xmm14
movdqu -64(%rcx), %xmm15
vpxor %xmm2, %xmm6, %xmm6
vpclmulqdq $0, %xmm1, %xmm0, %xmm2
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $16, %xmm1, %xmm0, %xmm3
vaesenc %xmm15, %xmm10, %xmm10
movbeq 72(%r14), %r13
vpxor %xmm5, %xmm7, %xmm7
vpclmulqdq $1, %xmm1, %xmm0, %xmm5
vaesenc %xmm15, %xmm11, %xmm11
movbeq 64(%r14), %r12
vpclmulqdq $17, %xmm1, %xmm0, %xmm1
movdqu 96(%rbp), %xmm0
vaesenc %xmm15, %xmm12, %xmm12
movq %r13, 48(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
movq %r12, 56(%rbp)
vpxor %xmm2, %xmm4, %xmm4
movdqu 64(%r9), %xmm2
vaesenc %xmm15, %xmm14, %xmm14
movdqu -48(%rcx), %xmm15
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $0, %xmm2, %xmm0, %xmm3
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $16, %xmm2, %xmm0, %xmm5
vaesenc %xmm15, %xmm10, %xmm10
movbeq 56(%r14), %r13
vpxor %xmm1, %xmm7, %xmm7
vpclmulqdq $1, %xmm2, %xmm0, %xmm1
vpxor 112(%rbp), %xmm8, %xmm8
vaesenc %xmm15, %xmm11, %xmm11
movbeq 48(%r14), %r12
vpclmulqdq $17, %xmm2, %xmm0, %xmm2
vaesenc %xmm15, %xmm12, %xmm12
movq %r13, 64(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
movq %r12, 72(%rbp)
vpxor %xmm3, %xmm4, %xmm4
movdqu 80(%r9), %xmm3
vaesenc %xmm15, %xmm14, %xmm14
movdqu -32(%rcx), %xmm15
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $16, %xmm3, %xmm8, %xmm5
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $1, %xmm3, %xmm8, %xmm1
vaesenc %xmm15, %xmm10, %xmm10
movbeq 40(%r14), %r13
vpxor %xmm2, %xmm7, %xmm7
vpclmulqdq $0, %xmm3, %xmm8, %xmm2
vaesenc %xmm15, %xmm11, %xmm11
movbeq 32(%r14), %r12
vpclmulqdq $17, %xmm3, %xmm8, %xmm8
vaesenc %xmm15, %xmm12, %xmm12
movq %r13, 80(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
movq %r12, 88(%rbp)
vpxor %xmm5, %xmm6, %xmm6
vaesenc %xmm15, %xmm14, %xmm14
vpxor %xmm1, %xmm6, %xmm6
movdqu -16(%rcx), %xmm15
vpslldq $8, %xmm6, %xmm5
vpxor %xmm2, %xmm4, %xmm4
pxor %xmm3, %xmm3
mov $13979173243358019584, %r11
pinsrq $1, %r11, %xmm3
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm8, %xmm7, %xmm7
vaesenc %xmm15, %xmm10, %xmm10
vpxor %xmm5, %xmm4, %xmm4
movbeq 24(%r14), %r13
vaesenc %xmm15, %xmm11, %xmm11
movbeq 16(%r14), %r12
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
movq %r13, 96(%rbp)
vaesenc %xmm15, %xmm12, %xmm12
movq %r12, 104(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 0(%rcx), %xmm1
vaesenc %xmm1, %xmm9, %xmm9
movdqu 16(%rcx), %xmm15
vaesenc %xmm1, %xmm10, %xmm10
vpsrldq $8, %xmm6, %xmm6
vaesenc %xmm1, %xmm11, %xmm11
vpxor %xmm6, %xmm7, %xmm7
vaesenc %xmm1, %xmm12, %xmm12
vpxor %xmm0, %xmm4, %xmm4
movbeq 8(%r14), %r13
vaesenc %xmm1, %xmm13, %xmm13
movbeq 0(%r14), %r12
vaesenc %xmm1, %xmm14, %xmm14
movdqu 32(%rcx), %xmm1
vaesenc %xmm15, %xmm9, %xmm9
movdqu %xmm7, 16(%rbp)
vpalignr $8, %xmm4, %xmm4, %xmm8
vaesenc %xmm15, %xmm10, %xmm10
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor 0(%rdi), %xmm1, %xmm2
vaesenc %xmm15, %xmm11, %xmm11
vpxor 16(%rdi), %xmm1, %xmm0
vaesenc %xmm15, %xmm12, %xmm12
vpxor 32(%rdi), %xmm1, %xmm5
vaesenc %xmm15, %xmm13, %xmm13
vpxor 48(%rdi), %xmm1, %xmm6
vaesenc %xmm15, %xmm14, %xmm14
vpxor 64(%rdi), %xmm1, %xmm7
vpxor 80(%rdi), %xmm1, %xmm3
movdqu 128(%rbp), %xmm1
vaesenclast %xmm2, %xmm9, %xmm9
pxor %xmm2, %xmm2
mov $72057594037927936, %r11
pinsrq $1, %r11, %xmm2
vaesenclast %xmm0, %xmm10, %xmm10
vpaddd %xmm2, %xmm1, %xmm0
movq %r13, 112(%rbp)
lea 96(%rdi), %rdi
vaesenclast %xmm5, %xmm11, %xmm11
vpaddd %xmm2, %xmm0, %xmm5
movq %r12, 120(%rbp)
lea 96(%rsi), %rsi
movdqu -128(%rcx), %xmm15
vaesenclast %xmm6, %xmm12, %xmm12
vpaddd %xmm2, %xmm5, %xmm6
vaesenclast %xmm7, %xmm13, %xmm13
vpaddd %xmm2, %xmm6, %xmm7
vaesenclast %xmm3, %xmm14, %xmm14
vpaddd %xmm2, %xmm7, %xmm3
sub $6, %rdx
add $96, %r14
cmp $0, %rdx
jbe L80
movdqu %xmm9, -96(%rsi)
vpxor %xmm15, %xmm1, %xmm9
movdqu %xmm10, -80(%rsi)
movdqu %xmm0, %xmm10
movdqu %xmm11, -64(%rsi)
movdqu %xmm5, %xmm11
movdqu %xmm12, -48(%rsi)
movdqu %xmm6, %xmm12
movdqu %xmm13, -32(%rsi)
movdqu %xmm7, %xmm13
movdqu %xmm14, -16(%rsi)
movdqu %xmm3, %xmm14
movdqu 32(%rbp), %xmm7
jmp L81
L80:
vpxor 16(%rbp), %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
L81:
.balign 16
L77:
cmp $0, %rdx
ja L76
movdqu 32(%rbp), %xmm7
movdqu %xmm1, 32(%rbp)
pxor %xmm4, %xmm4
movdqu %xmm4, 16(%rbp)
movdqu -32(%r9), %xmm3
vpclmulqdq $0, %xmm3, %xmm7, %xmm1
vpclmulqdq $16, %xmm3, %xmm7, %xmm5
movdqu 48(%rbp), %xmm0
vpclmulqdq $1, %xmm3, %xmm7, %xmm6
vpclmulqdq $17, %xmm3, %xmm7, %xmm7
movdqu -16(%r9), %xmm3
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $0, %xmm3, %xmm0, %xmm5
vpxor %xmm4, %xmm8, %xmm8
vpxor %xmm5, %xmm1, %xmm4
vpclmulqdq $16, %xmm3, %xmm0, %xmm1
vpclmulqdq $1, %xmm3, %xmm0, %xmm2
vpxor 16(%rbp), %xmm8, %xmm8
vpclmulqdq $17, %xmm3, %xmm0, %xmm3
movdqu 64(%rbp), %xmm0
movdqu 16(%r9), %xmm5
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpxor %xmm3, %xmm7, %xmm7
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 80(%rbp), %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpclmulqdq $0, %xmm1, %xmm0, %xmm2
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $16, %xmm1, %xmm0, %xmm3
vpxor %xmm5, %xmm7, %xmm7
vpclmulqdq $1, %xmm1, %xmm0, %xmm5
vpclmulqdq $17, %xmm1, %xmm0, %xmm1
movdqu 96(%rbp), %xmm0
vpxor %xmm2, %xmm4, %xmm4
movdqu 64(%r9), %xmm2
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $0, %xmm2, %xmm0, %xmm3
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $16, %xmm2, %xmm0, %xmm5
vpxor %xmm1, %xmm7, %xmm7
vpclmulqdq $1, %xmm2, %xmm0, %xmm1
vpxor 112(%rbp), %xmm8, %xmm8
vpclmulqdq $17, %xmm2, %xmm0, %xmm2
vpxor %xmm3, %xmm4, %xmm4
movdqu 80(%r9), %xmm3
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $16, %xmm3, %xmm8, %xmm5
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $1, %xmm3, %xmm8, %xmm1
vpxor %xmm2, %xmm7, %xmm7
vpclmulqdq $0, %xmm3, %xmm8, %xmm2
vpclmulqdq $17, %xmm3, %xmm8, %xmm8
vpxor %xmm5, %xmm6, %xmm6
vpxor %xmm1, %xmm6, %xmm6
vpxor %xmm2, %xmm4, %xmm4
pxor %xmm3, %xmm3
mov $3254779904, %rax
pinsrd $3, %eax, %xmm3
vpxor %xmm8, %xmm7, %xmm7
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
mov $579005069656919567, %r12
pinsrq $0, %r12, %xmm0
mov $283686952306183, %r12
pinsrq $1, %r12, %xmm0
movdqu %xmm9, -96(%rsi)
vpshufb %xmm0, %xmm9, %xmm9
vpxor %xmm7, %xmm1, %xmm1
movdqu %xmm10, -80(%rsi)
vpshufb %xmm0, %xmm10, %xmm10
movdqu %xmm11, -64(%rsi)
vpshufb %xmm0, %xmm11, %xmm11
movdqu %xmm12, -48(%rsi)
vpshufb %xmm0, %xmm12, %xmm12
movdqu %xmm13, -32(%rsi)
vpshufb %xmm0, %xmm13, %xmm13
movdqu %xmm14, -16(%rsi)
vpshufb %xmm0, %xmm14, %xmm14
pxor %xmm4, %xmm4
movdqu %xmm14, %xmm7
movdqu %xmm4, 16(%rbp)
movdqu %xmm13, 48(%rbp)
movdqu %xmm12, 64(%rbp)
movdqu %xmm11, 80(%rbp)
movdqu %xmm10, 96(%rbp)
movdqu %xmm9, 112(%rbp)
movdqu -32(%r9), %xmm3
vpclmulqdq $0, %xmm3, %xmm7, %xmm1
vpclmulqdq $16, %xmm3, %xmm7, %xmm5
movdqu 48(%rbp), %xmm0
vpclmulqdq $1, %xmm3, %xmm7, %xmm6
vpclmulqdq $17, %xmm3, %xmm7, %xmm7
movdqu -16(%r9), %xmm3
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $0, %xmm3, %xmm0, %xmm5
vpxor %xmm4, %xmm8, %xmm8
vpxor %xmm5, %xmm1, %xmm4
vpclmulqdq $16, %xmm3, %xmm0, %xmm1
vpclmulqdq $1, %xmm3, %xmm0, %xmm2
vpxor 16(%rbp), %xmm8, %xmm8
vpclmulqdq $17, %xmm3, %xmm0, %xmm3
movdqu 64(%rbp), %xmm0
movdqu 16(%r9), %xmm5
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpxor %xmm3, %xmm7, %xmm7
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 80(%rbp), %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpclmulqdq $0, %xmm1, %xmm0, %xmm2
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $16, %xmm1, %xmm0, %xmm3
vpxor %xmm5, %xmm7, %xmm7
vpclmulqdq $1, %xmm1, %xmm0, %xmm5
vpclmulqdq $17, %xmm1, %xmm0, %xmm1
movdqu 96(%rbp), %xmm0
vpxor %xmm2, %xmm4, %xmm4
movdqu 64(%r9), %xmm2
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $0, %xmm2, %xmm0, %xmm3
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $16, %xmm2, %xmm0, %xmm5
vpxor %xmm1, %xmm7, %xmm7
vpclmulqdq $1, %xmm2, %xmm0, %xmm1
vpxor 112(%rbp), %xmm8, %xmm8
vpclmulqdq $17, %xmm2, %xmm0, %xmm2
vpxor %xmm3, %xmm4, %xmm4
movdqu 80(%r9), %xmm3
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $16, %xmm3, %xmm8, %xmm5
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $1, %xmm3, %xmm8, %xmm1
vpxor %xmm2, %xmm7, %xmm7
vpclmulqdq $0, %xmm3, %xmm8, %xmm2
vpclmulqdq $17, %xmm3, %xmm8, %xmm8
vpxor %xmm5, %xmm6, %xmm6
vpxor %xmm1, %xmm6, %xmm6
vpxor %xmm2, %xmm4, %xmm4
pxor %xmm3, %xmm3
mov $3254779904, %rax
pinsrd $3, %eax, %xmm3
vpxor %xmm8, %xmm7, %xmm7
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
sub $128, %rcx
L71:
movdqu 32(%rbp), %xmm11
mov %rcx, %r8
mov 312(%rsp), %rax
mov 320(%rsp), %rdi
mov 328(%rsp), %rdx
mov %rdx, %r14
mov $579005069656919567, %r12
pinsrq $0, %r12, %xmm9
mov $283686952306183, %r12
pinsrq $1, %r12, %xmm9
pshufb %xmm9, %xmm11
pxor %xmm10, %xmm10
mov $1, %rbx
pinsrd $0, %ebx, %xmm10
mov %rax, %r11
mov %rdi, %r10
mov $0, %rbx
jmp L83
.balign 16
L82:
movdqu %xmm11, %xmm0
pshufb %xmm9, %xmm0
movdqu 0(%r8), %xmm2
pxor %xmm2, %xmm0
movdqu 16(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 32(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 48(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 64(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 80(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 96(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 112(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 128(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 144(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 160(%r8), %xmm2
aesenclast %xmm2, %xmm0
pxor %xmm2, %xmm2
movdqu 0(%r11), %xmm2
pxor %xmm0, %xmm2
movdqu %xmm2, 0(%r10)
add $1, %rbx
add $16, %r11
add $16, %r10
paddd %xmm10, %xmm11
.balign 16
L83:
cmp %rdx, %rbx
jne L82
mov %rdi, %r11
jmp L85
.balign 16
L84:
add $80, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 80(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
add $96, %r11
sub $6, %rdx
.balign 16
L85:
cmp $6, %rdx
jae L84
cmp $0, %rdx
jbe L86
mov %rdx, %r10
sub $1, %r10
imul $16, %r10
add %r10, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
cmp $1, %rdx
jne L88
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
jmp L89
L88:
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
cmp $2, %rdx
je L90
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $3, %rdx
je L92
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $4, %rdx
je L94
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
jmp L95
L94:
L95:
jmp L93
L92:
L93:
jmp L91
L90:
L91:
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
L89:
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
jmp L87
L86:
L87:
add 304(%rsp), %r14
imul $16, %r14
mov 344(%rsp), %r13
cmp %r14, %r13
jbe L96
mov 336(%rsp), %rax
mov %r13, %r10
and $15, %r10
movdqu %xmm11, %xmm0
pshufb %xmm9, %xmm0
movdqu 0(%r8), %xmm2
pxor %xmm2, %xmm0
movdqu 16(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 32(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 48(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 64(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 80(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 96(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 112(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 128(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 144(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 160(%r8), %xmm2
aesenclast %xmm2, %xmm0
pxor %xmm2, %xmm2
movdqu 0(%rax), %xmm4
pxor %xmm4, %xmm0
movdqu %xmm0, 0(%rax)
cmp $8, %r10
jae L98
mov $0, %rcx
pinsrq $1, %rcx, %xmm0
mov %r10, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $0, %xmm0, %rcx
and %r11, %rcx
pinsrq $0, %rcx, %xmm0
jmp L99
L98:
mov %r10, %rcx
sub $8, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $1, %xmm0, %rcx
and %r11, %rcx
pinsrq $1, %rcx, %xmm0
L99:
pshufb %xmm9, %xmm0
movdqu -32(%r9), %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r11
pinsrd $3, %r11d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
jmp L97
L96:
L97:
mov %r15, %r11
pxor %xmm0, %xmm0
mov %r11, %rax
imul $8, %rax
pinsrq $1, %rax, %xmm0
mov %r13, %rax
imul $8, %rax
pinsrq $0, %rax, %xmm0
movdqu -32(%r9), %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r11
pinsrd $3, %r11d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
movdqu 0(%rbp), %xmm0
pshufb %xmm9, %xmm0
movdqu 0(%r8), %xmm2
pxor %xmm2, %xmm0
movdqu 16(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 32(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 48(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 64(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 80(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 96(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 112(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 128(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 144(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 160(%r8), %xmm2
aesenclast %xmm2, %xmm0
pxor %xmm2, %xmm2
pshufb %xmm9, %xmm8
pxor %xmm0, %xmm8
mov 360(%rsp), %r15
movdqu %xmm8, 0(%r15)
pop %rax
pinsrq $1, %rax, %xmm6
pop %rax
pinsrq $0, %rax, %xmm6
pop %rax
pinsrq $1, %rax, %xmm7
pop %rax
pinsrq $0, %rax, %xmm7
pop %rax
pinsrq $1, %rax, %xmm8
pop %rax
pinsrq $0, %rax, %xmm8
pop %rax
pinsrq $1, %rax, %xmm9
pop %rax
pinsrq $0, %rax, %xmm9
pop %rax
pinsrq $1, %rax, %xmm10
pop %rax
pinsrq $0, %rax, %xmm10
pop %rax
pinsrq $1, %rax, %xmm11
pop %rax
pinsrq $0, %rax, %xmm11
pop %rax
pinsrq $1, %rax, %xmm12
pop %rax
pinsrq $0, %rax, %xmm12
pop %rax
pinsrq $1, %rax, %xmm13
pop %rax
pinsrq $0, %rax, %xmm13
pop %rax
pinsrq $1, %rax, %xmm14
pop %rax
pinsrq $0, %rax, %xmm14
pop %rax
pinsrq $1, %rax, %xmm15
pop %rax
pinsrq $0, %rax, %xmm15
pop %rbx
pop %rbp
pop %rdi
pop %rsi
pop %r12
pop %r13
pop %r14
pop %r15
ret
.global gcm256_encrypt_opt
gcm256_encrypt_opt:
push %r15
push %r14
push %r13
push %r12
push %rsi
push %rdi
push %rbp
push %rbx
pextrq $0, %xmm15, %rax
push %rax
pextrq $1, %xmm15, %rax
push %rax
pextrq $0, %xmm14, %rax
push %rax
pextrq $1, %xmm14, %rax
push %rax
pextrq $0, %xmm13, %rax
push %rax
pextrq $1, %xmm13, %rax
push %rax
pextrq $0, %xmm12, %rax
push %rax
pextrq $1, %xmm12, %rax
push %rax
pextrq $0, %xmm11, %rax
push %rax
pextrq $1, %xmm11, %rax
push %rax
pextrq $0, %xmm10, %rax
push %rax
pextrq $1, %xmm10, %rax
push %rax
pextrq $0, %xmm9, %rax
push %rax
pextrq $1, %xmm9, %rax
push %rax
pextrq $0, %xmm8, %rax
push %rax
pextrq $1, %xmm8, %rax
push %rax
pextrq $0, %xmm7, %rax
push %rax
pextrq $1, %xmm7, %rax
push %rax
pextrq $0, %xmm6, %rax
push %rax
pextrq $1, %xmm6, %rax
push %rax
mov %rcx, %rdi
mov %rdx, %rsi
mov %r8, %rdx
mov %r9, %rcx
mov 264(%rsp), %r8
mov 272(%rsp), %r9
mov 352(%rsp), %rbp
mov %rcx, %r13
lea 32(%r9), %r9
mov 280(%rsp), %rbx
mov %rdx, %rcx
imul $16, %rcx
mov $579005069656919567, %r10
pinsrq $0, %r10, %xmm9
mov $283686952306183, %r10
pinsrq $1, %r10, %xmm9
pxor %xmm8, %xmm8
mov %rdi, %r11
jmp L101
.balign 16
L100:
add $80, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 80(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
add $96, %r11
sub $6, %rdx
.balign 16
L101:
cmp $6, %rdx
jae L100
cmp $0, %rdx
jbe L102
mov %rdx, %r10
sub $1, %r10
imul $16, %r10
add %r10, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
cmp $1, %rdx
jne L104
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
jmp L105
L104:
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
cmp $2, %rdx
je L106
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $3, %rdx
je L108
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $4, %rdx
je L110
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
jmp L111
L110:
L111:
jmp L109
L108:
L109:
jmp L107
L106:
L107:
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
L105:
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
jmp L103
L102:
L103:
mov %rsi, %r15
cmp %rcx, %rsi
jbe L112
movdqu 0(%rbx), %xmm0
mov %rsi, %r10
and $15, %r10
cmp $8, %r10
jae L114
mov $0, %rcx
pinsrq $1, %rcx, %xmm0
mov %r10, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $0, %xmm0, %rcx
and %r11, %rcx
pinsrq $0, %rcx, %xmm0
jmp L115
L114:
mov %r10, %rcx
sub $8, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $1, %xmm0, %rcx
and %r11, %rcx
pinsrq $1, %rcx, %xmm0
L115:
pshufb %xmm9, %xmm0
movdqu -32(%r9), %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r11
pinsrd $3, %r11d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
jmp L113
L112:
L113:
mov 288(%rsp), %rdi
mov 296(%rsp), %rsi
mov 304(%rsp), %rdx
mov %r13, %rcx
movdqu %xmm9, %xmm0
movdqu 0(%r8), %xmm1
movdqu %xmm1, 0(%rbp)
pxor %xmm10, %xmm10
mov $1, %r11
pinsrq $0, %r11, %xmm10
vpaddd %xmm10, %xmm1, %xmm1
cmp $0, %rdx
jne L116
vpshufb %xmm0, %xmm1, %xmm1
movdqu %xmm1, 32(%rbp)
jmp L117
L116:
movdqu %xmm8, 32(%rbp)
add $128, %rcx
pextrq $0, %xmm1, %rbx
and $255, %rbx
vpshufb %xmm0, %xmm1, %xmm1
lea 96(%rsi), %r14
movdqu -128(%rcx), %xmm4
pxor %xmm2, %xmm2
mov $72057594037927936, %r11
pinsrq $1, %r11, %xmm2
movdqu -112(%rcx), %xmm15
mov %rcx, %r12
sub $96, %r12
vpxor %xmm4, %xmm1, %xmm9
add $6, %rbx
cmp $256, %rbx
jae L118
vpaddd %xmm2, %xmm1, %xmm10
vpaddd %xmm2, %xmm10, %xmm11
vpxor %xmm4, %xmm10, %xmm10
vpaddd %xmm2, %xmm11, %xmm12
vpxor %xmm4, %xmm11, %xmm11
vpaddd %xmm2, %xmm12, %xmm13
vpxor %xmm4, %xmm12, %xmm12
vpaddd %xmm2, %xmm13, %xmm14
vpxor %xmm4, %xmm13, %xmm13
vpaddd %xmm2, %xmm14, %xmm1
vpxor %xmm4, %xmm14, %xmm14
jmp L119
L118:
sub $256, %rbx
vpshufb %xmm0, %xmm1, %xmm6
pxor %xmm5, %xmm5
mov $1, %r11
pinsrq $0, %r11, %xmm5
vpaddd %xmm5, %xmm6, %xmm10
pxor %xmm5, %xmm5
mov $2, %r11
pinsrq $0, %r11, %xmm5
vpaddd %xmm5, %xmm6, %xmm11
vpaddd %xmm5, %xmm10, %xmm12
vpshufb %xmm0, %xmm10, %xmm10
vpaddd %xmm5, %xmm11, %xmm13
vpshufb %xmm0, %xmm11, %xmm11
vpxor %xmm4, %xmm10, %xmm10
vpaddd %xmm5, %xmm12, %xmm14
vpshufb %xmm0, %xmm12, %xmm12
vpxor %xmm4, %xmm11, %xmm11
vpaddd %xmm5, %xmm13, %xmm1
vpshufb %xmm0, %xmm13, %xmm13
vpxor %xmm4, %xmm12, %xmm12
vpshufb %xmm0, %xmm14, %xmm14
vpxor %xmm4, %xmm13, %xmm13
vpshufb %xmm0, %xmm1, %xmm1
vpxor %xmm4, %xmm14, %xmm14
L119:
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -96(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -80(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -64(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -48(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -32(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -16(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 0(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 16(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 32(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 48(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 64(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 80(%rcx), %xmm15
movdqu 96(%rcx), %xmm3
vaesenc %xmm15, %xmm9, %xmm9
vpxor 0(%rdi), %xmm3, %xmm4
vaesenc %xmm15, %xmm10, %xmm10
vpxor 16(%rdi), %xmm3, %xmm5
vaesenc %xmm15, %xmm11, %xmm11
vpxor 32(%rdi), %xmm3, %xmm6
vaesenc %xmm15, %xmm12, %xmm12
vpxor 48(%rdi), %xmm3, %xmm8
vaesenc %xmm15, %xmm13, %xmm13
vpxor 64(%rdi), %xmm3, %xmm2
vaesenc %xmm15, %xmm14, %xmm14
vpxor 80(%rdi), %xmm3, %xmm3
lea 96(%rdi), %rdi
vaesenclast %xmm4, %xmm9, %xmm9
vaesenclast %xmm5, %xmm10, %xmm10
vaesenclast %xmm6, %xmm11, %xmm11
vaesenclast %xmm8, %xmm12, %xmm12
vaesenclast %xmm2, %xmm13, %xmm13
vaesenclast %xmm3, %xmm14, %xmm14
movdqu %xmm9, 0(%rsi)
movdqu %xmm10, 16(%rsi)
movdqu %xmm11, 32(%rsi)
movdqu %xmm12, 48(%rsi)
movdqu %xmm13, 64(%rsi)
movdqu %xmm14, 80(%rsi)
lea 96(%rsi), %rsi
vpshufb %xmm0, %xmm9, %xmm8
vpshufb %xmm0, %xmm10, %xmm2
movdqu %xmm8, 112(%rbp)
vpshufb %xmm0, %xmm11, %xmm4
movdqu %xmm2, 96(%rbp)
vpshufb %xmm0, %xmm12, %xmm5
movdqu %xmm4, 80(%rbp)
vpshufb %xmm0, %xmm13, %xmm6
movdqu %xmm5, 64(%rbp)
vpshufb %xmm0, %xmm14, %xmm7
movdqu %xmm6, 48(%rbp)
movdqu -128(%rcx), %xmm4
pxor %xmm2, %xmm2
mov $72057594037927936, %r11
pinsrq $1, %r11, %xmm2
movdqu -112(%rcx), %xmm15
mov %rcx, %r12
sub $96, %r12
vpxor %xmm4, %xmm1, %xmm9
add $6, %rbx
cmp $256, %rbx
jae L120
vpaddd %xmm2, %xmm1, %xmm10
vpaddd %xmm2, %xmm10, %xmm11
vpxor %xmm4, %xmm10, %xmm10
vpaddd %xmm2, %xmm11, %xmm12
vpxor %xmm4, %xmm11, %xmm11
vpaddd %xmm2, %xmm12, %xmm13
vpxor %xmm4, %xmm12, %xmm12
vpaddd %xmm2, %xmm13, %xmm14
vpxor %xmm4, %xmm13, %xmm13
vpaddd %xmm2, %xmm14, %xmm1
vpxor %xmm4, %xmm14, %xmm14
jmp L121
L120:
sub $256, %rbx
vpshufb %xmm0, %xmm1, %xmm6
pxor %xmm5, %xmm5
mov $1, %r11
pinsrq $0, %r11, %xmm5
vpaddd %xmm5, %xmm6, %xmm10
pxor %xmm5, %xmm5
mov $2, %r11
pinsrq $0, %r11, %xmm5
vpaddd %xmm5, %xmm6, %xmm11
vpaddd %xmm5, %xmm10, %xmm12
vpshufb %xmm0, %xmm10, %xmm10
vpaddd %xmm5, %xmm11, %xmm13
vpshufb %xmm0, %xmm11, %xmm11
vpxor %xmm4, %xmm10, %xmm10
vpaddd %xmm5, %xmm12, %xmm14
vpshufb %xmm0, %xmm12, %xmm12
vpxor %xmm4, %xmm11, %xmm11
vpaddd %xmm5, %xmm13, %xmm1
vpshufb %xmm0, %xmm13, %xmm13
vpxor %xmm4, %xmm12, %xmm12
vpshufb %xmm0, %xmm14, %xmm14
vpxor %xmm4, %xmm13, %xmm13
vpshufb %xmm0, %xmm1, %xmm1
vpxor %xmm4, %xmm14, %xmm14
L121:
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -96(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -80(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -64(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -48(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -32(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -16(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 0(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 16(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 32(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 48(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 64(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 80(%rcx), %xmm15
movdqu 96(%rcx), %xmm3
vaesenc %xmm15, %xmm9, %xmm9
vpxor 0(%rdi), %xmm3, %xmm4
vaesenc %xmm15, %xmm10, %xmm10
vpxor 16(%rdi), %xmm3, %xmm5
vaesenc %xmm15, %xmm11, %xmm11
vpxor 32(%rdi), %xmm3, %xmm6
vaesenc %xmm15, %xmm12, %xmm12
vpxor 48(%rdi), %xmm3, %xmm8
vaesenc %xmm15, %xmm13, %xmm13
vpxor 64(%rdi), %xmm3, %xmm2
vaesenc %xmm15, %xmm14, %xmm14
vpxor 80(%rdi), %xmm3, %xmm3
lea 96(%rdi), %rdi
vaesenclast %xmm4, %xmm9, %xmm9
vaesenclast %xmm5, %xmm10, %xmm10
vaesenclast %xmm6, %xmm11, %xmm11
vaesenclast %xmm8, %xmm12, %xmm12
vaesenclast %xmm2, %xmm13, %xmm13
vaesenclast %xmm3, %xmm14, %xmm14
movdqu %xmm9, 0(%rsi)
movdqu %xmm10, 16(%rsi)
movdqu %xmm11, 32(%rsi)
movdqu %xmm12, 48(%rsi)
movdqu %xmm13, 64(%rsi)
movdqu %xmm14, 80(%rsi)
lea 96(%rsi), %rsi
sub $12, %rdx
movdqu 32(%rbp), %xmm8
pxor %xmm2, %xmm2
mov $72057594037927936, %r11
pinsrq $1, %r11, %xmm2
vpxor %xmm4, %xmm4, %xmm4
movdqu -128(%rcx), %xmm15
vpaddd %xmm2, %xmm1, %xmm10
vpaddd %xmm2, %xmm10, %xmm11
vpaddd %xmm2, %xmm11, %xmm12
vpaddd %xmm2, %xmm12, %xmm13
vpaddd %xmm2, %xmm13, %xmm14
vpxor %xmm15, %xmm1, %xmm9
movdqu %xmm4, 16(%rbp)
jmp L123
.balign 16
L122:
add $6, %rbx
cmp $256, %rbx
jb L124
mov $579005069656919567, %r11
pinsrq $0, %r11, %xmm0
mov $283686952306183, %r11
pinsrq $1, %r11, %xmm0
vpshufb %xmm0, %xmm1, %xmm6
pxor %xmm5, %xmm5
mov $1, %r11
pinsrq $0, %r11, %xmm5
vpaddd %xmm5, %xmm6, %xmm10
pxor %xmm5, %xmm5
mov $2, %r11
pinsrq $0, %r11, %xmm5
vpaddd %xmm5, %xmm6, %xmm11
movdqu -32(%r9), %xmm3
vpaddd %xmm5, %xmm10, %xmm12
vpshufb %xmm0, %xmm10, %xmm10
vpaddd %xmm5, %xmm11, %xmm13
vpshufb %xmm0, %xmm11, %xmm11
vpxor %xmm15, %xmm10, %xmm10
vpaddd %xmm5, %xmm12, %xmm14
vpshufb %xmm0, %xmm12, %xmm12
vpxor %xmm15, %xmm11, %xmm11
vpaddd %xmm5, %xmm13, %xmm1
vpshufb %xmm0, %xmm13, %xmm13
vpshufb %xmm0, %xmm14, %xmm14
vpshufb %xmm0, %xmm1, %xmm1
sub $256, %rbx
jmp L125
L124:
movdqu -32(%r9), %xmm3
vpaddd %xmm14, %xmm2, %xmm1
vpxor %xmm15, %xmm10, %xmm10
vpxor %xmm15, %xmm11, %xmm11
L125:
movdqu %xmm1, 128(%rbp)
vpclmulqdq $16, %xmm3, %xmm7, %xmm5
vpxor %xmm15, %xmm12, %xmm12
movdqu -112(%rcx), %xmm2
vpclmulqdq $1, %xmm3, %xmm7, %xmm6
vaesenc %xmm2, %xmm9, %xmm9
movdqu 48(%rbp), %xmm0
vpxor %xmm15, %xmm13, %xmm13
vpclmulqdq $0, %xmm3, %xmm7, %xmm1
vaesenc %xmm2, %xmm10, %xmm10
vpxor %xmm15, %xmm14, %xmm14
vpclmulqdq $17, %xmm3, %xmm7, %xmm7
vaesenc %xmm2, %xmm11, %xmm11
movdqu -16(%r9), %xmm3
vaesenc %xmm2, %xmm12, %xmm12
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $0, %xmm3, %xmm0, %xmm5
vpxor %xmm4, %xmm8, %xmm8
vaesenc %xmm2, %xmm13, %xmm13
vpxor %xmm5, %xmm1, %xmm4
vpclmulqdq $16, %xmm3, %xmm0, %xmm1
vaesenc %xmm2, %xmm14, %xmm14
movdqu -96(%rcx), %xmm15
vpclmulqdq $1, %xmm3, %xmm0, %xmm2
vaesenc %xmm15, %xmm9, %xmm9
vpxor 16(%rbp), %xmm8, %xmm8
vpclmulqdq $17, %xmm3, %xmm0, %xmm3
movdqu 64(%rbp), %xmm0
vaesenc %xmm15, %xmm10, %xmm10
movbeq 88(%r14), %r13
vaesenc %xmm15, %xmm11, %xmm11
movbeq 80(%r14), %r12
vaesenc %xmm15, %xmm12, %xmm12
movq %r13, 32(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
movq %r12, 40(%rbp)
movdqu 16(%r9), %xmm5
vaesenc %xmm15, %xmm14, %xmm14
movdqu -80(%rcx), %xmm15
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm2, %xmm6, %xmm6
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vaesenc %xmm15, %xmm10, %xmm10
vpxor %xmm3, %xmm7, %xmm7
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vaesenc %xmm15, %xmm11, %xmm11
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 80(%rbp), %xmm0
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vaesenc %xmm15, %xmm14, %xmm14
movdqu -64(%rcx), %xmm15
vpxor %xmm2, %xmm6, %xmm6
vpclmulqdq $0, %xmm1, %xmm0, %xmm2
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $16, %xmm1, %xmm0, %xmm3
vaesenc %xmm15, %xmm10, %xmm10
movbeq 72(%r14), %r13
vpxor %xmm5, %xmm7, %xmm7
vpclmulqdq $1, %xmm1, %xmm0, %xmm5
vaesenc %xmm15, %xmm11, %xmm11
movbeq 64(%r14), %r12
vpclmulqdq $17, %xmm1, %xmm0, %xmm1
movdqu 96(%rbp), %xmm0
vaesenc %xmm15, %xmm12, %xmm12
movq %r13, 48(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
movq %r12, 56(%rbp)
vpxor %xmm2, %xmm4, %xmm4
movdqu 64(%r9), %xmm2
vaesenc %xmm15, %xmm14, %xmm14
movdqu -48(%rcx), %xmm15
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $0, %xmm2, %xmm0, %xmm3
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $16, %xmm2, %xmm0, %xmm5
vaesenc %xmm15, %xmm10, %xmm10
movbeq 56(%r14), %r13
vpxor %xmm1, %xmm7, %xmm7
vpclmulqdq $1, %xmm2, %xmm0, %xmm1
vpxor 112(%rbp), %xmm8, %xmm8
vaesenc %xmm15, %xmm11, %xmm11
movbeq 48(%r14), %r12
vpclmulqdq $17, %xmm2, %xmm0, %xmm2
vaesenc %xmm15, %xmm12, %xmm12
movq %r13, 64(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
movq %r12, 72(%rbp)
vpxor %xmm3, %xmm4, %xmm4
movdqu 80(%r9), %xmm3
vaesenc %xmm15, %xmm14, %xmm14
movdqu -32(%rcx), %xmm15
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $16, %xmm3, %xmm8, %xmm5
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $1, %xmm3, %xmm8, %xmm1
vaesenc %xmm15, %xmm10, %xmm10
movbeq 40(%r14), %r13
vpxor %xmm2, %xmm7, %xmm7
vpclmulqdq $0, %xmm3, %xmm8, %xmm2
vaesenc %xmm15, %xmm11, %xmm11
movbeq 32(%r14), %r12
vpclmulqdq $17, %xmm3, %xmm8, %xmm8
vaesenc %xmm15, %xmm12, %xmm12
movq %r13, 80(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
movq %r12, 88(%rbp)
vpxor %xmm5, %xmm6, %xmm6
vaesenc %xmm15, %xmm14, %xmm14
vpxor %xmm1, %xmm6, %xmm6
movdqu -16(%rcx), %xmm15
vpslldq $8, %xmm6, %xmm5
vpxor %xmm2, %xmm4, %xmm4
pxor %xmm3, %xmm3
mov $13979173243358019584, %r11
pinsrq $1, %r11, %xmm3
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm8, %xmm7, %xmm7
vaesenc %xmm15, %xmm10, %xmm10
vpxor %xmm5, %xmm4, %xmm4
movbeq 24(%r14), %r13
vaesenc %xmm15, %xmm11, %xmm11
movbeq 16(%r14), %r12
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
movq %r13, 96(%rbp)
vaesenc %xmm15, %xmm12, %xmm12
movq %r12, 104(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 0(%rcx), %xmm1
vaesenc %xmm1, %xmm9, %xmm9
movdqu 16(%rcx), %xmm15
vaesenc %xmm1, %xmm10, %xmm10
vpsrldq $8, %xmm6, %xmm6
vaesenc %xmm1, %xmm11, %xmm11
vpxor %xmm6, %xmm7, %xmm7
vaesenc %xmm1, %xmm12, %xmm12
vpxor %xmm0, %xmm4, %xmm4
movbeq 8(%r14), %r13
vaesenc %xmm1, %xmm13, %xmm13
movbeq 0(%r14), %r12
vaesenc %xmm1, %xmm14, %xmm14
movdqu 32(%rcx), %xmm1
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
vaesenc %xmm1, %xmm9, %xmm9
vaesenc %xmm1, %xmm10, %xmm10
vaesenc %xmm1, %xmm11, %xmm11
vaesenc %xmm1, %xmm12, %xmm12
vaesenc %xmm1, %xmm13, %xmm13
movdqu 48(%rcx), %xmm15
vaesenc %xmm1, %xmm14, %xmm14
movdqu 64(%rcx), %xmm1
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
vaesenc %xmm1, %xmm9, %xmm9
vaesenc %xmm1, %xmm10, %xmm10
vaesenc %xmm1, %xmm11, %xmm11
vaesenc %xmm1, %xmm12, %xmm12
vaesenc %xmm1, %xmm13, %xmm13
movdqu 80(%rcx), %xmm15
vaesenc %xmm1, %xmm14, %xmm14
movdqu 96(%rcx), %xmm1
vaesenc %xmm15, %xmm9, %xmm9
movdqu %xmm7, 16(%rbp)
vpalignr $8, %xmm4, %xmm4, %xmm8
vaesenc %xmm15, %xmm10, %xmm10
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor 0(%rdi), %xmm1, %xmm2
vaesenc %xmm15, %xmm11, %xmm11
vpxor 16(%rdi), %xmm1, %xmm0
vaesenc %xmm15, %xmm12, %xmm12
vpxor 32(%rdi), %xmm1, %xmm5
vaesenc %xmm15, %xmm13, %xmm13
vpxor 48(%rdi), %xmm1, %xmm6
vaesenc %xmm15, %xmm14, %xmm14
vpxor 64(%rdi), %xmm1, %xmm7
vpxor 80(%rdi), %xmm1, %xmm3
movdqu 128(%rbp), %xmm1
vaesenclast %xmm2, %xmm9, %xmm9
pxor %xmm2, %xmm2
mov $72057594037927936, %r11
pinsrq $1, %r11, %xmm2
vaesenclast %xmm0, %xmm10, %xmm10
vpaddd %xmm2, %xmm1, %xmm0
movq %r13, 112(%rbp)
lea 96(%rdi), %rdi
vaesenclast %xmm5, %xmm11, %xmm11
vpaddd %xmm2, %xmm0, %xmm5
movq %r12, 120(%rbp)
lea 96(%rsi), %rsi
movdqu -128(%rcx), %xmm15
vaesenclast %xmm6, %xmm12, %xmm12
vpaddd %xmm2, %xmm5, %xmm6
vaesenclast %xmm7, %xmm13, %xmm13
vpaddd %xmm2, %xmm6, %xmm7
vaesenclast %xmm3, %xmm14, %xmm14
vpaddd %xmm2, %xmm7, %xmm3
sub $6, %rdx
add $96, %r14
cmp $0, %rdx
jbe L126
movdqu %xmm9, -96(%rsi)
vpxor %xmm15, %xmm1, %xmm9
movdqu %xmm10, -80(%rsi)
movdqu %xmm0, %xmm10
movdqu %xmm11, -64(%rsi)
movdqu %xmm5, %xmm11
movdqu %xmm12, -48(%rsi)
movdqu %xmm6, %xmm12
movdqu %xmm13, -32(%rsi)
movdqu %xmm7, %xmm13
movdqu %xmm14, -16(%rsi)
movdqu %xmm3, %xmm14
movdqu 32(%rbp), %xmm7
jmp L127
L126:
vpxor 16(%rbp), %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
L127:
.balign 16
L123:
cmp $0, %rdx
ja L122
movdqu 32(%rbp), %xmm7
movdqu %xmm1, 32(%rbp)
pxor %xmm4, %xmm4
movdqu %xmm4, 16(%rbp)
movdqu -32(%r9), %xmm3
vpclmulqdq $0, %xmm3, %xmm7, %xmm1
vpclmulqdq $16, %xmm3, %xmm7, %xmm5
movdqu 48(%rbp), %xmm0
vpclmulqdq $1, %xmm3, %xmm7, %xmm6
vpclmulqdq $17, %xmm3, %xmm7, %xmm7
movdqu -16(%r9), %xmm3
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $0, %xmm3, %xmm0, %xmm5
vpxor %xmm4, %xmm8, %xmm8
vpxor %xmm5, %xmm1, %xmm4
vpclmulqdq $16, %xmm3, %xmm0, %xmm1
vpclmulqdq $1, %xmm3, %xmm0, %xmm2
vpxor 16(%rbp), %xmm8, %xmm8
vpclmulqdq $17, %xmm3, %xmm0, %xmm3
movdqu 64(%rbp), %xmm0
movdqu 16(%r9), %xmm5
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpxor %xmm3, %xmm7, %xmm7
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 80(%rbp), %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpclmulqdq $0, %xmm1, %xmm0, %xmm2
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $16, %xmm1, %xmm0, %xmm3
vpxor %xmm5, %xmm7, %xmm7
vpclmulqdq $1, %xmm1, %xmm0, %xmm5
vpclmulqdq $17, %xmm1, %xmm0, %xmm1
movdqu 96(%rbp), %xmm0
vpxor %xmm2, %xmm4, %xmm4
movdqu 64(%r9), %xmm2
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $0, %xmm2, %xmm0, %xmm3
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $16, %xmm2, %xmm0, %xmm5
vpxor %xmm1, %xmm7, %xmm7
vpclmulqdq $1, %xmm2, %xmm0, %xmm1
vpxor 112(%rbp), %xmm8, %xmm8
vpclmulqdq $17, %xmm2, %xmm0, %xmm2
vpxor %xmm3, %xmm4, %xmm4
movdqu 80(%r9), %xmm3
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $16, %xmm3, %xmm8, %xmm5
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $1, %xmm3, %xmm8, %xmm1
vpxor %xmm2, %xmm7, %xmm7
vpclmulqdq $0, %xmm3, %xmm8, %xmm2
vpclmulqdq $17, %xmm3, %xmm8, %xmm8
vpxor %xmm5, %xmm6, %xmm6
vpxor %xmm1, %xmm6, %xmm6
vpxor %xmm2, %xmm4, %xmm4
pxor %xmm3, %xmm3
mov $3254779904, %rax
pinsrd $3, %eax, %xmm3
vpxor %xmm8, %xmm7, %xmm7
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
mov $579005069656919567, %r12
pinsrq $0, %r12, %xmm0
mov $283686952306183, %r12
pinsrq $1, %r12, %xmm0
movdqu %xmm9, -96(%rsi)
vpshufb %xmm0, %xmm9, %xmm9
vpxor %xmm7, %xmm1, %xmm1
movdqu %xmm10, -80(%rsi)
vpshufb %xmm0, %xmm10, %xmm10
movdqu %xmm11, -64(%rsi)
vpshufb %xmm0, %xmm11, %xmm11
movdqu %xmm12, -48(%rsi)
vpshufb %xmm0, %xmm12, %xmm12
movdqu %xmm13, -32(%rsi)
vpshufb %xmm0, %xmm13, %xmm13
movdqu %xmm14, -16(%rsi)
vpshufb %xmm0, %xmm14, %xmm14
pxor %xmm4, %xmm4
movdqu %xmm14, %xmm7
movdqu %xmm4, 16(%rbp)
movdqu %xmm13, 48(%rbp)
movdqu %xmm12, 64(%rbp)
movdqu %xmm11, 80(%rbp)
movdqu %xmm10, 96(%rbp)
movdqu %xmm9, 112(%rbp)
movdqu -32(%r9), %xmm3
vpclmulqdq $0, %xmm3, %xmm7, %xmm1
vpclmulqdq $16, %xmm3, %xmm7, %xmm5
movdqu 48(%rbp), %xmm0
vpclmulqdq $1, %xmm3, %xmm7, %xmm6
vpclmulqdq $17, %xmm3, %xmm7, %xmm7
movdqu -16(%r9), %xmm3
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $0, %xmm3, %xmm0, %xmm5
vpxor %xmm4, %xmm8, %xmm8
vpxor %xmm5, %xmm1, %xmm4
vpclmulqdq $16, %xmm3, %xmm0, %xmm1
vpclmulqdq $1, %xmm3, %xmm0, %xmm2
vpxor 16(%rbp), %xmm8, %xmm8
vpclmulqdq $17, %xmm3, %xmm0, %xmm3
movdqu 64(%rbp), %xmm0
movdqu 16(%r9), %xmm5
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpxor %xmm3, %xmm7, %xmm7
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 80(%rbp), %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpclmulqdq $0, %xmm1, %xmm0, %xmm2
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $16, %xmm1, %xmm0, %xmm3
vpxor %xmm5, %xmm7, %xmm7
vpclmulqdq $1, %xmm1, %xmm0, %xmm5
vpclmulqdq $17, %xmm1, %xmm0, %xmm1
movdqu 96(%rbp), %xmm0
vpxor %xmm2, %xmm4, %xmm4
movdqu 64(%r9), %xmm2
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $0, %xmm2, %xmm0, %xmm3
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $16, %xmm2, %xmm0, %xmm5
vpxor %xmm1, %xmm7, %xmm7
vpclmulqdq $1, %xmm2, %xmm0, %xmm1
vpxor 112(%rbp), %xmm8, %xmm8
vpclmulqdq $17, %xmm2, %xmm0, %xmm2
vpxor %xmm3, %xmm4, %xmm4
movdqu 80(%r9), %xmm3
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $16, %xmm3, %xmm8, %xmm5
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $1, %xmm3, %xmm8, %xmm1
vpxor %xmm2, %xmm7, %xmm7
vpclmulqdq $0, %xmm3, %xmm8, %xmm2
vpclmulqdq $17, %xmm3, %xmm8, %xmm8
vpxor %xmm5, %xmm6, %xmm6
vpxor %xmm1, %xmm6, %xmm6
vpxor %xmm2, %xmm4, %xmm4
pxor %xmm3, %xmm3
mov $3254779904, %rax
pinsrd $3, %eax, %xmm3
vpxor %xmm8, %xmm7, %xmm7
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
sub $128, %rcx
L117:
movdqu 32(%rbp), %xmm11
mov %rcx, %r8
mov 312(%rsp), %rax
mov 320(%rsp), %rdi
mov 328(%rsp), %rdx
mov %rdx, %r14
mov $579005069656919567, %r12
pinsrq $0, %r12, %xmm9
mov $283686952306183, %r12
pinsrq $1, %r12, %xmm9
pshufb %xmm9, %xmm11
pxor %xmm10, %xmm10
mov $1, %rbx
pinsrd $0, %ebx, %xmm10
mov %rax, %r11
mov %rdi, %r10
mov $0, %rbx
jmp L129
.balign 16
L128:
movdqu %xmm11, %xmm0
pshufb %xmm9, %xmm0
movdqu 0(%r8), %xmm2
pxor %xmm2, %xmm0
movdqu 16(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 32(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 48(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 64(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 80(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 96(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 112(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 128(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 144(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 160(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 176(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 192(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 208(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 224(%r8), %xmm2
aesenclast %xmm2, %xmm0
pxor %xmm2, %xmm2
movdqu 0(%r11), %xmm2
pxor %xmm0, %xmm2
movdqu %xmm2, 0(%r10)
add $1, %rbx
add $16, %r11
add $16, %r10
paddd %xmm10, %xmm11
.balign 16
L129:
cmp %rdx, %rbx
jne L128
mov %rdi, %r11
jmp L131
.balign 16
L130:
add $80, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 80(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
add $96, %r11
sub $6, %rdx
.balign 16
L131:
cmp $6, %rdx
jae L130
cmp $0, %rdx
jbe L132
mov %rdx, %r10
sub $1, %r10
imul $16, %r10
add %r10, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
cmp $1, %rdx
jne L134
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
jmp L135
L134:
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
cmp $2, %rdx
je L136
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $3, %rdx
je L138
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $4, %rdx
je L140
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
jmp L141
L140:
L141:
jmp L139
L138:
L139:
jmp L137
L136:
L137:
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
L135:
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
jmp L133
L132:
L133:
add 304(%rsp), %r14
imul $16, %r14
mov 344(%rsp), %r13
cmp %r14, %r13
jbe L142
mov 336(%rsp), %rax
mov %r13, %r10
and $15, %r10
movdqu %xmm11, %xmm0
pshufb %xmm9, %xmm0
movdqu 0(%r8), %xmm2
pxor %xmm2, %xmm0
movdqu 16(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 32(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 48(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 64(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 80(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 96(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 112(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 128(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 144(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 160(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 176(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 192(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 208(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 224(%r8), %xmm2
aesenclast %xmm2, %xmm0
pxor %xmm2, %xmm2
movdqu 0(%rax), %xmm4
pxor %xmm4, %xmm0
movdqu %xmm0, 0(%rax)
cmp $8, %r10
jae L144
mov $0, %rcx
pinsrq $1, %rcx, %xmm0
mov %r10, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $0, %xmm0, %rcx
and %r11, %rcx
pinsrq $0, %rcx, %xmm0
jmp L145
L144:
mov %r10, %rcx
sub $8, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $1, %xmm0, %rcx
and %r11, %rcx
pinsrq $1, %rcx, %xmm0
L145:
pshufb %xmm9, %xmm0
movdqu -32(%r9), %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r11
pinsrd $3, %r11d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
jmp L143
L142:
L143:
mov %r15, %r11
pxor %xmm0, %xmm0
mov %r11, %rax
imul $8, %rax
pinsrq $1, %rax, %xmm0
mov %r13, %rax
imul $8, %rax
pinsrq $0, %rax, %xmm0
movdqu -32(%r9), %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r11
pinsrd $3, %r11d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
movdqu 0(%rbp), %xmm0
pshufb %xmm9, %xmm0
movdqu 0(%r8), %xmm2
pxor %xmm2, %xmm0
movdqu 16(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 32(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 48(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 64(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 80(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 96(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 112(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 128(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 144(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 160(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 176(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 192(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 208(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 224(%r8), %xmm2
aesenclast %xmm2, %xmm0
pxor %xmm2, %xmm2
pshufb %xmm9, %xmm8
pxor %xmm0, %xmm8
mov 360(%rsp), %r15
movdqu %xmm8, 0(%r15)
pop %rax
pinsrq $1, %rax, %xmm6
pop %rax
pinsrq $0, %rax, %xmm6
pop %rax
pinsrq $1, %rax, %xmm7
pop %rax
pinsrq $0, %rax, %xmm7
pop %rax
pinsrq $1, %rax, %xmm8
pop %rax
pinsrq $0, %rax, %xmm8
pop %rax
pinsrq $1, %rax, %xmm9
pop %rax
pinsrq $0, %rax, %xmm9
pop %rax
pinsrq $1, %rax, %xmm10
pop %rax
pinsrq $0, %rax, %xmm10
pop %rax
pinsrq $1, %rax, %xmm11
pop %rax
pinsrq $0, %rax, %xmm11
pop %rax
pinsrq $1, %rax, %xmm12
pop %rax
pinsrq $0, %rax, %xmm12
pop %rax
pinsrq $1, %rax, %xmm13
pop %rax
pinsrq $0, %rax, %xmm13
pop %rax
pinsrq $1, %rax, %xmm14
pop %rax
pinsrq $0, %rax, %xmm14
pop %rax
pinsrq $1, %rax, %xmm15
pop %rax
pinsrq $0, %rax, %xmm15
pop %rbx
pop %rbp
pop %rdi
pop %rsi
pop %r12
pop %r13
pop %r14
pop %r15
ret
.global gcm128_decrypt_opt
gcm128_decrypt_opt:
push %r15
push %r14
push %r13
push %r12
push %rsi
push %rdi
push %rbp
push %rbx
pextrq $0, %xmm15, %rax
push %rax
pextrq $1, %xmm15, %rax
push %rax
pextrq $0, %xmm14, %rax
push %rax
pextrq $1, %xmm14, %rax
push %rax
pextrq $0, %xmm13, %rax
push %rax
pextrq $1, %xmm13, %rax
push %rax
pextrq $0, %xmm12, %rax
push %rax
pextrq $1, %xmm12, %rax
push %rax
pextrq $0, %xmm11, %rax
push %rax
pextrq $1, %xmm11, %rax
push %rax
pextrq $0, %xmm10, %rax
push %rax
pextrq $1, %xmm10, %rax
push %rax
pextrq $0, %xmm9, %rax
push %rax
pextrq $1, %xmm9, %rax
push %rax
pextrq $0, %xmm8, %rax
push %rax
pextrq $1, %xmm8, %rax
push %rax
pextrq $0, %xmm7, %rax
push %rax
pextrq $1, %xmm7, %rax
push %rax
pextrq $0, %xmm6, %rax
push %rax
pextrq $1, %xmm6, %rax
push %rax
mov %rcx, %rdi
mov %rdx, %rsi
mov %r8, %rdx
mov %r9, %rcx
mov 264(%rsp), %r8
mov 272(%rsp), %r9
mov 352(%rsp), %rbp
mov %rcx, %r13
lea 32(%r9), %r9
mov 280(%rsp), %rbx
mov %rdx, %rcx
imul $16, %rcx
mov $579005069656919567, %r10
pinsrq $0, %r10, %xmm9
mov $283686952306183, %r10
pinsrq $1, %r10, %xmm9
pxor %xmm8, %xmm8
mov %rdi, %r11
jmp L147
.balign 16
L146:
add $80, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 80(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
add $96, %r11
sub $6, %rdx
.balign 16
L147:
cmp $6, %rdx
jae L146
cmp $0, %rdx
jbe L148
mov %rdx, %r10
sub $1, %r10
imul $16, %r10
add %r10, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
cmp $1, %rdx
jne L150
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
jmp L151
L150:
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
cmp $2, %rdx
je L152
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $3, %rdx
je L154
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $4, %rdx
je L156
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
jmp L157
L156:
L157:
jmp L155
L154:
L155:
jmp L153
L152:
L153:
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
L151:
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
jmp L149
L148:
L149:
mov %rsi, %r15
cmp %rcx, %rsi
jbe L158
movdqu 0(%rbx), %xmm0
mov %rsi, %r10
and $15, %r10
cmp $8, %r10
jae L160
mov $0, %rcx
pinsrq $1, %rcx, %xmm0
mov %r10, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $0, %xmm0, %rcx
and %r11, %rcx
pinsrq $0, %rcx, %xmm0
jmp L161
L160:
mov %r10, %rcx
sub $8, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $1, %xmm0, %rcx
and %r11, %rcx
pinsrq $1, %rcx, %xmm0
L161:
pshufb %xmm9, %xmm0
movdqu -32(%r9), %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r11
pinsrd $3, %r11d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
jmp L159
L158:
L159:
mov 288(%rsp), %rdi
mov 296(%rsp), %rsi
mov 304(%rsp), %rdx
mov %r13, %rcx
movdqu %xmm9, %xmm0
movdqu 0(%r8), %xmm1
movdqu %xmm1, 0(%rbp)
pxor %xmm10, %xmm10
mov $1, %r11
pinsrq $0, %r11, %xmm10
vpaddd %xmm10, %xmm1, %xmm1
cmp $0, %rdx
jne L162
vpshufb %xmm0, %xmm1, %xmm1
movdqu %xmm1, 32(%rbp)
jmp L163
L162:
movdqu %xmm8, 32(%rbp)
add $128, %rcx
pextrq $0, %xmm1, %rbx
and $255, %rbx
vpshufb %xmm0, %xmm1, %xmm1
lea 96(%rdi), %r14
movdqu 32(%rbp), %xmm8
movdqu 80(%rdi), %xmm7
movdqu 64(%rdi), %xmm4
movdqu 48(%rdi), %xmm5
movdqu 32(%rdi), %xmm6
vpshufb %xmm0, %xmm7, %xmm7
movdqu 16(%rdi), %xmm2
vpshufb %xmm0, %xmm4, %xmm4
movdqu 0(%rdi), %xmm3
vpshufb %xmm0, %xmm5, %xmm5
movdqu %xmm4, 48(%rbp)
vpshufb %xmm0, %xmm6, %xmm6
movdqu %xmm5, 64(%rbp)
vpshufb %xmm0, %xmm2, %xmm2
movdqu %xmm6, 80(%rbp)
vpshufb %xmm0, %xmm3, %xmm3
movdqu %xmm2, 96(%rbp)
movdqu %xmm3, 112(%rbp)
pxor %xmm2, %xmm2
mov $72057594037927936, %r11
pinsrq $1, %r11, %xmm2
vpxor %xmm4, %xmm4, %xmm4
movdqu -128(%rcx), %xmm15
vpaddd %xmm2, %xmm1, %xmm10
vpaddd %xmm2, %xmm10, %xmm11
vpaddd %xmm2, %xmm11, %xmm12
vpaddd %xmm2, %xmm12, %xmm13
vpaddd %xmm2, %xmm13, %xmm14
vpxor %xmm15, %xmm1, %xmm9
movdqu %xmm4, 16(%rbp)
cmp $6, %rdx
jne L164
sub $96, %r14
jmp L165
L164:
L165:
jmp L167
.balign 16
L166:
add $6, %rbx
cmp $256, %rbx
jb L168
mov $579005069656919567, %r11
pinsrq $0, %r11, %xmm0
mov $283686952306183, %r11
pinsrq $1, %r11, %xmm0
vpshufb %xmm0, %xmm1, %xmm6
pxor %xmm5, %xmm5
mov $1, %r11
pinsrq $0, %r11, %xmm5
vpaddd %xmm5, %xmm6, %xmm10
pxor %xmm5, %xmm5
mov $2, %r11
pinsrq $0, %r11, %xmm5
vpaddd %xmm5, %xmm6, %xmm11
movdqu -32(%r9), %xmm3
vpaddd %xmm5, %xmm10, %xmm12
vpshufb %xmm0, %xmm10, %xmm10
vpaddd %xmm5, %xmm11, %xmm13
vpshufb %xmm0, %xmm11, %xmm11
vpxor %xmm15, %xmm10, %xmm10
vpaddd %xmm5, %xmm12, %xmm14
vpshufb %xmm0, %xmm12, %xmm12
vpxor %xmm15, %xmm11, %xmm11
vpaddd %xmm5, %xmm13, %xmm1
vpshufb %xmm0, %xmm13, %xmm13
vpshufb %xmm0, %xmm14, %xmm14
vpshufb %xmm0, %xmm1, %xmm1
sub $256, %rbx
jmp L169
L168:
movdqu -32(%r9), %xmm3
vpaddd %xmm14, %xmm2, %xmm1
vpxor %xmm15, %xmm10, %xmm10
vpxor %xmm15, %xmm11, %xmm11
L169:
movdqu %xmm1, 128(%rbp)
vpclmulqdq $16, %xmm3, %xmm7, %xmm5
vpxor %xmm15, %xmm12, %xmm12
movdqu -112(%rcx), %xmm2
vpclmulqdq $1, %xmm3, %xmm7, %xmm6
vaesenc %xmm2, %xmm9, %xmm9
movdqu 48(%rbp), %xmm0
vpxor %xmm15, %xmm13, %xmm13
vpclmulqdq $0, %xmm3, %xmm7, %xmm1
vaesenc %xmm2, %xmm10, %xmm10
vpxor %xmm15, %xmm14, %xmm14
vpclmulqdq $17, %xmm3, %xmm7, %xmm7
vaesenc %xmm2, %xmm11, %xmm11
movdqu -16(%r9), %xmm3
vaesenc %xmm2, %xmm12, %xmm12
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $0, %xmm3, %xmm0, %xmm5
vpxor %xmm4, %xmm8, %xmm8
vaesenc %xmm2, %xmm13, %xmm13
vpxor %xmm5, %xmm1, %xmm4
vpclmulqdq $16, %xmm3, %xmm0, %xmm1
vaesenc %xmm2, %xmm14, %xmm14
movdqu -96(%rcx), %xmm15
vpclmulqdq $1, %xmm3, %xmm0, %xmm2
vaesenc %xmm15, %xmm9, %xmm9
vpxor 16(%rbp), %xmm8, %xmm8
vpclmulqdq $17, %xmm3, %xmm0, %xmm3
movdqu 64(%rbp), %xmm0
vaesenc %xmm15, %xmm10, %xmm10
movbeq 88(%r14), %r13
vaesenc %xmm15, %xmm11, %xmm11
movbeq 80(%r14), %r12
vaesenc %xmm15, %xmm12, %xmm12
movq %r13, 32(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
movq %r12, 40(%rbp)
movdqu 16(%r9), %xmm5
vaesenc %xmm15, %xmm14, %xmm14
movdqu -80(%rcx), %xmm15
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm2, %xmm6, %xmm6
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vaesenc %xmm15, %xmm10, %xmm10
vpxor %xmm3, %xmm7, %xmm7
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vaesenc %xmm15, %xmm11, %xmm11
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 80(%rbp), %xmm0
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vaesenc %xmm15, %xmm14, %xmm14
movdqu -64(%rcx), %xmm15
vpxor %xmm2, %xmm6, %xmm6
vpclmulqdq $0, %xmm1, %xmm0, %xmm2
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $16, %xmm1, %xmm0, %xmm3
vaesenc %xmm15, %xmm10, %xmm10
movbeq 72(%r14), %r13
vpxor %xmm5, %xmm7, %xmm7
vpclmulqdq $1, %xmm1, %xmm0, %xmm5
vaesenc %xmm15, %xmm11, %xmm11
movbeq 64(%r14), %r12
vpclmulqdq $17, %xmm1, %xmm0, %xmm1
movdqu 96(%rbp), %xmm0
vaesenc %xmm15, %xmm12, %xmm12
movq %r13, 48(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
movq %r12, 56(%rbp)
vpxor %xmm2, %xmm4, %xmm4
movdqu 64(%r9), %xmm2
vaesenc %xmm15, %xmm14, %xmm14
movdqu -48(%rcx), %xmm15
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $0, %xmm2, %xmm0, %xmm3
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $16, %xmm2, %xmm0, %xmm5
vaesenc %xmm15, %xmm10, %xmm10
movbeq 56(%r14), %r13
vpxor %xmm1, %xmm7, %xmm7
vpclmulqdq $1, %xmm2, %xmm0, %xmm1
vpxor 112(%rbp), %xmm8, %xmm8
vaesenc %xmm15, %xmm11, %xmm11
movbeq 48(%r14), %r12
vpclmulqdq $17, %xmm2, %xmm0, %xmm2
vaesenc %xmm15, %xmm12, %xmm12
movq %r13, 64(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
movq %r12, 72(%rbp)
vpxor %xmm3, %xmm4, %xmm4
movdqu 80(%r9), %xmm3
vaesenc %xmm15, %xmm14, %xmm14
movdqu -32(%rcx), %xmm15
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $16, %xmm3, %xmm8, %xmm5
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $1, %xmm3, %xmm8, %xmm1
vaesenc %xmm15, %xmm10, %xmm10
movbeq 40(%r14), %r13
vpxor %xmm2, %xmm7, %xmm7
vpclmulqdq $0, %xmm3, %xmm8, %xmm2
vaesenc %xmm15, %xmm11, %xmm11
movbeq 32(%r14), %r12
vpclmulqdq $17, %xmm3, %xmm8, %xmm8
vaesenc %xmm15, %xmm12, %xmm12
movq %r13, 80(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
movq %r12, 88(%rbp)
vpxor %xmm5, %xmm6, %xmm6
vaesenc %xmm15, %xmm14, %xmm14
vpxor %xmm1, %xmm6, %xmm6
movdqu -16(%rcx), %xmm15
vpslldq $8, %xmm6, %xmm5
vpxor %xmm2, %xmm4, %xmm4
pxor %xmm3, %xmm3
mov $13979173243358019584, %r11
pinsrq $1, %r11, %xmm3
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm8, %xmm7, %xmm7
vaesenc %xmm15, %xmm10, %xmm10
vpxor %xmm5, %xmm4, %xmm4
movbeq 24(%r14), %r13
vaesenc %xmm15, %xmm11, %xmm11
movbeq 16(%r14), %r12
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
movq %r13, 96(%rbp)
vaesenc %xmm15, %xmm12, %xmm12
movq %r12, 104(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 0(%rcx), %xmm1
vaesenc %xmm1, %xmm9, %xmm9
movdqu 16(%rcx), %xmm15
vaesenc %xmm1, %xmm10, %xmm10
vpsrldq $8, %xmm6, %xmm6
vaesenc %xmm1, %xmm11, %xmm11
vpxor %xmm6, %xmm7, %xmm7
vaesenc %xmm1, %xmm12, %xmm12
vpxor %xmm0, %xmm4, %xmm4
movbeq 8(%r14), %r13
vaesenc %xmm1, %xmm13, %xmm13
movbeq 0(%r14), %r12
vaesenc %xmm1, %xmm14, %xmm14
movdqu 32(%rcx), %xmm1
vaesenc %xmm15, %xmm9, %xmm9
movdqu %xmm7, 16(%rbp)
vpalignr $8, %xmm4, %xmm4, %xmm8
vaesenc %xmm15, %xmm10, %xmm10
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor 0(%rdi), %xmm1, %xmm2
vaesenc %xmm15, %xmm11, %xmm11
vpxor 16(%rdi), %xmm1, %xmm0
vaesenc %xmm15, %xmm12, %xmm12
vpxor 32(%rdi), %xmm1, %xmm5
vaesenc %xmm15, %xmm13, %xmm13
vpxor 48(%rdi), %xmm1, %xmm6
vaesenc %xmm15, %xmm14, %xmm14
vpxor 64(%rdi), %xmm1, %xmm7
vpxor 80(%rdi), %xmm1, %xmm3
movdqu 128(%rbp), %xmm1
vaesenclast %xmm2, %xmm9, %xmm9
pxor %xmm2, %xmm2
mov $72057594037927936, %r11
pinsrq $1, %r11, %xmm2
vaesenclast %xmm0, %xmm10, %xmm10
vpaddd %xmm2, %xmm1, %xmm0
movq %r13, 112(%rbp)
lea 96(%rdi), %rdi
vaesenclast %xmm5, %xmm11, %xmm11
vpaddd %xmm2, %xmm0, %xmm5
movq %r12, 120(%rbp)
lea 96(%rsi), %rsi
movdqu -128(%rcx), %xmm15
vaesenclast %xmm6, %xmm12, %xmm12
vpaddd %xmm2, %xmm5, %xmm6
vaesenclast %xmm7, %xmm13, %xmm13
vpaddd %xmm2, %xmm6, %xmm7
vaesenclast %xmm3, %xmm14, %xmm14
vpaddd %xmm2, %xmm7, %xmm3
sub $6, %rdx
cmp $6, %rdx
jbe L170
add $96, %r14
jmp L171
L170:
L171:
cmp $0, %rdx
jbe L172
movdqu %xmm9, -96(%rsi)
vpxor %xmm15, %xmm1, %xmm9
movdqu %xmm10, -80(%rsi)
movdqu %xmm0, %xmm10
movdqu %xmm11, -64(%rsi)
movdqu %xmm5, %xmm11
movdqu %xmm12, -48(%rsi)
movdqu %xmm6, %xmm12
movdqu %xmm13, -32(%rsi)
movdqu %xmm7, %xmm13
movdqu %xmm14, -16(%rsi)
movdqu %xmm3, %xmm14
movdqu 32(%rbp), %xmm7
jmp L173
L172:
vpxor 16(%rbp), %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
L173:
.balign 16
L167:
cmp $0, %rdx
ja L166
movdqu %xmm1, 32(%rbp)
movdqu %xmm9, -96(%rsi)
movdqu %xmm10, -80(%rsi)
movdqu %xmm11, -64(%rsi)
movdqu %xmm12, -48(%rsi)
movdqu %xmm13, -32(%rsi)
movdqu %xmm14, -16(%rsi)
sub $128, %rcx
L163:
movdqu 32(%rbp), %xmm11
mov %rcx, %r8
mov 312(%rsp), %rax
mov 320(%rsp), %rdi
mov 328(%rsp), %rdx
mov %rdx, %r14
mov $579005069656919567, %r12
pinsrq $0, %r12, %xmm9
mov $283686952306183, %r12
pinsrq $1, %r12, %xmm9
pshufb %xmm9, %xmm11
mov %rdi, %rbx
mov %rdx, %r12
mov %rax, %rdi
mov %rdi, %r11
jmp L175
.balign 16
L174:
add $80, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 80(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
add $96, %r11
sub $6, %rdx
.balign 16
L175:
cmp $6, %rdx
jae L174
cmp $0, %rdx
jbe L176
mov %rdx, %r10
sub $1, %r10
imul $16, %r10
add %r10, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
cmp $1, %rdx
jne L178
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
jmp L179
L178:
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
cmp $2, %rdx
je L180
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $3, %rdx
je L182
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $4, %rdx
je L184
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
jmp L185
L184:
L185:
jmp L183
L182:
L183:
jmp L181
L180:
L181:
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
L179:
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
jmp L177
L176:
L177:
mov %rbx, %rdi
mov %r12, %rdx
pxor %xmm10, %xmm10
mov $1, %rbx
pinsrd $0, %ebx, %xmm10
mov %rax, %r11
mov %rdi, %r10
mov $0, %rbx
jmp L187
.balign 16
L186:
movdqu %xmm11, %xmm0
pshufb %xmm9, %xmm0
movdqu 0(%r8), %xmm2
pxor %xmm2, %xmm0
movdqu 16(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 32(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 48(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 64(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 80(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 96(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 112(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 128(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 144(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 160(%r8), %xmm2
aesenclast %xmm2, %xmm0
pxor %xmm2, %xmm2
movdqu 0(%r11), %xmm2
pxor %xmm0, %xmm2
movdqu %xmm2, 0(%r10)
add $1, %rbx
add $16, %r11
add $16, %r10
paddd %xmm10, %xmm11
.balign 16
L187:
cmp %rdx, %rbx
jne L186
add 304(%rsp), %r14
imul $16, %r14
mov 344(%rsp), %r13
cmp %r14, %r13
jbe L188
mov 336(%rsp), %rax
mov %r13, %r10
and $15, %r10
movdqu 0(%rax), %xmm0
movdqu %xmm0, %xmm10
cmp $8, %r10
jae L190
mov $0, %rcx
pinsrq $1, %rcx, %xmm0
mov %r10, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $0, %xmm0, %rcx
and %r11, %rcx
pinsrq $0, %rcx, %xmm0
jmp L191
L190:
mov %r10, %rcx
sub $8, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $1, %xmm0, %rcx
and %r11, %rcx
pinsrq $1, %rcx, %xmm0
L191:
pshufb %xmm9, %xmm0
movdqu -32(%r9), %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r11
pinsrd $3, %r11d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
movdqu %xmm11, %xmm0
pshufb %xmm9, %xmm0
movdqu 0(%r8), %xmm2
pxor %xmm2, %xmm0
movdqu 16(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 32(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 48(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 64(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 80(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 96(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 112(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 128(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 144(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 160(%r8), %xmm2
aesenclast %xmm2, %xmm0
pxor %xmm2, %xmm2
pxor %xmm0, %xmm10
movdqu %xmm10, 0(%rax)
jmp L189
L188:
L189:
mov %r15, %r11
pxor %xmm0, %xmm0
mov %r11, %rax
imul $8, %rax
pinsrq $1, %rax, %xmm0
mov %r13, %rax
imul $8, %rax
pinsrq $0, %rax, %xmm0
movdqu -32(%r9), %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r11
pinsrd $3, %r11d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
movdqu 0(%rbp), %xmm0
pshufb %xmm9, %xmm0
movdqu 0(%r8), %xmm2
pxor %xmm2, %xmm0
movdqu 16(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 32(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 48(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 64(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 80(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 96(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 112(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 128(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 144(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 160(%r8), %xmm2
aesenclast %xmm2, %xmm0
pxor %xmm2, %xmm2
pshufb %xmm9, %xmm8
pxor %xmm0, %xmm8
mov 360(%rsp), %r15
movdqu 0(%r15), %xmm0
pcmpeqd %xmm8, %xmm0
pextrq $0, %xmm0, %rdx
sub $18446744073709551615, %rdx
mov $0, %rax
adc $0, %rax
pextrq $1, %xmm0, %rdx
sub $18446744073709551615, %rdx
mov $0, %rdx
adc $0, %rdx
add %rdx, %rax
mov %rax, %rcx
pop %rax
pinsrq $1, %rax, %xmm6
pop %rax
pinsrq $0, %rax, %xmm6
pop %rax
pinsrq $1, %rax, %xmm7
pop %rax
pinsrq $0, %rax, %xmm7
pop %rax
pinsrq $1, %rax, %xmm8
pop %rax
pinsrq $0, %rax, %xmm8
pop %rax
pinsrq $1, %rax, %xmm9
pop %rax
pinsrq $0, %rax, %xmm9
pop %rax
pinsrq $1, %rax, %xmm10
pop %rax
pinsrq $0, %rax, %xmm10
pop %rax
pinsrq $1, %rax, %xmm11
pop %rax
pinsrq $0, %rax, %xmm11
pop %rax
pinsrq $1, %rax, %xmm12
pop %rax
pinsrq $0, %rax, %xmm12
pop %rax
pinsrq $1, %rax, %xmm13
pop %rax
pinsrq $0, %rax, %xmm13
pop %rax
pinsrq $1, %rax, %xmm14
pop %rax
pinsrq $0, %rax, %xmm14
pop %rax
pinsrq $1, %rax, %xmm15
pop %rax
pinsrq $0, %rax, %xmm15
pop %rbx
pop %rbp
pop %rdi
pop %rsi
pop %r12
pop %r13
pop %r14
pop %r15
mov %rcx, %rax
ret
.global gcm256_decrypt_opt
gcm256_decrypt_opt:
push %r15
push %r14
push %r13
push %r12
push %rsi
push %rdi
push %rbp
push %rbx
pextrq $0, %xmm15, %rax
push %rax
pextrq $1, %xmm15, %rax
push %rax
pextrq $0, %xmm14, %rax
push %rax
pextrq $1, %xmm14, %rax
push %rax
pextrq $0, %xmm13, %rax
push %rax
pextrq $1, %xmm13, %rax
push %rax
pextrq $0, %xmm12, %rax
push %rax
pextrq $1, %xmm12, %rax
push %rax
pextrq $0, %xmm11, %rax
push %rax
pextrq $1, %xmm11, %rax
push %rax
pextrq $0, %xmm10, %rax
push %rax
pextrq $1, %xmm10, %rax
push %rax
pextrq $0, %xmm9, %rax
push %rax
pextrq $1, %xmm9, %rax
push %rax
pextrq $0, %xmm8, %rax
push %rax
pextrq $1, %xmm8, %rax
push %rax
pextrq $0, %xmm7, %rax
push %rax
pextrq $1, %xmm7, %rax
push %rax
pextrq $0, %xmm6, %rax
push %rax
pextrq $1, %xmm6, %rax
push %rax
mov %rcx, %rdi
mov %rdx, %rsi
mov %r8, %rdx
mov %r9, %rcx
mov 264(%rsp), %r8
mov 272(%rsp), %r9
mov 352(%rsp), %rbp
mov %rcx, %r13
lea 32(%r9), %r9
mov 280(%rsp), %rbx
mov %rdx, %rcx
imul $16, %rcx
mov $579005069656919567, %r10
pinsrq $0, %r10, %xmm9
mov $283686952306183, %r10
pinsrq $1, %r10, %xmm9
pxor %xmm8, %xmm8
mov %rdi, %r11
jmp L193
.balign 16
L192:
add $80, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 80(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
add $96, %r11
sub $6, %rdx
.balign 16
L193:
cmp $6, %rdx
jae L192
cmp $0, %rdx
jbe L194
mov %rdx, %r10
sub $1, %r10
imul $16, %r10
add %r10, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
cmp $1, %rdx
jne L196
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
jmp L197
L196:
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
cmp $2, %rdx
je L198
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $3, %rdx
je L200
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $4, %rdx
je L202
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
jmp L203
L202:
L203:
jmp L201
L200:
L201:
jmp L199
L198:
L199:
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
L197:
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
jmp L195
L194:
L195:
mov %rsi, %r15
cmp %rcx, %rsi
jbe L204
movdqu 0(%rbx), %xmm0
mov %rsi, %r10
and $15, %r10
cmp $8, %r10
jae L206
mov $0, %rcx
pinsrq $1, %rcx, %xmm0
mov %r10, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $0, %xmm0, %rcx
and %r11, %rcx
pinsrq $0, %rcx, %xmm0
jmp L207
L206:
mov %r10, %rcx
sub $8, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $1, %xmm0, %rcx
and %r11, %rcx
pinsrq $1, %rcx, %xmm0
L207:
pshufb %xmm9, %xmm0
movdqu -32(%r9), %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r11
pinsrd $3, %r11d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
jmp L205
L204:
L205:
mov 288(%rsp), %rdi
mov 296(%rsp), %rsi
mov 304(%rsp), %rdx
mov %r13, %rcx
movdqu %xmm9, %xmm0
movdqu 0(%r8), %xmm1
movdqu %xmm1, 0(%rbp)
pxor %xmm10, %xmm10
mov $1, %r11
pinsrq $0, %r11, %xmm10
vpaddd %xmm10, %xmm1, %xmm1
cmp $0, %rdx
jne L208
vpshufb %xmm0, %xmm1, %xmm1
movdqu %xmm1, 32(%rbp)
jmp L209
L208:
movdqu %xmm8, 32(%rbp)
add $128, %rcx
pextrq $0, %xmm1, %rbx
and $255, %rbx
vpshufb %xmm0, %xmm1, %xmm1
lea 96(%rdi), %r14
movdqu 32(%rbp), %xmm8
movdqu 80(%rdi), %xmm7
movdqu 64(%rdi), %xmm4
movdqu 48(%rdi), %xmm5
movdqu 32(%rdi), %xmm6
vpshufb %xmm0, %xmm7, %xmm7
movdqu 16(%rdi), %xmm2
vpshufb %xmm0, %xmm4, %xmm4
movdqu 0(%rdi), %xmm3
vpshufb %xmm0, %xmm5, %xmm5
movdqu %xmm4, 48(%rbp)
vpshufb %xmm0, %xmm6, %xmm6
movdqu %xmm5, 64(%rbp)
vpshufb %xmm0, %xmm2, %xmm2
movdqu %xmm6, 80(%rbp)
vpshufb %xmm0, %xmm3, %xmm3
movdqu %xmm2, 96(%rbp)
movdqu %xmm3, 112(%rbp)
pxor %xmm2, %xmm2
mov $72057594037927936, %r11
pinsrq $1, %r11, %xmm2
vpxor %xmm4, %xmm4, %xmm4
movdqu -128(%rcx), %xmm15
vpaddd %xmm2, %xmm1, %xmm10
vpaddd %xmm2, %xmm10, %xmm11
vpaddd %xmm2, %xmm11, %xmm12
vpaddd %xmm2, %xmm12, %xmm13
vpaddd %xmm2, %xmm13, %xmm14
vpxor %xmm15, %xmm1, %xmm9
movdqu %xmm4, 16(%rbp)
cmp $6, %rdx
jne L210
sub $96, %r14
jmp L211
L210:
L211:
jmp L213
.balign 16
L212:
add $6, %rbx
cmp $256, %rbx
jb L214
mov $579005069656919567, %r11
pinsrq $0, %r11, %xmm0
mov $283686952306183, %r11
pinsrq $1, %r11, %xmm0
vpshufb %xmm0, %xmm1, %xmm6
pxor %xmm5, %xmm5
mov $1, %r11
pinsrq $0, %r11, %xmm5
vpaddd %xmm5, %xmm6, %xmm10
pxor %xmm5, %xmm5
mov $2, %r11
pinsrq $0, %r11, %xmm5
vpaddd %xmm5, %xmm6, %xmm11
movdqu -32(%r9), %xmm3
vpaddd %xmm5, %xmm10, %xmm12
vpshufb %xmm0, %xmm10, %xmm10
vpaddd %xmm5, %xmm11, %xmm13
vpshufb %xmm0, %xmm11, %xmm11
vpxor %xmm15, %xmm10, %xmm10
vpaddd %xmm5, %xmm12, %xmm14
vpshufb %xmm0, %xmm12, %xmm12
vpxor %xmm15, %xmm11, %xmm11
vpaddd %xmm5, %xmm13, %xmm1
vpshufb %xmm0, %xmm13, %xmm13
vpshufb %xmm0, %xmm14, %xmm14
vpshufb %xmm0, %xmm1, %xmm1
sub $256, %rbx
jmp L215
L214:
movdqu -32(%r9), %xmm3
vpaddd %xmm14, %xmm2, %xmm1
vpxor %xmm15, %xmm10, %xmm10
vpxor %xmm15, %xmm11, %xmm11
L215:
movdqu %xmm1, 128(%rbp)
vpclmulqdq $16, %xmm3, %xmm7, %xmm5
vpxor %xmm15, %xmm12, %xmm12
movdqu -112(%rcx), %xmm2
vpclmulqdq $1, %xmm3, %xmm7, %xmm6
vaesenc %xmm2, %xmm9, %xmm9
movdqu 48(%rbp), %xmm0
vpxor %xmm15, %xmm13, %xmm13
vpclmulqdq $0, %xmm3, %xmm7, %xmm1
vaesenc %xmm2, %xmm10, %xmm10
vpxor %xmm15, %xmm14, %xmm14
vpclmulqdq $17, %xmm3, %xmm7, %xmm7
vaesenc %xmm2, %xmm11, %xmm11
movdqu -16(%r9), %xmm3
vaesenc %xmm2, %xmm12, %xmm12
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $0, %xmm3, %xmm0, %xmm5
vpxor %xmm4, %xmm8, %xmm8
vaesenc %xmm2, %xmm13, %xmm13
vpxor %xmm5, %xmm1, %xmm4
vpclmulqdq $16, %xmm3, %xmm0, %xmm1
vaesenc %xmm2, %xmm14, %xmm14
movdqu -96(%rcx), %xmm15
vpclmulqdq $1, %xmm3, %xmm0, %xmm2
vaesenc %xmm15, %xmm9, %xmm9
vpxor 16(%rbp), %xmm8, %xmm8
vpclmulqdq $17, %xmm3, %xmm0, %xmm3
movdqu 64(%rbp), %xmm0
vaesenc %xmm15, %xmm10, %xmm10
movbeq 88(%r14), %r13
vaesenc %xmm15, %xmm11, %xmm11
movbeq 80(%r14), %r12
vaesenc %xmm15, %xmm12, %xmm12
movq %r13, 32(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
movq %r12, 40(%rbp)
movdqu 16(%r9), %xmm5
vaesenc %xmm15, %xmm14, %xmm14
movdqu -80(%rcx), %xmm15
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm2, %xmm6, %xmm6
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vaesenc %xmm15, %xmm10, %xmm10
vpxor %xmm3, %xmm7, %xmm7
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vaesenc %xmm15, %xmm11, %xmm11
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 80(%rbp), %xmm0
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vaesenc %xmm15, %xmm14, %xmm14
movdqu -64(%rcx), %xmm15
vpxor %xmm2, %xmm6, %xmm6
vpclmulqdq $0, %xmm1, %xmm0, %xmm2
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $16, %xmm1, %xmm0, %xmm3
vaesenc %xmm15, %xmm10, %xmm10
movbeq 72(%r14), %r13
vpxor %xmm5, %xmm7, %xmm7
vpclmulqdq $1, %xmm1, %xmm0, %xmm5
vaesenc %xmm15, %xmm11, %xmm11
movbeq 64(%r14), %r12
vpclmulqdq $17, %xmm1, %xmm0, %xmm1
movdqu 96(%rbp), %xmm0
vaesenc %xmm15, %xmm12, %xmm12
movq %r13, 48(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
movq %r12, 56(%rbp)
vpxor %xmm2, %xmm4, %xmm4
movdqu 64(%r9), %xmm2
vaesenc %xmm15, %xmm14, %xmm14
movdqu -48(%rcx), %xmm15
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $0, %xmm2, %xmm0, %xmm3
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $16, %xmm2, %xmm0, %xmm5
vaesenc %xmm15, %xmm10, %xmm10
movbeq 56(%r14), %r13
vpxor %xmm1, %xmm7, %xmm7
vpclmulqdq $1, %xmm2, %xmm0, %xmm1
vpxor 112(%rbp), %xmm8, %xmm8
vaesenc %xmm15, %xmm11, %xmm11
movbeq 48(%r14), %r12
vpclmulqdq $17, %xmm2, %xmm0, %xmm2
vaesenc %xmm15, %xmm12, %xmm12
movq %r13, 64(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
movq %r12, 72(%rbp)
vpxor %xmm3, %xmm4, %xmm4
movdqu 80(%r9), %xmm3
vaesenc %xmm15, %xmm14, %xmm14
movdqu -32(%rcx), %xmm15
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $16, %xmm3, %xmm8, %xmm5
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $1, %xmm3, %xmm8, %xmm1
vaesenc %xmm15, %xmm10, %xmm10
movbeq 40(%r14), %r13
vpxor %xmm2, %xmm7, %xmm7
vpclmulqdq $0, %xmm3, %xmm8, %xmm2
vaesenc %xmm15, %xmm11, %xmm11
movbeq 32(%r14), %r12
vpclmulqdq $17, %xmm3, %xmm8, %xmm8
vaesenc %xmm15, %xmm12, %xmm12
movq %r13, 80(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
movq %r12, 88(%rbp)
vpxor %xmm5, %xmm6, %xmm6
vaesenc %xmm15, %xmm14, %xmm14
vpxor %xmm1, %xmm6, %xmm6
movdqu -16(%rcx), %xmm15
vpslldq $8, %xmm6, %xmm5
vpxor %xmm2, %xmm4, %xmm4
pxor %xmm3, %xmm3
mov $13979173243358019584, %r11
pinsrq $1, %r11, %xmm3
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm8, %xmm7, %xmm7
vaesenc %xmm15, %xmm10, %xmm10
vpxor %xmm5, %xmm4, %xmm4
movbeq 24(%r14), %r13
vaesenc %xmm15, %xmm11, %xmm11
movbeq 16(%r14), %r12
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
movq %r13, 96(%rbp)
vaesenc %xmm15, %xmm12, %xmm12
movq %r12, 104(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 0(%rcx), %xmm1
vaesenc %xmm1, %xmm9, %xmm9
movdqu 16(%rcx), %xmm15
vaesenc %xmm1, %xmm10, %xmm10
vpsrldq $8, %xmm6, %xmm6
vaesenc %xmm1, %xmm11, %xmm11
vpxor %xmm6, %xmm7, %xmm7
vaesenc %xmm1, %xmm12, %xmm12
vpxor %xmm0, %xmm4, %xmm4
movbeq 8(%r14), %r13
vaesenc %xmm1, %xmm13, %xmm13
movbeq 0(%r14), %r12
vaesenc %xmm1, %xmm14, %xmm14
movdqu 32(%rcx), %xmm1
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
vaesenc %xmm1, %xmm9, %xmm9
vaesenc %xmm1, %xmm10, %xmm10
vaesenc %xmm1, %xmm11, %xmm11
vaesenc %xmm1, %xmm12, %xmm12
vaesenc %xmm1, %xmm13, %xmm13
movdqu 48(%rcx), %xmm15
vaesenc %xmm1, %xmm14, %xmm14
movdqu 64(%rcx), %xmm1
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
vaesenc %xmm1, %xmm9, %xmm9
vaesenc %xmm1, %xmm10, %xmm10
vaesenc %xmm1, %xmm11, %xmm11
vaesenc %xmm1, %xmm12, %xmm12
vaesenc %xmm1, %xmm13, %xmm13
movdqu 80(%rcx), %xmm15
vaesenc %xmm1, %xmm14, %xmm14
movdqu 96(%rcx), %xmm1
vaesenc %xmm15, %xmm9, %xmm9
movdqu %xmm7, 16(%rbp)
vpalignr $8, %xmm4, %xmm4, %xmm8
vaesenc %xmm15, %xmm10, %xmm10
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor 0(%rdi), %xmm1, %xmm2
vaesenc %xmm15, %xmm11, %xmm11
vpxor 16(%rdi), %xmm1, %xmm0
vaesenc %xmm15, %xmm12, %xmm12
vpxor 32(%rdi), %xmm1, %xmm5
vaesenc %xmm15, %xmm13, %xmm13
vpxor 48(%rdi), %xmm1, %xmm6
vaesenc %xmm15, %xmm14, %xmm14
vpxor 64(%rdi), %xmm1, %xmm7
vpxor 80(%rdi), %xmm1, %xmm3
movdqu 128(%rbp), %xmm1
vaesenclast %xmm2, %xmm9, %xmm9
pxor %xmm2, %xmm2
mov $72057594037927936, %r11
pinsrq $1, %r11, %xmm2
vaesenclast %xmm0, %xmm10, %xmm10
vpaddd %xmm2, %xmm1, %xmm0
movq %r13, 112(%rbp)
lea 96(%rdi), %rdi
vaesenclast %xmm5, %xmm11, %xmm11
vpaddd %xmm2, %xmm0, %xmm5
movq %r12, 120(%rbp)
lea 96(%rsi), %rsi
movdqu -128(%rcx), %xmm15
vaesenclast %xmm6, %xmm12, %xmm12
vpaddd %xmm2, %xmm5, %xmm6
vaesenclast %xmm7, %xmm13, %xmm13
vpaddd %xmm2, %xmm6, %xmm7
vaesenclast %xmm3, %xmm14, %xmm14
vpaddd %xmm2, %xmm7, %xmm3
sub $6, %rdx
cmp $6, %rdx
jbe L216
add $96, %r14
jmp L217
L216:
L217:
cmp $0, %rdx
jbe L218
movdqu %xmm9, -96(%rsi)
vpxor %xmm15, %xmm1, %xmm9
movdqu %xmm10, -80(%rsi)
movdqu %xmm0, %xmm10
movdqu %xmm11, -64(%rsi)
movdqu %xmm5, %xmm11
movdqu %xmm12, -48(%rsi)
movdqu %xmm6, %xmm12
movdqu %xmm13, -32(%rsi)
movdqu %xmm7, %xmm13
movdqu %xmm14, -16(%rsi)
movdqu %xmm3, %xmm14
movdqu 32(%rbp), %xmm7
jmp L219
L218:
vpxor 16(%rbp), %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
L219:
.balign 16
L213:
cmp $0, %rdx
ja L212
movdqu %xmm1, 32(%rbp)
movdqu %xmm9, -96(%rsi)
movdqu %xmm10, -80(%rsi)
movdqu %xmm11, -64(%rsi)
movdqu %xmm12, -48(%rsi)
movdqu %xmm13, -32(%rsi)
movdqu %xmm14, -16(%rsi)
sub $128, %rcx
L209:
movdqu 32(%rbp), %xmm11
mov %rcx, %r8
mov 312(%rsp), %rax
mov 320(%rsp), %rdi
mov 328(%rsp), %rdx
mov %rdx, %r14
mov $579005069656919567, %r12
pinsrq $0, %r12, %xmm9
mov $283686952306183, %r12
pinsrq $1, %r12, %xmm9
pshufb %xmm9, %xmm11
mov %rdi, %rbx
mov %rdx, %r12
mov %rax, %rdi
mov %rdi, %r11
jmp L221
.balign 16
L220:
add $80, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 80(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
add $96, %r11
sub $6, %rdx
.balign 16
L221:
cmp $6, %rdx
jae L220
cmp $0, %rdx
jbe L222
mov %rdx, %r10
sub $1, %r10
imul $16, %r10
add %r10, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
cmp $1, %rdx
jne L224
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
jmp L225
L224:
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
cmp $2, %rdx
je L226
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $3, %rdx
je L228
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $4, %rdx
je L230
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
jmp L231
L230:
L231:
jmp L229
L228:
L229:
jmp L227
L226:
L227:
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
L225:
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
jmp L223
L222:
L223:
mov %rbx, %rdi
mov %r12, %rdx
pxor %xmm10, %xmm10
mov $1, %rbx
pinsrd $0, %ebx, %xmm10
mov %rax, %r11
mov %rdi, %r10
mov $0, %rbx
jmp L233
.balign 16
L232:
movdqu %xmm11, %xmm0
pshufb %xmm9, %xmm0
movdqu 0(%r8), %xmm2
pxor %xmm2, %xmm0
movdqu 16(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 32(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 48(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 64(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 80(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 96(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 112(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 128(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 144(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 160(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 176(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 192(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 208(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 224(%r8), %xmm2
aesenclast %xmm2, %xmm0
pxor %xmm2, %xmm2
movdqu 0(%r11), %xmm2
pxor %xmm0, %xmm2
movdqu %xmm2, 0(%r10)
add $1, %rbx
add $16, %r11
add $16, %r10
paddd %xmm10, %xmm11
.balign 16
L233:
cmp %rdx, %rbx
jne L232
add 304(%rsp), %r14
imul $16, %r14
mov 344(%rsp), %r13
cmp %r14, %r13
jbe L234
mov 336(%rsp), %rax
mov %r13, %r10
and $15, %r10
movdqu 0(%rax), %xmm0
movdqu %xmm0, %xmm10
cmp $8, %r10
jae L236
mov $0, %rcx
pinsrq $1, %rcx, %xmm0
mov %r10, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $0, %xmm0, %rcx
and %r11, %rcx
pinsrq $0, %rcx, %xmm0
jmp L237
L236:
mov %r10, %rcx
sub $8, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $1, %xmm0, %rcx
and %r11, %rcx
pinsrq $1, %rcx, %xmm0
L237:
pshufb %xmm9, %xmm0
movdqu -32(%r9), %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r11
pinsrd $3, %r11d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
movdqu %xmm11, %xmm0
pshufb %xmm9, %xmm0
movdqu 0(%r8), %xmm2
pxor %xmm2, %xmm0
movdqu 16(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 32(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 48(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 64(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 80(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 96(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 112(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 128(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 144(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 160(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 176(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 192(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 208(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 224(%r8), %xmm2
aesenclast %xmm2, %xmm0
pxor %xmm2, %xmm2
pxor %xmm0, %xmm10
movdqu %xmm10, 0(%rax)
jmp L235
L234:
L235:
mov %r15, %r11
pxor %xmm0, %xmm0
mov %r11, %rax
imul $8, %rax
pinsrq $1, %rax, %xmm0
mov %r13, %rax
imul $8, %rax
pinsrq $0, %rax, %xmm0
movdqu -32(%r9), %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r11
pinsrd $3, %r11d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
movdqu 0(%rbp), %xmm0
pshufb %xmm9, %xmm0
movdqu 0(%r8), %xmm2
pxor %xmm2, %xmm0
movdqu 16(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 32(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 48(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 64(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 80(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 96(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 112(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 128(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 144(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 160(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 176(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 192(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 208(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 224(%r8), %xmm2
aesenclast %xmm2, %xmm0
pxor %xmm2, %xmm2
pshufb %xmm9, %xmm8
pxor %xmm0, %xmm8
mov 360(%rsp), %r15
movdqu 0(%r15), %xmm0
pcmpeqd %xmm8, %xmm0
pextrq $0, %xmm0, %rdx
sub $18446744073709551615, %rdx
mov $0, %rax
adc $0, %rax
pextrq $1, %xmm0, %rdx
sub $18446744073709551615, %rdx
mov $0, %rdx
adc $0, %rdx
add %rdx, %rax
mov %rax, %rcx
pop %rax
pinsrq $1, %rax, %xmm6
pop %rax
pinsrq $0, %rax, %xmm6
pop %rax
pinsrq $1, %rax, %xmm7
pop %rax
pinsrq $0, %rax, %xmm7
pop %rax
pinsrq $1, %rax, %xmm8
pop %rax
pinsrq $0, %rax, %xmm8
pop %rax
pinsrq $1, %rax, %xmm9
pop %rax
pinsrq $0, %rax, %xmm9
pop %rax
pinsrq $1, %rax, %xmm10
pop %rax
pinsrq $0, %rax, %xmm10
pop %rax
pinsrq $1, %rax, %xmm11
pop %rax
pinsrq $0, %rax, %xmm11
pop %rax
pinsrq $1, %rax, %xmm12
pop %rax
pinsrq $0, %rax, %xmm12
pop %rax
pinsrq $1, %rax, %xmm13
pop %rax
pinsrq $0, %rax, %xmm13
pop %rax
pinsrq $1, %rax, %xmm14
pop %rax
pinsrq $0, %rax, %xmm14
pop %rax
pinsrq $1, %rax, %xmm15
pop %rax
pinsrq $0, %rax, %xmm15
pop %rbx
pop %rbp
pop %rdi
pop %rsi
pop %r12
pop %r13
pop %r14
pop %r15
mov %rcx, %rax
ret
|
usenix-security-verdict/verdict
| 2,368
|
deps/libcrux/sys/hacl/c/vale/src/cpuid-x86_64-darwin.S
|
;# CPUID/XGETBV feature probes (x86_64, Darwin leading-underscore symbols).
;# Each routine returns a nonzero rax iff every probed feature bit is set,
;# and zero otherwise. CPUID clobbers rbx, which is callee-saved in both
;# major x86_64 ABIs, so rbx is parked in the volatile register r9 around
;# each CPUID and restored before returning.
.text
.global _check_aesni
;# Nonzero iff CPUID.1:ECX reports AESNI (bit 25) and PCLMULQDQ (bit 1).
_check_aesni:
mov %rbx, %r9
mov $0, %rcx
mov $1, %rax
cpuid
mov %rcx, %rax
and $33554432, %rax
shr $24, %rax
and $2, %rcx
and %rcx, %rax
mov %r9, %rbx
ret
.global _check_sha
;# Nonzero iff CPUID.(7,0):EBX reports the SHA extensions (bit 29).
_check_sha:
mov %rbx, %r9
mov $7, %rax
mov $0, %rcx
cpuid
and $536870912, %rbx
mov %rbx, %rax
mov %r9, %rbx
ret
.global _check_adx_bmi2
;# Nonzero iff CPUID.(7,0):EBX reports ADX (bit 19) and BMI2 (bit 8);
;# both masks are shifted so they line up on bit 8 before the AND.
_check_adx_bmi2:
mov %rbx, %r9
mov $7, %rax
mov $0, %rcx
cpuid
mov %rbx, %rax
and $524288, %rax
shr $11, %rax
and $256, %rbx
and %rbx, %rax
mov %r9, %rbx
ret
.global _check_avx
;# Nonzero iff CPUID.1:ECX reports AVX (bit 28).
_check_avx:
mov %rbx, %r9
mov $0, %rcx
mov $1, %rax
cpuid
mov %rcx, %rax
and $268435456, %rax
shr $27, %rax
mov %r9, %rbx
ret
.global _check_avx2
;# Nonzero iff CPUID.(7,0):EBX reports AVX2 (bit 5).
_check_avx2:
mov %rbx, %r9
mov $7, %rax
mov $0, %rcx
cpuid
and $32, %rbx
mov %rbx, %rax
mov %r9, %rbx
ret
.global _check_movbe
;# Nonzero iff CPUID.1:ECX reports MOVBE (bit 22).
_check_movbe:
mov %rbx, %r9
mov $0, %rcx
mov $1, %rax
cpuid
mov %rcx, %rax
and $4194304, %rax
shr $21, %rax
mov %r9, %rbx
ret
.global _check_sse
;# Nonzero iff CPUID.1 reports SSE4.1 (ECX bit 19), SSSE3 (ECX bit 9) and
;# SSE2 (EDX bit 26); every mask is shifted to bit 9 before combining.
_check_sse:
mov %rbx, %r9
mov $0, %rcx
mov $1, %rax
cpuid
mov %rcx, %rax
and $524288, %rax
and $512, %rcx
and $67108864, %rdx
shr $10, %rax
shr $17, %rdx
and %rdx, %rax
and %rcx, %rax
mov %r9, %rbx
ret
.global _check_rdrand
;# Nonzero iff CPUID.1:ECX reports RDRAND (bit 30).
_check_rdrand:
mov %rbx, %r9
mov $0, %rcx
mov $1, %rax
cpuid
mov %rcx, %rax
and $1073741824, %rax
shr $29, %rax
mov %r9, %rbx
ret
.global _check_avx512
;# Nonzero iff CPUID.(7,0):EBX reports AVX512F (bit 16), AVX512DQ (bit 17),
;# AVX512BW (bit 30) and AVX512VL (bit 31); all four are aligned to bit 16
;# before being ANDed together.
_check_avx512:
mov %rbx, %r9
mov $7, %rax
mov $0, %rcx
cpuid
mov %rbx, %rax
mov %rbx, %r10
mov %rbx, %r11
and $65536, %rbx
and $131072, %rax
and $1073741824, %r10
shr $1, %rax
shr $14, %r10
and %rbx, %rax
mov $2147483648, %rbx
and %rbx, %r11
shr $15, %r11
and %r10, %rax
and %r11, %rax
mov %r9, %rbx
ret
.global _check_osxsave
;# Nonzero iff CPUID.1:ECX reports OSXSAVE (bit 27), i.e. XGETBV is usable.
_check_osxsave:
mov %rbx, %r9
mov $0, %rcx
mov $1, %rax
cpuid
mov %rcx, %rax
and $134217728, %rax
shr $26, %rax
mov %r9, %rbx
ret
.global _check_avx_xcr0
;# Nonzero iff XCR0 enables both SSE (bit 1) and AVX/YMM (bit 2) state.
;# NOTE(review): callers are expected to check OSXSAVE first, or XGETBV
;# will fault — confirm against the call sites.
_check_avx_xcr0:
mov $0, %rcx
xgetbv
mov %rax, %rcx
and $4, %rax
shr $1, %rax
and $2, %rcx
and %rcx, %rax
ret
.global _check_avx512_xcr0
;# Nonzero iff XCR0 enables opmask (bit 5), ZMM_Hi256 (bit 6) and
;# Hi16_ZMM (bit 7) state; all three masks are aligned to bit 5.
_check_avx512_xcr0:
mov $0, %rcx
xgetbv
mov %rax, %rcx
mov %rax, %rdx
and $32, %rax
and $64, %rcx
and $128, %rdx
shr $2, %rdx
shr $1, %rcx
and %rdx, %rax
and %rcx, %rax
ret
|
usenix-security-verdict/verdict
| 2,381
|
deps/libcrux/sys/hacl/c/vale/src/cpuid-x86_64-linux.S
|
;# CPUID/XGETBV feature probes (x86_64, ELF symbol names, System V ABI).
;# Each routine returns a nonzero rax iff every probed feature bit is set,
;# and zero otherwise. CPUID clobbers rbx (callee-saved), so rbx is parked
;# in the volatile register r9 around each CPUID and restored on exit.
.text
.global check_aesni
;# Nonzero iff CPUID.1:ECX reports AESNI (bit 25) and PCLMULQDQ (bit 1).
check_aesni:
mov %rbx, %r9
mov $0, %rcx
mov $1, %rax
cpuid
mov %rcx, %rax
and $33554432, %rax
shr $24, %rax
and $2, %rcx
and %rcx, %rax
mov %r9, %rbx
ret
.global check_sha
;# Nonzero iff CPUID.(7,0):EBX reports the SHA extensions (bit 29).
check_sha:
mov %rbx, %r9
mov $7, %rax
mov $0, %rcx
cpuid
and $536870912, %rbx
mov %rbx, %rax
mov %r9, %rbx
ret
.global check_adx_bmi2
;# Nonzero iff CPUID.(7,0):EBX reports ADX (bit 19) and BMI2 (bit 8);
;# both masks are shifted so they line up on bit 8 before the AND.
check_adx_bmi2:
mov %rbx, %r9
mov $7, %rax
mov $0, %rcx
cpuid
mov %rbx, %rax
and $524288, %rax
shr $11, %rax
and $256, %rbx
and %rbx, %rax
mov %r9, %rbx
ret
.global check_avx
;# Nonzero iff CPUID.1:ECX reports AVX (bit 28).
check_avx:
mov %rbx, %r9
mov $0, %rcx
mov $1, %rax
cpuid
mov %rcx, %rax
and $268435456, %rax
shr $27, %rax
mov %r9, %rbx
ret
.global check_avx2
;# Nonzero iff CPUID.(7,0):EBX reports AVX2 (bit 5).
check_avx2:
mov %rbx, %r9
mov $7, %rax
mov $0, %rcx
cpuid
and $32, %rbx
mov %rbx, %rax
mov %r9, %rbx
ret
.global check_movbe
;# Nonzero iff CPUID.1:ECX reports MOVBE (bit 22).
check_movbe:
mov %rbx, %r9
mov $0, %rcx
mov $1, %rax
cpuid
mov %rcx, %rax
and $4194304, %rax
shr $21, %rax
mov %r9, %rbx
ret
.global check_sse
;# Nonzero iff CPUID.1 reports SSE4.1 (ECX bit 19), SSSE3 (ECX bit 9) and
;# SSE2 (EDX bit 26); every mask is shifted to bit 9 before combining.
check_sse:
mov %rbx, %r9
mov $0, %rcx
mov $1, %rax
cpuid
mov %rcx, %rax
and $524288, %rax
and $512, %rcx
and $67108864, %rdx
shr $10, %rax
shr $17, %rdx
and %rdx, %rax
and %rcx, %rax
mov %r9, %rbx
ret
.global check_rdrand
;# Nonzero iff CPUID.1:ECX reports RDRAND (bit 30).
check_rdrand:
mov %rbx, %r9
mov $0, %rcx
mov $1, %rax
cpuid
mov %rcx, %rax
and $1073741824, %rax
shr $29, %rax
mov %r9, %rbx
ret
.global check_avx512
;# Nonzero iff CPUID.(7,0):EBX reports AVX512F (bit 16), AVX512DQ (bit 17),
;# AVX512BW (bit 30) and AVX512VL (bit 31); all four are aligned to bit 16
;# before being ANDed together.
check_avx512:
mov %rbx, %r9
mov $7, %rax
mov $0, %rcx
cpuid
mov %rbx, %rax
mov %rbx, %r10
mov %rbx, %r11
and $65536, %rbx
and $131072, %rax
and $1073741824, %r10
shr $1, %rax
shr $14, %r10
and %rbx, %rax
mov $2147483648, %rbx
and %rbx, %r11
shr $15, %r11
and %r10, %rax
and %r11, %rax
mov %r9, %rbx
ret
.global check_osxsave
;# Nonzero iff CPUID.1:ECX reports OSXSAVE (bit 27), i.e. XGETBV is usable.
check_osxsave:
mov %rbx, %r9
mov $0, %rcx
mov $1, %rax
cpuid
mov %rcx, %rax
and $134217728, %rax
shr $26, %rax
mov %r9, %rbx
ret
.global check_avx_xcr0
;# Nonzero iff XCR0 enables both SSE (bit 1) and AVX/YMM (bit 2) state.
;# NOTE(review): callers are expected to check OSXSAVE first, or XGETBV
;# will fault — confirm against the call sites.
check_avx_xcr0:
mov $0, %rcx
xgetbv
mov %rax, %rcx
and $4, %rax
shr $1, %rax
and $2, %rcx
and %rcx, %rax
ret
.global check_avx512_xcr0
;# Nonzero iff XCR0 enables opmask (bit 5), ZMM_Hi256 (bit 6) and
;# Hi16_ZMM (bit 7) state; all three masks are aligned to bit 5.
check_avx512_xcr0:
mov $0, %rcx
xgetbv
mov %rax, %rcx
mov %rax, %rdx
and $32, %rax
and $64, %rcx
and $128, %rdx
shr $2, %rdx
shr $1, %rcx
and %rdx, %rax
and %rcx, %rax
ret
;# Mark the stack non-executable for GNU linkers.
.section .note.GNU-stack,"",%progbits
|
usenix-security-verdict/verdict
| 18,944
|
deps/libcrux/sys/hacl/c/vale/src/curve25519-x86_64-linux.S
|
.text
.global add_scalar_e
;# u64 add_scalar_e(u64 out[4], const u64 f1[4], u64 f2)
;# SysV AMD64: rdi = out, rsi = f1, rdx = f2.
;# out = f1 + f2 over 4 little-endian 64-bit limbs; the final carry bit
;# is returned in rax. rdi/rsi are caller-saved in SysV, so the push/pop
;# pair below is not strictly required; it mirrors the Win64 variant.
add_scalar_e:
push %rdi
push %rsi
;# Clear registers to propagate the carry bit
xor %r8d, %r8d
xor %r9d, %r9d
xor %r10d, %r10d
xor %r11d, %r11d
xor %eax, %eax
;# Begin addition chain
addq 0(%rsi), %rdx
movq %rdx, 0(%rdi)
adcxq 8(%rsi), %r8
movq %r8, 8(%rdi)
adcxq 16(%rsi), %r9
movq %r9, 16(%rdi)
adcxq 24(%rsi), %r10
movq %r10, 24(%rdi)
;# Return the carry bit in a register
adcx %r11, %rax
pop %rsi
pop %rdi
ret
.global fadd_e
;# void fadd_e(u64 out[4], const u64 f1[4], const u64 f2[4])
;# SysV AMD64: rdi = out, rsi = f1, rdx = f2.
;# Field addition for Curve25519: out = f1 + f2, with any carry out of
;# the 256-bit sum folded back in as carry*38 (since 2^256 ≡ 38 mod
;# 2^255-19). Constant time: the fold uses cmovc, never a branch.
fadd_e:
;# Compute the raw addition of f1 + f2
movq 0(%rdx), %r8
addq 0(%rsi), %r8
movq 8(%rdx), %r9
adcxq 8(%rsi), %r9
movq 16(%rdx), %r10
adcxq 16(%rsi), %r10
movq 24(%rdx), %r11
adcxq 24(%rsi), %r11
;# Wrap the result back into the field
;# Step 1: Compute carry*38
mov $0, %rax
mov $38, %rdx
cmovc %rdx, %rax
;# Step 2: Add carry*38 to the original sum
xor %ecx, %ecx
add %rax, %r8
adcx %rcx, %r9
movq %r9, 8(%rdi)
adcx %rcx, %r10
movq %r10, 16(%rdi)
adcx %rcx, %r11
movq %r11, 24(%rdi)
;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point
mov $0, %rax
cmovc %rdx, %rax
add %rax, %r8
movq %r8, 0(%rdi)
ret
.global fsub_e
;# void fsub_e(u64 out[4], const u64 f1[4], const u64 f2[4])
;# SysV AMD64: rdi = out, rsi = f1, rdx = f2.
;# Field subtraction for Curve25519: out = f1 - f2, with any borrow out
;# of the 256-bit difference folded back in by subtracting borrow*38.
;# Constant time: the fold uses cmovc, never a branch.
fsub_e:
;# Compute the raw substraction of f1-f2
movq 0(%rsi), %r8
subq 0(%rdx), %r8
movq 8(%rsi), %r9
sbbq 8(%rdx), %r9
movq 16(%rsi), %r10
sbbq 16(%rdx), %r10
movq 24(%rsi), %r11
sbbq 24(%rdx), %r11
;# Wrap the result back into the field
;# Step 1: Compute carry*38
mov $0, %rax
mov $38, %rcx
cmovc %rcx, %rax
;# Step 2: Substract carry*38 from the original difference
sub %rax, %r8
sbb $0, %r9
sbb $0, %r10
sbb $0, %r11
;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point
mov $0, %rax
cmovc %rcx, %rax
sub %rax, %r8
;# Store the result
movq %r8, 0(%rdi)
movq %r9, 8(%rdi)
movq %r10, 16(%rdi)
movq %r11, 24(%rdi)
ret
.global fmul_scalar_e
;# void fmul_scalar_e(u64 out[4], const u64 f1[4], u64 f2)
;# SysV AMD64: rdi = out, rsi = f1, rdx = f2 (rdx is the implicit mulx
;# multiplicand). out = f1 * f2 reduced with the carry*38 fold.
;# Saves callee-saved r13/rbx; the rdi push is not required by SysV and
;# mirrors the Win64 variant.
fmul_scalar_e:
push %rdi
push %r13
push %rbx
;# Compute the raw multiplication of f1*f2
mulxq 0(%rsi), %r8, %rcx
;# f1[0]*f2
mulxq 8(%rsi), %r9, %rbx
;# f1[1]*f2
add %rcx, %r9
mov $0, %rcx
mulxq 16(%rsi), %r10, %r13
;# f1[2]*f2
adcx %rbx, %r10
mulxq 24(%rsi), %r11, %rax
;# f1[3]*f2
adcx %r13, %r11
adcx %rcx, %rax
;# Wrap the result back into the field
;# Step 1: Compute carry*38
mov $38, %rdx
imul %rdx, %rax
;# Step 2: Fold the carry back into dst
add %rax, %r8
adcx %rcx, %r9
movq %r9, 8(%rdi)
adcx %rcx, %r10
movq %r10, 16(%rdi)
adcx %rcx, %r11
movq %r11, 24(%rdi)
;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point
mov $0, %rax
cmovc %rdx, %rax
add %rax, %r8
movq %r8, 0(%rdi)
pop %rbx
pop %r13
pop %rdi
ret
.global fmul_e
;# fmul_e — Curve25519 field multiplication using MULX/ADCX/ADOX.
;# SysV AMD64: rdi = 8-limb scratch buffer (receives the raw 512-bit
;# product), rsi = src1, rdx = destination pointer (stashed in r15),
;# rcx = src2. The 512-bit schoolbook product is written to the scratch
;# buffer, then reduced into the 4-limb destination via the hi*38 fold.
;# NOTE(review): exact C prototype/argument naming — confirm against the
;# HACL* header; the register roles above are what the code shows.
;# Saves callee-saved r13-r15/rbx. Two independent carry chains (CF via
;# adcx, OF via adox) are interleaved; do not reorder instructions.
fmul_e:
push %r13
push %r14
push %r15
push %rbx
mov %rdx, %r15
;# Compute the raw multiplication: tmp <- src1 * src2
;# Compute src1[0] * src2
movq 0(%rsi), %rdx
mulxq 0(%rcx), %r8, %r9
xor %r10d, %r10d
movq %r8, 0(%rdi)
mulxq 8(%rcx), %r10, %r11
adox %r9, %r10
movq %r10, 8(%rdi)
mulxq 16(%rcx), %rbx, %r13
adox %r11, %rbx
mulxq 24(%rcx), %r14, %rdx
adox %r13, %r14
mov $0, %rax
adox %rdx, %rax
;# Compute src1[1] * src2
movq 8(%rsi), %rdx
mulxq 0(%rcx), %r8, %r9
xor %r10d, %r10d
adcxq 8(%rdi), %r8
movq %r8, 8(%rdi)
mulxq 8(%rcx), %r10, %r11
adox %r9, %r10
adcx %rbx, %r10
movq %r10, 16(%rdi)
mulxq 16(%rcx), %rbx, %r13
adox %r11, %rbx
adcx %r14, %rbx
mov $0, %r8
mulxq 24(%rcx), %r14, %rdx
adox %r13, %r14
adcx %rax, %r14
mov $0, %rax
adox %rdx, %rax
adcx %r8, %rax
;# Compute src1[2] * src2
movq 16(%rsi), %rdx
mulxq 0(%rcx), %r8, %r9
xor %r10d, %r10d
adcxq 16(%rdi), %r8
movq %r8, 16(%rdi)
mulxq 8(%rcx), %r10, %r11
adox %r9, %r10
adcx %rbx, %r10
movq %r10, 24(%rdi)
mulxq 16(%rcx), %rbx, %r13
adox %r11, %rbx
adcx %r14, %rbx
mov $0, %r8
mulxq 24(%rcx), %r14, %rdx
adox %r13, %r14
adcx %rax, %r14
mov $0, %rax
adox %rdx, %rax
adcx %r8, %rax
;# Compute src1[3] * src2
movq 24(%rsi), %rdx
mulxq 0(%rcx), %r8, %r9
xor %r10d, %r10d
adcxq 24(%rdi), %r8
movq %r8, 24(%rdi)
mulxq 8(%rcx), %r10, %r11
adox %r9, %r10
adcx %rbx, %r10
movq %r10, 32(%rdi)
mulxq 16(%rcx), %rbx, %r13
adox %r11, %rbx
adcx %r14, %rbx
movq %rbx, 40(%rdi)
mov $0, %r8
mulxq 24(%rcx), %r14, %rdx
adox %r13, %r14
adcx %rax, %r14
movq %r14, 48(%rdi)
mov $0, %rax
adox %rdx, %rax
adcx %r8, %rax
movq %rax, 56(%rdi)
;# Line up pointers
mov %rdi, %rsi
mov %r15, %rdi
;# Wrap the result back into the field
;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo
mov $38, %rdx
mulxq 32(%rsi), %r8, %r13
xor %ecx, %ecx
adoxq 0(%rsi), %r8
mulxq 40(%rsi), %r9, %rbx
adcx %r13, %r9
adoxq 8(%rsi), %r9
mulxq 48(%rsi), %r10, %r13
adcx %rbx, %r10
adoxq 16(%rsi), %r10
mulxq 56(%rsi), %r11, %rax
adcx %r13, %r11
adoxq 24(%rsi), %r11
adcx %rcx, %rax
adox %rcx, %rax
imul %rdx, %rax
;# Step 2: Fold the carry back into dst
add %rax, %r8
adcx %rcx, %r9
movq %r9, 8(%rdi)
adcx %rcx, %r10
movq %r10, 16(%rdi)
adcx %rcx, %r11
movq %r11, 24(%rdi)
;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point
mov $0, %rax
cmovc %rdx, %rax
add %rax, %r8
movq %r8, 0(%rdi)
pop %rbx
pop %r15
pop %r14
pop %r13
ret
.global fmul2_e
;# fmul2_e — two independent Curve25519 field multiplications in one call.
;# SysV AMD64: rdi = 16-limb scratch buffer, rsi = src1 (two 4-limb
;# elements at offsets 0 and 32), rdx = destination pointer (stashed in
;# r15), rcx = src2 (same two-element layout). Computes both raw 512-bit
;# products into the scratch buffer, then reduces each with the hi*38
;# fold into dst[0..3] and dst[4..7]. Same interleaved adcx/adox carry
;# chains as fmul_e — instruction order is load-bearing, do not reorder.
fmul2_e:
push %r13
push %r14
push %r15
push %rbx
mov %rdx, %r15
;# Compute the raw multiplication tmp[0] <- f1[0] * f2[0]
;# Compute src1[0] * src2
movq 0(%rsi), %rdx
mulxq 0(%rcx), %r8, %r9
xor %r10d, %r10d
movq %r8, 0(%rdi)
mulxq 8(%rcx), %r10, %r11
adox %r9, %r10
movq %r10, 8(%rdi)
mulxq 16(%rcx), %rbx, %r13
adox %r11, %rbx
mulxq 24(%rcx), %r14, %rdx
adox %r13, %r14
mov $0, %rax
adox %rdx, %rax
;# Compute src1[1] * src2
movq 8(%rsi), %rdx
mulxq 0(%rcx), %r8, %r9
xor %r10d, %r10d
adcxq 8(%rdi), %r8
movq %r8, 8(%rdi)
mulxq 8(%rcx), %r10, %r11
adox %r9, %r10
adcx %rbx, %r10
movq %r10, 16(%rdi)
mulxq 16(%rcx), %rbx, %r13
adox %r11, %rbx
adcx %r14, %rbx
mov $0, %r8
mulxq 24(%rcx), %r14, %rdx
adox %r13, %r14
adcx %rax, %r14
mov $0, %rax
adox %rdx, %rax
adcx %r8, %rax
;# Compute src1[2] * src2
movq 16(%rsi), %rdx
mulxq 0(%rcx), %r8, %r9
xor %r10d, %r10d
adcxq 16(%rdi), %r8
movq %r8, 16(%rdi)
mulxq 8(%rcx), %r10, %r11
adox %r9, %r10
adcx %rbx, %r10
movq %r10, 24(%rdi)
mulxq 16(%rcx), %rbx, %r13
adox %r11, %rbx
adcx %r14, %rbx
mov $0, %r8
mulxq 24(%rcx), %r14, %rdx
adox %r13, %r14
adcx %rax, %r14
mov $0, %rax
adox %rdx, %rax
adcx %r8, %rax
;# Compute src1[3] * src2
movq 24(%rsi), %rdx
mulxq 0(%rcx), %r8, %r9
xor %r10d, %r10d
adcxq 24(%rdi), %r8
movq %r8, 24(%rdi)
mulxq 8(%rcx), %r10, %r11
adox %r9, %r10
adcx %rbx, %r10
movq %r10, 32(%rdi)
mulxq 16(%rcx), %rbx, %r13
adox %r11, %rbx
adcx %r14, %rbx
movq %rbx, 40(%rdi)
mov $0, %r8
mulxq 24(%rcx), %r14, %rdx
adox %r13, %r14
adcx %rax, %r14
movq %r14, 48(%rdi)
mov $0, %rax
adox %rdx, %rax
adcx %r8, %rax
movq %rax, 56(%rdi)
;# Compute the raw multiplication tmp[1] <- f1[1] * f2[1]
;# Compute src1[0] * src2
movq 32(%rsi), %rdx
mulxq 32(%rcx), %r8, %r9
xor %r10d, %r10d
movq %r8, 64(%rdi)
mulxq 40(%rcx), %r10, %r11
adox %r9, %r10
movq %r10, 72(%rdi)
mulxq 48(%rcx), %rbx, %r13
adox %r11, %rbx
mulxq 56(%rcx), %r14, %rdx
adox %r13, %r14
mov $0, %rax
adox %rdx, %rax
;# Compute src1[1] * src2
movq 40(%rsi), %rdx
mulxq 32(%rcx), %r8, %r9
xor %r10d, %r10d
adcxq 72(%rdi), %r8
movq %r8, 72(%rdi)
mulxq 40(%rcx), %r10, %r11
adox %r9, %r10
adcx %rbx, %r10
movq %r10, 80(%rdi)
mulxq 48(%rcx), %rbx, %r13
adox %r11, %rbx
adcx %r14, %rbx
mov $0, %r8
mulxq 56(%rcx), %r14, %rdx
adox %r13, %r14
adcx %rax, %r14
mov $0, %rax
adox %rdx, %rax
adcx %r8, %rax
;# Compute src1[2] * src2
movq 48(%rsi), %rdx
mulxq 32(%rcx), %r8, %r9
xor %r10d, %r10d
adcxq 80(%rdi), %r8
movq %r8, 80(%rdi)
mulxq 40(%rcx), %r10, %r11
adox %r9, %r10
adcx %rbx, %r10
movq %r10, 88(%rdi)
mulxq 48(%rcx), %rbx, %r13
adox %r11, %rbx
adcx %r14, %rbx
mov $0, %r8
mulxq 56(%rcx), %r14, %rdx
adox %r13, %r14
adcx %rax, %r14
mov $0, %rax
adox %rdx, %rax
adcx %r8, %rax
;# Compute src1[3] * src2
movq 56(%rsi), %rdx
mulxq 32(%rcx), %r8, %r9
xor %r10d, %r10d
adcxq 88(%rdi), %r8
movq %r8, 88(%rdi)
mulxq 40(%rcx), %r10, %r11
adox %r9, %r10
adcx %rbx, %r10
movq %r10, 96(%rdi)
mulxq 48(%rcx), %rbx, %r13
adox %r11, %rbx
adcx %r14, %rbx
movq %rbx, 104(%rdi)
mov $0, %r8
mulxq 56(%rcx), %r14, %rdx
adox %r13, %r14
adcx %rax, %r14
movq %r14, 112(%rdi)
mov $0, %rax
adox %rdx, %rax
adcx %r8, %rax
movq %rax, 120(%rdi)
;# Line up pointers
mov %rdi, %rsi
mov %r15, %rdi
;# Wrap the results back into the field
;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo
mov $38, %rdx
mulxq 32(%rsi), %r8, %r13
xor %ecx, %ecx
adoxq 0(%rsi), %r8
mulxq 40(%rsi), %r9, %rbx
adcx %r13, %r9
adoxq 8(%rsi), %r9
mulxq 48(%rsi), %r10, %r13
adcx %rbx, %r10
adoxq 16(%rsi), %r10
mulxq 56(%rsi), %r11, %rax
adcx %r13, %r11
adoxq 24(%rsi), %r11
adcx %rcx, %rax
adox %rcx, %rax
imul %rdx, %rax
;# Step 2: Fold the carry back into dst
add %rax, %r8
adcx %rcx, %r9
movq %r9, 8(%rdi)
adcx %rcx, %r10
movq %r10, 16(%rdi)
adcx %rcx, %r11
movq %r11, 24(%rdi)
;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point
mov $0, %rax
cmovc %rdx, %rax
add %rax, %r8
movq %r8, 0(%rdi)
;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo
mov $38, %rdx
mulxq 96(%rsi), %r8, %r13
xor %ecx, %ecx
adoxq 64(%rsi), %r8
mulxq 104(%rsi), %r9, %rbx
adcx %r13, %r9
adoxq 72(%rsi), %r9
mulxq 112(%rsi), %r10, %r13
adcx %rbx, %r10
adoxq 80(%rsi), %r10
mulxq 120(%rsi), %r11, %rax
adcx %r13, %r11
adoxq 88(%rsi), %r11
adcx %rcx, %rax
adox %rcx, %rax
imul %rdx, %rax
;# Step 2: Fold the carry back into dst
add %rax, %r8
adcx %rcx, %r9
movq %r9, 40(%rdi)
adcx %rcx, %r10
movq %r10, 48(%rdi)
adcx %rcx, %r11
movq %r11, 56(%rdi)
;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point
mov $0, %rax
cmovc %rdx, %rax
add %rax, %r8
movq %r8, 32(%rdi)
pop %rbx
pop %r15
pop %r14
pop %r13
ret
.global fsqr_e
;# fsqr_e — Curve25519 field squaring.
;# SysV AMD64: rdi = 8-limb scratch buffer (raw 512-bit square),
;# rsi = input f, rdx = destination pointer (stashed in r12).
;# Computes the off-diagonal partial products once, doubles them with a
;# second carry chain, adds the diagonal squares, then reduces via the
;# hi*38 fold. Saves callee-saved r12-r15/rbx (note the pop order is the
;# exact reverse of the pushes: r15,r13,r14,r12,rbx).
fsqr_e:
push %r15
push %r13
push %r14
push %r12
push %rbx
mov %rdx, %r12
;# Compute the raw multiplication: tmp <- f * f
;# Step 1: Compute all partial products
movq 0(%rsi), %rdx
;# f[0]
mulxq 8(%rsi), %r8, %r14
xor %r15d, %r15d
;# f[1]*f[0]
mulxq 16(%rsi), %r9, %r10
adcx %r14, %r9
;# f[2]*f[0]
mulxq 24(%rsi), %rax, %rcx
adcx %rax, %r10
;# f[3]*f[0]
movq 24(%rsi), %rdx
;# f[3]
mulxq 8(%rsi), %r11, %rbx
adcx %rcx, %r11
;# f[1]*f[3]
mulxq 16(%rsi), %rax, %r13
adcx %rax, %rbx
;# f[2]*f[3]
movq 8(%rsi), %rdx
adcx %r15, %r13
;# f1
mulxq 16(%rsi), %rax, %rcx
mov $0, %r14
;# f[2]*f[1]
;# Step 2: Compute two parallel carry chains
xor %r15d, %r15d
adox %rax, %r10
adcx %r8, %r8
adox %rcx, %r11
adcx %r9, %r9
adox %r15, %rbx
adcx %r10, %r10
adox %r15, %r13
adcx %r11, %r11
adox %r15, %r14
adcx %rbx, %rbx
adcx %r13, %r13
adcx %r14, %r14
;# Step 3: Compute intermediate squares
movq 0(%rsi), %rdx
mulx %rdx, %rax, %rcx
;# f[0]^2
movq %rax, 0(%rdi)
add %rcx, %r8
movq %r8, 8(%rdi)
movq 8(%rsi), %rdx
mulx %rdx, %rax, %rcx
;# f[1]^2
adcx %rax, %r9
movq %r9, 16(%rdi)
adcx %rcx, %r10
movq %r10, 24(%rdi)
movq 16(%rsi), %rdx
mulx %rdx, %rax, %rcx
;# f[2]^2
adcx %rax, %r11
movq %r11, 32(%rdi)
adcx %rcx, %rbx
movq %rbx, 40(%rdi)
movq 24(%rsi), %rdx
mulx %rdx, %rax, %rcx
;# f[3]^2
adcx %rax, %r13
movq %r13, 48(%rdi)
adcx %rcx, %r14
movq %r14, 56(%rdi)
;# Line up pointers
mov %rdi, %rsi
mov %r12, %rdi
;# Wrap the result back into the field
;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo
mov $38, %rdx
mulxq 32(%rsi), %r8, %r13
xor %ecx, %ecx
adoxq 0(%rsi), %r8
mulxq 40(%rsi), %r9, %rbx
adcx %r13, %r9
adoxq 8(%rsi), %r9
mulxq 48(%rsi), %r10, %r13
adcx %rbx, %r10
adoxq 16(%rsi), %r10
mulxq 56(%rsi), %r11, %rax
adcx %r13, %r11
adoxq 24(%rsi), %r11
adcx %rcx, %rax
adox %rcx, %rax
imul %rdx, %rax
;# Step 2: Fold the carry back into dst
add %rax, %r8
adcx %rcx, %r9
movq %r9, 8(%rdi)
adcx %rcx, %r10
movq %r10, 16(%rdi)
adcx %rcx, %r11
movq %r11, 24(%rdi)
;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point
mov $0, %rax
cmovc %rdx, %rax
add %rax, %r8
movq %r8, 0(%rdi)
pop %rbx
pop %r12
pop %r14
pop %r13
pop %r15
ret
.global fsqr2_e
;# fsqr2_e — two independent Curve25519 field squarings in one call.
;# SysV AMD64: rdi = 16-limb scratch buffer, rsi = input (two 4-limb
;# elements at offsets 0 and 32), rdx = destination pointer (stashed in
;# r12). Runs the fsqr_e partial-product/double/diagonal scheme on each
;# element, then reduces each raw square with the hi*38 fold into
;# dst[0..3] and dst[4..7]. Instruction order carries the dual adcx/adox
;# chains — do not reorder.
fsqr2_e:
push %r15
push %r13
push %r14
push %r12
push %rbx
mov %rdx, %r12
;# Step 1: Compute all partial products
movq 0(%rsi), %rdx
;# f[0]
mulxq 8(%rsi), %r8, %r14
xor %r15d, %r15d
;# f[1]*f[0]
mulxq 16(%rsi), %r9, %r10
adcx %r14, %r9
;# f[2]*f[0]
mulxq 24(%rsi), %rax, %rcx
adcx %rax, %r10
;# f[3]*f[0]
movq 24(%rsi), %rdx
;# f[3]
mulxq 8(%rsi), %r11, %rbx
adcx %rcx, %r11
;# f[1]*f[3]
mulxq 16(%rsi), %rax, %r13
adcx %rax, %rbx
;# f[2]*f[3]
movq 8(%rsi), %rdx
adcx %r15, %r13
;# f1
mulxq 16(%rsi), %rax, %rcx
mov $0, %r14
;# f[2]*f[1]
;# Step 2: Compute two parallel carry chains
xor %r15d, %r15d
adox %rax, %r10
adcx %r8, %r8
adox %rcx, %r11
adcx %r9, %r9
adox %r15, %rbx
adcx %r10, %r10
adox %r15, %r13
adcx %r11, %r11
adox %r15, %r14
adcx %rbx, %rbx
adcx %r13, %r13
adcx %r14, %r14
;# Step 3: Compute intermediate squares
movq 0(%rsi), %rdx
mulx %rdx, %rax, %rcx
;# f[0]^2
movq %rax, 0(%rdi)
add %rcx, %r8
movq %r8, 8(%rdi)
movq 8(%rsi), %rdx
mulx %rdx, %rax, %rcx
;# f[1]^2
adcx %rax, %r9
movq %r9, 16(%rdi)
adcx %rcx, %r10
movq %r10, 24(%rdi)
movq 16(%rsi), %rdx
mulx %rdx, %rax, %rcx
;# f[2]^2
adcx %rax, %r11
movq %r11, 32(%rdi)
adcx %rcx, %rbx
movq %rbx, 40(%rdi)
movq 24(%rsi), %rdx
mulx %rdx, %rax, %rcx
;# f[3]^2
adcx %rax, %r13
movq %r13, 48(%rdi)
adcx %rcx, %r14
movq %r14, 56(%rdi)
;# Step 1: Compute all partial products
movq 32(%rsi), %rdx
;# f[0]
mulxq 40(%rsi), %r8, %r14
xor %r15d, %r15d
;# f[1]*f[0]
mulxq 48(%rsi), %r9, %r10
adcx %r14, %r9
;# f[2]*f[0]
mulxq 56(%rsi), %rax, %rcx
adcx %rax, %r10
;# f[3]*f[0]
movq 56(%rsi), %rdx
;# f[3]
mulxq 40(%rsi), %r11, %rbx
adcx %rcx, %r11
;# f[1]*f[3]
mulxq 48(%rsi), %rax, %r13
adcx %rax, %rbx
;# f[2]*f[3]
movq 40(%rsi), %rdx
adcx %r15, %r13
;# f1
mulxq 48(%rsi), %rax, %rcx
mov $0, %r14
;# f[2]*f[1]
;# Step 2: Compute two parallel carry chains
xor %r15d, %r15d
adox %rax, %r10
adcx %r8, %r8
adox %rcx, %r11
adcx %r9, %r9
adox %r15, %rbx
adcx %r10, %r10
adox %r15, %r13
adcx %r11, %r11
adox %r15, %r14
adcx %rbx, %rbx
adcx %r13, %r13
adcx %r14, %r14
;# Step 3: Compute intermediate squares
movq 32(%rsi), %rdx
mulx %rdx, %rax, %rcx
;# f[0]^2
movq %rax, 64(%rdi)
add %rcx, %r8
movq %r8, 72(%rdi)
movq 40(%rsi), %rdx
mulx %rdx, %rax, %rcx
;# f[1]^2
adcx %rax, %r9
movq %r9, 80(%rdi)
adcx %rcx, %r10
movq %r10, 88(%rdi)
movq 48(%rsi), %rdx
mulx %rdx, %rax, %rcx
;# f[2]^2
adcx %rax, %r11
movq %r11, 96(%rdi)
adcx %rcx, %rbx
movq %rbx, 104(%rdi)
movq 56(%rsi), %rdx
mulx %rdx, %rax, %rcx
;# f[3]^2
adcx %rax, %r13
movq %r13, 112(%rdi)
adcx %rcx, %r14
movq %r14, 120(%rdi)
;# Line up pointers
mov %rdi, %rsi
mov %r12, %rdi
;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo
mov $38, %rdx
mulxq 32(%rsi), %r8, %r13
xor %ecx, %ecx
adoxq 0(%rsi), %r8
mulxq 40(%rsi), %r9, %rbx
adcx %r13, %r9
adoxq 8(%rsi), %r9
mulxq 48(%rsi), %r10, %r13
adcx %rbx, %r10
adoxq 16(%rsi), %r10
mulxq 56(%rsi), %r11, %rax
adcx %r13, %r11
adoxq 24(%rsi), %r11
adcx %rcx, %rax
adox %rcx, %rax
imul %rdx, %rax
;# Step 2: Fold the carry back into dst
add %rax, %r8
adcx %rcx, %r9
movq %r9, 8(%rdi)
adcx %rcx, %r10
movq %r10, 16(%rdi)
adcx %rcx, %r11
movq %r11, 24(%rdi)
;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point
mov $0, %rax
cmovc %rdx, %rax
add %rax, %r8
movq %r8, 0(%rdi)
;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo
mov $38, %rdx
mulxq 96(%rsi), %r8, %r13
xor %ecx, %ecx
adoxq 64(%rsi), %r8
mulxq 104(%rsi), %r9, %rbx
adcx %r13, %r9
adoxq 72(%rsi), %r9
mulxq 112(%rsi), %r10, %r13
adcx %rbx, %r10
adoxq 80(%rsi), %r10
mulxq 120(%rsi), %r11, %rax
adcx %r13, %r11
adoxq 88(%rsi), %r11
adcx %rcx, %rax
adox %rcx, %rax
imul %rdx, %rax
;# Step 2: Fold the carry back into dst
add %rax, %r8
adcx %rcx, %r9
movq %r9, 40(%rdi)
adcx %rcx, %r10
movq %r10, 48(%rdi)
adcx %rcx, %r11
movq %r11, 56(%rdi)
;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point
mov $0, %rax
cmovc %rdx, %rax
add %rax, %r8
movq %r8, 32(%rdi)
pop %rbx
pop %r12
pop %r14
pop %r13
pop %r15
ret
.global cswap2_e
;# void cswap2_e(u64 bit, u64 p1[8], u64 p2[8])
;# SysV AMD64: rdi = swap bit (0 or 1), rsi = p1, rdx = p2.
;# Constant-time conditional swap of two 8-limb buffers: adding
;# 2^64-1 (i.e. -1) to the bit sets CF iff bit == 1, and every limb
;# pair is then exchanged with a branch-free cmovc pair.
cswap2_e:
;# Transfer bit into CF flag
add $18446744073709551615, %rdi
;# cswap p1[0], p2[0]
movq 0(%rsi), %r8
movq 0(%rdx), %r9
mov %r8, %r10
cmovc %r9, %r8
cmovc %r10, %r9
movq %r8, 0(%rsi)
movq %r9, 0(%rdx)
;# cswap p1[1], p2[1]
movq 8(%rsi), %r8
movq 8(%rdx), %r9
mov %r8, %r10
cmovc %r9, %r8
cmovc %r10, %r9
movq %r8, 8(%rsi)
movq %r9, 8(%rdx)
;# cswap p1[2], p2[2]
movq 16(%rsi), %r8
movq 16(%rdx), %r9
mov %r8, %r10
cmovc %r9, %r8
cmovc %r10, %r9
movq %r8, 16(%rsi)
movq %r9, 16(%rdx)
;# cswap p1[3], p2[3]
movq 24(%rsi), %r8
movq 24(%rdx), %r9
mov %r8, %r10
cmovc %r9, %r8
cmovc %r10, %r9
movq %r8, 24(%rsi)
movq %r9, 24(%rdx)
;# cswap p1[4], p2[4]
movq 32(%rsi), %r8
movq 32(%rdx), %r9
mov %r8, %r10
cmovc %r9, %r8
cmovc %r10, %r9
movq %r8, 32(%rsi)
movq %r9, 32(%rdx)
;# cswap p1[5], p2[5]
movq 40(%rsi), %r8
movq 40(%rdx), %r9
mov %r8, %r10
cmovc %r9, %r8
cmovc %r10, %r9
movq %r8, 40(%rsi)
movq %r9, 40(%rdx)
;# cswap p1[6], p2[6]
movq 48(%rsi), %r8
movq 48(%rdx), %r9
mov %r8, %r10
cmovc %r9, %r8
cmovc %r10, %r9
movq %r8, 48(%rsi)
movq %r9, 48(%rdx)
;# cswap p1[7], p2[7]
movq 56(%rsi), %r8
movq 56(%rdx), %r9
mov %r8, %r10
cmovc %r9, %r8
cmovc %r10, %r9
movq %r8, 56(%rsi)
movq %r9, 56(%rdx)
ret
;# Mark the stack non-executable for GNU linkers.
.section .note.GNU-stack,"",%progbits
|
usenix-security-verdict/verdict
| 2,344
|
deps/libcrux/sys/hacl/c/vale/src/cpuid-x86_64-mingw.S
|
;# CPUID/XGETBV feature probes (x86_64, MinGW/Win64 build).
;# Each routine returns a nonzero rax iff every probed feature bit is set,
;# and zero otherwise. These leaf functions only touch rax/rcx/rdx and
;# r9-r11, all volatile under the Microsoft x64 convention; rbx
;# (callee-saved) is parked in r9 around each CPUID and restored.
.text
.global check_aesni
;# Nonzero iff CPUID.1:ECX reports AESNI (bit 25) and PCLMULQDQ (bit 1).
check_aesni:
mov %rbx, %r9
mov $0, %rcx
mov $1, %rax
cpuid
mov %rcx, %rax
and $33554432, %rax
shr $24, %rax
and $2, %rcx
and %rcx, %rax
mov %r9, %rbx
ret
.global check_sha
;# Nonzero iff CPUID.(7,0):EBX reports the SHA extensions (bit 29).
check_sha:
mov %rbx, %r9
mov $7, %rax
mov $0, %rcx
cpuid
and $536870912, %rbx
mov %rbx, %rax
mov %r9, %rbx
ret
.global check_adx_bmi2
;# Nonzero iff CPUID.(7,0):EBX reports ADX (bit 19) and BMI2 (bit 8);
;# both masks are shifted so they line up on bit 8 before the AND.
check_adx_bmi2:
mov %rbx, %r9
mov $7, %rax
mov $0, %rcx
cpuid
mov %rbx, %rax
and $524288, %rax
shr $11, %rax
and $256, %rbx
and %rbx, %rax
mov %r9, %rbx
ret
.global check_avx
;# Nonzero iff CPUID.1:ECX reports AVX (bit 28).
check_avx:
mov %rbx, %r9
mov $0, %rcx
mov $1, %rax
cpuid
mov %rcx, %rax
and $268435456, %rax
shr $27, %rax
mov %r9, %rbx
ret
.global check_avx2
;# Nonzero iff CPUID.(7,0):EBX reports AVX2 (bit 5).
check_avx2:
mov %rbx, %r9
mov $7, %rax
mov $0, %rcx
cpuid
and $32, %rbx
mov %rbx, %rax
mov %r9, %rbx
ret
.global check_movbe
;# Nonzero iff CPUID.1:ECX reports MOVBE (bit 22).
check_movbe:
mov %rbx, %r9
mov $0, %rcx
mov $1, %rax
cpuid
mov %rcx, %rax
and $4194304, %rax
shr $21, %rax
mov %r9, %rbx
ret
.global check_sse
;# Nonzero iff CPUID.1 reports SSE4.1 (ECX bit 19), SSSE3 (ECX bit 9) and
;# SSE2 (EDX bit 26); every mask is shifted to bit 9 before combining.
check_sse:
mov %rbx, %r9
mov $0, %rcx
mov $1, %rax
cpuid
mov %rcx, %rax
and $524288, %rax
and $512, %rcx
and $67108864, %rdx
shr $10, %rax
shr $17, %rdx
and %rdx, %rax
and %rcx, %rax
mov %r9, %rbx
ret
.global check_rdrand
;# Nonzero iff CPUID.1:ECX reports RDRAND (bit 30).
check_rdrand:
mov %rbx, %r9
mov $0, %rcx
mov $1, %rax
cpuid
mov %rcx, %rax
and $1073741824, %rax
shr $29, %rax
mov %r9, %rbx
ret
.global check_avx512
;# Nonzero iff CPUID.(7,0):EBX reports AVX512F (bit 16), AVX512DQ (bit 17),
;# AVX512BW (bit 30) and AVX512VL (bit 31); all four are aligned to bit 16
;# before being ANDed together.
check_avx512:
mov %rbx, %r9
mov $7, %rax
mov $0, %rcx
cpuid
mov %rbx, %rax
mov %rbx, %r10
mov %rbx, %r11
and $65536, %rbx
and $131072, %rax
and $1073741824, %r10
shr $1, %rax
shr $14, %r10
and %rbx, %rax
mov $2147483648, %rbx
and %rbx, %r11
shr $15, %r11
and %r10, %rax
and %r11, %rax
mov %r9, %rbx
ret
.global check_osxsave
;# Nonzero iff CPUID.1:ECX reports OSXSAVE (bit 27), i.e. XGETBV is usable.
check_osxsave:
mov %rbx, %r9
mov $0, %rcx
mov $1, %rax
cpuid
mov %rcx, %rax
and $134217728, %rax
shr $26, %rax
mov %r9, %rbx
ret
.global check_avx_xcr0
;# Nonzero iff XCR0 enables both SSE (bit 1) and AVX/YMM (bit 2) state.
;# NOTE(review): callers are expected to check OSXSAVE first, or XGETBV
;# will fault — confirm against the call sites.
check_avx_xcr0:
mov $0, %rcx
xgetbv
mov %rax, %rcx
and $4, %rax
shr $1, %rax
and $2, %rcx
and %rcx, %rax
ret
.global check_avx512_xcr0
;# Nonzero iff XCR0 enables opmask (bit 5), ZMM_Hi256 (bit 6) and
;# Hi16_ZMM (bit 7) state; all three masks are aligned to bit 5.
check_avx512_xcr0:
mov $0, %rcx
xgetbv
mov %rax, %rcx
mov %rax, %rdx
and $32, %rax
and $64, %rcx
and $128, %rdx
shr $2, %rdx
shr $1, %rcx
and %rdx, %rax
and %rcx, %rax
ret
|
usenix-security-verdict/verdict
| 19,666
|
deps/libcrux/sys/hacl/c/vale/src/curve25519-x86_64-mingw.S
|
.text
.global add_scalar_e
;# Win64 variant of add_scalar_e: out = f1 + f2 (4 limbs), carry in rax.
;# Microsoft x64 args rcx/rdx/r8 are shuffled into the SysV registers
;# (rdi=out, rsi=f1, rdx=f2) so the body matches the Linux build.
;# rdi/rsi are callee-saved on Windows, hence the push/pop pairs.
add_scalar_e:
push %rdi
push %rsi
mov %rcx, %rdi
mov %rdx, %rsi
mov %r8, %rdx
;# Clear registers to propagate the carry bit
xor %r8d, %r8d
xor %r9d, %r9d
xor %r10d, %r10d
xor %r11d, %r11d
xor %eax, %eax
;# Begin addition chain
addq 0(%rsi), %rdx
movq %rdx, 0(%rdi)
adcxq 8(%rsi), %r8
movq %r8, 8(%rdi)
adcxq 16(%rsi), %r9
movq %r9, 16(%rdi)
adcxq 24(%rsi), %r10
movq %r10, 24(%rdi)
;# Return the carry bit in a register
adcx %r11, %rax
pop %rsi
pop %rdi
ret
.global fadd_e
;# Win64 variant of fadd_e: out = f1 + f2 mod 2^255-19 (carry*38 fold).
;# Shuffles Microsoft x64 args rcx/rdx/r8 into rdi/rsi/rdx and saves
;# rdi/rsi, which are callee-saved on Windows.
fadd_e:
push %rdi
push %rsi
mov %rcx, %rdi
mov %rdx, %rsi
mov %r8, %rdx
;# Compute the raw addition of f1 + f2
movq 0(%rdx), %r8
addq 0(%rsi), %r8
movq 8(%rdx), %r9
adcxq 8(%rsi), %r9
movq 16(%rdx), %r10
adcxq 16(%rsi), %r10
movq 24(%rdx), %r11
adcxq 24(%rsi), %r11
;# Wrap the result back into the field
;# Step 1: Compute carry*38
mov $0, %rax
mov $38, %rdx
cmovc %rdx, %rax
;# Step 2: Add carry*38 to the original sum
xor %ecx, %ecx
add %rax, %r8
adcx %rcx, %r9
movq %r9, 8(%rdi)
adcx %rcx, %r10
movq %r10, 16(%rdi)
adcx %rcx, %r11
movq %r11, 24(%rdi)
;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point
mov $0, %rax
cmovc %rdx, %rax
add %rax, %r8
movq %r8, 0(%rdi)
pop %rsi
pop %rdi
ret
.global fsub_e
;# Win64 variant of fsub_e: out = f1 - f2 mod 2^255-19 (borrow*38 fold).
;# Shuffles Microsoft x64 args rcx/rdx/r8 into rdi/rsi/rdx and saves
;# rdi/rsi, which are callee-saved on Windows.
fsub_e:
push %rdi
push %rsi
mov %rcx, %rdi
mov %rdx, %rsi
mov %r8, %rdx
;# Compute the raw substraction of f1-f2
movq 0(%rsi), %r8
subq 0(%rdx), %r8
movq 8(%rsi), %r9
sbbq 8(%rdx), %r9
movq 16(%rsi), %r10
sbbq 16(%rdx), %r10
movq 24(%rsi), %r11
sbbq 24(%rdx), %r11
;# Wrap the result back into the field
;# Step 1: Compute carry*38
mov $0, %rax
mov $38, %rcx
cmovc %rcx, %rax
;# Step 2: Substract carry*38 from the original difference
sub %rax, %r8
sbb $0, %r9
sbb $0, %r10
sbb $0, %r11
;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point
mov $0, %rax
cmovc %rcx, %rax
sub %rax, %r8
;# Store the result
movq %r8, 0(%rdi)
movq %r9, 8(%rdi)
movq %r10, 16(%rdi)
movq %r11, 24(%rdi)
pop %rsi
pop %rdi
ret
.global fmul_scalar_e
;# Win64 variant of fmul_scalar_e: out = f1 * f2 (scalar) with the
;# carry*38 fold. Shuffles Microsoft x64 args rcx/rdx/r8 into
;# rdi/rsi/rdx (rdx is mulx's implicit multiplicand) and saves the
;# Windows callee-saved registers rdi/rsi plus r13/rbx.
fmul_scalar_e:
push %rdi
push %r13
push %rbx
push %rsi
mov %rcx, %rdi
mov %rdx, %rsi
mov %r8, %rdx
;# Compute the raw multiplication of f1*f2
mulxq 0(%rsi), %r8, %rcx
;# f1[0]*f2
mulxq 8(%rsi), %r9, %rbx
;# f1[1]*f2
add %rcx, %r9
mov $0, %rcx
mulxq 16(%rsi), %r10, %r13
;# f1[2]*f2
adcx %rbx, %r10
mulxq 24(%rsi), %r11, %rax
;# f1[3]*f2
adcx %r13, %r11
adcx %rcx, %rax
;# Wrap the result back into the field
;# Step 1: Compute carry*38
mov $38, %rdx
imul %rdx, %rax
;# Step 2: Fold the carry back into dst
add %rax, %r8
adcx %rcx, %r9
movq %r9, 8(%rdi)
adcx %rcx, %r10
movq %r10, 16(%rdi)
adcx %rcx, %r11
movq %r11, 24(%rdi)
;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point
mov $0, %rax
cmovc %rdx, %rax
add %rax, %r8
movq %r8, 0(%rdi)
pop %rsi
pop %rbx
pop %r13
pop %rdi
ret
.global fmul_e
;# Win64 variant of fmul_e — Curve25519 field multiplication.
;# Shuffles Microsoft x64 args into the SysV roles used by the body:
;# rcx->rdi (8-limb scratch), rdx->rsi (src1), r8->r15 (destination),
;# r9->rcx (src2). Saves the Windows callee-saved rsi/rdi in addition
;# to r13-r15/rbx. Body is identical to the Linux build: raw 512-bit
;# schoolbook product via interleaved adcx/adox chains, then the hi*38
;# reduction fold. Do not reorder instructions.
fmul_e:
push %r13
push %r14
push %r15
push %rbx
push %rsi
push %rdi
mov %rcx, %rdi
mov %rdx, %rsi
mov %r8, %r15
mov %r9, %rcx
;# Compute the raw multiplication: tmp <- src1 * src2
;# Compute src1[0] * src2
movq 0(%rsi), %rdx
mulxq 0(%rcx), %r8, %r9
xor %r10d, %r10d
movq %r8, 0(%rdi)
mulxq 8(%rcx), %r10, %r11
adox %r9, %r10
movq %r10, 8(%rdi)
mulxq 16(%rcx), %rbx, %r13
adox %r11, %rbx
mulxq 24(%rcx), %r14, %rdx
adox %r13, %r14
mov $0, %rax
adox %rdx, %rax
;# Compute src1[1] * src2
movq 8(%rsi), %rdx
mulxq 0(%rcx), %r8, %r9
xor %r10d, %r10d
adcxq 8(%rdi), %r8
movq %r8, 8(%rdi)
mulxq 8(%rcx), %r10, %r11
adox %r9, %r10
adcx %rbx, %r10
movq %r10, 16(%rdi)
mulxq 16(%rcx), %rbx, %r13
adox %r11, %rbx
adcx %r14, %rbx
mov $0, %r8
mulxq 24(%rcx), %r14, %rdx
adox %r13, %r14
adcx %rax, %r14
mov $0, %rax
adox %rdx, %rax
adcx %r8, %rax
;# Compute src1[2] * src2
movq 16(%rsi), %rdx
mulxq 0(%rcx), %r8, %r9
xor %r10d, %r10d
adcxq 16(%rdi), %r8
movq %r8, 16(%rdi)
mulxq 8(%rcx), %r10, %r11
adox %r9, %r10
adcx %rbx, %r10
movq %r10, 24(%rdi)
mulxq 16(%rcx), %rbx, %r13
adox %r11, %rbx
adcx %r14, %rbx
mov $0, %r8
mulxq 24(%rcx), %r14, %rdx
adox %r13, %r14
adcx %rax, %r14
mov $0, %rax
adox %rdx, %rax
adcx %r8, %rax
;# Compute src1[3] * src2
movq 24(%rsi), %rdx
mulxq 0(%rcx), %r8, %r9
xor %r10d, %r10d
adcxq 24(%rdi), %r8
movq %r8, 24(%rdi)
mulxq 8(%rcx), %r10, %r11
adox %r9, %r10
adcx %rbx, %r10
movq %r10, 32(%rdi)
mulxq 16(%rcx), %rbx, %r13
adox %r11, %rbx
adcx %r14, %rbx
movq %rbx, 40(%rdi)
mov $0, %r8
mulxq 24(%rcx), %r14, %rdx
adox %r13, %r14
adcx %rax, %r14
movq %r14, 48(%rdi)
mov $0, %rax
adox %rdx, %rax
adcx %r8, %rax
movq %rax, 56(%rdi)
;# Line up pointers
mov %rdi, %rsi
mov %r15, %rdi
;# Wrap the result back into the field
;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo
mov $38, %rdx
mulxq 32(%rsi), %r8, %r13
xor %ecx, %ecx
adoxq 0(%rsi), %r8
mulxq 40(%rsi), %r9, %rbx
adcx %r13, %r9
adoxq 8(%rsi), %r9
mulxq 48(%rsi), %r10, %r13
adcx %rbx, %r10
adoxq 16(%rsi), %r10
mulxq 56(%rsi), %r11, %rax
adcx %r13, %r11
adoxq 24(%rsi), %r11
adcx %rcx, %rax
adox %rcx, %rax
imul %rdx, %rax
;# Step 2: Fold the carry back into dst
add %rax, %r8
adcx %rcx, %r9
movq %r9, 8(%rdi)
adcx %rcx, %r10
movq %r10, 16(%rdi)
adcx %rcx, %r11
movq %r11, 24(%rdi)
;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point
mov $0, %rax
cmovc %rdx, %rax
add %rax, %r8
movq %r8, 0(%rdi)
pop %rdi
pop %rsi
pop %rbx
pop %r15
pop %r14
pop %r13
ret
;#-----------------------------------------------------------------------
;# fmul2_e -- two independent 4-limb (256-bit) field multiplications with
;# a shared reduction:
;#   out[0..3] <- f1[0..3] * f2[0..3]   (mod 2^255 - 19)
;#   out[4..7] <- f1[4..7] * f2[4..7]   (mod 2^255 - 19)
;# The *38 folding below (38 == 2*19) is the standard Curve25519 carry
;# reduction of a 512-bit product back into 4 limbs.
;# ABI:   Microsoft x64 (mingw build).
;# In:    rcx = tmp (128-byte scratch; receives both raw 512-bit products)
;#        rdx = f1  (64 bytes: two 4-limb inputs)
;#        r8  = out (64 bytes: two 4-limb results)
;#        r9  = f2  (64 bytes: two 4-limb inputs)
;# Saves the Win64 callee-saved registers it touches (rbx, rsi, rdi,
;# r13-r15); clobbers rax, rcx, rdx, r8-r11 and flags.
;# The multiply rows use two parallel carry chains: OF via adox, CF via
;# adcx -- instruction order is load-bearing and must not be changed.
;#-----------------------------------------------------------------------
.global fmul2_e
fmul2_e:
push %r13
push %r14
push %r15
push %rbx
push %rsi
push %rdi
;# Map Win64 argument registers onto the SysV-style registers the
;# generated body expects: rdi=tmp, rsi=f1, r15=out, rcx=f2
mov %rcx, %rdi
mov %rdx, %rsi
mov %r8, %r15
mov %r9, %rcx
;# Compute the raw multiplication tmp[0] <- f1[0] * f2[0]
;# Compute src1[0] * src2
movq 0(%rsi), %rdx
mulxq 0(%rcx), %r8, %r9
xor %r10d, %r10d
movq %r8, 0(%rdi)
mulxq 8(%rcx), %r10, %r11
adox %r9, %r10
movq %r10, 8(%rdi)
mulxq 16(%rcx), %rbx, %r13
adox %r11, %rbx
mulxq 24(%rcx), %r14, %rdx
adox %r13, %r14
mov $0, %rax
adox %rdx, %rax
;# Compute src1[1] * src2
movq 8(%rsi), %rdx
mulxq 0(%rcx), %r8, %r9
xor %r10d, %r10d
adcxq 8(%rdi), %r8
movq %r8, 8(%rdi)
mulxq 8(%rcx), %r10, %r11
adox %r9, %r10
adcx %rbx, %r10
movq %r10, 16(%rdi)
mulxq 16(%rcx), %rbx, %r13
adox %r11, %rbx
adcx %r14, %rbx
mov $0, %r8
mulxq 24(%rcx), %r14, %rdx
adox %r13, %r14
adcx %rax, %r14
mov $0, %rax
adox %rdx, %rax
adcx %r8, %rax
;# Compute src1[2] * src2
movq 16(%rsi), %rdx
mulxq 0(%rcx), %r8, %r9
xor %r10d, %r10d
adcxq 16(%rdi), %r8
movq %r8, 16(%rdi)
mulxq 8(%rcx), %r10, %r11
adox %r9, %r10
adcx %rbx, %r10
movq %r10, 24(%rdi)
mulxq 16(%rcx), %rbx, %r13
adox %r11, %rbx
adcx %r14, %rbx
mov $0, %r8
mulxq 24(%rcx), %r14, %rdx
adox %r13, %r14
adcx %rax, %r14
mov $0, %rax
adox %rdx, %rax
adcx %r8, %rax
;# Compute src1[3] * src2
movq 24(%rsi), %rdx
mulxq 0(%rcx), %r8, %r9
xor %r10d, %r10d
adcxq 24(%rdi), %r8
movq %r8, 24(%rdi)
mulxq 8(%rcx), %r10, %r11
adox %r9, %r10
adcx %rbx, %r10
movq %r10, 32(%rdi)
mulxq 16(%rcx), %rbx, %r13
adox %r11, %rbx
adcx %r14, %rbx
movq %rbx, 40(%rdi)
mov $0, %r8
mulxq 24(%rcx), %r14, %rdx
adox %r13, %r14
adcx %rax, %r14
movq %r14, 48(%rdi)
mov $0, %rax
adox %rdx, %rax
adcx %r8, %rax
movq %rax, 56(%rdi)
;# Compute the raw multiplication tmp[1] <- f1[1] * f2[1]
;# (same schoolbook rows as above, shifted to the upper 32-byte halves
;#  of f1/f2 and the upper 64 bytes of tmp)
;# Compute src1[0] * src2
movq 32(%rsi), %rdx
mulxq 32(%rcx), %r8, %r9
xor %r10d, %r10d
movq %r8, 64(%rdi)
mulxq 40(%rcx), %r10, %r11
adox %r9, %r10
movq %r10, 72(%rdi)
mulxq 48(%rcx), %rbx, %r13
adox %r11, %rbx
mulxq 56(%rcx), %r14, %rdx
adox %r13, %r14
mov $0, %rax
adox %rdx, %rax
;# Compute src1[1] * src2
movq 40(%rsi), %rdx
mulxq 32(%rcx), %r8, %r9
xor %r10d, %r10d
adcxq 72(%rdi), %r8
movq %r8, 72(%rdi)
mulxq 40(%rcx), %r10, %r11
adox %r9, %r10
adcx %rbx, %r10
movq %r10, 80(%rdi)
mulxq 48(%rcx), %rbx, %r13
adox %r11, %rbx
adcx %r14, %rbx
mov $0, %r8
mulxq 56(%rcx), %r14, %rdx
adox %r13, %r14
adcx %rax, %r14
mov $0, %rax
adox %rdx, %rax
adcx %r8, %rax
;# Compute src1[2] * src2
movq 48(%rsi), %rdx
mulxq 32(%rcx), %r8, %r9
xor %r10d, %r10d
adcxq 80(%rdi), %r8
movq %r8, 80(%rdi)
mulxq 40(%rcx), %r10, %r11
adox %r9, %r10
adcx %rbx, %r10
movq %r10, 88(%rdi)
mulxq 48(%rcx), %rbx, %r13
adox %r11, %rbx
adcx %r14, %rbx
mov $0, %r8
mulxq 56(%rcx), %r14, %rdx
adox %r13, %r14
adcx %rax, %r14
mov $0, %rax
adox %rdx, %rax
adcx %r8, %rax
;# Compute src1[3] * src2
movq 56(%rsi), %rdx
mulxq 32(%rcx), %r8, %r9
xor %r10d, %r10d
adcxq 88(%rdi), %r8
movq %r8, 88(%rdi)
mulxq 40(%rcx), %r10, %r11
adox %r9, %r10
adcx %rbx, %r10
movq %r10, 96(%rdi)
mulxq 48(%rcx), %rbx, %r13
adox %r11, %rbx
adcx %r14, %rbx
movq %rbx, 104(%rdi)
mov $0, %r8
mulxq 56(%rcx), %r14, %rdx
adox %r13, %r14
adcx %rax, %r14
movq %r14, 112(%rdi)
mov $0, %rax
adox %rdx, %rax
adcx %r8, %rax
movq %rax, 120(%rdi)
;# Line up pointers
;# rsi now reads the raw products in tmp; rdi is the final out buffer
mov %rdi, %rsi
mov %r15, %rdi
;# Wrap the results back into the field
;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo
mov $38, %rdx
mulxq 32(%rsi), %r8, %r13
xor %ecx, %ecx
adoxq 0(%rsi), %r8
mulxq 40(%rsi), %r9, %rbx
adcx %r13, %r9
adoxq 8(%rsi), %r9
mulxq 48(%rsi), %r10, %r13
adcx %rbx, %r10
adoxq 16(%rsi), %r10
mulxq 56(%rsi), %r11, %rax
adcx %r13, %r11
adoxq 24(%rsi), %r11
adcx %rcx, %rax
adox %rcx, %rax
imul %rdx, %rax
;# Step 2: Fold the carry back into dst
add %rax, %r8
adcx %rcx, %r9
movq %r9, 8(%rdi)
adcx %rcx, %r10
movq %r10, 16(%rdi)
adcx %rcx, %r11
movq %r11, 24(%rdi)
;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point
mov $0, %rax
cmovc %rdx, %rax
add %rax, %r8
movq %r8, 0(%rdi)
;# Second reduction: same folding applied to tmp[64..127] -> out[32..63]
;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo
mov $38, %rdx
mulxq 96(%rsi), %r8, %r13
xor %ecx, %ecx
adoxq 64(%rsi), %r8
mulxq 104(%rsi), %r9, %rbx
adcx %r13, %r9
adoxq 72(%rsi), %r9
mulxq 112(%rsi), %r10, %r13
adcx %rbx, %r10
adoxq 80(%rsi), %r10
mulxq 120(%rsi), %r11, %rax
adcx %r13, %r11
adoxq 88(%rsi), %r11
adcx %rcx, %rax
adox %rcx, %rax
imul %rdx, %rax
;# Step 2: Fold the carry back into dst
add %rax, %r8
adcx %rcx, %r9
movq %r9, 40(%rdi)
adcx %rcx, %r10
movq %r10, 48(%rdi)
adcx %rcx, %r11
movq %r11, 56(%rdi)
;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point
mov $0, %rax
cmovc %rdx, %rax
add %rax, %r8
movq %r8, 32(%rdi)
pop %rdi
pop %rsi
pop %rbx
pop %r15
pop %r14
pop %r13
ret
;#-----------------------------------------------------------------------
;# fsqr_e -- 4-limb (256-bit) field squaring: out <- f * f (mod 2^255-19).
;# Squaring exploits symmetry: the six distinct cross products f[i]*f[j]
;# (i<j) are computed once, doubled via the self-add carry chain in
;# Step 2, then interleaved with the four diagonal squares in Step 3.
;# ABI:   Microsoft x64 (mingw build).
;# In:    rcx = tmp (64-byte scratch for the raw 512-bit square)
;#        rdx = f   (32 bytes, 4 limbs)
;#        r8  = out (32 bytes, 4 limbs)
;# Saves the Win64 callee-saved registers it touches (rbx, rsi, rdi,
;# r12-r15); clobbers rax, rcx, rdx, r8-r11 and flags.
;# adcx/adox run two interleaved carry chains; do not reorder.
;#-----------------------------------------------------------------------
.global fsqr_e
fsqr_e:
push %r15
push %r13
push %r14
push %r12
push %rbx
push %rsi
push %rdi
;# Map Win64 args: rdi=tmp, rsi=f, r12=out
mov %rcx, %rdi
mov %rdx, %rsi
mov %r8, %r12
;# Compute the raw multiplication: tmp <- f * f
;# Step 1: Compute all partial products
movq 0(%rsi), %rdx
;# f[0]
mulxq 8(%rsi), %r8, %r14
xor %r15d, %r15d
;# f[1]*f[0]
mulxq 16(%rsi), %r9, %r10
adcx %r14, %r9
;# f[2]*f[0]
mulxq 24(%rsi), %rax, %rcx
adcx %rax, %r10
;# f[3]*f[0]
movq 24(%rsi), %rdx
;# f[3]
mulxq 8(%rsi), %r11, %rbx
adcx %rcx, %r11
;# f[1]*f[3]
mulxq 16(%rsi), %rax, %r13
adcx %rax, %rbx
;# f[2]*f[3]
movq 8(%rsi), %rdx
adcx %r15, %r13
;# f1
mulxq 16(%rsi), %rax, %rcx
mov $0, %r14
;# f[2]*f[1]
;# Step 2: Compute two parallel carry chains
;# (adox merges the last cross product; adcx doubles every limb)
xor %r15d, %r15d
adox %rax, %r10
adcx %r8, %r8
adox %rcx, %r11
adcx %r9, %r9
adox %r15, %rbx
adcx %r10, %r10
adox %r15, %r13
adcx %r11, %r11
adox %r15, %r14
adcx %rbx, %rbx
adcx %r13, %r13
adcx %r14, %r14
;# Step 3: Compute intermediate squares
movq 0(%rsi), %rdx
mulx %rdx, %rax, %rcx
;# f[0]^2
movq %rax, 0(%rdi)
add %rcx, %r8
movq %r8, 8(%rdi)
movq 8(%rsi), %rdx
mulx %rdx, %rax, %rcx
;# f[1]^2
adcx %rax, %r9
movq %r9, 16(%rdi)
adcx %rcx, %r10
movq %r10, 24(%rdi)
movq 16(%rsi), %rdx
mulx %rdx, %rax, %rcx
;# f[2]^2
adcx %rax, %r11
movq %r11, 32(%rdi)
adcx %rcx, %rbx
movq %rbx, 40(%rdi)
movq 24(%rsi), %rdx
mulx %rdx, %rax, %rcx
;# f[3]^2
adcx %rax, %r13
movq %r13, 48(%rdi)
adcx %rcx, %r14
movq %r14, 56(%rdi)
;# Line up pointers
mov %rdi, %rsi
mov %r12, %rdi
;# Wrap the result back into the field (38 = 2*19 Curve25519 folding)
;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo
mov $38, %rdx
mulxq 32(%rsi), %r8, %r13
xor %ecx, %ecx
adoxq 0(%rsi), %r8
mulxq 40(%rsi), %r9, %rbx
adcx %r13, %r9
adoxq 8(%rsi), %r9
mulxq 48(%rsi), %r10, %r13
adcx %rbx, %r10
adoxq 16(%rsi), %r10
mulxq 56(%rsi), %r11, %rax
adcx %r13, %r11
adoxq 24(%rsi), %r11
adcx %rcx, %rax
adox %rcx, %rax
imul %rdx, %rax
;# Step 2: Fold the carry back into dst
add %rax, %r8
adcx %rcx, %r9
movq %r9, 8(%rdi)
adcx %rcx, %r10
movq %r10, 16(%rdi)
adcx %rcx, %r11
movq %r11, 24(%rdi)
;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point
mov $0, %rax
cmovc %rdx, %rax
add %rax, %r8
movq %r8, 0(%rdi)
pop %rdi
pop %rsi
pop %rbx
pop %r12
pop %r14
pop %r13
pop %r15
ret
;#-----------------------------------------------------------------------
;# fsqr2_e -- two independent 256-bit field squarings:
;#   out[0..3] <- f[0..3]^2   (mod 2^255 - 19)
;#   out[4..7] <- f[4..7]^2   (mod 2^255 - 19)
;# Same squaring schedule as fsqr_e, run twice (second pass over the
;# upper 32 bytes of f into tmp[64..127]), followed by two *38 folds.
;# ABI:   Microsoft x64 (mingw build).
;# In:    rcx = tmp (128-byte scratch), rdx = f (64 bytes),
;#        r8  = out (64 bytes)
;# Saves the Win64 callee-saved registers it touches (rbx, rsi, rdi,
;# r12-r15); clobbers rax, rcx, rdx, r8-r11 and flags.
;#-----------------------------------------------------------------------
.global fsqr2_e
fsqr2_e:
push %r15
push %r13
push %r14
push %r12
push %rbx
push %rsi
push %rdi
;# Map Win64 args: rdi=tmp, rsi=f, r12=out
mov %rcx, %rdi
mov %rdx, %rsi
mov %r8, %r12
;# First squaring: tmp[0..63] <- f[0..31]^2
;# Step 1: Compute all partial products
movq 0(%rsi), %rdx
;# f[0]
mulxq 8(%rsi), %r8, %r14
xor %r15d, %r15d
;# f[1]*f[0]
mulxq 16(%rsi), %r9, %r10
adcx %r14, %r9
;# f[2]*f[0]
mulxq 24(%rsi), %rax, %rcx
adcx %rax, %r10
;# f[3]*f[0]
movq 24(%rsi), %rdx
;# f[3]
mulxq 8(%rsi), %r11, %rbx
adcx %rcx, %r11
;# f[1]*f[3]
mulxq 16(%rsi), %rax, %r13
adcx %rax, %rbx
;# f[2]*f[3]
movq 8(%rsi), %rdx
adcx %r15, %r13
;# f1
mulxq 16(%rsi), %rax, %rcx
mov $0, %r14
;# f[2]*f[1]
;# Step 2: Compute two parallel carry chains
xor %r15d, %r15d
adox %rax, %r10
adcx %r8, %r8
adox %rcx, %r11
adcx %r9, %r9
adox %r15, %rbx
adcx %r10, %r10
adox %r15, %r13
adcx %r11, %r11
adox %r15, %r14
adcx %rbx, %rbx
adcx %r13, %r13
adcx %r14, %r14
;# Step 3: Compute intermediate squares
movq 0(%rsi), %rdx
mulx %rdx, %rax, %rcx
;# f[0]^2
movq %rax, 0(%rdi)
add %rcx, %r8
movq %r8, 8(%rdi)
movq 8(%rsi), %rdx
mulx %rdx, %rax, %rcx
;# f[1]^2
adcx %rax, %r9
movq %r9, 16(%rdi)
adcx %rcx, %r10
movq %r10, 24(%rdi)
movq 16(%rsi), %rdx
mulx %rdx, %rax, %rcx
;# f[2]^2
adcx %rax, %r11
movq %r11, 32(%rdi)
adcx %rcx, %rbx
movq %rbx, 40(%rdi)
movq 24(%rsi), %rdx
mulx %rdx, %rax, %rcx
;# f[3]^2
adcx %rax, %r13
movq %r13, 48(%rdi)
adcx %rcx, %r14
movq %r14, 56(%rdi)
;# Second squaring: tmp[64..127] <- f[32..63]^2
;# Step 1: Compute all partial products
movq 32(%rsi), %rdx
;# f[0]
mulxq 40(%rsi), %r8, %r14
xor %r15d, %r15d
;# f[1]*f[0]
mulxq 48(%rsi), %r9, %r10
adcx %r14, %r9
;# f[2]*f[0]
mulxq 56(%rsi), %rax, %rcx
adcx %rax, %r10
;# f[3]*f[0]
movq 56(%rsi), %rdx
;# f[3]
mulxq 40(%rsi), %r11, %rbx
adcx %rcx, %r11
;# f[1]*f[3]
mulxq 48(%rsi), %rax, %r13
adcx %rax, %rbx
;# f[2]*f[3]
movq 40(%rsi), %rdx
adcx %r15, %r13
;# f1
mulxq 48(%rsi), %rax, %rcx
mov $0, %r14
;# f[2]*f[1]
;# Step 2: Compute two parallel carry chains
xor %r15d, %r15d
adox %rax, %r10
adcx %r8, %r8
adox %rcx, %r11
adcx %r9, %r9
adox %r15, %rbx
adcx %r10, %r10
adox %r15, %r13
adcx %r11, %r11
adox %r15, %r14
adcx %rbx, %rbx
adcx %r13, %r13
adcx %r14, %r14
;# Step 3: Compute intermediate squares
movq 32(%rsi), %rdx
mulx %rdx, %rax, %rcx
;# f[0]^2
movq %rax, 64(%rdi)
add %rcx, %r8
movq %r8, 72(%rdi)
movq 40(%rsi), %rdx
mulx %rdx, %rax, %rcx
;# f[1]^2
adcx %rax, %r9
movq %r9, 80(%rdi)
adcx %rcx, %r10
movq %r10, 88(%rdi)
movq 48(%rsi), %rdx
mulx %rdx, %rax, %rcx
;# f[2]^2
adcx %rax, %r11
movq %r11, 96(%rdi)
adcx %rcx, %rbx
movq %rbx, 104(%rdi)
movq 56(%rsi), %rdx
mulx %rdx, %rax, %rcx
;# f[3]^2
adcx %rax, %r13
movq %r13, 112(%rdi)
adcx %rcx, %r14
movq %r14, 120(%rdi)
;# Line up pointers
mov %rdi, %rsi
mov %r12, %rdi
;# Reduce first square: out[0..31] (38 = 2*19 Curve25519 folding)
;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo
mov $38, %rdx
mulxq 32(%rsi), %r8, %r13
xor %ecx, %ecx
adoxq 0(%rsi), %r8
mulxq 40(%rsi), %r9, %rbx
adcx %r13, %r9
adoxq 8(%rsi), %r9
mulxq 48(%rsi), %r10, %r13
adcx %rbx, %r10
adoxq 16(%rsi), %r10
mulxq 56(%rsi), %r11, %rax
adcx %r13, %r11
adoxq 24(%rsi), %r11
adcx %rcx, %rax
adox %rcx, %rax
imul %rdx, %rax
;# Step 2: Fold the carry back into dst
add %rax, %r8
adcx %rcx, %r9
movq %r9, 8(%rdi)
adcx %rcx, %r10
movq %r10, 16(%rdi)
adcx %rcx, %r11
movq %r11, 24(%rdi)
;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point
mov $0, %rax
cmovc %rdx, %rax
add %rax, %r8
movq %r8, 0(%rdi)
;# Reduce second square: out[32..63]
;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo
mov $38, %rdx
mulxq 96(%rsi), %r8, %r13
xor %ecx, %ecx
adoxq 64(%rsi), %r8
mulxq 104(%rsi), %r9, %rbx
adcx %r13, %r9
adoxq 72(%rsi), %r9
mulxq 112(%rsi), %r10, %r13
adcx %rbx, %r10
adoxq 80(%rsi), %r10
mulxq 120(%rsi), %r11, %rax
adcx %r13, %r11
adoxq 88(%rsi), %r11
adcx %rcx, %rax
adox %rcx, %rax
imul %rdx, %rax
;# Step 2: Fold the carry back into dst
add %rax, %r8
adcx %rcx, %r9
movq %r9, 40(%rdi)
adcx %rcx, %r10
movq %r10, 48(%rdi)
adcx %rcx, %r11
movq %r11, 56(%rdi)
;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point
mov $0, %rax
cmovc %rdx, %rax
add %rax, %r8
movq %r8, 32(%rdi)
pop %rdi
pop %rsi
pop %rbx
pop %r12
pop %r14
pop %r13
pop %r15
ret
;#-----------------------------------------------------------------------
;# cswap2_e -- branch-free conditional swap of two 64-byte buffers.
;# Swaps p1[i] <-> p2[i] for i = 0..7 (eight 64-bit limbs each) iff the
;# swap bit is nonzero. The selection uses cmovc only, so control flow
;# does not depend on the secret bit (constant-time swap for the Mont-
;# gomery ladder).
;# ABI:   Microsoft x64 (mingw build).
;# In:    rcx = bit (0 or 1), rdx = p1 (64 bytes), r8 = p2 (64 bytes)
;# Saves rsi/rdi (callee-saved on Win64); clobbers r8-r10 and flags.
;#-----------------------------------------------------------------------
.global cswap2_e
cswap2_e:
push %rdi
push %rsi
;# Map Win64 args: rdi=bit, rsi=p1, rdx=p2
mov %rcx, %rdi
mov %rdx, %rsi
mov %r8, %rdx
;# Transfer bit into CF flag
;# adding 2^64-1 sets CF exactly when rdi != 0
add $18446744073709551615, %rdi
;# cswap p1[0], p2[0]
movq 0(%rsi), %r8
movq 0(%rdx), %r9
mov %r8, %r10
cmovc %r9, %r8
cmovc %r10, %r9
movq %r8, 0(%rsi)
movq %r9, 0(%rdx)
;# cswap p1[1], p2[1]
movq 8(%rsi), %r8
movq 8(%rdx), %r9
mov %r8, %r10
cmovc %r9, %r8
cmovc %r10, %r9
movq %r8, 8(%rsi)
movq %r9, 8(%rdx)
;# cswap p1[2], p2[2]
movq 16(%rsi), %r8
movq 16(%rdx), %r9
mov %r8, %r10
cmovc %r9, %r8
cmovc %r10, %r9
movq %r8, 16(%rsi)
movq %r9, 16(%rdx)
;# cswap p1[3], p2[3]
movq 24(%rsi), %r8
movq 24(%rdx), %r9
mov %r8, %r10
cmovc %r9, %r8
cmovc %r10, %r9
movq %r8, 24(%rsi)
movq %r9, 24(%rdx)
;# cswap p1[4], p2[4]
movq 32(%rsi), %r8
movq 32(%rdx), %r9
mov %r8, %r10
cmovc %r9, %r8
cmovc %r10, %r9
movq %r8, 32(%rsi)
movq %r9, 32(%rdx)
;# cswap p1[5], p2[5]
movq 40(%rsi), %r8
movq 40(%rdx), %r9
mov %r8, %r10
cmovc %r9, %r8
cmovc %r10, %r9
movq %r8, 40(%rsi)
movq %r9, 40(%rdx)
;# cswap p1[6], p2[6]
movq 48(%rsi), %r8
movq 48(%rdx), %r9
mov %r8, %r10
cmovc %r9, %r8
cmovc %r10, %r9
movq %r8, 48(%rsi)
movq %r9, 48(%rdx)
;# cswap p1[7], p2[7]
movq 56(%rsi), %r8
movq 56(%rdx), %r9
mov %r8, %r10
cmovc %r9, %r8
cmovc %r10, %r9
movq %r8, 56(%rsi)
movq %r9, 56(%rdx)
pop %rsi
pop %rdi
ret
;# ---- file boundary (dataset-extraction artifact) ----
;# repo: usenix-security-verdict/verdict (18,925 bytes)
;# next file: deps/libcrux/sys/hacl/c/vale/src/curve25519-x86_64-darwin.S
;# (SysV AMD64 / Mach-O variants of the routines above; symbols carry the
;#  Darwin leading underscore)
.text
.text
;#-----------------------------------------------------------------------
;# _add_scalar_e -- 4-limb addition of a 64-bit scalar:
;#   out[0..3] <- f1[0..3] + f2, returning the final carry bit in rax.
;# ABI:   System V AMD64 (Darwin; hence the leading underscore).
;# In:    rdi = out (32 bytes), rsi = f1 (32 bytes), rdx = f2 (scalar)
;# Out:   rax = carry out of the most significant limb (0 or 1)
;# Clobb: rdx, r8-r11, flags
;# NOTE(review): rdi/rsi are caller-saved under SysV, so the push/pop
;# pair below is redundant but harmless (Vale-generated prologue).
;#-----------------------------------------------------------------------
.global _add_scalar_e
_add_scalar_e:
push %rdi
push %rsi
;# Clear registers to propagate the carry bit
xor %r8d, %r8d
xor %r9d, %r9d
xor %r10d, %r10d
xor %r11d, %r11d
xor %eax, %eax
;# Begin addition chain
;# limb 0: scalar added directly; limbs 1-3: f1[i] + carry (reg is 0)
addq 0(%rsi), %rdx
movq %rdx, 0(%rdi)
adcxq 8(%rsi), %r8
movq %r8, 8(%rdi)
adcxq 16(%rsi), %r9
movq %r9, 16(%rdi)
adcxq 24(%rsi), %r10
movq %r10, 24(%rdi)
;# Return the carry bit in a register
adcx %r11, %rax
pop %rsi
pop %rdi
ret
;#-----------------------------------------------------------------------
;# _fadd_e -- 256-bit field addition: out <- f1 + f2 (mod 2^255 - 19).
;# Any carry out of the 256-bit add is folded back in as +38 (= 2*19),
;# the standard Curve25519 wrap.
;# ABI:   System V AMD64 (Darwin).
;# In:    rdi = out (32 bytes), rsi = f1 (32 bytes), rdx = f2 (32 bytes)
;# Clobb: rax, rcx, rdx, r8-r11, flags. Leaf; no stack usage.
;#-----------------------------------------------------------------------
.global _fadd_e
_fadd_e:
;# Compute the raw addition of f1 + f2
movq 0(%rdx), %r8
addq 0(%rsi), %r8
movq 8(%rdx), %r9
adcxq 8(%rsi), %r9
movq 16(%rdx), %r10
adcxq 16(%rsi), %r10
movq 24(%rdx), %r11
adcxq 24(%rsi), %r11
;# Wrap the result back into the field
;# Step 1: Compute carry*38
;# cmovc keeps the selection branch-free (constant time)
mov $0, %rax
mov $38, %rdx
cmovc %rdx, %rax
;# Step 2: Add carry*38 to the original sum
xor %ecx, %ecx
add %rax, %r8
adcx %rcx, %r9
movq %r9, 8(%rdi)
adcx %rcx, %r10
movq %r10, 16(%rdi)
adcx %rcx, %r11
movq %r11, 24(%rdi)
;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point
mov $0, %rax
cmovc %rdx, %rax
add %rax, %r8
movq %r8, 0(%rdi)
ret
;#-----------------------------------------------------------------------
;# _fsub_e -- 256-bit field subtraction: out <- f1 - f2 (mod 2^255 - 19).
;# A borrow out of the 256-bit subtract is folded back by subtracting 38
;# (= 2*19), mirroring the addition wrap in _fadd_e.
;# ABI:   System V AMD64 (Darwin).
;# In:    rdi = out (32 bytes), rsi = f1 (32 bytes), rdx = f2 (32 bytes)
;# Clobb: rax, rcx, r8-r11, flags. Leaf; no stack usage.
;#-----------------------------------------------------------------------
.global _fsub_e
_fsub_e:
;# Compute the raw substraction of f1-f2
movq 0(%rsi), %r8
subq 0(%rdx), %r8
movq 8(%rsi), %r9
sbbq 8(%rdx), %r9
movq 16(%rsi), %r10
sbbq 16(%rdx), %r10
movq 24(%rsi), %r11
sbbq 24(%rdx), %r11
;# Wrap the result back into the field
;# Step 1: Compute carry*38
mov $0, %rax
mov $38, %rcx
cmovc %rcx, %rax
;# Step 2: Substract carry*38 from the original difference
sub %rax, %r8
sbb $0, %r9
sbb $0, %r10
sbb $0, %r11
;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point
mov $0, %rax
cmovc %rcx, %rax
sub %rax, %r8
;# Store the result
movq %r8, 0(%rdi)
movq %r9, 8(%rdi)
movq %r10, 16(%rdi)
movq %r11, 24(%rdi)
ret
;#-----------------------------------------------------------------------
;# _fmul_scalar_e -- 256-bit by 64-bit multiply: out <- f1 * f2
;# (mod 2^255 - 19). The scalar arrives in rdx, which is the implicit
;# source operand of every mulx below; the high limb is folded back in
;# via *38 (= 2*19).
;# ABI:   System V AMD64 (Darwin).
;# In:    rdi = out (32 bytes), rsi = f1 (32 bytes), rdx = f2 (scalar)
;# Saves rbx/r13 (callee-saved); clobbers rax, rcx, rdx, r8-r11, flags.
;# NOTE(review): the rdi push/pop is redundant under SysV (caller-saved)
;# but harmless.
;#-----------------------------------------------------------------------
.global _fmul_scalar_e
_fmul_scalar_e:
push %rdi
push %r13
push %rbx
;# Compute the raw multiplication of f1*f2
mulxq 0(%rsi), %r8, %rcx
;# f1[0]*f2
mulxq 8(%rsi), %r9, %rbx
;# f1[1]*f2
add %rcx, %r9
mov $0, %rcx
mulxq 16(%rsi), %r10, %r13
;# f1[2]*f2
adcx %rbx, %r10
mulxq 24(%rsi), %r11, %rax
;# f1[3]*f2
adcx %r13, %r11
adcx %rcx, %rax
;# Wrap the result back into the field
;# Step 1: Compute carry*38
mov $38, %rdx
imul %rdx, %rax
;# Step 2: Fold the carry back into dst
add %rax, %r8
adcx %rcx, %r9
movq %r9, 8(%rdi)
adcx %rcx, %r10
movq %r10, 16(%rdi)
adcx %rcx, %r11
movq %r11, 24(%rdi)
;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point
mov $0, %rax
cmovc %rdx, %rax
add %rax, %r8
movq %r8, 0(%rdi)
pop %rbx
pop %r13
pop %rdi
ret
;#-----------------------------------------------------------------------
;# _fmul_e -- 256-bit field multiplication: out <- f1 * f2
;# (mod 2^255 - 19). Schoolbook 4x4 limb product into a 64-byte scratch
;# buffer, then the *38 (= 2*19) Curve25519 fold of the high 256 bits
;# back into the low 256 bits.
;# ABI:   System V AMD64 (Darwin).
;# In:    rdi = tmp (64-byte scratch), rsi = f1 (32 bytes),
;#        rdx = out (32 bytes; stashed in r15), rcx = f2 (32 bytes)
;# Saves rbx, r13-r15 (callee-saved); clobbers rax, rcx, rdx, rsi, rdi,
;# r8-r11 and flags.
;# Dual carry chains: OF via adox, CF via adcx -- do not reorder.
;#-----------------------------------------------------------------------
.global _fmul_e
_fmul_e:
push %r13
push %r14
push %r15
push %rbx
;# out pointer must survive the multiply; rdx is consumed by mulx
mov %rdx, %r15
;# Compute the raw multiplication: tmp <- src1 * src2
;# Compute src1[0] * src2
movq 0(%rsi), %rdx
mulxq 0(%rcx), %r8, %r9
xor %r10d, %r10d
movq %r8, 0(%rdi)
mulxq 8(%rcx), %r10, %r11
adox %r9, %r10
movq %r10, 8(%rdi)
mulxq 16(%rcx), %rbx, %r13
adox %r11, %rbx
mulxq 24(%rcx), %r14, %rdx
adox %r13, %r14
mov $0, %rax
adox %rdx, %rax
;# Compute src1[1] * src2
movq 8(%rsi), %rdx
mulxq 0(%rcx), %r8, %r9
xor %r10d, %r10d
adcxq 8(%rdi), %r8
movq %r8, 8(%rdi)
mulxq 8(%rcx), %r10, %r11
adox %r9, %r10
adcx %rbx, %r10
movq %r10, 16(%rdi)
mulxq 16(%rcx), %rbx, %r13
adox %r11, %rbx
adcx %r14, %rbx
mov $0, %r8
mulxq 24(%rcx), %r14, %rdx
adox %r13, %r14
adcx %rax, %r14
mov $0, %rax
adox %rdx, %rax
adcx %r8, %rax
;# Compute src1[2] * src2
movq 16(%rsi), %rdx
mulxq 0(%rcx), %r8, %r9
xor %r10d, %r10d
adcxq 16(%rdi), %r8
movq %r8, 16(%rdi)
mulxq 8(%rcx), %r10, %r11
adox %r9, %r10
adcx %rbx, %r10
movq %r10, 24(%rdi)
mulxq 16(%rcx), %rbx, %r13
adox %r11, %rbx
adcx %r14, %rbx
mov $0, %r8
mulxq 24(%rcx), %r14, %rdx
adox %r13, %r14
adcx %rax, %r14
mov $0, %rax
adox %rdx, %rax
adcx %r8, %rax
;# Compute src1[3] * src2
movq 24(%rsi), %rdx
mulxq 0(%rcx), %r8, %r9
xor %r10d, %r10d
adcxq 24(%rdi), %r8
movq %r8, 24(%rdi)
mulxq 8(%rcx), %r10, %r11
adox %r9, %r10
adcx %rbx, %r10
movq %r10, 32(%rdi)
mulxq 16(%rcx), %rbx, %r13
adox %r11, %rbx
adcx %r14, %rbx
movq %rbx, 40(%rdi)
mov $0, %r8
mulxq 24(%rcx), %r14, %rdx
adox %r13, %r14
adcx %rax, %r14
movq %r14, 48(%rdi)
mov $0, %rax
adox %rdx, %rax
adcx %r8, %rax
movq %rax, 56(%rdi)
;# Line up pointers
;# rsi now reads the raw 512-bit product; rdi is the final out buffer
mov %rdi, %rsi
mov %r15, %rdi
;# Wrap the result back into the field
;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo
mov $38, %rdx
mulxq 32(%rsi), %r8, %r13
xor %ecx, %ecx
adoxq 0(%rsi), %r8
mulxq 40(%rsi), %r9, %rbx
adcx %r13, %r9
adoxq 8(%rsi), %r9
mulxq 48(%rsi), %r10, %r13
adcx %rbx, %r10
adoxq 16(%rsi), %r10
mulxq 56(%rsi), %r11, %rax
adcx %r13, %r11
adoxq 24(%rsi), %r11
adcx %rcx, %rax
adox %rcx, %rax
imul %rdx, %rax
;# Step 2: Fold the carry back into dst
add %rax, %r8
adcx %rcx, %r9
movq %r9, 8(%rdi)
adcx %rcx, %r10
movq %r10, 16(%rdi)
adcx %rcx, %r11
movq %r11, 24(%rdi)
;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point
mov $0, %rax
cmovc %rdx, %rax
add %rax, %r8
movq %r8, 0(%rdi)
pop %rbx
pop %r15
pop %r14
pop %r13
ret
;#-----------------------------------------------------------------------
;# _fmul2_e -- two independent 256-bit field multiplications:
;#   out[0..3] <- f1[0..3] * f2[0..3]   (mod 2^255 - 19)
;#   out[4..7] <- f1[4..7] * f2[4..7]   (mod 2^255 - 19)
;# Same schedule as _fmul_e, run twice (second pass over the upper
;# 32-byte halves into tmp[64..127]), then two *38 (= 2*19) folds.
;# ABI:   System V AMD64 (Darwin).
;# In:    rdi = tmp (128-byte scratch), rsi = f1 (64 bytes),
;#        rdx = out (64 bytes; stashed in r15), rcx = f2 (64 bytes)
;# Saves rbx, r13-r15 (callee-saved); clobbers rax, rcx, rdx, rsi, rdi,
;# r8-r11 and flags. adcx/adox dual carry chains -- do not reorder.
;#-----------------------------------------------------------------------
.global _fmul2_e
_fmul2_e:
push %r13
push %r14
push %r15
push %rbx
mov %rdx, %r15
;# Compute the raw multiplication tmp[0] <- f1[0] * f2[0]
;# Compute src1[0] * src2
movq 0(%rsi), %rdx
mulxq 0(%rcx), %r8, %r9
xor %r10d, %r10d
movq %r8, 0(%rdi)
mulxq 8(%rcx), %r10, %r11
adox %r9, %r10
movq %r10, 8(%rdi)
mulxq 16(%rcx), %rbx, %r13
adox %r11, %rbx
mulxq 24(%rcx), %r14, %rdx
adox %r13, %r14
mov $0, %rax
adox %rdx, %rax
;# Compute src1[1] * src2
movq 8(%rsi), %rdx
mulxq 0(%rcx), %r8, %r9
xor %r10d, %r10d
adcxq 8(%rdi), %r8
movq %r8, 8(%rdi)
mulxq 8(%rcx), %r10, %r11
adox %r9, %r10
adcx %rbx, %r10
movq %r10, 16(%rdi)
mulxq 16(%rcx), %rbx, %r13
adox %r11, %rbx
adcx %r14, %rbx
mov $0, %r8
mulxq 24(%rcx), %r14, %rdx
adox %r13, %r14
adcx %rax, %r14
mov $0, %rax
adox %rdx, %rax
adcx %r8, %rax
;# Compute src1[2] * src2
movq 16(%rsi), %rdx
mulxq 0(%rcx), %r8, %r9
xor %r10d, %r10d
adcxq 16(%rdi), %r8
movq %r8, 16(%rdi)
mulxq 8(%rcx), %r10, %r11
adox %r9, %r10
adcx %rbx, %r10
movq %r10, 24(%rdi)
mulxq 16(%rcx), %rbx, %r13
adox %r11, %rbx
adcx %r14, %rbx
mov $0, %r8
mulxq 24(%rcx), %r14, %rdx
adox %r13, %r14
adcx %rax, %r14
mov $0, %rax
adox %rdx, %rax
adcx %r8, %rax
;# Compute src1[3] * src2
movq 24(%rsi), %rdx
mulxq 0(%rcx), %r8, %r9
xor %r10d, %r10d
adcxq 24(%rdi), %r8
movq %r8, 24(%rdi)
mulxq 8(%rcx), %r10, %r11
adox %r9, %r10
adcx %rbx, %r10
movq %r10, 32(%rdi)
mulxq 16(%rcx), %rbx, %r13
adox %r11, %rbx
adcx %r14, %rbx
movq %rbx, 40(%rdi)
mov $0, %r8
mulxq 24(%rcx), %r14, %rdx
adox %r13, %r14
adcx %rax, %r14
movq %r14, 48(%rdi)
mov $0, %rax
adox %rdx, %rax
adcx %r8, %rax
movq %rax, 56(%rdi)
;# Compute the raw multiplication tmp[1] <- f1[1] * f2[1]
;# Compute src1[0] * src2
movq 32(%rsi), %rdx
mulxq 32(%rcx), %r8, %r9
xor %r10d, %r10d
movq %r8, 64(%rdi)
mulxq 40(%rcx), %r10, %r11
adox %r9, %r10
movq %r10, 72(%rdi)
mulxq 48(%rcx), %rbx, %r13
adox %r11, %rbx
mulxq 56(%rcx), %r14, %rdx
adox %r13, %r14
mov $0, %rax
adox %rdx, %rax
;# Compute src1[1] * src2
movq 40(%rsi), %rdx
mulxq 32(%rcx), %r8, %r9
xor %r10d, %r10d
adcxq 72(%rdi), %r8
movq %r8, 72(%rdi)
mulxq 40(%rcx), %r10, %r11
adox %r9, %r10
adcx %rbx, %r10
movq %r10, 80(%rdi)
mulxq 48(%rcx), %rbx, %r13
adox %r11, %rbx
adcx %r14, %rbx
mov $0, %r8
mulxq 56(%rcx), %r14, %rdx
adox %r13, %r14
adcx %rax, %r14
mov $0, %rax
adox %rdx, %rax
adcx %r8, %rax
;# Compute src1[2] * src2
movq 48(%rsi), %rdx
mulxq 32(%rcx), %r8, %r9
xor %r10d, %r10d
adcxq 80(%rdi), %r8
movq %r8, 80(%rdi)
mulxq 40(%rcx), %r10, %r11
adox %r9, %r10
adcx %rbx, %r10
movq %r10, 88(%rdi)
mulxq 48(%rcx), %rbx, %r13
adox %r11, %rbx
adcx %r14, %rbx
mov $0, %r8
mulxq 56(%rcx), %r14, %rdx
adox %r13, %r14
adcx %rax, %r14
mov $0, %rax
adox %rdx, %rax
adcx %r8, %rax
;# Compute src1[3] * src2
movq 56(%rsi), %rdx
mulxq 32(%rcx), %r8, %r9
xor %r10d, %r10d
adcxq 88(%rdi), %r8
movq %r8, 88(%rdi)
mulxq 40(%rcx), %r10, %r11
adox %r9, %r10
adcx %rbx, %r10
movq %r10, 96(%rdi)
mulxq 48(%rcx), %rbx, %r13
adox %r11, %rbx
adcx %r14, %rbx
movq %rbx, 104(%rdi)
mov $0, %r8
mulxq 56(%rcx), %r14, %rdx
adox %r13, %r14
adcx %rax, %r14
movq %r14, 112(%rdi)
mov $0, %rax
adox %rdx, %rax
adcx %r8, %rax
movq %rax, 120(%rdi)
;# Line up pointers
mov %rdi, %rsi
mov %r15, %rdi
;# Wrap the results back into the field
;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo
mov $38, %rdx
mulxq 32(%rsi), %r8, %r13
xor %ecx, %ecx
adoxq 0(%rsi), %r8
mulxq 40(%rsi), %r9, %rbx
adcx %r13, %r9
adoxq 8(%rsi), %r9
mulxq 48(%rsi), %r10, %r13
adcx %rbx, %r10
adoxq 16(%rsi), %r10
mulxq 56(%rsi), %r11, %rax
adcx %r13, %r11
adoxq 24(%rsi), %r11
adcx %rcx, %rax
adox %rcx, %rax
imul %rdx, %rax
;# Step 2: Fold the carry back into dst
add %rax, %r8
adcx %rcx, %r9
movq %r9, 8(%rdi)
adcx %rcx, %r10
movq %r10, 16(%rdi)
adcx %rcx, %r11
movq %r11, 24(%rdi)
;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point
mov $0, %rax
cmovc %rdx, %rax
add %rax, %r8
movq %r8, 0(%rdi)
;# Second reduction: tmp[64..127] -> out[32..63]
;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo
mov $38, %rdx
mulxq 96(%rsi), %r8, %r13
xor %ecx, %ecx
adoxq 64(%rsi), %r8
mulxq 104(%rsi), %r9, %rbx
adcx %r13, %r9
adoxq 72(%rsi), %r9
mulxq 112(%rsi), %r10, %r13
adcx %rbx, %r10
adoxq 80(%rsi), %r10
mulxq 120(%rsi), %r11, %rax
adcx %r13, %r11
adoxq 88(%rsi), %r11
adcx %rcx, %rax
adox %rcx, %rax
imul %rdx, %rax
;# Step 2: Fold the carry back into dst
add %rax, %r8
adcx %rcx, %r9
movq %r9, 40(%rdi)
adcx %rcx, %r10
movq %r10, 48(%rdi)
adcx %rcx, %r11
movq %r11, 56(%rdi)
;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point
mov $0, %rax
cmovc %rdx, %rax
add %rax, %r8
movq %r8, 32(%rdi)
pop %rbx
pop %r15
pop %r14
pop %r13
ret
;#-----------------------------------------------------------------------
;# _fsqr_e -- 256-bit field squaring: out <- f^2 (mod 2^255 - 19).
;# Computes the six distinct cross products once, doubles them via the
;# self-add carry chain (Step 2), interleaves the diagonal squares
;# (Step 3), then applies the *38 (= 2*19) Curve25519 fold.
;# ABI:   System V AMD64 (Darwin).
;# In:    rdi = tmp (64-byte scratch), rsi = f (32 bytes),
;#        rdx = out (32 bytes; stashed in r12)
;# Saves rbx, r12-r15 (callee-saved); clobbers rax, rcx, rdx, rsi, rdi,
;# r8-r11 and flags. adcx/adox dual carry chains -- do not reorder.
;#-----------------------------------------------------------------------
.global _fsqr_e
_fsqr_e:
push %r15
push %r13
push %r14
push %r12
push %rbx
mov %rdx, %r12
;# Compute the raw multiplication: tmp <- f * f
;# Step 1: Compute all partial products
movq 0(%rsi), %rdx
;# f[0]
mulxq 8(%rsi), %r8, %r14
xor %r15d, %r15d
;# f[1]*f[0]
mulxq 16(%rsi), %r9, %r10
adcx %r14, %r9
;# f[2]*f[0]
mulxq 24(%rsi), %rax, %rcx
adcx %rax, %r10
;# f[3]*f[0]
movq 24(%rsi), %rdx
;# f[3]
mulxq 8(%rsi), %r11, %rbx
adcx %rcx, %r11
;# f[1]*f[3]
mulxq 16(%rsi), %rax, %r13
adcx %rax, %rbx
;# f[2]*f[3]
movq 8(%rsi), %rdx
adcx %r15, %r13
;# f1
mulxq 16(%rsi), %rax, %rcx
mov $0, %r14
;# f[2]*f[1]
;# Step 2: Compute two parallel carry chains
xor %r15d, %r15d
adox %rax, %r10
adcx %r8, %r8
adox %rcx, %r11
adcx %r9, %r9
adox %r15, %rbx
adcx %r10, %r10
adox %r15, %r13
adcx %r11, %r11
adox %r15, %r14
adcx %rbx, %rbx
adcx %r13, %r13
adcx %r14, %r14
;# Step 3: Compute intermediate squares
movq 0(%rsi), %rdx
mulx %rdx, %rax, %rcx
;# f[0]^2
movq %rax, 0(%rdi)
add %rcx, %r8
movq %r8, 8(%rdi)
movq 8(%rsi), %rdx
mulx %rdx, %rax, %rcx
;# f[1]^2
adcx %rax, %r9
movq %r9, 16(%rdi)
adcx %rcx, %r10
movq %r10, 24(%rdi)
movq 16(%rsi), %rdx
mulx %rdx, %rax, %rcx
;# f[2]^2
adcx %rax, %r11
movq %r11, 32(%rdi)
adcx %rcx, %rbx
movq %rbx, 40(%rdi)
movq 24(%rsi), %rdx
mulx %rdx, %rax, %rcx
;# f[3]^2
adcx %rax, %r13
movq %r13, 48(%rdi)
adcx %rcx, %r14
movq %r14, 56(%rdi)
;# Line up pointers
mov %rdi, %rsi
mov %r12, %rdi
;# Wrap the result back into the field
;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo
mov $38, %rdx
mulxq 32(%rsi), %r8, %r13
xor %ecx, %ecx
adoxq 0(%rsi), %r8
mulxq 40(%rsi), %r9, %rbx
adcx %r13, %r9
adoxq 8(%rsi), %r9
mulxq 48(%rsi), %r10, %r13
adcx %rbx, %r10
adoxq 16(%rsi), %r10
mulxq 56(%rsi), %r11, %rax
adcx %r13, %r11
adoxq 24(%rsi), %r11
adcx %rcx, %rax
adox %rcx, %rax
imul %rdx, %rax
;# Step 2: Fold the carry back into dst
add %rax, %r8
adcx %rcx, %r9
movq %r9, 8(%rdi)
adcx %rcx, %r10
movq %r10, 16(%rdi)
adcx %rcx, %r11
movq %r11, 24(%rdi)
;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point
mov $0, %rax
cmovc %rdx, %rax
add %rax, %r8
movq %r8, 0(%rdi)
pop %rbx
pop %r12
pop %r14
pop %r13
pop %r15
ret
;#-----------------------------------------------------------------------
;# _fsqr2_e -- two independent 256-bit field squarings:
;#   out[0..3] <- f[0..3]^2   (mod 2^255 - 19)
;#   out[4..7] <- f[4..7]^2   (mod 2^255 - 19)
;# Same squaring schedule as _fsqr_e, run twice (second pass over the
;# upper 32 bytes of f into tmp[64..127]), then two *38 (= 2*19) folds.
;# ABI:   System V AMD64 (Darwin).
;# In:    rdi = tmp (128-byte scratch), rsi = f (64 bytes),
;#        rdx = out (64 bytes; stashed in r12)
;# Saves rbx, r12-r15 (callee-saved); clobbers rax, rcx, rdx, rsi, rdi,
;# r8-r11 and flags. adcx/adox dual carry chains -- do not reorder.
;#-----------------------------------------------------------------------
.global _fsqr2_e
_fsqr2_e:
push %r15
push %r13
push %r14
push %r12
push %rbx
mov %rdx, %r12
;# First squaring: tmp[0..63] <- f[0..31]^2
;# Step 1: Compute all partial products
movq 0(%rsi), %rdx
;# f[0]
mulxq 8(%rsi), %r8, %r14
xor %r15d, %r15d
;# f[1]*f[0]
mulxq 16(%rsi), %r9, %r10
adcx %r14, %r9
;# f[2]*f[0]
mulxq 24(%rsi), %rax, %rcx
adcx %rax, %r10
;# f[3]*f[0]
movq 24(%rsi), %rdx
;# f[3]
mulxq 8(%rsi), %r11, %rbx
adcx %rcx, %r11
;# f[1]*f[3]
mulxq 16(%rsi), %rax, %r13
adcx %rax, %rbx
;# f[2]*f[3]
movq 8(%rsi), %rdx
adcx %r15, %r13
;# f1
mulxq 16(%rsi), %rax, %rcx
mov $0, %r14
;# f[2]*f[1]
;# Step 2: Compute two parallel carry chains
xor %r15d, %r15d
adox %rax, %r10
adcx %r8, %r8
adox %rcx, %r11
adcx %r9, %r9
adox %r15, %rbx
adcx %r10, %r10
adox %r15, %r13
adcx %r11, %r11
adox %r15, %r14
adcx %rbx, %rbx
adcx %r13, %r13
adcx %r14, %r14
;# Step 3: Compute intermediate squares
movq 0(%rsi), %rdx
mulx %rdx, %rax, %rcx
;# f[0]^2
movq %rax, 0(%rdi)
add %rcx, %r8
movq %r8, 8(%rdi)
movq 8(%rsi), %rdx
mulx %rdx, %rax, %rcx
;# f[1]^2
adcx %rax, %r9
movq %r9, 16(%rdi)
adcx %rcx, %r10
movq %r10, 24(%rdi)
movq 16(%rsi), %rdx
mulx %rdx, %rax, %rcx
;# f[2]^2
adcx %rax, %r11
movq %r11, 32(%rdi)
adcx %rcx, %rbx
movq %rbx, 40(%rdi)
movq 24(%rsi), %rdx
mulx %rdx, %rax, %rcx
;# f[3]^2
adcx %rax, %r13
movq %r13, 48(%rdi)
adcx %rcx, %r14
movq %r14, 56(%rdi)
;# Second squaring: tmp[64..127] <- f[32..63]^2
;# Step 1: Compute all partial products
movq 32(%rsi), %rdx
;# f[0]
mulxq 40(%rsi), %r8, %r14
xor %r15d, %r15d
;# f[1]*f[0]
mulxq 48(%rsi), %r9, %r10
adcx %r14, %r9
;# f[2]*f[0]
mulxq 56(%rsi), %rax, %rcx
adcx %rax, %r10
;# f[3]*f[0]
movq 56(%rsi), %rdx
;# f[3]
mulxq 40(%rsi), %r11, %rbx
adcx %rcx, %r11
;# f[1]*f[3]
mulxq 48(%rsi), %rax, %r13
adcx %rax, %rbx
;# f[2]*f[3]
movq 40(%rsi), %rdx
adcx %r15, %r13
;# f1
mulxq 48(%rsi), %rax, %rcx
mov $0, %r14
;# f[2]*f[1]
;# Step 2: Compute two parallel carry chains
xor %r15d, %r15d
adox %rax, %r10
adcx %r8, %r8
adox %rcx, %r11
adcx %r9, %r9
adox %r15, %rbx
adcx %r10, %r10
adox %r15, %r13
adcx %r11, %r11
adox %r15, %r14
adcx %rbx, %rbx
adcx %r13, %r13
adcx %r14, %r14
;# Step 3: Compute intermediate squares
movq 32(%rsi), %rdx
mulx %rdx, %rax, %rcx
;# f[0]^2
movq %rax, 64(%rdi)
add %rcx, %r8
movq %r8, 72(%rdi)
movq 40(%rsi), %rdx
mulx %rdx, %rax, %rcx
;# f[1]^2
adcx %rax, %r9
movq %r9, 80(%rdi)
adcx %rcx, %r10
movq %r10, 88(%rdi)
movq 48(%rsi), %rdx
mulx %rdx, %rax, %rcx
;# f[2]^2
adcx %rax, %r11
movq %r11, 96(%rdi)
adcx %rcx, %rbx
movq %rbx, 104(%rdi)
movq 56(%rsi), %rdx
mulx %rdx, %rax, %rcx
;# f[3]^2
adcx %rax, %r13
movq %r13, 112(%rdi)
adcx %rcx, %r14
movq %r14, 120(%rdi)
;# Line up pointers
mov %rdi, %rsi
mov %r12, %rdi
;# Reduce first square: out[0..31]
;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo
mov $38, %rdx
mulxq 32(%rsi), %r8, %r13
xor %ecx, %ecx
adoxq 0(%rsi), %r8
mulxq 40(%rsi), %r9, %rbx
adcx %r13, %r9
adoxq 8(%rsi), %r9
mulxq 48(%rsi), %r10, %r13
adcx %rbx, %r10
adoxq 16(%rsi), %r10
mulxq 56(%rsi), %r11, %rax
adcx %r13, %r11
adoxq 24(%rsi), %r11
adcx %rcx, %rax
adox %rcx, %rax
imul %rdx, %rax
;# Step 2: Fold the carry back into dst
add %rax, %r8
adcx %rcx, %r9
movq %r9, 8(%rdi)
adcx %rcx, %r10
movq %r10, 16(%rdi)
adcx %rcx, %r11
movq %r11, 24(%rdi)
;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point
mov $0, %rax
cmovc %rdx, %rax
add %rax, %r8
movq %r8, 0(%rdi)
;# Reduce second square: out[32..63]
;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo
mov $38, %rdx
mulxq 96(%rsi), %r8, %r13
xor %ecx, %ecx
adoxq 64(%rsi), %r8
mulxq 104(%rsi), %r9, %rbx
adcx %r13, %r9
adoxq 72(%rsi), %r9
mulxq 112(%rsi), %r10, %r13
adcx %rbx, %r10
adoxq 80(%rsi), %r10
mulxq 120(%rsi), %r11, %rax
adcx %r13, %r11
adoxq 88(%rsi), %r11
adcx %rcx, %rax
adox %rcx, %rax
imul %rdx, %rax
;# Step 2: Fold the carry back into dst
add %rax, %r8
adcx %rcx, %r9
movq %r9, 40(%rdi)
adcx %rcx, %r10
movq %r10, 48(%rdi)
adcx %rcx, %r11
movq %r11, 56(%rdi)
;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point
mov $0, %rax
cmovc %rdx, %rax
add %rax, %r8
movq %r8, 32(%rdi)
pop %rbx
pop %r12
pop %r14
pop %r13
pop %r15
ret
;#-----------------------------------------------------------------------
;# _cswap2_e -- branch-free conditional swap of two 64-byte buffers.
;# Swaps p1[i] <-> p2[i] for i = 0..7 iff the swap bit is nonzero.
;# cmovc-only selection keeps control flow independent of the secret
;# bit (constant-time swap for the Montgomery ladder).
;# ABI:   System V AMD64 (Darwin). Leaf; no stack usage.
;# In:    rdi = bit (0 or 1), rsi = p1 (64 bytes), rdx = p2 (64 bytes)
;# Clobb: rdi, r8-r10, flags
;#-----------------------------------------------------------------------
.global _cswap2_e
_cswap2_e:
;# Transfer bit into CF flag
;# adding 2^64-1 sets CF exactly when rdi != 0
add $18446744073709551615, %rdi
;# cswap p1[0], p2[0]
movq 0(%rsi), %r8
movq 0(%rdx), %r9
mov %r8, %r10
cmovc %r9, %r8
cmovc %r10, %r9
movq %r8, 0(%rsi)
movq %r9, 0(%rdx)
;# cswap p1[1], p2[1]
movq 8(%rsi), %r8
movq 8(%rdx), %r9
mov %r8, %r10
cmovc %r9, %r8
cmovc %r10, %r9
movq %r8, 8(%rsi)
movq %r9, 8(%rdx)
;# cswap p1[2], p2[2]
movq 16(%rsi), %r8
movq 16(%rdx), %r9
mov %r8, %r10
cmovc %r9, %r8
cmovc %r10, %r9
movq %r8, 16(%rsi)
movq %r9, 16(%rdx)
;# cswap p1[3], p2[3]
movq 24(%rsi), %r8
movq 24(%rdx), %r9
mov %r8, %r10
cmovc %r9, %r8
cmovc %r10, %r9
movq %r8, 24(%rsi)
movq %r9, 24(%rdx)
;# cswap p1[4], p2[4]
movq 32(%rsi), %r8
movq 32(%rdx), %r9
mov %r8, %r10
cmovc %r9, %r8
cmovc %r10, %r9
movq %r8, 32(%rsi)
movq %r9, 32(%rdx)
;# cswap p1[5], p2[5]
movq 40(%rsi), %r8
movq 40(%rdx), %r9
mov %r8, %r10
cmovc %r9, %r8
cmovc %r10, %r9
movq %r8, 40(%rsi)
movq %r9, 40(%rdx)
;# cswap p1[6], p2[6]
movq 48(%rsi), %r8
movq 48(%rdx), %r9
mov %r8, %r10
cmovc %r9, %r8
cmovc %r10, %r9
movq %r8, 48(%rsi)
movq %r9, 48(%rdx)
;# cswap p1[7], p2[7]
movq 56(%rsi), %r8
movq 56(%rdx), %r9
mov %r8, %r10
cmovc %r9, %r8
cmovc %r10, %r9
movq %r8, 56(%rsi)
movq %r9, 56(%rdx)
ret
;# ---- file boundary (dataset-extraction artifact) ----
;# repo: usenix-security-verdict/verdict (199,564 bytes)
;# next file: deps/libcrux/sys/hacl/c/vale/src/aesgcm-x86_64-darwin.S
;# (SysV AMD64 / Mach-O AES-GCM routines; symbols carry the Darwin
;#  leading underscore)
.text
.text
;#-----------------------------------------------------------------------
;# _aes128_key_expansion -- AES-128 key schedule via AES-NI.
;# Expands a 16-byte key into 11 round keys (176 bytes) at offsets
;# 0..160 of the output buffer. Each round repeats the same pattern:
;#   aeskeygenassist with the round constant (1,2,4,...,0x80,0x1b,0x36),
;#   pshufd $255 to broadcast the rotated/substituted word,
;#   three (shift-left-4, xor) steps to accumulate w ^= w<<32 three
;#   times across the 128-bit lane, then xor in the assist word.
;# ABI:   System V AMD64 (Darwin).
;# In:    rdi = key (16 bytes), rsi = expanded round keys (176 bytes)
;# Clobb: rdx, xmm1-xmm3, flags. xmm1-xmm3 are zeroed on exit so no key
;#        material is left in registers.
;#-----------------------------------------------------------------------
.global _aes128_key_expansion
_aes128_key_expansion:
movdqu 0(%rdi), %xmm1
mov %rsi, %rdx
;# round key 0 = the raw key
movdqu %xmm1, 0(%rdx)
aeskeygenassist $1, %xmm1, %xmm2
pshufd $255, %xmm2, %xmm2
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
pxor %xmm2, %xmm1
movdqu %xmm1, 16(%rdx)
aeskeygenassist $2, %xmm1, %xmm2
pshufd $255, %xmm2, %xmm2
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
pxor %xmm2, %xmm1
movdqu %xmm1, 32(%rdx)
aeskeygenassist $4, %xmm1, %xmm2
pshufd $255, %xmm2, %xmm2
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
pxor %xmm2, %xmm1
movdqu %xmm1, 48(%rdx)
aeskeygenassist $8, %xmm1, %xmm2
pshufd $255, %xmm2, %xmm2
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
pxor %xmm2, %xmm1
movdqu %xmm1, 64(%rdx)
aeskeygenassist $16, %xmm1, %xmm2
pshufd $255, %xmm2, %xmm2
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
pxor %xmm2, %xmm1
movdqu %xmm1, 80(%rdx)
aeskeygenassist $32, %xmm1, %xmm2
pshufd $255, %xmm2, %xmm2
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
pxor %xmm2, %xmm1
movdqu %xmm1, 96(%rdx)
aeskeygenassist $64, %xmm1, %xmm2
pshufd $255, %xmm2, %xmm2
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
pxor %xmm2, %xmm1
movdqu %xmm1, 112(%rdx)
aeskeygenassist $128, %xmm1, %xmm2
pshufd $255, %xmm2, %xmm2
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
pxor %xmm2, %xmm1
movdqu %xmm1, 128(%rdx)
;# RCON wraps in GF(2^8): rounds 9 and 10 use 0x1b and 0x36
aeskeygenassist $27, %xmm1, %xmm2
pshufd $255, %xmm2, %xmm2
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
pxor %xmm2, %xmm1
movdqu %xmm1, 144(%rdx)
aeskeygenassist $54, %xmm1, %xmm2
pshufd $255, %xmm2, %xmm2
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
vpslldq $4, %xmm1, %xmm3
pxor %xmm3, %xmm1
pxor %xmm2, %xmm1
movdqu %xmm1, 160(%rdx)
;# Scrub key material from the xmm registers before returning
pxor %xmm1, %xmm1
pxor %xmm2, %xmm2
pxor %xmm3, %xmm3
ret
/*
 * _aes128_keyhash_init(key_schedule, keys_struct)
 *
 * Machine-generated (Vale-style verified assembly): do NOT hand-edit the
 * instruction stream; its correctness is established by the generating
 * proof, and statement order is significant.
 *
 * In:   %rdi = expanded AES-128 round-key schedule (11 x 16 bytes)
 *       %rsi = output GHASH key structure
 * Note: this underscore-prefixed entry point reads its arguments from
 *       %rdi/%rsi (System V registers) even though the file is the
 *       "-mingw" variant; the non-underscore entry points in this file
 *       use %rcx/%rdx instead.
 *
 * Visible behaviour:
 *  1. zeroes the 16 bytes at 80(%rsi);
 *  2. AES-128-encrypts the all-zero block with the round keys at
 *     0..160(%rdi) (initial xor, 9x aesenc, aesenclast) -- this yields
 *     the GHASH hash key H;
 *  3. byte-reverses H via pshufb (mask qwords 0x08090a0b0c0d0e0f /
 *     0x0001020304050607 select bytes 15..0, i.e. a full 16-byte
 *     reverse) and stores it at 32(%rsi);
 *  4. using pclmulqdq carry-less multiplies plus shift/xor reduction
 *     (top-dword constants 0xC2000000, 0x80000000, 0xE1000000), derives
 *     further values from H and stores them at offsets 0, 16, 48, 64,
 *     96 and 112 of the same structure -- presumably the (doubled)
 *     powers of H consumed by the GHASH routines; verify against the
 *     Vale source.
 *
 * Callee-saved state: %r12 is stashed in %rax and %xmm6 in %xmm0 for
 * the duration and both are restored just before ret (no stack use).
 */
.global _aes128_keyhash_init
_aes128_keyhash_init:
/* build the 16-byte byte-reversal mask in %xmm4 */
mov $579005069656919567, %r8
pinsrq $0, %r8, %xmm4
mov $283686952306183, %r8
pinsrq $1, %r8, %xmm4
/* zero scratch block at 80(%rsi) */
pxor %xmm0, %xmm0
movdqu %xmm0, 80(%rsi)
/* AES-128 encryption of the zero block: H = AES_K(0^128) */
mov %rdi, %r8
movdqu 0(%r8), %xmm2
pxor %xmm2, %xmm0
movdqu 16(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 32(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 48(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 64(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 80(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 96(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 112(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 128(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 144(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 160(%r8), %xmm2
aesenclast %xmm2, %xmm0
/* scrub the last round key from the register file */
pxor %xmm2, %xmm2
/* byte-reverse H and store it at 32(%rsi) */
pshufb %xmm4, %xmm0
mov %rsi, %rcx
movdqu %xmm0, 32(%rcx)
/* save callee-saved xmm6 in xmm0 and r12 in rax (restored at the end) */
movdqu %xmm6, %xmm0
mov %r12, %rax
/* GF(2^128) doubling of H (shift-left-1 with conditional reduction) */
movdqu 32(%rcx), %xmm1
movdqu %xmm1, %xmm6
movdqu %xmm1, %xmm3
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
mov $3254779904, %r12
pinsrd $3, %r12d, %xmm4
mov $1, %r12
pinsrd $0, %r12d, %xmm4
mov $2147483648, %r12
pinsrd $3, %r12d, %xmm5
movdqu %xmm3, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pand %xmm5, %xmm3
pcmpeqd %xmm5, %xmm3
pshufd $255, %xmm3, %xmm3
pand %xmm4, %xmm3
vpxor %xmm3, %xmm1, %xmm1
/* derived value #1 -> offset 0 */
movdqu %xmm1, 0(%rcx)
/* carry-less square of H (Karatsuba-style pclmulqdq) plus reduction */
movdqu %xmm6, %xmm1
movdqu %xmm6, %xmm2
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm6
/* modular reduction using 0xE1000000 in the top dword */
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm5
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
pclmulqdq $17, %xmm2, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pxor %xmm5, %xmm1
pxor %xmm6, %xmm1
movdqu %xmm1, %xmm6
/* GF(2^128) doubling of the new power */
movdqu %xmm1, %xmm3
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
mov $3254779904, %r12
pinsrd $3, %r12d, %xmm4
mov $1, %r12
pinsrd $0, %r12d, %xmm4
mov $2147483648, %r12
pinsrd $3, %r12d, %xmm5
movdqu %xmm3, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pand %xmm5, %xmm3
pcmpeqd %xmm5, %xmm3
pshufd $255, %xmm3, %xmm3
pand %xmm4, %xmm3
vpxor %xmm3, %xmm1, %xmm1
/* derived value #2 -> offset 16 */
movdqu %xmm1, 16(%rcx)
/* multiply current power (xmm6) by H (32(%rcx)), then reduce */
movdqu %xmm6, %xmm2
movdqu 32(%rcx), %xmm1
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm6
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm5
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
pclmulqdq $17, %xmm2, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pxor %xmm5, %xmm1
pxor %xmm6, %xmm1
movdqu %xmm1, %xmm6
/* GF(2^128) doubling of the new power */
movdqu %xmm1, %xmm3
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
mov $3254779904, %r12
pinsrd $3, %r12d, %xmm4
mov $1, %r12
pinsrd $0, %r12d, %xmm4
mov $2147483648, %r12
pinsrd $3, %r12d, %xmm5
movdqu %xmm3, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pand %xmm5, %xmm3
pcmpeqd %xmm5, %xmm3
pshufd $255, %xmm3, %xmm3
pand %xmm4, %xmm3
vpxor %xmm3, %xmm1, %xmm1
/* derived value #3 -> offset 48 */
movdqu %xmm1, 48(%rcx)
/* multiply current power (xmm6) by H (32(%rcx)), then reduce */
movdqu %xmm6, %xmm2
movdqu 32(%rcx), %xmm1
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm6
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm5
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
pclmulqdq $17, %xmm2, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pxor %xmm5, %xmm1
pxor %xmm6, %xmm1
movdqu %xmm1, %xmm6
/* GF(2^128) doubling of the new power */
movdqu %xmm1, %xmm3
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
mov $3254779904, %r12
pinsrd $3, %r12d, %xmm4
mov $1, %r12
pinsrd $0, %r12d, %xmm4
mov $2147483648, %r12
pinsrd $3, %r12d, %xmm5
movdqu %xmm3, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pand %xmm5, %xmm3
pcmpeqd %xmm5, %xmm3
pshufd $255, %xmm3, %xmm3
pand %xmm4, %xmm3
vpxor %xmm3, %xmm1, %xmm1
/* derived value #4 -> offset 64 */
movdqu %xmm1, 64(%rcx)
/* multiply current power (xmm6) by H (32(%rcx)), then reduce */
movdqu %xmm6, %xmm2
movdqu 32(%rcx), %xmm1
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm6
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm5
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
pclmulqdq $17, %xmm2, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pxor %xmm5, %xmm1
pxor %xmm6, %xmm1
movdqu %xmm1, %xmm6
/* GF(2^128) doubling of the new power */
movdqu %xmm1, %xmm3
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
mov $3254779904, %r12
pinsrd $3, %r12d, %xmm4
mov $1, %r12
pinsrd $0, %r12d, %xmm4
mov $2147483648, %r12
pinsrd $3, %r12d, %xmm5
movdqu %xmm3, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pand %xmm5, %xmm3
pcmpeqd %xmm5, %xmm3
pshufd $255, %xmm3, %xmm3
pand %xmm4, %xmm3
vpxor %xmm3, %xmm1, %xmm1
/* derived value #5 -> offset 96 */
movdqu %xmm1, 96(%rcx)
/* multiply current power (xmm6) by H (32(%rcx)), then reduce */
movdqu %xmm6, %xmm2
movdqu 32(%rcx), %xmm1
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm6
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm5
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
pclmulqdq $17, %xmm2, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pxor %xmm5, %xmm1
pxor %xmm6, %xmm1
movdqu %xmm1, %xmm6
/* GF(2^128) doubling of the new power */
movdqu %xmm1, %xmm3
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
mov $3254779904, %r12
pinsrd $3, %r12d, %xmm4
mov $1, %r12
pinsrd $0, %r12d, %xmm4
mov $2147483648, %r12
pinsrd $3, %r12d, %xmm5
movdqu %xmm3, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pand %xmm5, %xmm3
pcmpeqd %xmm5, %xmm3
pshufd $255, %xmm3, %xmm3
pand %xmm4, %xmm3
vpxor %xmm3, %xmm1, %xmm1
/* derived value #6 -> offset 112 */
movdqu %xmm1, 112(%rcx)
/* restore callee-saved xmm6 and r12 */
movdqu %xmm0, %xmm6
mov %rax, %r12
ret
/*
 * _aes256_key_expansion(key, round_keys)
 *
 * Machine-generated (Vale-style verified assembly): do NOT hand-edit;
 * instruction order is significant to the generating proof.
 *
 * In:   %rdi = 32-byte AES-256 cipher key
 *       %rsi = output buffer for the expanded schedule
 *              (15 round keys, 240 bytes, written at 0..224(%rdx))
 * Note: this underscore-prefixed entry point reads its arguments from
 *       %rdi/%rsi (System V registers) even though the file is the
 *       "-mingw" variant.
 *
 * Standard AES-NI key expansion: the two key halves seed rounds 0-1;
 * each further round key comes from aeskeygenassist on the previous
 * one, broadcast of the relevant word (pshufd $255 for rcon rounds,
 * pshufd $170 for the intermediate rounds with rcon 0), three
 * shift-left-4/xor folds, and a final xor.  The rcon immediates run
 * 1, 2, 4, 8, 16, 32, 64 on the odd steps.
 * xmm1-xmm4 are zeroed before return so no key material is left in
 * the register file.
 */
.global _aes256_key_expansion
_aes256_key_expansion:
/* rounds 0-1: the raw key halves */
movdqu 0(%rdi), %xmm1
movdqu 16(%rdi), %xmm3
mov %rsi, %rdx
movdqu %xmm1, 0(%rdx)
movdqu %xmm3, 16(%rdx)
/* round 2 (rcon 1) */
aeskeygenassist $1, %xmm3, %xmm2
pshufd $255, %xmm2, %xmm2
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
pxor %xmm2, %xmm1
movdqu %xmm1, 32(%rdx)
/* round 3 (no rcon) */
aeskeygenassist $0, %xmm1, %xmm2
pshufd $170, %xmm2, %xmm2
vpslldq $4, %xmm3, %xmm4
pxor %xmm4, %xmm3
vpslldq $4, %xmm3, %xmm4
pxor %xmm4, %xmm3
vpslldq $4, %xmm3, %xmm4
pxor %xmm4, %xmm3
pxor %xmm2, %xmm3
movdqu %xmm3, 48(%rdx)
/* round 4 (rcon 2) */
aeskeygenassist $2, %xmm3, %xmm2
pshufd $255, %xmm2, %xmm2
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
pxor %xmm2, %xmm1
movdqu %xmm1, 64(%rdx)
/* round 5 (no rcon) */
aeskeygenassist $0, %xmm1, %xmm2
pshufd $170, %xmm2, %xmm2
vpslldq $4, %xmm3, %xmm4
pxor %xmm4, %xmm3
vpslldq $4, %xmm3, %xmm4
pxor %xmm4, %xmm3
vpslldq $4, %xmm3, %xmm4
pxor %xmm4, %xmm3
pxor %xmm2, %xmm3
movdqu %xmm3, 80(%rdx)
/* round 6 (rcon 4) */
aeskeygenassist $4, %xmm3, %xmm2
pshufd $255, %xmm2, %xmm2
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
pxor %xmm2, %xmm1
movdqu %xmm1, 96(%rdx)
/* round 7 (no rcon) */
aeskeygenassist $0, %xmm1, %xmm2
pshufd $170, %xmm2, %xmm2
vpslldq $4, %xmm3, %xmm4
pxor %xmm4, %xmm3
vpslldq $4, %xmm3, %xmm4
pxor %xmm4, %xmm3
vpslldq $4, %xmm3, %xmm4
pxor %xmm4, %xmm3
pxor %xmm2, %xmm3
movdqu %xmm3, 112(%rdx)
/* round 8 (rcon 8) */
aeskeygenassist $8, %xmm3, %xmm2
pshufd $255, %xmm2, %xmm2
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
pxor %xmm2, %xmm1
movdqu %xmm1, 128(%rdx)
/* round 9 (no rcon) */
aeskeygenassist $0, %xmm1, %xmm2
pshufd $170, %xmm2, %xmm2
vpslldq $4, %xmm3, %xmm4
pxor %xmm4, %xmm3
vpslldq $4, %xmm3, %xmm4
pxor %xmm4, %xmm3
vpslldq $4, %xmm3, %xmm4
pxor %xmm4, %xmm3
pxor %xmm2, %xmm3
movdqu %xmm3, 144(%rdx)
/* round 10 (rcon 16) */
aeskeygenassist $16, %xmm3, %xmm2
pshufd $255, %xmm2, %xmm2
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
pxor %xmm2, %xmm1
movdqu %xmm1, 160(%rdx)
/* round 11 (no rcon) */
aeskeygenassist $0, %xmm1, %xmm2
pshufd $170, %xmm2, %xmm2
vpslldq $4, %xmm3, %xmm4
pxor %xmm4, %xmm3
vpslldq $4, %xmm3, %xmm4
pxor %xmm4, %xmm3
vpslldq $4, %xmm3, %xmm4
pxor %xmm4, %xmm3
pxor %xmm2, %xmm3
movdqu %xmm3, 176(%rdx)
/* round 12 (rcon 32) */
aeskeygenassist $32, %xmm3, %xmm2
pshufd $255, %xmm2, %xmm2
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
pxor %xmm2, %xmm1
movdqu %xmm1, 192(%rdx)
/* round 13 (no rcon) */
aeskeygenassist $0, %xmm1, %xmm2
pshufd $170, %xmm2, %xmm2
vpslldq $4, %xmm3, %xmm4
pxor %xmm4, %xmm3
vpslldq $4, %xmm3, %xmm4
pxor %xmm4, %xmm3
vpslldq $4, %xmm3, %xmm4
pxor %xmm4, %xmm3
pxor %xmm2, %xmm3
movdqu %xmm3, 208(%rdx)
/* round 14 (rcon 64) */
aeskeygenassist $64, %xmm3, %xmm2
pshufd $255, %xmm2, %xmm2
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
vpslldq $4, %xmm1, %xmm4
pxor %xmm4, %xmm1
pxor %xmm2, %xmm1
movdqu %xmm1, 224(%rdx)
/* scrub key material from the register file */
pxor %xmm1, %xmm1
pxor %xmm2, %xmm2
pxor %xmm3, %xmm3
pxor %xmm4, %xmm4
ret
/*
 * _aes256_keyhash_init(key_schedule, keys_struct)
 *
 * Machine-generated (Vale-style verified assembly): do NOT hand-edit the
 * instruction stream; its correctness is established by the generating
 * proof, and statement order is significant.
 *
 * In:   %rdi = expanded AES-256 round-key schedule (15 x 16 bytes)
 *       %rsi = output GHASH key structure
 * Note: this underscore-prefixed entry point reads its arguments from
 *       %rdi/%rsi (System V registers) even though the file is the
 *       "-mingw" variant.
 *
 * AES-256 sibling of _aes128_keyhash_init above:
 *  1. zeroes the 16 bytes at 80(%rsi);
 *  2. AES-256-encrypts the all-zero block with the round keys at
 *     0..224(%rdi) (initial xor, 13x aesenc, aesenclast) -> GHASH
 *     hash key H;
 *  3. byte-reverses H via pshufb (mask qwords 0x08090a0b0c0d0e0f /
 *     0x0001020304050607 = full 16-byte reverse) and stores it at
 *     32(%rsi);
 *  4. derives further values from H with pclmulqdq/shift/xor GF(2^128)
 *     arithmetic (reduction constants 0xC2000000, 0x80000000,
 *     0xE1000000 in the top dword) and stores them at offsets 0, 16,
 *     48, 64, 96 and 112 -- presumably the (doubled) powers of H used
 *     by GHASH; verify against the Vale source.
 *
 * Callee-saved state: %r12 is stashed in %rax and %xmm6 in %xmm0 for
 * the duration and both are restored just before ret (no stack use).
 */
.global _aes256_keyhash_init
_aes256_keyhash_init:
/* build the 16-byte byte-reversal mask in %xmm4 */
mov $579005069656919567, %r8
pinsrq $0, %r8, %xmm4
mov $283686952306183, %r8
pinsrq $1, %r8, %xmm4
/* zero scratch block at 80(%rsi) */
pxor %xmm0, %xmm0
movdqu %xmm0, 80(%rsi)
/* AES-256 encryption of the zero block: H = AES_K(0^128) */
mov %rdi, %r8
movdqu 0(%r8), %xmm2
pxor %xmm2, %xmm0
movdqu 16(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 32(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 48(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 64(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 80(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 96(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 112(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 128(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 144(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 160(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 176(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 192(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 208(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 224(%r8), %xmm2
aesenclast %xmm2, %xmm0
/* scrub the last round key from the register file */
pxor %xmm2, %xmm2
/* byte-reverse H and store it at 32(%rsi) */
pshufb %xmm4, %xmm0
mov %rsi, %rcx
movdqu %xmm0, 32(%rcx)
/* save callee-saved xmm6 in xmm0 and r12 in rax (restored at the end) */
movdqu %xmm6, %xmm0
mov %r12, %rax
/* GF(2^128) doubling of H (shift-left-1 with conditional reduction) */
movdqu 32(%rcx), %xmm1
movdqu %xmm1, %xmm6
movdqu %xmm1, %xmm3
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
mov $3254779904, %r12
pinsrd $3, %r12d, %xmm4
mov $1, %r12
pinsrd $0, %r12d, %xmm4
mov $2147483648, %r12
pinsrd $3, %r12d, %xmm5
movdqu %xmm3, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pand %xmm5, %xmm3
pcmpeqd %xmm5, %xmm3
pshufd $255, %xmm3, %xmm3
pand %xmm4, %xmm3
vpxor %xmm3, %xmm1, %xmm1
/* derived value #1 -> offset 0 */
movdqu %xmm1, 0(%rcx)
/* carry-less square of H (Karatsuba-style pclmulqdq) plus reduction */
movdqu %xmm6, %xmm1
movdqu %xmm6, %xmm2
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm6
/* modular reduction using 0xE1000000 in the top dword */
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm5
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
pclmulqdq $17, %xmm2, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pxor %xmm5, %xmm1
pxor %xmm6, %xmm1
movdqu %xmm1, %xmm6
/* GF(2^128) doubling of the new power */
movdqu %xmm1, %xmm3
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
mov $3254779904, %r12
pinsrd $3, %r12d, %xmm4
mov $1, %r12
pinsrd $0, %r12d, %xmm4
mov $2147483648, %r12
pinsrd $3, %r12d, %xmm5
movdqu %xmm3, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pand %xmm5, %xmm3
pcmpeqd %xmm5, %xmm3
pshufd $255, %xmm3, %xmm3
pand %xmm4, %xmm3
vpxor %xmm3, %xmm1, %xmm1
/* derived value #2 -> offset 16 */
movdqu %xmm1, 16(%rcx)
/* multiply current power (xmm6) by H (32(%rcx)), then reduce */
movdqu %xmm6, %xmm2
movdqu 32(%rcx), %xmm1
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm6
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm5
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
pclmulqdq $17, %xmm2, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pxor %xmm5, %xmm1
pxor %xmm6, %xmm1
movdqu %xmm1, %xmm6
/* GF(2^128) doubling of the new power */
movdqu %xmm1, %xmm3
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
mov $3254779904, %r12
pinsrd $3, %r12d, %xmm4
mov $1, %r12
pinsrd $0, %r12d, %xmm4
mov $2147483648, %r12
pinsrd $3, %r12d, %xmm5
movdqu %xmm3, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pand %xmm5, %xmm3
pcmpeqd %xmm5, %xmm3
pshufd $255, %xmm3, %xmm3
pand %xmm4, %xmm3
vpxor %xmm3, %xmm1, %xmm1
/* derived value #3 -> offset 48 */
movdqu %xmm1, 48(%rcx)
/* multiply current power (xmm6) by H (32(%rcx)), then reduce */
movdqu %xmm6, %xmm2
movdqu 32(%rcx), %xmm1
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm6
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm5
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
pclmulqdq $17, %xmm2, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pxor %xmm5, %xmm1
pxor %xmm6, %xmm1
movdqu %xmm1, %xmm6
/* GF(2^128) doubling of the new power */
movdqu %xmm1, %xmm3
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
mov $3254779904, %r12
pinsrd $3, %r12d, %xmm4
mov $1, %r12
pinsrd $0, %r12d, %xmm4
mov $2147483648, %r12
pinsrd $3, %r12d, %xmm5
movdqu %xmm3, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pand %xmm5, %xmm3
pcmpeqd %xmm5, %xmm3
pshufd $255, %xmm3, %xmm3
pand %xmm4, %xmm3
vpxor %xmm3, %xmm1, %xmm1
/* derived value #4 -> offset 64 */
movdqu %xmm1, 64(%rcx)
/* multiply current power (xmm6) by H (32(%rcx)), then reduce */
movdqu %xmm6, %xmm2
movdqu 32(%rcx), %xmm1
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm6
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm5
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
pclmulqdq $17, %xmm2, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pxor %xmm5, %xmm1
pxor %xmm6, %xmm1
movdqu %xmm1, %xmm6
/* GF(2^128) doubling of the new power */
movdqu %xmm1, %xmm3
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
mov $3254779904, %r12
pinsrd $3, %r12d, %xmm4
mov $1, %r12
pinsrd $0, %r12d, %xmm4
mov $2147483648, %r12
pinsrd $3, %r12d, %xmm5
movdqu %xmm3, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pand %xmm5, %xmm3
pcmpeqd %xmm5, %xmm3
pshufd $255, %xmm3, %xmm3
pand %xmm4, %xmm3
vpxor %xmm3, %xmm1, %xmm1
/* derived value #5 -> offset 96 */
movdqu %xmm1, 96(%rcx)
/* multiply current power (xmm6) by H (32(%rcx)), then reduce */
movdqu %xmm6, %xmm2
movdqu 32(%rcx), %xmm1
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm6
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
movdqu %xmm1, %xmm5
pclmulqdq $16, %xmm2, %xmm1
movdqu %xmm1, %xmm3
movdqu %xmm5, %xmm1
pclmulqdq $1, %xmm2, %xmm1
movdqu %xmm1, %xmm4
movdqu %xmm5, %xmm1
pclmulqdq $0, %xmm2, %xmm1
pclmulqdq $17, %xmm2, %xmm5
movdqu %xmm5, %xmm2
movdqu %xmm1, %xmm5
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm4, %xmm1
mov $0, %r12
pinsrd $0, %r12d, %xmm1
pshufd $14, %xmm1, %xmm1
pxor %xmm1, %xmm2
movdqu %xmm3, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm1
pshufd $79, %xmm1, %xmm1
mov $0, %r12
pinsrd $3, %r12d, %xmm4
pshufd $79, %xmm4, %xmm4
pxor %xmm4, %xmm1
pxor %xmm5, %xmm1
movdqu %xmm1, %xmm3
psrld $31, %xmm3
movdqu %xmm2, %xmm4
psrld $31, %xmm4
pslld $1, %xmm1
pslld $1, %xmm2
vpslldq $4, %xmm3, %xmm5
vpslldq $4, %xmm4, %xmm4
mov $0, %r12
pinsrd $0, %r12d, %xmm3
pshufd $3, %xmm3, %xmm3
pxor %xmm4, %xmm3
pxor %xmm5, %xmm1
pxor %xmm3, %xmm2
movdqu %xmm2, %xmm5
pxor %xmm2, %xmm2
mov $3774873600, %r12
pinsrd $3, %r12d, %xmm2
pclmulqdq $17, %xmm2, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pxor %xmm5, %xmm1
pxor %xmm6, %xmm1
movdqu %xmm1, %xmm6
/* GF(2^128) doubling of the new power */
movdqu %xmm1, %xmm3
pxor %xmm4, %xmm4
pxor %xmm5, %xmm5
mov $3254779904, %r12
pinsrd $3, %r12d, %xmm4
mov $1, %r12
pinsrd $0, %r12d, %xmm4
mov $2147483648, %r12
pinsrd $3, %r12d, %xmm5
movdqu %xmm3, %xmm1
movdqu %xmm1, %xmm2
psrld $31, %xmm2
pslld $1, %xmm1
vpslldq $4, %xmm2, %xmm2
pxor %xmm2, %xmm1
pand %xmm5, %xmm3
pcmpeqd %xmm5, %xmm3
pshufd $255, %xmm3, %xmm3
pand %xmm4, %xmm3
vpxor %xmm3, %xmm1, %xmm1
/* derived value #6 -> offset 112 */
movdqu %xmm1, 112(%rcx)
/* restore callee-saved xmm6 and r12 */
movdqu %xmm0, %xmm6
mov %rax, %r12
ret
#------------------------------------------------------------------------------
# _gctr128_bytes -- AES-128 GCTR (CTR-mode) bulk encryption.
# Vale-generated, formally verified code: do NOT hand-edit the instructions.
#
# Round keys: 11 x 16 bytes read from [r8+0 .. r8+160] (AES-128 key schedule).
# The initial counter block is read from [r9]; a whole-block count arrives on
# the stack at 72(%rsp), i.e. just above the eight pushed registers and the
# return address. rdi/rsi/rdx/rcx also carry arguments here.
# NOTE(review): this register/stack argument layout is Vale's internal calling
# convention for these mingw builds, not plain Win64 -- confirm against the
# generated caller before reusing this routine directly from C.
#------------------------------------------------------------------------------
.global _gctr128_bytes
_gctr128_bytes:
# Save every callee-saved register (incl. rsi/rdi, callee-saved on Win64).
push %r15
push %r14
push %r13
push %r12
push %rsi
push %rdi
push %rbp
push %rbx
movdqu 0(%r9), %xmm7
mov %rdi, %rax
mov %rdx, %rbx
mov %rcx, %r13
mov 72(%rsp), %rcx
mov %rcx, %rbp
imul $16, %rbp
# xmm8 = pshufb byte-reversal mask 0x000102030405060708090a0b0c0d0e0f
# (low qword 0x08090a0b0c0d0e0f, high qword 0x0001020304050607); used to
# convert the counter between big-endian wire order and little-endian lanes.
mov $579005069656919567, %r12
pinsrq $0, %r12, %xmm8
mov $283686952306183, %r12
pinsrq $1, %r12, %xmm8
# rdx = blocks / 4 (quad-block iterations); rcx = blocks mod 4 (tail loop).
mov %rcx, %rdx
shr $2, %rdx
and $3, %rcx
cmp $0, %rdx
jbe L0
mov %rax, %r9
mov %rbx, %r10
pshufb %xmm8, %xmm7
movdqu %xmm7, %xmm9
# xmm0 here swaps only within each qword (0x08090a0b0c0d0e0f in both lanes);
# xmm9/xmm10 hold the next counters being incremented as 32-bit lanes.
mov $579005069656919567, %rax
pinsrq $0, %rax, %xmm0
mov $579005069656919567, %rax
pinsrq $1, %rax, %xmm0
pshufb %xmm0, %xmm9
movdqu %xmm9, %xmm10
pxor %xmm3, %xmm3
mov $1, %rax
pinsrd $2, %eax, %xmm3
paddd %xmm3, %xmm9
mov $3, %rax
pinsrd $2, %eax, %xmm3
mov $2, %rax
pinsrd $0, %eax, %xmm3
paddd %xmm3, %xmm10
pshufb %xmm8, %xmm9
pshufb %xmm8, %xmm10
pextrq $0, %xmm7, %rdi
mov $283686952306183, %rax
pinsrq $0, %rax, %xmm0
mov $579005069656919567, %rax
pinsrq $1, %rax, %xmm0
# xmm15 = per-iteration counter increment of 4 (dword lanes 0 and 2).
pxor %xmm15, %xmm15
mov $4, %rax
pinsrd $0, %eax, %xmm15
mov $4, %rax
pinsrd $2, %eax, %xmm15
jmp L3
.balign 16
# Main loop: encrypt four counter blocks per iteration (xmm2,xmm12,xmm13,xmm14)
# and XOR the keystream into 64 bytes of input [r9] -> output [r10].
L2:
pinsrq $0, %rdi, %xmm2
pinsrq $0, %rdi, %xmm12
pinsrq $0, %rdi, %xmm13
pinsrq $0, %rdi, %xmm14
shufpd $2, %xmm9, %xmm2
shufpd $0, %xmm9, %xmm12
shufpd $2, %xmm10, %xmm13
shufpd $0, %xmm10, %xmm14
pshufb %xmm0, %xmm9
pshufb %xmm0, %xmm10
movdqu 0(%r8), %xmm3
movdqu 16(%r8), %xmm4
movdqu 32(%r8), %xmm5
movdqu 48(%r8), %xmm6
paddd %xmm15, %xmm9
paddd %xmm15, %xmm10
# Round 0: whitening XOR with key schedule entry 0.
pxor %xmm3, %xmm2
pxor %xmm3, %xmm12
pxor %xmm3, %xmm13
pxor %xmm3, %xmm14
pshufb %xmm0, %xmm9
pshufb %xmm0, %xmm10
# Rounds 1-9 interleaved across the four blocks; keys streamed from [r8].
aesenc %xmm4, %xmm2
aesenc %xmm4, %xmm12
aesenc %xmm4, %xmm13
aesenc %xmm4, %xmm14
aesenc %xmm5, %xmm2
aesenc %xmm5, %xmm12
aesenc %xmm5, %xmm13
aesenc %xmm5, %xmm14
aesenc %xmm6, %xmm2
aesenc %xmm6, %xmm12
aesenc %xmm6, %xmm13
aesenc %xmm6, %xmm14
movdqu 64(%r8), %xmm3
movdqu 80(%r8), %xmm4
movdqu 96(%r8), %xmm5
movdqu 112(%r8), %xmm6
aesenc %xmm3, %xmm2
aesenc %xmm3, %xmm12
aesenc %xmm3, %xmm13
aesenc %xmm3, %xmm14
aesenc %xmm4, %xmm2
aesenc %xmm4, %xmm12
aesenc %xmm4, %xmm13
aesenc %xmm4, %xmm14
aesenc %xmm5, %xmm2
aesenc %xmm5, %xmm12
aesenc %xmm5, %xmm13
aesenc %xmm5, %xmm14
aesenc %xmm6, %xmm2
aesenc %xmm6, %xmm12
aesenc %xmm6, %xmm13
aesenc %xmm6, %xmm14
movdqu 128(%r8), %xmm3
movdqu 144(%r8), %xmm4
movdqu 160(%r8), %xmm5
aesenc %xmm3, %xmm2
aesenc %xmm3, %xmm12
aesenc %xmm3, %xmm13
aesenc %xmm3, %xmm14
aesenc %xmm4, %xmm2
aesenc %xmm4, %xmm12
aesenc %xmm4, %xmm13
aesenc %xmm4, %xmm14
# Final round with key 10 (offset 160) -- 11 keys total => AES-128.
aesenclast %xmm5, %xmm2
aesenclast %xmm5, %xmm12
aesenclast %xmm5, %xmm13
aesenclast %xmm5, %xmm14
# keystream XOR plaintext -> ciphertext for the four blocks.
movdqu 0(%r9), %xmm7
pxor %xmm7, %xmm2
movdqu 16(%r9), %xmm7
pxor %xmm7, %xmm12
movdqu 32(%r9), %xmm7
pxor %xmm7, %xmm13
movdqu 48(%r9), %xmm7
pxor %xmm7, %xmm14
movdqu %xmm2, 0(%r10)
movdqu %xmm12, 16(%r10)
movdqu %xmm13, 32(%r10)
movdqu %xmm14, 48(%r10)
sub $1, %rdx
add $64, %r9
add $64, %r10
.balign 16
L3:
cmp $0, %rdx
ja L2
# Rebuild the current counter block in xmm7 for the one-block tail loop.
movdqu %xmm9, %xmm7
pinsrq $0, %rdi, %xmm7
pshufb %xmm8, %xmm7
mov %r9, %rax
mov %r10, %rbx
jmp L1
L0:
L1:
mov $0, %rdx
mov %rax, %r9
mov %rbx, %r10
# xmm4 = increment of 1 in the low dword for the per-block counter bump.
pxor %xmm4, %xmm4
mov $1, %r12
pinsrd $0, %r12d, %xmm4
jmp L5
.balign 16
# Tail loop: encrypt the remaining (blocks mod 4) blocks one at a time.
L4:
movdqu %xmm7, %xmm0
pshufb %xmm8, %xmm0
movdqu 0(%r8), %xmm2
pxor %xmm2, %xmm0
movdqu 16(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 32(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 48(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 64(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 80(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 96(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 112(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 128(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 144(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 160(%r8), %xmm2
aesenclast %xmm2, %xmm0
pxor %xmm2, %xmm2
movdqu 0(%r9), %xmm2
pxor %xmm0, %xmm2
movdqu %xmm2, 0(%r10)
add $1, %rdx
add $16, %r9
add $16, %r10
paddd %xmm4, %xmm7
.balign 16
L5:
cmp %rcx, %rdx
jne L4
# If the total byte length (rsi) exceeds 16*blocks (rbp), encrypt one more
# counter block and XOR it into the 16-byte inout buffer at [r13]
# (the caller's padded final partial block).
cmp %rbp, %rsi
jbe L6
movdqu 0(%r13), %xmm1
movdqu %xmm7, %xmm0
mov $579005069656919567, %r12
pinsrq $0, %r12, %xmm2
mov $283686952306183, %r12
pinsrq $1, %r12, %xmm2
pshufb %xmm2, %xmm0
movdqu 0(%r8), %xmm2
pxor %xmm2, %xmm0
movdqu 16(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 32(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 48(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 64(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 80(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 96(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 112(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 128(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 144(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 160(%r8), %xmm2
aesenclast %xmm2, %xmm0
pxor %xmm2, %xmm2
pxor %xmm0, %xmm1
movdqu %xmm1, 0(%r13)
jmp L7
L6:
L7:
pop %rbx
pop %rbp
pop %rdi
pop %rsi
pop %r12
pop %r13
pop %r14
pop %r15
ret
#------------------------------------------------------------------------------
# _gctr256_bytes -- AES-256 GCTR (CTR-mode) bulk encryption.
# Vale-generated, formally verified code: do NOT hand-edit the instructions.
#
# Identical structure to _gctr128_bytes except for the key schedule:
# 15 x 16-byte round keys read from [r8+0 .. r8+224] (AES-256).
# Initial counter block at [r9]; whole-block count at 72(%rsp).
# NOTE(review): register/stack argument layout is Vale's internal convention
# for these mingw builds -- confirm against the generated caller.
#------------------------------------------------------------------------------
.global _gctr256_bytes
_gctr256_bytes:
# Save all callee-saved registers (incl. rsi/rdi, callee-saved on Win64).
push %r15
push %r14
push %r13
push %r12
push %rsi
push %rdi
push %rbp
push %rbx
movdqu 0(%r9), %xmm7
mov %rdi, %rax
mov %rdx, %rbx
mov %rcx, %r13
mov 72(%rsp), %rcx
mov %rcx, %rbp
imul $16, %rbp
# xmm8 = pshufb byte-reversal mask 0x000102030405060708090a0b0c0d0e0f.
mov $579005069656919567, %r12
pinsrq $0, %r12, %xmm8
mov $283686952306183, %r12
pinsrq $1, %r12, %xmm8
# rdx = blocks / 4 (quad iterations); rcx = blocks mod 4 (tail loop).
mov %rcx, %rdx
shr $2, %rdx
and $3, %rcx
cmp $0, %rdx
jbe L8
mov %rax, %r9
mov %rbx, %r10
pshufb %xmm8, %xmm7
movdqu %xmm7, %xmm9
mov $579005069656919567, %rax
pinsrq $0, %rax, %xmm0
mov $579005069656919567, %rax
pinsrq $1, %rax, %xmm0
pshufb %xmm0, %xmm9
movdqu %xmm9, %xmm10
pxor %xmm3, %xmm3
mov $1, %rax
pinsrd $2, %eax, %xmm3
paddd %xmm3, %xmm9
mov $3, %rax
pinsrd $2, %eax, %xmm3
mov $2, %rax
pinsrd $0, %eax, %xmm3
paddd %xmm3, %xmm10
pshufb %xmm8, %xmm9
pshufb %xmm8, %xmm10
pextrq $0, %xmm7, %rdi
mov $283686952306183, %rax
pinsrq $0, %rax, %xmm0
mov $579005069656919567, %rax
pinsrq $1, %rax, %xmm0
# xmm15 = per-iteration counter increment of 4 (dword lanes 0 and 2).
pxor %xmm15, %xmm15
mov $4, %rax
pinsrd $0, %eax, %xmm15
mov $4, %rax
pinsrd $2, %eax, %xmm15
jmp L11
.balign 16
# Main loop: four counter blocks per iteration, 64 bytes [r9] -> [r10].
L10:
pinsrq $0, %rdi, %xmm2
pinsrq $0, %rdi, %xmm12
pinsrq $0, %rdi, %xmm13
pinsrq $0, %rdi, %xmm14
shufpd $2, %xmm9, %xmm2
shufpd $0, %xmm9, %xmm12
shufpd $2, %xmm10, %xmm13
shufpd $0, %xmm10, %xmm14
pshufb %xmm0, %xmm9
pshufb %xmm0, %xmm10
movdqu 0(%r8), %xmm3
movdqu 16(%r8), %xmm4
movdqu 32(%r8), %xmm5
movdqu 48(%r8), %xmm6
paddd %xmm15, %xmm9
paddd %xmm15, %xmm10
# Round 0 whitening, then rounds 1-13 with keys streamed from [r8].
pxor %xmm3, %xmm2
pxor %xmm3, %xmm12
pxor %xmm3, %xmm13
pxor %xmm3, %xmm14
pshufb %xmm0, %xmm9
pshufb %xmm0, %xmm10
aesenc %xmm4, %xmm2
aesenc %xmm4, %xmm12
aesenc %xmm4, %xmm13
aesenc %xmm4, %xmm14
aesenc %xmm5, %xmm2
aesenc %xmm5, %xmm12
aesenc %xmm5, %xmm13
aesenc %xmm5, %xmm14
aesenc %xmm6, %xmm2
aesenc %xmm6, %xmm12
aesenc %xmm6, %xmm13
aesenc %xmm6, %xmm14
movdqu 64(%r8), %xmm3
movdqu 80(%r8), %xmm4
movdqu 96(%r8), %xmm5
movdqu 112(%r8), %xmm6
aesenc %xmm3, %xmm2
aesenc %xmm3, %xmm12
aesenc %xmm3, %xmm13
aesenc %xmm3, %xmm14
aesenc %xmm4, %xmm2
aesenc %xmm4, %xmm12
aesenc %xmm4, %xmm13
aesenc %xmm4, %xmm14
aesenc %xmm5, %xmm2
aesenc %xmm5, %xmm12
aesenc %xmm5, %xmm13
aesenc %xmm5, %xmm14
aesenc %xmm6, %xmm2
aesenc %xmm6, %xmm12
aesenc %xmm6, %xmm13
aesenc %xmm6, %xmm14
movdqu 128(%r8), %xmm3
movdqu 144(%r8), %xmm4
movdqu 160(%r8), %xmm5
aesenc %xmm3, %xmm2
aesenc %xmm3, %xmm12
aesenc %xmm3, %xmm13
aesenc %xmm3, %xmm14
aesenc %xmm4, %xmm2
aesenc %xmm4, %xmm12
aesenc %xmm4, %xmm13
aesenc %xmm4, %xmm14
# Extra AES-256 rounds: keys at 160 (already in xmm5), 176, 192, 208.
movdqu %xmm5, %xmm3
movdqu 176(%r8), %xmm4
movdqu 192(%r8), %xmm5
movdqu 208(%r8), %xmm6
aesenc %xmm3, %xmm2
aesenc %xmm3, %xmm12
aesenc %xmm3, %xmm13
aesenc %xmm3, %xmm14
aesenc %xmm4, %xmm2
aesenc %xmm4, %xmm12
aesenc %xmm4, %xmm13
aesenc %xmm4, %xmm14
aesenc %xmm5, %xmm2
aesenc %xmm5, %xmm12
aesenc %xmm5, %xmm13
aesenc %xmm5, %xmm14
aesenc %xmm6, %xmm2
aesenc %xmm6, %xmm12
aesenc %xmm6, %xmm13
aesenc %xmm6, %xmm14
# Final round with key 14 (offset 224) -- 15 keys total => AES-256.
movdqu 224(%r8), %xmm5
aesenclast %xmm5, %xmm2
aesenclast %xmm5, %xmm12
aesenclast %xmm5, %xmm13
aesenclast %xmm5, %xmm14
movdqu 0(%r9), %xmm7
pxor %xmm7, %xmm2
movdqu 16(%r9), %xmm7
pxor %xmm7, %xmm12
movdqu 32(%r9), %xmm7
pxor %xmm7, %xmm13
movdqu 48(%r9), %xmm7
pxor %xmm7, %xmm14
movdqu %xmm2, 0(%r10)
movdqu %xmm12, 16(%r10)
movdqu %xmm13, 32(%r10)
movdqu %xmm14, 48(%r10)
sub $1, %rdx
add $64, %r9
add $64, %r10
.balign 16
L11:
cmp $0, %rdx
ja L10
# Rebuild the current counter block in xmm7 for the one-block tail loop.
movdqu %xmm9, %xmm7
pinsrq $0, %rdi, %xmm7
pshufb %xmm8, %xmm7
mov %r9, %rax
mov %r10, %rbx
jmp L9
L8:
L9:
mov $0, %rdx
mov %rax, %r9
mov %rbx, %r10
pxor %xmm4, %xmm4
mov $1, %r12
pinsrd $0, %r12d, %xmm4
jmp L13
.balign 16
# Tail loop: remaining (blocks mod 4) blocks one at a time, full 14 rounds.
L12:
movdqu %xmm7, %xmm0
pshufb %xmm8, %xmm0
movdqu 0(%r8), %xmm2
pxor %xmm2, %xmm0
movdqu 16(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 32(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 48(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 64(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 80(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 96(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 112(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 128(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 144(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 160(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 176(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 192(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 208(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 224(%r8), %xmm2
aesenclast %xmm2, %xmm0
pxor %xmm2, %xmm2
movdqu 0(%r9), %xmm2
pxor %xmm0, %xmm2
movdqu %xmm2, 0(%r10)
add $1, %rdx
add $16, %r9
add $16, %r10
paddd %xmm4, %xmm7
.balign 16
L13:
cmp %rcx, %rdx
jne L12
# If total byte length (rsi) exceeds 16*blocks (rbp), encrypt one more
# counter block and XOR it into the 16-byte inout buffer at [r13].
cmp %rbp, %rsi
jbe L14
movdqu 0(%r13), %xmm1
movdqu %xmm7, %xmm0
mov $579005069656919567, %r12
pinsrq $0, %r12, %xmm2
mov $283686952306183, %r12
pinsrq $1, %r12, %xmm2
pshufb %xmm2, %xmm0
movdqu 0(%r8), %xmm2
pxor %xmm2, %xmm0
movdqu 16(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 32(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 48(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 64(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 80(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 96(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 112(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 128(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 144(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 160(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 176(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 192(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 208(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 224(%r8), %xmm2
aesenclast %xmm2, %xmm0
pxor %xmm2, %xmm2
pxor %xmm0, %xmm1
movdqu %xmm1, 0(%r13)
jmp L15
L14:
L15:
pop %rbx
pop %rbp
pop %rdi
pop %rsi
pop %r12
pop %r13
pop %r14
pop %r15
ret
#------------------------------------------------------------------------------
# _compute_iv_stdcall -- derive the GCM pre-counter block J0 from the IV.
# Vale-generated, formally verified code: do NOT hand-edit the instructions.
#
# Fast path (IV length in rsi == 12): J0 = byteswap(IV block) with its low
# dword set to 1, stored at [rcx]; no GHASH and no register spills needed.
# General path: J0 = GHASH over the zero-padded IV followed by a 128-bit
# length block, using carry-less multiplies (vpclmulqdq) against precomputed
# powers of H read at fixed offsets around r9 (-32, -16, +16, +32, +64, +80
# after the initial "add $32, %r9").  The reduction constant 3254779904 =
# 0xC2000000 placed in the top dword of xmm3 is the GHASH field polynomial.
# The body is duplicated: the copy at L16 saves/restores callee-saved
# registers, the first copy (taken only when rsi == 12) avoids the spills.
# NOTE(review): register argument assignments (rdi/rsi/rdx/rcx/r8/r9) follow
# Vale's internal convention -- confirm against the generated caller.
#------------------------------------------------------------------------------
.global _compute_iv_stdcall
_compute_iv_stdcall:
cmp $12, %rsi
jne L16
cmp $12, %rsi
jne L18
# 12-byte IV: J0 = IV || 0x00000001 (after byteswap), written to [rcx].
movdqu 0(%r8), %xmm0
mov $579005069656919567, %rax
pinsrq $0, %rax, %xmm1
mov $283686952306183, %rax
pinsrq $1, %rax, %xmm1
pshufb %xmm1, %xmm0
mov $1, %rax
pinsrd $0, %eax, %xmm0
movdqu %xmm0, 0(%rcx)
jmp L19
L18:
# General path setup: rax = output ptr, r9 -> middle of the H-powers table,
# rcx = 16 * block count, xmm9 = byteswap mask, xmm8 = GHASH accumulator.
mov %rcx, %rax
add $32, %r9
mov %r8, %rbx
mov %rdx, %rcx
imul $16, %rcx
mov $579005069656919567, %r10
pinsrq $0, %r10, %xmm9
mov $283686952306183, %r10
pinsrq $1, %r10, %xmm9
pxor %xmm8, %xmm8
mov %rdi, %r11
jmp L21
.balign 16
# GHASH main loop: fold 6 input blocks per iteration against six powers of H,
# accumulating partial products in xmm4 (low), xmm6 (middle), xmm7 (high),
# then perform one reduction into the running hash xmm8.
L20:
add $80, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 80(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
# The running hash xmm8 is XORed into the block multiplied by the highest
# power of H (standard aggregated-reduction GHASH).
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
# Reduction modulo the GHASH polynomial (0xC2000000 in top dword of xmm3).
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
add $96, %r11
sub $6, %rdx
.balign 16
L21:
cmp $6, %rdx
jae L20
# Remainder: 1..5 whole blocks left, folded against the matching H powers.
cmp $0, %rdx
jbe L22
mov %rdx, %r10
sub $1, %r10
imul $16, %r10
add %r10, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
cmp $1, %rdx
jne L24
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
jmp L25
L24:
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
cmp $2, %rdx
je L26
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $3, %rdx
je L28
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $4, %rdx
je L30
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
jmp L31
L30:
L31:
jmp L29
L28:
L29:
jmp L27
L26:
L27:
# Fold the accumulator into the first (lowest-index) block and finish the
# partial products for the remainder.
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
L25:
# Reduction for the remainder blocks.
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
jmp L23
L22:
L23:
# Final partial block (total bytes rsi > 16*blocks rcx): mask off the unused
# tail bytes, branching on tail length < 8 vs >= 8, then one GHASH step.
mov %rsi, %r15
cmp %rcx, %rsi
jbe L32
movdqu 0(%rbx), %xmm0
mov %rsi, %r10
and $15, %r10
cmp $8, %r10
jae L34
mov $0, %rcx
pinsrq $1, %rcx, %xmm0
mov %r10, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $0, %xmm0, %rcx
and %r11, %rcx
pinsrq $0, %rcx, %xmm0
jmp L35
L34:
mov %r10, %rcx
sub $8, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $1, %xmm0, %rcx
and %r11, %rcx
pinsrq $1, %rcx, %xmm0
L35:
pshufb %xmm9, %xmm0
movdqu -32(%r9), %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r11
pinsrd $3, %r11d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
jmp L33
L32:
L33:
# Length block: 128-bit value with bit length = byte length * 8 in the low
# qword; one final GHASH step, then store J0 at the output pointer.
mov %rax, %rcx
mov $0, %r11
mov %rsi, %r13
pxor %xmm0, %xmm0
mov %r11, %rax
imul $8, %rax
pinsrq $1, %rax, %xmm0
mov %r13, %rax
imul $8, %rax
pinsrq $0, %rax, %xmm0
movdqu -32(%r9), %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r11
pinsrd $3, %r11d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
movdqu %xmm8, 0(%rcx)
L19:
jmp L17
# Second copy of the whole routine, reached when rsi != 12 at entry;
# identical logic but wrapped in callee-saved register spills.
L16:
push %r15
push %r14
push %r13
push %r12
push %rsi
push %rdi
push %rbp
push %rbx
cmp $12, %rsi
jne L36
movdqu 0(%r8), %xmm0
mov $579005069656919567, %rax
pinsrq $0, %rax, %xmm1
mov $283686952306183, %rax
pinsrq $1, %rax, %xmm1
pshufb %xmm1, %xmm0
mov $1, %rax
pinsrd $0, %eax, %xmm0
movdqu %xmm0, 0(%rcx)
jmp L37
L36:
mov %rcx, %rax
add $32, %r9
mov %r8, %rbx
mov %rdx, %rcx
imul $16, %rcx
mov $579005069656919567, %r10
pinsrq $0, %r10, %xmm9
mov $283686952306183, %r10
pinsrq $1, %r10, %xmm9
pxor %xmm8, %xmm8
mov %rdi, %r11
jmp L39
.balign 16
# 6-blocks-per-iteration GHASH loop (same as L20 above).
L38:
add $80, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 80(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
add $96, %r11
sub $6, %rdx
.balign 16
L39:
cmp $6, %rdx
jae L38
# Remainder of 1..5 blocks (same as L22..L31 above).
cmp $0, %rdx
jbe L40
mov %rdx, %r10
sub $1, %r10
imul $16, %r10
add %r10, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
cmp $1, %rdx
jne L42
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
jmp L43
L42:
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
cmp $2, %rdx
je L44
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $3, %rdx
je L46
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $4, %rdx
je L48
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
jmp L49
L48:
L49:
jmp L47
L46:
L47:
jmp L45
L44:
L45:
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
L43:
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
jmp L41
L40:
L41:
# Partial final block masking (same as L32..L35 above).
mov %rsi, %r15
cmp %rcx, %rsi
jbe L50
movdqu 0(%rbx), %xmm0
mov %rsi, %r10
and $15, %r10
cmp $8, %r10
jae L52
mov $0, %rcx
pinsrq $1, %rcx, %xmm0
mov %r10, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $0, %xmm0, %rcx
and %r11, %rcx
pinsrq $0, %rcx, %xmm0
jmp L53
L52:
mov %r10, %rcx
sub $8, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $1, %xmm0, %rcx
and %r11, %rcx
pinsrq $1, %rcx, %xmm0
L53:
pshufb %xmm9, %xmm0
movdqu -32(%r9), %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r11
pinsrd $3, %r11d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
jmp L51
L50:
L51:
# Length block and final reduction; result stored at the output pointer.
mov %rax, %rcx
mov $0, %r11
mov %rsi, %r13
pxor %xmm0, %xmm0
mov %r11, %rax
imul $8, %rax
pinsrq $1, %rax, %xmm0
mov %r13, %rax
imul $8, %rax
pinsrq $0, %rax, %xmm0
movdqu -32(%r9), %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r11
pinsrd $3, %r11d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
movdqu %xmm8, 0(%rcx)
L37:
pop %rbx
pop %rbp
pop %rdi
pop %rsi
pop %r12
pop %r13
pop %r14
pop %r15
L17:
ret
.global _gcm128_encrypt_opt
_gcm128_encrypt_opt:
push %r15
push %r14
push %r13
push %r12
push %rsi
push %rdi
push %rbp
push %rbx
mov 144(%rsp), %rbp
mov %rcx, %r13
lea 32(%r9), %r9
mov 72(%rsp), %rbx
mov %rdx, %rcx
imul $16, %rcx
mov $579005069656919567, %r10
pinsrq $0, %r10, %xmm9
mov $283686952306183, %r10
pinsrq $1, %r10, %xmm9
pxor %xmm8, %xmm8
mov %rdi, %r11
jmp L55
.balign 16
L54:
add $80, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 80(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
add $96, %r11
sub $6, %rdx
.balign 16
L55:
cmp $6, %rdx
jae L54
cmp $0, %rdx
jbe L56
mov %rdx, %r10
sub $1, %r10
imul $16, %r10
add %r10, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
cmp $1, %rdx
jne L58
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
jmp L59
L58:
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
cmp $2, %rdx
je L60
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $3, %rdx
je L62
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $4, %rdx
je L64
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
jmp L65
L64:
L65:
jmp L63
L62:
L63:
jmp L61
L60:
L61:
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
L59:
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
jmp L57
L56:
L57:
mov %rsi, %r15
cmp %rcx, %rsi
jbe L66
movdqu 0(%rbx), %xmm0
mov %rsi, %r10
and $15, %r10
cmp $8, %r10
jae L68
mov $0, %rcx
pinsrq $1, %rcx, %xmm0
mov %r10, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $0, %xmm0, %rcx
and %r11, %rcx
pinsrq $0, %rcx, %xmm0
jmp L69
L68:
mov %r10, %rcx
sub $8, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $1, %xmm0, %rcx
and %r11, %rcx
pinsrq $1, %rcx, %xmm0
L69:
pshufb %xmm9, %xmm0
movdqu -32(%r9), %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r11
pinsrd $3, %r11d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
jmp L67
L66:
L67:
mov 80(%rsp), %rdi
mov 88(%rsp), %rsi
mov 96(%rsp), %rdx
mov %r13, %rcx
movdqu %xmm9, %xmm0
movdqu 0(%r8), %xmm1
movdqu %xmm1, 0(%rbp)
pxor %xmm10, %xmm10
mov $1, %r11
pinsrq $0, %r11, %xmm10
vpaddd %xmm10, %xmm1, %xmm1
cmp $0, %rdx
jne L70
vpshufb %xmm0, %xmm1, %xmm1
movdqu %xmm1, 32(%rbp)
jmp L71
L70:
movdqu %xmm8, 32(%rbp)
add $128, %rcx
pextrq $0, %xmm1, %rbx
and $255, %rbx
vpshufb %xmm0, %xmm1, %xmm1
lea 96(%rsi), %r14
movdqu -128(%rcx), %xmm4
pxor %xmm2, %xmm2
mov $72057594037927936, %r11
pinsrq $1, %r11, %xmm2
movdqu -112(%rcx), %xmm15
mov %rcx, %r12
sub $96, %r12
vpxor %xmm4, %xmm1, %xmm9
add $6, %rbx
cmp $256, %rbx
jae L72
vpaddd %xmm2, %xmm1, %xmm10
vpaddd %xmm2, %xmm10, %xmm11
vpxor %xmm4, %xmm10, %xmm10
vpaddd %xmm2, %xmm11, %xmm12
vpxor %xmm4, %xmm11, %xmm11
vpaddd %xmm2, %xmm12, %xmm13
vpxor %xmm4, %xmm12, %xmm12
vpaddd %xmm2, %xmm13, %xmm14
vpxor %xmm4, %xmm13, %xmm13
vpaddd %xmm2, %xmm14, %xmm1
vpxor %xmm4, %xmm14, %xmm14
jmp L73
L72:
sub $256, %rbx
vpshufb %xmm0, %xmm1, %xmm6
pxor %xmm5, %xmm5
mov $1, %r11
pinsrq $0, %r11, %xmm5
vpaddd %xmm5, %xmm6, %xmm10
pxor %xmm5, %xmm5
mov $2, %r11
pinsrq $0, %r11, %xmm5
vpaddd %xmm5, %xmm6, %xmm11
vpaddd %xmm5, %xmm10, %xmm12
vpshufb %xmm0, %xmm10, %xmm10
vpaddd %xmm5, %xmm11, %xmm13
vpshufb %xmm0, %xmm11, %xmm11
vpxor %xmm4, %xmm10, %xmm10
vpaddd %xmm5, %xmm12, %xmm14
vpshufb %xmm0, %xmm12, %xmm12
vpxor %xmm4, %xmm11, %xmm11
vpaddd %xmm5, %xmm13, %xmm1
vpshufb %xmm0, %xmm13, %xmm13
vpxor %xmm4, %xmm12, %xmm12
vpshufb %xmm0, %xmm14, %xmm14
vpxor %xmm4, %xmm13, %xmm13
vpshufb %xmm0, %xmm1, %xmm1
vpxor %xmm4, %xmm14, %xmm14
L73:
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -96(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -80(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -64(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -48(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -32(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -16(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 0(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 16(%rcx), %xmm15
movdqu 32(%rcx), %xmm3
vaesenc %xmm15, %xmm9, %xmm9
vpxor 0(%rdi), %xmm3, %xmm4
vaesenc %xmm15, %xmm10, %xmm10
vpxor 16(%rdi), %xmm3, %xmm5
vaesenc %xmm15, %xmm11, %xmm11
vpxor 32(%rdi), %xmm3, %xmm6
vaesenc %xmm15, %xmm12, %xmm12
vpxor 48(%rdi), %xmm3, %xmm8
vaesenc %xmm15, %xmm13, %xmm13
vpxor 64(%rdi), %xmm3, %xmm2
vaesenc %xmm15, %xmm14, %xmm14
vpxor 80(%rdi), %xmm3, %xmm3
lea 96(%rdi), %rdi
vaesenclast %xmm4, %xmm9, %xmm9
vaesenclast %xmm5, %xmm10, %xmm10
vaesenclast %xmm6, %xmm11, %xmm11
vaesenclast %xmm8, %xmm12, %xmm12
vaesenclast %xmm2, %xmm13, %xmm13
vaesenclast %xmm3, %xmm14, %xmm14
movdqu %xmm9, 0(%rsi)
movdqu %xmm10, 16(%rsi)
movdqu %xmm11, 32(%rsi)
movdqu %xmm12, 48(%rsi)
movdqu %xmm13, 64(%rsi)
movdqu %xmm14, 80(%rsi)
lea 96(%rsi), %rsi
vpshufb %xmm0, %xmm9, %xmm8
vpshufb %xmm0, %xmm10, %xmm2
movdqu %xmm8, 112(%rbp)
vpshufb %xmm0, %xmm11, %xmm4
movdqu %xmm2, 96(%rbp)
vpshufb %xmm0, %xmm12, %xmm5
movdqu %xmm4, 80(%rbp)
vpshufb %xmm0, %xmm13, %xmm6
movdqu %xmm5, 64(%rbp)
vpshufb %xmm0, %xmm14, %xmm7
movdqu %xmm6, 48(%rbp)
movdqu -128(%rcx), %xmm4
pxor %xmm2, %xmm2
mov $72057594037927936, %r11
pinsrq $1, %r11, %xmm2
movdqu -112(%rcx), %xmm15
mov %rcx, %r12
sub $96, %r12
vpxor %xmm4, %xmm1, %xmm9
add $6, %rbx
cmp $256, %rbx
jae L74
vpaddd %xmm2, %xmm1, %xmm10
vpaddd %xmm2, %xmm10, %xmm11
vpxor %xmm4, %xmm10, %xmm10
vpaddd %xmm2, %xmm11, %xmm12
vpxor %xmm4, %xmm11, %xmm11
vpaddd %xmm2, %xmm12, %xmm13
vpxor %xmm4, %xmm12, %xmm12
vpaddd %xmm2, %xmm13, %xmm14
vpxor %xmm4, %xmm13, %xmm13
vpaddd %xmm2, %xmm14, %xmm1
vpxor %xmm4, %xmm14, %xmm14
jmp L75
L74:
sub $256, %rbx
vpshufb %xmm0, %xmm1, %xmm6
pxor %xmm5, %xmm5
mov $1, %r11
pinsrq $0, %r11, %xmm5
vpaddd %xmm5, %xmm6, %xmm10
pxor %xmm5, %xmm5
mov $2, %r11
pinsrq $0, %r11, %xmm5
vpaddd %xmm5, %xmm6, %xmm11
vpaddd %xmm5, %xmm10, %xmm12
vpshufb %xmm0, %xmm10, %xmm10
vpaddd %xmm5, %xmm11, %xmm13
vpshufb %xmm0, %xmm11, %xmm11
vpxor %xmm4, %xmm10, %xmm10
vpaddd %xmm5, %xmm12, %xmm14
vpshufb %xmm0, %xmm12, %xmm12
vpxor %xmm4, %xmm11, %xmm11
vpaddd %xmm5, %xmm13, %xmm1
vpshufb %xmm0, %xmm13, %xmm13
vpxor %xmm4, %xmm12, %xmm12
vpshufb %xmm0, %xmm14, %xmm14
vpxor %xmm4, %xmm13, %xmm13
vpshufb %xmm0, %xmm1, %xmm1
vpxor %xmm4, %xmm14, %xmm14
L75:
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -96(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -80(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -64(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -48(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -32(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -16(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 0(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 16(%rcx), %xmm15
movdqu 32(%rcx), %xmm3
vaesenc %xmm15, %xmm9, %xmm9
vpxor 0(%rdi), %xmm3, %xmm4
vaesenc %xmm15, %xmm10, %xmm10
vpxor 16(%rdi), %xmm3, %xmm5
vaesenc %xmm15, %xmm11, %xmm11
vpxor 32(%rdi), %xmm3, %xmm6
vaesenc %xmm15, %xmm12, %xmm12
vpxor 48(%rdi), %xmm3, %xmm8
vaesenc %xmm15, %xmm13, %xmm13
vpxor 64(%rdi), %xmm3, %xmm2
vaesenc %xmm15, %xmm14, %xmm14
vpxor 80(%rdi), %xmm3, %xmm3
lea 96(%rdi), %rdi
vaesenclast %xmm4, %xmm9, %xmm9
vaesenclast %xmm5, %xmm10, %xmm10
vaesenclast %xmm6, %xmm11, %xmm11
vaesenclast %xmm8, %xmm12, %xmm12
vaesenclast %xmm2, %xmm13, %xmm13
vaesenclast %xmm3, %xmm14, %xmm14
movdqu %xmm9, 0(%rsi)
movdqu %xmm10, 16(%rsi)
movdqu %xmm11, 32(%rsi)
movdqu %xmm12, 48(%rsi)
movdqu %xmm13, 64(%rsi)
movdqu %xmm14, 80(%rsi)
lea 96(%rsi), %rsi
sub $12, %rdx
movdqu 32(%rbp), %xmm8
pxor %xmm2, %xmm2
mov $72057594037927936, %r11
pinsrq $1, %r11, %xmm2
vpxor %xmm4, %xmm4, %xmm4
movdqu -128(%rcx), %xmm15
vpaddd %xmm2, %xmm1, %xmm10
vpaddd %xmm2, %xmm10, %xmm11
vpaddd %xmm2, %xmm11, %xmm12
vpaddd %xmm2, %xmm12, %xmm13
vpaddd %xmm2, %xmm13, %xmm14
vpxor %xmm15, %xmm1, %xmm9
movdqu %xmm4, 16(%rbp)
jmp L77
.balign 16
L76:
add $6, %rbx
cmp $256, %rbx
jb L78
mov $579005069656919567, %r11
pinsrq $0, %r11, %xmm0
mov $283686952306183, %r11
pinsrq $1, %r11, %xmm0
vpshufb %xmm0, %xmm1, %xmm6
pxor %xmm5, %xmm5
mov $1, %r11
pinsrq $0, %r11, %xmm5
vpaddd %xmm5, %xmm6, %xmm10
pxor %xmm5, %xmm5
mov $2, %r11
pinsrq $0, %r11, %xmm5
vpaddd %xmm5, %xmm6, %xmm11
movdqu -32(%r9), %xmm3
vpaddd %xmm5, %xmm10, %xmm12
vpshufb %xmm0, %xmm10, %xmm10
vpaddd %xmm5, %xmm11, %xmm13
vpshufb %xmm0, %xmm11, %xmm11
vpxor %xmm15, %xmm10, %xmm10
vpaddd %xmm5, %xmm12, %xmm14
vpshufb %xmm0, %xmm12, %xmm12
vpxor %xmm15, %xmm11, %xmm11
vpaddd %xmm5, %xmm13, %xmm1
vpshufb %xmm0, %xmm13, %xmm13
vpshufb %xmm0, %xmm14, %xmm14
vpshufb %xmm0, %xmm1, %xmm1
sub $256, %rbx
jmp L79
L78:
movdqu -32(%r9), %xmm3
vpaddd %xmm14, %xmm2, %xmm1
vpxor %xmm15, %xmm10, %xmm10
vpxor %xmm15, %xmm11, %xmm11
L79:
movdqu %xmm1, 128(%rbp)
vpclmulqdq $16, %xmm3, %xmm7, %xmm5
vpxor %xmm15, %xmm12, %xmm12
movdqu -112(%rcx), %xmm2
vpclmulqdq $1, %xmm3, %xmm7, %xmm6
vaesenc %xmm2, %xmm9, %xmm9
movdqu 48(%rbp), %xmm0
vpxor %xmm15, %xmm13, %xmm13
vpclmulqdq $0, %xmm3, %xmm7, %xmm1
vaesenc %xmm2, %xmm10, %xmm10
vpxor %xmm15, %xmm14, %xmm14
vpclmulqdq $17, %xmm3, %xmm7, %xmm7
vaesenc %xmm2, %xmm11, %xmm11
movdqu -16(%r9), %xmm3
vaesenc %xmm2, %xmm12, %xmm12
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $0, %xmm3, %xmm0, %xmm5
vpxor %xmm4, %xmm8, %xmm8
vaesenc %xmm2, %xmm13, %xmm13
vpxor %xmm5, %xmm1, %xmm4
vpclmulqdq $16, %xmm3, %xmm0, %xmm1
vaesenc %xmm2, %xmm14, %xmm14
movdqu -96(%rcx), %xmm15
vpclmulqdq $1, %xmm3, %xmm0, %xmm2
vaesenc %xmm15, %xmm9, %xmm9
vpxor 16(%rbp), %xmm8, %xmm8
vpclmulqdq $17, %xmm3, %xmm0, %xmm3
movdqu 64(%rbp), %xmm0
vaesenc %xmm15, %xmm10, %xmm10
movbeq 88(%r14), %r13
vaesenc %xmm15, %xmm11, %xmm11
movbeq 80(%r14), %r12
vaesenc %xmm15, %xmm12, %xmm12
movq %r13, 32(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
movq %r12, 40(%rbp)
movdqu 16(%r9), %xmm5
vaesenc %xmm15, %xmm14, %xmm14
movdqu -80(%rcx), %xmm15
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm2, %xmm6, %xmm6
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vaesenc %xmm15, %xmm10, %xmm10
vpxor %xmm3, %xmm7, %xmm7
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vaesenc %xmm15, %xmm11, %xmm11
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 80(%rbp), %xmm0
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vaesenc %xmm15, %xmm14, %xmm14
movdqu -64(%rcx), %xmm15
vpxor %xmm2, %xmm6, %xmm6
vpclmulqdq $0, %xmm1, %xmm0, %xmm2
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $16, %xmm1, %xmm0, %xmm3
vaesenc %xmm15, %xmm10, %xmm10
movbeq 72(%r14), %r13
vpxor %xmm5, %xmm7, %xmm7
vpclmulqdq $1, %xmm1, %xmm0, %xmm5
vaesenc %xmm15, %xmm11, %xmm11
movbeq 64(%r14), %r12
vpclmulqdq $17, %xmm1, %xmm0, %xmm1
movdqu 96(%rbp), %xmm0
vaesenc %xmm15, %xmm12, %xmm12
movq %r13, 48(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
movq %r12, 56(%rbp)
vpxor %xmm2, %xmm4, %xmm4
movdqu 64(%r9), %xmm2
vaesenc %xmm15, %xmm14, %xmm14
movdqu -48(%rcx), %xmm15
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $0, %xmm2, %xmm0, %xmm3
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $16, %xmm2, %xmm0, %xmm5
vaesenc %xmm15, %xmm10, %xmm10
movbeq 56(%r14), %r13
vpxor %xmm1, %xmm7, %xmm7
vpclmulqdq $1, %xmm2, %xmm0, %xmm1
vpxor 112(%rbp), %xmm8, %xmm8
vaesenc %xmm15, %xmm11, %xmm11
movbeq 48(%r14), %r12
vpclmulqdq $17, %xmm2, %xmm0, %xmm2
vaesenc %xmm15, %xmm12, %xmm12
movq %r13, 64(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
movq %r12, 72(%rbp)
vpxor %xmm3, %xmm4, %xmm4
movdqu 80(%r9), %xmm3
vaesenc %xmm15, %xmm14, %xmm14
movdqu -32(%rcx), %xmm15
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $16, %xmm3, %xmm8, %xmm5
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $1, %xmm3, %xmm8, %xmm1
vaesenc %xmm15, %xmm10, %xmm10
movbeq 40(%r14), %r13
vpxor %xmm2, %xmm7, %xmm7
vpclmulqdq $0, %xmm3, %xmm8, %xmm2
vaesenc %xmm15, %xmm11, %xmm11
movbeq 32(%r14), %r12
vpclmulqdq $17, %xmm3, %xmm8, %xmm8
vaesenc %xmm15, %xmm12, %xmm12
movq %r13, 80(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
movq %r12, 88(%rbp)
vpxor %xmm5, %xmm6, %xmm6
vaesenc %xmm15, %xmm14, %xmm14
vpxor %xmm1, %xmm6, %xmm6
movdqu -16(%rcx), %xmm15
vpslldq $8, %xmm6, %xmm5
vpxor %xmm2, %xmm4, %xmm4
pxor %xmm3, %xmm3
mov $13979173243358019584, %r11
pinsrq $1, %r11, %xmm3
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm8, %xmm7, %xmm7
vaesenc %xmm15, %xmm10, %xmm10
vpxor %xmm5, %xmm4, %xmm4
movbeq 24(%r14), %r13
vaesenc %xmm15, %xmm11, %xmm11
movbeq 16(%r14), %r12
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
movq %r13, 96(%rbp)
vaesenc %xmm15, %xmm12, %xmm12
movq %r12, 104(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 0(%rcx), %xmm1
vaesenc %xmm1, %xmm9, %xmm9
movdqu 16(%rcx), %xmm15
vaesenc %xmm1, %xmm10, %xmm10
vpsrldq $8, %xmm6, %xmm6
vaesenc %xmm1, %xmm11, %xmm11
vpxor %xmm6, %xmm7, %xmm7
vaesenc %xmm1, %xmm12, %xmm12
vpxor %xmm0, %xmm4, %xmm4
movbeq 8(%r14), %r13
vaesenc %xmm1, %xmm13, %xmm13
movbeq 0(%r14), %r12
vaesenc %xmm1, %xmm14, %xmm14
movdqu 32(%rcx), %xmm1
vaesenc %xmm15, %xmm9, %xmm9
movdqu %xmm7, 16(%rbp)
vpalignr $8, %xmm4, %xmm4, %xmm8
vaesenc %xmm15, %xmm10, %xmm10
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor 0(%rdi), %xmm1, %xmm2
vaesenc %xmm15, %xmm11, %xmm11
vpxor 16(%rdi), %xmm1, %xmm0
vaesenc %xmm15, %xmm12, %xmm12
vpxor 32(%rdi), %xmm1, %xmm5
vaesenc %xmm15, %xmm13, %xmm13
vpxor 48(%rdi), %xmm1, %xmm6
vaesenc %xmm15, %xmm14, %xmm14
vpxor 64(%rdi), %xmm1, %xmm7
vpxor 80(%rdi), %xmm1, %xmm3
movdqu 128(%rbp), %xmm1
vaesenclast %xmm2, %xmm9, %xmm9
pxor %xmm2, %xmm2
mov $72057594037927936, %r11
pinsrq $1, %r11, %xmm2
vaesenclast %xmm0, %xmm10, %xmm10
vpaddd %xmm2, %xmm1, %xmm0
movq %r13, 112(%rbp)
lea 96(%rdi), %rdi
vaesenclast %xmm5, %xmm11, %xmm11
vpaddd %xmm2, %xmm0, %xmm5
movq %r12, 120(%rbp)
lea 96(%rsi), %rsi
movdqu -128(%rcx), %xmm15
vaesenclast %xmm6, %xmm12, %xmm12
vpaddd %xmm2, %xmm5, %xmm6
vaesenclast %xmm7, %xmm13, %xmm13
vpaddd %xmm2, %xmm6, %xmm7
vaesenclast %xmm3, %xmm14, %xmm14
vpaddd %xmm2, %xmm7, %xmm3
sub $6, %rdx
add $96, %r14
cmp $0, %rdx
jbe L80
movdqu %xmm9, -96(%rsi)
vpxor %xmm15, %xmm1, %xmm9
movdqu %xmm10, -80(%rsi)
movdqu %xmm0, %xmm10
movdqu %xmm11, -64(%rsi)
movdqu %xmm5, %xmm11
movdqu %xmm12, -48(%rsi)
movdqu %xmm6, %xmm12
movdqu %xmm13, -32(%rsi)
movdqu %xmm7, %xmm13
movdqu %xmm14, -16(%rsi)
movdqu %xmm3, %xmm14
movdqu 32(%rbp), %xmm7
jmp L81
L80:
vpxor 16(%rbp), %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
L81:
.balign 16
L77:
cmp $0, %rdx
ja L76
movdqu 32(%rbp), %xmm7
movdqu %xmm1, 32(%rbp)
pxor %xmm4, %xmm4
movdqu %xmm4, 16(%rbp)
movdqu -32(%r9), %xmm3
vpclmulqdq $0, %xmm3, %xmm7, %xmm1
vpclmulqdq $16, %xmm3, %xmm7, %xmm5
movdqu 48(%rbp), %xmm0
vpclmulqdq $1, %xmm3, %xmm7, %xmm6
vpclmulqdq $17, %xmm3, %xmm7, %xmm7
movdqu -16(%r9), %xmm3
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $0, %xmm3, %xmm0, %xmm5
vpxor %xmm4, %xmm8, %xmm8
vpxor %xmm5, %xmm1, %xmm4
vpclmulqdq $16, %xmm3, %xmm0, %xmm1
vpclmulqdq $1, %xmm3, %xmm0, %xmm2
vpxor 16(%rbp), %xmm8, %xmm8
vpclmulqdq $17, %xmm3, %xmm0, %xmm3
movdqu 64(%rbp), %xmm0
movdqu 16(%r9), %xmm5
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpxor %xmm3, %xmm7, %xmm7
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 80(%rbp), %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpclmulqdq $0, %xmm1, %xmm0, %xmm2
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $16, %xmm1, %xmm0, %xmm3
vpxor %xmm5, %xmm7, %xmm7
vpclmulqdq $1, %xmm1, %xmm0, %xmm5
vpclmulqdq $17, %xmm1, %xmm0, %xmm1
movdqu 96(%rbp), %xmm0
vpxor %xmm2, %xmm4, %xmm4
movdqu 64(%r9), %xmm2
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $0, %xmm2, %xmm0, %xmm3
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $16, %xmm2, %xmm0, %xmm5
vpxor %xmm1, %xmm7, %xmm7
vpclmulqdq $1, %xmm2, %xmm0, %xmm1
vpxor 112(%rbp), %xmm8, %xmm8
vpclmulqdq $17, %xmm2, %xmm0, %xmm2
vpxor %xmm3, %xmm4, %xmm4
movdqu 80(%r9), %xmm3
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $16, %xmm3, %xmm8, %xmm5
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $1, %xmm3, %xmm8, %xmm1
vpxor %xmm2, %xmm7, %xmm7
vpclmulqdq $0, %xmm3, %xmm8, %xmm2
vpclmulqdq $17, %xmm3, %xmm8, %xmm8
vpxor %xmm5, %xmm6, %xmm6
vpxor %xmm1, %xmm6, %xmm6
vpxor %xmm2, %xmm4, %xmm4
pxor %xmm3, %xmm3
mov $3254779904, %rax
pinsrd $3, %eax, %xmm3
vpxor %xmm8, %xmm7, %xmm7
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
mov $579005069656919567, %r12
pinsrq $0, %r12, %xmm0
mov $283686952306183, %r12
pinsrq $1, %r12, %xmm0
movdqu %xmm9, -96(%rsi)
vpshufb %xmm0, %xmm9, %xmm9
vpxor %xmm7, %xmm1, %xmm1
movdqu %xmm10, -80(%rsi)
vpshufb %xmm0, %xmm10, %xmm10
movdqu %xmm11, -64(%rsi)
vpshufb %xmm0, %xmm11, %xmm11
movdqu %xmm12, -48(%rsi)
vpshufb %xmm0, %xmm12, %xmm12
movdqu %xmm13, -32(%rsi)
vpshufb %xmm0, %xmm13, %xmm13
movdqu %xmm14, -16(%rsi)
vpshufb %xmm0, %xmm14, %xmm14
pxor %xmm4, %xmm4
movdqu %xmm14, %xmm7
movdqu %xmm4, 16(%rbp)
movdqu %xmm13, 48(%rbp)
movdqu %xmm12, 64(%rbp)
movdqu %xmm11, 80(%rbp)
movdqu %xmm10, 96(%rbp)
movdqu %xmm9, 112(%rbp)
movdqu -32(%r9), %xmm3
vpclmulqdq $0, %xmm3, %xmm7, %xmm1
vpclmulqdq $16, %xmm3, %xmm7, %xmm5
movdqu 48(%rbp), %xmm0
vpclmulqdq $1, %xmm3, %xmm7, %xmm6
vpclmulqdq $17, %xmm3, %xmm7, %xmm7
movdqu -16(%r9), %xmm3
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $0, %xmm3, %xmm0, %xmm5
vpxor %xmm4, %xmm8, %xmm8
vpxor %xmm5, %xmm1, %xmm4
vpclmulqdq $16, %xmm3, %xmm0, %xmm1
vpclmulqdq $1, %xmm3, %xmm0, %xmm2
vpxor 16(%rbp), %xmm8, %xmm8
vpclmulqdq $17, %xmm3, %xmm0, %xmm3
movdqu 64(%rbp), %xmm0
movdqu 16(%r9), %xmm5
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpxor %xmm3, %xmm7, %xmm7
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 80(%rbp), %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpclmulqdq $0, %xmm1, %xmm0, %xmm2
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $16, %xmm1, %xmm0, %xmm3
vpxor %xmm5, %xmm7, %xmm7
vpclmulqdq $1, %xmm1, %xmm0, %xmm5
vpclmulqdq $17, %xmm1, %xmm0, %xmm1
movdqu 96(%rbp), %xmm0
vpxor %xmm2, %xmm4, %xmm4
movdqu 64(%r9), %xmm2
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $0, %xmm2, %xmm0, %xmm3
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $16, %xmm2, %xmm0, %xmm5
vpxor %xmm1, %xmm7, %xmm7
vpclmulqdq $1, %xmm2, %xmm0, %xmm1
vpxor 112(%rbp), %xmm8, %xmm8
vpclmulqdq $17, %xmm2, %xmm0, %xmm2
vpxor %xmm3, %xmm4, %xmm4
movdqu 80(%r9), %xmm3
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $16, %xmm3, %xmm8, %xmm5
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $1, %xmm3, %xmm8, %xmm1
vpxor %xmm2, %xmm7, %xmm7
vpclmulqdq $0, %xmm3, %xmm8, %xmm2
vpclmulqdq $17, %xmm3, %xmm8, %xmm8
vpxor %xmm5, %xmm6, %xmm6
vpxor %xmm1, %xmm6, %xmm6
vpxor %xmm2, %xmm4, %xmm4
pxor %xmm3, %xmm3
mov $3254779904, %rax
pinsrd $3, %eax, %xmm3
vpxor %xmm8, %xmm7, %xmm7
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
sub $128, %rcx
L71:
movdqu 32(%rbp), %xmm11
mov %rcx, %r8
mov 104(%rsp), %rax
mov 112(%rsp), %rdi
mov 120(%rsp), %rdx
mov %rdx, %r14
mov $579005069656919567, %r12
pinsrq $0, %r12, %xmm9
mov $283686952306183, %r12
pinsrq $1, %r12, %xmm9
pshufb %xmm9, %xmm11
pxor %xmm10, %xmm10
mov $1, %rbx
pinsrd $0, %ebx, %xmm10
mov %rax, %r11
mov %rdi, %r10
mov $0, %rbx
jmp L83
.balign 16
L82:
movdqu %xmm11, %xmm0
pshufb %xmm9, %xmm0
movdqu 0(%r8), %xmm2
pxor %xmm2, %xmm0
movdqu 16(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 32(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 48(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 64(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 80(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 96(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 112(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 128(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 144(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 160(%r8), %xmm2
aesenclast %xmm2, %xmm0
pxor %xmm2, %xmm2
movdqu 0(%r11), %xmm2
pxor %xmm0, %xmm2
movdqu %xmm2, 0(%r10)
add $1, %rbx
add $16, %r11
add $16, %r10
paddd %xmm10, %xmm11
.balign 16
L83:
cmp %rdx, %rbx
jne L82
mov %rdi, %r11
jmp L85
.balign 16
L84:
add $80, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 80(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
add $96, %r11
sub $6, %rdx
.balign 16
L85:
cmp $6, %rdx
jae L84
cmp $0, %rdx
jbe L86
mov %rdx, %r10
sub $1, %r10
imul $16, %r10
add %r10, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
cmp $1, %rdx
jne L88
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
jmp L89
L88:
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
cmp $2, %rdx
je L90
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $3, %rdx
je L92
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $4, %rdx
je L94
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
jmp L95
L94:
L95:
jmp L93
L92:
L93:
jmp L91
L90:
L91:
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
L89:
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
jmp L87
L86:
L87:
add 96(%rsp), %r14
imul $16, %r14
mov 136(%rsp), %r13
cmp %r14, %r13
jbe L96
mov 128(%rsp), %rax
mov %r13, %r10
and $15, %r10
movdqu %xmm11, %xmm0
pshufb %xmm9, %xmm0
movdqu 0(%r8), %xmm2
pxor %xmm2, %xmm0
movdqu 16(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 32(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 48(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 64(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 80(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 96(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 112(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 128(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 144(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 160(%r8), %xmm2
aesenclast %xmm2, %xmm0
pxor %xmm2, %xmm2
movdqu 0(%rax), %xmm4
pxor %xmm4, %xmm0
movdqu %xmm0, 0(%rax)
cmp $8, %r10
jae L98
mov $0, %rcx
pinsrq $1, %rcx, %xmm0
mov %r10, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $0, %xmm0, %rcx
and %r11, %rcx
pinsrq $0, %rcx, %xmm0
jmp L99
L98:
mov %r10, %rcx
sub $8, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $1, %xmm0, %rcx
and %r11, %rcx
pinsrq $1, %rcx, %xmm0
L99:
pshufb %xmm9, %xmm0
movdqu -32(%r9), %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r11
pinsrd $3, %r11d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
jmp L97
L96:
L97:
mov %r15, %r11
pxor %xmm0, %xmm0
mov %r11, %rax
imul $8, %rax
pinsrq $1, %rax, %xmm0
mov %r13, %rax
imul $8, %rax
pinsrq $0, %rax, %xmm0
movdqu -32(%r9), %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r11
pinsrd $3, %r11d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
movdqu 0(%rbp), %xmm0
pshufb %xmm9, %xmm0
movdqu 0(%r8), %xmm2
pxor %xmm2, %xmm0
movdqu 16(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 32(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 48(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 64(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 80(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 96(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 112(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 128(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 144(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 160(%r8), %xmm2
aesenclast %xmm2, %xmm0
pxor %xmm2, %xmm2
pshufb %xmm9, %xmm8
pxor %xmm0, %xmm8
mov 152(%rsp), %r15
movdqu %xmm8, 0(%r15)
pop %rbx
pop %rbp
pop %rdi
pop %rsi
pop %r12
pop %r13
pop %r14
pop %r15
ret
.global _gcm256_encrypt_opt
_gcm256_encrypt_opt:
push %r15
push %r14
push %r13
push %r12
push %rsi
push %rdi
push %rbp
push %rbx
mov 144(%rsp), %rbp
mov %rcx, %r13
lea 32(%r9), %r9
mov 72(%rsp), %rbx
mov %rdx, %rcx
imul $16, %rcx
mov $579005069656919567, %r10
pinsrq $0, %r10, %xmm9
mov $283686952306183, %r10
pinsrq $1, %r10, %xmm9
pxor %xmm8, %xmm8
mov %rdi, %r11
jmp L101
.balign 16
L100:
add $80, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 80(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
add $96, %r11
sub $6, %rdx
.balign 16
L101:
cmp $6, %rdx
jae L100
cmp $0, %rdx
jbe L102
mov %rdx, %r10
sub $1, %r10
imul $16, %r10
add %r10, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
cmp $1, %rdx
jne L104
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
jmp L105
L104:
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
cmp $2, %rdx
je L106
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $3, %rdx
je L108
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $4, %rdx
je L110
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
jmp L111
L110:
L111:
jmp L109
L108:
L109:
jmp L107
L106:
L107:
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
L105:
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
jmp L103
L102:
L103:
mov %rsi, %r15
cmp %rcx, %rsi
jbe L112
movdqu 0(%rbx), %xmm0
mov %rsi, %r10
and $15, %r10
cmp $8, %r10
jae L114
mov $0, %rcx
pinsrq $1, %rcx, %xmm0
mov %r10, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $0, %xmm0, %rcx
and %r11, %rcx
pinsrq $0, %rcx, %xmm0
jmp L115
L114:
mov %r10, %rcx
sub $8, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $1, %xmm0, %rcx
and %r11, %rcx
pinsrq $1, %rcx, %xmm0
L115:
pshufb %xmm9, %xmm0
movdqu -32(%r9), %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r11
pinsrd $3, %r11d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
jmp L113
L112:
L113:
mov 80(%rsp), %rdi
mov 88(%rsp), %rsi
mov 96(%rsp), %rdx
mov %r13, %rcx
movdqu %xmm9, %xmm0
movdqu 0(%r8), %xmm1
movdqu %xmm1, 0(%rbp)
pxor %xmm10, %xmm10
mov $1, %r11
pinsrq $0, %r11, %xmm10
vpaddd %xmm10, %xmm1, %xmm1
cmp $0, %rdx
jne L116
vpshufb %xmm0, %xmm1, %xmm1
movdqu %xmm1, 32(%rbp)
jmp L117
L116:
movdqu %xmm8, 32(%rbp)
add $128, %rcx
pextrq $0, %xmm1, %rbx
and $255, %rbx
vpshufb %xmm0, %xmm1, %xmm1
lea 96(%rsi), %r14
movdqu -128(%rcx), %xmm4
pxor %xmm2, %xmm2
mov $72057594037927936, %r11
pinsrq $1, %r11, %xmm2
movdqu -112(%rcx), %xmm15
mov %rcx, %r12
sub $96, %r12
vpxor %xmm4, %xmm1, %xmm9
add $6, %rbx
cmp $256, %rbx
jae L118
vpaddd %xmm2, %xmm1, %xmm10
vpaddd %xmm2, %xmm10, %xmm11
vpxor %xmm4, %xmm10, %xmm10
vpaddd %xmm2, %xmm11, %xmm12
vpxor %xmm4, %xmm11, %xmm11
vpaddd %xmm2, %xmm12, %xmm13
vpxor %xmm4, %xmm12, %xmm12
vpaddd %xmm2, %xmm13, %xmm14
vpxor %xmm4, %xmm13, %xmm13
vpaddd %xmm2, %xmm14, %xmm1
vpxor %xmm4, %xmm14, %xmm14
jmp L119
L118:
sub $256, %rbx
vpshufb %xmm0, %xmm1, %xmm6
pxor %xmm5, %xmm5
mov $1, %r11
pinsrq $0, %r11, %xmm5
vpaddd %xmm5, %xmm6, %xmm10
pxor %xmm5, %xmm5
mov $2, %r11
pinsrq $0, %r11, %xmm5
vpaddd %xmm5, %xmm6, %xmm11
vpaddd %xmm5, %xmm10, %xmm12
vpshufb %xmm0, %xmm10, %xmm10
vpaddd %xmm5, %xmm11, %xmm13
vpshufb %xmm0, %xmm11, %xmm11
vpxor %xmm4, %xmm10, %xmm10
vpaddd %xmm5, %xmm12, %xmm14
vpshufb %xmm0, %xmm12, %xmm12
vpxor %xmm4, %xmm11, %xmm11
vpaddd %xmm5, %xmm13, %xmm1
vpshufb %xmm0, %xmm13, %xmm13
vpxor %xmm4, %xmm12, %xmm12
vpshufb %xmm0, %xmm14, %xmm14
vpxor %xmm4, %xmm13, %xmm13
vpshufb %xmm0, %xmm1, %xmm1
vpxor %xmm4, %xmm14, %xmm14
L119:
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -96(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -80(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -64(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -48(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -32(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -16(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 0(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 16(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 32(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 48(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 64(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 80(%rcx), %xmm15
movdqu 96(%rcx), %xmm3
vaesenc %xmm15, %xmm9, %xmm9
vpxor 0(%rdi), %xmm3, %xmm4
vaesenc %xmm15, %xmm10, %xmm10
vpxor 16(%rdi), %xmm3, %xmm5
vaesenc %xmm15, %xmm11, %xmm11
vpxor 32(%rdi), %xmm3, %xmm6
vaesenc %xmm15, %xmm12, %xmm12
vpxor 48(%rdi), %xmm3, %xmm8
vaesenc %xmm15, %xmm13, %xmm13
vpxor 64(%rdi), %xmm3, %xmm2
vaesenc %xmm15, %xmm14, %xmm14
vpxor 80(%rdi), %xmm3, %xmm3
lea 96(%rdi), %rdi
vaesenclast %xmm4, %xmm9, %xmm9
vaesenclast %xmm5, %xmm10, %xmm10
vaesenclast %xmm6, %xmm11, %xmm11
vaesenclast %xmm8, %xmm12, %xmm12
vaesenclast %xmm2, %xmm13, %xmm13
vaesenclast %xmm3, %xmm14, %xmm14
movdqu %xmm9, 0(%rsi)
movdqu %xmm10, 16(%rsi)
movdqu %xmm11, 32(%rsi)
movdqu %xmm12, 48(%rsi)
movdqu %xmm13, 64(%rsi)
movdqu %xmm14, 80(%rsi)
lea 96(%rsi), %rsi
vpshufb %xmm0, %xmm9, %xmm8
vpshufb %xmm0, %xmm10, %xmm2
movdqu %xmm8, 112(%rbp)
vpshufb %xmm0, %xmm11, %xmm4
movdqu %xmm2, 96(%rbp)
vpshufb %xmm0, %xmm12, %xmm5
movdqu %xmm4, 80(%rbp)
vpshufb %xmm0, %xmm13, %xmm6
movdqu %xmm5, 64(%rbp)
vpshufb %xmm0, %xmm14, %xmm7
movdqu %xmm6, 48(%rbp)
movdqu -128(%rcx), %xmm4
pxor %xmm2, %xmm2
mov $72057594037927936, %r11
pinsrq $1, %r11, %xmm2
movdqu -112(%rcx), %xmm15
mov %rcx, %r12
sub $96, %r12
vpxor %xmm4, %xmm1, %xmm9
add $6, %rbx
cmp $256, %rbx
jae L120
vpaddd %xmm2, %xmm1, %xmm10
vpaddd %xmm2, %xmm10, %xmm11
vpxor %xmm4, %xmm10, %xmm10
vpaddd %xmm2, %xmm11, %xmm12
vpxor %xmm4, %xmm11, %xmm11
vpaddd %xmm2, %xmm12, %xmm13
vpxor %xmm4, %xmm12, %xmm12
vpaddd %xmm2, %xmm13, %xmm14
vpxor %xmm4, %xmm13, %xmm13
vpaddd %xmm2, %xmm14, %xmm1
vpxor %xmm4, %xmm14, %xmm14
jmp L121
L120:
sub $256, %rbx
vpshufb %xmm0, %xmm1, %xmm6
pxor %xmm5, %xmm5
mov $1, %r11
pinsrq $0, %r11, %xmm5
vpaddd %xmm5, %xmm6, %xmm10
pxor %xmm5, %xmm5
mov $2, %r11
pinsrq $0, %r11, %xmm5
vpaddd %xmm5, %xmm6, %xmm11
vpaddd %xmm5, %xmm10, %xmm12
vpshufb %xmm0, %xmm10, %xmm10
vpaddd %xmm5, %xmm11, %xmm13
vpshufb %xmm0, %xmm11, %xmm11
vpxor %xmm4, %xmm10, %xmm10
vpaddd %xmm5, %xmm12, %xmm14
vpshufb %xmm0, %xmm12, %xmm12
vpxor %xmm4, %xmm11, %xmm11
vpaddd %xmm5, %xmm13, %xmm1
vpshufb %xmm0, %xmm13, %xmm13
vpxor %xmm4, %xmm12, %xmm12
vpshufb %xmm0, %xmm14, %xmm14
vpxor %xmm4, %xmm13, %xmm13
vpshufb %xmm0, %xmm1, %xmm1
vpxor %xmm4, %xmm14, %xmm14
L121:
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -96(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -80(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -64(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -48(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -32(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu -16(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 0(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 16(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 32(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 48(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 64(%rcx), %xmm15
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 80(%rcx), %xmm15
movdqu 96(%rcx), %xmm3
vaesenc %xmm15, %xmm9, %xmm9
vpxor 0(%rdi), %xmm3, %xmm4
vaesenc %xmm15, %xmm10, %xmm10
vpxor 16(%rdi), %xmm3, %xmm5
vaesenc %xmm15, %xmm11, %xmm11
vpxor 32(%rdi), %xmm3, %xmm6
vaesenc %xmm15, %xmm12, %xmm12
vpxor 48(%rdi), %xmm3, %xmm8
vaesenc %xmm15, %xmm13, %xmm13
vpxor 64(%rdi), %xmm3, %xmm2
vaesenc %xmm15, %xmm14, %xmm14
vpxor 80(%rdi), %xmm3, %xmm3
lea 96(%rdi), %rdi
vaesenclast %xmm4, %xmm9, %xmm9
vaesenclast %xmm5, %xmm10, %xmm10
vaesenclast %xmm6, %xmm11, %xmm11
vaesenclast %xmm8, %xmm12, %xmm12
vaesenclast %xmm2, %xmm13, %xmm13
vaesenclast %xmm3, %xmm14, %xmm14
movdqu %xmm9, 0(%rsi)
movdqu %xmm10, 16(%rsi)
movdqu %xmm11, 32(%rsi)
movdqu %xmm12, 48(%rsi)
movdqu %xmm13, 64(%rsi)
movdqu %xmm14, 80(%rsi)
lea 96(%rsi), %rsi
sub $12, %rdx
movdqu 32(%rbp), %xmm8
pxor %xmm2, %xmm2
mov $72057594037927936, %r11
pinsrq $1, %r11, %xmm2
vpxor %xmm4, %xmm4, %xmm4
movdqu -128(%rcx), %xmm15
vpaddd %xmm2, %xmm1, %xmm10
vpaddd %xmm2, %xmm10, %xmm11
vpaddd %xmm2, %xmm11, %xmm12
vpaddd %xmm2, %xmm12, %xmm13
vpaddd %xmm2, %xmm13, %xmm14
vpxor %xmm15, %xmm1, %xmm9
movdqu %xmm4, 16(%rbp)
jmp L123
.balign 16
L122:
add $6, %rbx
cmp $256, %rbx
jb L124
mov $579005069656919567, %r11
pinsrq $0, %r11, %xmm0
mov $283686952306183, %r11
pinsrq $1, %r11, %xmm0
vpshufb %xmm0, %xmm1, %xmm6
pxor %xmm5, %xmm5
mov $1, %r11
pinsrq $0, %r11, %xmm5
vpaddd %xmm5, %xmm6, %xmm10
pxor %xmm5, %xmm5
mov $2, %r11
pinsrq $0, %r11, %xmm5
vpaddd %xmm5, %xmm6, %xmm11
movdqu -32(%r9), %xmm3
vpaddd %xmm5, %xmm10, %xmm12
vpshufb %xmm0, %xmm10, %xmm10
vpaddd %xmm5, %xmm11, %xmm13
vpshufb %xmm0, %xmm11, %xmm11
vpxor %xmm15, %xmm10, %xmm10
vpaddd %xmm5, %xmm12, %xmm14
vpshufb %xmm0, %xmm12, %xmm12
vpxor %xmm15, %xmm11, %xmm11
vpaddd %xmm5, %xmm13, %xmm1
vpshufb %xmm0, %xmm13, %xmm13
vpshufb %xmm0, %xmm14, %xmm14
vpshufb %xmm0, %xmm1, %xmm1
sub $256, %rbx
jmp L125
L124:
movdqu -32(%r9), %xmm3
vpaddd %xmm14, %xmm2, %xmm1
vpxor %xmm15, %xmm10, %xmm10
vpxor %xmm15, %xmm11, %xmm11
L125:
movdqu %xmm1, 128(%rbp)
vpclmulqdq $16, %xmm3, %xmm7, %xmm5
vpxor %xmm15, %xmm12, %xmm12
movdqu -112(%rcx), %xmm2
vpclmulqdq $1, %xmm3, %xmm7, %xmm6
vaesenc %xmm2, %xmm9, %xmm9
movdqu 48(%rbp), %xmm0
vpxor %xmm15, %xmm13, %xmm13
vpclmulqdq $0, %xmm3, %xmm7, %xmm1
vaesenc %xmm2, %xmm10, %xmm10
vpxor %xmm15, %xmm14, %xmm14
vpclmulqdq $17, %xmm3, %xmm7, %xmm7
vaesenc %xmm2, %xmm11, %xmm11
movdqu -16(%r9), %xmm3
vaesenc %xmm2, %xmm12, %xmm12
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $0, %xmm3, %xmm0, %xmm5
vpxor %xmm4, %xmm8, %xmm8
vaesenc %xmm2, %xmm13, %xmm13
vpxor %xmm5, %xmm1, %xmm4
vpclmulqdq $16, %xmm3, %xmm0, %xmm1
vaesenc %xmm2, %xmm14, %xmm14
movdqu -96(%rcx), %xmm15
vpclmulqdq $1, %xmm3, %xmm0, %xmm2
vaesenc %xmm15, %xmm9, %xmm9
vpxor 16(%rbp), %xmm8, %xmm8
vpclmulqdq $17, %xmm3, %xmm0, %xmm3
movdqu 64(%rbp), %xmm0
vaesenc %xmm15, %xmm10, %xmm10
movbeq 88(%r14), %r13
vaesenc %xmm15, %xmm11, %xmm11
movbeq 80(%r14), %r12
vaesenc %xmm15, %xmm12, %xmm12
movq %r13, 32(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
movq %r12, 40(%rbp)
movdqu 16(%r9), %xmm5
vaesenc %xmm15, %xmm14, %xmm14
movdqu -80(%rcx), %xmm15
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm2, %xmm6, %xmm6
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vaesenc %xmm15, %xmm10, %xmm10
vpxor %xmm3, %xmm7, %xmm7
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vaesenc %xmm15, %xmm11, %xmm11
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 80(%rbp), %xmm0
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vaesenc %xmm15, %xmm14, %xmm14
movdqu -64(%rcx), %xmm15
vpxor %xmm2, %xmm6, %xmm6
vpclmulqdq $0, %xmm1, %xmm0, %xmm2
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $16, %xmm1, %xmm0, %xmm3
vaesenc %xmm15, %xmm10, %xmm10
movbeq 72(%r14), %r13
vpxor %xmm5, %xmm7, %xmm7
vpclmulqdq $1, %xmm1, %xmm0, %xmm5
vaesenc %xmm15, %xmm11, %xmm11
movbeq 64(%r14), %r12
vpclmulqdq $17, %xmm1, %xmm0, %xmm1
movdqu 96(%rbp), %xmm0
vaesenc %xmm15, %xmm12, %xmm12
movq %r13, 48(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
movq %r12, 56(%rbp)
vpxor %xmm2, %xmm4, %xmm4
movdqu 64(%r9), %xmm2
vaesenc %xmm15, %xmm14, %xmm14
movdqu -48(%rcx), %xmm15
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $0, %xmm2, %xmm0, %xmm3
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $16, %xmm2, %xmm0, %xmm5
vaesenc %xmm15, %xmm10, %xmm10
movbeq 56(%r14), %r13
vpxor %xmm1, %xmm7, %xmm7
vpclmulqdq $1, %xmm2, %xmm0, %xmm1
vpxor 112(%rbp), %xmm8, %xmm8
vaesenc %xmm15, %xmm11, %xmm11
movbeq 48(%r14), %r12
vpclmulqdq $17, %xmm2, %xmm0, %xmm2
vaesenc %xmm15, %xmm12, %xmm12
movq %r13, 64(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
movq %r12, 72(%rbp)
vpxor %xmm3, %xmm4, %xmm4
movdqu 80(%r9), %xmm3
vaesenc %xmm15, %xmm14, %xmm14
movdqu -32(%rcx), %xmm15
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $16, %xmm3, %xmm8, %xmm5
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $1, %xmm3, %xmm8, %xmm1
vaesenc %xmm15, %xmm10, %xmm10
movbeq 40(%r14), %r13
vpxor %xmm2, %xmm7, %xmm7
vpclmulqdq $0, %xmm3, %xmm8, %xmm2
vaesenc %xmm15, %xmm11, %xmm11
movbeq 32(%r14), %r12
vpclmulqdq $17, %xmm3, %xmm8, %xmm8
vaesenc %xmm15, %xmm12, %xmm12
movq %r13, 80(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
movq %r12, 88(%rbp)
vpxor %xmm5, %xmm6, %xmm6
vaesenc %xmm15, %xmm14, %xmm14
vpxor %xmm1, %xmm6, %xmm6
movdqu -16(%rcx), %xmm15
vpslldq $8, %xmm6, %xmm5
vpxor %xmm2, %xmm4, %xmm4
pxor %xmm3, %xmm3
mov $13979173243358019584, %r11
pinsrq $1, %r11, %xmm3
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm8, %xmm7, %xmm7
vaesenc %xmm15, %xmm10, %xmm10
vpxor %xmm5, %xmm4, %xmm4
movbeq 24(%r14), %r13
vaesenc %xmm15, %xmm11, %xmm11
movbeq 16(%r14), %r12
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
movq %r13, 96(%rbp)
vaesenc %xmm15, %xmm12, %xmm12
movq %r12, 104(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 0(%rcx), %xmm1
vaesenc %xmm1, %xmm9, %xmm9
movdqu 16(%rcx), %xmm15
vaesenc %xmm1, %xmm10, %xmm10
vpsrldq $8, %xmm6, %xmm6
vaesenc %xmm1, %xmm11, %xmm11
vpxor %xmm6, %xmm7, %xmm7
vaesenc %xmm1, %xmm12, %xmm12
vpxor %xmm0, %xmm4, %xmm4
movbeq 8(%r14), %r13
vaesenc %xmm1, %xmm13, %xmm13
movbeq 0(%r14), %r12
vaesenc %xmm1, %xmm14, %xmm14
movdqu 32(%rcx), %xmm1
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
vaesenc %xmm1, %xmm9, %xmm9
vaesenc %xmm1, %xmm10, %xmm10
vaesenc %xmm1, %xmm11, %xmm11
vaesenc %xmm1, %xmm12, %xmm12
vaesenc %xmm1, %xmm13, %xmm13
movdqu 48(%rcx), %xmm15
vaesenc %xmm1, %xmm14, %xmm14
movdqu 64(%rcx), %xmm1
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
vaesenc %xmm1, %xmm9, %xmm9
vaesenc %xmm1, %xmm10, %xmm10
vaesenc %xmm1, %xmm11, %xmm11
vaesenc %xmm1, %xmm12, %xmm12
vaesenc %xmm1, %xmm13, %xmm13
movdqu 80(%rcx), %xmm15
vaesenc %xmm1, %xmm14, %xmm14
movdqu 96(%rcx), %xmm1
vaesenc %xmm15, %xmm9, %xmm9
movdqu %xmm7, 16(%rbp)
vpalignr $8, %xmm4, %xmm4, %xmm8
vaesenc %xmm15, %xmm10, %xmm10
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor 0(%rdi), %xmm1, %xmm2
vaesenc %xmm15, %xmm11, %xmm11
vpxor 16(%rdi), %xmm1, %xmm0
vaesenc %xmm15, %xmm12, %xmm12
vpxor 32(%rdi), %xmm1, %xmm5
vaesenc %xmm15, %xmm13, %xmm13
vpxor 48(%rdi), %xmm1, %xmm6
vaesenc %xmm15, %xmm14, %xmm14
vpxor 64(%rdi), %xmm1, %xmm7
vpxor 80(%rdi), %xmm1, %xmm3
movdqu 128(%rbp), %xmm1
vaesenclast %xmm2, %xmm9, %xmm9
pxor %xmm2, %xmm2
mov $72057594037927936, %r11
pinsrq $1, %r11, %xmm2
vaesenclast %xmm0, %xmm10, %xmm10
vpaddd %xmm2, %xmm1, %xmm0
movq %r13, 112(%rbp)
lea 96(%rdi), %rdi
vaesenclast %xmm5, %xmm11, %xmm11
vpaddd %xmm2, %xmm0, %xmm5
movq %r12, 120(%rbp)
lea 96(%rsi), %rsi
movdqu -128(%rcx), %xmm15
vaesenclast %xmm6, %xmm12, %xmm12
vpaddd %xmm2, %xmm5, %xmm6
vaesenclast %xmm7, %xmm13, %xmm13
vpaddd %xmm2, %xmm6, %xmm7
vaesenclast %xmm3, %xmm14, %xmm14
vpaddd %xmm2, %xmm7, %xmm3
sub $6, %rdx
add $96, %r14
cmp $0, %rdx
jbe L126
movdqu %xmm9, -96(%rsi)
vpxor %xmm15, %xmm1, %xmm9
movdqu %xmm10, -80(%rsi)
movdqu %xmm0, %xmm10
movdqu %xmm11, -64(%rsi)
movdqu %xmm5, %xmm11
movdqu %xmm12, -48(%rsi)
movdqu %xmm6, %xmm12
movdqu %xmm13, -32(%rsi)
movdqu %xmm7, %xmm13
movdqu %xmm14, -16(%rsi)
movdqu %xmm3, %xmm14
movdqu 32(%rbp), %xmm7
jmp L127
L126:
vpxor 16(%rbp), %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
L127:
.balign 16
L123:
cmp $0, %rdx
ja L122
movdqu 32(%rbp), %xmm7
movdqu %xmm1, 32(%rbp)
pxor %xmm4, %xmm4
movdqu %xmm4, 16(%rbp)
movdqu -32(%r9), %xmm3
vpclmulqdq $0, %xmm3, %xmm7, %xmm1
vpclmulqdq $16, %xmm3, %xmm7, %xmm5
movdqu 48(%rbp), %xmm0
vpclmulqdq $1, %xmm3, %xmm7, %xmm6
vpclmulqdq $17, %xmm3, %xmm7, %xmm7
movdqu -16(%r9), %xmm3
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $0, %xmm3, %xmm0, %xmm5
vpxor %xmm4, %xmm8, %xmm8
vpxor %xmm5, %xmm1, %xmm4
vpclmulqdq $16, %xmm3, %xmm0, %xmm1
vpclmulqdq $1, %xmm3, %xmm0, %xmm2
vpxor 16(%rbp), %xmm8, %xmm8
vpclmulqdq $17, %xmm3, %xmm0, %xmm3
movdqu 64(%rbp), %xmm0
movdqu 16(%r9), %xmm5
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpxor %xmm3, %xmm7, %xmm7
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 80(%rbp), %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpclmulqdq $0, %xmm1, %xmm0, %xmm2
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $16, %xmm1, %xmm0, %xmm3
vpxor %xmm5, %xmm7, %xmm7
vpclmulqdq $1, %xmm1, %xmm0, %xmm5
vpclmulqdq $17, %xmm1, %xmm0, %xmm1
movdqu 96(%rbp), %xmm0
vpxor %xmm2, %xmm4, %xmm4
movdqu 64(%r9), %xmm2
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $0, %xmm2, %xmm0, %xmm3
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $16, %xmm2, %xmm0, %xmm5
vpxor %xmm1, %xmm7, %xmm7
vpclmulqdq $1, %xmm2, %xmm0, %xmm1
vpxor 112(%rbp), %xmm8, %xmm8
vpclmulqdq $17, %xmm2, %xmm0, %xmm2
vpxor %xmm3, %xmm4, %xmm4
movdqu 80(%r9), %xmm3
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $16, %xmm3, %xmm8, %xmm5
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $1, %xmm3, %xmm8, %xmm1
vpxor %xmm2, %xmm7, %xmm7
vpclmulqdq $0, %xmm3, %xmm8, %xmm2
vpclmulqdq $17, %xmm3, %xmm8, %xmm8
vpxor %xmm5, %xmm6, %xmm6
vpxor %xmm1, %xmm6, %xmm6
vpxor %xmm2, %xmm4, %xmm4
pxor %xmm3, %xmm3
mov $3254779904, %rax
pinsrd $3, %eax, %xmm3
vpxor %xmm8, %xmm7, %xmm7
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
mov $579005069656919567, %r12
pinsrq $0, %r12, %xmm0
mov $283686952306183, %r12
pinsrq $1, %r12, %xmm0
movdqu %xmm9, -96(%rsi)
vpshufb %xmm0, %xmm9, %xmm9
vpxor %xmm7, %xmm1, %xmm1
movdqu %xmm10, -80(%rsi)
vpshufb %xmm0, %xmm10, %xmm10
movdqu %xmm11, -64(%rsi)
vpshufb %xmm0, %xmm11, %xmm11
movdqu %xmm12, -48(%rsi)
vpshufb %xmm0, %xmm12, %xmm12
movdqu %xmm13, -32(%rsi)
vpshufb %xmm0, %xmm13, %xmm13
movdqu %xmm14, -16(%rsi)
vpshufb %xmm0, %xmm14, %xmm14
pxor %xmm4, %xmm4
movdqu %xmm14, %xmm7
movdqu %xmm4, 16(%rbp)
movdqu %xmm13, 48(%rbp)
movdqu %xmm12, 64(%rbp)
movdqu %xmm11, 80(%rbp)
movdqu %xmm10, 96(%rbp)
movdqu %xmm9, 112(%rbp)
movdqu -32(%r9), %xmm3
vpclmulqdq $0, %xmm3, %xmm7, %xmm1
vpclmulqdq $16, %xmm3, %xmm7, %xmm5
movdqu 48(%rbp), %xmm0
vpclmulqdq $1, %xmm3, %xmm7, %xmm6
vpclmulqdq $17, %xmm3, %xmm7, %xmm7
movdqu -16(%r9), %xmm3
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $0, %xmm3, %xmm0, %xmm5
vpxor %xmm4, %xmm8, %xmm8
vpxor %xmm5, %xmm1, %xmm4
vpclmulqdq $16, %xmm3, %xmm0, %xmm1
vpclmulqdq $1, %xmm3, %xmm0, %xmm2
vpxor 16(%rbp), %xmm8, %xmm8
vpclmulqdq $17, %xmm3, %xmm0, %xmm3
movdqu 64(%rbp), %xmm0
movdqu 16(%r9), %xmm5
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpxor %xmm3, %xmm7, %xmm7
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 80(%rbp), %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpclmulqdq $0, %xmm1, %xmm0, %xmm2
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $16, %xmm1, %xmm0, %xmm3
vpxor %xmm5, %xmm7, %xmm7
vpclmulqdq $1, %xmm1, %xmm0, %xmm5
vpclmulqdq $17, %xmm1, %xmm0, %xmm1
movdqu 96(%rbp), %xmm0
vpxor %xmm2, %xmm4, %xmm4
movdqu 64(%r9), %xmm2
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $0, %xmm2, %xmm0, %xmm3
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $16, %xmm2, %xmm0, %xmm5
vpxor %xmm1, %xmm7, %xmm7
vpclmulqdq $1, %xmm2, %xmm0, %xmm1
vpxor 112(%rbp), %xmm8, %xmm8
vpclmulqdq $17, %xmm2, %xmm0, %xmm2
vpxor %xmm3, %xmm4, %xmm4
movdqu 80(%r9), %xmm3
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $16, %xmm3, %xmm8, %xmm5
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $1, %xmm3, %xmm8, %xmm1
vpxor %xmm2, %xmm7, %xmm7
vpclmulqdq $0, %xmm3, %xmm8, %xmm2
vpclmulqdq $17, %xmm3, %xmm8, %xmm8
vpxor %xmm5, %xmm6, %xmm6
vpxor %xmm1, %xmm6, %xmm6
vpxor %xmm2, %xmm4, %xmm4
pxor %xmm3, %xmm3
mov $3254779904, %rax
pinsrd $3, %eax, %xmm3
vpxor %xmm8, %xmm7, %xmm7
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
sub $128, %rcx
L117:
movdqu 32(%rbp), %xmm11
mov %rcx, %r8
mov 104(%rsp), %rax
mov 112(%rsp), %rdi
mov 120(%rsp), %rdx
mov %rdx, %r14
mov $579005069656919567, %r12
pinsrq $0, %r12, %xmm9
mov $283686952306183, %r12
pinsrq $1, %r12, %xmm9
pshufb %xmm9, %xmm11
pxor %xmm10, %xmm10
mov $1, %rbx
pinsrd $0, %ebx, %xmm10
mov %rax, %r11
mov %rdi, %r10
mov $0, %rbx
jmp L129
.balign 16
L128:
movdqu %xmm11, %xmm0
pshufb %xmm9, %xmm0
movdqu 0(%r8), %xmm2
pxor %xmm2, %xmm0
movdqu 16(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 32(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 48(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 64(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 80(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 96(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 112(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 128(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 144(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 160(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 176(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 192(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 208(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 224(%r8), %xmm2
aesenclast %xmm2, %xmm0
pxor %xmm2, %xmm2
movdqu 0(%r11), %xmm2
pxor %xmm0, %xmm2
movdqu %xmm2, 0(%r10)
add $1, %rbx
add $16, %r11
add $16, %r10
paddd %xmm10, %xmm11
.balign 16
L129:
cmp %rdx, %rbx
jne L128
mov %rdi, %r11
jmp L131
.balign 16
L130:
add $80, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 80(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
add $96, %r11
sub $6, %rdx
.balign 16
L131:
cmp $6, %rdx
jae L130
cmp $0, %rdx
jbe L132
mov %rdx, %r10
sub $1, %r10
imul $16, %r10
add %r10, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
cmp $1, %rdx
jne L134
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
jmp L135
L134:
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
cmp $2, %rdx
je L136
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $3, %rdx
je L138
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $4, %rdx
je L140
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
jmp L141
L140:
L141:
jmp L139
L138:
L139:
jmp L137
L136:
L137:
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
L135:
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
jmp L133
L132:
L133:
add 96(%rsp), %r14
imul $16, %r14
mov 136(%rsp), %r13
cmp %r14, %r13
jbe L142
mov 128(%rsp), %rax
mov %r13, %r10
and $15, %r10
movdqu %xmm11, %xmm0
pshufb %xmm9, %xmm0
movdqu 0(%r8), %xmm2
pxor %xmm2, %xmm0
movdqu 16(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 32(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 48(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 64(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 80(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 96(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 112(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 128(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 144(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 160(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 176(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 192(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 208(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 224(%r8), %xmm2
aesenclast %xmm2, %xmm0
pxor %xmm2, %xmm2
movdqu 0(%rax), %xmm4
pxor %xmm4, %xmm0
movdqu %xmm0, 0(%rax)
cmp $8, %r10
jae L144
mov $0, %rcx
pinsrq $1, %rcx, %xmm0
mov %r10, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $0, %xmm0, %rcx
and %r11, %rcx
pinsrq $0, %rcx, %xmm0
jmp L145
L144:
mov %r10, %rcx
sub $8, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $1, %xmm0, %rcx
and %r11, %rcx
pinsrq $1, %rcx, %xmm0
L145:
pshufb %xmm9, %xmm0
movdqu -32(%r9), %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r11
pinsrd $3, %r11d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
jmp L143
L142:
L143:
mov %r15, %r11
pxor %xmm0, %xmm0
mov %r11, %rax
imul $8, %rax
pinsrq $1, %rax, %xmm0
mov %r13, %rax
imul $8, %rax
pinsrq $0, %rax, %xmm0
movdqu -32(%r9), %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r11
pinsrd $3, %r11d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
movdqu 0(%rbp), %xmm0
pshufb %xmm9, %xmm0
movdqu 0(%r8), %xmm2
pxor %xmm2, %xmm0
movdqu 16(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 32(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 48(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 64(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 80(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 96(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 112(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 128(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 144(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 160(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 176(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 192(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 208(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 224(%r8), %xmm2
aesenclast %xmm2, %xmm0
pxor %xmm2, %xmm2
pshufb %xmm9, %xmm8
pxor %xmm0, %xmm8
mov 152(%rsp), %r15
movdqu %xmm8, 0(%r15)
pop %rbx
pop %rbp
pop %rdi
pop %rsi
pop %r12
pop %r13
pop %r14
pop %r15
ret
#-----------------------------------------------------------------------
# _gcm128_decrypt_opt -- AES-128-GCM decryption core (AES-NI + PCLMULQDQ
# + AVX), machine-generated from formally verified Vale code
# (HACL*/EverCrypt).  Do NOT hand-edit the instruction stream: its
# correctness proof depends on the exact sequence of instructions.
#
# ABI: Microsoft x64 (mingw build) -- register args in rcx, rdx, r8, r9;
# remaining args on the stack.  After the eight pushes below, the
# stack-passed arguments are read at 72(%rsp)..152(%rsp).
#
# NOTE(review): argument meanings below are inferred from usage only --
# confirm against the C prototype that declares this function:
#   rcx        saved to r13; later becomes the base of the expanded AES
#              key schedule used by the 6-way interleaved loop
#   rdx        number of whole 16-byte blocks for the first GHASH pass
#   r8         AES-128 round-key schedule: 11 round keys at 0..160(%r8)
#   r9         precomputed GHASH H-power table (biased by +32 below, so
#              entries are addressed at -32(%r9)..80(%r9))
#   72..152(%rsp)  further buffers/lengths: scratch area (rbp), plaintext/
#              ciphertext pointers and lengths, pointer to expected tag
#              at 152(%rsp)
#
# Returns rax = 0 when the computed tag equals the expected 16-byte tag
# at the caller-supplied pointer, nonzero otherwise (branch-free
# comparison via pcmpeqd / sub / adc at the end).
#
# Recurring constants:
#   $579005069656919567 / $283686952306183  -> xmm9 = pshufb byte-swap
#       mask (0x000102...0F), converts blocks to/from big-endian.
#   $3254779904 (0xC2000000) and $13979173243358019584
#       (0xC200000000000000) -> GHASH reduction constant for the
#       GF(2^128) polynomial x^128 + x^7 + x^2 + x + 1.
#   $72057594037927936 (0x0100000000000000) -> +1 on the byte-swapped
#       32-bit CTR counter lane.
#-----------------------------------------------------------------------
.global _gcm128_decrypt_opt
_gcm128_decrypt_opt:
# Save all Win64 callee-saved GPRs used below (incl. rsi/rdi).
push %r15
push %r14
push %r13
push %r12
push %rsi
push %rdi
push %rbp
push %rbx
mov 144(%rsp), %rbp
mov %rcx, %r13
# Bias the H-power table pointer so hot entries sit at small offsets.
lea 32(%r9), %r9
mov 72(%rsp), %rbx
mov %rdx, %rcx
# rcx = rdx * 16 = byte length covered by the whole-block GHASH pass.
imul $16, %rcx
# Build the 16-byte big-endian byte-swap mask in xmm9.
mov $579005069656919567, %r10
pinsrq $0, %r10, %xmm9
mov $283686952306183, %r10
pinsrq $1, %r10, %xmm9
# xmm8 = running GHASH accumulator, starts at zero.
pxor %xmm8, %xmm8
mov %rdi, %r11
jmp L147
.balign 16
# L146/L147: first GHASH pass -- fold 6 blocks per iteration from the
# buffer at r11 into xmm8, using H^6..H^1 from the table at r9.
L146:
add $80, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
sub $16, %r11
# Karatsuba-style 128x128 carryless multiply: accumulate the four
# partial products (lo, mid, mid, hi) into xmm4/xmm6/xmm7.
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 80(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
# Last of the 6 blocks absorbs the running accumulator (xmm8) first.
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
# Modular reduction of the 256-bit product back to 128 bits using the
# GHASH polynomial constant 0xC2000000 in the top dword of xmm3.
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
add $96, %r11
sub $6, %rdx
.balign 16
L147:
cmp $6, %rdx
jae L146
# Remainder: 1..5 whole blocks left for this GHASH pass.
cmp $0, %rdx
jbe L148
mov %rdx, %r10
sub $1, %r10
imul $16, %r10
add %r10, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
cmp $1, %rdx
jne L150
# Exactly one block: multiply (block xor acc) by H and reduce.
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
jmp L151
L150:
# 2..5 blocks: walk backwards through the data, pairing each block
# with the matching H power; empty labels below are fall-through
# exits generated for each early-out case.
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
cmp $2, %rdx
je L152
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $3, %rdx
je L154
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $4, %rdx
je L156
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
jmp L157
L156:
L157:
jmp L155
L154:
L155:
jmp L153
L152:
L153:
# Final block of the remainder absorbs the accumulator, then reduce.
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
L151:
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
jmp L149
L148:
L149:
# Partial (<16-byte) trailing piece of this input, if any: load it
# from the buffer at rbx, mask off the bytes beyond (rsi & 15) with a
# shift-derived mask, then fold the masked block into GHASH.
mov %rsi, %r15
cmp %rcx, %rsi
jbe L158
movdqu 0(%rbx), %xmm0
mov %rsi, %r10
and $15, %r10
cmp $8, %r10
jae L160
# < 8 valid bytes: zero the high qword, mask the low qword.
mov $0, %rcx
pinsrq $1, %rcx, %xmm0
mov %r10, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $0, %xmm0, %rcx
and %r11, %rcx
pinsrq $0, %rcx, %xmm0
jmp L161
L160:
# >= 8 valid bytes: keep the low qword, mask the high qword.
mov %r10, %rcx
sub $8, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $1, %xmm0, %rcx
and %r11, %rcx
pinsrq $1, %rcx, %xmm0
L161:
pshufb %xmm9, %xmm0
movdqu -32(%r9), %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r11
pinsrd $3, %r11d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
jmp L159
L158:
L159:
# ---- Bulk decryption phase ----
# Load the next argument group from the stack: input/output pointers
# and the 6-block count for the fused AES-CTR + GHASH loop.
mov 80(%rsp), %rdi
mov 88(%rsp), %rsi
mov 96(%rsp), %rdx
mov %r13, %rcx
movdqu %xmm9, %xmm0
movdqu 0(%r8), %xmm1
movdqu %xmm1, 0(%rbp)
# xmm10 = +1 in the low counter lane; pre-increment the counter block.
pxor %xmm10, %xmm10
mov $1, %r11
pinsrq $0, %r11, %xmm10
vpaddd %xmm10, %xmm1, %xmm1
cmp $0, %rdx
jne L162
# No 6-block groups: just stash the byte-swapped counter and skip.
vpshufb %xmm0, %xmm1, %xmm1
movdqu %xmm1, 32(%rbp)
jmp L163
L162:
# Prologue for the 6-way interleaved loop: save the GHASH state, bias
# the key pointer (+128 so round keys sit at -128..32(%rcx)), preload
# and byte-swap the first 6 ciphertext blocks into the rbp scratch
# area, and derive 6 consecutive counter blocks.
movdqu %xmm8, 32(%rbp)
add $128, %rcx
pextrq $0, %xmm1, %rbx
and $255, %rbx
vpshufb %xmm0, %xmm1, %xmm1
lea 96(%rdi), %r14
movdqu 32(%rbp), %xmm8
movdqu 80(%rdi), %xmm7
movdqu 64(%rdi), %xmm4
movdqu 48(%rdi), %xmm5
movdqu 32(%rdi), %xmm6
vpshufb %xmm0, %xmm7, %xmm7
movdqu 16(%rdi), %xmm2
vpshufb %xmm0, %xmm4, %xmm4
movdqu 0(%rdi), %xmm3
vpshufb %xmm0, %xmm5, %xmm5
movdqu %xmm4, 48(%rbp)
vpshufb %xmm0, %xmm6, %xmm6
movdqu %xmm5, 64(%rbp)
vpshufb %xmm0, %xmm2, %xmm2
movdqu %xmm6, 80(%rbp)
vpshufb %xmm0, %xmm3, %xmm3
movdqu %xmm2, 96(%rbp)
movdqu %xmm3, 112(%rbp)
# xmm2 = byte-swapped counter increment (+1 in big-endian lane).
pxor %xmm2, %xmm2
mov $72057594037927936, %r11
pinsrq $1, %r11, %xmm2
vpxor %xmm4, %xmm4, %xmm4
movdqu -128(%rcx), %xmm15
vpaddd %xmm2, %xmm1, %xmm10
vpaddd %xmm2, %xmm10, %xmm11
vpaddd %xmm2, %xmm11, %xmm12
vpaddd %xmm2, %xmm12, %xmm13
vpaddd %xmm2, %xmm13, %xmm14
vpxor %xmm15, %xmm1, %xmm9
movdqu %xmm4, 16(%rbp)
cmp $6, %rdx
jne L164
sub $96, %r14
jmp L165
L164:
L165:
jmp L167
.balign 16
# L166/L167: main fused loop -- each iteration runs 6 AES-128-CTR
# keystream pipelines (vaesenc on xmm9..xmm14) interleaved with the
# GHASH update of the previous 6 ciphertext blocks, then XORs the
# keystream into 6 input blocks to produce plaintext.
L166:
add $6, %rbx
cmp $256, %rbx
jb L168
# Counter low byte is about to wrap past 255: regenerate the 6
# counter blocks via full big-endian arithmetic (byte-swap, add,
# swap back) so the 32-bit counter carries correctly.
mov $579005069656919567, %r11
pinsrq $0, %r11, %xmm0
mov $283686952306183, %r11
pinsrq $1, %r11, %xmm0
vpshufb %xmm0, %xmm1, %xmm6
pxor %xmm5, %xmm5
mov $1, %r11
pinsrq $0, %r11, %xmm5
vpaddd %xmm5, %xmm6, %xmm10
pxor %xmm5, %xmm5
mov $2, %r11
pinsrq $0, %r11, %xmm5
vpaddd %xmm5, %xmm6, %xmm11
movdqu -32(%r9), %xmm3
vpaddd %xmm5, %xmm10, %xmm12
vpshufb %xmm0, %xmm10, %xmm10
vpaddd %xmm5, %xmm11, %xmm13
vpshufb %xmm0, %xmm11, %xmm11
vpxor %xmm15, %xmm10, %xmm10
vpaddd %xmm5, %xmm12, %xmm14
vpshufb %xmm0, %xmm12, %xmm12
vpxor %xmm15, %xmm11, %xmm11
vpaddd %xmm5, %xmm13, %xmm1
vpshufb %xmm0, %xmm13, %xmm13
vpshufb %xmm0, %xmm14, %xmm14
vpshufb %xmm0, %xmm1, %xmm1
sub $256, %rbx
jmp L169
L168:
# Fast path: no low-byte wrap, advance counters with lane adds only.
movdqu -32(%r9), %xmm3
vpaddd %xmm14, %xmm2, %xmm1
vpxor %xmm15, %xmm10, %xmm10
vpxor %xmm15, %xmm11, %xmm11
L169:
# Interleaved body: GHASH partial products of the 6 previous
# ciphertext blocks (from the rbp scratch slots) against H^6..H^1,
# woven between the AES round applications (round keys at
# -112(%rcx)..32(%rcx)); movbeq loads stage the NEXT 6 ciphertext
# blocks (byte-swapped) into the scratch area for the next round of
# GHASH.
movdqu %xmm1, 128(%rbp)
vpclmulqdq $16, %xmm3, %xmm7, %xmm5
vpxor %xmm15, %xmm12, %xmm12
movdqu -112(%rcx), %xmm2
vpclmulqdq $1, %xmm3, %xmm7, %xmm6
vaesenc %xmm2, %xmm9, %xmm9
movdqu 48(%rbp), %xmm0
vpxor %xmm15, %xmm13, %xmm13
vpclmulqdq $0, %xmm3, %xmm7, %xmm1
vaesenc %xmm2, %xmm10, %xmm10
vpxor %xmm15, %xmm14, %xmm14
vpclmulqdq $17, %xmm3, %xmm7, %xmm7
vaesenc %xmm2, %xmm11, %xmm11
movdqu -16(%r9), %xmm3
vaesenc %xmm2, %xmm12, %xmm12
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $0, %xmm3, %xmm0, %xmm5
vpxor %xmm4, %xmm8, %xmm8
vaesenc %xmm2, %xmm13, %xmm13
vpxor %xmm5, %xmm1, %xmm4
vpclmulqdq $16, %xmm3, %xmm0, %xmm1
vaesenc %xmm2, %xmm14, %xmm14
movdqu -96(%rcx), %xmm15
vpclmulqdq $1, %xmm3, %xmm0, %xmm2
vaesenc %xmm15, %xmm9, %xmm9
vpxor 16(%rbp), %xmm8, %xmm8
vpclmulqdq $17, %xmm3, %xmm0, %xmm3
movdqu 64(%rbp), %xmm0
vaesenc %xmm15, %xmm10, %xmm10
movbeq 88(%r14), %r13
vaesenc %xmm15, %xmm11, %xmm11
movbeq 80(%r14), %r12
vaesenc %xmm15, %xmm12, %xmm12
movq %r13, 32(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
movq %r12, 40(%rbp)
movdqu 16(%r9), %xmm5
vaesenc %xmm15, %xmm14, %xmm14
movdqu -80(%rcx), %xmm15
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm2, %xmm6, %xmm6
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vaesenc %xmm15, %xmm10, %xmm10
vpxor %xmm3, %xmm7, %xmm7
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vaesenc %xmm15, %xmm11, %xmm11
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 80(%rbp), %xmm0
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vaesenc %xmm15, %xmm14, %xmm14
movdqu -64(%rcx), %xmm15
vpxor %xmm2, %xmm6, %xmm6
vpclmulqdq $0, %xmm1, %xmm0, %xmm2
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $16, %xmm1, %xmm0, %xmm3
vaesenc %xmm15, %xmm10, %xmm10
movbeq 72(%r14), %r13
vpxor %xmm5, %xmm7, %xmm7
vpclmulqdq $1, %xmm1, %xmm0, %xmm5
vaesenc %xmm15, %xmm11, %xmm11
movbeq 64(%r14), %r12
vpclmulqdq $17, %xmm1, %xmm0, %xmm1
movdqu 96(%rbp), %xmm0
vaesenc %xmm15, %xmm12, %xmm12
movq %r13, 48(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
movq %r12, 56(%rbp)
vpxor %xmm2, %xmm4, %xmm4
movdqu 64(%r9), %xmm2
vaesenc %xmm15, %xmm14, %xmm14
movdqu -48(%rcx), %xmm15
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $0, %xmm2, %xmm0, %xmm3
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $16, %xmm2, %xmm0, %xmm5
vaesenc %xmm15, %xmm10, %xmm10
movbeq 56(%r14), %r13
vpxor %xmm1, %xmm7, %xmm7
vpclmulqdq $1, %xmm2, %xmm0, %xmm1
vpxor 112(%rbp), %xmm8, %xmm8
vaesenc %xmm15, %xmm11, %xmm11
movbeq 48(%r14), %r12
vpclmulqdq $17, %xmm2, %xmm0, %xmm2
vaesenc %xmm15, %xmm12, %xmm12
movq %r13, 64(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
movq %r12, 72(%rbp)
vpxor %xmm3, %xmm4, %xmm4
movdqu 80(%r9), %xmm3
vaesenc %xmm15, %xmm14, %xmm14
movdqu -32(%rcx), %xmm15
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $16, %xmm3, %xmm8, %xmm5
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $1, %xmm3, %xmm8, %xmm1
vaesenc %xmm15, %xmm10, %xmm10
movbeq 40(%r14), %r13
vpxor %xmm2, %xmm7, %xmm7
vpclmulqdq $0, %xmm3, %xmm8, %xmm2
vaesenc %xmm15, %xmm11, %xmm11
movbeq 32(%r14), %r12
vpclmulqdq $17, %xmm3, %xmm8, %xmm8
vaesenc %xmm15, %xmm12, %xmm12
movq %r13, 80(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
movq %r12, 88(%rbp)
vpxor %xmm5, %xmm6, %xmm6
vaesenc %xmm15, %xmm14, %xmm14
vpxor %xmm1, %xmm6, %xmm6
movdqu -16(%rcx), %xmm15
# Begin GHASH reduction (constant 0xC2 << 120 in xmm3) interleaved
# with the final AES rounds.
vpslldq $8, %xmm6, %xmm5
vpxor %xmm2, %xmm4, %xmm4
pxor %xmm3, %xmm3
mov $13979173243358019584, %r11
pinsrq $1, %r11, %xmm3
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm8, %xmm7, %xmm7
vaesenc %xmm15, %xmm10, %xmm10
vpxor %xmm5, %xmm4, %xmm4
movbeq 24(%r14), %r13
vaesenc %xmm15, %xmm11, %xmm11
movbeq 16(%r14), %r12
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
movq %r13, 96(%rbp)
vaesenc %xmm15, %xmm12, %xmm12
movq %r12, 104(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 0(%rcx), %xmm1
vaesenc %xmm1, %xmm9, %xmm9
movdqu 16(%rcx), %xmm15
vaesenc %xmm1, %xmm10, %xmm10
vpsrldq $8, %xmm6, %xmm6
vaesenc %xmm1, %xmm11, %xmm11
vpxor %xmm6, %xmm7, %xmm7
vaesenc %xmm1, %xmm12, %xmm12
vpxor %xmm0, %xmm4, %xmm4
movbeq 8(%r14), %r13
vaesenc %xmm1, %xmm13, %xmm13
movbeq 0(%r14), %r12
vaesenc %xmm1, %xmm14, %xmm14
movdqu 32(%rcx), %xmm1
vaesenc %xmm15, %xmm9, %xmm9
movdqu %xmm7, 16(%rbp)
vpalignr $8, %xmm4, %xmm4, %xmm8
vaesenc %xmm15, %xmm10, %xmm10
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
# Fold last-round key into the 6 input ciphertext blocks so a single
# vaesenclast completes keystream XOR input in one step.
vpxor 0(%rdi), %xmm1, %xmm2
vaesenc %xmm15, %xmm11, %xmm11
vpxor 16(%rdi), %xmm1, %xmm0
vaesenc %xmm15, %xmm12, %xmm12
vpxor 32(%rdi), %xmm1, %xmm5
vaesenc %xmm15, %xmm13, %xmm13
vpxor 48(%rdi), %xmm1, %xmm6
vaesenc %xmm15, %xmm14, %xmm14
vpxor 64(%rdi), %xmm1, %xmm7
vpxor 80(%rdi), %xmm1, %xmm3
movdqu 128(%rbp), %xmm1
vaesenclast %xmm2, %xmm9, %xmm9
pxor %xmm2, %xmm2
mov $72057594037927936, %r11
pinsrq $1, %r11, %xmm2
vaesenclast %xmm0, %xmm10, %xmm10
vpaddd %xmm2, %xmm1, %xmm0
movq %r13, 112(%rbp)
lea 96(%rdi), %rdi
vaesenclast %xmm5, %xmm11, %xmm11
vpaddd %xmm2, %xmm0, %xmm5
movq %r12, 120(%rbp)
lea 96(%rsi), %rsi
movdqu -128(%rcx), %xmm15
vaesenclast %xmm6, %xmm12, %xmm12
vpaddd %xmm2, %xmm5, %xmm6
vaesenclast %xmm7, %xmm13, %xmm13
vpaddd %xmm2, %xmm6, %xmm7
vaesenclast %xmm3, %xmm14, %xmm14
vpaddd %xmm2, %xmm7, %xmm3
sub $6, %rdx
cmp $6, %rdx
jbe L170
add $96, %r14
jmp L171
L170:
L171:
cmp $0, %rdx
jbe L172
# More groups remain: store the 6 plaintext blocks and rotate the
# freshly incremented counters into the keystream pipeline registers.
movdqu %xmm9, -96(%rsi)
vpxor %xmm15, %xmm1, %xmm9
movdqu %xmm10, -80(%rsi)
movdqu %xmm0, %xmm10
movdqu %xmm11, -64(%rsi)
movdqu %xmm5, %xmm11
movdqu %xmm12, -48(%rsi)
movdqu %xmm6, %xmm12
movdqu %xmm13, -32(%rsi)
movdqu %xmm7, %xmm13
movdqu %xmm14, -16(%rsi)
movdqu %xmm3, %xmm14
movdqu 32(%rbp), %xmm7
jmp L173
L172:
# Last group: finish folding the pending GHASH halves into xmm8.
vpxor 16(%rbp), %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
L173:
.balign 16
L167:
cmp $0, %rdx
ja L166
# Loop epilogue: persist counter state and the final 6 plaintext
# blocks, and undo the +128 key-pointer bias.
movdqu %xmm1, 32(%rbp)
movdqu %xmm9, -96(%rsi)
movdqu %xmm10, -80(%rsi)
movdqu %xmm11, -64(%rsi)
movdqu %xmm12, -48(%rsi)
movdqu %xmm13, -32(%rsi)
movdqu %xmm14, -16(%rsi)
sub $128, %rcx
L163:
# ---- Tail phase ----
# Reload counter (xmm11) and the next argument group; r8 becomes the
# key-schedule pointer for the scalar per-block loop below.
movdqu 32(%rbp), %xmm11
mov %rcx, %r8
mov 104(%rsp), %rax
mov 112(%rsp), %rdi
mov 120(%rsp), %rdx
mov %rdx, %r14
mov $579005069656919567, %r12
pinsrq $0, %r12, %xmm9
mov $283686952306183, %r12
pinsrq $1, %r12, %xmm9
pshufb %xmm9, %xmm11
mov %rdi, %rbx
mov %rdx, %r12
mov %rax, %rdi
mov %rdi, %r11
jmp L175
.balign 16
# L174/L175: second GHASH pass -- same 6-blocks-per-iteration folding
# as L146, applied to the tail ciphertext at r11 (rdx blocks).
L174:
add $80, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 80(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
add $96, %r11
sub $6, %rdx
.balign 16
L175:
cmp $6, %rdx
jae L174
# Remainder (1..5 blocks) of the second GHASH pass, mirroring
# L150..L153 above.
cmp $0, %rdx
jbe L176
mov %rdx, %r10
sub $1, %r10
imul $16, %r10
add %r10, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
cmp $1, %rdx
jne L178
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
jmp L179
L178:
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
cmp $2, %rdx
je L180
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $3, %rdx
je L182
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $4, %rdx
je L184
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
jmp L185
L184:
L185:
jmp L183
L182:
L183:
jmp L181
L180:
L181:
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
L179:
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
jmp L177
L176:
L177:
# Scalar per-block CTR decrypt of the remaining whole blocks:
# rbx counts blocks done, r11 = input, r10 = output, xmm11 = counter.
mov %rbx, %rdi
mov %r12, %rdx
pxor %xmm10, %xmm10
mov $1, %rbx
pinsrd $0, %ebx, %xmm10
mov %rax, %r11
mov %rdi, %r10
mov $0, %rbx
jmp L187
.balign 16
L186:
# One AES-128 encryption of the counter (10 aesenc + aesenclast over
# round keys 0..160(%r8)), then XOR with the input block.
movdqu %xmm11, %xmm0
pshufb %xmm9, %xmm0
movdqu 0(%r8), %xmm2
pxor %xmm2, %xmm0
movdqu 16(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 32(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 48(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 64(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 80(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 96(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 112(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 128(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 144(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 160(%r8), %xmm2
aesenclast %xmm2, %xmm0
# Scrub the round-key copy from xmm2 before it leaves scope.
pxor %xmm2, %xmm2
movdqu 0(%r11), %xmm2
pxor %xmm0, %xmm2
movdqu %xmm2, 0(%r10)
add $1, %rbx
add $16, %r11
add $16, %r10
paddd %xmm10, %xmm11
.balign 16
L187:
cmp %rdx, %rbx
jne L186
# Partial final block (total length r13 not a multiple of 16):
# GHASH the zero-padded ciphertext bytes, then decrypt them with one
# more keystream block and write back in place at 128(%rsp)'s buffer.
add 96(%rsp), %r14
imul $16, %r14
mov 136(%rsp), %r13
cmp %r14, %r13
jbe L188
mov 128(%rsp), %rax
mov %r13, %r10
and $15, %r10
movdqu 0(%rax), %xmm0
movdqu %xmm0, %xmm10
cmp $8, %r10
jae L190
# < 8 trailing bytes: zero high qword, mask low qword.
mov $0, %rcx
pinsrq $1, %rcx, %xmm0
mov %r10, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $0, %xmm0, %rcx
and %r11, %rcx
pinsrq $0, %rcx, %xmm0
jmp L191
L190:
# >= 8 trailing bytes: keep low qword, mask high qword.
mov %r10, %rcx
sub $8, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $1, %xmm0, %rcx
and %r11, %rcx
pinsrq $1, %rcx, %xmm0
L191:
pshufb %xmm9, %xmm0
movdqu -32(%r9), %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r11
pinsrd $3, %r11d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
# Generate one more keystream block from the current counter and
# decrypt the partial block in place.
movdqu %xmm11, %xmm0
pshufb %xmm9, %xmm0
movdqu 0(%r8), %xmm2
pxor %xmm2, %xmm0
movdqu 16(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 32(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 48(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 64(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 80(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 96(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 112(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 128(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 144(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 160(%r8), %xmm2
aesenclast %xmm2, %xmm0
pxor %xmm2, %xmm2
pxor %xmm0, %xmm10
movdqu %xmm10, 0(%rax)
jmp L189
L188:
L189:
# Length block: GHASH the bit lengths (AAD bits = r15*8 in the high
# qword, ciphertext bits = r13*8 in the low qword).
mov %r15, %r11
pxor %xmm0, %xmm0
mov %r11, %rax
imul $8, %rax
pinsrq $1, %rax, %xmm0
mov %r13, %rax
imul $8, %rax
pinsrq $0, %rax, %xmm0
movdqu -32(%r9), %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r11
pinsrd $3, %r11d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
# Tag: encrypt the saved initial counter block J0 (at 0(%rbp)) and
# XOR it with the final GHASH value.
movdqu 0(%rbp), %xmm0
pshufb %xmm9, %xmm0
movdqu 0(%r8), %xmm2
pxor %xmm2, %xmm0
movdqu 16(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 32(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 48(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 64(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 80(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 96(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 112(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 128(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 144(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 160(%r8), %xmm2
aesenclast %xmm2, %xmm0
pxor %xmm2, %xmm2
pshufb %xmm9, %xmm8
pxor %xmm0, %xmm8
# Branch-free tag comparison against the expected tag at 152(%rsp):
# pcmpeqd yields all-ones per equal dword; each 64-bit half minus
# 0xFFFF..FF borrows only on mismatch, so rax accumulates the number
# of mismatching halves -- 0 means the tags are equal.
mov 152(%rsp), %r15
movdqu 0(%r15), %xmm0
pcmpeqd %xmm8, %xmm0
pextrq $0, %xmm0, %rdx
sub $18446744073709551615, %rdx
mov $0, %rax
adc $0, %rax
pextrq $1, %xmm0, %rdx
sub $18446744073709551615, %rdx
mov $0, %rdx
adc $0, %rdx
add %rdx, %rax
mov %rax, %rcx
# Restore callee-saved registers in reverse push order and return
# the comparison result in rax.
pop %rbx
pop %rbp
pop %rdi
pop %rsi
pop %r12
pop %r13
pop %r14
pop %r15
mov %rcx, %rax
ret
.global _gcm256_decrypt_opt
_gcm256_decrypt_opt:
push %r15
push %r14
push %r13
push %r12
push %rsi
push %rdi
push %rbp
push %rbx
mov 144(%rsp), %rbp
mov %rcx, %r13
lea 32(%r9), %r9
mov 72(%rsp), %rbx
mov %rdx, %rcx
imul $16, %rcx
mov $579005069656919567, %r10
pinsrq $0, %r10, %xmm9
mov $283686952306183, %r10
pinsrq $1, %r10, %xmm9
pxor %xmm8, %xmm8
mov %rdi, %r11
jmp L193
.balign 16
L192:
add $80, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 80(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
add $96, %r11
sub $6, %rdx
.balign 16
L193:
cmp $6, %rdx
jae L192
cmp $0, %rdx
jbe L194
mov %rdx, %r10
sub $1, %r10
imul $16, %r10
add %r10, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
cmp $1, %rdx
jne L196
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
jmp L197
L196:
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
cmp $2, %rdx
je L198
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $3, %rdx
je L200
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $4, %rdx
je L202
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
jmp L203
L202:
L203:
jmp L201
L200:
L201:
jmp L199
L198:
L199:
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
L197:
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
jmp L195
L194:
L195:
mov %rsi, %r15
cmp %rcx, %rsi
jbe L204
movdqu 0(%rbx), %xmm0
mov %rsi, %r10
and $15, %r10
cmp $8, %r10
jae L206
mov $0, %rcx
pinsrq $1, %rcx, %xmm0
mov %r10, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $0, %xmm0, %rcx
and %r11, %rcx
pinsrq $0, %rcx, %xmm0
jmp L207
L206:
mov %r10, %rcx
sub $8, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $1, %xmm0, %rcx
and %r11, %rcx
pinsrq $1, %rcx, %xmm0
L207:
pshufb %xmm9, %xmm0
movdqu -32(%r9), %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r11
pinsrd $3, %r11d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
jmp L205
L204:
L205:
mov 80(%rsp), %rdi
mov 88(%rsp), %rsi
mov 96(%rsp), %rdx
mov %r13, %rcx
movdqu %xmm9, %xmm0
movdqu 0(%r8), %xmm1
movdqu %xmm1, 0(%rbp)
pxor %xmm10, %xmm10
mov $1, %r11
pinsrq $0, %r11, %xmm10
vpaddd %xmm10, %xmm1, %xmm1
cmp $0, %rdx
jne L208
vpshufb %xmm0, %xmm1, %xmm1
movdqu %xmm1, 32(%rbp)
jmp L209
L208:
movdqu %xmm8, 32(%rbp)
add $128, %rcx
pextrq $0, %xmm1, %rbx
and $255, %rbx
vpshufb %xmm0, %xmm1, %xmm1
lea 96(%rdi), %r14
movdqu 32(%rbp), %xmm8
movdqu 80(%rdi), %xmm7
movdqu 64(%rdi), %xmm4
movdqu 48(%rdi), %xmm5
movdqu 32(%rdi), %xmm6
vpshufb %xmm0, %xmm7, %xmm7
movdqu 16(%rdi), %xmm2
vpshufb %xmm0, %xmm4, %xmm4
movdqu 0(%rdi), %xmm3
vpshufb %xmm0, %xmm5, %xmm5
movdqu %xmm4, 48(%rbp)
vpshufb %xmm0, %xmm6, %xmm6
movdqu %xmm5, 64(%rbp)
vpshufb %xmm0, %xmm2, %xmm2
movdqu %xmm6, 80(%rbp)
vpshufb %xmm0, %xmm3, %xmm3
movdqu %xmm2, 96(%rbp)
movdqu %xmm3, 112(%rbp)
pxor %xmm2, %xmm2
mov $72057594037927936, %r11
pinsrq $1, %r11, %xmm2
vpxor %xmm4, %xmm4, %xmm4
movdqu -128(%rcx), %xmm15
vpaddd %xmm2, %xmm1, %xmm10
vpaddd %xmm2, %xmm10, %xmm11
vpaddd %xmm2, %xmm11, %xmm12
vpaddd %xmm2, %xmm12, %xmm13
vpaddd %xmm2, %xmm13, %xmm14
vpxor %xmm15, %xmm1, %xmm9
movdqu %xmm4, 16(%rbp)
cmp $6, %rdx
jne L210
sub $96, %r14
jmp L211
L210:
L211:
jmp L213
.balign 16
L212:
add $6, %rbx
cmp $256, %rbx
jb L214
mov $579005069656919567, %r11
pinsrq $0, %r11, %xmm0
mov $283686952306183, %r11
pinsrq $1, %r11, %xmm0
vpshufb %xmm0, %xmm1, %xmm6
pxor %xmm5, %xmm5
mov $1, %r11
pinsrq $0, %r11, %xmm5
vpaddd %xmm5, %xmm6, %xmm10
pxor %xmm5, %xmm5
mov $2, %r11
pinsrq $0, %r11, %xmm5
vpaddd %xmm5, %xmm6, %xmm11
movdqu -32(%r9), %xmm3
vpaddd %xmm5, %xmm10, %xmm12
vpshufb %xmm0, %xmm10, %xmm10
vpaddd %xmm5, %xmm11, %xmm13
vpshufb %xmm0, %xmm11, %xmm11
vpxor %xmm15, %xmm10, %xmm10
vpaddd %xmm5, %xmm12, %xmm14
vpshufb %xmm0, %xmm12, %xmm12
vpxor %xmm15, %xmm11, %xmm11
vpaddd %xmm5, %xmm13, %xmm1
vpshufb %xmm0, %xmm13, %xmm13
vpshufb %xmm0, %xmm14, %xmm14
vpshufb %xmm0, %xmm1, %xmm1
sub $256, %rbx
jmp L215
L214:
movdqu -32(%r9), %xmm3
vpaddd %xmm14, %xmm2, %xmm1
vpxor %xmm15, %xmm10, %xmm10
vpxor %xmm15, %xmm11, %xmm11
L215:
movdqu %xmm1, 128(%rbp)
vpclmulqdq $16, %xmm3, %xmm7, %xmm5
vpxor %xmm15, %xmm12, %xmm12
movdqu -112(%rcx), %xmm2
vpclmulqdq $1, %xmm3, %xmm7, %xmm6
vaesenc %xmm2, %xmm9, %xmm9
movdqu 48(%rbp), %xmm0
vpxor %xmm15, %xmm13, %xmm13
vpclmulqdq $0, %xmm3, %xmm7, %xmm1
vaesenc %xmm2, %xmm10, %xmm10
vpxor %xmm15, %xmm14, %xmm14
vpclmulqdq $17, %xmm3, %xmm7, %xmm7
vaesenc %xmm2, %xmm11, %xmm11
movdqu -16(%r9), %xmm3
vaesenc %xmm2, %xmm12, %xmm12
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $0, %xmm3, %xmm0, %xmm5
vpxor %xmm4, %xmm8, %xmm8
vaesenc %xmm2, %xmm13, %xmm13
vpxor %xmm5, %xmm1, %xmm4
vpclmulqdq $16, %xmm3, %xmm0, %xmm1
vaesenc %xmm2, %xmm14, %xmm14
movdqu -96(%rcx), %xmm15
vpclmulqdq $1, %xmm3, %xmm0, %xmm2
vaesenc %xmm15, %xmm9, %xmm9
vpxor 16(%rbp), %xmm8, %xmm8
vpclmulqdq $17, %xmm3, %xmm0, %xmm3
movdqu 64(%rbp), %xmm0
vaesenc %xmm15, %xmm10, %xmm10
movbeq 88(%r14), %r13
vaesenc %xmm15, %xmm11, %xmm11
movbeq 80(%r14), %r12
vaesenc %xmm15, %xmm12, %xmm12
movq %r13, 32(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
movq %r12, 40(%rbp)
movdqu 16(%r9), %xmm5
vaesenc %xmm15, %xmm14, %xmm14
movdqu -80(%rcx), %xmm15
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm2, %xmm6, %xmm6
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vaesenc %xmm15, %xmm10, %xmm10
vpxor %xmm3, %xmm7, %xmm7
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vaesenc %xmm15, %xmm11, %xmm11
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 80(%rbp), %xmm0
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vaesenc %xmm15, %xmm14, %xmm14
movdqu -64(%rcx), %xmm15
vpxor %xmm2, %xmm6, %xmm6
vpclmulqdq $0, %xmm1, %xmm0, %xmm2
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $16, %xmm1, %xmm0, %xmm3
vaesenc %xmm15, %xmm10, %xmm10
movbeq 72(%r14), %r13
vpxor %xmm5, %xmm7, %xmm7
vpclmulqdq $1, %xmm1, %xmm0, %xmm5
vaesenc %xmm15, %xmm11, %xmm11
movbeq 64(%r14), %r12
vpclmulqdq $17, %xmm1, %xmm0, %xmm1
movdqu 96(%rbp), %xmm0
vaesenc %xmm15, %xmm12, %xmm12
movq %r13, 48(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
movq %r12, 56(%rbp)
vpxor %xmm2, %xmm4, %xmm4
movdqu 64(%r9), %xmm2
vaesenc %xmm15, %xmm14, %xmm14
movdqu -48(%rcx), %xmm15
vpxor %xmm3, %xmm6, %xmm6
vpclmulqdq $0, %xmm2, %xmm0, %xmm3
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $16, %xmm2, %xmm0, %xmm5
vaesenc %xmm15, %xmm10, %xmm10
movbeq 56(%r14), %r13
vpxor %xmm1, %xmm7, %xmm7
vpclmulqdq $1, %xmm2, %xmm0, %xmm1
vpxor 112(%rbp), %xmm8, %xmm8
vaesenc %xmm15, %xmm11, %xmm11
movbeq 48(%r14), %r12
vpclmulqdq $17, %xmm2, %xmm0, %xmm2
vaesenc %xmm15, %xmm12, %xmm12
movq %r13, 64(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
movq %r12, 72(%rbp)
vpxor %xmm3, %xmm4, %xmm4
movdqu 80(%r9), %xmm3
vaesenc %xmm15, %xmm14, %xmm14
movdqu -32(%rcx), %xmm15
vpxor %xmm5, %xmm6, %xmm6
vpclmulqdq $16, %xmm3, %xmm8, %xmm5
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm1, %xmm6, %xmm6
vpclmulqdq $1, %xmm3, %xmm8, %xmm1
vaesenc %xmm15, %xmm10, %xmm10
movbeq 40(%r14), %r13
vpxor %xmm2, %xmm7, %xmm7
vpclmulqdq $0, %xmm3, %xmm8, %xmm2
vaesenc %xmm15, %xmm11, %xmm11
movbeq 32(%r14), %r12
vpclmulqdq $17, %xmm3, %xmm8, %xmm8
vaesenc %xmm15, %xmm12, %xmm12
movq %r13, 80(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
movq %r12, 88(%rbp)
vpxor %xmm5, %xmm6, %xmm6
vaesenc %xmm15, %xmm14, %xmm14
vpxor %xmm1, %xmm6, %xmm6
movdqu -16(%rcx), %xmm15
vpslldq $8, %xmm6, %xmm5
vpxor %xmm2, %xmm4, %xmm4
pxor %xmm3, %xmm3
mov $13979173243358019584, %r11
pinsrq $1, %r11, %xmm3
vaesenc %xmm15, %xmm9, %xmm9
vpxor %xmm8, %xmm7, %xmm7
vaesenc %xmm15, %xmm10, %xmm10
vpxor %xmm5, %xmm4, %xmm4
movbeq 24(%r14), %r13
vaesenc %xmm15, %xmm11, %xmm11
movbeq 16(%r14), %r12
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
movq %r13, 96(%rbp)
vaesenc %xmm15, %xmm12, %xmm12
movq %r12, 104(%rbp)
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
movdqu 0(%rcx), %xmm1
vaesenc %xmm1, %xmm9, %xmm9
movdqu 16(%rcx), %xmm15
vaesenc %xmm1, %xmm10, %xmm10
vpsrldq $8, %xmm6, %xmm6
vaesenc %xmm1, %xmm11, %xmm11
vpxor %xmm6, %xmm7, %xmm7
vaesenc %xmm1, %xmm12, %xmm12
vpxor %xmm0, %xmm4, %xmm4
movbeq 8(%r14), %r13
vaesenc %xmm1, %xmm13, %xmm13
movbeq 0(%r14), %r12
vaesenc %xmm1, %xmm14, %xmm14
movdqu 32(%rcx), %xmm1
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
vaesenc %xmm1, %xmm9, %xmm9
vaesenc %xmm1, %xmm10, %xmm10
vaesenc %xmm1, %xmm11, %xmm11
vaesenc %xmm1, %xmm12, %xmm12
vaesenc %xmm1, %xmm13, %xmm13
movdqu 48(%rcx), %xmm15
vaesenc %xmm1, %xmm14, %xmm14
movdqu 64(%rcx), %xmm1
vaesenc %xmm15, %xmm9, %xmm9
vaesenc %xmm15, %xmm10, %xmm10
vaesenc %xmm15, %xmm11, %xmm11
vaesenc %xmm15, %xmm12, %xmm12
vaesenc %xmm15, %xmm13, %xmm13
vaesenc %xmm15, %xmm14, %xmm14
vaesenc %xmm1, %xmm9, %xmm9
vaesenc %xmm1, %xmm10, %xmm10
vaesenc %xmm1, %xmm11, %xmm11
vaesenc %xmm1, %xmm12, %xmm12
vaesenc %xmm1, %xmm13, %xmm13
movdqu 80(%rcx), %xmm15
vaesenc %xmm1, %xmm14, %xmm14
movdqu 96(%rcx), %xmm1
vaesenc %xmm15, %xmm9, %xmm9
movdqu %xmm7, 16(%rbp)
vpalignr $8, %xmm4, %xmm4, %xmm8
vaesenc %xmm15, %xmm10, %xmm10
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor 0(%rdi), %xmm1, %xmm2
vaesenc %xmm15, %xmm11, %xmm11
vpxor 16(%rdi), %xmm1, %xmm0
vaesenc %xmm15, %xmm12, %xmm12
vpxor 32(%rdi), %xmm1, %xmm5
vaesenc %xmm15, %xmm13, %xmm13
vpxor 48(%rdi), %xmm1, %xmm6
vaesenc %xmm15, %xmm14, %xmm14
vpxor 64(%rdi), %xmm1, %xmm7
vpxor 80(%rdi), %xmm1, %xmm3
movdqu 128(%rbp), %xmm1
vaesenclast %xmm2, %xmm9, %xmm9
pxor %xmm2, %xmm2
mov $72057594037927936, %r11
pinsrq $1, %r11, %xmm2
vaesenclast %xmm0, %xmm10, %xmm10
vpaddd %xmm2, %xmm1, %xmm0
movq %r13, 112(%rbp)
lea 96(%rdi), %rdi
vaesenclast %xmm5, %xmm11, %xmm11
vpaddd %xmm2, %xmm0, %xmm5
movq %r12, 120(%rbp)
lea 96(%rsi), %rsi
movdqu -128(%rcx), %xmm15
vaesenclast %xmm6, %xmm12, %xmm12
vpaddd %xmm2, %xmm5, %xmm6
vaesenclast %xmm7, %xmm13, %xmm13
vpaddd %xmm2, %xmm6, %xmm7
vaesenclast %xmm3, %xmm14, %xmm14
vpaddd %xmm2, %xmm7, %xmm3
sub $6, %rdx
cmp $6, %rdx
jbe L216
add $96, %r14
jmp L217
L216:
L217:
cmp $0, %rdx
jbe L218
movdqu %xmm9, -96(%rsi)
vpxor %xmm15, %xmm1, %xmm9
movdqu %xmm10, -80(%rsi)
movdqu %xmm0, %xmm10
movdqu %xmm11, -64(%rsi)
movdqu %xmm5, %xmm11
movdqu %xmm12, -48(%rsi)
movdqu %xmm6, %xmm12
movdqu %xmm13, -32(%rsi)
movdqu %xmm7, %xmm13
movdqu %xmm14, -16(%rsi)
movdqu %xmm3, %xmm14
movdqu 32(%rbp), %xmm7
jmp L219
L218:
vpxor 16(%rbp), %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
L219:
.balign 16
L213:
cmp $0, %rdx
ja L212
movdqu %xmm1, 32(%rbp)
movdqu %xmm9, -96(%rsi)
movdqu %xmm10, -80(%rsi)
movdqu %xmm11, -64(%rsi)
movdqu %xmm12, -48(%rsi)
movdqu %xmm13, -32(%rsi)
movdqu %xmm14, -16(%rsi)
sub $128, %rcx
L209:
movdqu 32(%rbp), %xmm11
mov %rcx, %r8
mov 104(%rsp), %rax
mov 112(%rsp), %rdi
mov 120(%rsp), %rdx
mov %rdx, %r14
mov $579005069656919567, %r12
pinsrq $0, %r12, %xmm9
mov $283686952306183, %r12
pinsrq $1, %r12, %xmm9
pshufb %xmm9, %xmm11
mov %rdi, %rbx
mov %rdx, %r12
mov %rax, %rdi
mov %rdi, %r11
jmp L221
.balign 16
L220:
add $80, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 80(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
add $96, %r11
sub $6, %rdx
.balign 16
L221:
cmp $6, %rdx
jae L220
cmp $0, %rdx
jbe L222
mov %rdx, %r10
sub $1, %r10
imul $16, %r10
add %r10, %r11
movdqu -32(%r9), %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
cmp $1, %rdx
jne L224
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
jmp L225
L224:
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
movdqu %xmm1, %xmm4
movdqu -16(%r9), %xmm1
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
movdqu %xmm1, %xmm5
cmp $2, %rdx
je L226
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 16(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $3, %rdx
je L228
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 32(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
cmp $4, %rdx
je L230
sub $16, %r11
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu 0(%r11), %xmm0
pshufb %xmm9, %xmm0
vpxor %xmm1, %xmm4, %xmm4
movdqu 64(%r9), %xmm1
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
movdqu %xmm1, %xmm5
jmp L231
L230:
L231:
jmp L229
L228:
L229:
jmp L227
L226:
L227:
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
vpxor %xmm1, %xmm4, %xmm4
vpxor %xmm2, %xmm6, %xmm6
vpxor %xmm3, %xmm6, %xmm6
vpxor %xmm5, %xmm7, %xmm7
L225:
pxor %xmm3, %xmm3
mov $3254779904, %r10
pinsrd $3, %r10d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
jmp L223
L222:
L223:
mov %rbx, %rdi
mov %r12, %rdx
pxor %xmm10, %xmm10
mov $1, %rbx
pinsrd $0, %ebx, %xmm10
mov %rax, %r11
mov %rdi, %r10
mov $0, %rbx
jmp L233
.balign 16
L232:
movdqu %xmm11, %xmm0
pshufb %xmm9, %xmm0
movdqu 0(%r8), %xmm2
pxor %xmm2, %xmm0
movdqu 16(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 32(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 48(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 64(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 80(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 96(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 112(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 128(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 144(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 160(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 176(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 192(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 208(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 224(%r8), %xmm2
aesenclast %xmm2, %xmm0
pxor %xmm2, %xmm2
movdqu 0(%r11), %xmm2
pxor %xmm0, %xmm2
movdqu %xmm2, 0(%r10)
add $1, %rbx
add $16, %r11
add $16, %r10
paddd %xmm10, %xmm11
.balign 16
L233:
cmp %rdx, %rbx
jne L232
add 96(%rsp), %r14
imul $16, %r14
mov 136(%rsp), %r13
cmp %r14, %r13
jbe L234
mov 128(%rsp), %rax
mov %r13, %r10
and $15, %r10
movdqu 0(%rax), %xmm0
movdqu %xmm0, %xmm10
cmp $8, %r10
jae L236
mov $0, %rcx
pinsrq $1, %rcx, %xmm0
mov %r10, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $0, %xmm0, %rcx
and %r11, %rcx
pinsrq $0, %rcx, %xmm0
jmp L237
L236:
mov %r10, %rcx
sub $8, %rcx
shl $3, %rcx
mov $1, %r11
shl %cl, %r11
sub $1, %r11
pextrq $1, %xmm0, %rcx
and %r11, %rcx
pinsrq $1, %rcx, %xmm0
L237:
pshufb %xmm9, %xmm0
movdqu -32(%r9), %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r11
pinsrd $3, %r11d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
movdqu %xmm11, %xmm0
pshufb %xmm9, %xmm0
movdqu 0(%r8), %xmm2
pxor %xmm2, %xmm0
movdqu 16(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 32(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 48(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 64(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 80(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 96(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 112(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 128(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 144(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 160(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 176(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 192(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 208(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 224(%r8), %xmm2
aesenclast %xmm2, %xmm0
pxor %xmm2, %xmm2
pxor %xmm0, %xmm10
movdqu %xmm10, 0(%rax)
jmp L235
L234:
L235:
mov %r15, %r11
pxor %xmm0, %xmm0
mov %r11, %rax
imul $8, %rax
pinsrq $1, %rax, %xmm0
mov %r13, %rax
imul $8, %rax
pinsrq $0, %rax, %xmm0
movdqu -32(%r9), %xmm5
vpxor %xmm0, %xmm8, %xmm0
vpclmulqdq $0, %xmm5, %xmm0, %xmm1
vpclmulqdq $16, %xmm5, %xmm0, %xmm2
vpclmulqdq $1, %xmm5, %xmm0, %xmm3
vpclmulqdq $17, %xmm5, %xmm0, %xmm5
movdqu %xmm1, %xmm4
vpxor %xmm3, %xmm2, %xmm6
movdqu %xmm5, %xmm7
pxor %xmm3, %xmm3
mov $3254779904, %r11
pinsrd $3, %r11d, %xmm3
vpslldq $8, %xmm6, %xmm5
vpxor %xmm5, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm0
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpsrldq $8, %xmm6, %xmm6
vpxor %xmm6, %xmm7, %xmm7
vpxor %xmm0, %xmm4, %xmm4
vpalignr $8, %xmm4, %xmm4, %xmm8
vpclmulqdq $16, %xmm3, %xmm4, %xmm4
vpxor %xmm7, %xmm8, %xmm8
vpxor %xmm4, %xmm8, %xmm8
movdqu 0(%rbp), %xmm0
pshufb %xmm9, %xmm0
movdqu 0(%r8), %xmm2
pxor %xmm2, %xmm0
movdqu 16(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 32(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 48(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 64(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 80(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 96(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 112(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 128(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 144(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 160(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 176(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 192(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 208(%r8), %xmm2
aesenc %xmm2, %xmm0
movdqu 224(%r8), %xmm2
aesenclast %xmm2, %xmm0
pxor %xmm2, %xmm2
pshufb %xmm9, %xmm8
pxor %xmm0, %xmm8
mov 152(%rsp), %r15
movdqu 0(%r15), %xmm0
pcmpeqd %xmm8, %xmm0
pextrq $0, %xmm0, %rdx
sub $18446744073709551615, %rdx
mov $0, %rax
adc $0, %rax
pextrq $1, %xmm0, %rdx
sub $18446744073709551615, %rdx
mov $0, %rdx
adc $0, %rdx
add %rdx, %rax
mov %rax, %rcx
pop %rbx
pop %rbp
pop %rdi
pop %rsi
pop %r12
pop %r13
pop %r14
pop %r15
mov %rcx, %rax
ret
|
USTB-806/chaos-double-pagetable
| 1,688
|
os/src/trap/init_entry.S
|
.altmacro
.section .text
.globl __init_entry
.globl __user_entry
.align 2
__init_entry:
# First entry into user mode for the initial process.
# a0: *TrapContext of initproc; a1: initproc token
# NOTE(review): relies on the LOAD_GP macro (and .altmacro mode) being
# defined elsewhere (see trap.S) -- confirm both files are assembled together.
# switch to user space
csrw satp, a1
sfence.vma
# stash the user-space TrapContext pointer for the next trap entry
csrw sscratch, a0
mv sp, a0
# now sp points to TrapContext in user space, start restoring based on it
# restore sstatus/sepc
ld t0, 32*8(sp)
ld t1, 33*8(sp)
csrw sstatus, t0
csrw sepc, t1
# restore general purpose registers except x0/sp/tp
ld x1, 1*8(sp)
ld x3, 3*8(sp)
# x5..x31 live at byte offsets 5*8 .. 31*8 of the TrapContext
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
# back to user stack
ld sp, 2*8(sp)
sret
__user_entry:
# Enter user mode by restoring a full TrapContext (same sequence as
# __init_entry, but for an already-initialised user task).
# a0: *TrapContext in user space(Constant); a1: user space token
# switch to user space
csrw satp, a1
sfence.vma
# stash the user-space TrapContext pointer for the next trap entry
csrw sscratch, a0
mv sp, a0
# now sp points to TrapContext in user space, start restoring based on it
# restore sstatus/sepc
ld t0, 32*8(sp)
ld t1, 33*8(sp)
csrw sstatus, t0
csrw sepc, t1
# restore general purpose registers except x0/sp/tp
ld x1, 1*8(sp)
ld x3, 3*8(sp)
# x5..x31 live at byte offsets 5*8 .. 31*8 of the TrapContext
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
# back to user stack
ld sp, 2*8(sp)
sret
__wait_return:
# Switch the address space WITHOUT restoring a TrapContext, then return
# to the caller via ra.
# a0: *TrapContext in user space(Constant); a1: user space token
# NOTE(review): after csrw satp this keeps executing and then `ret`s,
# which assumes the current code and stack are mapped at the same virtual
# addresses in the target address space -- confirm against the memory layout.
# switch to user space
csrw satp, a1
sfence.vma
# The full context-restore sequence below is intentionally disabled.
# csrw sscratch, a0
# mv sp, a0
# # now sp points to TrapContext in user space, start restoring based on it
# # restore general purpose registers except x0/sp/tp
# ld x3, 3*8(sp)
# .set n, 5
# .rept 27
# LOAD_GP %n
# .set n, n+1
# .endr
ret
|
USTB-806/chaos-double-pagetable
| 1,882
|
os/src/trap/trap.S
|
.altmacro
# SAVE_GP n / LOAD_GP n: store/load general-purpose register x<n>
# to/from slot n (byte offset n*8) of the TrapContext pointed to by sp.
# .altmacro mode is required for the %n numeric expansion at call sites.
.macro SAVE_GP n
sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
ld x\n, \n*8(sp)
.endm
.section .text
.globl __alltraps
.globl __restore
.align 2
__alltraps:
# Trap entry from user mode. On entry sscratch holds the user-space
# TrapContext pointer; swap it with the user sp so we can save state.
csrrw sp, sscratch, sp
# now sp->*TrapContext in user space, sscratch->user stack
# save other general purpose registers
sd x1, 1*8(sp)
# skip sp(x2), we will save it later
sd x3, 3*8(sp)
# skip tp(x4), application does not use it
# save x5~x31
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
# we can use t0/t1/t2 freely, because they have been saved in TrapContext
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*8(sp)
sd t1, 33*8(sp)
# read user stack from sscratch and save it in TrapContext
csrr t2, sscratch
sd t2, 2*8(sp)
# load kernel_satp into t0
# NOTE(review): t0 is loaded but never written to satp below -- presumably
# the kernel shares the address space here; confirm against the trap setup.
ld t0, 34*8(sp)
# load trap_handler into t1
ld t1, 36*8(sp)
# move to kernel_sp
ld sp, 35*8(sp)
# jump to trap_handler
jr t1
__restore:
# Return to user space by restoring a saved TrapContext.
# a0: *TrapContext in user space(Constant); a1: user space token
# NOTE(review): a1 is documented as the user-space token but satp is not
# written here -- confirm the address space is already switched by the caller.
csrw sscratch, a0
mv sp, a0
# now sp points to TrapContext in user space, start restoring based on it
# restore sstatus/sepc
ld t0, 32*8(sp)
ld t1, 33*8(sp)
csrw sstatus, t0
csrw sepc, t1
# restore general purpose registers except x0/sp/tp
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
# back to user stack
ld sp, 2*8(sp)
sret
.section .data
# emergency stack for kernel trap
# in order to print trap info even if the kernel stack is corrupted.
# 4 KiB reserved; the stack grows downward, so sp is loaded with the
# end label below.
__emergency:
.align 4
.space 1024 * 4
__emergency_end:
.section .text
.globl __trap_from_kernel
# 2^2=4 bytes aligned for stvec
.align 2
__trap_from_kernel:
# Kernel-mode trap entry: switch to the emergency stack, then hand off
# to the Rust handler; trap_from_kernel does not return here.
la sp, __emergency_end
j trap_from_kernel
|
USTB-806/chaos-double-pagetable
| 1,237
|
os/src/task/switch.S
|
.altmacro
# SAVE_SN n: store callee-saved register s<n> into the TaskContext at a0.
# Slot n+2 because slot 0 holds ra and slot 1 holds sp.
.macro SAVE_SN n
sd s\n, (\n+2)*8(a0)
.endm
# LOAD_SN n: load callee-saved register s<n> from the TaskContext at a1.
.macro LOAD_SN n
ld s\n, (\n+2)*8(a1)
.endm
.section .text
.globl __switch
.globl __schedule
__switch:
# __switch(
# current_task_cx_ptr: *mut TaskContext,
# next_task_cx_ptr: *const TaskContext
# )
# TaskContext layout: [0] ra, [8] sp, [16..] s0..s11 (see SAVE_SN/LOAD_SN).
# Returns on the NEXT task's kernel stack via the restored ra.
# save kernel stack of current task
sd sp, 8(a0)
# save ra & s0~s11 of current execution
sd ra, 0(a0)
.set n, 0
.rept 12
SAVE_SN %n
.set n, n + 1
.endr
# restore ra & s0~s11 of next execution
ld ra, 0(a1)
.set n, 0
.rept 12
LOAD_SN %n
.set n, n + 1
.endr
# restore kernel stack of next task
ld sp, 8(a1)
ret
__schedule:
# __schedule(
# current_task_cx_ptr: *mut TaskContext,
# next_task_cx_ptr: *const TaskContext
# )
# NOTE(review): byte-for-byte identical to __switch; presumably kept as a
# separate symbol so scheduler-initiated switches are distinguishable.
# save kernel stack of current task
sd sp, 8(a0)
# save ra & s0~s11 of current execution
sd ra, 0(a0)
.set n, 0
.rept 12
SAVE_SN %n
.set n, n + 1
.endr
# restore ra & s0~s11 of next execution
ld ra, 0(a1)
.set n, 0
.rept 12
LOAD_SN %n
.set n, n + 1
.endr
# restore kernel stack of next task
ld sp, 8(a1)
ret
|
uzogoduyah/uzogoduyah-gmail.com
| 14,166
|
contents/tree_traversal/code/asm-x64/tree_traversal.s
|
.intel_syntax noprefix
# System V calling convention cheatsheet
# Params: rdi, rsi, rdx, rcx, r8, r9, xmm0-7
# Return: rax (int 64 bits), rax:rdx (int 128 bits), xmm0 (float)
# Callee cleanup: rbx, rbp, r12-15
# Scratch: rax, rdi, rsi, rdx, rcx, r8, r9, r10, r11
.section .rodata
not_bt: .string "This is not a binary tree.\n"
fmt_tree: .string "%d \n"
.equ stack_size, 16
.equ stack_array, 0
.equ stack_top, 8
.equ stack_cap, 12
.equ queue_size, 20
.equ queue_array, 0
.equ queue_front, 8
.equ queue_back, 12
.equ queue_cap, 16
.equ tree_children, 0
.equ tree_num_children, 8
.equ tree_value, 12
.equ tree_size, 16
.section .text
.global main
.extern printf, malloc, free, memcpy
# void get_stack(stack *s)
# Initialise a stack: 32-byte backing array, top = 0, capacity = 32.
# Layout (see .equ block): [0] array ptr (qword), [8] top (dword, byte
# offset of the last element -- 0 means empty), [12] capacity in bytes.
# rdi - stack ptr
get_stack:
push r12
mov r12, rdi
mov rdi, 32 # Creating a 32 byte array
call malloc
mov QWORD PTR [r12], rax # Saving the data into the stack
mov DWORD PTR [r12 + 8], 0
mov DWORD PTR [r12 + 12], 32
pop r12
ret
# void stack_push(stack *s, void *element)
# Appends an element, doubling the backing array via realloc when full.
# The layout is 1-based: top is the byte offset of the last element, so
# slot 0 is unused and the first element lives at offset 8.
# FIX(review): the original resized only when top == capacity, which let
# the push at top == capacity-8 write 8 bytes past the end of the
# allocation; we now grow whenever the new element would not fit.
# NOTE(review): realloc is not in the .extern list above -- GAS treats
# undefined symbols as external, but consider adding it for clarity.
# rdi - stack ptr
# rsi - element ptr
stack_push:
push r12
push r13
push r14
mov r12, rdi # Saving the variables
mov r13, rsi
mov r14d, DWORD PTR [r12 + 8] # top (byte offset of last element)
mov esi, DWORD PTR [r12 + 12] # capacity in bytes
lea rax, [r14 + 16] # end of the slot the new element needs (top+8 start, +8 size)
cmp rax, rsi
jbe stack_push_append # still fits in the current array
shl rsi, 1 # Calculate new capacity in bytes
mov DWORD PTR [r12 + 12], esi # Saving new capacity
mov rdi, [r12]
call realloc # Making the array bigger
mov QWORD PTR [r12], rax
stack_push_append:
add r14, 8
mov rax, QWORD PTR [r12]
lea rax, [rax + r14]
mov QWORD PTR [rax], r13 # Saving element and new top
mov DWORD PTR [r12 + 8], r14d
pop r14
pop r13
pop r12
ret
# void *stack_pop(stack *s)
# Removes and returns the top element; returns 0 (NULL) when empty.
# rdi - stack ptr
# RET rax - element ptr
stack_pop:
push r12
mov r12d, DWORD PTR [rdi + 8] # Get top
test r12, r12 # Check if top is zero (empty stack)
jne stack_pop_element
xor rax, rax # Return 0
jmp stack_pop_return
stack_pop_element:
mov rax, [rdi]
lea rax, [rax + r12] # Get the element (last lives at array+top)
mov rax, QWORD PTR [rax]
sub r12, 8 # Subtract 1 from top and save it
mov DWORD PTR [rdi + 8], r12d
stack_pop_return:
pop r12
ret
# void free_stack(stack *s)
# Releases the stack's backing array.
# FIX(review): the original did `call free` with rsp at entry alignment
# (rsp % 16 == 8), violating the SysV requirement that rsp be 16-byte
# aligned at every call site. A tail jump keeps the caller's alignment
# and lets free's own ret return to our caller.
# rdi - stack ptr
free_stack:
mov rdi, QWORD PTR [rdi] # Free stack array
jmp free # tail-call into free
# void get_queue(queue *q)
# Initialise a circular queue: 32-byte array, front = back = 0, cap = 32.
# Layout (see .equ block): [0] array ptr, [8] front (dword, byte offset),
# [12] back (dword, byte offset), [16] capacity in bytes (dword).
# rdi - queue ptr
get_queue:
push r12
mov r12, rdi
mov rdi, 32 # Create a 32 byte array
call malloc
mov QWORD PTR [r12], rax # Saving data to the queue pointer
mov QWORD PTR [r12 + 8], 0 # one qword store zeroes both front and back
mov DWORD PTR [r12 + 16], 32
pop r12
ret
# void queue_resize(queue *q)
# Doubles the queue's backing array. Only called when the queue is full,
# so after the two memcpy calls the contents are linear again:
# new front = 0, new back = old capacity - 8 (one slot is always unused).
# rdi - queue ptr
queue_resize:
push r12
push r13
push r14
mov r12, rdi
mov edi, DWORD PTR [r12 + 16] # Get new capacity and create new array
shl rdi, 1
call malloc
mov r13, rax
mov r14, QWORD PTR[r12]
mov rdi, r13 # Copy data from front to capacity
mov eax, DWORD PTR [r12 + 8]
lea rsi, [r14 + rax]
mov edx, DWORD PTR [r12 + 16]
sub edx, DWORD PTR [r12 + 8]
call memcpy
mov eax, DWORD PTR [r12 + 16] # Copy data from start of array to front
sub eax, DWORD PTR [r12 + 8]
lea rdi, [r13 + rax]
mov rsi, r14
mov edx, DWORD PTR [r12 + 8]
call memcpy
mov rdi, r14 # New array has front at 0 and back at the old capacity
call free # So free the old array then save the new queue
mov QWORD PTR [r12], r13
mov eax, DWORD PTR [r12 + 16]
sub rax, 8 # back = old capacity - 8
mov DWORD PTR [r12 + 12], eax
mov DWORD PTR [r12 + 8], 0 # front = 0
mov eax, DWORD PTR [r12 + 16]
shl rax, 1 # capacity doubles
mov DWORD PTR [r12 + 16], eax
pop r14
pop r13
pop r12
ret
# void enqueue(queue *q, void *element)
# Appends an element at back; grows the array when the queue is full
# ((back+8) % cap == front means full -- one slot stays empty so that
# full and empty states are distinguishable).
# FIX(review): the four pushes left rsp % 16 == 8 at the
# `call queue_resize` site, violating SysV stack alignment; an extra
# 8-byte adjustment restores 16-byte alignment for all nested calls.
# rdi - queue ptr
# rsi - element
enqueue:
push r12
push r13
push r14
push r15
sub rsp, 8 # re-align rsp to 16 bytes at call sites
mov r12, rdi # Saving parameters
mov r13, rsi
mov r14d, DWORD PTR [rdi + 8]
mov eax, DWORD PTR [rdi + 12]# Calculating new back
add eax, 8
mov edi, DWORD PTR [r12 + 16]
cdq
idiv edi
cmp rdx, r14 # Check if front and new back are equal
jne enqueue_append
mov rdi, r12 # If so resize the queue
call queue_resize
enqueue_append:
mov r14, QWORD PTR [r12] # Saving the element
mov r15d, DWORD PTR [r12 + 12]
lea r14, [r14 + r15]
mov QWORD PTR [r14], r13
mov r14d, DWORD PTR [r12 + 16]# Calculating new back and then saving it
add r15, 8
mov rax, r15
cdq
idiv r14d
mov DWORD PTR [r12 + 12], edx
add rsp, 8
pop r15
pop r14
pop r13
pop r12
ret
# void *dequeue(queue *q)
# Removes and returns the front element, or 0 (NULL) when the queue is
# empty (front == back means empty).
# rdi - queue ptr
# RET rax - element
dequeue:
push r12
push r13
mov r12d, DWORD PTR [rdi + 8] # Check if queue is empty
mov r13d, DWORD PTR [rdi + 12]
xor rax, rax
cmp r12, r13
je dequeue_return # if empty return null
mov r12, QWORD PTR [rdi] # else return element pointer
mov r13d, DWORD PTR [rdi + 8]
lea r13, [r12 + r13] # address of the front element
mov eax, DWORD PTR [rdi + 8]
add eax, 8
mov r12d, DWORD PTR [rdi + 16] # Calculate new front = (front+8) % cap
cdq
idiv r12d
mov DWORD PTR [rdi + 8], edx # Save new front
mov rax, QWORD PTR [r13]
dequeue_return:
pop r13
pop r12
ret
# void free_queue(queue *q)
# Releases the queue's backing array.
# FIX(review): the original did `call free` with rsp at entry alignment
# (rsp % 16 == 8), violating the SysV 16-byte-alignment rule at call
# sites; a tail jump preserves the caller's alignment.
# rdi - queue ptr
free_queue:
mov rdi, QWORD PTR [rdi] # Free queue array
jmp free # tail-call into free
# tree create_tree(int levels, size_t children_size)
# Recursively builds a complete tree: every internal node gets
# children_size children, leaves sit at level 0. Two-register struct
# return per SysV: rax = children array pointer (0 for leaves),
# rdx = value (current level, upper 32 bits) | children size in bytes
# (count * tree_size, lower 32 bits).
# rdi - levels
# rsi - children_size
# RET rax:rdx - the tree - children|value|children_size
create_tree:
push rbx
push r12
push r13
push r14
push r15
mov r12, rdi
mov r13, rsi
test rdi, rdi
jz create_tree_leaf
mov r14, rsi # We'll allocate sizeof(tree) * children_size bytes of memory
shl r14, 4 # save the size calculation to a callee-saved register so we can reuse it after the malloc
mov rdi, r14
call malloc
mov r15, rax # Save the children address twice, once for the return value, once for the loop variable
mov rbx, rax
lea r14, [rax + r14] # Calculate the address of the element after last of the children array
create_tree_children:
cmp rbx, r14
je create_tree_return
lea rdi, [r12 - 1] # levels - 1
mov rsi, r13
call create_tree
mov QWORD PTR [rbx], rax # Save the created tree to memory
mov QWORD PTR [rbx + 8], rdx # The offset of children_size, writing out explicitly would've made the line way too long
add rbx, tree_size
jmp create_tree_children
create_tree_leaf:
mov r15, 0
xor r13, r13 # Leaves won't have any children
create_tree_return:
mov rax, r15 # The children pointer will be in r15
mov rdx, r12
shl rdx, 32 # The tree's value will be the current "levels"
shl r13, 4
or rdx, r13 # Generate the return value by moving the value to the upper 32 bits
pop r15
pop r14
pop r13
pop r12
pop rbx
ret
# void free_tree(node *children, size_t children_size)
# Recursively frees a node's children array; children_size is in BYTES
# (count * tree_size), matching the packed size stored by create_tree.
# FIX(review): the original also pushed r15, which was never used and,
# as a fourth push, left rsp % 16 == 8 at the recursive call and the
# `call free` sites (SysV alignment violation). Three pushes keep rsp
# 16-byte aligned at every call below.
# rdi - children ptr
# rsi - children size
free_tree:
push r12
push r13
push r14
test rdi, rdi # Make sure the pointer is non-zero
jz free_tree_return
mov r12, rdi # Saving array
lea r13, [r12 + rsi] # Get start and end of the array
mov r14, r12
free_tree_free_kid:
cmp r14, r13 # Loop through the array and free all children
je free_tree_free_array
mov rdi, QWORD PTR [r14]
mov esi, DWORD PTR [r14 + 8]
call free_tree
add r14, tree_size
jmp free_tree_free_kid
free_tree_free_array:
mov rdi, r12 # Free the array
call free
free_tree_return:
pop r14
pop r13
pop r12
ret
# void dfs_recursive(node *children, uint64 value_and_size)
# Pre-order traversal: print the node's value, then recurse into each
# child. rsi packs value (upper 32 bits) | children size in bytes
# (lower 32 bits).
# FIX(review): two pushes left rsp % 16 == 8 at the `call printf` site;
# glibc printf can use aligned SSE stores, so this risked a crash.
# An 8-byte adjustment restores 16-byte alignment for all calls below.
# rdi - children ptr
# rsi - value|children_size
dfs_recursive:
push r12
push r13
sub rsp, 8 # re-align rsp to 16 bytes at call sites
mov r12, rdi
mov r13, rsi
mov rdi, OFFSET fmt_tree # Handle the current node
shr rsi, 32 # The tree value is in the upper 32 bits
xor rax, rax # variadic call: zero vector-register count
call printf
mov r13d, r13d # Zero out the top 32 bits
add r13, r12 # Pointer pointing after the last element of the children array
dfs_recursive_children:
cmp r12, r13 # If we reached the end, return
je dfs_recursive_return
mov rdi, QWORD PTR [r12]
mov rsi, QWORD PTR [r12 + 8]
call dfs_recursive
add r12, tree_size
jmp dfs_recursive_children
dfs_recursive_return:
add rsp, 8
pop r13
pop r12
ret
# void dfs_recursive_postorder(node *children, uint64 value_and_size)
# Post-order traversal: recurse into all children first, then print the
# node's value. rsi packs value (upper 32 bits) | children size in bytes
# (lower 32 bits). Three pushes keep rsp 16-byte aligned at call sites.
# rdi - children ptr
# rsi - value|children_size
dfs_recursive_postorder:
push r12
push r13
push r14
mov r12, rdi
mov r13, rsi
mov r14, rsi # keep the packed value for the print after the recursion
mov r13d, r13d # Zero out the top 32 bits
add r13, r12 # Pointer pointing after the last element of the children array
dfs_recursive_po_children:
cmp r12, r13 # If we reached the end, return
je dfs_recursive_po_return
mov rdi, QWORD PTR [r12]
mov rsi, QWORD PTR [r12 + 8]
call dfs_recursive_postorder
add r12, tree_size
jmp dfs_recursive_po_children
dfs_recursive_po_return:
mov rdi, OFFSET fmt_tree # Handle the current node
mov rsi, r14
shr rsi, 32 # The tree value is in the upper 32 bits
xor rax, rax # variadic call: zero vector-register count
call printf
pop r14
pop r13
pop r12
ret
# In-order traversal of a binary tree: left subtree, node, right subtree.
# rdi - children ptr
# rsi - value|children_size
# children_size is a byte count, so with tree_size == 16 the values
# 0/16/32 correspond to 0, 1 or 2 children; anything else is not binary.
dfs_recursive_inorder_btree:
push r12
push r13
mov r12, rdi
mov r13, rsi
mov rax, rsi
mov eax, eax # Isolate children_size (low 32 bits)
cmp rax, 0 # 0 bytes -> leaf
je dfs_recursive_bt_size0
cmp rax, 16 # 16 bytes -> one child
je dfs_recursive_bt_size1
cmp rax, 32 # 32 bytes -> two children
je dfs_recursive_bt_size2
mov rdi, OFFSET not_bt # Anything else: warn that this is not a binary tree
xor rax, rax
call printf
jmp dfs_recursive_bt_return
dfs_recursive_bt_size0:
mov rdi, OFFSET fmt_tree # Leaf: just print its id
shr rsi, 32
xor rax, rax
call printf
jmp dfs_recursive_bt_return
dfs_recursive_bt_size1:
mov rdi, QWORD PTR [r12] # One child: recurse into it, then print the id
mov rsi, QWORD PTR [r12 + 8]
call dfs_recursive_inorder_btree
mov rdi, OFFSET fmt_tree
mov rsi, r13
shr rsi, 32
xor rax, rax
call printf
jmp dfs_recursive_bt_return
dfs_recursive_bt_size2:
mov rdi, QWORD PTR [r12] # Two children: left subtree, id, right subtree
mov rsi, QWORD PTR [r12 + 8]
call dfs_recursive_inorder_btree
mov rdi, OFFSET fmt_tree
mov rsi, r13
shr rsi, 32
xor rax, rax
call printf
mov rdi, QWORD PTR [r12 + 16] # The second child lives one tree_size further in
mov rsi, QWORD PTR [r12 + 24]
call dfs_recursive_inorder_btree
dfs_recursive_bt_return:
pop r13
pop r12
ret
# Iterative depth-first traversal using an explicit stack.
# rdi - children ptr
# rsi - value|children_size
# get_stack/stack_push/stack_pop/free_stack are defined elsewhere in this
# file; stack_pop returns a node ptr in rax (null when the stack is empty).
dfs_stack:
push r12
push r13
push r14
sub rsp, 16 # 16-byte stack object lives here
mov r12, rsp
push rsi # Build a temporary tree node on the machine stack:
push rdi # [rsp] = children ptr, [rsp+8] = value|children_size
mov rdi, r12
call get_stack # Init stack
mov rdi, r12
mov rsi, rsp
call stack_push # Push the root node
mov rdi, r12 # Prime the loop with the first pop
call stack_pop
dfs_stack_loop:
test rax, rax # Null means the stack is empty -> done
jz dfs_stack_return
mov r13, rax
mov rdi, OFFSET fmt_tree # Print the node's id (upper half of the packed qword)
mov esi, DWORD PTR [r13 + 12]
xor rax, rax
call printf
mov eax, DWORD PTR [r13 + 8] # children_size in bytes
mov r13, QWORD PTR [r13] # children ptr
lea r14, [r13 + rax] # One past the end of the children array
dfs_stack_push_child:
cmp r13, r14 # Push every child onto the stack
je dfs_stack_end_push
mov rdi, r12
mov rsi, r13
call stack_push
add r13, tree_size
jmp dfs_stack_push_child
dfs_stack_end_push:
mov rdi, r12 # Pop the next node to visit
call stack_pop
jmp dfs_stack_loop
dfs_stack_return:
mov rdi, r12 # Free stack
call free_stack
add rsp, 32 # 16-byte stack object + the two pushes of the temp node
pop r14
pop r13
pop r12
ret
# Iterative breadth-first traversal using an explicit queue.
# rdi - children ptr
# rsi - value|children_size
# get_queue/enqueue/dequeue/free_queue are defined elsewhere in this file;
# dequeue returns a node ptr in rax (null when the queue is empty).
bfs_queue:
push r12
push r13
push r14
sub rsp, 20 # 20-byte queue object (NOTE(review): an odd size like this leaves rsp
            # unaligned until the matching add rsp, 36 - confirm the helpers cope)
mov r12, rsp
push rsi # Build a temporary tree node on the machine stack:
push rdi # [rsp] = children ptr, [rsp+8] = value|children_size
mov rdi, r12
call get_queue # Init queue
mov rdi, r12
mov rsi, rsp
call enqueue # Enqueue the root node
mov eax, DWORD PTR [r12 + 8] # Presumably the queue's head/tail indices -
mov edi, DWORD PTR [r12 + 12] # equal means empty (TODO confirm against get_queue)
bfs_queue_loop:
cmp eax, edi
je bfs_queue_return
mov rdi, r12 # dequeue
call dequeue
test rax, rax # Null also means the queue is empty
jz bfs_queue_return
mov r13, rax
mov rdi, OFFSET fmt_tree # Print the node's id (upper half of the packed qword)
mov esi, DWORD PTR [r13 + 12]
xor rax, rax
call printf
mov eax, DWORD PTR [r13 + 8] # children_size in bytes
mov r13, QWORD PTR [r13] # children ptr
lea r14, [r13 + rax] # One past the end of the children array
bfs_queue_push_child:
cmp r13, r14 # Enqueue every child
je bfs_queue_end_push
mov rdi, r12
mov rsi, r13
call enqueue
add r13, tree_size
jmp bfs_queue_push_child
bfs_queue_end_push:
mov eax, DWORD PTR [r12 + 8] # Reload the emptiness check for the loop header
mov edi, DWORD PTR [r12 + 12]
jmp bfs_queue_loop
bfs_queue_return:
mov rdi, r12 # Free queue
call free_queue
add rsp, 36 # 20-byte queue object + the two pushes of the temp node
pop r14
pop r13
pop r12
ret
# Entry point: build a tree with 3 levels and 3 children per node, traverse
# it breadth-first, then free it.
main:
push r12
push r13
mov rdi, 3 # create_tree(levels = 3, children per node = 3)
mov rsi, 3
call create_tree
mov r12, rax # Returned pair: rax = children ptr, rdx = value|children_size
mov r13, rdx
mov rdi, rax
mov rsi, rdx
call bfs_queue
mov rdi, r12
mov rsi, r13
mov esi, esi # free_tree only wants children_size: zero out the value half
call free_tree
pop r13
pop r12
ret
|
uzogoduyah/uzogoduyah-gmail.com
| 2,489
|
contents/monte_carlo_integration/code/asm-x64/monte_carlo.s
|
.intel_syntax noprefix
.section .rodata
pi: .double 3.141592653589793
one: .double 1.0
four: .double 4.0
hundred: .double 100.0
rand_max: .long 4290772992
.long 1105199103
fabs_const: .long 4294967295
.long 2147483647
.long 0
.long 0
estimate_fmt: .string "The estaimate of pi is %lf\n"
error_fmt: .string "Percentage error: %0.2f\n"
.section .text
.global main
.extern printf, srand, time, rand
# bool in_circle(double x /*xmm0*/, double y /*xmm1*/)
# RET rax - 1 when x*x + y*y < 1.0 (strictly inside the unit circle), else 0
# Leaf function; clobbers xmm0, xmm1 and the flags.
in_circle:
xor eax, eax # rax = 0 (also breaks the dependency on rax)
mulsd xmm1, xmm1 # y * y
mulsd xmm0, xmm0 # x * x
addsd xmm0, xmm1 # xmm0 = squared distance from the origin
movsd xmm1, one # Radius squared of the unit circle
comisd xmm1, xmm0 # "above": 1.0 > x*x + y*y, unordered/NaN stays 0
seta al
ret
# rdi - samples
# RET xmm0 - estimate of pi
# Draws points uniformly in the unit square and counts those inside the unit
# circle; estimate = 4 * inside / samples.
# Fixes vs the previous version: the counters lived in xmm2-xmm4 across the
# rand calls, but ALL xmm registers are caller-saved in the SysV ABI; rand
# was also called with rsp only 8-byte aligned, and cvtsi2sd read the full
# rax although rand only defines eax. Counters now live in callee-saved
# GPRs, x is spilled across the second rand call, and the prologue keeps
# rsp 16-byte aligned at every call.
monte_carlo:
push rbx
push r12
push r13
sub rsp, 16 # Spill slot for x; also lands rsp on a 16-byte boundary
mov rbx, rdi # Total number of samples
xor r12, r12 # Loop counter
xor r13, r13 # Points that landed inside the circle
monte_carlo_iter:
cmp r12, rbx # Done once every sample was drawn
je monte_carlo_return
call rand # x = rand() / RAND_MAX
cvtsi2sd xmm0, eax # rand returns a 32-bit int in eax
divsd xmm0, rand_max
movsd QWORD PTR [rsp], xmm0 # Preserve x: the next rand call may clobber xmm0
call rand # y = rand() / RAND_MAX
cvtsi2sd xmm1, eax
divsd xmm1, rand_max
movsd xmm0, QWORD PTR [rsp]
call in_circle # rax = 1 when x*x + y*y < 1
add r13, rax # Count the hit
inc r12
jmp monte_carlo_iter
monte_carlo_return:
cvtsi2sd xmm0, r13 # estimate = 4 * inside / samples
cvtsi2sd xmm1, rbx
mulsd xmm0, four
divsd xmm0, xmm1
add rsp, 16
pop r13
pop r12
pop rbx
ret
# Entry point: seed the RNG with the current time, run 10^6 samples and
# print the estimate plus its percentage error against M_PI.
main:
push rbp
sub rsp, 16 # One spill slot for the estimate (also keeps rsp 16-aligned at calls)
mov rdi, 0 # srand(time(NULL))
call time
mov rdi, rax
call srand
mov rdi, 1000000 # monte_carlo(1000000)
call monte_carlo
movsd QWORD PTR [rsp], xmm0 # Save estimate to stack
mov rdi, OFFSET estimate_fmt # Print the estimate - it is still in xmm0
mov rax, 1 # One vector register used by the variadic call
call printf
movsd xmm0, QWORD PTR [rsp] # Get estimate from stack
movsd xmm1, pi # Calculate fabs(estimate - M_PI)
subsd xmm0, xmm1
movq xmm1, fabs_const # 0x7FFFFFFFFFFFFFFF clears the sign bit of a double
andpd xmm0, xmm1
divsd xmm0, pi # Percentage error: fabs(...) / pi * 100
mulsd xmm0, hundred
mov rdi, OFFSET error_fmt
mov rax, 1
call printf
add rsp, 16
pop rbp
xor rax, rax # Set exit code to 0
ret
|
uzogoduyah/uzogoduyah-gmail.com
| 4,183
|
contents/verlet_integration/code/asm-x64/verlet.s
|
.intel_syntax noprefix
.section .rodata
zero: .double 0.0
two: .double 2.0
half: .double 0.5
verlet_fmt: .string "[#]\nTime for Verlet integration is:\n%lf\n"
stormer_fmt: .string "[#]\nTime for Stormer Verlet Integration is:\n%lf\n[#]\nVelocity for Stormer Verlet Integration is:\n%lf\n"
velocity_fmt: .string "[#]\nTime for Velocity Verlet Integration is:\n%lf\n[#]\nVelocity for Velocity Verlet Integration is:\n%lf\n"
pos: .double 5.0
acc: .double -10.0
dt: .double 0.01
.section .text
.global main
.extern printf
# Verlet integration of free fall from pos with constant acceleration until
# the position crosses zero.
# xmm0 - pos
# xmm1 - acc
# xmm2 - dt
# RET xmm0 - elapsed time
# Leaf function; xmm3-xmm7 are scratch, no stack is used.
verlet:
pxor xmm7, xmm7 # xmm7 = 0.0, used for all comparisons against zero
pxor xmm3, xmm3 # Elapsed time accumulator
comisd xmm0, xmm7 # Nothing to do unless pos > 0.0
jbe verlet_return
movsd xmm6, xmm1 # xmm6 = acc * dt * dt (constant term of the update)
mulsd xmm6, xmm2
mulsd xmm6, xmm2
movsd xmm5, xmm0 # Previous position (starts equal to pos)
verlet_loop:
addsd xmm3, xmm2 # time += dt
movsd xmm4, xmm0 # Remember the current position
addsd xmm0, xmm0 # pos = 2*pos - prev_pos + acc*dt*dt
subsd xmm0, xmm5
addsd xmm0, xmm6
movsd xmm5, xmm4 # Previous position = remembered position
comisd xmm0, xmm7 # Keep iterating while the position is above ground
ja verlet_loop
verlet_return:
movsd xmm0, xmm3 # Return the elapsed time
ret
# Stormer-Verlet integration of free fall until the position crosses zero.
# Same position update as verlet above, but also reports the velocity.
# xmm0 - pos
# xmm1 - acc
# xmm2 - dt
# RET xmm0 - elapsed time
# RET xmm1 - final velocity (time * acc, exact for constant acceleration)
stormer_verlet:
pxor xmm7, xmm7 # xmm7 = 0.0, used for all comparisons against zero
pxor xmm3, xmm3 # Elapsed time accumulator
comisd xmm0, xmm7 # Nothing to do unless pos > 0.0
jbe stormer_verlet_return
movsd xmm6, xmm1 # xmm6 = acc * dt * dt (constant term of the update)
mulsd xmm6, xmm2
mulsd xmm6, xmm2
movsd xmm5, xmm0 # Previous position (starts equal to pos)
stormer_verlet_loop:
addsd xmm3, xmm2 # time += dt
movsd xmm4, xmm0 # Remember the current position
addsd xmm0, xmm0 # pos = 2*pos - prev_pos + acc*dt*dt
subsd xmm0, xmm5
addsd xmm0, xmm6
movsd xmm5, xmm4 # Previous position = remembered position
comisd xmm0, xmm7 # Keep iterating while the position is above ground
ja stormer_verlet_loop
stormer_verlet_return:
movsd xmm0, xmm3 # Return the time; velocity = time * acc
mulsd xmm3, xmm1
movsd xmm1, xmm3
ret
# Velocity Verlet integration of free fall until the position crosses zero.
# xmm0 - pos
# xmm1 - acc
# xmm2 - dt
# RET xmm0 - elapsed time
# RET xmm1 - final velocity
# Leaf function; for constant acceleration velocity is simply time * acc.
velocity_verlet:
pxor xmm7, xmm7 # xmm7 = 0.0, used for all comparisons against zero
pxor xmm3, xmm3 # Velocity accumulator
pxor xmm4, xmm4 # Elapsed time accumulator
comisd xmm0, xmm7 # Nothing to do unless pos > 0.0
jbe velocity_verlet_return
movsd xmm5, half # xmm5 = 0.5 * dt * dt * acc (constant position term)
mulsd xmm5, xmm2
mulsd xmm5, xmm2
mulsd xmm5, xmm1
velocity_verlet_loop:
movsd xmm6, xmm3 # pos += velocity * dt + 0.5 * acc * dt * dt
mulsd xmm6, xmm2
addsd xmm6, xmm5
addsd xmm0, xmm6
addsd xmm4, xmm2 # time += dt
movsd xmm3, xmm4 # velocity = time * acc (exact for constant acceleration)
mulsd xmm3, xmm1
comisd xmm0, xmm7 # Keep iterating while the position is above ground
ja velocity_verlet_loop
velocity_verlet_return:
movsd xmm0, xmm4 # Return time and velocity
movsd xmm1, xmm3
ret
# Entry point: run all three integrators from pos = 5.0, acc = -10.0,
# dt = 0.01 and print each result (return values are already in xmm0/xmm1,
# exactly where printf expects its vector args).
# NOTE(review): the ABI says al should hold the number of vector args (2 for
# the stormer/velocity formats); rax = 1 works with glibc because any
# non-zero value makes the callee save all vector registers - confirm if
# another libc is targeted.
main:
push rbp # Also re-aligns rsp to 16 bytes for the calls below
movsd xmm0, pos # verlet(pos, acc, dt)
movsd xmm1, acc
movsd xmm2, dt
call verlet
mov rdi, OFFSET verlet_fmt # Print output
mov rax, 1
call printf
movsd xmm0, pos # stormer_verlet(pos, acc, dt)
movsd xmm1, acc
movsd xmm2, dt
call stormer_verlet
mov rdi, OFFSET stormer_fmt # Print output
mov rax, 1
call printf
movsd xmm0, pos # velocity_verlet(pos, acc, dt)
movsd xmm1, acc
movsd xmm2, dt
call velocity_verlet
mov rdi, OFFSET velocity_fmt # Print output
mov rax, 1
call printf
pop rbp
xor rax, rax # Set exit code to 0
ret
|
uzogoduyah/uzogoduyah-gmail.com
| 1,840
|
contents/euclidean_algorithm/code/asm-x64/euclidean_example.s
|
.intel_syntax noprefix
.section .rodata
euclid_mod_fmt: .string "[#]\nModulus-based euclidean algorithm result:\n%d\n"
euclid_sub_fmt: .string "[#]\nSubtraction-based euclidean algorithm result:\n%d\n"
.section .text
.global main
.extern printf
# rdi - a
# rsi - b
# RET rax - gcd(|a|, |b|)
# Branchless absolute values followed by the remainder-based Euclidean loop.
# Fix: the sign masks were built with "sar rax, 31", which is the 32-bit abs
# idiom and breaks for magnitudes outside the int32 range; a 64-bit value
# needs "sar rax, 63".
euclid_mod:
mov rax, rdi # mask = a >> 63: 0 when a >= 0, -1 when a < 0
sar rax, 63
xor rdi, rax # |a| via conditional two's complement: (a ^ mask) - mask
sub rdi, rax
mov rax, rsi # Same branchless abs for b
sar rax, 63
xor rsi, rax
sub rsi, rax
jmp mod_check
mod_loop:
xor rdx, rdx # rdx:rax / rsi -> quotient in rax, remainder in rdx
mov rax, rdi
div rsi
mov rdi, rsi # a = b
mov rsi, rdx # b = a mod b
mod_check:
test rsi, rsi # Loop until b becomes zero
jne mod_loop
mov rax, rdi # The gcd is left in a
ret
# rdi - a
# rsi - b
# RET rax - gcd(|a|, |b|)
# Subtraction-based Euclidean algorithm.
# Fixes: the sign masks used "sar rax, 31" (32-bit abs idiom, wrong for
# 64-bit magnitudes), and a zero operand made the subtraction loop spin
# forever - gcd(x, 0) = x is now returned directly.
euclid_sub:
mov rax, rdi # mask = a >> 63: 0 when a >= 0, -1 when a < 0
sar rax, 63
xor rdi, rax # |a| via conditional two's complement: (a ^ mask) - mask
sub rdi, rax
mov rax, rsi # Same branchless abs for b
sar rax, 63
xor rsi, rax
sub rsi, rax
mov rax, rdi # gcd(a, 0) = a: bail out early, the subtraction
test rsi, rsi # loop below would never terminate on a zero
jz euclid_sub_done
mov rax, rsi # gcd(0, b) = b
test rdi, rdi
jz euclid_sub_done
jmp check
loop:
cmp rdi, rsi # Subtract the smaller value from the larger one
jle if_true
sub rdi, rsi # a > b: a -= b
jmp check
if_true:
sub rsi, rdi # a <= b: b -= a
check:
cmp rsi, rdi # Until both are equal (that value is the gcd)
jne loop
mov rax, rdi # Return the result
euclid_sub_done:
ret
# Entry point: run both gcd variants on fixed inputs and print the results.
# Fix: main had no prologue adjustment, so every printf was called with
# rsp == 8 (mod 16); the SysV ABI requires 16-byte alignment at each call.
main:
sub rsp, 8 # Entry rsp is 8 mod 16 - realign to 16 for the calls below
mov rdi, 4288 # euclid_mod(4288, 5184)
mov rsi, 5184
call euclid_mod
mov rdi, OFFSET euclid_mod_fmt # Print output
mov rsi, rax
xor rax, rax # Variadic call, no vector args
call printf
mov rdi, 1536 # euclid_sub(1536, 9856)
mov rsi, 9856
call euclid_sub
mov rdi, OFFSET euclid_sub_fmt # Print output
mov rsi, rax
xor rax, rax
call printf
add rsp, 8
xor rax, rax # Return 0
ret
|
uzogoduyah/uzogoduyah-gmail.com
| 2,783
|
contents/forward_euler_method/code/asm-x64/euler.s
|
.intel_syntax noprefix
.section .rodata
three: .double -3.0
fabs_const:
.long 4294967295
.long 2147483647
.long 0
.long 0
inital_val: .double 1.0
threshold: .double 0.01
timestep: .double 0.01
error_fmt: .string "%f %f\n"
fmt: .string "%d\n"
.section .text
.global main
.extern printf
.extern exp
# rdi - array size (number of doubles)
# rsi - array ptr
# xmm0 - timestep
# Forward Euler for y' = -3y, y(0) = 1: fills array[i] with the i-th
# iterate so that array[i] tracks exp(-3 * i * timestep), which is what
# check_result below compares against.
# Fixes: -3.0 was loaded into xmm3 but the update multiplied the
# uninitialized xmm2; the update used subsd, flipping the sign (y grew
# instead of decaying); and the end pointer [rsi + 8*rdi + 8] wrote one
# element past the array.
solve_euler:
movsd xmm1, inital_val # y = y0 = 1.0
lea rax, [rsi + 8 * rdi] # One past the end of the array
cmp rsi, rax # Nothing to do for an empty array
je solve_euler_return
solve_euler_loop:
movsd QWORD PTR [rsi], xmm1 # array[i] = y (store before stepping so array[0] = y0)
movsd xmm2, three # xmm2 = -3.0
mulsd xmm2, xmm1 # xmm2 = -3.0 * y * timestep = dt * y'
mulsd xmm2, xmm0
addsd xmm1, xmm2 # y += dt * y'
add rsi, 8
cmp rsi, rax # Stop once the array is full
jne solve_euler_loop
solve_euler_return:
ret
# rdi - array size
# rsi - array ptr
# xmm0 - timestep
# xmm1 - threshold
# RET rax - 0 when every array[i] is within threshold of exp(-3*i*timestep),
#           1 otherwise (each offender is printed as "array[i] solution")
# Fixes vs the previous version: the success code lived in rax across
# exp/printf (both clobber rax), the array ptr stayed in rsi across printf
# (clobbered), the threshold stayed in xmm1 across exp (all xmm regs are
# caller-saved), the difference was computed into the wrong registers
# (destroying the timestep and never feeding the fabs), "jle" read one
# element past the array, printf received no FP args for "%f %f", and the
# two-push prologue left rsp misaligned at every libc call.
check_result:
push rbx
push r12
push r13
push r14
sub rsp, 24 # 16 bytes of spill space + padding: rsp is 0 mod 16 at every call
movsd QWORD PTR [rsp], xmm0 # Spill timestep and threshold - the xmm registers
movsd QWORD PTR [rsp + 8], xmm1 # are caller-saved, so exp/printf may clobber them
mov rbx, rsi # Array ptr in a callee-saved register
mov r13, rdi # Array size
xor r12, r12 # i = 0
xor r14, r14 # Success code (0 = ok), kept out of the clobbered rax
jmp loop_check
results_loop:
cvtsi2sd xmm0, r12 # xmm0 = -3.0 * i * timestep
mulsd xmm0, three
mulsd xmm0, QWORD PTR [rsp]
call exp # xmm0 = exact solution exp(-3*i*dt)
movsd xmm2, QWORD PTR [rbx + 8*r12] # xmm2 = array[i]
movsd xmm1, xmm0 # Keep the exact solution for printing
subsd xmm0, xmm2 # xmm0 = fabs(solution - array[i])
movq xmm3, fabs_const
andpd xmm0, xmm3
comisd xmm0, QWORD PTR [rsp + 8] # Within the threshold?
jbe if_false
movsd xmm0, xmm2 # printf("%f %f\n", array[i], solution)
mov rdi, OFFSET error_fmt
mov rax, 2 # Two vector registers used by the variadic call
call printf
mov r14, 1 # Mark the check as failed
if_false:
add r12, 1
loop_check:
cmp r12, r13 # Loop while i < size
jb results_loop
mov rax, r14 # Return the success code
add rsp, 24
pop r14
pop r13
pop r12
pop rbx
ret
# Entry point: integrate y' = -3y over 100 steps on a stack array, verify
# the result against the exact solution, and print the success code (0 = ok).
main:
push rbp
sub rsp, 800 # double array[100] (800 = 100 * 8; multiple of 16 keeps alignment)
mov rdi, 100
mov rsi, rsp
movsd xmm0, timestep
call solve_euler # Fill the array with the Euler iterates
mov rdi, 100
mov rsi, rsp
movsd xmm0, timestep
movsd xmm1, threshold
call check_result # Compare the array against exp(-3*i*dt)
mov rdi, OFFSET fmt
mov rsi, rax
xor rax, rax # Variadic call, no vector args
call printf # Print out success code
add rsp, 800 # Deallocating array
pop rbp
xor rax, rax
ret
|
uzogoduyah/uzogoduyah-gmail.com
| 24,157
|
contents/huffman_encoding/code/asm-x64/huffman.s
|
.intel_syntax noprefix
# System V calling convention cheatsheet
# Params: rdi, rsi, rdx, rcx, r8, r9, xmm0-7
# Return: rax (int 64 bits), rax:rdx (int 128 bits), xmm0 (float)
# Callee cleanup: rbx, rbp, r12-15
# Scratch: rax, rdi, rsi, rdx, rcx, r8, r9, r10, r11
.section .rodata
text: .string "bibbity bobbity"
original: .string "Original message: %s\n"
encoded: .string "Encoded message: "
decoded: .string "Decoded message: %s\n"
.equ bitstr_len, 32
.equ bitstr_size, 40
.equ codebook_size, 256 * bitstr_size
.equ tree_left, 0
.equ tree_right, 8
.equ tree_count, 16
.equ tree_value, 20
.equ tree_size, 24
.equ heap_len, 0
.equ heap_data, 4
.equ heap_size, 512 * 8 + 16 # 512 ptrs + 4 byte length + 12 byte padding
.equ counts_size, 256 * 4
.equ msg_len, 0
.equ msg_data, 8
.section .text
.global main
.extern printf, calloc, malloc, memset, puts
# Entry point: encode the sample text (building the Huffman tree and the
# codebook on the way), print the codebook, the encoded bits and the decoded
# text, then release every allocation.
main:
push r12
push r13
sub rsp, codebook_size + 16 # 8 extra bytes for the Huffman-tree ptr, 8 bytes for padding
# Print the original text
mov rdi, OFFSET original
mov rsi, OFFSET text
xor rax, rax
call printf
# First encode the text. This will also initialize the Huffman-tree and the codebook
mov rdi, OFFSET text
mov rsi, rsp # Codebook at rsp, the tree-root slot right above it
lea rdx, [rsp + codebook_size]
call encode
mov r12, rax # Save the returned message ptr
# Print the codebook and the encoded message
mov rdi, rsp
call print_codebook
mov rdi, OFFSET encoded
xor rax, rax
call printf
mov rdi, r12
call print_message
# Decode and print the message
mov rdi, r12
mov rsi, QWORD PTR [rsp + codebook_size]
call decode
mov r13, rax
mov rdi, OFFSET decoded
mov rsi, r13
xor rax, rax
call printf
# Free allocated resources
mov rdi, r12
call free
mov rdi, r13
call free
mov rdi, QWORD PTR [rsp + codebook_size]
call free_tree
add rsp, codebook_size + 16
pop r13
pop r12
# Indicate success with a 0 exit code
xor rax, rax
ret
# rdi - text (null-terminated input)
# rsi - codebook ptr (256 bitstr entries, bitstr_size = 40 bytes each)
# rdx - Huffman-tree ptr (out-param: receives the generated tree's root)
# RET rax - encoded message ptr: an 8-byte bit-length field followed by the
#           packed bits (calloc'd, caller frees)
encode:
push r12
push r13
push r14
mov r12, rdi # Save the original arguments
mov r13, rsi
mov r14, rdx
call generate_tree # The text is already in rdi
mov QWORD PTR [r14], rax # Save the Huffman-tree's root
mov rdi, r13 # Set up the parameters for codebook generation: codebook ptr, Huffman-tree root
mov rsi, rax
call generate_codebook
xor rax, rax
xor r14, r14 # We'll use r14 to keep track of the length of the message
mov rcx, r12 # Make a copy of the pointer to the message to be encoded
encode_calculate_length:
mov al, BYTE PTR [rcx]
test al, al # If we're at the terminating null character then we're ready to encode
jz encode_message
lea rdx, [rax + 4*rax] # Codebook entry = base + 40 * char, via base + 8 * (5 * char)
lea r8, [r13 + 8*rdx]
add r14, QWORD PTR [r8 + bitstr_len] # And add the encoded word length to the total
inc rcx
jmp encode_calculate_length
encode_message:
mov rdi, 1
lea rsi, [r14 + 7] # Number of bytes needed to hold all the bits:
shr rsi, 3 # (length + 8 - 1) / 8, i.e. length / 8 rounded up
lea rsi, [rsi + 8] # Make space for an 8-byte length field
call calloc # Allocate the (zeroed) message buffer, ptr lands in rax
mov QWORD PTR [rax], r14 # Save the length of the message
# Registers:
# - r12: text
# - r13: codebook_ptr
# - rax: message ptr
# - free to use: rdi, rsi, rcx, rdx, r8, r9, r10, r11, r14
xor r8, r8 # Bit offset within the current message block
lea r9, [rax + 8] # Current 8-byte message block
encode_message_bits:
xor rdi, rdi # We need to clear rdi because moving a single byte to dil doesn't do so
mov dil, BYTE PTR [r12] # Iterate the message again
test dil, dil # If we're at the the null terminator we're done
jz encode_done
lea rdx, [rdi + 4*rdi] # Get the codebook entry (base + 40 * char)
lea r10, [r13 + 8*rdx]
mov r11, QWORD PTR [r10 + bitstr_len] # Load the bitstring length
lea r14, [r10] # The bitstring qword we're currently processing
# NOTE(review): r14 is never advanced inside the qword loop below, so codes
# longer than 64 bits would re-read the first qword - confirm codes fit 64 bits.
encode_message_bits_qword:
mov rdi, QWORD PTR [r14] # Calculate the first mask: [code qword] << [bit offset]
mov rsi, rdi # Get a second copy of the code's current qword
mov rcx, r8
shl rdi, cl
or QWORD PTR [r9], rdi # Apply the mask to the current block
mov rcx, 64 # Calculate the second mask: [code qword] >> [64 - bit offset]
sub rcx, r8
shr rsi, cl
mov rcx, r11 # Copy the code length so we can manipulate it without destroying the original value
sub rcx, 64
jle encode_message_bits_try_overflow # If the length was less than or equal to 64, check if the code qword would overflow the current message block
mov r11, rcx # We wanted to subtract 64 from the code length anyway
lea r9, [r9 + 8] # Load the next message block
or QWORD PTR [r9], rsi # Save the second mask to the new message block
jmp encode_message_bits_qword
encode_message_bits_try_overflow:
add rcx, r8 # Calculate [code length] + [bit offset] - 64
jl encode_calculate_new_bit_offset # If the result is less than 0 then we have no remaining bits -> calculate the new bit offset
mov r8, rcx # Otherwise this also happens to be our new bit offset
lea r9, [r9 + 8] # Load the next message block
or QWORD PTR [r9], rsi # Save the second mask to the new message block
inc r12 # Go to the next character in the input
jmp encode_message_bits
encode_calculate_new_bit_offset:
lea r8, [r8 + r11] # Calculate the bit offset for the next code qword
inc r12
jmp encode_message_bits
encode_done:
pop r14
pop r13
pop r12
ret
# rdi - encoded message (8-byte bit-length field followed by the packed bits)
# rsi - Huffman-tree root (ptr)
# RET rax - the decoded, null-terminated string (malloc'd, caller frees)
# One output byte per encoded bit is allocated, which is always enough since
# every code is at least one bit long.
decode:
push r12
push r13
push r14
mov r12, rdi
mov r13, rsi
mov rdi, QWORD PTR [r12] # Load the length of the message in bits
mov r14, rdi # We'll use the length of the message as a loop counter later
lea rdi, [rdi + 1] # +1 for the null terminator
call malloc # This will usually be more than enough memory to contain the whole decoded message (we don't handle pathological cases right now)
mov rdi, r12 # The single-character decoder doesn't touch rdi so we can hoist it before the loop
xor rcx, rcx # Bit counter over the encoded message
mov rdx, rax # The current byte in the output string
decode_loop:
cmp rcx, r14 # Stop once every encoded bit was consumed
jge decode_done
mov rsi, r13 # Each symbol starts over from the root
decode_loop_char:
test rsi, rsi # A null node is a dead-end -> start over with the next symbol
jz decode_loop
cmp QWORD PTR [rsi + tree_left], 0 # A node with either child present is a branch
jnz decode_loop_char_branch
cmp QWORD PTR [rsi + tree_right], 0
jnz decode_loop_char_branch
mov r9d, DWORD PTR [rsi + tree_value] # Leaf: emit its character
mov BYTE PTR [rdx], r9b
lea rdx, [rdx + 1] # Advance the output string
jmp decode_loop
decode_loop_char_branch:
mov r9, rcx # Byte index of the current bit
shr r9, 3
mov r10b, BYTE PTR [rdi + r9 + msg_data]
mov r11, rcx # Save rcx in another register temporarily so we can restore it without push/pop
and rcx, 7 # Bit index inside that byte
shr r10, cl # Shift the bit of interest down to position 0
lea rcx, [r11 + 1] # Restore rcx and advance to the next bit (lea keeps the flags)
and r10, 0x1 # Zero out all other bits; this sets ZF for the cmovnz below
mov r8, rsi
mov rsi, QWORD PTR [r8 + tree_left] # Bit 0 -> left branch, non-zero bit -> right branch
cmovnz rsi, QWORD PTR [r8 + tree_right]
jmp decode_loop_char
decode_done:
mov BYTE PTR [rdx], 0 # Write the null terminator at the end of the string
pop r14
pop r13
pop r12
ret
# rdi - The starting address of the codebook we want to generate (256 entries)
# rsi - Huffman-tree root (ptr)
# Zeroes the whole codebook, then records the root-to-leaf path of every
# symbol via generate_codebook_recurse.
generate_codebook:
push r12
sub rsp, bitstr_size + 16 # Scratch bitstring used by the recursion
# NOTE(review): 40 + 16 = 56 shifts rsp's 16-byte congruence by 8; the
# movaps stores below need [rsp] 16-aligned, which currently depends on the
# caller chain's entry alignment - confirm before changing any prologue here.
mov r12, rsi
xorps xmm0, xmm0 # Create a 0-initialized bitstring. This will be
movaps XMMWORD PTR [rsp], xmm0 # used in the recursive function calls
movaps XMMWORD PTR [rsp + 16], xmm0
mov QWORD PTR [rsp + 32], 0
xor rsi, rsi # memset(codebook, 0, codebook_size) - rdi still holds the codebook
mov rdx, codebook_size
call memset
mov rdi, rax # memset returns its first argument: the codebook ptr
mov rsi, r12
mov rdx, rsp
call generate_codebook_recurse
add rsp, bitstr_size + 16
pop r12
ret
# rdi - The codebook's starting address
# rsi - The current Huffman-tree node
# rdx - The bitstring accumulated along the path so far
# rdi and rdx survive the recursive calls because this function never writes
# either register; the path bitstring is mutated in place and restored.
generate_codebook_recurse:
push rbp
push r12
push r13
test rsi, rsi # If we reached a null pointer we're done
jz generate_codebook_recurse_done
mov r12, rsi
cmp QWORD PTR [r12 + tree_left], 0 # If at least one of the children is not null
jnz generate_codebook_branch # then we need to treat the current node as a branch
cmp QWORD PTR [r12 + tree_right], 0
jnz generate_codebook_branch
mov r8d, DWORD PTR [r12 + tree_value] # Leaf: copy the accumulated path into codebook[value]
movaps xmm0, XMMWORD PTR [rdx] # Get the values of the current bitstring into some registers
movaps xmm1, XMMWORD PTR [rdx + 16]
mov r9, QWORD PTR [rdx + 32]
lea rax, [r8 + 4*r8] # The index calculation needs to add 40 * index. With lea arithmetic this can be represented as
lea r10, [rdi + 8*rax] # base address + 8 * (5 * index). This is done in two lea instructions
movups XMMWORD PTR [r10], xmm0 # And copy the data over to it
movups XMMWORD PTR [r10 + 16], xmm1
mov QWORD PTR [r10 + 32], r9
jmp generate_codebook_recurse_done
generate_codebook_branch:
# First, calculate the necessary indices and bitmask to use for the bitstring
mov r13, QWORD PTR [rdx + bitstr_len] # Load the current length of the bitstring
mov rcx, r13 # This will be used to index into the bitstring data. We'll need two copies for it
shr r13, 6 # We first get which 64 bit chunk of the bitstring we want to modify
and rcx, 63 # Then the bit we want to change
mov rbp, 1 # Generate the mask we'll use to set the correct bit
shl rbp, cl
# We'll start with the right branch
or QWORD PTR [rdx + 8*r13], rbp # Set the bit
inc QWORD PTR [rdx + bitstr_len] # Increase the bitstring length
mov rsi, QWORD PTR [r12 + tree_right]
call generate_codebook_recurse
# Then the left branch: r13 = bitstring qword index, rbp = mask (both callee-saved)
not rbp
and QWORD PTR [rdx + 8*r13], rbp # Clear the bit again
mov rsi, QWORD PTR [r12 + tree_left]
call generate_codebook_recurse
dec QWORD PTR [rdx + bitstr_len] # Decrease the bitstring length
generate_codebook_recurse_done:
pop r13
pop r12
pop rbp
ret
# rdi - text
# RET rax - Huffman-tree root (ptr), or null for an empty input
# Counts character frequencies, pushes one leaf per distinct character onto
# a min-heap, then repeatedly merges the two lightest nodes into a branch.
# Fix: the single-distinct-character path allocated its branch with malloc
# and never initialized tree_right, so the codebook recursion and free_tree
# would have walked a garbage pointer - the field is now zeroed explicitly.
generate_tree:
push r12
push r13
sub rsp, 5128 # 1024 bytes for the char counts, 4 bytes for heap length, 4096 bytes for the heap, 4 byte padding
mov r12, rdi # Save the original text so it doesn't get clobbered
mov rdi, rsp # Zero out the character counts and the heap length
xor rsi, rsi
mov rdx, 1040
call memset
xor rax, rax
generate_tree_count_chars:
mov al, BYTE PTR [r12]
test al, al
jz generate_tree_leaves_setup
inc DWORD PTR [rsp + 4*rax]
inc r12
jmp generate_tree_count_chars
generate_tree_leaves_setup:
mov r12, 255 # Loop counter. The "test al, al" above just set ZF (and cleared SF), so the jl below falls through on entry
generate_tree_leaves:
jl generate_tree_one_leaf # Taken only once the dec below drives r12 negative
mov r13d, DWORD PTR [rsp + 4*r12] # Load the count at the ith position
test r13d, r13d # And check if it's zero
jz generate_tree_leaves_counters # If it is we can skip this iteration
mov rdi, 1 # If not, we need to allocate a new (zeroed) leaf node
mov rsi, tree_size
call calloc
mov DWORD PTR [rax + tree_value], r12d # Save the value and the count to the tree
mov DWORD PTR [rax + tree_count], r13d
lea rdi, [rsp + counts_size] # Then push it onto the heap
mov rsi, rax
call heap_push
generate_tree_leaves_counters:
dec r12 # Decrement the loop counter and start over
jmp generate_tree_leaves
generate_tree_one_leaf:
cmp DWORD PTR [rsp + counts_size], 1 # Check if there is only one element in the heap
jne generate_tree_branches
lea rdi, [rsp + counts_size] # Get the element
call heap_pop
mov r12, rax
mov rdi, tree_size # Create the new tree node, the pointer to it will be in rax
call malloc
mov QWORD PTR [rax + tree_left], r12 # Save element in the left node
mov QWORD PTR [rax + tree_right], 0 # malloc gives uninitialized memory: a garbage right
                                    # ptr would be dereferenced later, so clear it
mov ecx, DWORD PTR [r12 + tree_count] # Save element count in branch
mov DWORD PTR [rax + tree_count], ecx
jmp generate_tree_ret # Returning
generate_tree_branches:
cmp DWORD PTR [rsp + counts_size], 1 # Check if there are still at least two elements in the heap
jle generate_tree_done # If not, we're done
lea rdi, [rsp + counts_size] # Get the left child
call heap_pop
mov r12, rax
lea rdi, [rsp + counts_size] # Get the right child
call heap_pop
mov r13, rax
mov rdi, tree_size # Create the new tree node, the pointer to it will be in rax
call malloc
mov ecx, DWORD PTR [r12 + tree_count] # The new node's count: left count + right count
add ecx, DWORD PTR [r13 + tree_count]
mov QWORD PTR [rax + tree_left], r12 # Save the new node's fields: left, right, count (value stays uninitialized, it's never read for branch nodes)
mov QWORD PTR [rax + tree_right], r13
mov DWORD PTR [rax + tree_count], ecx
lea rdi, [rsp + counts_size] # Add the branch to the heap
mov rsi, rax
call heap_push
jmp generate_tree_branches
generate_tree_done:
lea rdi, [rsp + counts_size] # The tree's root will be in rax after the pop
call heap_pop
generate_tree_ret:
add rsp, 5128
pop r13
pop r12
ret
# rdi - heap ptr
# rsi - tree ptr to insert
# Min-heap insert keyed on tree_count: append the new element, then sift it
# up while its count is smaller than its parent's.
# Leaf function; clobbers rax, rcx, rdx, r8-r11 and the flags.
heap_push:
mov r8d, DWORD PTR [rdi + heap_len] # r8 = old length = index of the new slot
lea r9, [rdi + heap_data] # r9 = the heap's data array
lea eax, [r8d + 1] # Store the grown length back
mov DWORD PTR [rdi + heap_len], eax
mov QWORD PTR [r9 + 8*r8], rsi # data[old_len] = new element
heap_push_sift_up:
test r8, r8 # At the root (index 0) the heap property holds
jz heap_push_done
lea rcx, [r8 - 1] # Parent index = (index - 1) / 2
shr rcx, 1
mov r10, QWORD PTR [r9 + 8*r8] # r10 = current element, r11 = its parent
mov r11, QWORD PTR [r9 + 8*rcx]
mov edx, DWORD PTR [r10 + tree_count]
cmp DWORD PTR [r11 + tree_count], edx # parent count <= current count -> heap is valid
jle heap_push_done
mov QWORD PTR [r9 + 8*r8], r11 # Otherwise swap child and parent
mov QWORD PTR [r9 + 8*rcx], r10
mov r8, rcx # Continue sifting from the parent slot
jmp heap_push_sift_up
heap_push_done:
ret
# rdi - heap ptr
# RET rax - the tree with the smallest count, or null when the heap is empty
# Min-heap pop: return the root, move the last element to the front, shrink
# the heap, then sift the new root down.
heap_pop:
mov r8d, DWORD PTR [rdi + heap_len] # Load the heap's length
test r8d, r8d # If it's 0 then the heap's empty
jz heap_empty
lea rdx, [rdi + heap_data] # Get the heap's data ptr
mov rax, QWORD PTR [rdx] # The return value will be the tree's current root
lea r8d, [r8d - 1] # Calculate the new length
mov DWORD PTR [rdi + heap_len], r8d # And save it
mov rsi, QWORD PTR [rdx + 8*r8] # Load the element we're going to swap with the root
mov QWORD PTR [rdx], rsi # Swap the root and the last element
mov QWORD PTR [rdx + 8*r8], rax # (the popped root parks just past the shrunken heap)
xor r9, r9 # Sift-down starts at the root
heap_pop_sift_down:
mov rcx, r9 # rcx = index of the smallest of parent/children seen so far
lea r10, [r9 + r9 + 1] # The left child index
lea r11, [r9 + r9 + 2] # The right child index
cmp r10, r8 # No left child -> maybe check the right one
jge heap_pop_check_right
mov rdi, QWORD PTR [rdx + 8*r10] # Load the left child
mov rsi, QWORD PTR [rdx + 8*rcx] # Load the target
mov esi, DWORD PTR [rsi + tree_count] # Load the target tree count
cmp DWORD PTR [rdi + tree_count], esi # If the left tree count < target tree count
jge heap_pop_check_right
mov rcx, r10 # The left child becomes the new target
heap_pop_check_right:
cmp r11, r8
jge heap_pop_compare_indices
mov rdi, QWORD PTR [rdx + 8*r11] # Load the right child
mov rsi, QWORD PTR [rdx + 8*rcx] # Load the target
mov esi, DWORD PTR [rsi + tree_count] # Load the target tree count
cmp DWORD PTR [rdi + tree_count], esi # If the right tree count < target tree count
jge heap_pop_compare_indices
mov rcx, r11 # The right child becomes the new target
heap_pop_compare_indices:
cmp r9, rcx # If the target index == current index we're done
je heap_pop_done
mov rdi, QWORD PTR [rdx + 8*r9] # Otherwise we swap the values
mov rsi, QWORD PTR [rdx + 8*rcx]
mov QWORD PTR [rdx + 8*r9], rsi
mov QWORD PTR [rdx + 8*rcx], rdi
mov r9, rcx
jmp heap_pop_sift_down
heap_empty:
xor rax, rax # Return a null pointer to indicate the heap was empty
heap_pop_done:
ret
# rdi - codebook start ptr
# Prints "<char>: <bits>" for every symbol whose code is non-empty.
print_codebook:
push rbx
push r12
sub rsp, 272 # Line buffer: 3 prefix chars + up to 256 bit chars + terminator
mov r12, rdi
xor rbx, rbx # Codepoint loop counter (callee-saved, survives puts)
print_codebook_loop:
cmp rbx, 255
jg print_codebook_done
lea rax, [rbx + 4*rbx] # Entry address = codebook + 40 * index, computed as
lea r10, [r12 + 8*rax] # codebook + 8 * (5 * index)
mov rdx, QWORD PTR [r10 + bitstr_len] # Load the length of the bitstring
test rdx, rdx # Zero length: the codepoint never occurred, skip it
jz print_codebook_counters
print_codebook_char:
mov BYTE PTR [rsp], bl # First, the character we're printing the code for
mov WORD PTR [rsp + 1], 0x203a # ": " (little endian: ':' then ' ')
mov BYTE PTR [rsp + rdx + 3], 0x00 # Null terminator after the last bit character
print_codebook_generate_binary:
dec rdx
jl print_codebook_binary # All bit characters written -> print the line
mov r9, rdx # Two copies of the loop counter
mov rcx, rdx
shr r9, 6 # Which 64-bit chunk of the bitstring holds bit rdx
and rcx, 63 # The bit inside that chunk
mov rsi, QWORD PTR [r10 + r9] # NOTE(review): r9 is a qword index but is used as a byte
shr rsi, cl # offset (no *8) - only codes up to 64 bits read the right chunk here
and rsi, 1 # Mask the rest of the bits
add rsi, '0' # Convert it to ASCII
mov BYTE PTR [rsp + rdx + 3], sil # And copy it into the string
jmp print_codebook_generate_binary
print_codebook_binary:
mov rdi, rsp # Print the current bitstring
call puts
print_codebook_counters:
inc rbx # And go to the next codebook entry
jmp print_codebook_loop
print_codebook_done:
add rsp, 272
pop r12
pop rbx
ret
# rdi - message ptr (8-byte bit-length field followed by the packed bits)
# Prints the message as a string of '0'/'1' characters.
# This would run out of stack space for long messages but it will do for now.
# Fix: the malloc'd print buffer was never freed; it is now remembered in a
# spill slot and released after puts. The slot is 16 bytes so the stack's
# 16-byte congruence at the call sites is unchanged.
print_message:
push r12
push r13
sub rsp, 16 # Spill slot for the temp buffer (16 bytes preserves call alignment)
mov r12, rdi
mov r13, QWORD PTR [rdi] # Get the length of the message in bits
lea rdi, [r13 + 1] # One char per bit plus the null terminator
call malloc
mov QWORD PTR [rsp], rax # Remember the buffer so it can be freed (it used to leak)
xor rdx, rdx
print_message_generate_string:
cmp rdx, r13
jge print_message_puts
mov r8, rdx # Get two copies of the current index
mov rcx, rdx
shr r8, 3 # We first get the byte we want to print
mov r10b, BYTE PTR [r12 + r8 + msg_data]
and rcx, 7 # Then the bit in that byte
shr r10, cl
and r10, 0x1 # Mask it so only the bit we're interested in is visible
add r10, '0' # Convert it to ASCII
mov BYTE PTR [rax + rdx], r10b # Write it into the printable string
inc rdx
jmp print_message_generate_string
print_message_puts:
mov BYTE PTR [rax + rdx], 0x00 # Write the null terminator
mov rdi, rax # And print the string
call puts
mov rdi, QWORD PTR [rsp] # Release the temp buffer (previously leaked)
call free
add rsp, 16
pop r13
pop r12
ret
# rdi - tree ptr (may be null)
# Post-order recursive free of a Huffman tree: children first, then the node.
free_tree:
push rbx
mov rbx, rdi
test rbx, rbx # A null node terminates the recursion
jz free_tree_done
mov rdi, [rbx + tree_left] # Otherwise free the left child first
call free_tree
mov rdi, [rbx + tree_right] # Then the right child
call free_tree
mov rdi, rbx # And finally, the node itself
call free
free_tree_done:
pop rbx
ret
|
uzogoduyah/uzogoduyah-gmail.com
| 10,437
|
contents/cooley_tukey/code/asm-x64/fft.s
|
# GAS x86-64, Intel operand order, SysV AMD64 ABI.
# NOTE(review): data is referenced with absolute addresses throughout this
# file (e.g. "movsd xmm1, two_pi", "mov rdi, OFFSET fmt"), so it must be
# linked non-PIE.
.intel_syntax noprefix
.section .rodata
two: .double 2.0 # base for pow(2, round) in the iterative FFT
one: .double 1.0 # real part of the initial twiddle v = 1 + 0i
two_pi: .double -6.28318530718 # -2*pi, the FFT twiddle angle factor
rand_max: .long 4290772992 # low/high 32-bit halves of the IEEE-754
.long 1105199103 # double 2147483647.0 (RAND_MAX as a double)
fmt: .string "%g\n" # printf format for one magnitude difference
.section .text
.global main
# NOTE(review): pow is called in iterative_cooley_tukey but is missing from
# this list; GAS does not require .extern, so this is cosmetic only.
.extern printf, memset, memcpy, srand, rand, time, cexp, __muldc3, cabs, log2
# rdi - array ptr
# rsi - array size in BYTES (16 bytes per double-complex element)
# Naive O(N^2) discrete Fourier transform, in place:
#   X[i] = sum_j x[j] * cexp(-2*pi*I * i * j / N)
# Accumulates into a stack scratch array, then memcpy's it back over the
# input. Assumes rsi is a multiple of 16, which keeps rsp 16-byte aligned
# across the cexp/__muldc3/memcpy calls (the 5 pushes already realign it).
dft:
  push rbx # NOTE(review): rbx is never used; the push also restores 16-byte rsp alignment
  push r12
  push r13
  push r14
  push r15
  mov r12, rdi # Save parameters
  mov r13, rsi
  sub rsp, r13 # Make a double complex scratch array on the stack
  xor r14, r14 # r14 = output byte offset (i * 16)
dft_loop_i:
  cmp r14, r13 # Check if index is equal to array size
  je dft_end_i
  lea rax, [rsp + r14] # tmp[i] = 0 + 0i
  mov QWORD PTR [rax], 0
  mov QWORD PTR [rax + 8], 0
  xor r15, r15 # r15 = input byte offset (j * 16)
dft_loop_j:
  cmp r15, r13 # Check if the index is equal to array size
  je dft_end_j
  movsd xmm1, two_pi # xmm1 = -2*pi * i * j / N:
  mov rax, r14 # (i*16)*(j*16) >> 4 = i*j*16, and the divide by
  imul rax, r15 # r13 = N*16 below cancels the remaining factor of 16
  shr rax, 4
  cvtsi2sdq xmm2, rax
  mulsd xmm1, xmm2
  cvtsi2sdq xmm2, r13
  divsd xmm1, xmm2
  pxor xmm0, xmm0 # cexp argument = 0 + angle*I in (xmm0, xmm1)
  call cexp
  lea rax, [r12 + r15] # product = cexp(...) * x[j] via __muldc3
  movsd xmm2, QWORD PTR [rax]
  movsd xmm3, QWORD PTR [rax + 8]
  call __muldc3
  lea rax, [rsp + r14]
  movsd xmm6, QWORD PTR [rax] # tmp[i] += product
  movsd xmm7, QWORD PTR [rax + 8]
  addsd xmm6, xmm0
  addsd xmm7, xmm1
  movsd QWORD PTR [rax], xmm6 # Save to tmp array
  movsd QWORD PTR [rax + 8], xmm7
  add r15, 16
  jmp dft_loop_j
dft_end_j:
  add r14, 16
  jmp dft_loop_i
dft_end_i:
  mov rdi, r12 # Copy the scratch array back over the input
  mov rsi, rsp
  mov rdx, r13
  call memcpy
  add rsp, r13
  pop r15
  pop r14
  pop r13
  pop r12
  pop rbx
  ret
# rdi - array ptr
# rsi - array size in BYTES (16 bytes per double-complex element)
# Recursive radix-2 Cooley-Tukey FFT, in place. Assumes the element count
# is a power of two; recursion bottoms out at a single element.
cooley_tukey:
  cmp rsi, 16 # one element (16 bytes) is its own transform
  jle cooley_tukey_return
  push rbx
  push r12
  push r13
  push r14
  push r15
  mov r12, rdi # Save parameters
  mov r13, rsi
  mov r14, rsi # r14 = N / 2 (in bytes)
  shr r14, 1
  sub rsp, r14 # temporary buffer for the odd-indexed elements
  xor r15, r15
  mov rbx, r12
cooley_tukey_spliting:
  cmp r15, r14
  je cooley_tukey_split
  lea rax, [r12 + 2 * r15] # even-indexed entries are packed to the front of
  movaps xmm0, XMMWORD PTR [rax + 16] # the array; odd-indexed entries go to the
  movaps xmm1, XMMWORD PTR [rax] # temporary buffer for now
  movaps XMMWORD PTR [rsp + r15], xmm0
  movaps XMMWORD PTR [rbx], xmm1
  add rbx, 16
  add r15, 16
  jmp cooley_tukey_spliting
cooley_tukey_split:
  mov rax, rsp # append the buffered odd entries to the back half
  lea rdi, [r12 + r13]
cooley_tukey_mov_data:
  cmp rbx, rdi
  je cooley_tukey_moved
  movaps xmm0, XMMWORD PTR [rax]
  movaps XMMWORD PTR [rbx], xmm0
  add rbx, 16
  add rax, 16
  jmp cooley_tukey_mov_data
cooley_tukey_moved:
  add rsp, r14 # temporary no longer needed; restore rsp before recursing
  mov rdi, r12 # transform the even (front) half
  mov rsi, r14
  call cooley_tukey
  lea rdi, [r12 + r14] # transform the odd (back) half
  mov rsi, r14
  call cooley_tukey
  lea rbx, [r12 + r14] # rbx = start of second half (also the loop bound for r15)
  mov r14, rbx # r14 walks the second half, r15 the first
  mov r15, r12
cooley_tukey_loop:
  cmp r15, rbx
  je cooley_tukey_end
  pxor xmm0, xmm0 # w = cexp(-2*pi*I * k / N) with k = (r14 - rbx)/16;
  movsd xmm1, two_pi # byte offsets cancel: (k*16)/(N*16) = k/N
  mov rax, r14
  sub rax, rbx
  cvtsi2sdq xmm2, rax
  cvtsi2sdq xmm3, r13
  divsd xmm2, xmm3
  mulsd xmm1, xmm2
  call cexp
  movq xmm2, QWORD PTR [r14] # p = w * X[k + N/2]
  movq xmm3, QWORD PTR [r14 + 8]
  call __muldc3
  movq xmm2, QWORD PTR [r15] # X[k + N/2] = X[k] - p
  movq xmm3, QWORD PTR [r15 + 8]
  subsd xmm2, xmm0
  subsd xmm3, xmm1
  movq QWORD PTR [r14], xmm2 # Save value in X[k + N/2]
  movq QWORD PTR [r14 + 8], xmm3
  movq xmm0, QWORD PTR [r15] # X[k] = X[k] + p, recovered as
  movq xmm1, QWORD PTR [r15 + 8] # X[k] - ((X[k] - p) - X[k])
  subsd xmm2, xmm0
  subsd xmm3, xmm1
  subsd xmm0, xmm2
  subsd xmm1, xmm3
  movq QWORD PTR [r15], xmm0
  movq QWORD PTR [r15 + 8], xmm1
  add r14, 16
  add r15, 16
  jmp cooley_tukey_loop
cooley_tukey_end:
  pop r15
  pop r14
  pop r13
  pop r12
  pop rbx
cooley_tukey_return:
  ret
# rdi - array ptr
# rsi - array size in BYTES (16 bytes per double-complex element)
# Permutes the array into bit-reversed index order in place -- the standard
# reordering step before an in-place iterative FFT. Each pair is swapped
# exactly once (only when reversed > original).
# Fix vs. original: "sar r15" relied on the assembler defaulting the missing
# shift count to 1; the count is now explicit (identical encoding/behavior).
bit_reverse:
  push rbx
  push r12
  push r13
  push r14
  push r15
  mov r12, rdi # Save parameters
  mov r13, rsi
  shr r13, 4 # r13 = element count (bytes / 16)
  xor r14, r14 # r14 = current element index
bit_reverse_entries:
  cmp r14, r13
  je bit_reverse_return
  cvtsi2sdq xmm0, r13 # number of index bits = log2(N)
  call log2 # NOTE(review): loop-invariant; could be hoisted out of the loop
  cvttsd2si rcx, xmm0
  mov rdi, 1 # rdi = (1 << log2(N)) - 1, mask of valid index bits
  sal edi, cl
  sub edi, 1
  sub ecx, 1 # ecx = bits not yet consumed by the reversal loop
  mov rax, r14
  mov r15, r14
bit_reverse_loop:
  sar r15, 1 # consume one source bit; done when none remain
  je bit_reverse_reversed
  sal rax, 1 # rax = (rax << 1) | (r15 & 1)
  mov rsi, r15
  and rsi, 1
  or rax, rsi
  sub ecx, 1 # one fewer bit left to pad with
  jmp bit_reverse_loop
bit_reverse_reversed:
  sal eax, cl # pad by the unused bit count, then mask to log2(N) bits
  and rax, rdi
  cmp rax, r14 # swap only when reversed > original index,
  jle bit_reverse_no_swap # so each pair is exchanged a single time
  shl rax, 4 # scale indices by 16 to get byte offsets
  shl r14, 4
  movaps xmm0, XMMWORD PTR [r12 + rax]
  movaps xmm1, XMMWORD PTR [r12 + r14]
  movaps XMMWORD PTR [r12 + rax], xmm1
  movaps XMMWORD PTR [r12 + r14], xmm0
  shr r14, 4 # restore the element index
bit_reverse_no_swap:
  add r14, 1
  jmp bit_reverse_entries
bit_reverse_return:
  pop r15
  pop r14
  pop r13
  pop r12
  pop rbx
  ret
# rdi - array ptr
# rsi - array size in BYTES (16 bytes per double-complex element)
# Iterative in-place radix-2 FFT: bit-reverse the input, then run log2(N)
# rounds of butterflies with strides 2, 4, ..., N.
# Stack frame (48 bytes, keeps rsp 16-aligned after the 5 pushes):
#   [rsp]       log2(N)
#   [rsp+8/16]  w = cexp(-2*pi*I / stride), this round's twiddle step
#   [rsp+24/32] v, the running twiddle within a group
#   [rsp+40]    stride (in elements)
iterative_cooley_tukey:
  push r12
  push r13
  push r14
  push r15
  push rbx
  sub rsp, 48
  mov r12, rdi
  mov r13, rsi
  call bit_reverse # Bit reversing array (takes the byte size still in rdi/rsi)
  sar r13, 4 # r13 = element count
  cvtsi2sdq xmm0, r13 # rounds = log2(N)
  call log2
  cvttsd2si rax, xmm0
  mov QWORD PTR [rsp], rax # Save it to the stack
  mov r14, 1 # r14 = current round (1 .. log2(N))
iter_ct_loop_i:
  cmp r14, rax
  jg iter_ct_end_i
  movsd xmm0, two # stride = 2^round
  cvtsi2sdq xmm1, r14
  call pow # NOTE(review): pow is not in this file's .extern list (harmless for GAS)
  cvttsd2si r10, xmm0
  mov QWORD PTR [rsp + 40], r10 # move stride to stack
  movsd xmm1, two_pi # w = cexp(-2*pi*I / stride)
  divsd xmm1, xmm0
  pxor xmm0, xmm0
  call cexp
  movq QWORD PTR [rsp + 8], xmm0 # Save it to stack
  movq QWORD PTR [rsp + 16], xmm1
  xor r15, r15 # r15 = j, first element of the current group (in elements)
iter_ct_loop_j:
  cmp r15, r13 # groups cover the whole array
  je iter_ct_end_j
  movsd xmm4, one # v = 1 + 0i at the start of each group
  pxor xmm5, xmm5
  movsd QWORD PTR [rsp + 24], xmm4
  movsd QWORD PTR [rsp + 32], xmm5
  xor rbx, rbx # rbx = k, butterfly index within the group
  mov rax, QWORD PTR [rsp + 40] # rax = stride / 2
  sar rax, 1
iter_ct_loop_k:
  cmp rbx, rax # k runs over [0, stride/2)
  je iter_ct_end_k
  mov r8, r15 # r8 -> X[j + k], r9 -> X[j + k + stride/2]
  add r8, rbx
  sal r8, 4 # elements -> bytes
  mov r9, QWORD PTR [rsp + 40]
  sal r9, 3 # stride * 8 bytes = (stride/2) elements * 16 bytes
  add r9, r8
  lea r9, [r12 + r9]
  lea r8, [r12 + r8]
  movsd xmm0, QWORD PTR [r9] # p = v * X[j + k + stride/2]
  movsd xmm1, QWORD PTR [r9 + 8]
  movsd xmm2, QWORD PTR [rsp + 24]
  movsd xmm3, QWORD PTR [rsp + 32]
  call __muldc3
  movsd xmm2, QWORD PTR [r8] # X[j + k + stride/2] = X[j + k] - p
  movsd xmm3, QWORD PTR [r8 + 8]
  subsd xmm2, xmm0
  subsd xmm3, xmm1
  movsd QWORD PTR [r9], xmm2 # Saving answer
  movsd QWORD PTR [r9 + 8], xmm3
  movsd xmm0, QWORD PTR [r8] # X[j + k] = X[j + k] + p, recovered as
  movsd xmm1, QWORD PTR [r8 + 8] # X[j+k] - ((X[j+k] - p) - X[j+k])
  subsd xmm2, xmm0
  subsd xmm3, xmm1
  subsd xmm0, xmm2
  subsd xmm1, xmm3
  movsd QWORD PTR [r8], xmm0 # Saving answer
  movsd QWORD PTR [r8 + 8], xmm1
  movsd xmm0, QWORD PTR [rsp + 24] # v = v * w
  movsd xmm1, QWORD PTR [rsp + 32]
  movsd xmm2, QWORD PTR [rsp + 8]
  movsd xmm3, QWORD PTR [rsp + 16]
  call __muldc3
  movsd QWORD PTR [rsp + 24], xmm0 # Saving answer
  movsd QWORD PTR [rsp + 32], xmm1
  add rbx, 1
  mov rax, QWORD PTR [rsp + 40] # reload stride/2 (rax clobbered by the calls)
  sar rax, 1
  jmp iter_ct_loop_k
iter_ct_end_k:
  add r15, QWORD PTR [rsp + 40] # next group starts one stride further on
  jmp iter_ct_loop_j
iter_ct_end_j:
  add r14, 1
  mov rax, QWORD PTR [rsp] # reload log2(N) for the round-count test
  jmp iter_ct_loop_i
iter_ct_end_i:
  add rsp, 48
  pop rbx
  pop r15
  pop r14
  pop r13
  pop r12
  ret
# rdi - array a ptr
# rsi - array b ptr
# rdx - array size in BYTES
# For each pair of complex entries, prints cabs(a[i]) - cabs(b[i]) with the
# "%g\n" format (values near 0 mean the two transforms agree).
approx:
  push r12
  push r13
  push r14
  push r15
  mov r12, rdi
  mov r13, rsi
  mov r14, rdx # NOTE(review): r14 is saved but never read again
  lea r15, [rdi + rdx] # r15 = end of array a (loop bound)
  sub rsp, 8 # 4 pushes leave rsp%16 == 8; realign for the calls
approx_loop:
  cmp r12, r15
  je approx_return
  movsd xmm0, QWORD PTR[r13] # xmm0 = cabs(b[i])
  movsd xmm1, QWORD PTR[r13 + 8]
  call cabs
  movsd QWORD PTR [rsp], xmm0 # spill: the next calls clobber xmm registers
  movsd xmm0, QWORD PTR[r12] # xmm0 = cabs(a[i])
  movsd xmm1, QWORD PTR[r12 + 8]
  call cabs
  movsd xmm1, QWORD PTR [rsp]
  subsd xmm0, xmm1 # xmm0 = cabs(a[i]) - cabs(b[i])
  mov rdi, OFFSET fmt # NOTE(review): absolute address; requires non-PIE linking
  mov rax, 1 # variadic call: one vector-register argument in use
  call printf
  add r12, 16
  add r13, 16
  jmp approx_loop
approx_return:
  add rsp, 8
  pop r15
  pop r14
  pop r13
  pop r12
  ret
# int main(void)
# Builds two identical arrays of 64 random double-complex samples (real in
# [0,1], imaginary 0), FFTs one iteratively and one recursively, then prints
# the per-bin magnitude differences (expected ~0).
main:
  push r12
  sub rsp, 2048 # two arrays of 64 complex entries, 1024 bytes each
  mov rdi, 0 # seed rand with the current time
  call time
  mov edi, eax
  call srand
  lea r12, [rsp + 1024] # fill both arrays, walking backwards from the end
loop:
  cmp r12, rsp
  je end_loop
  sub r12, 16
  call rand # non-negative int in eax (upper half of rax zeroed)
  cvtsi2sd xmm0, rax
  divsd xmm0, rand_max # scale into [0, 1]
  lea rax, [r12 + 1024] # mirror entry in the second array
  movsd QWORD PTR [r12], xmm0 # real part = random sample ...
  movsd QWORD PTR [rax], xmm0
  mov QWORD PTR [r12 + 8], 0 # ... imaginary part = 0
  mov QWORD PTR [rax + 8], 0
  jmp loop
end_loop:
  mov rdi, rsp # transform the first copy iteratively ...
  mov rsi, 1024
  call iterative_cooley_tukey
  lea rdi, [rsp + 1024] # ... and the second copy recursively
  mov rsi, 1024
  call cooley_tukey
  mov rdi, rsp # print per-bin magnitude differences
  lea rsi, [rsp + 1024]
  mov rdx, 1024
  call approx
  xor rax, rax # return 0
  add rsp, 2048
  pop r12
  ret
|
vandercookking/h7_device_RTT
| 34,861
|
libraries/CMSIS/Device/ST/STM32H7xx/Source/Templates/gcc/startup_stm32h747xx.S
|
/**
******************************************************************************
* @file startup_stm32h747xx.s
* @author MCD Application Team
* @brief STM32H747xx Devices vector table for GCC based toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
* @attention
*
* <h2><center>© Copyright (c) 2019 STMicroelectronics.
* All rights reserved.</center></h2>
*
* This software component is licensed by ST under BSD 3-Clause license,
* the "License"; You may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
* opensource.org/licenses/BSD-3-Clause
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m7
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* The five .word directives below are emitted into the default section (no
   .section has been selected yet). They merely record the linker-script
   symbols; the startup code loads these symbols directly with ldr =sym --
   a quirk kept from the original ST template. */
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
/* stack used for SystemInit_ExtMemCtl; always internal RAM used */
/**
 * @brief This is the code that gets called when the processor first
 *        starts execution following a reset event: set SP, copy the
 *        .data initializers from flash (_sidata) to SRAM, zero-fill
 *        .bss, call SystemInit, then branch to the application entry.
 *        NOTE(review): this template branches to "entry" (the RT-Thread
 *        entry point), not to main as the file header says, and
 *        __libc_init_array is commented out, so C library static
 *        constructors are not run here.
 * @param None
 * @retval : None
 */
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr sp, =_estack /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0 /* r1 = byte offset into .data */
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata /* load one word from the flash image ... */
ldr r3, [r3, r1]
str r3, [r0, r1] /* ... and store it to the RAM copy */
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3 /* keep copying until _sdata + offset reaches _edata */
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2], #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
/* bl __libc_init_array */
/* Call the application's entry point (RT-Thread entry, see note above). */
bl entry
bx lr
.size Reset_Handler, .-Reset_Handler
/**
 * @brief This is the code that gets called when the processor receives an
 *        unexpected interrupt. This simply enters an infinite loop,
 *        preserving the system state for examination by a debugger.
 * @param None
 * @retval None
 */
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop /* spin forever; every unpopulated IRQ vector lands here */
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
*******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
/* NOTE(review): this .size directive is evaluated before g_pfnVectors is
   defined at the very same address, so the recorded object size is 0.
   Moving it after the last .word would record the real size; quirk
   inherited from the ST template. */
.size g_pfnVectors, .-g_pfnVectors
/* Layout: initial SP, reset vector, Cortex-M system exceptions, then the
   STM32H747xx peripheral vectors in IRQ-number order; ".word 0" entries
   are reserved slots. */
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word MemManage_Handler
.word BusFault_Handler
.word UsageFault_Handler
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word DebugMon_Handler
.word 0
.word PendSV_Handler
.word SysTick_Handler
/* External Interrupts */
.word WWDG_IRQHandler /* Window WatchDog Interrupt ( wwdg1_it, wwdg2_it) */
.word PVD_AVD_IRQHandler /* PVD/AVD through EXTI Line detection */
.word TAMP_STAMP_IRQHandler /* Tamper and TimeStamps through the EXTI line */
.word RTC_WKUP_IRQHandler /* RTC Wakeup through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_IRQHandler /* RCC */
.word EXTI0_IRQHandler /* EXTI Line0 */
.word EXTI1_IRQHandler /* EXTI Line1 */
.word EXTI2_IRQHandler /* EXTI Line2 */
.word EXTI3_IRQHandler /* EXTI Line3 */
.word EXTI4_IRQHandler /* EXTI Line4 */
.word DMA1_Stream0_IRQHandler /* DMA1 Stream 0 */
.word DMA1_Stream1_IRQHandler /* DMA1 Stream 1 */
.word DMA1_Stream2_IRQHandler /* DMA1 Stream 2 */
.word DMA1_Stream3_IRQHandler /* DMA1 Stream 3 */
.word DMA1_Stream4_IRQHandler /* DMA1 Stream 4 */
.word DMA1_Stream5_IRQHandler /* DMA1 Stream 5 */
.word DMA1_Stream6_IRQHandler /* DMA1 Stream 6 */
.word ADC_IRQHandler /* ADC1, ADC2 and ADC3s */
.word FDCAN1_IT0_IRQHandler /* FDCAN1 interrupt line 0 */
.word FDCAN2_IT0_IRQHandler /* FDCAN2 interrupt line 0 */
.word FDCAN1_IT1_IRQHandler /* FDCAN1 interrupt line 1 */
.word FDCAN2_IT1_IRQHandler /* FDCAN2 interrupt line 1 */
.word EXTI9_5_IRQHandler /* External Line[9:5]s */
.word TIM1_BRK_IRQHandler /* TIM1 Break interrupt */
.word TIM1_UP_IRQHandler /* TIM1 Update interrupt */
.word TIM1_TRG_COM_IRQHandler /* TIM1 Trigger and Commutation interrupt */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM4_IRQHandler /* TIM4 */
.word I2C1_EV_IRQHandler /* I2C1 Event */
.word I2C1_ER_IRQHandler /* I2C1 Error */
.word I2C2_EV_IRQHandler /* I2C2 Event */
.word I2C2_ER_IRQHandler /* I2C2 Error */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_IRQHandler /* USART3 */
.word EXTI15_10_IRQHandler /* External Line[15:10]s */
.word RTC_Alarm_IRQHandler /* RTC Alarm (A and B) through EXTI Line */
.word 0 /* Reserved */
.word TIM8_BRK_TIM12_IRQHandler /* TIM8 Break and TIM12 */
.word TIM8_UP_TIM13_IRQHandler /* TIM8 Update and TIM13 */
.word TIM8_TRG_COM_TIM14_IRQHandler /* TIM8 Trigger and Commutation and TIM14 */
.word TIM8_CC_IRQHandler /* TIM8 Capture Compare */
.word DMA1_Stream7_IRQHandler /* DMA1 Stream7 */
.word FMC_IRQHandler /* FMC */
.word SDMMC1_IRQHandler /* SDMMC1 */
.word TIM5_IRQHandler /* TIM5 */
.word SPI3_IRQHandler /* SPI3 */
.word UART4_IRQHandler /* UART4 */
.word UART5_IRQHandler /* UART5 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC1&2 underrun errors */
.word TIM7_IRQHandler /* TIM7 */
.word DMA2_Stream0_IRQHandler /* DMA2 Stream 0 */
.word DMA2_Stream1_IRQHandler /* DMA2 Stream 1 */
.word DMA2_Stream2_IRQHandler /* DMA2 Stream 2 */
.word DMA2_Stream3_IRQHandler /* DMA2 Stream 3 */
.word DMA2_Stream4_IRQHandler /* DMA2 Stream 4 */
.word ETH_IRQHandler /* Ethernet */
.word ETH_WKUP_IRQHandler /* Ethernet Wakeup through EXTI line */
.word FDCAN_CAL_IRQHandler /* FDCAN calibration unit interrupt */
.word CM7_SEV_IRQHandler /* CM7 Send event interrupt for CM4 */
.word CM4_SEV_IRQHandler /* CM4 Send event interrupt for CM7 */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word DMA2_Stream5_IRQHandler /* DMA2 Stream 5 */
.word DMA2_Stream6_IRQHandler /* DMA2 Stream 6 */
.word DMA2_Stream7_IRQHandler /* DMA2 Stream 7 */
.word USART6_IRQHandler /* USART6 */
.word I2C3_EV_IRQHandler /* I2C3 event */
.word I2C3_ER_IRQHandler /* I2C3 error */
.word OTG_HS_EP1_OUT_IRQHandler /* USB OTG HS End Point 1 Out */
.word OTG_HS_EP1_IN_IRQHandler /* USB OTG HS End Point 1 In */
.word OTG_HS_WKUP_IRQHandler /* USB OTG HS Wakeup through EXTI */
.word OTG_HS_IRQHandler /* USB OTG HS */
.word DCMI_IRQHandler /* DCMI */
.word 0 /* Reserved */
.word RNG_IRQHandler /* Rng */
.word FPU_IRQHandler /* FPU */
.word UART7_IRQHandler /* UART7 */
.word UART8_IRQHandler /* UART8 */
.word SPI4_IRQHandler /* SPI4 */
.word SPI5_IRQHandler /* SPI5 */
.word SPI6_IRQHandler /* SPI6 */
.word SAI1_IRQHandler /* SAI1 */
.word LTDC_IRQHandler /* LTDC */
.word LTDC_ER_IRQHandler /* LTDC error */
.word DMA2D_IRQHandler /* DMA2D */
.word SAI2_IRQHandler /* SAI2 */
.word QUADSPI_IRQHandler /* QUADSPI */
.word LPTIM1_IRQHandler /* LPTIM1 */
.word CEC_IRQHandler /* HDMI_CEC */
.word I2C4_EV_IRQHandler /* I2C4 Event */
.word I2C4_ER_IRQHandler /* I2C4 Error */
.word SPDIF_RX_IRQHandler /* SPDIF_RX */
.word OTG_FS_EP1_OUT_IRQHandler /* USB OTG FS End Point 1 Out */
.word OTG_FS_EP1_IN_IRQHandler /* USB OTG FS End Point 1 In */
.word OTG_FS_WKUP_IRQHandler /* USB OTG FS Wakeup through EXTI */
.word OTG_FS_IRQHandler /* USB OTG FS */
.word DMAMUX1_OVR_IRQHandler /* DMAMUX1 Overrun interrupt */
.word HRTIM1_Master_IRQHandler /* HRTIM Master Timer global Interrupt */
.word HRTIM1_TIMA_IRQHandler /* HRTIM Timer A global Interrupt */
.word HRTIM1_TIMB_IRQHandler /* HRTIM Timer B global Interrupt */
.word HRTIM1_TIMC_IRQHandler /* HRTIM Timer C global Interrupt */
.word HRTIM1_TIMD_IRQHandler /* HRTIM Timer D global Interrupt */
.word HRTIM1_TIME_IRQHandler /* HRTIM Timer E global Interrupt */
.word HRTIM1_FLT_IRQHandler /* HRTIM Fault global Interrupt */
.word DFSDM1_FLT0_IRQHandler /* DFSDM Filter0 Interrupt */
.word DFSDM1_FLT1_IRQHandler /* DFSDM Filter1 Interrupt */
.word DFSDM1_FLT2_IRQHandler /* DFSDM Filter2 Interrupt */
.word DFSDM1_FLT3_IRQHandler /* DFSDM Filter3 Interrupt */
.word SAI3_IRQHandler /* SAI3 global Interrupt */
.word SWPMI1_IRQHandler /* Serial Wire Interface 1 global interrupt */
.word TIM15_IRQHandler /* TIM15 global Interrupt */
.word TIM16_IRQHandler /* TIM16 global Interrupt */
.word TIM17_IRQHandler /* TIM17 global Interrupt */
.word MDIOS_WKUP_IRQHandler /* MDIOS Wakeup Interrupt */
.word MDIOS_IRQHandler /* MDIOS global Interrupt */
.word JPEG_IRQHandler /* JPEG global Interrupt */
.word MDMA_IRQHandler /* MDMA global Interrupt */
.word DSI_IRQHandler /* DSI global Interrupt */
.word SDMMC2_IRQHandler /* SDMMC2 global Interrupt */
.word HSEM1_IRQHandler /* HSEM1 global Interrupt */
.word HSEM2_IRQHandler /* HSEM2 global Interrupt */
.word ADC3_IRQHandler /* ADC3 global Interrupt */
.word DMAMUX2_OVR_IRQHandler /* DMAMUX Overrun interrupt */
.word BDMA_Channel0_IRQHandler /* BDMA Channel 0 global Interrupt */
.word BDMA_Channel1_IRQHandler /* BDMA Channel 1 global Interrupt */
.word BDMA_Channel2_IRQHandler /* BDMA Channel 2 global Interrupt */
.word BDMA_Channel3_IRQHandler /* BDMA Channel 3 global Interrupt */
.word BDMA_Channel4_IRQHandler /* BDMA Channel 4 global Interrupt */
.word BDMA_Channel5_IRQHandler /* BDMA Channel 5 global Interrupt */
.word BDMA_Channel6_IRQHandler /* BDMA Channel 6 global Interrupt */
.word BDMA_Channel7_IRQHandler /* BDMA Channel 7 global Interrupt */
.word COMP1_IRQHandler /* COMP1 global Interrupt */
.word LPTIM2_IRQHandler /* LP TIM2 global interrupt */
.word LPTIM3_IRQHandler /* LP TIM3 global interrupt */
.word LPTIM4_IRQHandler /* LP TIM4 global interrupt */
.word LPTIM5_IRQHandler /* LP TIM5 global interrupt */
.word LPUART1_IRQHandler /* LP UART1 interrupt */
.word WWDG_RST_IRQHandler /* Window Watchdog reset interrupt (exti_d2_wwdg_it, exti_d1_wwdg_it) */
.word CRS_IRQHandler /* Clock Recovery Global Interrupt */
.word ECC_IRQHandler /* ECC diagnostic Global Interrupt */
.word SAI4_IRQHandler /* SAI4 global interrupt */
.word 0 /* Reserved */
.word HOLD_CORE_IRQHandler /* Hold core interrupt */
.word WAKEUP_PIN_IRQHandler /* Interrupt for all 6 wake-up pins */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
/* Each .weak/.thumb_set pair below binds HANDLER to Default_Handler as a
   weak Thumb symbol; defining a strong function of the same name anywhere
   in the application replaces the alias at link time. The order mirrors
   the vector table above. */
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak MemManage_Handler
.thumb_set MemManage_Handler,Default_Handler
.weak BusFault_Handler
.thumb_set BusFault_Handler,Default_Handler
.weak UsageFault_Handler
.thumb_set UsageFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak DebugMon_Handler
.thumb_set DebugMon_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_AVD_IRQHandler
.thumb_set PVD_AVD_IRQHandler,Default_Handler
.weak TAMP_STAMP_IRQHandler
.thumb_set TAMP_STAMP_IRQHandler,Default_Handler
.weak RTC_WKUP_IRQHandler
.thumb_set RTC_WKUP_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_IRQHandler
.thumb_set RCC_IRQHandler,Default_Handler
.weak EXTI0_IRQHandler
.thumb_set EXTI0_IRQHandler,Default_Handler
.weak EXTI1_IRQHandler
.thumb_set EXTI1_IRQHandler,Default_Handler
.weak EXTI2_IRQHandler
.thumb_set EXTI2_IRQHandler,Default_Handler
.weak EXTI3_IRQHandler
.thumb_set EXTI3_IRQHandler,Default_Handler
.weak EXTI4_IRQHandler
.thumb_set EXTI4_IRQHandler,Default_Handler
.weak DMA1_Stream0_IRQHandler
.thumb_set DMA1_Stream0_IRQHandler,Default_Handler
.weak DMA1_Stream1_IRQHandler
.thumb_set DMA1_Stream1_IRQHandler,Default_Handler
.weak DMA1_Stream2_IRQHandler
.thumb_set DMA1_Stream2_IRQHandler,Default_Handler
.weak DMA1_Stream3_IRQHandler
.thumb_set DMA1_Stream3_IRQHandler,Default_Handler
.weak DMA1_Stream4_IRQHandler
.thumb_set DMA1_Stream4_IRQHandler,Default_Handler
.weak DMA1_Stream5_IRQHandler
.thumb_set DMA1_Stream5_IRQHandler,Default_Handler
.weak DMA1_Stream6_IRQHandler
.thumb_set DMA1_Stream6_IRQHandler,Default_Handler
.weak ADC_IRQHandler
.thumb_set ADC_IRQHandler,Default_Handler
.weak FDCAN1_IT0_IRQHandler
.thumb_set FDCAN1_IT0_IRQHandler,Default_Handler
.weak FDCAN2_IT0_IRQHandler
.thumb_set FDCAN2_IT0_IRQHandler,Default_Handler
.weak FDCAN1_IT1_IRQHandler
.thumb_set FDCAN1_IT1_IRQHandler,Default_Handler
.weak FDCAN2_IT1_IRQHandler
.thumb_set FDCAN2_IT1_IRQHandler,Default_Handler
.weak EXTI9_5_IRQHandler
.thumb_set EXTI9_5_IRQHandler,Default_Handler
.weak TIM1_BRK_IRQHandler
.thumb_set TIM1_BRK_IRQHandler,Default_Handler
.weak TIM1_UP_IRQHandler
.thumb_set TIM1_UP_IRQHandler,Default_Handler
.weak TIM1_TRG_COM_IRQHandler
.thumb_set TIM1_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM4_IRQHandler
.thumb_set TIM4_IRQHandler,Default_Handler
.weak I2C1_EV_IRQHandler
.thumb_set I2C1_EV_IRQHandler,Default_Handler
.weak I2C1_ER_IRQHandler
.thumb_set I2C1_ER_IRQHandler,Default_Handler
.weak I2C2_EV_IRQHandler
.thumb_set I2C2_EV_IRQHandler,Default_Handler
.weak I2C2_ER_IRQHandler
.thumb_set I2C2_ER_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_IRQHandler
.thumb_set USART3_IRQHandler,Default_Handler
.weak EXTI15_10_IRQHandler
.thumb_set EXTI15_10_IRQHandler,Default_Handler
.weak RTC_Alarm_IRQHandler
.thumb_set RTC_Alarm_IRQHandler,Default_Handler
.weak TIM8_BRK_TIM12_IRQHandler
.thumb_set TIM8_BRK_TIM12_IRQHandler,Default_Handler
.weak TIM8_UP_TIM13_IRQHandler
.thumb_set TIM8_UP_TIM13_IRQHandler,Default_Handler
.weak TIM8_TRG_COM_TIM14_IRQHandler
.thumb_set TIM8_TRG_COM_TIM14_IRQHandler,Default_Handler
.weak TIM8_CC_IRQHandler
.thumb_set TIM8_CC_IRQHandler,Default_Handler
.weak DMA1_Stream7_IRQHandler
.thumb_set DMA1_Stream7_IRQHandler,Default_Handler
.weak FMC_IRQHandler
.thumb_set FMC_IRQHandler,Default_Handler
.weak SDMMC1_IRQHandler
.thumb_set SDMMC1_IRQHandler,Default_Handler
.weak TIM5_IRQHandler
.thumb_set TIM5_IRQHandler,Default_Handler
.weak SPI3_IRQHandler
.thumb_set SPI3_IRQHandler,Default_Handler
.weak UART4_IRQHandler
.thumb_set UART4_IRQHandler,Default_Handler
.weak UART5_IRQHandler
.thumb_set UART5_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak DMA2_Stream0_IRQHandler
.thumb_set DMA2_Stream0_IRQHandler,Default_Handler
.weak DMA2_Stream1_IRQHandler
.thumb_set DMA2_Stream1_IRQHandler,Default_Handler
.weak DMA2_Stream2_IRQHandler
.thumb_set DMA2_Stream2_IRQHandler,Default_Handler
.weak DMA2_Stream3_IRQHandler
.thumb_set DMA2_Stream3_IRQHandler,Default_Handler
.weak DMA2_Stream4_IRQHandler
.thumb_set DMA2_Stream4_IRQHandler,Default_Handler
.weak ETH_IRQHandler
.thumb_set ETH_IRQHandler,Default_Handler
.weak ETH_WKUP_IRQHandler
.thumb_set ETH_WKUP_IRQHandler,Default_Handler
.weak FDCAN_CAL_IRQHandler
.thumb_set FDCAN_CAL_IRQHandler,Default_Handler
.weak CM7_SEV_IRQHandler
.thumb_set CM7_SEV_IRQHandler,Default_Handler
.weak CM4_SEV_IRQHandler
.thumb_set CM4_SEV_IRQHandler,Default_Handler
.weak DMA2_Stream5_IRQHandler
.thumb_set DMA2_Stream5_IRQHandler,Default_Handler
.weak DMA2_Stream6_IRQHandler
.thumb_set DMA2_Stream6_IRQHandler,Default_Handler
.weak DMA2_Stream7_IRQHandler
.thumb_set DMA2_Stream7_IRQHandler,Default_Handler
.weak USART6_IRQHandler
.thumb_set USART6_IRQHandler,Default_Handler
.weak I2C3_EV_IRQHandler
.thumb_set I2C3_EV_IRQHandler,Default_Handler
.weak I2C3_ER_IRQHandler
.thumb_set I2C3_ER_IRQHandler,Default_Handler
.weak OTG_HS_EP1_OUT_IRQHandler
.thumb_set OTG_HS_EP1_OUT_IRQHandler,Default_Handler
.weak OTG_HS_EP1_IN_IRQHandler
.thumb_set OTG_HS_EP1_IN_IRQHandler,Default_Handler
.weak OTG_HS_WKUP_IRQHandler
.thumb_set OTG_HS_WKUP_IRQHandler,Default_Handler
.weak OTG_HS_IRQHandler
.thumb_set OTG_HS_IRQHandler,Default_Handler
.weak DCMI_IRQHandler
.thumb_set DCMI_IRQHandler,Default_Handler
.weak RNG_IRQHandler
.thumb_set RNG_IRQHandler,Default_Handler
.weak FPU_IRQHandler
.thumb_set FPU_IRQHandler,Default_Handler
.weak UART7_IRQHandler
.thumb_set UART7_IRQHandler,Default_Handler
.weak UART8_IRQHandler
.thumb_set UART8_IRQHandler,Default_Handler
.weak SPI4_IRQHandler
.thumb_set SPI4_IRQHandler,Default_Handler
.weak SPI5_IRQHandler
.thumb_set SPI5_IRQHandler,Default_Handler
.weak SPI6_IRQHandler
.thumb_set SPI6_IRQHandler,Default_Handler
.weak SAI1_IRQHandler
.thumb_set SAI1_IRQHandler,Default_Handler
.weak LTDC_IRQHandler
.thumb_set LTDC_IRQHandler,Default_Handler
.weak LTDC_ER_IRQHandler
.thumb_set LTDC_ER_IRQHandler,Default_Handler
.weak DMA2D_IRQHandler
.thumb_set DMA2D_IRQHandler,Default_Handler
.weak SAI2_IRQHandler
.thumb_set SAI2_IRQHandler,Default_Handler
.weak QUADSPI_IRQHandler
.thumb_set QUADSPI_IRQHandler,Default_Handler
.weak LPTIM1_IRQHandler
.thumb_set LPTIM1_IRQHandler,Default_Handler
.weak CEC_IRQHandler
.thumb_set CEC_IRQHandler,Default_Handler
.weak I2C4_EV_IRQHandler
.thumb_set I2C4_EV_IRQHandler,Default_Handler
.weak I2C4_ER_IRQHandler
.thumb_set I2C4_ER_IRQHandler,Default_Handler
.weak SPDIF_RX_IRQHandler
.thumb_set SPDIF_RX_IRQHandler,Default_Handler
.weak OTG_FS_EP1_OUT_IRQHandler
.thumb_set OTG_FS_EP1_OUT_IRQHandler,Default_Handler
.weak OTG_FS_EP1_IN_IRQHandler
.thumb_set OTG_FS_EP1_IN_IRQHandler,Default_Handler
.weak OTG_FS_WKUP_IRQHandler
.thumb_set OTG_FS_WKUP_IRQHandler,Default_Handler
.weak OTG_FS_IRQHandler
.thumb_set OTG_FS_IRQHandler,Default_Handler
.weak DMAMUX1_OVR_IRQHandler
.thumb_set DMAMUX1_OVR_IRQHandler,Default_Handler
.weak HRTIM1_Master_IRQHandler
.thumb_set HRTIM1_Master_IRQHandler,Default_Handler
.weak HRTIM1_TIMA_IRQHandler
.thumb_set HRTIM1_TIMA_IRQHandler,Default_Handler
.weak HRTIM1_TIMB_IRQHandler
.thumb_set HRTIM1_TIMB_IRQHandler,Default_Handler
.weak HRTIM1_TIMC_IRQHandler
.thumb_set HRTIM1_TIMC_IRQHandler,Default_Handler
.weak HRTIM1_TIMD_IRQHandler
.thumb_set HRTIM1_TIMD_IRQHandler,Default_Handler
.weak HRTIM1_TIME_IRQHandler
.thumb_set HRTIM1_TIME_IRQHandler,Default_Handler
.weak HRTIM1_FLT_IRQHandler
.thumb_set HRTIM1_FLT_IRQHandler,Default_Handler
.weak DFSDM1_FLT0_IRQHandler
.thumb_set DFSDM1_FLT0_IRQHandler,Default_Handler
.weak DFSDM1_FLT1_IRQHandler
.thumb_set DFSDM1_FLT1_IRQHandler,Default_Handler
.weak DFSDM1_FLT2_IRQHandler
.thumb_set DFSDM1_FLT2_IRQHandler,Default_Handler
.weak DFSDM1_FLT3_IRQHandler
.thumb_set DFSDM1_FLT3_IRQHandler,Default_Handler
.weak SAI3_IRQHandler
.thumb_set SAI3_IRQHandler,Default_Handler
.weak SWPMI1_IRQHandler
.thumb_set SWPMI1_IRQHandler,Default_Handler
.weak TIM15_IRQHandler
.thumb_set TIM15_IRQHandler,Default_Handler
.weak TIM16_IRQHandler
.thumb_set TIM16_IRQHandler,Default_Handler
.weak TIM17_IRQHandler
.thumb_set TIM17_IRQHandler,Default_Handler
.weak MDIOS_WKUP_IRQHandler
.thumb_set MDIOS_WKUP_IRQHandler,Default_Handler
.weak MDIOS_IRQHandler
.thumb_set MDIOS_IRQHandler,Default_Handler
.weak JPEG_IRQHandler
.thumb_set JPEG_IRQHandler,Default_Handler
.weak MDMA_IRQHandler
.thumb_set MDMA_IRQHandler,Default_Handler
.weak DSI_IRQHandler
.thumb_set DSI_IRQHandler,Default_Handler
.weak SDMMC2_IRQHandler
.thumb_set SDMMC2_IRQHandler,Default_Handler
.weak HSEM1_IRQHandler
.thumb_set HSEM1_IRQHandler,Default_Handler
.weak HSEM2_IRQHandler
.thumb_set HSEM2_IRQHandler,Default_Handler
.weak ADC3_IRQHandler
.thumb_set ADC3_IRQHandler,Default_Handler
.weak DMAMUX2_OVR_IRQHandler
.thumb_set DMAMUX2_OVR_IRQHandler,Default_Handler
.weak BDMA_Channel0_IRQHandler
.thumb_set BDMA_Channel0_IRQHandler,Default_Handler
.weak BDMA_Channel1_IRQHandler
.thumb_set BDMA_Channel1_IRQHandler,Default_Handler
.weak BDMA_Channel2_IRQHandler
.thumb_set BDMA_Channel2_IRQHandler,Default_Handler
.weak BDMA_Channel3_IRQHandler
.thumb_set BDMA_Channel3_IRQHandler,Default_Handler
.weak BDMA_Channel4_IRQHandler
.thumb_set BDMA_Channel4_IRQHandler,Default_Handler
.weak BDMA_Channel5_IRQHandler
.thumb_set BDMA_Channel5_IRQHandler,Default_Handler
.weak BDMA_Channel6_IRQHandler
.thumb_set BDMA_Channel6_IRQHandler,Default_Handler
.weak BDMA_Channel7_IRQHandler
.thumb_set BDMA_Channel7_IRQHandler,Default_Handler
.weak COMP1_IRQHandler
.thumb_set COMP1_IRQHandler,Default_Handler
.weak LPTIM2_IRQHandler
.thumb_set LPTIM2_IRQHandler,Default_Handler
.weak LPTIM3_IRQHandler
.thumb_set LPTIM3_IRQHandler,Default_Handler
.weak LPTIM4_IRQHandler
.thumb_set LPTIM4_IRQHandler,Default_Handler
.weak LPTIM5_IRQHandler
.thumb_set LPTIM5_IRQHandler,Default_Handler
.weak LPUART1_IRQHandler
.thumb_set LPUART1_IRQHandler,Default_Handler
.weak WWDG_RST_IRQHandler
.thumb_set WWDG_RST_IRQHandler,Default_Handler
.weak CRS_IRQHandler
.thumb_set CRS_IRQHandler,Default_Handler
.weak ECC_IRQHandler
.thumb_set ECC_IRQHandler,Default_Handler
.weak SAI4_IRQHandler
.thumb_set SAI4_IRQHandler,Default_Handler
.weak HOLD_CORE_IRQHandler
.thumb_set HOLD_CORE_IRQHandler,Default_Handler
.weak WAKEUP_PIN_IRQHandler
.thumb_set WAKEUP_PIN_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
vandercookking/h7_device_RTT
| 3,304
|
rt-thread/libcpu/rx/context_iar.S
|
#include "cpuconfig.h"
//#include "iorx62n.h"
EXTERN _rt_thread_switch_interrupt_flag
EXTERN _rt_interrupt_from_thread
EXTERN _rt_interrupt_to_thread
EXTERN _rt_hw_hard_fault_exception
EXTERN _rt_hw_cpu_shutdown
/*PUBLIC _Interrupt_SWINT*/
PUBLIC ___interrupt_27
PUBLIC ___interrupt_0
RSEG CODE:CODE(4)
;/*
; * rt_base_t rt_hw_interrupt_disable();
; */
PUBLIC _rt_hw_interrupt_disable
_rt_hw_interrupt_disable:
MVTIPL #MAX_SYSCALL_INTERRUPT_PRIORITY
RTS
;/*
; * void rt_hw_interrupt_enable(rt_base_t level);
; */
PUBLIC _rt_hw_interrupt_enable
_rt_hw_interrupt_enable:
MVTIPL #KERNEL_INTERRUPT_PRIORITY
RTS
; r0 --> switch from thread stack
; r1 --> switch to thread stack
; psr, pc, lr, r12, r3, r2, r1, r0 are pushed into [from] stack
___interrupt_27:
/* enable interrupt because enter the interrupt,it will be clear */
SETPSW I
MVTIPL #MAX_SYSCALL_INTERRUPT_PRIORITY
PUSH.L R15
/* justage if it should switch thread*/
MOV.L #_rt_thread_switch_interrupt_flag, R15
MOV.L [ R15 ], R15
CMP #0, R15
BEQ notask_exit
/* clean the flag*/
MOV.L #_rt_thread_switch_interrupt_flag, R15
MOV.L #0, [ R15 ]
/* justage if it should save the register*/
MOV.L #_rt_interrupt_from_thread, R15
MOV.L [ R15 ], R15
CMP #0, R15
BEQ need_modify_isp
/*save register*/
MVFC USP, R15
SUB #12, R15
MVTC R15, USP
MOV.L [ R0 ], [ R15 ] ;PSW
MOV.L 4[ R0 ], 4[ R15 ];PC
MOV.L 8[ R0 ], 8[ R15 ] ;R15
ADD #12, R0
SETPSW U
PUSHM R1-R14
MVFC FPSW, R15
PUSH.L R15
MVFACHI R15
PUSH.L R15
MVFACMI R15 ; Middle order word.
SHLL #16, R15 ; Shifted left as it is restored to the low orde r w
PUSH.L R15
/*save thread stack pointer and switch to new thread*/
MOV.L #_rt_interrupt_from_thread, R15
MOV.L [ R15 ], R15
MOV.L R0, [ R15 ]
BRA switch_to_thread
need_modify_isp:
MVFC ISP, R15
ADD #12, R15
MVTC R15, ISP
switch_to_thread:
SETPSW U
MOV.L #_rt_interrupt_to_thread, R15
MOV.L [ R15 ], R15
MOV.L [ R15 ], R0
POP R15
MVTACLO R15
POP R15
MVTACHI R15
POP R15
MVTC R15, FPSW
POPM R1-R15
BRA pendsv_exit
notask_exit:
POP R15
pendsv_exit:
MVTIPL #KERNEL_INTERRUPT_PRIORITY
RTE
NOP
NOP
/*exception interrupt*/
___interrupt_0:
PUSH.L R15
/*save the register for infomation*/
MVFC USP, R15
SUB #12, R15
MVTC R15, USP
MOV.L [ R0 ], [ R15 ] ;PSW
MOV.L 4[ R0 ], 4[ R15 ];PC
MOV.L 8[ R0 ], 8[ R15 ] ;R15
ADD #12, R0
SETPSW U
PUSHM R1-R14
MVFC FPSW, R15
PUSH.L R15
MVFACHI R15
PUSH.L R15
MVFACMI R15 ; Middle order word.
SHLL #16, R15 ; Shifted left as it is restored to the low orde r w
PUSH.L R15
/*save the exception infomation add R1 as a parameter of
* function rt_hw_hard_fault_exception
*/
MOV.L R0, R1
BRA _rt_hw_hard_fault_exception
BRA _rt_hw_cpu_shutdown
RTE
NOP
NOP
END
|
vandercookking/h7_device_RTT
| 5,054
|
rt-thread/libcpu/aarch64/link.lds.S
|
/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Date Author Notes
 * 2017-5-30 bernard first version
 *
 * AArch64 kernel linker script. The image is linked either at the kernel
 * virtual base (RT_USING_SMART) or at the physical RAM offset, always with
 * a 2 MB-aligned text offset so the boot code can map it with block pages.
 */
#include "rtconfig.h"
OUTPUT_FORMAT("elf64-littleaarch64", "elf64-littleaarch64", "elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
#ifndef ARCH_TEXT_OFFSET
#define ARCH_TEXT_OFFSET 0x200000 /* We always boot in address where is 2MB aligned */
#endif
#ifndef ARCH_RAM_OFFSET
#define ARCH_RAM_OFFSET 0
#endif
SECTIONS
{
_text_offset = ARCH_TEXT_OFFSET;
#ifdef RT_USING_SMART
. = KERNEL_VADDR_START + _text_offset;
#else
. = ARCH_RAM_OFFSET + _text_offset;
#endif
.text :
{
PROVIDE(__text_start = .);
KEEP(*(.text.entrypoint)) /* The entry point */
*(.vectors)
*(.text) /* remaining code */
*(.text.*) /* remaining code */
*(.rodata) /* read-only data (constants) */
*(.rodata*)
*(.glue_7)
*(.glue_7t)
*(.gnu.linkonce.t*)
/* section information for utest */
. = ALIGN(8);
PROVIDE(__rt_utest_tc_tab_start = .);
KEEP(*(UtestTcTab))
PROVIDE(__rt_utest_tc_tab_end = .);
/* section information for finsh shell */
. = ALIGN(8);
PROVIDE(__fsymtab_start = .);
KEEP(*(FSymTab))
PROVIDE(__fsymtab_end = .);
. = ALIGN(8);
PROVIDE(__vsymtab_start = .);
KEEP(*(VSymTab))
PROVIDE(__vsymtab_end = .);
. = ALIGN(8);
/* section information for modules */
. = ALIGN(8);
PROVIDE(__rtmsymtab_start = .);
KEEP(*(RTMSymTab))
PROVIDE(__rtmsymtab_end = .);
/* section information for initialization (INIT_EXPORT ordering) */
. = ALIGN(8);
PROVIDE(__rt_init_start = .);
KEEP(*(SORT(.rti_fn*)))
PROVIDE(__rt_init_end = .);
/* section information for rt_ofw. */
. = ALIGN(16);
PROVIDE(__rt_ofw_data_start = .);
KEEP(*(SORT(.rt_ofw_data.*)))
PROVIDE(__rt_ofw_data_end = .);
. = ALIGN(16);
PROVIDE(__text_end = .);
}
.eh_frame_hdr :
{
*(.eh_frame_hdr)
*(.eh_frame_entry)
}
.eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
. = ALIGN(8);
.data :
{
*(.data)
*(.data.*)
*(.data1)
*(.data1.*)
. = ALIGN(16);
_gp = ABSOLUTE(.); /* Base of small data */
*(.sdata)
*(.sdata.*)
*(.rel.local)
}
. = ALIGN(8);
.ctors :
{
PROVIDE(__ctors_start = .);
/* new GCC version uses .init_array */
KEEP(*(SORT(.init_array.*)))
KEEP(*(.init_array))
PROVIDE(__ctors_end = .);
}
.dtors :
{
PROVIDE(__dtors_start = .);
KEEP(*(SORT(.dtors.*)))
KEEP(*(.dtors))
PROVIDE(__dtors_end = .);
}
. = ALIGN(16);
.bss :
{
/*
 * We need some free space to page or cpu stack, move .bss.noclean.*
 * to optimize size.
 */
PROVIDE(__bss_noclean_start = .);
*(.bss.noclean.*)
PROVIDE(__bss_noclean_end = .);
. = ALIGN(8);
PROVIDE(__bss_start = .);
*(.bss)
*(.bss.*)
*(.dynbss)
*(COMMON)
. = ALIGN(8);
PROVIDE(__bss_end = .);
}
/*
 * We should make the bootloader know the size of memory we need,
 * so we MUST calc the image's size with section '.bss'.
 */
_end = .;
/* Stabs debugging sections. */
.stab 0 : { *(.stab) }
.stabstr 0 : { *(.stabstr) }
.stab.excl 0 : { *(.stab.excl) }
.stab.exclstr 0 : { *(.stab.exclstr) }
.stab.index 0 : { *(.stab.index) }
.stab.indexstr 0 : { *(.stab.indexstr) }
.comment 0 : { *(.comment) }
/* DWARF debug sections.
 * Symbols in the DWARF debugging sections are relative to the beginning
 * of the section so we begin them at 0. */
/* DWARF 1 */
.debug 0 : { *(.debug) }
.line 0 : { *(.line) }
/* GNU DWARF 1 extensions */
.debug_srcinfo 0 : { *(.debug_srcinfo) }
.debug_sfnames 0 : { *(.debug_sfnames) }
/* DWARF 1.1 and DWARF 2 */
.debug_aranges 0 : { *(.debug_aranges) }
.debug_pubnames 0 : { *(.debug_pubnames) }
/* DWARF 2 */
.debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
.debug_abbrev 0 : { *(.debug_abbrev) }
.debug_line 0 : { *(.debug_line) }
.debug_frame 0 : { *(.debug_frame) }
.debug_str 0 : { *(.debug_str) }
.debug_loc 0 : { *(.debug_loc) }
.debug_macinfo 0 : { *(.debug_macinfo) }
/* SGI/MIPS DWARF 2 extensions */
.debug_weaknames 0 : { *(.debug_weaknames) }
.debug_funcnames 0 : { *(.debug_funcnames) }
.debug_typenames 0 : { *(.debug_typenames) }
.debug_varnames 0 : { *(.debug_varnames) }
__data_size = SIZEOF(.data);
__bss_size = SIZEOF(.bss);
}
|
vandercookking/h7_device_RTT
| 1,681
|
rt-thread/libcpu/ia32/trapisr_gcc.S
|
/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date Author Notes
 * 2006-09-15 QiuYi The first version.
 */
/**
 * @addtogroup I386
 */
/*@{*/
/* Emit an aligned global function label. */
#define ENTRY(proc)\
.align 2;\
.globl proc;\
.type proc,@function;\
proc:
/* Trap stub for CPU exceptions that push a hardware error code:
 * push the trap number and jump to the common handler; also append
 * the stub's address to the trap_func table in .data. */
#define TRAPFNC(name,num)\
ENTRY(name)\
pushl $(num);\
jmp _traps;\
.data;\
.long name;\
.text
/* Same, for exceptions WITHOUT a hardware error code: push a dummy 0
 * first so the stack frame layout is identical in both cases. */
#define TRAPFNC_NOEC(name,num)\
ENTRY(name)\
pushl $0;\
pushl $(num);\
jmp _traps;\
.data;\
.long name;\
.text
.globl trap_func
.data
.align 4
.type trap_func,@object
trap_func :
.text
/* CPU traps.
 * NOTE(review): the vector numbers here differ from the canonical x86
 * assignment (e.g. page fault is 14 on real hardware, 13 here) -- they must
 * match whatever IDT setup this port installs; confirm against trap.c. */
TRAPFNC_NOEC(Xdivide, 0)
TRAPFNC_NOEC(Xdebug, 1)
TRAPFNC_NOEC(Xnmi, 2)
TRAPFNC_NOEC(Xbrkpt, 3)
TRAPFNC_NOEC(Xoflow, 4)
TRAPFNC_NOEC(Xbound, 5)
TRAPFNC_NOEC(Xillop, 6)
TRAPFNC_NOEC(Xdevice, 7)
TRAPFNC (Xdblflt, 8)
TRAPFNC (Xtss, 9)
TRAPFNC (Xsegnp, 10)
TRAPFNC (Xstack, 11)
TRAPFNC (Xgpflt, 12)
TRAPFNC (Xpgflt, 13)
TRAPFNC_NOEC(Xfperr, 14)
TRAPFNC (Xalign, 15)
/* default handler -- not for any specific trap */
TRAPFNC (Xdefault, 500)
.p2align 4,0x90
.globl _traps
.type _traps,@function
.globl rt_interrupt_enter
.globl rt_interrupt_leave
/* Common trap handler: build the frame (ds, es, 8 GP regs), load the
 * kernel data segment, then dispatch to rt_hw_trap_irq(trapno). */
_traps:
push %ds
push %es
pushal
movw $0x10,%ax
movw %ax,%ds
movw %ax,%es
pushl %esp
call rt_interrupt_enter
movl %esp, %eax
addl $0x2c,%eax /* get trapno: skip saved esp + pushal(32) + es/ds(8) */
movl (%eax),%eax
pushl %eax /*push trapno*/
call rt_hw_trap_irq
addl $4,%esp
call rt_interrupt_leave
popl %esp
popal
pop %es
pop %ds
add $8,%esp /* drop trapno and error code before iret */
iret
/*@}*/
|
vandercookking/h7_device_RTT
| 2,412
|
rt-thread/libcpu/ia32/context_gcc.S
|
/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date Author Notes
 * 2006-09-15 QiuYi The first version
 * 2006-10-09 Bernard add rt_hw_context_switch_to implementation
 */
/**
 * @addtogroup ia32
 */
/*@{*/
/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
 *
 * Builds a fake interrupt frame (eflags, cs, eip) on the current stack so
 * that the restore path can use a single iret for both voluntary and
 * interrupt-driven switches.
 */
.globl rt_hw_context_switch
rt_hw_context_switch:
pushfl /*pushed eflags*/
/*
 * add by ssslady@gmail.com 2009-10-14
 * When we return again the esp should not be changed.
 * The old code changed the esp to esp-4 :-(.
 * A protection fault may occur for an image created by some compilers, e.g. gcc in Fedora 11
 * -------------------------------------------------------------------------
 * entry old code new code
 * EIP ->return esp EIP FLAGS ->return esp
 * ... FLAGS ->return esp CS
 * CS EIP
 * EIP
 */
popl %eax /*get flags*/
popl %ebx /*get eip*/
pushl %eax /*push flags*/
push %cs /*push cs*/
pushl %ebx /*push eip*/
/*-------------------------------------------------------------------
 */
/*push %cs*/ /*push cs register*/
/*pushl 0x8(%esp)*/ /*pushed eip register*/
pushl $0 /*fill irqno slot so the frame matches the interrupt path*/
push %ds /*push ds register*/
push %es /*push es register*/
pushal /*push eax,ecx,edx,ebx,esp,ebp,esi,edi registers*/
/*movl 0x40(%esp), %eax*/ /*to thread TCB*/
/*movl 0x3c(%esp), %ebx*/ /*from thread TCB*/
movl 0x3c(%esp), %eax /*to thread TCB (2nd C argument)*/
movl 0x38(%esp), %ebx /*from thread TCB (1st C argument)*/
movl %esp, (%ebx) /*store esp in preempted task's TCB*/
movl (%eax), %esp /*get new task stack pointer*/
popal /*restore new task TCB*/
pop %es
pop %ds
add $4,%esp /*skip irqno*/
iret /*pops eip, cs, eflags saved above*/
/*
 * void rt_hw_context_switch_to(rt_uint32 to);
 * First switch: no "from" context to save, just load and iret.
 */
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
push %ebp
movl %esp, %ebp
movl 0x8(%ebp), %eax /* to thread TCB */
movl (%eax), %esp /* get new task stack pointer */
popal /* restore new task TCB*/
pop %es
pop %ds
add $4, %esp /* skip irqno */
iret
|
vandercookking/h7_device_RTT
| 2,338
|
rt-thread/libcpu/ia32/hdisr_gcc.S
|
/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date Author Notes
 * 2006-09-15 QiuYi The first version
 */
/**
 * @addtogroup I386
 */
/*@{*/
/* Emit an aligned global function label. */
#define ENTRY(proc)\
.align 2;\
.globl proc;\
.type proc,@function;\
proc:
/* Hardware-IRQ stub: push the IRQ number, jump to the common handler,
 * and append the stub's address to the hdinterrupt_func table in .data. */
#define HDINTERRUPTFNC(name,num) \
ENTRY(name)\
pushl $(num);\
jmp _hdinterrupts;\
.data;\
.long name;\
.text
.globl hdinterrupt_func
.data
.align 4
.type hdinterrupt_func,@object
hdinterrupt_func :
.text
/* the external device interrupts (PIC IRQ 0..15) */
HDINTERRUPTFNC(irq0, 0)
HDINTERRUPTFNC(irq1, 1)
HDINTERRUPTFNC(irq2, 2)
HDINTERRUPTFNC(irq3, 3)
HDINTERRUPTFNC(irq4, 4)
HDINTERRUPTFNC(irq5, 5)
HDINTERRUPTFNC(irq6, 6)
HDINTERRUPTFNC(irq7, 7)
HDINTERRUPTFNC(irq8, 8)
HDINTERRUPTFNC(irq9, 9)
HDINTERRUPTFNC(irq10, 10)
HDINTERRUPTFNC(irq11, 11)
HDINTERRUPTFNC(irq12, 12)
HDINTERRUPTFNC(irq13, 13)
HDINTERRUPTFNC(irq14, 14)
HDINTERRUPTFNC(irq15, 15)
.p2align 4,0x90
.globl _hdinterrupts
.type _hdinterrupts,@function
.globl rt_interrupt_enter
.globl rt_interrupt_leave
.globl rt_hw_isr
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
/* Common IRQ handler: save the frame, dispatch rt_hw_isr(vector), then
 * either return normally or perform the deferred context switch requested
 * by rt_hw_context_switch_interrupt(). */
_hdinterrupts:
push %ds
push %es
pushal
movw $0x10, %ax /* load kernel data segment selector */
movw %ax, %ds
movw %ax, %es
pushl %esp
call rt_interrupt_enter
movl %esp, %eax /* copy esp to eax */
addl $0x2c, %eax /* move to vector address: saved esp + pushal(32) + es/ds(8) */
movl (%eax), %eax /* vector(eax) = *eax */
pushl %eax /* push argument : int vector */
call rt_hw_isr
add $4, %esp /* restore argument */
call rt_interrupt_leave
/* if rt_thread_switch_interrupt_flag set, jump to _interrupt_thread_switch and don't return */
movl $rt_thread_switch_interrupt_flag, %eax
movl (%eax), %ebx
cmp $0x1, %ebx
jz _interrupt_thread_switch
popl %esp
popal
pop %es
pop %ds
add $4,%esp /* drop IRQ number */
iret
_interrupt_thread_switch:
popl %esp
movl $0x0, %ebx /* clear the switch-request flag (eax still holds its address) */
movl %ebx, (%eax)
movl $rt_interrupt_from_thread, %eax
movl (%eax), %ebx
movl %esp, (%ebx) /* save current esp into the outgoing thread's TCB */
movl $rt_interrupt_to_thread, %ecx
movl (%ecx), %edx
movl (%edx), %esp /* load the incoming thread's saved esp */
popal
pop %es
pop %ds
add $4,%esp /* drop IRQ number */
iret
/*@}*/
|
vandercookking/h7_device_RTT
| 1,765
|
rt-thread/libcpu/ia32/start_gcc.S
|
/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date Author Notes
 * 2006-09-15 QiuYi The first version.
 * 2012-02-15 aozima update.
 */
/* the magic number for the multiboot header. */
#define MULTIBOOT_HEADER_MAGIC 0x1BADB002
/* the flags for the multiboot header (page-align modules | provide mem info). */
#define MULTIBOOT_HEADER_FLAGS 0x00000003
#define CONFIG_STACKSIZE 8192
/**
 * @addtogroup I386
 */
/*@{*/
.section .init, "ax"
/* the system entry: jumped to by a Multiboot-compliant bootloader (e.g. GRUB) */
.globl _start
_start:
jmp multiboot_entry
/* Align 32 bits boundary. */
.align 4
/* multiboot header. */
multiboot_header:
/* magic */
.long MULTIBOOT_HEADER_MAGIC
/* flags */
.long MULTIBOOT_HEADER_FLAGS
/* checksum: magic + flags + checksum must equal 0 */
.long -(MULTIBOOT_HEADER_MAGIC + MULTIBOOT_HEADER_FLAGS)
multiboot_entry:
movl $(_end + 0x1000),%esp /* temporary boot stack just past the image */
/* reset eflags. */
pushl $0
popf
/* rebuild global descriptor table */
lgdt __gdtdesc
movl $0x10,%eax /* selector 0x10 = data segment */
movw %ax,%ds
movw %ax,%es
movw %ax,%ss
ljmp $0x08, $relocated /* far jump reloads cs with the code selector */
relocated:
/* push the pointer to the multiboot information structure. */
pushl %ebx
/* push the magic value. */
pushl %eax
call rtthread_startup
/* never get here */
spin:
hlt
jmp spin
.data
.p2align 2
/* Minimal flat GDT: null, code, data (both 8 MB, base 0). */
__gdt:
.word 0,0,0,0
.word 0x07FF /* 8Mb - limit=2047 */
.word 0x0000
.word 0x9A00 /* code read/exec */
.word 0x00C0
.word 0x07FF /* 8Mb - limit=2047 */
.word 0x0000
.word 0x9200 /* data read/write */
.word 0x00C0
__gdtdesc:
.word 0x17 /* GDT limit: 3 descriptors * 8 - 1 */
.long __gdt
/*@}*/
|
vandercookking/h7_device_RTT
| 2,724
|
rt-thread/libcpu/c-sky/ck802/contex_ck802_gcc.S
|
/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date Author Notes
 * 2017-01-01 Urey first version
 * 2018-06-05 tanek clean code
 */
.file "contex_ck802.S"
/* C-SKY CK802 context switch: switches are requested by writing the
 * task-switch pending bit (VIC_TSPDR); the actual save/restore happens
 * in PendSV_Handler. */
#undef VIC_TSPDR
#define VIC_TSPDR 0XE000EC08
.global rt_thread_switch_interrupt_flag
.global rt_interrupt_from_thread
.global rt_interrupt_to_thread
.text
.align 2
/*
 * rt_base_t rt_hw_interrupt_disable(void);
 * Returns the current PSR in r0 and clears the interrupt-enable bit.
 */
.global rt_hw_interrupt_disable
.type rt_hw_interrupt_disable, %function
rt_hw_interrupt_disable:
mfcr r0, psr
psrclr ie
rts
/*
 * void rt_hw_interrupt_enable(rt_base_t psr);
 * Restores the PSR previously returned by rt_hw_interrupt_disable().
 */
.global rt_hw_interrupt_enable
.type rt_hw_interrupt_enable, %function
rt_hw_interrupt_enable:
mtcr r0, psr
rts
/*
 * void rt_hw_context_switch_to(rt_uint32 to);
 * R0 --> to
 * First switch: no context to save, jump straight into the restore path.
 */
.global rt_hw_context_switch_to
.type rt_hw_context_switch_to, %function
rt_hw_context_switch_to:
lrw r2, rt_interrupt_to_thread
stw r0, (r2)
/* set from thread = 0 so PendSV knows there is nothing to save */
lrw r2, rt_interrupt_from_thread
movi r0, 0
stw r0, (r2)
psrclr ie
jbr __tspend_handler_nosave
/*
 * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
 * r0 --> from
 * r1 --> to
 * Records the pair and pends a task switch via VIC_TSPDR.
 */
.global rt_hw_context_switch_interrupt
.type rt_hw_context_switch_interrupt, %function
rt_hw_context_switch_interrupt:
lrw r2, rt_interrupt_from_thread /* set rt_interrupt_from_thread */
stw r0, (r2)
lrw r2, rt_interrupt_to_thread /* set rt_interrupt_to_thread */
stw r1, (r2)
lrw r0, VIC_TSPDR
bgeni r1, 0 /* r1 = 1: write the task-switch pending bit */
stw r1, (r0)
rts
/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to)
 * r0 --> from
 * r1 --> to
 * Same mechanism as the interrupt variant on this core.
 */
.global rt_hw_context_switch
.type rt_hw_context_switch, %function
rt_hw_context_switch:
lrw r2, rt_interrupt_from_thread /* set rt_interrupt_from_thread */
stw r0, (r2)
lrw r2, rt_interrupt_to_thread /* set rt_interrupt_to_thread */
stw r1, (r2)
lrw r0, VIC_TSPDR
bgeni r1, 0
stw r1, (r0)
rts
/* Task-switch handler. Stack frame (68 bytes):
 *   [sp+0..55]  r0-r13
 *   [sp+56]     r15 (link register)
 *   [sp+60]     epsr
 *   [sp+64]     epc
 */
.global PendSV_Handler
.type PendSV_Handler, %function
PendSV_Handler:
subi sp, 68
stm r0-r13, (sp)
stw r15, (sp, 56)
mfcr r0, epsr
stw r0, (sp, 60)
mfcr r0, epc
stw r0, (sp, 64)
lrw r0, rt_interrupt_from_thread
ldw r1, (r0)
stw sp, (r1) /* save current sp into the outgoing thread's TCB */
__tspend_handler_nosave:
lrw r6, rt_interrupt_to_thread
lrw r7, rt_interrupt_from_thread
ldw r8, (r6)
stw r8, (r7) /* from = to, ready for the next switch */
ldw sp, (r8) /* load the incoming thread's saved sp */
#ifdef CONFIG_STACK_GUARD
mfcr r3, cr<0, 4>
bseti r3, 0
bseti r3, 1
mtcr r3, cr<0, 4>
#endif
ldw r0, (sp, 64)
mtcr r0, epc
ldw r0, (sp, 60)
bseti r0, 6 /* make sure interrupts are enabled in the restored epsr */
mtcr r0, epsr
ldw r15, (sp, 56)
ldm r0-r13, (sp)
addi sp, 68
rte
|
vandercookking/h7_device_RTT
| 7,001
|
rt-thread/libcpu/sparc-v8/bm3803/vector_gcc.S
|
/*
 * Copyright (c) 2020, Shenzhen Academy of Aerospace Technology
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date Author Notes
 * 2020-10-16 Dystopia the first version
 */
/* SPARC V8 trap table for BM3803. Each trap table entry is 4 instructions;
 * the TRAP* macros fill exactly one entry. */
#define TRAPL(H) mov %g0, %l0; sethi %hi(H), %l4; jmp %l4 + %lo(H); nop;
#define TRAP(H) mov %psr, %l0; sethi %hi(H), %l4; jmp %l4 + %lo(H); nop;
#define TRAP_ENTRY(H) mov %psr, %l0; sethi %hi(H), %l4; jmp %l4 + %lo(H); mov %tbr, %l3;
#define BAD_TRAP ta 0; nop; nop; nop;
#define SOFT_TRAP BAD_TRAP
#define NWINDOWS 8
.section .vectors, "ax"
.globl _ISR_Handler
.globl _window_overflow
.globl _window_underflow
.globl _reset
.globl _context_switch
.globl system_vectors
system_vectors:
TRAPL(_reset); ! 00 reset trap
BAD_TRAP; ! 01 instruction_access_exception
BAD_TRAP; ! 02 illegal_instruction
BAD_TRAP; ! 03 privileged_instruction
BAD_TRAP; ! 04 fp_disabled
TRAP(_window_overflow); ! 05 window_overflow
TRAP(_window_underflow); ! 06 window_underflow
BAD_TRAP; ! 07 memory_address_not_aligned
BAD_TRAP; ! 08 fp_exception
BAD_TRAP; ! 09 data_access_exception
BAD_TRAP; ! 0A tag_overflow
BAD_TRAP; ! 0B undefined
BAD_TRAP; ! 0C undefined
BAD_TRAP; ! 0D undefined
BAD_TRAP; ! 0E undefined
BAD_TRAP; ! 0F undefined
BAD_TRAP; ! 10 undefined
/* Interrupt entries */
TRAP_ENTRY(_ISR_Handler) ! 11 interrupt level 1
TRAP_ENTRY(_ISR_Handler) ! 12 interrupt level 2
TRAP_ENTRY(_ISR_Handler) ! 13 interrupt level 3
TRAP_ENTRY(_ISR_Handler) ! 14 interrupt level 4
TRAP_ENTRY(_ISR_Handler) ! 15 interrupt level 5
TRAP_ENTRY(_ISR_Handler) ! 16 interrupt level 6
TRAP_ENTRY(_ISR_Handler) ! 17 interrupt level 7
TRAP_ENTRY(_ISR_Handler) ! 18 interrupt level 8
TRAP_ENTRY(_ISR_Handler) ! 19 interrupt level 9
TRAP_ENTRY(_ISR_Handler) ! 1A interrupt level 10
TRAP_ENTRY(_ISR_Handler) ! 1B interrupt level 11
TRAP_ENTRY(_ISR_Handler) ! 1C interrupt level 12
TRAP_ENTRY(_ISR_Handler) ! 1D interrupt level 13
TRAP_ENTRY(_ISR_Handler) ! 1E interrupt level 14
TRAP_ENTRY(_ISR_Handler) ! 1F interrupt level 15
BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 20 - 23 undefined
BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 24 - 27 undefined
BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 28 - 2B undefined
BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 2C - 2F undefined
BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 30 - 33 undefined
BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 34 - 37 undefined
BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 38 - 3B undefined
BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 3C - 3F undefined
BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 40 - 43 undefined
BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 44 - 47 undefined
BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 48 - 4B undefined
BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 4C - 4F undefined
BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 50 - 53 undefined
BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 54 - 57 undefined
BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 58 - 5B undefined
BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 5C - 5F undefined
BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 60 - 63 undefined
BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 64 - 67 undefined
BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 68 - 6B undefined
BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 6C - 6F undefined
BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 70 - 73 undefined
BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 74 - 77 undefined
BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 78 - 7B undefined
BAD_TRAP; BAD_TRAP; BAD_TRAP; BAD_TRAP; ! 7C - 7F undefined
/* Software traps: ta 2 (0x82) and ta 3 (0x83) enter the context switcher */
SOFT_TRAP; SOFT_TRAP; TRAP_ENTRY(_context_switch); TRAP_ENTRY(_context_switch) ! 80 - 83
SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! 84 - 87
SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! 88 - 8B
SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! 8C - 8F
SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! 90 - 93
SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! 94 - 97
SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! 98 - 9B
SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! 9C - 9F
SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! A0 - A3
SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! A4 - A7
SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! A8 - AB
SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! AC - AF
SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! B0 - B3
SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! B4 - B7
SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! B8 - BB
SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! BC - BF
SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! C0 - C3
SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! C4 - C7
SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! C8 - CB
SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! CC - CF
SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! D0 - D3
SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! D4 - D7
SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! D8 - DB
SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! DC - DF
SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! E0 - E3
SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! E4 - E7
SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! E8 - EB
SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! EC - EF
SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! F0 - F3
SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! F4 - F7
SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! F8 - FB
SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; SOFT_TRAP; ! FC - FF
/* Window-overflow trap: save the oldest register window to its stack
 * frame and rotate WIM right by one. */
_window_overflow:
mov %wim, %l3 ! Calculate next WIM
mov %g1, %l7
srl %l3, 1, %g1
sll %l3, NWINDOWS - 1, %l4
or %l4, %g1, %g1
save ! Get into window to be saved.
mov %g1, %wim
nop
nop
nop
std %l0, [%sp + 0]
std %l2, [%sp + 8]
std %l4, [%sp + 16]
std %l6, [%sp + 24]
std %i0, [%sp + 32]
std %i2, [%sp + 40]
std %i4, [%sp + 48]
std %i6, [%sp + 56]
restore ! Go back to trap window.
mov %l7, %g1
jmp %l1 ! Re-execute save.
rett %l2
/* Window-underflow trap: reload the needed register window from the
 * stack and rotate WIM left by one. */
_window_underflow:
mov %wim, %l3 ! Calculate next WIM
sll %l3, 1, %l4
srl %l3, NWINDOWS - 1, %l5
or %l5, %l4, %l5
mov %l5, %wim
nop
nop
nop
restore ! Two restores to get into the
restore ! window to restore
ldd [%sp + 0], %l0 ! Restore window from the stack
ldd [%sp + 8], %l2
ldd [%sp + 16], %l4
ldd [%sp + 24], %l6
ldd [%sp + 32], %i0
ldd [%sp + 40], %i2
ldd [%sp + 48], %i4
ldd [%sp + 56], %i6
save ! Get back to the trap window.
save
jmp %l1 ! Re-execute restore.
rett %l2
|
vandercookking/h7_device_RTT
| 1,443
|
rt-thread/libcpu/sparc-v8/bm3803/context_gcc.S
|
/*
 * Copyright (c) 2020, Shenzhen Academy of Aerospace Technology
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date Author Notes
 * 2020-10-16 Dystopia the first version
 */
#define SPARC_PSR_PIL_MASK 0x00000F00
#define SPARC_PSR_ET_MASK 0x00000020
/*
 * rt_base_t rt_hw_interrupt_disable();
 * Returns the old PSR in o0 and raises PIL to mask all interrupt levels.
 */
.globl rt_hw_interrupt_disable
rt_hw_interrupt_disable:
mov %psr, %o0
or %o0, SPARC_PSR_PIL_MASK, %o1
mov %o1, %psr
nop ! three nops: PSR write has a 3-instruction delay
nop
nop
retl
nop
/*
 * void rt_hw_interrupt_enable(rt_base_t level);
 * Restores the PSR previously returned by rt_hw_interrupt_disable().
 */
.globl rt_hw_interrupt_enable
rt_hw_interrupt_enable:
mov %o0, %psr
nop
nop
nop
retl
nop
/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
 * o0 --> from
 * o1 --> to
 * Enters the _context_switch trap handler via software trap 0x82 (ta 2).
 */
.globl rt_hw_context_switch
rt_hw_context_switch:
ta 2
retl
nop
/*
 * void rt_hw_context_switch_to(rt_uint32 to);
 * o0 --> to
 * Software trap 0x83 (ta 3): switch without saving a "from" context.
 */
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
mov %o0, %o1 ! the trap handler reads "to" from i1/o1
ta 3
retl
nop
/*
 * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
 * Records the from/to pair for the deferred switch done at the end of
 * _ISR_Handler; only the first request in a nesting records "from".
 */
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
set rt_thread_switch_interrupt_flag, %o2
ld [%o2], %o3
cmp %o3, 1
be _reswitch ! already pending: only update the "to" thread
nop
mov 1, %o3
st %o3, [%o2]
set rt_interrupt_from_thread, %o2
st %o0, [%o2]
_reswitch:
set rt_interrupt_to_thread, %o2
st %o1, [%o2]
retl
nop
|
vandercookking/h7_device_RTT
| 10,519
|
rt-thread/libcpu/sparc-v8/bm3803/start_gcc.S
|
/*
 * Copyright (c) 2020, Shenzhen Academy of Aerospace Technology
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date Author Notes
 * 2020-10-16 Dystopia the first version
 *
 * BM3803 (SPARC V8) startup, interrupt entry and context switch.
 */
#define PSR_INIT 0x10C0
#define PREGS 0x80000000
#define IMASK 0x90
#define ICLEAR 0x9c
#define NWINDOWS 8
#define CPU_INTERRUPT_FRAME_SIZE (0x60 + 0x50 + 34 * 4)
#define SPARC_PSR_PIL_MASK 0x00000F00
#define SPARC_PSR_ET_MASK 0x00000020
#define SPARC_PSR_CWP_MASK 0x07
.text
.globl system_vectors
.globl _reset
.globl _context_switch
/* Reset entry: initialize PSR/WIM/TBR, clear all register windows,
 * program the interrupt controller, init the FPU, zero .bss, set up a
 * boot stack and enter rtthread_startup(). */
_reset:
mov %g0, %asr16
mov %g0, %asr17
nop
nop
nop
set PSR_INIT, %g1
mov %g1, %psr
nop
nop
nop
mov %g0, %wim
nop
nop
nop
mov %g0, %g1
mov %g0, %g2
mov %g0, %g3
mov %g0, %g4
mov %g0, %g5
mov %g0, %g6
mov %g0, %g7
mov 0x8, %g1
/* clear locals and ins of every register window (NWINDOWS iterations) */
1:
mov %g0, %l0
mov %g0, %l1
mov %g0, %l2
mov %g0, %l3
mov %g0, %l4
mov %g0, %l5
mov %g0, %l6
mov %g0, %l7
mov %g0, %i0
mov %g0, %i1
mov %g0, %i2
mov %g0, %i3
mov %g0, %i4
mov %g0, %i5
mov %g0, %i6
mov %g0, %i7
subcc %g1, 1, %g1
save
bne 1b
nop
set 2, %g1
mov %g1, %wim
nop
nop
nop
sethi %hi(system_vectors), %g1
mov %g1, %tbr
nop
nop
nop
/* interrupt controller: clear all pending, mask everything */
set PREGS, %g1
set 0xffff, %g2
st %g2, [%g1 + ICLEAR]
st %g0, [%g1 + IMASK]
set 0x7C47907F, %g2
st %g2, [%g1 + 4]
/* enable traps (ET bit) */
set PSR_INIT | 0x20, %g1
mov %g1, %psr
nop
nop
nop
/* initialize FSR and all FP register pairs */
set _fsrinit, %g1
ld [%g1], %fsr
nop
nop
nop
set _fpdata, %g1
ldd [%g1], %f0
ldd [%g1], %f2
ldd [%g1], %f4
ldd [%g1], %f6
ldd [%g1], %f8
ldd [%g1], %f10
ldd [%g1], %f12
ldd [%g1], %f14
ldd [%g1], %f16
ldd [%g1], %f18
ldd [%g1], %f20
ldd [%g1], %f22
ldd [%g1], %f24
ldd [%g1], %f26
ldd [%g1], %f28
ldd [%g1], %f30
/* zero .bss, 8 bytes at a time */
set __bss_start, %g2
set __bss_end, %g3
mov %g0, %g1
bss_loop:
std %g0, [%g2]
add %g2, 8, %g2
cmp %g2, %g3
bleu,a bss_loop
nop
/* boot stack top (board-specific RAM address) */
set 0x401FFF00, %g1
mov %g1, %sp
/* start RT-Thread Kernel */
call rtthread_startup
nop
/*
 * Interrupt entry. Trap-window locals set by the vector macros:
 * l0 = psr
 * l1 = pc
 * l2 = npc
 * l3 = tbr
 */
.globl _ISR_Handler
_ISR_Handler:
mov %g4, %l4
mov %g5, %l5
/* if the trap window is the invalid window, flush one window first */
mov %wim, %g4
srl %g4, %l0, %g5
cmp %g5, 1
bne dont_do_the_window
nop
srl %g4, 1, %g5
sll %g4, NWINDOWS - 1, %g4
or %g4, %g5, %g4
save
mov %g4, %wim
nop
nop
nop
std %l0, [%sp + 0x00]
std %l2, [%sp + 0x08]
std %l4, [%sp + 0x10]
std %l6, [%sp + 0x18]
std %i0, [%sp + 0x20]
std %i2, [%sp + 0x28]
std %i4, [%sp + 0x30]
std %i6, [%sp + 0x38]
restore
nop
dont_do_the_window:
/* build the interrupt frame and save globals, ins, Y and the FPU state */
sub %fp, CPU_INTERRUPT_FRAME_SIZE, %sp
std %l0, [%sp + 0x60]
st %l2, [%sp + 0x68]
st %g1, [%sp + 0x6c]
std %g2, [%sp + 0x70]
std %l4, [%sp + 0x78]
std %g6, [%sp + 0x80]
std %i0, [%sp + 0x88]
std %i2, [%sp + 0x90]
std %i4, [%sp + 0x98]
std %i6, [%sp + 0xA0]
mov %y, %g1
st %g1, [%sp + 0xA8]
st %l6, [%sp + 0xAc]
std %f0, [%sp + 0xB0 + 8 * 0x0]
std %f2, [%sp + 0xB0 + 8 * 0x1]
std %f4, [%sp + 0xB0 + 8 * 0x2]
std %f6, [%sp + 0xB0 + 8 * 0x3]
std %f8, [%sp + 0xB0 + 8 * 0x4]
std %f10, [%sp + 0xB0 + 8 * 0x5]
std %f12, [%sp + 0xB0 + 8 * 0x6]
std %f14, [%sp + 0xB0 + 8 * 0x7]
std %f16, [%sp + 0xB0 + 8 * 0x8]
std %f18, [%sp + 0xB0 + 8 * 0x9]
std %f20, [%sp + 0xB0 + 8 * 0xA]
std %f22, [%sp + 0xB0 + 8 * 0xB]
std %f24, [%sp + 0xB0 + 8 * 0xC]
std %f26, [%sp + 0xB0 + 8 * 0xD]
std %f28, [%sp + 0xB0 + 8 * 0xE]
std %f30, [%sp + 0xB0 + 8 * 0xF]
st %fsr, [%sp + 0xB0 + 8 * 0x10]
/* raise PIL, re-enable traps, then dispatch the IRQ */
mov %l0, %g5
or %g5, SPARC_PSR_PIL_MASK, %g5
wr %g5, SPARC_PSR_ET_MASK, %psr
nop
nop
nop
call rt_interrupt_enter
nop
and %l3, 0x0FF0, %l3 ! extract trap type from TBR
srl %l3, 4, %o0
mov %sp, %o1
call rt_hw_trap
nop
call rt_interrupt_leave
nop
/* restore the interrupted context */
mov %l0, %psr
nop
nop
nop
ld [%sp + 0xA8], %l5
mov %l5, %y
ldd [%sp + 0x60], %l0
ld [%sp + 0x68], %l2
ld [%sp + 0x6c], %g1
ldd [%sp + 0x70], %g2
ldd [%sp + 0x78], %g4
ldd [%sp + 0x80], %g6
ldd [%sp + 0x88], %i0
ldd [%sp + 0x90], %i2
ldd [%sp + 0x98], %i4
ldd [%sp + 0xA0], %i6
ldd [%sp + 0xB0 + 8 * 0x0], %f0
ldd [%sp + 0xB0 + 8 * 0x1], %f2
ldd [%sp + 0xB0 + 8 * 0x2], %f4
ldd [%sp + 0xB0 + 8 * 0x3], %f6
ldd [%sp + 0xB0 + 8 * 0x4], %f8
ldd [%sp + 0xB0 + 8 * 0x5], %f10
ldd [%sp + 0xB0 + 8 * 0x6], %f12
ldd [%sp + 0xB0 + 8 * 0x7], %f14
ldd [%sp + 0xB0 + 8 * 0x8], %f16
ldd [%sp + 0xB0 + 8 * 0x9], %f18
ldd [%sp + 0xB0 + 8 * 0xA], %f20
ldd [%sp + 0xB0 + 8 * 0xB], %f22
ldd [%sp + 0xB0 + 8 * 0xC], %f24
ldd [%sp + 0xB0 + 8 * 0xD], %f26
ldd [%sp + 0xB0 + 8 * 0xE], %f28
ldd [%sp + 0xB0 + 8 * 0xF], %f30
ld [%sp + 0xB0 + 8 * 0x10], %fsr
nop
nop
nop
/* if returning into the invalid window, reload it from the stack first */
mov %wim, %l4
add %l0, 1, %l6
and %l6, SPARC_PSR_CWP_MASK, %l6
srl %l4, %l6, %l5
cmp %l5, 1
bne good_task_window
nop
sll %l4, 1, %l5
srl %l4, NWINDOWS - 1, %l4
or %l4, %l5, %l4
mov %l4, %wim
nop
nop
nop
restore
ldd [%sp + 0], %l0 ! Restore window from the stack
ldd [%sp + 8], %l2
ldd [%sp + 16], %l4
ldd [%sp + 24], %l6
ldd [%sp + 32], %i0
ldd [%sp + 40], %i2
ldd [%sp + 48], %i4
ldd [%sp + 56], %i6
save
good_task_window:
/* honour a deferred context-switch request, else return from trap */
set rt_thread_switch_interrupt_flag, %l4
ld [%l4], %l5
cmp %l5, 1
be rt_hw_context_switch_interrupt_do
nop
mov %l0, %psr
nop
nop
nop
jmp %l1
rett %l2
/* Deferred switch: flush every window to the stack, store the full
 * context into the outgoing thread's TCB, then rebuild the incoming
 * thread's context the same way in reverse. */
rt_hw_context_switch_interrupt_do:
st %g0, [%l4] ! clear the request flag
sub %fp, 0x20, %sp
std %g0, [%sp + 0x00]
std %g2, [%sp + 0x08]
std %g4, [%sp + 0x10]
std %g6, [%sp + 0x18]
mov %sp, %g3
mov %l1, %g4
mov %l2, %g5
mov %l0, %g6
mov %wim, %g7
mov %g0, %wim
nop
nop
nop
set 0xFFFFFFF8, %g1
and %g1, %g6, %g1
mov %g1, %psr
nop
nop
nop
mov %g0, %g1
save_loop:
save
sub %g3, 0x40, %g3
std %l0, [%g3 + 0x00]
std %l2, [%g3 + 0x08]
std %l4, [%g3 + 0x10]
std %l6, [%g3 + 0x18]
std %i0, [%g3 + 0x20]
std %i2, [%g3 + 0x28]
std %i4, [%g3 + 0x30]
std %i6, [%g3 + 0x38]
inc %g1
cmp %g1, NWINDOWS
bne save_loop
nop
sub %g3, 0x88, %g3
std %f0, [%g3 + 0x00]
std %f2, [%g3 + 0x08]
std %f4, [%g3 + 0x10]
std %f6, [%g3 + 0x18]
std %f8, [%g3 + 0x20]
std %f10, [%g3 + 0x28]
std %f12, [%g3 + 0x30]
std %f14, [%g3 + 0x38]
std %f16, [%g3 + 0x40]
std %f18, [%g3 + 0x48]
std %f20, [%g3 + 0x50]
std %f22, [%g3 + 0x58]
std %f24, [%g3 + 0x60]
std %f26, [%g3 + 0x68]
std %f28, [%g3 + 0x70]
std %f30, [%g3 + 0x78]
mov %y, %g1
st %g1, [%g3 + 0x80]
st %fsr, [%g3 + 0x84]
sub %g3, 0x10, %g3
std %g4, [%g3 + 0x00] ! pc/npc
std %g6, [%g3 + 0x08] ! psr/wim
set rt_interrupt_from_thread, %g1
ld [%g1], %g2
st %g3, [%g2] ! record the saved sp in the outgoing TCB
set rt_interrupt_to_thread, %g1
ld [%g1], %g1
ld [%g1], %g3 ! g3 = incoming thread's saved sp
ldd [%g3 + 0x00], %g4
ldd [%g3 + 0x08], %g6
add %g3, 0x10, %g3
ldd [%g3 + 0x00], %f0
ldd [%g3 + 0x08], %f2
ldd [%g3 + 0x10], %f4
ldd [%g3 + 0x18], %f6
ldd [%g3 + 0x20], %f8
ldd [%g3 + 0x28], %f10
ldd [%g3 + 0x30], %f12
ldd [%g3 + 0x38], %f14
ldd [%g3 + 0x40], %f16
ldd [%g3 + 0x48], %f18
ldd [%g3 + 0x50], %f20
ldd [%g3 + 0x58], %f22
ldd [%g3 + 0x60], %f24
ldd [%g3 + 0x68], %f26
ldd [%g3 + 0x70], %f28
ldd [%g3 + 0x78], %f30
ld [%g3 + 0x80], %g1
mov %g1, %y
ld [%g3 + 0x84], %fsr
add %g3, 0x88, %g3
set NWINDOWS - 1, %g1
or %g1, %g6, %g1
mov %g1, %psr
nop
nop
nop
mov %g0, %g1
restore_loop:
restore
ldd [%g3 + 0x00], %l0
ldd [%g3 + 0x08], %l2
ldd [%g3 + 0x10], %l4
ldd [%g3 + 0x18], %l6
ldd [%g3 + 0x20], %i0
ldd [%g3 + 0x28], %i2
ldd [%g3 + 0x30], %i4
ldd [%g3 + 0x38], %i6
add %g3, 0x40, %g3
inc %g1
cmp %g1, NWINDOWS
bne restore_loop
nop
mov %g6, %psr
nop
nop
nop
mov %g7, %wim
nop
nop
nop
mov %g4, %l1
mov %g5, %l2
mov %g3, %sp
ldd [%sp + 0x00], %g0
ldd [%sp + 0x08], %g2
ldd [%sp + 0x10], %g4
ldd [%sp + 0x18], %g6
add %sp, 0x20, %fp
jmp %l1
rett %l2
/*
 * Context-switch trap (ta 2 / ta 3). Trap-window locals:
 * l0 = psr
 * l1 = pc
 * l2 = npc
 * l3 = tbr
 */
_context_switch:
mov %l2, %l1 ! return past the ta instruction
add %l2, 4, %l2
mov %g4, %l4
mov %g5, %l5
/* flush the invalid window if the trap landed in it */
mov %wim, %g4
srl %g4, %l0, %g5
cmp %g5, 1
bne good_window
nop
srl %g4, 1, %g5
sll %g4, NWINDOWS - 1, %g4
or %g4, %g5, %g4
save
mov %g4, %wim
nop
nop
nop
std %l0, [%sp + 0x00]
std %l2, [%sp + 0x08]
std %l4, [%sp + 0x10]
std %l6, [%sp + 0x18]
std %i0, [%sp + 0x20]
std %i2, [%sp + 0x28]
std %i4, [%sp + 0x30]
std %i6, [%sp + 0x38]
restore
nop
good_window:
/* trap 0x82 = switch(from, to): save the outgoing context first;
 * trap 0x83 = switch_to(to): skip straight to the restore */
and %l3, 0x0FF0, %l3
srl %l3, 4, %l4
cmp %l4, 0x82
bne switch_to
nop
sub %fp, 0x20, %sp
std %g0, [%sp + 0x00]
std %g2, [%sp + 0x08]
std %g4, [%sp + 0x10]
std %g6, [%sp + 0x18]
mov %sp, %g3
mov %l1, %g4
mov %l2, %g5
mov %l0, %g6
mov %wim, %g7
mov %g0, %wim
nop
nop
nop
set 0xFFFFFFF8, %g1
and %g1, %g6, %g1
mov %g1, %psr
nop
nop
nop
mov %g0, %g1
save_window:
save
sub %g3, 0x40, %g3
std %l0, [%g3 + 0x00]
std %l2, [%g3 + 0x08]
std %l4, [%g3 + 0x10]
std %l6, [%g3 + 0x18]
std %i0, [%g3 + 0x20]
std %i2, [%g3 + 0x28]
std %i4, [%g3 + 0x30]
std %i6, [%g3 + 0x38]
inc %g1
cmp %g1, NWINDOWS
bne save_window
nop
sub %g3, 0x88, %g3
std %f0, [%g3 + 0x00]
std %f2, [%g3 + 0x08]
std %f4, [%g3 + 0x10]
std %f6, [%g3 + 0x18]
std %f8, [%g3 + 0x20]
std %f10, [%g3 + 0x28]
std %f12, [%g3 + 0x30]
std %f14, [%g3 + 0x38]
std %f16, [%g3 + 0x40]
std %f18, [%g3 + 0x48]
std %f20, [%g3 + 0x50]
std %f22, [%g3 + 0x58]
std %f24, [%g3 + 0x60]
std %f26, [%g3 + 0x68]
std %f28, [%g3 + 0x70]
std %f30, [%g3 + 0x78]
mov %y, %g1
st %g1, [%g3 + 0x80]
st %fsr, [%g3 + 0x84]
sub %g3, 0x10, %g3
std %g4, [%g3 + 0x00]
std %g6, [%g3 + 0x08]
mov %g6, %psr
nop
nop
nop
st %g3, [%i0] ! *from = saved sp
switch_to:
mov %g0, %wim
nop
nop
nop
ld [%i1], %g3 ! g3 = incoming thread's saved sp (*to)
ldd [%g3 + 0x00], %g4
ldd [%g3 + 0x08], %g6
add %g3, 0x10, %g3
ldd [%g3 + 0x00], %f0
ldd [%g3 + 0x08], %f2
ldd [%g3 + 0x10], %f4
ldd [%g3 + 0x18], %f6
ldd [%g3 + 0x20], %f8
ldd [%g3 + 0x28], %f10
ldd [%g3 + 0x30], %f12
ldd [%g3 + 0x38], %f14
ldd [%g3 + 0x40], %f16
ldd [%g3 + 0x48], %f18
ldd [%g3 + 0x50], %f20
ldd [%g3 + 0x58], %f22
ldd [%g3 + 0x60], %f24
ldd [%g3 + 0x68], %f26
ldd [%g3 + 0x70], %f28
ldd [%g3 + 0x78], %f30
ld [%g3 + 0x80], %g1
mov %g1, %y
ld [%g3 + 0x84], %fsr
add %g3, 0x88, %g3
set NWINDOWS - 1, %g1
or %g1, %g6, %g1
mov %g1, %psr
nop
nop
nop
mov %g0, %g1
restore_window:
restore
ldd [%g3 + 0x00], %l0
ldd [%g3 + 0x08], %l2
ldd [%g3 + 0x10], %l4
ldd [%g3 + 0x18], %l6
ldd [%g3 + 0x20], %i0
ldd [%g3 + 0x28], %i2
ldd [%g3 + 0x30], %i4
ldd [%g3 + 0x38], %i6
add %g3, 0x40, %g3
inc %g1
cmp %g1, NWINDOWS
bne restore_window
nop
mov %g6, %psr
nop
nop
nop
mov %g7, %wim
nop
nop
nop
mov %g4, %l1
mov %g5, %l2
mov %g3, %sp
ldd [%sp + 0x00], %g0
ldd [%sp + 0x08], %g2
ldd [%sp + 0x10], %g4
ldd [%sp + 0x18], %g6
add %sp, 0x20, %fp
jmp %l1
rett %l2
.data
.align 8
/* initial FP register image (all zero) and initial FSR value */
_fpdata:
.word 0, 0
_fsrinit:
.word 0
|
vandercookking/h7_device_RTT
| 8,472
|
rt-thread/libcpu/ti-dsp/c28x/context.s
|
;
; Copyright (c) 2006-2022, RT-Thread Development Team
;
; SPDX-License-Identifier: Apache-2.0
;
; Change Logs:
; Date Author Notes
; 2018-09-01 xuzhuoyi the first version.
; 2019-06-17 zhaoxiaowei fix bugs of old c28x interrupt api.
; 2019-07-03 zhaoxiaowei add _rt_hw_calc_csb function to support __rt_ffs.
; 2019-12-05 xiaolifan add support for hardware fpu32
; 2022-06-21 guyunjie trim pendsv (RTOSINT_Handler)
; 2022-08-24 guyunjie fix bugs in context switching
; 2022-10-15 guyunjie add zero-latency interrupt
.ref rt_interrupt_to_thread
.ref rt_interrupt_from_thread
.ref rt_thread_switch_interrupt_flag
.def rtosint_handler
.def rt_hw_get_st0
.def rt_hw_get_st1
.def rt_hw_calc_csb
.def rt_hw_context_switch_interrupt
.def rt_hw_context_switch
.def rt_hw_context_switch_to
.def rt_hw_interrupt_thread_switch
.def rt_hw_interrupt_disable
.def rt_hw_interrupt_enable
;importing settings from compiler and config
.cdecls C,NOLIST
%{
#include <rtconfig.h>
#ifdef __TMS320C28XX_FPU32__
#define __FPU32__ 1
#else
#define __FPU32__ 0
#endif
#ifdef __TMS320C28XX_FPU64__
#define __FPU64__ 1
#else
#define __FPU64__ 0
#endif
#ifdef __TMS320C28XX_VCRC__
#define __VCRC__ 1
#else
#define __VCRC__ 0
#endif
#ifdef RT_USING_ZERO_LATENCY
#define ZERO_LATENCY 1
#ifndef ZERO_LATENCY_INT_MASK
#error ZERO_LATENCY_INT_MASK must be defined for zero latency interrupt
#elif ZERO_LATENCY_INT_MASK & 0x8000
#error RTOS bit (0x8000) must not be set in ZERO_LATENCY_INT_MASK
#endif
#else
#define ZERO_LATENCY 0
#endif
%}
.text
.newblock
;
; rt_base_t rt_hw_interrupt_disable();
;
;
; Disable interrupts and return the previous interrupt state in AL.
; The returned value is later passed back to rt_hw_interrupt_enable.
;
.asmfunc
rt_hw_interrupt_disable:
        .if ZERO_LATENCY
        MOV     AL, IER                     ; return the current IER as the "level"
        AND     IER, #ZERO_LATENCY_INT_MASK ; keep only zero-latency interrupts enabled
        .else
        PUSH    ST1                         ; ST1 holds the INTM bit; return it as the "level"
        SETC    INTM                        ; globally disable maskable interrupts
        POP     AL
        .endif
        MOV     AH, #0                      ; clear upper half so ACC is a clean 16-bit value
        LRETR
.endasmfunc
;
; void rt_hw_interrupt_enable(rt_base_t level);
;
;
; Restore the interrupt state previously returned by rt_hw_interrupt_disable.
; AL = saved level (IER in zero-latency mode, ST1 otherwise).
;
.asmfunc
rt_hw_interrupt_enable:
        .if ZERO_LATENCY
        MOV     IER, AL                 ; restore the saved IER mask
        .else
        PUSH    AL
        POP     ST1                     ; restore ST1 (and with it the INTM bit)
        .endif
        LRETR
.endasmfunc
;
; void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
; ACC --> from
; SP[4] --> to
;
.asmfunc
rt_hw_context_switch_interrupt:
        ; ACC, XAR4-7 are "save on call" following the TI C28x C/C++ compiler
        ; convention and therefore can be used here without saving them first
        ; (the compiler has already saved them before the call).
        ; Reference: TMS320C28x Optimizing C/C++ Compiler user's guide.
        ; Note this convention only applies to normal functions, not to ISRs.
        MOVL    XAR6, ACC               ; XAR6 = "from" thread argument
        MOVL    XAR4, *-SP[4]           ; XAR4 = "to" thread argument (from stack)
        ; If a switch is not already pending, set the pending flag and
        ; record the "from" thread; if one is pending, keep the original
        ; "from" and only update the destination.
        MOVL    XAR5, #rt_thread_switch_interrupt_flag
        MOVL    ACC, *XAR5
        BF      reswitch2, NEQ          ; flag already set (ACC != 0): skip
        MOVB    ACC, #1
        MOVL    *XAR5, ACC              ; rt_thread_switch_interrupt_flag = 1
        MOVL    XAR5, #rt_interrupt_from_thread ; set rt_interrupt_from_thread
        MOVL    *XAR5, XAR6
reswitch2:
        MOVL    XAR5, #rt_interrupt_to_thread   ; set rt_interrupt_to_thread
        MOVL    *XAR5, XAR4
        ; NOTE(review): unlike rt_hw_context_switch this does not raise the
        ; RTOSINT bit in IFR here - presumably the switch is triggered on the
        ; interrupt exit path; confirm against the port's interrupt glue.
        LRETR
.endasmfunc
;
; void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
; ACC --> from
; SP[4] --> to
;
.asmfunc
rt_hw_context_switch:
        ; Thread-level context switch: record from/to threads and trigger the
        ; RTOSINT software interrupt, which performs the actual switch in
        ; rtosint_handler.
        MOVL    XAR6, ACC               ; XAR6 = "from" thread argument
        MOVL    XAR4, *-SP[4]           ; XAR4 = "to" thread argument (from stack)
        ; Set rt_thread_switch_interrupt_flag to 1 unless already pending.
        MOVL    XAR5, #rt_thread_switch_interrupt_flag
        MOVL    ACC, *XAR5
        BF      reswitch1, NEQ          ; flag already set (ACC != 0): skip
        MOVB    ACC, #1
        MOVL    *XAR5, ACC
        MOVL    XAR5, #rt_interrupt_from_thread ; set rt_interrupt_from_thread
        MOVL    *XAR5, XAR6
reswitch1:
        MOVL    XAR5, #rt_interrupt_to_thread   ; set rt_interrupt_to_thread
        MOVL    *XAR5, XAR4
        OR      IFR, #0x8000            ; raise RTOSINT (bit 15) to perform the switch
        LRETR
.endasmfunc
;
; * void rt_hw_context_switch_to(rt_uint32 to);
; * ACC --> to
;
.asmfunc
rt_hw_context_switch_to:
        ; Start the scheduler: switch to the first thread (ACC = to).
        ; Never returns - execution continues inside rtosint_handler.
        MOVL    XAR5, #rt_interrupt_to_thread
        MOVL    *XAR5, ACC
        ; from-thread = 0 tells rtosint_handler to skip the register save.
        MOVL    XAR5, #rt_interrupt_from_thread
        MOVL    XAR4, #0
        MOVL    *XAR5, XAR4
        ; Mark a switch as pending.
        MOVL    XAR5, #rt_thread_switch_interrupt_flag
        MOVL    XAR4, #1
        MOVL    *XAR5, XAR4
        ; Trigger and enable RTOSINT, then enable global interrupts.
        OR      IFR, #0x8000
        OR      IER, #0x8000
        CLRC    INTM
        ; never reach here!
.endasmfunc
;
; RTOSINT (software interrupt) handler: performs the actual thread context
; switch.  Saves the full register context of the "from" thread on its stack,
; records its SP, then loads the "to" thread's SP and restores its context.
; Register save/restore order here must exactly mirror the initial stack
; frame built by the port's rt_hw_stack_init - do not reorder.
;
.asmfunc
rtosint_handler:
        .if ZERO_LATENCY
        ; Mask out non-critical interrupts and enable global interrupts
        ; so rtosint_handler won't block critical (zero-latency) interrupts.
        AND     IER, #ZERO_LATENCY_INT_MASK
        CLRC    INTM
        .endif
        ; The interrupt hardware stacked IER at *-SP[4]; keep the original
        ; IER in AR0 so it can be written back into the stacked frame below.
        MOVL    ACC, *-SP[4]
        MOV     AR0, AL                 ; save original IER
        PUSH    AR1H:AR0H
        PUSH    XAR2
        ; If no switch is pending, this RTOSINT was already handled: exit.
        MOVL    XAR1, #rt_thread_switch_interrupt_flag
        MOVL    ACC, *XAR1
        BF      rtosint_exit, EQ        ; rtos_int already handled
        ; Clear rt_thread_switch_interrupt_flag to 0.
        MOVL    XAR2, #0
        MOVL    *XAR1, XAR2
        ; from == 0 means first-ever switch: skip saving any context.
        MOVL    XAR1, #rt_interrupt_from_thread
        MOVL    ACC, *XAR1
        BF      switch_to_thread, EQ    ; skip register save at the first time
        ; ---- save remaining context of the "from" thread ----
        PUSH    XAR3
        PUSH    XAR4
        PUSH    XAR5
        PUSH    XAR6
        PUSH    XAR7
        PUSH    XT
        PUSH    RPC
        .if __FPU32__
        PUSH    RB
        MOV32   *SP++, STF
        MOV32   *SP++, R0H
        MOV32   *SP++, R1H
        MOV32   *SP++, R2H
        MOV32   *SP++, R3H
        MOV32   *SP++, R4H
        MOV32   *SP++, R5H
        MOV32   *SP++, R6H
        MOV32   *SP++, R7H
        .endif
        .if __FPU64__
        MOV32   *SP++, R0L
        MOV32   *SP++, R1L
        MOV32   *SP++, R2L
        MOV32   *SP++, R3L
        MOV32   *SP++, R4L
        MOV32   *SP++, R5L
        MOV32   *SP++, R6L
        MOV32   *SP++, R7L
        .endif
        .if __VCRC__
        VMOV32  *SP++, VCRC
        VMOV32  *SP++, VSTATUS
        VMOV32  *SP++, VCRCPOLY
        VMOV32  *SP++, VCRCSIZE
        .endif
        ; Store the current SP into *rt_interrupt_from_thread
        ; (XAR1 still points at rt_interrupt_from_thread here).
        MOVL    ACC, *XAR1
        MOVL    XAR1, ACC
        MOVZ    AR2, @SP                ; get from-thread stack pointer
        MOVL    *XAR1, XAR2             ; update from-thread stack pointer
switch_to_thread:
        ; Load SP from *rt_interrupt_to_thread.
        MOVL    XAR1, #rt_interrupt_to_thread
        MOVL    ACC, *XAR1
        MOVL    XAR1, ACC
        MOVL    ACC, *XAR1
        MOV     @SP, AL                 ; load to-thread stack pointer
        ; ---- restore context of the "to" thread (reverse order) ----
        .if __VCRC__
        VMOV32  VCRCSIZE, *--SP
        VMOV32  VCRCPOLY, *--SP
        VMOV32  VSTATUS, *--SP
        VMOV32  VCRC, *--SP
        .endif
        .if __FPU64__
        MOV32   R7L, *--SP
        MOV32   R6L, *--SP
        MOV32   R5L, *--SP
        MOV32   R4L, *--SP
        MOV32   R3L, *--SP
        MOV32   R2L, *--SP
        MOV32   R1L, *--SP
        MOV32   R0L, *--SP
        .endif
        .if __FPU32__
        MOV32   R7H, *--SP
        MOV32   R6H, *--SP
        MOV32   R5H, *--SP
        MOV32   R4H, *--SP
        MOV32   R3H, *--SP
        MOV32   R2H, *--SP
        MOV32   R1H, *--SP
        MOV32   R0H, *--SP
        MOV32   STF, *--SP
        POP     RB
        .endif
        POP     RPC
        POP     XT
        POP     XAR7
        POP     XAR6
        POP     XAR5
        POP     XAR4
        POP     XAR3
rtosint_exit:
        ; Do not restore the interrupt state here: it is restored according
        ; to the switched-to context during IRET (automatically by hardware).
        POP     XAR2
        POP     AR1H:AR0H
        ; Patch the IER image in the stacked interrupt frame with the IER
        ; value captured on entry (AR0), so IRET restores the right mask.
        MOVL    ACC , *-SP[4]
        MOV     AL, AR0
        MOVL    *-SP[4], ACC
        IRET
.endasmfunc
; Return the current ST0 status register in AL.
.asmfunc
rt_hw_get_st0:
        PUSH    ST0
        POP     AL
        LRETR
.endasmfunc
; Return the current ST1 status register in AL.
.asmfunc
rt_hw_get_st1:
        PUSH    ST1
        POP     AL
        LRETR
.endasmfunc
; C28x do not have a build-in "__ffs" func in its C compiler.
; We can use the "Count Sign Bits" (CSB) instruction to make one.
; CSB will return the number of 0's minus 1 above the highest set bit.
; The count is placed in T. For example:
; ACC T maxbit
; 0x00000001 30 0
; 0x00000010 26 4
; 0x000001FF 22 8
; 0x000001F0 22 8
; Return the bit position of the highest set bit of the 16-bit value in AL
; (used to implement __rt_ffs).  CSB yields T = (number of sign bits - 1),
; i.e. T = 30 - highest_bit for a positive ACC, so |T - 30| = highest_bit.
.asmfunc
rt_hw_calc_csb:
        MOV     AH, #0                  ; make ACC a zero-extended positive value
        CSB     ACC                     ; T = no. of sign bits - 1
        MOVU    ACC, T                  ; ACC = no. of sign bits - 1
        SUBB    ACC, #30                ; ACC = ACC - 30
        ABS     ACC                     ; ACC = |ACC| = highest set bit position
        LRETR
.endasmfunc
; compatible with old version
; Kept for compatibility with the old interrupt API: a deliberate no-op,
; because the switch is fully handled by rtosint_handler on this port.
.asmfunc
rt_hw_interrupt_thread_switch:
        LRETR
        NOP
.endasmfunc
.end
|
vandercookking/h7_device_RTT
| 4,075
|
rt-thread/libcpu/ppc/ppc405/cache_gcc.S
|
#define L1_CACHE_SHIFT 5
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define DCACHE_SIZE (16 << 10)/* For AMCC 405 CPUs */
/*
* Flush instruction cache.
*/
/* Invalidate the entire instruction cache (PPC4xx iccci ignores operands). */
.globl invalidate_icache
invalidate_icache:
        iccci   r0,r0           /* invalidate whole I-cache */
        isync                   /* discard any prefetched instructions */
        blr
/*
* Write any modified data cache blocks out to memory
* and invalidate the corresponding instruction cache blocks.
*
* flush_icache_range(unsigned long start, unsigned long stop)
*/
/*
 * flush_icache_range(r3 = start, r4 = stop):
 * write modified D-cache blocks in [start, stop) to memory and invalidate
 * the corresponding I-cache blocks.
 */
.globl flush_icache_range
flush_icache_range:
        li      r5,L1_CACHE_BYTES-1
        andc    r3,r3,r5                /* align start down to a cache line */
        subf    r4,r3,r4                /* r4 = byte length */
        add     r4,r4,r5                /* round length up ... */
        srwi.   r4,r4,L1_CACHE_SHIFT    /* ... and convert to line count */
        beqlr                           /* nothing to do for empty range */
        mtctr   r4
        mr      r6,r3                   /* keep start for the icbi pass */
1:      dcbst   0,r3                    /* write back one D-cache line */
        addi    r3,r3,L1_CACHE_BYTES
        bdnz    1b
        sync                            /* wait for dcbst's to get to ram */
        mtctr   r4
2:      icbi    0,r6                    /* invalidate one I-cache line */
        addi    r6,r6,L1_CACHE_BYTES
        bdnz    2b
        sync                            /* additional sync needed on g4 */
        isync
        blr
/*
* Write any modified data cache blocks out to memory.
* Does not invalidate the corresponding cache lines (especially for
* any corresponding instruction cache).
*
* clean_dcache_range(unsigned long start, unsigned long stop)
*/
/*
 * clean_dcache_range(r3 = start, r4 = stop):
 * write modified D-cache blocks in [start, stop) out to memory without
 * invalidating them (and without touching the I-cache).
 */
.globl clean_dcache_range
clean_dcache_range:
        li      r5,L1_CACHE_BYTES-1
        andc    r3,r3,r5                /* align start down to a cache line */
        subf    r4,r3,r4
        add     r4,r4,r5
        srwi.   r4,r4,L1_CACHE_SHIFT    /* r4 = number of lines */
        beqlr                           /* empty range */
        mtctr   r4
1:      dcbst   0,r3                    /* write back, keep line valid */
        addi    r3,r3,L1_CACHE_BYTES
        bdnz    1b
        sync                            /* wait for dcbst's to get to ram */
        blr
/*
* Write any modified data cache blocks out to memory and invalidate them.
* Does not invalidate the corresponding instruction cache blocks.
*
* flush_dcache_range(unsigned long start, unsigned long stop)
*/
/*
 * flush_dcache_range(r3 = start, r4 = stop):
 * write modified D-cache blocks in [start, stop) to memory and invalidate
 * them (I-cache is not touched).
 */
.globl flush_dcache_range
flush_dcache_range:
        li      r5,L1_CACHE_BYTES-1
        andc    r3,r3,r5                /* align start down to a cache line */
        subf    r4,r3,r4
        add     r4,r4,r5
        srwi.   r4,r4,L1_CACHE_SHIFT    /* r4 = number of lines */
        beqlr                           /* empty range */
        mtctr   r4
1:      dcbf    0,r3                    /* write back and invalidate line */
        addi    r3,r3,L1_CACHE_BYTES
        bdnz    1b
        sync                            /* wait for dcbf's to get to ram */
        blr
/*
* Like above, but invalidate the D-cache. This is used by the 8xx
* to invalidate the cache so the PPC core doesn't get stale data
* from the CPM (no cache snooping here :-).
*
* invalidate_dcache_range(unsigned long start, unsigned long stop)
*/
/*
 * invalidate_dcache_range(r3 = start, r4 = stop):
 * invalidate D-cache blocks in [start, stop) WITHOUT writing them back -
 * any modified data in the range is lost (used when memory was updated
 * behind the cache, e.g. by DMA; there is no snooping here).
 */
.globl invalidate_dcache_range
invalidate_dcache_range:
        li      r5,L1_CACHE_BYTES-1
        andc    r3,r3,r5                /* align start down to a cache line */
        subf    r4,r3,r4
        add     r4,r4,r5
        srwi.   r4,r4,L1_CACHE_SHIFT    /* r4 = number of lines */
        beqlr                           /* empty range */
        mtctr   r4
1:      dcbi    0,r3                    /* invalidate line, no write back */
        addi    r3,r3,L1_CACHE_BYTES
        bdnz    1b
        sync                            /* wait for dcbi's to complete */
        blr
/*
* 40x cores have 8K or 16K dcache and 32 byte line size.
* 44x has a 32K dcache and 32 byte line size.
* 8xx has 1, 2, 4, 8K variants.
* For now, cover the worst case of the 44x.
* Must be called with external interrupts disabled.
*/
#define CACHE_NWAYS 64
#define CACHE_NLINES 32
/*
 * Flush the whole D-cache by reading a memory range large enough to
 * displace every line (2 * ways * lines), forcing write-back of dirty
 * data.  Must be called with external interrupts disabled.
 */
.globl flush_dcache
flush_dcache:
        li      r4,(2 * CACHE_NWAYS * CACHE_NLINES)
        mtctr   r4
        lis     r5,0                    /* start reading from address 0 */
1:      lwz     r3,0(r5)                /* Load one word from every line */
        addi    r5,r5,L1_CACHE_BYTES
        bdnz    1b
        sync
        blr
/* Invalidate the entire D-cache using dccci over every congruence class. */
.globl invalidate_dcache
invalidate_dcache:
        addi    r6,0,0x0000             /* clear GPR 6 (line address) */
        /* Do loop for # of dcache congruence classes. */
        lis     r7,(DCACHE_SIZE / L1_CACHE_BYTES / 2)@ha        /* TBS for large sized cache */
        ori     r7,r7,(DCACHE_SIZE / L1_CACHE_BYTES / 2)@l
        /* NOTE: dccci invalidates both */
        mtctr   r7                      /* ways in the D cache */
dcloop:
        dccci   0,r6                    /* invalidate line */
        addi    r6,r6,L1_CACHE_BYTES    /* bump to next line */
        bdnz    dcloop
        sync
        blr
/*
* Cache functions.
*
* Icache-related functions are used in POST framework.
*/
/* Enable the I-cache: invalidate it, then mark regions cacheable in ICCR. */
.globl icache_enable
icache_enable:
        mflr    r8                      /* preserve LR across the call */
        bl      invalidate_icache
        mtlr    r8
        isync
        addis   r3,r0, 0xc000           /* ICCR = 0xC0000000: cache the two top regions */
        mticcr  r3
        blr
/* Disable the I-cache: mark every region non-cacheable in ICCR. */
.globl icache_disable
icache_disable:
        addis   r3,r0, 0x0000           /* ICCR = 0: nothing cacheable */
        mticcr  r3
        isync
        blr
/* Return bit 0 of ICCR (non-zero when the first region is cacheable). */
.globl icache_status
icache_status:
        mficcr  r3
        srwi    r3, r3, 31              /* >>31 => select bit 0 */
        blr
/* Enable the D-cache: invalidate it, then mark region 0 cacheable in DCCR. */
.globl dcache_enable
dcache_enable:
        mflr    r8                      /* preserve LR across the call */
        bl      invalidate_dcache
        mtlr    r8
        isync
        addis   r3,r0, 0x8000           /* DCCR = 0x80000000: set bit 0 */
        mtdccr  r3
        blr
/* Disable the D-cache: flush dirty data first, then clear DCCR. */
.globl dcache_disable
dcache_disable:
        mflr    r8                      /* preserve LR across the call */
        bl      flush_dcache
        mtlr    r8
        addis   r3,r0, 0x0000           /* clear bit 0: nothing cacheable */
        mtdccr  r3
        blr
/* Return bit 0 of DCCR (non-zero when the first region is cacheable). */
.globl dcache_status
dcache_status:
        mfdccr  r3
        srwi    r3, r3, 31              /* >>31 => select bit 0 */
        blr
|
vandercookking/h7_device_RTT
| 6,643
|
rt-thread/libcpu/ppc/ppc405/context_gcc.S
|
#include "context.h"
#define SPRG0 0x110 /* Special Purpose Register General 0 */
#define SPRG1 0x111 /* Special Purpose Register General 1 */
.globl rt_hw_interrupt_disable
.globl rt_hw_interrupt_enable
.globl rt_hw_context_switch
.globl rt_hw_context_switch_to
.globl rt_hw_context_switch_interrupt
.globl rt_hw_systemcall_entry
/*
* rt_base_t rt_hw_interrupt_disable();
* return the interrupt status and disable interrupt
*/
#if 0
rt_hw_interrupt_disable:
    mfmsr   r3                  /* Disable interrupts */
    li      r4,0
    ori     r4,r4,MSR_EE
    andc    r4,r4,r3
    SYNC                        /* Some chip revs need this... */
    mtmsr   r4
    SYNC
    blr
#else
/*
 * rt_base_t rt_hw_interrupt_disable():
 * return the current MSR in r3 and clear the external/critical interrupt
 * enable bits (mask 0xFFFD7FFF).
 */
rt_hw_interrupt_disable:
    addis   r4, r0, 0xFFFD
    ori     r4, r4, 0x7FFF      /* r4 = 0xFFFD7FFF interrupt-enable mask */
    mfmsr   r3                  /* r3 = old MSR, returned to the caller */
    and     r4, r4, r3          /* Clear bits 14 and 16, corresponding to... */
    mtmsr   r4                  /* ...critical and non-critical interrupts */
    blr
#endif
/*
* void rt_hw_interrupt_enable(rt_base_t level);
* restore interrupt
*/
/* void rt_hw_interrupt_enable(rt_base_t level): write the saved MSR back. */
rt_hw_interrupt_enable:
    mtmsr   r3                  /* r3 = level previously returned by disable */
    SYNC
    blr
/*
* void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
* r3 --> from
* r4 --> to
*
* r1: stack pointer
*/
/*
 * System-call exception entry: performs the pending thread context switch.
 * Saves the full register frame of the "from" thread on its stack, records
 * its SP, then restores the frame of the "to" thread and returns with rfi.
 * r3/r4 are parked in SPRG0/SPRG1 while the switch flag is inspected.
 */
rt_hw_systemcall_entry:
    mtspr   SPRG0,r3            /* save r3 to SPRG0 */
    mtspr   SPRG1,r4            /* save r4 to SPRG1 */
    lis     r3,rt_thread_switch_interrput_flag@h
    ori     r3,r3,rt_thread_switch_interrput_flag@l
    lwz     r4,0(r3)
    cmpi    cr0,0,r4,0x0        /* whether is 0 */
    beq     _no_switch          /* no switch pending: exit */
    li      r4,0x0              /* set rt_thread_switch_interrput_flag to 0 */
    stw     r4,0(r3)
    /* load from thread to r3 */
    lis     r3,rt_interrupt_from_thread@h   /* set rt_interrupt_from_thread */
    ori     r3,r3,rt_interrupt_from_thread@l
    lwz     r3,0(r3)
    cmpi    cr0,0,r3,0x0        /* whether is 0 */
    beq     _restore            /* it's the first switch: goto _restore */
    /* allocate the frame and save r1:sp to thread[from] stack pointer */
    subi    r1, r1, STACK_FRAME_SIZE
    stw     r1, 0(r3)
    /* restore r3, r4 from SPRG0/SPRG1 so their real values get saved */
    mfspr   r3,SPRG0
    mfspr   r4,SPRG1            /* was SPRG0: r4 would have been clobbered with r3 */
    /* save registers */
    stw     r0,GPR0(r1)         /* save general purpose register 0 */
    stmw    r2,GPR2(r1)         /* save general purpose registers 2-31 */
    mfusprg0 r0                 /* save usprg0 */
    stw     r0,USPRG0(r1)
    mfcr    r0                  /* save cr (trailing comma removed: syntax error) */
    stw     r0,CR(r1)
    mfxer   r0                  /* save xer */
    stw     r0,XER(r1)
    mfctr   r0                  /* save ctr */
    stw     r0,CTR(r1)
    mflr    r0                  /* save lr */
    stw     r0, LR(r1)
    mfsrr0  r0                  /* save SRR0 and SRR1 */
    stw     r0,SRR0(r1)
    mfsrr1  r0
    stw     r0,SRR1(r1)
_restore:
    /* get thread[to] stack pointer */
    lis     r4,rt_interrupt_to_thread@h
    ori     r4,r4,rt_interrupt_to_thread@l
    lwz     r1,0(r4)            /* r1 = &to_thread->sp */
    lwz     r1,0(r1)            /* r1 = to_thread->sp */
    lwz     r0,SRR1(r1)         /* restore SRR1 and SRR0 */
    mtsrr1  r0
    lwz     r0,SRR0(r1)
    mtsrr0  r0
    lwz     r0,LR(r1)           /* restore lr */
    mtlr    r0
    lwz     r0,CTR(r1)          /* restore ctr */
    mtctr   r0
    lwz     r0,XER(r1)          /* restore xer */
    mtxer   r0
    lwz     r0,CR(r1)           /* restore cr */
    mtcr    r0
    lwz     r0,USPRG0(r1)       /* restore usprg0 */
//  mtusprg0 r0
    lmw     r2, GPR2(r1)        /* restore general registers 2-31 */
    lwz     r0,GPR0(r1)
    addi    r1, r1, STACK_FRAME_SIZE
    /* RFI will restore the status register and thus the correct priority */
    rfi
_no_switch:
    /* restore r3, r4 from SPRG0/SPRG1 */
    mfspr   r3,SPRG0
    mfspr   r4,SPRG1            /* was SPRG0: returned with r4 = r3 */
    rfi
/* void rt_hw_context_switch_to(to); */
/* void rt_hw_context_switch_to(to): start the first thread (r3 = to). */
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
    /* set rt_thread_switch_interrput_flag = 1 */
    lis     r5,rt_thread_switch_interrput_flag@h
    ori     r5,r5,rt_thread_switch_interrput_flag@l
    li      r6, 0x01
    stw     r6,0(r5)
    /* set rt_interrupt_from_thread = 0 (tells entry code to skip the save) */
    lis     r5,rt_interrupt_from_thread@h
    ori     r5,r5,rt_interrupt_from_thread@l
    li      r6, 0x00
    stw     r6,0(r5)
    /* set rt_interrupt_to_thread = to */
    lis     r5,rt_interrupt_to_thread@h
    ori     r5,r5,rt_interrupt_to_thread@l
    stw     r3,0(r5)
    /* trigger a system call; rt_hw_systemcall_entry performs the switch */
    sc
    blr
/* void rt_hw_context_switch(from, to): r3 = from, r4 = to. */
.globl rt_hw_context_switch
rt_hw_context_switch:
    /* if no switch is pending, set the flag and record the from thread */
    lis     r5,rt_thread_switch_interrput_flag@h
    ori     r5,r5,rt_thread_switch_interrput_flag@l
    lwz     r6,0(r5)
    cmpi    cr0,0,r6,0x1        /* whether is 1 */
    beq     _reswitch           /* set already, goto _reswitch */
    li      r6,0x1              /* set rt_thread_switch_interrput_flag to 1 */
    stw     r6,0(r5)
    /* set rt_interrupt_from_thread to 'from' */
    lis     r5,rt_interrupt_from_thread@h
    ori     r5,r5,rt_interrupt_from_thread@l
    stw     r3,0(r5)
_reswitch:
    /* set rt_interrupt_to_thread to 'to' */
    lis     r6,rt_interrupt_to_thread@h
    ori     r6,r6,rt_interrupt_to_thread@l
    stw     r4,0(r6)
    /* trigger a system call; rt_hw_systemcall_entry performs the switch */
    sc
    blr
/*
 * void rt_hw_context_switch_interrupt(from, to): r3 = from, r4 = to.
 * Interrupt-context variant: only records the pending switch.
 * NOTE(review): unlike rt_hw_context_switch it issues no `sc` - presumably
 * the switch is performed on the interrupt return path; confirm.
 */
.globl rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
    /* if no switch is pending, set the flag and record the from thread */
    lis     r5,rt_thread_switch_interrput_flag@h
    ori     r5,r5,rt_thread_switch_interrput_flag@l
    lwz     r6,0(r5)
    cmpi    cr0,0,r6,0x1        /* whether is 1 */
    beq     _int_reswitch       /* set already, goto _int_reswitch */
    li      r6,0x1              /* set rt_thread_switch_interrput_flag to 1 */
    stw     r6,0(r5)
    /* set rt_interrupt_from_thread to 'from' */
    lis     r5,rt_interrupt_from_thread@h
    ori     r5,r5,rt_interrupt_from_thread@l
    stw     r3,0(r5)
_int_reswitch:
    /* set rt_interrupt_to_thread to 'to' */
    lis     r6,rt_interrupt_to_thread@h
    ori     r6,r6,rt_interrupt_to_thread@l
    stw     r4,0(r6)
    blr
|
vandercookking/h7_device_RTT
| 5,590
|
rt-thread/libcpu/ppc/ppc405/dcr_gcc.S
|
/*
* (C) Copyright 2001
* Erik Theisen, Wave 7 Optics, etheisen@mindspring.com
*
* See file CREDITS for list of people who contributed to this
* project.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*/
#include <asm/ppc4xx.h>
/*****************************************************************************
*
* XXX - DANGER
* These routines make use of self modifying code. DO NOT CALL THEM
* UNTIL THEY ARE RELOCATED TO RAM. Additionally, I do not
* recommend them for use in anything other than an interactive
* debugging environment. This is mainly due to performance reasons.
*
****************************************************************************/
/*
* static void _create_MFDCR(unsigned short dcrn)
*
* Builds a 'mfdcr' instruction for get_dcr
* function.
*/
.section ".text"
.align 2
.type _create_MFDCR,@function
/*
 * Self-modifying helper: encode a `mfdcr r3,dcrn` instruction (r3 = dcrn on
 * entry) and store it at the caller's return address, i.e. into the `.long 0`
 * slot inside get_dcr.  Clobbers r0, r4.
 */
_create_MFDCR:
    /*
     * Build up a 'mfdcr' instruction formatted as follows:
     *
     * OPCD | RT | DCRF | XO | CR |
     * ---------------|--------------|--------------|----|
     * 0 5 | 6 10 | 11 20 | 21 30 | 31 |
     * | | DCRN | | |
     * 31 | %r3 | (5..9|0..4) | 323 | 0 |
     *
     * Where:
     * OPCD = opcode - 31
     * RT = destination register - %r3 return register
     * DCRF = DCRN # with upper and lower halves swapped
     * XO = extended opcode - 323
     * CR = CR[CR0] NOT undefined - 0
     */
    rlwinm  r0, r3, 27, 27, 31      /* OPCD = 31 */
    rlwinm  r3, r3, 5, 22, 26       /* swap DCRN halves */
    or      r3, r3, r0
    slwi    r3, r3, 10
    oris    r3, r3, 0x3e30          /* RT = %r3 */
    ori     r3, r3, 323             /* XO = 323 */
    slwi    r3, r3, 1               /* CR = 0 */
    mflr    r4                      /* r4 = return address = patch target */
    stw     r3, 0(r4)               /* Store instr in get_dcr() */
    dcbst   r0, r4                  /* Make sure val is written out */
    sync                            /* Wait for write to complete */
    icbi    r0, r4                  /* Make sure old instr is dumped */
    isync                           /* Wait for icbi to complete */
    blr
.Lfe1: .size _create_MFDCR,.Lfe1-_create_MFDCR
/* end _create_MFDCR() */
/*
* static void _create_MTDCR(unsigned short dcrn, unsigned long value)
*
* Builds a 'mtdcr' instruction for set_dcr
* function.
*/
.section ".text"
.align 2
.type _create_MTDCR,@function
/*
 * Self-modifying helper: encode a `mtdcr dcrn,r4` instruction (r3 = dcrn on
 * entry) and store it at the caller's return address, i.e. into the `.long 0`
 * slot inside set_dcr.  Clobbers r0, r5.
 */
_create_MTDCR:
    /*
     * Build up a 'mtdcr' instruction formatted as follows:
     *
     * OPCD | RS | DCRF | XO | CR |
     * ---------------|--------------|--------------|----|
     * 0 5 | 6 10 | 11 20 | 21 30 | 31 |
     * | | DCRN | | |
     * 31 | %r3 | (5..9|0..4) | 451 | 0 |
     *
     * Where:
     * OPCD = opcode - 31
     * RS = source register - %r4
     * DCRF = dest. DCRN # with upper and lower halves swapped
     * XO = extended opcode - 451
     * CR = CR[CR0] NOT undefined - 0
     */
    rlwinm  r0, r3, 27, 27, 31      /* OPCD = 31 */
    rlwinm  r3, r3, 5, 22, 26       /* swap DCRN halves */
    or      r3, r3, r0
    slwi    r3, r3, 10
    oris    r3, r3, 0x3e40          /* RS = %r4 */
    ori     r3, r3, 451             /* XO = 451 */
    slwi    r3, r3, 1               /* CR = 0 */
    mflr    r5                      /* r5 = return address = patch target */
    stw     r3, 0(r5)               /* Store instr in set_dcr() */
    dcbst   r0, r5                  /* Make sure val is written out */
    sync                            /* Wait for write to complete */
    icbi    r0, r5                  /* Make sure old instr is dumped */
    isync                           /* Wait for icbi to complete */
    blr
.Lfe2: .size _create_MTDCR,.Lfe2-_create_MTDCR
/* end _create_MTDCR() */
/*
* unsigned long get_dcr(unsigned short dcrn)
*
* Return a given DCR's value.
*/
/* */
/* XXX - This is self modifying code, hence */
/* it is in the data section. */
/* */
.section ".text"
.align 2
.globl get_dcr
.type get_dcr,@function
/*
 * unsigned long get_dcr(unsigned short dcrn): return the value of DCR
 * `dcrn`.  XXX - self-modifying: _create_MFDCR patches the `.long 0`
 * slot below with the proper `mfdcr` before it executes.  Only safe
 * after relocation to RAM.
 */
get_dcr:
    mflr    r0                      /* Get link register */
    stwu    r1, -32(r1)             /* Save back chain and move SP */
    stw     r0, +36(r1)             /* Save link register */
    bl      _create_MFDCR           /* Build following instruction */
    /* XXX - we build this instruction up on the fly. */
    .long   0                       /* patched to: mfdcr r3,dcrn */
    lwz     r0, +36(r1)             /* Get saved link register */
    mtlr    r0                      /* Restore link register */
    addi    r1, r1, +32             /* Remove frame from stack */
    blr                             /* Return to calling function */
.Lfe3: .size get_dcr,.Lfe3-get_dcr
/* end get_dcr() */
/*
* void set_dcr(unsigned short dcrn, unsigned long value)
*
* Set a given DCR to the supplied value.
*/
/*
* XXX - This is self modifying code, hence
* it is in the data section.
*/
.section ".text"
.align 2
.globl set_dcr
.type set_dcr,@function
/*
 * void set_dcr(unsigned short dcrn, unsigned long value): write `value`
 * (r4) into DCR `dcrn` (r3).  XXX - self-modifying: _create_MTDCR patches
 * the `.long 0` slot below with the proper `mtdcr` before it executes.
 * Only safe after relocation to RAM.
 */
set_dcr:
    mflr    r0                      /* Get link register */
    stwu    r1, -32(r1)             /* Save back chain and move SP */
    stw     r0, +36(r1)             /* Save link register */
    bl      _create_MTDCR           /* Build following instruction */
    /* XXX - we build this instruction up on the fly. */
    .long   0                       /* patched to: mtdcr dcrn,r4 */
    lwz     r0, +36(r1)             /* Get saved link register */
    mtlr    r0                      /* Restore link register */
    addi    r1, r1, +32             /* Remove frame from stack */
    blr                             /* Return to calling function */
.Lfe4: .size set_dcr,.Lfe4-set_dcr
/* end set_dcr() */
|
vandercookking/h7_device_RTT
| 19,790
|
rt-thread/libcpu/ppc/ppc405/start_gcc.S
|
#include <config.h>
#include <asm/ppc_defs.h>
/* #include <asm/cache.h> */
#include "cache.h"
#include <asm/ppc4xx.h>
#include "context.h"
#define CONFIG_SYS_DCACHE_SACR_VALUE (0x00000000)
#define CONFIG_SYS_ICACHE_SACR_VALUE \
(PPC_128MB_SACR_VALUE(CONFIG_SYS_SDRAM_BASE + ( 0 << 20)) | \
PPC_128MB_SACR_VALUE(CONFIG_SYS_SDRAM_BASE + (128 << 20)) | \
PPC_128MB_SACR_VALUE(CONFIG_SYS_FLASH_BASE))
#define function_prolog(func_name) .text; \
.align 2; \
.globl func_name; \
func_name:
#define function_epilog(func_name) .type func_name,@function; \
.size func_name,.-func_name
/* We don't want the MMU yet.
*/
#undef MSR_KERNEL
#define MSR_KERNEL ( MSR_ME ) /* Machine Check */
#define SYNC \
sync; \
isync
/*
* Macros for storing registers into and loading registers from
* exception frames.
*/
#define SAVE_GPR(n, base) stw n,GPR0+4*(n)(base)
#define SAVE_2GPRS(n, base) SAVE_GPR(n, base); SAVE_GPR(n+1, base)
#define SAVE_4GPRS(n, base) SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base)
#define SAVE_8GPRS(n, base) SAVE_4GPRS(n, base); SAVE_4GPRS(n+4, base)
#define SAVE_10GPRS(n,base) SAVE_8GPRS(n, base); SAVE_2GPRS(n+8, base)
#define REST_GPR(n, base) lwz n,GPR0+4*(n)(base)
#define REST_2GPRS(n, base) REST_GPR(n, base); REST_GPR(n+1, base)
#define REST_4GPRS(n, base) REST_2GPRS(n, base); REST_2GPRS(n+2, base)
#define REST_8GPRS(n, base) REST_4GPRS(n, base); REST_4GPRS(n+4, base)
#define REST_10GPRS(n,base) REST_8GPRS(n, base); REST_2GPRS(n+8, base)
/*
* GCC sometimes accesses words at negative offsets from the stack
* pointer, although the SysV ABI says it shouldn't. To cope with
* this, we leave this much untouched space on the stack on exception
* entry.
*/
#define STACK_UNDERHEAD 64
/*
* Exception entry code. This code runs with address translation
* turned off, i.e. using physical addresses.
* We assume sprg3 has the physical address of the current
* task's thread_struct.
*/
/* Save:
* CR, r0, r1 (sp), r2, r3, r4, r5, r6, r20, r21, r22, r23,
* LR, CTR, XER, DAR, SRR0, SRR1
*/
#define EXCEPTION_PROLOG(reg1, reg2) \
mtspr SPRG0,r20; \
mtspr SPRG1,r21; \
mfcr r20; \
subi r21,r1,INT_FRAME_SIZE+STACK_UNDERHEAD; /* alloc exc. frame */\
stw r20,_CCR(r21); /* save registers */ \
stw r22,GPR22(r21); \
stw r23,GPR23(r21); \
mfspr r20,SPRG0; \
stw r20,GPR20(r21); \
mfspr r22,SPRG1; \
stw r22,GPR21(r21); \
mflr r20; \
stw r20,_LINK(r21); \
mfctr r22; \
stw r22,_CTR(r21); \
mfspr r20,XER; \
stw r20,_XER(r21); \
mfspr r20, DAR_DEAR; \
stw r20,_DAR(r21); \
mfspr r22,reg1; \
mfspr r23,reg2; \
stw r0,GPR0(r21); \
stw r1,GPR1(r21); \
stw r2,GPR2(r21); \
stw r1,0(r21);/* back chain */ \
mr r1,r21;/* set new kernel sp */ \
SAVE_4GPRS(3, r21);
/*
* Note: code which follows this uses cr0.eq (set if from kernel),
* r21, r22 (SRR0), and r23 (SRR1).
*/
/*
* Exception vectors.
*
* The data words for `hdlr' and `int_return' are initialized with
* OFFSET values only; they must be relocated first before they can
* be used!
*/
#define STD_EXCEPTION(n, label, hdlr) \
. = n; \
label: \
EXCEPTION_PROLOG(SRR0, SRR1); \
lwz r3,GOT(transfer_to_handler); \
mtlr r3; \
addi r3,r1,STACK_FRAME_OVERHEAD; \
li r20,MSR_KERNEL; \
rlwimi r20,r23,0,25,25; \
blrl; \
.L_ ## label : \
.long hdlr - _start + _START_OFFSET; \
.long int_return - _start + _START_OFFSET
#define CRIT_EXCEPTION(n, label, hdlr) \
. = n; \
label: \
EXCEPTION_PROLOG(CSRR0, CSRR1); \
lwz r3,GOT(transfer_to_handler); \
mtlr r3; \
addi r3,r1,STACK_FRAME_OVERHEAD; \
li r20,(MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)); \
rlwimi r20,r23,0,25,25; \
blrl; \
.L_ ## label : \
.long hdlr - _start + _START_OFFSET; \
.long crit_return - _start + _START_OFFSET
#define MCK_EXCEPTION(n, label, hdlr) \
. = n; \
label: \
EXCEPTION_PROLOG(MCSRR0, MCSRR1); \
lwz r3,GOT(transfer_to_handler); \
mtlr r3; \
addi r3,r1,STACK_FRAME_OVERHEAD; \
li r20,(MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)); \
rlwimi r20,r23,0,25,25; \
blrl; \
.L_ ## label : \
.long hdlr - _start + _START_OFFSET; \
.long mck_return - _start + _START_OFFSET
/***************************************************************************
*
* These definitions simplify the ugly declarations necessary for GOT
* definitions.
*
* Stolen from prepboot/bootldr.h, (C) 1998 Gabriel Paubert, paubert@iram.es
*
* Uses r14 to access the GOT
*/
#define START_GOT \
.section ".got2","aw"; \
.LCTOC1 = .+32768
#define END_GOT \
.text
#define GET_GOT \
bl 1f ; \
.text 2 ; \
0: .long .LCTOC1-1f ; \
.text ; \
1: mflr r14 ; \
lwz r0,0b-1b(r14) ; \
add r14,r0,r14 ;
#define GOT_ENTRY(NAME) .L_ ## NAME = . - .LCTOC1 ; .long NAME
#define GOT(NAME) .L_ ## NAME (r14)
/*
* Set up GOT: Global Offset Table
*
* Use r14 to access the GOT
*/
START_GOT
GOT_ENTRY(_GOT2_TABLE_)
GOT_ENTRY(_FIXUP_TABLE_)
GOT_ENTRY(_start)
GOT_ENTRY(_start_of_vectors)
GOT_ENTRY(_end_of_vectors)
GOT_ENTRY(transfer_to_handler)
GOT_ENTRY(__init_end)
GOT_ENTRY(_end)
GOT_ENTRY(__bss_start)
END_GOT
/*
* r3 - 1st arg to board_init(): IMMP pointer
* r4 - 2nd arg to board_init(): boot flag
*/
.text
version_string:
.ascii "RT-Thread 0.4.0"
. = EXC_OFF_SYS_RESET
_start_of_vectors:
/* Critical input. */
CRIT_EXCEPTION(0x100, CritcalInput, UnknownException)
CRIT_EXCEPTION(0x200, MachineCheck, MachineCheckException)
/* Data Storage exception. */
STD_EXCEPTION(0x300, DataStorage, UnknownException)
/* Instruction Storage exception. */
STD_EXCEPTION(0x400, InstStorage, UnknownException)
. = 0x0500
/*
 * External interrupt vector (0x500): save the full thread frame, dispatch
 * to external_interrupt between rt_interrupt_enter/leave, restore the frame
 * and fall into rt_hw_systemcall_entry to honour any pending thread switch.
 */
ExtInterrupt:
    /* save current thread stack */
    subi    r1, r1, STACK_FRAME_SIZE
    /* save registers */
    stw     r0,GPR0(r1)         /* save general purpose register 0 */
    stmw    r2,GPR2(r1)         /* save general purpose registers 2-31 */
    mfusprg0 r0                 /* save usprg0 */
    stw     r0,USPRG0(r1)
    mfcr    r0                  /* save cr (trailing comma removed: syntax error) */
    stw     r0,CR(r1)
    mfxer   r0                  /* save xer */
    stw     r0,XER(r1)
    mfctr   r0                  /* save ctr */
    stw     r0,CTR(r1)
    mflr    r0                  /* save lr */
    stw     r0, LR(r1)
    mfsrr0  r0                  /* save SRR0 and SRR1 */
    stw     r0,SRR0(r1)
    mfsrr1  r0
    stw     r0,SRR1(r1)
    bl      rt_interrupt_enter
    bl      external_interrupt
    bl      rt_interrupt_leave
    /* restore thread context */
    lwz     r0,SRR1(r1)         /* restore SRR1 and SRR0 */
    mtsrr1  r0
    lwz     r0,SRR0(r1)
    mtsrr0  r0
    lwz     r0,LR(r1)           /* restore lr */
    mtlr    r0
    lwz     r0,CTR(r1)          /* restore ctr */
    mtctr   r0
    lwz     r0,XER(r1)          /* restore xer */
    mtxer   r0
    lwz     r0,CR(r1)           /* restore cr */
    mtcr    r0
    lwz     r0,USPRG0(r1)       /* restore usprg0 */
//  mtusprg0 r0
    lmw     r2, GPR2(r1)        /* restore general registers 2-31 */
    lwz     r0,GPR0(r1)
    addi    r1, r1, STACK_FRAME_SIZE
    b       rt_hw_systemcall_entry  /* perform pending switch, then rfi */
/* Alignment exception. */
. = 0x600
Alignment:
EXCEPTION_PROLOG(SRR0, SRR1)
mfspr r4,DAR
stw r4,_DAR(r21)
mfspr r5,DSISR
stw r5,_DSISR(r21)
addi r3,r1,STACK_FRAME_OVERHEAD
li r20,MSR_KERNEL
rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
lwz r6,GOT(transfer_to_handler)
mtlr r6
blrl
.L_Alignment:
.long AlignmentException - _start + _START_OFFSET
.long int_return - _start + _START_OFFSET
/* Program check exception */
. = 0x700
ProgramCheck:
EXCEPTION_PROLOG(SRR0, SRR1)
addi r3,r1,STACK_FRAME_OVERHEAD
li r20,MSR_KERNEL
rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
lwz r6,GOT(transfer_to_handler)
mtlr r6
blrl
.L_ProgramCheck:
.long ProgramCheckException - _start + _START_OFFSET
.long int_return - _start + _START_OFFSET
. = 0x0c00
SystemCall:
    b       rt_hw_systemcall_entry  /* `sc` vector: do the context switch */
. = 0x1000
/*
 * Programmable Interval Timer vector (0x1000): same frame save/restore as
 * ExtInterrupt, dispatching to DecrementerPITException, then falls into
 * rt_hw_systemcall_entry to honour any pending thread switch.
 */
PIT:
    /* save current thread stack */
    subi    r1, r1, STACK_FRAME_SIZE
    /* save registers */
    stw     r0,GPR0(r1)         /* save general purpose register 0 */
    stmw    r2,GPR2(r1)         /* save general purpose registers 2-31 */
    mfusprg0 r0                 /* save usprg0 */
    stw     r0,USPRG0(r1)
    mfcr    r0                  /* save cr (trailing comma removed: syntax error) */
    stw     r0,CR(r1)
    mfxer   r0                  /* save xer */
    stw     r0,XER(r1)
    mfctr   r0                  /* save ctr */
    stw     r0,CTR(r1)
    mflr    r0                  /* save lr */
    stw     r0, LR(r1)
    mfsrr0  r0                  /* save SRR0 and SRR1 */
    stw     r0,SRR0(r1)
    mfsrr1  r0
    stw     r0,SRR1(r1)
    bl      rt_interrupt_enter
    bl      DecrementerPITException
    bl      rt_interrupt_leave
    /* restore thread context */
    lwz     r0,SRR1(r1)         /* restore SRR1 and SRR0 */
    mtsrr1  r0
    lwz     r0,SRR0(r1)
    mtsrr0  r0
    lwz     r0,LR(r1)           /* restore lr */
    mtlr    r0
    lwz     r0,CTR(r1)          /* restore ctr */
    mtctr   r0
    lwz     r0,XER(r1)          /* restore xer */
    mtxer   r0
    lwz     r0,CR(r1)           /* restore cr */
    mtcr    r0
    lwz     r0,USPRG0(r1)       /* restore usprg0 */
//  mtusprg0 r0
    lmw     r2, GPR2(r1)        /* restore general registers 2-31 */
    lwz     r0,GPR0(r1)
    addi    r1, r1, STACK_FRAME_SIZE
    b       rt_hw_systemcall_entry  /* perform pending switch, then rfi */
STD_EXCEPTION(0x1100, InstructionTLBMiss, UnknownException)
STD_EXCEPTION(0x1200, DataTLBMiss, UnknownException)
CRIT_EXCEPTION(0x2000, DebugBreakpoint, DebugException )
_end_of_vectors:
. = _START_OFFSET
/*
* start and end addresses of the BSS section
* they are taken from the linker script
*/
.set START_BSS, __bss_start
.set END_BSS, __bss_end
/* stack top address exported from linker script */
.set STACK_TOP, __stack_top
/*
 * Reset entry: bring up storage/cache control, clear BSS, set up the
 * initial stack and GOT, then call rtthread_startup (never returns).
 */
_start:
    /*----------------------------------------------------------------------- */
    /* Clear and set up some registers. */
    /*----------------------------------------------------------------------- */
    addi    r4,r0,0x0000
    mtsgr   r4              /* Configure guarded attribute for performance. */
    mtsler  r4              /* Configure endianness */
    mtsu0r  r4              /* and compression. */
    /*------------------------------------------------------------------------
     * Initialize vector tables and other registers
     * set them all to 0. The Interrupt Handler implementation
     * has to set these registers later on
     *-----------------------------------------------------------------------*/
    mtdcwr  r4
    mtesr   r4              /* clear Exception Syndrome Reg */
    mttcr   r4              /* clear Timer Control Reg */
    mtxer   r4              /* clear Fixed-Point Exception Reg */
    mtevpr  r4              /* clear Exception Vector Prefix Reg */
    addi    r4,r0,(0xFFFF-0x10000)  /* set r4 to 0xFFFFFFFF (status in the */
                                    /* dbsr is cleared by setting bits to 1) */
    mtdbsr  r4              /* clear/reset the dbsr */
    /* Invalidate the i- and d-caches. */
    bl      invalidate_icache
    bl      invalidate_dcache
    /* Set-up icache cacheability. */
    lis     r4, CONFIG_SYS_ICACHE_SACR_VALUE@h
    ori     r4, r4, CONFIG_SYS_ICACHE_SACR_VALUE@l
    mticcr  r4
    isync
    /* Set-up dcache cacheability. */
    lis     r4, CONFIG_SYS_DCACHE_SACR_VALUE@h
    ori     r4, r4, CONFIG_SYS_DCACHE_SACR_VALUE@l
    mtdccr  r4
    /*----------------------------------------------------------------------- */
    /* DMA Status, clear to come up clean */
    /*----------------------------------------------------------------------- */
    addis   r3,r0, 0xFFFF   /* Clear all existing DMA status */
    ori     r3,r3, 0xFFFF
    mtdcr   dmasr, r3
    /* clear the BSS section */
    lis     r3,START_BSS@h  // load start of BSS into r3
    ori     r3,r3,START_BSS@l
    lis     r4,END_BSS@h    // load end of BSS into r4
    ori     r4,r4,END_BSS@l
    sub     r4,r4,r3        // calculate length of BSS
    srwi    r4,r4,2         // convert byte-length to word-length
    li      r5,0            // zero r5
    cmplw   0,r4,r5         // check to see whether length equals 0
    beql    0,2f            // in case of length 0 we're already done
    subi    r3,r3,4         // because of offset start 4 bytes lower
    mtctr   r4              // use word-length of BSS section as counter
1:  /* bss clear start */
    stwu    r5,4(r3)        // zero one word of BSS section
    bdnz    1b              // keep going until BSS is entirely clean
2:  /* bss clear done */
    /* Set up stack in the linker script defined RAM area */
    lis     r1, STACK_TOP@h
    ori     r1, r1, STACK_TOP@l
    /* Set up a zeroized stack frame so that backtrace works right */
    li      r0, 0
    stwu    r0, -4(r1)
    stwu    r0, -4(r1)
    /*
     * Set up a dummy frame to store reset vector as return address.
     * this causes stack underflow to reset board.
     */
    stwu    r1, -8(r1)      /* Save back chain and move SP */
    lis     r0, RESET_VECTOR@h  /* Address of reset vector */
    ori     r0, r0, RESET_VECTOR@l
    stwu    r1, -8(r1)      /* Save back chain and move SP */
    stw     r0, +12(r1)     /* Save return addr (underflow vect) */
    GET_GOT                 /* initialize GOT access via r14 */
    /* NEVER RETURNS! */
    bl      rtthread_startup
/*
* Note: code which follows this uses cr0.eq (set if from kernel),
* r20(new MSR), r21(trap frame), r22 (SRR0), and r23 (SRR1).
*/
/*
* This code finishes saving the registers to the exception frame
* and jumps to the appropriate handler for the exception.
* Register r21 is pointer into trap frame, r1 has new stack pointer.
*/
/*
 * Finish saving registers into the exception frame built by
 * EXCEPTION_PROLOG and jump to the exception handler whose address (and
 * return stub) immediately follow the caller's blrl.
 * On entry: r20 = new MSR, r21 = trap frame, r22 = SRR0, r23 = SRR1,
 * r1 = new kernel sp; cr0.eq set if coming from kernel.
 */
transfer_to_handler:
    stw     r22,_NIP(r21)       /* saved PC */
    lis     r22,MSR_POW@h       /* clear POW bit */
    andc    r23,r23,r22         /* use normal power management */
    stw     r23,_MSR(r21)       /* MSR value when the exception returns */
    SAVE_GPR(7, r21)
    SAVE_4GPRS(8, r21)
    SAVE_8GPRS(12, r21)
    SAVE_8GPRS(24, r21)
    mflr    r23                 /* hdlr/int_return addr immediately follows */
    andi.   r24,r23,0x3f00      /* get vector offset */
    stw     r24,TRAP(r21)       /* vector address, such as 0x1000 for PIT */
    li      r22,0
    stw     r22,RESULT(r21)     /* clear the sc return value */
    mtspr   SPRG2,r22           /* r1 is now kernel sp */
    lwz     r24,0(r23)          /* virtual address of hdlr */
    lwz     r23,4(r23)          /* where to go when done */
    mtspr   SRR0,r24            /* hdlr */
    mtspr   SRR1,r20            /* MSR_KERNEL with ME enabled */
    mtlr    r23                 /* call hdlr and then return to int_return */
    SYNC                        /* note r3 has address for pt_regs on stack */
    rfi                         /* jump to handler, enable ME */
/*
 * Common return path from standard exception handlers: optionally switch
 * to the stack recorded in _MQ, disable interrupts, restore the full
 * frame and rfi back to the interrupted context.
 */
int_return:
    addi    r3,r1,STACK_FRAME_OVERHEAD
    lwz     r4,_MQ(r1)          /* non-zero: alternate frame to return on */
    cmpwi   r4, 0
    beq     goon_return
switch_stack:
    subi    r1,r4,STACK_FRAME_OVERHEAD
goon_return:
    mfmsr   r28                 /* Disable interrupts */
    li      r4,0
    ori     r4,r4,MSR_EE        /* clear External Interrupt Enable */
    ori     r4,r4,MSR_DE        /* clear Debug Interrupts Enable - 4xx */
    andc    r28,r28,r4
    SYNC                        /* Some chip revs need this... */
    mtmsr   r28
    SYNC
    lwz     r2,_CTR(r1)
    lwz     r0,_LINK(r1)
    mtctr   r2
    mtlr    r0
    lwz     r2,_XER(r1)
    lwz     r0,_CCR(r1)
    mtspr   XER,r2
    mtcrf   0xFF,r0
    REST_10GPRS(3, r1)
    REST_10GPRS(13, r1)
    REST_8GPRS(23, r1)
    REST_GPR(31, r1)
    lwz     r2,_NIP(r1)         /* Restore environment */
    lwz     r0,_MSR(r1)
    mtspr   SRR0,r2
    mtspr   SRR1,r0
    lwz     r0,GPR0(r1)
    lwz     r2,GPR2(r1)
    lwz     r1,GPR1(r1)         /* restore sp last */
    SYNC
    rfi
    b       .                   /* prevent prefetch past rfi */
/*
 * Return path from critical exception handlers: like int_return but
 * restores via CSRR0/CSRR1 and rfci.
 */
crit_return:
    mfmsr   r28                 /* Disable interrupts */
    li      r4,0
    ori     r4,r4,MSR_EE
    andc    r28,r28,r4
    SYNC                        /* Some chip revs need this... */
    mtmsr   r28
    SYNC
    lwz     r2,_CTR(r1)
    lwz     r0,_LINK(r1)
    mtctr   r2
    mtlr    r0
    lwz     r2,_XER(r1)
    lwz     r0,_CCR(r1)
    mtspr   XER,r2
    mtcrf   0xFF,r0
    REST_10GPRS(3, r1)
    REST_10GPRS(13, r1)
    REST_8GPRS(23, r1)
    REST_GPR(31, r1)
    lwz     r2,_NIP(r1)         /* Restore environment */
    lwz     r0,_MSR(r1)
    mtspr   SPRN_CSRR0,r2
    mtspr   SPRN_CSRR1,r0
    lwz     r0,GPR0(r1)
    lwz     r2,GPR2(r1)
    lwz     r1,GPR1(r1)         /* restore sp last */
    SYNC
    rfci
/* Return the Processor Version Register in r3. */
get_pvr:
    mfspr   r3, PVR
    blr
|
vandercookking/h7_device_RTT
| 2,464
|
rt-thread/libcpu/unicore32/sep6200/context_gcc.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-7-14 Peng Fan sep6200 implementation
*/
/**
* \addtogroup sep6200
*/
/*@{*/
#define NOINT 0xc0
/*
* rt_base_t rt_hw_interrupt_disable();
*/
/*
 * rt_base_t rt_hw_interrupt_disable(void);   (unicore32)
 * Returns the current ASR in r0, then masks interrupts by setting the
 * NOINT bits.  The caller later passes the returned value to
 * rt_hw_interrupt_enable() to restore the previous state.
 */
.globl rt_hw_interrupt_disable
.type rt_hw_interrupt_disable, %function
rt_hw_interrupt_disable:
stw.w r1, [sp-], #4 /* preserve r1 (used as scratch) */
mov r0, asr /* r0 = old status, the return value */
or r1, r0, #NOINT
mov.a asr, r1 /* write back with interrupts masked */
ldw.w r1, [sp]+, #4
mov pc, lr
/*
* void rt_hw_interrupt_enable(rt_base_t level);
*/
/*
 * void rt_hw_interrupt_enable(rt_base_t level);   (unicore32)
 * Restores the ASR value saved by rt_hw_interrupt_disable() (r0).
 */
.globl rt_hw_interrupt_enable
.type rt_hw_interrupt_enable, %function /* fixed: was mistyped as rt_hw_interrupt_disable */
rt_hw_interrupt_enable:
mov.a asr, r0
mov pc, lr
/*
* void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
* r0 --> from
* r1 --> to
*/
/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);  (unicore32)
 * r0 --> from (address of the outgoing thread's saved-sp slot)
 * r1 --> to   (address of the incoming thread's saved-sp slot)
 * Frame layout pushed (top down): resume pc, r16-r28/lr, r0-r15, asr, bsr;
 * then sp is exchanged through the two TCB slots and the same layout
 * is unwound for the incoming thread.
 */
.globl rt_hw_context_switch
.type rt_hw_context_switch, %function /* fixed: was mistyped as rt_hw_interrupt_disable */
rt_hw_context_switch:
stm.w (lr), [sp-] /* push return address as the resume pc */
stm.w (r16, r17, r18, r19, r20, r21, r22, r23, r24, r25, r26, r27, r28, lr), [sp-]
stm.w (r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15), [sp-]
mov r4, asr
stm.w (r4), [sp-] /* push current status */
mov r4, bsr
stm.w (r4), [sp-] /* push saved status */
stw sp, [r0+] /* *from = sp */
ldw sp, [r1+] /* sp = *to */
ldm.w (r4), [sp]+
mov.a bsr,r4
ldm.w (r4), [sp]+
mov.a asr, r4
ldm.w (r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15), [sp]+
ldm.w (r16, r17, r18, r19, r20, r21, r22, r23, r24, r25, r26, r27, r28, lr, pc), [sp]+
/*
* void rt_hw_context_switch_to(rt_uint32 to);
* r0 --> to
*/
/*
 * void rt_hw_context_switch_to(rt_uint32 to);   (unicore32)
 * r0 --> to (address of the thread's saved-sp slot).
 * Restores the target thread's full context (bsr, asr, r0-r28, lr, pc)
 * and never returns; used to start the first thread.
 */
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
ldw sp, [r0+] /* sp = *to */
ldm.w (r4), [sp]+
mov.a bsr, r4
ldm.w (r4), [sp]+
mov.a asr, r4
ldm.w (r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15), [sp]+
ldm.w (r16, r17, r18, r19, r20, r21, r22, r23, r24, r25, r26, r27, r28, lr, pc), [sp]+
/*
 * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
 * Records a pending thread switch to be performed on interrupt exit:
 * sets rt_thread_switch_interrupt_flag and stores from/to; the actual
 * switch happens in the IRQ return path (_interrupt_thread_switch).
 */
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
ldw r2, =rt_thread_switch_interrupt_flag
ldw r3, [r2+]
cmpsub.a r3, #1
beq _reswitch /* already pending: only update "to" */
mov r3, #1
stw r3, [r2+]
ldw r2, =rt_interrupt_from_thread
stw r0, [r2+]
_reswitch:
ldw r2, =rt_interrupt_to_thread
stw r1, [r2+]
mov pc, lr
|
vandercookking/h7_device_RTT
| 8,393
|
rt-thread/libcpu/unicore32/sep6200/start_gcc.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-07-13 Peng Fan First implementation
*/
#define CONFIG_STACKSIZE 1024
#define S_FRAME_SIZE 132
#define S_OLD_R0 132
#define S_PSR 128
#define S_PC 124
#define S_LR 120
#define S_SP 116
#define S_IP 112
#define S_FP 108
#define S_R26 104
#define S_R25 100
#define S_R24 96
#define S_R23 92
#define S_R22 88
#define S_R21 84
#define S_R20 80
#define S_R19 76
#define S_R18 72
#define S_R17 68
#define S_R16 64
#define S_R15 60
#define S_R14 56
#define S_R13 52
#define S_R12 48
#define S_R11 44
#define S_R10 40
#define S_R9 36
#define S_R8 32
#define S_R7 28
#define S_R6 24
#define S_R5 20
#define S_R4 16
#define S_R3 12
#define S_R2 8
#define S_R1 4
#define S_R0 0
.equ USERMODE, 0x10
.equ REALMODE, 0x11
.equ IRQMODE, 0x12
.equ PRIVMODE, 0x13
.equ TRAPMODE, 0x17
.equ EXTNMODE, 0x1b
.equ MODEMASK, 0x1f
.equ NOINT, 0xc0
/*
*************************************************************************
*
* Jump vector table
*
*************************************************************************
*/
/*
 * Exception vector table, placed in .init at the image base.
 * Entry 0 branches to reset; the others load pc indirectly from the
 * literal words below, so the table keeps working after reset copies
 * it to address 0.
 */
.section .init, "ax"
.code 32
.globl _start
_start:
b reset
ldw pc, _extend_handle
ldw pc, _swi_handle
ldw pc, _iabort_handle
ldw pc, _dabort_handle
ldw pc, _reserve_handle
ldw pc, _IRQ_handle
ldw pc, _FIQ_handle
_extend_handle: .word extend_handle
_swi_handle: .word swi_handle
_iabort_handle: .word iabort_handle
_dabort_handle: .word dabort_handle
_reserve_handle: .word reserve_handle
_IRQ_handle: .word IRQ_handle
_FIQ_handle: .word FIQ_handle
.balignl 16,0xdeadbeef /* pad to 16 bytes with a recognizable marker */
/*
 *************************************************************************
 *
 * Startup Code (reset vector)
 * relocate armboot to ram
 * setup stack
 * jump to second stage
 *
 *************************************************************************
 */
/* Linker-provided layout symbols exported as in-image word constants
 * so they can be fetched with a pc-relative ldw. */
.global _TEXT_BASE
_TEXT_BASE:
.word TEXT_BASE
.globl _rtthread_start
_rtthread_start:
.word _start
.globl _rtthread_end
_rtthread_end:
.word _end
.globl _bss_start
_bss_start:
.word __bss_start /* bss start address */
.globl _bss_end
_bss_end:
.word __bss_end
/* Per-mode stack tops: linker base symbol + stack size. */
.globl IRQ_STACK_START
IRQ_STACK_START:
.word _irq_stack_start + 1024
.globl FIQ_STACK_START
FIQ_STACK_START:
.word _fiq_stack_start +1024
.globl UNDEFINED_STACK_START
UNDEFINED_STACK_START:
.word _undefined_stack_start + CONFIG_STACKSIZE
.globl ABORT_STACK_START
ABORT_STACK_START:
.word _abort_stack_start + CONFIG_STACKSIZE
.globl _STACK_START
_STACK_START:
.word _priv_stack_start + 4096
.equ SEP6200_VIC_BASE, 0xb0000000
.equ SEP6200_SYSCTL_BASE, 0xb0008000
/* ----------------------------------entry------------------------------*/
/*
 * reset - system entry after the reset vector (sep6200).
 * Enters PRIV mode with interrupts masked, masks the VIC, remaps DDR
 * to address 0, sets up per-mode stacks, copies the vector table to
 * address 0, clears .bss, runs C++ static constructors and finally
 * jumps to rtthread_startup().
 */
reset:
/* set the cpu to PRIV mode and disable cpu interrupt */
mov r0, asr
andn r0, r0, #0xff
or r0, r0, #PRIVMODE|NOINT
mov.a asr, r0
/* mask all IRQs by clearing all bits in the INTMRs */
ldw r1, =SEP6200_VIC_BASE
ldw r0, =0xffffffff
stw r0, [r1+], #0x20 /*interrupt enable clear*/
stw r0, [r1+], #0x24
/*remap ddr to 0x00000000 address*/
ldw r1, =SEP6200_SYSCTL_BASE
ldw r0, [r1+]
ldw r2, =0x80000000
or r0, r0, r2
stw r0, [r1+] /* BUGFIX: write back the merged value (was "stw r2", which dropped the register's original contents) */
/* set interrupt vector */
/*do nothing here for vector*/
/* setup stack */
b.l stack_setup
/* copy the vector code to address 0 */
ldw r12, =0x100 /* 0x100 bytes: vectors + literal pool */
ldw r0, = 0x40000000
ldw r1, = 0x00000000
copy_vetor:
ldw r2, [r0]
stw r2, [r1]
add r0, r0, #4
add r1, r1, #4
sub r12, r12, #4
cmpsub.a r12, #0
bne copy_vetor
/* clear .bss */
ldw r0, _bss_start /* bss start */
ldw r1, _bss_end /* bss end */
mov r2,#0 /* get a zero */
bss_loop:
stw r2, [r0] /* clear one word per iteration */
add r0, r0, #4
cmpsub.a r0, r1
bel bss_loop /* NOTE(review): 'bel' = branch while r0 <= r1? confirm mnemonic semantics vs 'bne' */
/* call C++ constructors of global objects */
ldw r0, =__ctors_start__
ldw r1, =__ctors_end__
ctor_loop:
cmpsub.a r0, r1
beq ctor_end
ldw.w r2, [r0]+, #4 /* r2 = next constructor, cursor post-incremented */
stm.w (r0, r1), [sp-] /* preserve cursor/end across the call */
add lr, pc, #4 /* return address = the ldm below */
mov pc, r2
ldm.w (r0, r1), [sp]+
b ctor_loop
ctor_end:
/*enable interrupt*/
mov r0, asr
andn r1, r0, #NOINT
mov.a asr, r1
/* start RT-Thread Kernel */
ldw pc, _rtthread_startup
_rtthread_startup:
.word rtthread_startup
/*
*************************************************************************
*
* Interrupt handling
*
*************************************************************************
*/
/* exception handlers */
/*Just simple implementation here */
/* Exception entry stubs: each tail-branches to the C trap handler for
 * its exception class. */
.align 5
extend_handle:
b rt_hw_trap_extn
swi_handle:
b rt_hw_trap_swi
iabort_handle:
b rt_hw_trap_pabt
dabort_handle:
b rt_hw_trap_dabt
reserve_handle:
b rt_hw_trap_resv
.globl rt_interrupt_enter
.globl rt_interrupt_leave
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
/*
 * IRQ_handle - hardware interrupt entry.
 * Saves lr, r16-r28/lr and r0-r15, brackets the C dispatcher with
 * rt_interrupt_enter/rt_interrupt_leave, then either returns to the
 * interrupted context or jumps to the deferred thread switch.
 */
IRQ_handle:
stm.w (lr), [sp-]
stm.w (r16 - r28), [sp-]
stm.w (r0 - r15), [sp-]
b.l rt_interrupt_enter
b.l rt_hw_trap_irq
b.l rt_interrupt_leave
/* if rt_thread_switch_interrupt_flag set, jump to _interrupt_thread_switch and don't return */
ldw r0, =rt_thread_switch_interrupt_flag
ldw r1, [r0+]
cmpsub.a r1, #1
beq _interrupt_thread_switch
ldm.w (r0 - r15), [sp]+
ldm.w (r16 - r28), [sp]+
ldm.w (lr), [sp]+
mov.a pc, lr /* exception return -- presumably restores asr from bsr; confirm */
.align 5
FIQ_handle:
b rt_hw_trap_fiq
/*
 * _interrupt_thread_switch - perform the switch requested by
 * rt_hw_context_switch_interrupt() while in interrupt context:
 * rebuild the interrupted thread's full context frame on its own
 * stack, save sp into the "from" TCB and resume the "to" thread.
 */
_interrupt_thread_switch:
mov r1, #0 /* clear rt_thread_switch_interrupt_flag*/
stw r1, [r0+]
/*reload register*/
ldm.w (r0 - r15), [sp]+
ldm.w (r16 - r28), [sp]+
ldm.w (lr), [sp]+
stm.w (r0 - r3), [sp-] /*save r0-r3*/
mov r1, sp
add sp, sp, #16 /* restore sp */
mov r2, lr /* save old task's pc to r2 */
mov r3, bsr
mov r0, #0xd3 /*I:F:0:PRIV*/
mov.a asr, r0
stm.w (r2), [sp-] /* push old task's pc */
/* push old task's registers */
stm.w (lr), [sp-]
stm.w (r16 - r28), [sp-]
stm.w (r4 - r15), [sp-]
mov r4, r1 /* Special optimised code below */
mov r5, r3
ldm.w (r0 - r3), [r4]+
stm.w (r0 - r3), [sp-] /*push old task's r3-r0*/
stm.w (r5), [sp-] /* push old task's asr */
mov r4, bsr
stm.w (r4), [sp-] /* push old task's bsr*/
ldw r4, =rt_interrupt_from_thread
ldw r5, [r4+]
stw sp, [r5+] /* store sp in preempted tasks's TCB*/
ldw r6, =rt_interrupt_to_thread
ldw r6, [r6+]
ldw sp, [r6+] /* get new task's stack pointer */
ldm.w (r4), [sp]+ /* pop new task's spsr */
mov.a bsr, r4
ldm.w (r4), [sp]+ /* pop new task's psr */
mov.a asr, r4
/* pop new task's r0-r28,lr & pc */
ldm.w (r0 - r15), [sp]+
ldm.w (r16 - r28), [sp]+
ldm.w (lr), [sp]+
ldm.w (pc), [sp]+
/*
 * stack_setup - assign per-mode stack pointers.  Called with b.l from
 * reset; temporarily enters IRQ mode to load its sp, then returns to
 * PRIV mode.  lr is preserved in ip across the mode changes.
 */
stack_setup:
/*irq*/
mov ip, lr
mov r0, asr
andn r0, r0, #0x1f
or r0, r0, #IRQMODE|NOINT
mov.a asr, r0 /*IRQMODE*/
ldw r0, =IRQ_STACK_START
ldw sp, [r0+]
/*ldw sp, IRQ_STACK_START*/
/*priv*/
mov r0, asr
andn r0, r0, #0x1f
or r0, r0, #PRIVMODE|NOINT
mov.a asr, r0 /*PRIVMODE*/
ldw r0, =_STACK_START
ldw sp, [r0+]
/*ldw sp, _STACK_START*/
mov lr, ip
/*fiq and other mode is not implemented in code here*/
mov pc, lr /*lr may not be valid for the mode changes*/
/*@}*/
|
vandercookking/h7_device_RTT
| 5,505
|
rt-thread/libcpu/mips/gs232/cache_gcc.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2010-05-17 swkyer first version
* 2010-09-11 bernard port to Loongson SoC3210
* 2011-08-08 lgnq port to Loongson LS1B
* 2015-07-08 chinesebear port to Loongson LS1C
* 2019-07-19 Zhou Yanjie clean up code
*/
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif
#include <mips.h>
#include "cache.h"
#
# cache_init - probe I/D cache geometry from CP0 Config and initialize
# both caches (Loongson GS232).
# Note: runs under .set noreorder, so the instruction after each beq
# executes in the branch delay slot -- that is how the way counts
# (t7 = dcache ways, t3 = icache ways) are assigned before branching.
# t1 holds the caller's ra; a0=KSEG0 base, a1=icache size, a2=dcache size.
#
.ent cache_init
.global cache_init
.set noreorder
cache_init:
move t1,ra
####part 2####
cache_detect_4way:
mfc0 t4, CP0_CONFIG
andi t5, t4, 0x0e00
srl t5, t5, 9 #ic
andi t6, t4, 0x01c0
srl t6, t6, 6 #dc
addiu t8, $0, 1
addiu t9, $0, 2
#set dcache way
beq t6, $0, cache_d1way
addiu t7, $0, 1 #1 way
beq t6, t8, cache_d2way
addiu t7, $0, 2 #2 way
beq $0, $0, cache_d4way
addiu t7, $0, 4 #4 way
cache_d1way:
beq $0, $0, 1f
addiu t6, t6, 12 #1 way
cache_d2way:
beq $0, $0, 1f
addiu t6, t6, 11 #2 way
cache_d4way:
addiu t6, t6, 10 #4 way (10), 2 way(11), 1 way(12)
1: #set icache way
beq t5, $0, cache_i1way
addiu t3, $0, 1 #1 way
beq t5, t8, cache_i2way
addiu t3, $0, 2 #2 way
beq $0, $0, cache_i4way
addiu t3, $0, 4 #4 way
cache_i1way:
beq $0, $0, 1f
addiu t5, t5, 12
cache_i2way:
beq $0, $0, 1f
addiu t5, t5, 11
cache_i4way:
addiu t5, t5, 10 #4 way (10), 2 way(11), 1 way(12)
1: addiu t4, $0, 1
sllv t6, t4, t6 #t6 = total dcache size = 1 << (field + log2(ways') adj)
sllv t5, t4, t5 #t5 = total icache size
#if 0
la t0, memvar
sw t7, 0x0(t0) #ways
sw t5, 0x4(t0) #icache size
sw t6, 0x8(t0) #dcache size
#endif
####part 3####
.set mips3
lui a0, 0x8000 #a0 = KSEG0 base
addu a1, $0, t5
addu a2, $0, t6
cache_init_d2way:
#a0=0x80000000, a1=icache_size, a2=dcache_size
#a3, v0 and v1 used as local registers
mtc0 $0, CP0_TAGHI
addu v0, $0, a0
addu v1, a0, a2
1: slt a3, v0, v1
beq a3, $0, 1f
nop
mtc0 $0, CP0_TAGLO
beq t7, 1, 4f
cache Index_Store_Tag_D, 0x0(v0) # 1 way
beq t7, 2 ,4f
cache Index_Store_Tag_D, 0x1(v0) # 2 way
cache Index_Store_Tag_D, 0x2(v0) # 4 way
cache Index_Store_Tag_D, 0x3(v0)
4: beq $0, $0, 1b
addiu v0, v0, 0x20 #advance one 32-byte line (delay slot)
1:
cache_flush_i2way:
addu v0, $0, a0
addu v1, a0, a1
1: slt a3, v0, v1
beq a3, $0, 1f
nop
beq t3, 1, 4f
cache Index_Invalidate_I, 0x0(v0) # 1 way
beq t3, 2, 4f
cache Index_Invalidate_I, 0x1(v0) # 2 way
cache Index_Invalidate_I, 0x2(v0)
cache Index_Invalidate_I, 0x3(v0) # 4 way
4: beq $0, $0, 1b
addiu v0, v0, 0x20
1:
cache_flush_d2way:
addu v0, $0, a0
addu v1, a0, a2
1: slt a3, v0, v1
beq a3, $0, 1f
nop
beq t7, 1, 4f
cache Index_Writeback_Inv_D, 0x0(v0) #1 way
beq t7, 2, 4f
cache Index_Writeback_Inv_D, 0x1(v0) # 2 way
cache Index_Writeback_Inv_D, 0x2(v0)
cache Index_Writeback_Inv_D, 0x3(v0) # 4 way
4: beq $0, $0, 1b
addiu v0, v0, 0x20
1:
cache_init_finish:
jr t1 #return via the ra saved on entry
nop
.set reorder
.end cache_init
###########################
# Enable CPU cache #
###########################
# Sets the CP0 Config low bits (K0 field) to 0x3.
LEAF(enable_cpu_cache)
.set noreorder
mfc0 t0, CP0_CONFIG
nop
and t0, ~0x03
or t0, 0x03 # K0 = 3 -- presumably "cacheable"; confirm against the SoC manual
mtc0 t0, CP0_CONFIG
nop
.set reorder
j ra
END (enable_cpu_cache)
###########################
# disable CPU cache #
###########################
# Sets the CP0 Config K0 field to 0x2.
LEAF(disable_cpu_cache)
.set noreorder
mfc0 t0, CP0_CONFIG
nop
and t0, ~0x03
or t0, 0x2 # K0 = 2 -- presumably "uncached"; confirm against the SoC manual
mtc0 t0, CP0_CONFIG
nop
.set reorder
j ra
END (disable_cpu_cache)
/**********************************/
/* Clear CP0 TagLo */
/**********************************/
LEAF(Clear_TagLo)
.set noreorder
mtc0 zero, CP0_TAGLO
nop
.set reorder
j ra
END(Clear_TagLo)
.set mips3
/**********************************/
/* Invalidate Instruction Cache */
/* (all ways of the line indexed */
/* by a0) */
/**********************************/
LEAF(Invalidate_Icache_Ls1c)
.set noreorder
cache Index_Invalidate_I,0(a0)
cache Index_Invalidate_I,1(a0)
cache Index_Invalidate_I,2(a0)
cache Index_Invalidate_I,3(a0)
.set reorder
j ra
END(Invalidate_Icache_Ls1c)
/**********************************/
/* Invalidate Data Cache */
/**********************************/
LEAF(Invalidate_Dcache_ClearTag_Ls1c)
.set noreorder
cache Index_Store_Tag_D, 0(a0) # BDSLOT: clear tag
cache Index_Store_Tag_D, 1(a0) # BDSLOT: clear tag
.set reorder
j ra
END(Invalidate_Dcache_ClearTag_Ls1c)
LEAF(Invalidate_Dcache_Fill_Ls1c)
.set noreorder
cache Index_Writeback_Inv_D, 0(a0) # BDSLOT: clear tag
cache Index_Writeback_Inv_D, 1(a0) # BDSLOT: clear tag
.set reorder
j ra
END(Invalidate_Dcache_Fill_Ls1c)
/* Write back and invalidate the D-cache line hitting address a0. */
LEAF(Writeback_Invalidate_Dcache)
.set noreorder
cache Hit_Writeback_Inv_D, (a0)
.set reorder
j ra
END(Writeback_Invalidate_Dcache)
.set mips0
|
vandercookking/h7_device_RTT
| 1,598
|
rt-thread/libcpu/mips/common/exception_gcc.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-12-04 Jiaxun Yang Initial version
* 2020-07-26 lizhirui Add xtlb exception entry
*/
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif
#include <mips.h>
.section ".exc_vectors", "ax"
.extern tlb_refill_handler
.extern cache_error_handler
.extern mips_irq_handle
/* 0x0 - TLB refill handler */
/*
 * Exception vector block, laid out at fixed offsets from ebase_start
 * via .org.  All vectors funnel into _general_exception_handler,
 * which jumps to the C-side mips_irq_handle.
 * NOTE(review): tlb_refill_handler is declared .extern above but the
 * refill vectors branch to the general handler instead -- confirm
 * this is intentional.
 */
.global tlb_refill_exception
.type tlb_refill_exception,@function
ebase_start:
tlb_refill_exception:
b _general_exception_handler
nop
/* 0x080 - XTLB refill handler */
.org ebase_start + 0x080
b _general_exception_handler
nop
/* 0x100 - Cache error handler */
.org ebase_start + 0x100
j cache_error_handler
nop
/* 0x180 - Exception/Interrupt handler */
.global general_exception
.type general_exception,@function
.org ebase_start + 0x180
general_exception:
b _general_exception_handler
nop
/* 0x200 - Special Exception Interrupt handler (when IV is set in CP0_CAUSE) */
.global irq_exception
.type irq_exception,@function
.org ebase_start + 0x200
irq_exception:
b _general_exception_handler
nop
/* general exception handler: long-range jump through k0 */
_general_exception_handler:
.set noreorder
PTR_LA k0, mips_irq_handle
jr k0
nop
.set reorder
/* interrupt handler (same target; kept for symmetry) */
_irq_handler:
.set noreorder
PTR_LA k0, mips_irq_handle
jr k0
nop
.set reorder
|
vandercookking/h7_device_RTT
| 3,069
|
rt-thread/libcpu/mips/common/context_gcc.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-12-04 Jiaxun Yang Initial version
* 2020-07-26 lizhirui Fixed some problems
*/
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif
#include "mips_regs.h"
#include "stackframe.h"
.section ".text", "ax"
.set noreorder
/*
* void rt_hw_context_switch(rt_uint32 from, rt_uint32 to)
* a0 --> from
* a1 --> to
*/
/*
 * Saves the caller's context (resume pc = ra stored via EPC, full
 * frame via SAVE_ALL from stackframe.h), exchanges sp through the two
 * TCB saved-sp slots, and restores the target thread's context.
 */
.globl rt_hw_context_switch
rt_hw_context_switch:
MTC0 ra, CP0_EPC
SAVE_ALL
REG_S sp, 0(a0) /* store sp in preempted tasks TCB */
REG_L sp, 0(a1) /* get new task stack pointer */
RESTORE_ALL_AND_RET
/*
 * void rt_hw_context_switch_to(rt_uint32 to)
 * a0 --> to (address of the thread's saved-sp slot); never returns.
 */
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
REG_L sp, 0(a0) /* get new task stack pointer */
RESTORE_ALL_AND_RET
/*
 * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to)
 * Records a pending switch for mips_irq_handle to perform on exit.
 */
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
PTR_LA t0, rt_thread_switch_interrupt_flag
REG_L t1, 0(t0)
nop
bnez t1, _reswitch /* already pending: only update "to" */
nop
li t1, 0x01 /* set rt_thread_switch_interrupt_flag to 1 */
LONG_S t1, 0(t0)
PTR_LA t0, rt_interrupt_from_thread /* set rt_interrupt_from_thread */
LONG_S a0, 0(t0)
_reswitch:
PTR_LA t0, rt_interrupt_to_thread /* set rt_interrupt_to_thread */
LONG_S a1, 0(t0)
jr ra
nop
/*
* void rt_hw_context_switch_interrupt_do(rt_base_t flag)
*/
/* C hooks used by the interrupt path. */
.globl rt_interrupt_enter
.globl rt_interrupt_leave
.globl rt_general_exc_dispatch
.globl mips_irq_handle
/*
 * mips_irq_handle - common exception/interrupt entry.
 * Saves the full context (SAVE_ALL), runs the C dispatcher on the
 * dedicated _system_stack, then either restores the interrupted
 * context or switches to the thread recorded by
 * rt_hw_context_switch_interrupt().
 */
mips_irq_handle:
SAVE_ALL
/* let k0 keep the current context sp */
move k0, sp
/* switch to kernel stack */
PTR_LA sp, _system_stack
jal rt_interrupt_enter
nop
/* Get Old SP from k0 as parameter in a0 */
move a0, k0
jal rt_general_exc_dispatch
nop
jal rt_interrupt_leave
nop
/* switch sp back to thread context */
move sp, k0
/*
 * if rt_thread_switch_interrupt_flag set, jump to
 * rt_hw_context_switch_interrupt_do and do not return
 */
PTR_LA k0, rt_thread_switch_interrupt_flag
LONG_L k1, 0(k0)
beqz k1, spurious_interrupt
nop
LONG_S zero, 0(k0) /* clear flag */
nop
/*
 * switch to the new thread
 */
PTR_LA k0, rt_interrupt_from_thread
LONG_L k1, 0(k0)
nop
LONG_S sp, 0(k1) /* store sp in preempted task TCB */
PTR_LA k0, rt_interrupt_to_thread
LONG_L k1, 0(k0)
nop
LONG_L sp, 0(k1) /* get new task stack pointer */
j spurious_interrupt
nop
spurious_interrupt:
RESTORE_ALL_AND_RET
.set reorder
|
vandercookking/h7_device_RTT
| 1,153
|
rt-thread/libcpu/mips/common/entry_gcc.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-12-04 Jiaxun Yang Initial version
*/
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif
#include <mips.h>
#include <rtconfig.h>
#include "asm.h"
#include <rtconfig.h>
.section ".start", "ax"
.set noreorder
/* the program entry */
/*
 * _rtthread_entry - boot entry point.
 * Disables interrupts, sets up sp/gp, runs early CPU init, clears
 * .bss and calls rtthread_startup().  ra is pointed back here so a
 * return restarts the boot sequence.
 */
.globl _rtthread_entry
_rtthread_entry:
#ifndef RT_USING_SELF_BOOT
.globl _start
_start:
#endif
PTR_LA ra, _rtthread_entry
/* disable interrupt */
MTC0 zero, CP0_CAUSE
MTC0 zero, CP0_STATUS # Set CPU to disable interrupt.
ehb
#ifdef ARCH_MIPS64
dli t0, ST0_KX
MTC0 t0, CP0_STATUS
#endif
/* setup stack pointer */
PTR_LA sp, _system_stack
PTR_LA gp, _gp
bal rt_cpu_early_init
nop
/* clear bss */
PTR_LA t0, __bss_start
PTR_LA t1, __bss_end
_clr_bss_loop:
sw zero, 0(t0) # NOTE(review): clears 4 bytes/iteration even on MIPS64 -- assumes bss size is a multiple of 4
bne t1, t0, _clr_bss_loop
addu t0, 4 # advance in the branch delay slot
/* jump to RT-Thread RTOS */
jal rtthread_startup
nop
/* restart, never die */
j _start
nop
|
vandercookking/h7_device_RTT
| 3,607
|
rt-thread/libcpu/mips/pic32/context_gcc.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2011-05-24 aozima first version
* 2019-07-19 Zhou Yanjie clean up code
*/
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif
#include <p32xxxx.h>
#include "../common/mips_def.h"
#include "../common/stackframe.h"
.section ".text", "ax"
.set noat
.set noreorder
/*
* rt_base_t rt_hw_interrupt_disable()
*/
/* Clears Status bit 0 (IE) and returns the previous CP0 Status in v0. */
.globl rt_hw_interrupt_disable
rt_hw_interrupt_disable:
mfc0 v0, CP0_STATUS /* v0 = status */
addiu v1, zero, -2 /* v1 = 0-2 = 0xFFFFFFFE */
and v1, v0, v1 /* v1 = v0 & 0xFFFFFFFE */
mtc0 v1, CP0_STATUS /* status = v1 */
jr ra
nop
/*
 * void rt_hw_interrupt_enable(rt_base_t level)
 * Restores the Status value returned by rt_hw_interrupt_disable().
 */
.globl rt_hw_interrupt_enable
rt_hw_interrupt_enable:
mtc0 a0, CP0_STATUS
jr ra
nop
/*
 * void rt_hw_context_switch_to(rt_uint32 to)
 * a0 --> to (address of the thread's saved-sp slot); never returns.
 */
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
lw sp, 0(a0) /* get new task stack pointer */
RESTORE_ALL_AND_RET
/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to)
 * a0 --> from
 * a1 --> to
 * Saves the caller's context (resume pc = ra via EPC) and restores
 * the target thread's context.
 */
.globl rt_hw_context_switch
rt_hw_context_switch:
mtc0 ra, CP0_EPC
SAVE_ALL
sw sp, 0(a0) /* store sp in preempted tasks TCB */
lw sp, 0(a1) /* get new task stack pointer */
RESTORE_ALL_AND_RET
/*
 * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to)
 * Records a pending switch, then pulses Cause bit 8 (software
 * interrupt 0) so CoreSW0Handler performs the actual switch.
 */
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
la t0, rt_thread_switch_interrupt_flag
lw t1, 0(t0)
nop
bnez t1, _reswitch /* already pending: only update "to" */
nop
li t1, 0x01 /* set rt_thread_switch_interrupt_flag to 1 */
sw t1, 0(t0)
la t0, rt_interrupt_from_thread /* set rt_interrupt_from_thread */
sw a0, 0(t0)
_reswitch:
la t0, rt_interrupt_to_thread /* set rt_interrupt_to_thread */
sw a1, 0(t0)
/* trigger the soft exception (causes context switch) */
mfc0 t0, CP0_CAUSE /* t0 = Cause */
ori t0, t0, (1<<8) /* t0 |= (1<<8) */
mtc0 t0, CP0_CAUSE /* cause = t0 */
addiu t1, zero, -257 /* t1 = ~(1<<8) */
and t0, t0, t1 /* t0 &= t1 */
mtc0 t0, CP0_CAUSE /* cause = t0 */
jr ra
nop
/*
* void __ISR(_CORE_SOFTWARE_0_VECTOR, ipl2) CoreSW0Handler(void)
*/
/*
 * CoreSW0Handler - core software interrupt 0 ISR (PIC32).
 * Triggered by rt_hw_context_switch_interrupt; acknowledges the
 * interrupt and performs the recorded thread switch.
 */
.section ".text", "ax"
.set noreorder
.set noat
.ent CoreSW0Handler
.globl CoreSW0Handler
CoreSW0Handler:
SAVE_ALL
/* mCS0ClearIntFlag(); */
la t0, IFS0CLR /* t0 = IFS0CLR */
addiu t1,zero,0x02 /* t1 = 0x02 = bit 1 -- presumably the CS0 interrupt-flag bit */
sw t1, 0(t0) /* IFS0CLR = t1 */
la k0, rt_thread_switch_interrupt_flag
sw zero, 0(k0) /* clear flag */
/*
 * switch to the new thread
 */
la k0, rt_interrupt_from_thread
lw k1, 0(k0)
nop
sw sp, 0(k1) /* store sp in preempted tasks's TCB */
la k0, rt_interrupt_to_thread
lw k1, 0(k0)
nop
lw sp, 0(k1) /* get new task's stack pointer */
RESTORE_ALL_AND_RET
.end CoreSW0Handler
|
vandercookking/h7_device_RTT
| 5,831
|
rt-thread/libcpu/xilinx/microblaze/context_gcc.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2011-12-17 nl1031 first implementation for MicroBlaze.
*/
#include "microblaze.inc"
.text
.globl rt_interrupt_enter
.globl rt_interrupt_leave
/*
* rt_base_t rt_hw_interrupt_disable()
* copy from ucos-ii
*/
/*
 * Returns the current MSR in r3, then clears the IE bit.
 * copy from ucos-ii
 */
.globl rt_hw_interrupt_disable
.ent rt_hw_interrupt_disable
.align 2
rt_hw_interrupt_disable:
ADDIK r1, r1, -4
SW r4, r1, r0 /* preserve r4 scratch */
MFS r3, RMSR /* r3 = old MSR, the return value */
ANDNI r4, r3, IE_BIT
MTS RMSR, r4 /* write back with IE cleared */
LW r4, r1, r0
ADDIK r1, r1, 4
AND r0, r0, r0 /* NO-OP - pipeline flush */
AND r0, r0, r0 /* NO-OP - pipeline flush */
AND r0, r0, r0 /* NO-OP - pipeline flush */
RTSD r15, 8
AND r0, r0, r0 /* branch delay slot */
.end rt_hw_interrupt_disable
/*
 * void rt_hw_interrupt_enable(rt_base_t level)
 * Restores the MSR saved by rt_hw_interrupt_disable() (r5).
 * copy from ucos-ii
 */
.globl rt_hw_interrupt_enable
.ent rt_hw_interrupt_enable
.align 2
rt_hw_interrupt_enable:
RTSD r15, 8
MTS rMSR, r5 /* Move the saved status from r5 into rMSR */
.end rt_hw_interrupt_enable
/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to)
 * r5 --> from
 * r6 --> to
 * Saves the full context (PUSH_ALL + MSR), swaps stacks through the
 * two TCB slots, and returns with RTSD or RTID depending on whether
 * the new context was saved with interrupts enabled.
 */
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_hw_context_switch
.ent rt_hw_context_switch
.align 2
rt_hw_context_switch:
PUSH_ALL
MFS r3, RMSR /* save the MSR */
SWI r3, r1, STACK_RMSR
SWI r1, r5, 0 /* store sp in preempted tasks TCB */
LWI r1, r6, 0 /* get new task stack pointer */
LWI r3, r1, STACK_RMSR
ANDI r3, r3, IE_BIT
BNEI r3, rt_hw_context_switch_ie /*if IE bit set,should be use RTID (return from interrupt). */
LWI r3, r1, STACK_RMSR
MTS RMSR,r3
POP_ALL
ADDIK r1, r1, STACK_SIZE
RTSD r15, 8
AND r0, r0, r0
rt_hw_context_switch_ie:
LWI r3, r1, STACK_RMSR
ANDNI r3, r3, IE_BIT /* clear IE bit, prevent interrupt occur immediately*/
MTS RMSR,r3
LWI r3, r1, STACK_R03
POP_ALL
ADDIK r1, r1, STACK_SIZE
RTID r14, 0 /* IE bit will be set automatically */
AND r0, r0, r0
.end rt_hw_context_switch
/*
 * void rt_hw_context_switch_to(rt_uint32 to)
 * r5 --> to (address of the thread's saved-sp slot); never returns.
 */
.globl rt_hw_context_switch_to
.ent rt_hw_context_switch_to
.align 2
rt_hw_context_switch_to:
LWI r1, r5, 0 /* get new task stack pointer */
LWI r3, r1, STACK_RMSR
ANDNI r3, r3, IE_BIT /* clear IE bit, prevent interrupt occur immediately*/
MTS RMSR,r3
POP_ALL
ADDIK r1, r1, STACK_SIZE
RTID r14, 0 /* IE bit will be set automatically */
AND r0, r0, r0
.end rt_hw_context_switch_to
/*
 * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to)
 * Records a deferred switch for _interrupt_handler to perform.
 */
.globl rt_thread_switch_interrupt_flag
.globl rt_hw_context_switch_interrupt
.ent rt_hw_context_switch_interrupt
.align 2
rt_hw_context_switch_interrupt:
LA r3, r0, rt_thread_switch_interrupt_flag
LWI r4, r3, 0 /* load rt_thread_switch_interrupt_flag into r4 */
ANDI r4, r4, 1
BNEI r4, _reswitch /* if rt_thread_switch_interrupt_flag = 1 */
ADDIK r4, r0, 1 /* set rt_thread_switch_interrupt_flag to 1 */
SWI r4, r3, 0
LA r3, r0, rt_interrupt_from_thread /* set rt_interrupt_from_thread */
SWI r5, r3, 0 /* rt_interrupt_from_thread = from */
_reswitch:
LA r3, r0, rt_interrupt_to_thread /* set rt_interrupt_to_thread */
SWI r6, r3, 0 /* rt_interrupt_to_thread = to */
RTSD r15, 8
AND r0, r0, r0
.end rt_hw_context_switch_interrupt
/*
 * _interrupt_handler - hardware interrupt entry (MicroBlaze).
 * Saves the context with IE forced on in the stored MSR (so a resumed
 * thread re-enables interrupts), runs the RT-Thread enter/trap/leave
 * hooks, then returns or performs the deferred thread switch.
 */
.globl _interrupt_handler
.section .text
.align 2
.ent _interrupt_handler
.type _interrupt_handler, @function
_interrupt_handler:
PUSH_ALL
MFS r3, RMSR
ORI r3, r3, IE_BIT /* record MSR with IE set for the resume path */
SWI r3, r1, STACK_RMSR /* push MSR */
BRLID r15, rt_interrupt_enter
AND r0, r0, r0
BRLID r15, rt_hw_trap_irq
AND r0, r0, r0
BRLID r15, rt_interrupt_leave
AND r0, r0, r0
/*
 * if rt_thread_switch_interrupt_flag set, jump to
 * rt_hw_context_switch_interrupt_do and don't return
 */
LA r3, r0, rt_thread_switch_interrupt_flag
LWI r4, r3, 0
ANDI r4, r4, 1
BNEI r4, rt_hw_context_switch_interrupt_do
LWI r3, r1, STACK_RMSR
ANDNI r3, r3, IE_BIT
MTS RMSR,r3
POP_ALL
ADDIK r1, r1, STACK_SIZE
RTID r14, 0
AND r0, r0, r0
/*
 * void rt_hw_context_switch_interrupt_do(rt_base_t flag)
 * Deferred switch: on entry r3 still points at the flag word.
 */
rt_hw_context_switch_interrupt_do:
SWI r0, r3, 0 /* clear rt_thread_switch_interrupt_flag */
LA r3, r0, rt_interrupt_from_thread
LW r4, r0, r3
SWI r1, r4, 0 /* store sp in preempted tasks's TCB */
LA r3, r0, rt_interrupt_to_thread
LW r4, r0, r3
LWI r1, r4, 0 /* get new task's stack pointer */
LWI r3, r1, STACK_RMSR
ANDI r3, r3, IE_BIT
BNEI r3, return_with_ie /*if IE bit set,should be use RTID (return from interrupt). */
LWI r3, r1, STACK_RMSR
MTS RMSR,r3
POP_ALL
ADDIK r1, r1, STACK_SIZE
RTSD r15, 8
AND r0, r0, r0
return_with_ie:
LWI r3, r1, STACK_RMSR
ANDNI r3, r3, IE_BIT /* clear IE bit, prevent interrupt occur immediately*/
MTS RMSR,r3
LWI r3, r1, STACK_R03
POP_ALL
ADDIK r1, r1, STACK_SIZE
RTID r14, 0 /* IE bit will be set automatically */
AND r0, r0, r0
.end _interrupt_handler
|
vandercookking/h7_device_RTT
| 2,336
|
rt-thread/libcpu/arm/lpc24xx/context_gcc.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2008-12-11 XuXinming first version
*/
/*!
* \addtogroup LPC2478
*/
/*@{*/
#define NOINT 0xc0
/*
* rt_base_t rt_hw_interrupt_disable();
*/
/* Returns the current CPSR in r0 and masks IRQ/FIQ afterwards. */
.globl rt_hw_interrupt_disable
rt_hw_interrupt_disable:
mrs r0, cpsr @ r0 = old CPSR, the return value
orr r1, r0, #NOINT @ set the I and F mask bits
msr cpsr_c, r1
mov pc, lr
/*
 * void rt_hw_interrupt_enable(rt_base_t level);
 * Restores the CPSR saved by rt_hw_interrupt_disable() (r0).
 */
.globl rt_hw_interrupt_enable
rt_hw_interrupt_enable:
msr cpsr, r0
mov pc, lr
/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
 * r0 --> from (address of outgoing thread's saved-sp slot)
 * r1 --> to   (address of incoming thread's saved-sp slot)
 * Frame layout (top down): pc, r0-r12/lr, cpsr, spsr.
 */
.globl rt_hw_context_switch
rt_hw_context_switch:
stmfd sp!, {lr} @ push pc (lr should be pushed in place of PC)
stmfd sp!, {r0-r12, lr} @ push lr & register file
mrs r4, cpsr
stmfd sp!, {r4} @ push cpsr
mrs r4, spsr
stmfd sp!, {r4} @ push spsr
str sp, [r0] @ store sp in preempted tasks TCB
ldr sp, [r1] @ get new task stack pointer
ldmfd sp!, {r4} @ pop new task spsr
msr spsr_cxsf, r4
ldmfd sp!, {r4} @ pop new task cpsr
msr cpsr_cxsf, r4
ldmfd sp!, {r0-r12, lr, pc} @ pop new task r0-r12, lr & pc
/*
 * void rt_hw_context_switch_to(rt_uint32 to);
 * r0 --> to ; restores the target context and never returns.
 */
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
ldr sp, [r0] @ get new task stack pointer
ldmfd sp!, {r4} @ pop new task spsr
msr spsr_cxsf, r4
ldmfd sp!, {r4} @ pop new task cpsr
msr cpsr_cxsf, r4
ldmfd sp!, {r0-r12, lr, pc} @ pop new task r0-r12, lr & pc
/*
 * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
 * Records a pending switch for the IRQ exit path to perform.
 */
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
ldr r2, =rt_thread_switch_interrupt_flag
ldr r3, [r2]
cmp r3, #1
beq _reswitch @ already pending: only update "to"
mov r3, #1 @ set rt_thread_switch_interrupt_flag to 1
str r3, [r2]
ldr r2, =rt_interrupt_from_thread @ set rt_interrupt_from_thread
str r0, [r2]
_reswitch:
ldr r2, =rt_interrupt_to_thread @ set rt_interrupt_to_thread
str r1, [r2]
mov pc, lr
|
vandercookking/h7_device_RTT
| 7,862
|
rt-thread/libcpu/arm/lpc24xx/start_gcc.S
|
/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2008-12-11     XuXinming    first version
 * 2011-03-17     Bernard      update to 0.4.x
 */
/* Peripheral register addresses used during early boot (this is a
 * preprocessed .S file, so cpp #define is available). */
#define WDMOD (0xE0000000 + 0x00) /* watchdog mode register */
#define VICIntEnClr (0xFFFFF000 + 0x014) /* VIC interrupt enable clear */
#define VICVectAddr (0xFFFFF000 + 0xF00) /* VIC current vector address */
#define VICIntSelect (0xFFFFF000 + 0x00C) /* VIC IRQ/FIQ select */
#define PLLCFG (0xE01FC000 + 0x084) /* PLL configuration */
#define PLLCON (0xE01FC000 + 0x080) /* PLL control */
#define PLLFEED (0xE01FC000 + 0x08C) /* PLL feed sequence register */
#define PLLSTAT (0xE01FC000 + 0x088) /* PLL status */
#define CCLKCFG (0xE01FC000 + 0x104) /* CPU clock divider */
#define MEMMAP (0xE01FC000 + 0x040) /* memory mapping control */
#define SCS (0xE01FC000 + 0x1A0) /* system control and status */
#define CLKSRCSEL (0xE01FC000 + 0x10C) /* PLL clock source select */
#define MAMCR (0xE01FC000 + 0x000) /* memory accelerator control */
#define MAMTIM (0xE01FC000 + 0x004) /* memory accelerator timing */
/* stack memory */
/*
 * Per-mode exception stacks.  ARM stacks here are full-descending, so
 * each label is deliberately placed AFTER its .space: the label marks
 * the highest address of the region, which is the initial sp value
 * loaded for that mode by stack_setup below.
 */
.section .bss.noinit
.equ IRQ_STACK_SIZE, 0x00000200
.equ FIQ_STACK_SIZE, 0x00000100
.equ UDF_STACK_SIZE, 0x00000004
.equ ABT_STACK_SIZE, 0x00000004
.equ SVC_STACK_SIZE, 0x00000200
.space IRQ_STACK_SIZE
IRQ_STACK:
.space FIQ_STACK_SIZE
FIQ_STACK:
.space UDF_STACK_SIZE
UDF_STACK:
.space ABT_STACK_SIZE
ABT_STACK:
.space SVC_STACK_SIZE
SVC_STACK:
.section .init, "ax"
.code 32
/*
 * ARM exception vector table (must sit at the start of the image).
 * Eight 4-byte slots: reset, undefined instruction, SWI, prefetch
 * abort, data abort, reserved, IRQ, FIQ.  Each non-reset slot loads
 * pc from a nearby literal word so the handlers may live anywhere in
 * the 32-bit address space (a plain 'b' only reaches +/-32MB).
 */
.globl _start
_start:
b reset @ reset: branch straight into startup code
ldr pc, _vector_undef
ldr pc, _vector_swi
ldr pc, _vector_pabt
ldr pc, _vector_dabt
ldr pc, _vector_resv
ldr pc, _vector_irq
ldr pc, _vector_fiq
/* literal pool holding the handler addresses for the slots above */
_vector_undef: .word vector_undef
_vector_swi: .word vector_swi
_vector_pabt: .word vector_pabt
_vector_dabt: .word vector_dabt
_vector_resv: .word vector_resv
_vector_irq: .word vector_irq
_vector_fiq: .word vector_fiq
.balignl 16,0xdeadbeef @ pad to 16 bytes, filling with a magic marker
/*
 * rtthread kernel start and end
 * which are defined in linker script
 */
/* Each pair below emits a word in ROM holding a linker-script address,
 * so C code can read the image/bss bounds through these symbols. */
.globl _rtthread_start
_rtthread_start:
.word _start
.globl _rtthread_end
_rtthread_end:
.word _end
/*
 * rtthread bss start and end which are defined in linker script
 */
.globl _bss_start
_bss_start:
.word __bss_start
.globl _bss_end
_bss_end:
.word __bss_end
.text
.code 32
/* the system entry */
/*
 * Reset handler: force SVC mode with interrupts masked, silence the
 * watchdog and VIC, set up the per-mode stacks, copy .data from ROM
 * to SRAM, zero .bss, run C++ global constructors, then jump to
 * rtthread_startup (never returns).
 */
reset:
/* enter svc mode, IRQ/FIQ masked (SVCMODE/NOINT are .equ'd below;
 * gas resolves the forward references) */
msr cpsr_c, #SVCMODE|NOINT
/*watch dog disable */
ldr r0,=WDMOD
ldr r1,=0x0
str r1,[r0]
/* all interrupt disable */
ldr r0,=VICIntEnClr
ldr r1,=0xffffffff
str r1,[r0]
ldr r1, =VICVectAddr @ clear any active vector address
ldr r0, =0x00
str r0, [r1]
ldr r1, =VICIntSelect @ route all sources to IRQ (none to FIQ)
ldr r0, =0x00
str r0, [r1]
/* setup stack */
bl stack_setup
/* copy .data to SRAM */
ldr r1, =_sidata /* .data load address in ROM image */
ldr r2, =_edata /* .data end (NOTE(review): presumably the SRAM end
                   address, since it is compared against r3 below —
                   confirm against the linker script) */
ldr r3, =_sdata /* sram data start */
data_loop:
ldr r0, [r1, #0]
str r0, [r3]
add r1, r1, #4
add r3, r3, #4
cmp r3, r2 /* copy finished? */
blo data_loop /* loop until done */
/* clear .bss */
mov r0,#0 /* get a zero */
ldr r1,=__bss_start /* bss start */
ldr r2,=__bss_end /* bss end */
bss_loop:
cmp r1,r2 /* check if data to clear */
strlo r0,[r1],#4 /* clear 4 bytes */
blo bss_loop /* loop until done */
/* call C++ constructors of global objects */
ldr r0, =__ctors_start__
ldr r1, =__ctors_end__
ctor_loop:
cmp r0, r1
beq ctor_end
ldr r2, [r0], #4 @ r2 = next constructor pointer
stmfd sp!, {r0-r1} @ ctor may clobber r0/r1 (AAPCS scratch regs)
mov lr, pc @ set return address (pc reads as .+8 in ARM state)
bx r2 @ call constructor (may be Thumb)
ldmfd sp!, {r0-r1}
b ctor_loop
ctor_end:
/* start RT-Thread Kernel */
ldr pc, _rtthread_startup @ absolute jump; does not return
_rtthread_startup:
.word rtthread_startup
/* ARM cpsr mode-field values and interrupt-mask bits */
.equ USERMODE, 0x10 /* user mode */
.equ FIQMODE, 0x11 /* fast interrupt mode */
.equ IRQMODE, 0x12 /* interrupt mode */
.equ SVCMODE, 0x13 /* supervisor mode */
.equ ABORTMODE, 0x17 /* abort mode */
.equ UNDEFMODE, 0x1b /* undefined-instruction mode */
.equ MODEMASK, 0x1f /* mask for the cpsr mode field */
.equ NOINT, 0xc0 /* I|F bits set: IRQ and FIQ disabled */
/* exception handlers */
/* Each stub calls its C trap handler.  NOTE(review): no context is
 * saved and no exception return follows the bl, so these handlers are
 * presumably fatal (report and halt) — confirm in the C trap code. */
vector_undef: bl rt_hw_trap_udef
vector_swi: bl rt_hw_trap_swi
vector_pabt: bl rt_hw_trap_pabt
vector_dabt: bl rt_hw_trap_dabt
vector_resv: bl rt_hw_trap_resv
.globl rt_interrupt_enter
.globl rt_interrupt_leave
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
/*
 * IRQ entry: save the caller-visible registers on the IRQ stack,
 * bracket the C dispatcher with rt_interrupt_enter/leave, then either
 * return to the interrupted code or divert to the deferred context
 * switch requested by rt_hw_context_switch_interrupt.
 */
vector_irq:
stmfd sp!, {r0-r12,lr} @ save interrupted context on IRQ stack
bl rt_interrupt_enter
bl rt_hw_trap_irq @ dispatch to the C interrupt handler
bl rt_interrupt_leave
/* if rt_thread_switch_interrupt_flag set,
 * jump to _interrupt_thread_switch and don't return
 * (note: r0 still holds &rt_thread_switch_interrupt_flag there)
 */
ldr r0, =rt_thread_switch_interrupt_flag
ldr r1, [r0]
cmp r1, #1
beq _interrupt_thread_switch
ldmfd sp!, {r0-r12,lr} @ no switch: restore and
subs pc, lr, #4 @ return from IRQ (restores cpsr from spsr)
.align 5 @ 32-byte align the FIQ handler
vector_fiq:
stmfd sp!,{r0-r7,lr} @ r8-r12 are banked in FIQ mode
bl rt_hw_trap_fiq
ldmfd sp!,{r0-r7,lr}
subs pc,lr,#4 @ return from FIQ (restores cpsr from spsr)
/*
 * Deferred context switch taken on the IRQ exit path.
 * Entry state: IRQ mode, r0 = &rt_thread_switch_interrupt_flag,
 * IRQ stack holds the interrupted thread's {r0-r12,lr}.
 * Goal: rebuild a full thread frame (spsr, cpsr, r0-r12, lr, pc) on
 * the interrupted thread's OWN stack, then restore the target thread.
 * The tricky part is leaving IRQ mode: 'movs pc, r0' with r0 = .+8
 * copies spsr into cpsr, dropping us into the interrupted thread's
 * mode (interrupts kept masked via NOINT) at the next instruction,
 * where sp is the thread's banked stack pointer.
 */
_interrupt_thread_switch:
mov r1, #0 /* clear rt_thread_switch_interrupt_flag */
str r1, [r0]
ldmfd sp!, {r0-r12,lr} /* reload saved registers */
stmfd sp!, {r0-r3} /* park r0-r3 on the IRQ stack; r1 points at them */
mov r1, sp
add sp, sp, #16 /* restore IRQ sp for the next interrupt */
sub r2, lr, #4 /* r2 = interrupted thread's resume pc */
mrs r3, spsr /* r3 = interrupted thread's cpsr */
orr r0, r3, #NOINT /* keep IRQ/FIQ masked after the mode change */
msr spsr_c, r0
ldr r0, =.+8 /* mode change lands on the next instruction */
movs pc, r0 /* movs pc: cpsr := spsr -> thread mode, thread sp */
stmfd sp!, {r2} /* push old task's pc */
stmfd sp!, {r4-r12,lr} /* push old task's lr,r12-r4 */
mov r4, r1 /* r4 -> parked r0-r3 on the IRQ stack */
mov r5, r3 /* r5 = old task's cpsr */
ldmfd r4!, {r0-r3} /* fetch old task's r0-r3 back */
stmfd sp!, {r0-r3} /* push old task's r3-r0 */
stmfd sp!, {r5} /* push old task's cpsr */
mrs r4, spsr /* NOTE(review): this mode's banked spsr — kept for
                frame-layout symmetry; verify it is meaningful here */
stmfd sp!, {r4} /* push old task's spsr */
ldr r4, =rt_interrupt_from_thread
ldr r5, [r4]
str sp, [r5] /* store sp in preempted tasks's TCB */
ldr r6, =rt_interrupt_to_thread
ldr r6, [r6]
ldr sp, [r6] /* get new task's stack pointer */
ldmfd sp!, {r4} /* pop new task's spsr */
msr SPSR_cxsf, r4
ldmfd sp!, {r4} /* pop new task's psr */
msr CPSR_cxsf, r4
ldmfd sp!, {r0-r12,lr,pc} /* pop new task's r0-r12,lr & pc */
/*
 * stack_setup: load the banked sp of every privileged mode with the
 * top of its reserved region (labels from .bss.noinit above), ending
 * back in SVC mode with interrupts masked.  r0 keeps the caller's psr
 * with the mode field cleared; r1 is scratch.  lr must survive every
 * mode change: lr is banked per mode, but the caller is in SVC mode
 * and we return only after switching back to SVC, so the final
 * 'mov pc,lr' uses the caller's own lr.
 */
stack_setup:
mrs r0, cpsr
bic r0, r0, #MODEMASK @ r0 = psr template without mode bits
orr r1, r0, #UNDEFMODE|NOINT
msr cpsr_cxsf, r1 /* undef mode */
ldr sp, =UDF_STACK
orr r1,r0,#ABORTMODE|NOINT
msr cpsr_cxsf,r1 /* abort mode */
ldr sp, =ABT_STACK
orr r1,r0,#IRQMODE|NOINT
msr cpsr_cxsf,r1 /* IRQ mode */
ldr sp, =IRQ_STACK
orr r1,r0,#FIQMODE|NOINT
msr cpsr_cxsf,r1 /* FIQ mode */
ldr sp, =FIQ_STACK
bic r0,r0,#MODEMASK
orr r1,r0,#SVCMODE|NOINT
msr cpsr_cxsf,r1 /* SVC mode */
ldr sp, =SVC_STACK
/* USER mode is not initialized. */
mov pc,lr /* The LR register may be not valid for the mode changes.*/
|
vandercookking/h7_device_RTT
| 2,754
|
rt-thread/libcpu/arm/lpc24xx/context_rvds.S
|
;/*
; * Copyright (c) 2006-2022, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date           Author       Notes
; * 2009-01-20     Bernard      first version
; * 2011-07-22     Bernard      added thumb mode porting
; */
; RVDS/armasm port of the context-switch primitives.
NOINT EQU 0xc0 ; disable interrupt in psr (I and F bits)
AREA |.text|, CODE, READONLY, ALIGN=2 ; 4-byte aligned code section
ARM ; assemble in ARM (not Thumb) state
REQUIRE8 ; this code requires an 8-byte aligned stack
PRESERVE8 ; ... and preserves 8-byte stack alignment
;/*
; * rt_base_t rt_hw_interrupt_disable();
; * Masks IRQ and FIQ and returns the previous cpsr in r0.
; * The returned value is the 'level' token expected back by
; * rt_hw_interrupt_enable.  Clobbers r1 and nothing else.
; */
rt_hw_interrupt_disable PROC
EXPORT rt_hw_interrupt_disable
MRS r0, cpsr ; r0 = current psr (return value)
ORR r1, r0, #NOINT
MSR cpsr_c, r1 ; set I+F; control field only, mode unchanged
BX lr
ENDP
;/*
; * void rt_hw_interrupt_enable(rt_base_t level);
; * Restores the interrupt state captured by rt_hw_interrupt_disable:
; * writes 'level' (r0) back into the cpsr control field, re-enabling
; * interrupts only if they were enabled before the matching disable.
; */
rt_hw_interrupt_enable PROC
EXPORT rt_hw_interrupt_enable
MSR cpsr_c, r0 ; control field only: I/F/mode bits from 'level'
BX lr
ENDP
;/*
; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
; * r0 --> from (address of the preempted thread's sp slot in its TCB)
; * r1 --> to   (address of the target thread's sp slot)
; *
; * Thread-level (non-interrupt) switch.  Saved frame, low to high:
; * cpsr, r0-r12, lr, pc.  Thumb callers are detected via bit 0 of lr
; * and the T bit (0x20) is folded into the saved cpsr so the ^-form
; * LDMFD resumes them in Thumb state; the cpsr written directly below
; * must have T clear because the restore code itself runs as ARM.
; */
rt_hw_context_switch PROC
EXPORT rt_hw_context_switch
STMFD sp!, {lr} ; push pc (lr should be pushed in place of PC)
STMFD sp!, {r0-r12, lr} ; push lr & register file
MRS r4, cpsr
TST lr, #0x01 ; bit 0 of lr set => caller was Thumb
BEQ _ARM_MODE
ORR r4, r4, #0x20 ; it's thumb code: record T bit in saved cpsr
_ARM_MODE
STMFD sp!, {r4} ; push cpsr
STR sp, [r0] ; store sp in preempted tasks TCB
LDR sp, [r1] ; get new task stack pointer
LDMFD sp!, {r4} ; pop new task cpsr to spsr
MSR spsr_cxsf, r4
BIC r4, r4, #0x20 ; must be ARM mode while restoring
MSR cpsr_cxsf, r4
LDMFD sp!, {r0-r12, lr, pc}^ ; pop new task r0-r12, lr & pc, copy spsr to cpsr
ENDP
;/*
; * void rt_hw_context_switch_to(rt_uint32 to);
; * r0 --> to (address of the target thread's sp slot in its TCB)
; *
; * Restore-only switch used to start the first thread; saves nothing.
; * Frame layout matches rt_hw_context_switch: cpsr, r0-r12, lr, pc.
; * The ^-form LDMFD copies spsr into cpsr, so a Thumb thread (T bit
; * set in its saved cpsr) resumes correctly even though the restore
; * sequence itself must execute in ARM state.
; */
rt_hw_context_switch_to PROC
EXPORT rt_hw_context_switch_to
LDR sp, [r0] ; get new task stack pointer
LDMFD sp!, {r4} ; pop new task cpsr to spsr
MSR spsr_cxsf, r4
BIC r4, r4, #0x20 ; must be ARM mode while restoring
MSR cpsr_cxsf, r4
LDMFD sp!, {r0-r12, lr, pc}^ ; pop new task r0-r12, lr & pc, copy spsr to cpsr
ENDP
;/*
; * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
; * r0 --> from, r1 --> to (TCB sp-slot addresses)
; *
; * Interrupt-context switch request: records from/to and raises
; * rt_thread_switch_interrupt_flag; the IRQ exit path does the real
; * switch.  If a switch is already pending, only 'to' is updated so
; * the originally preempted thread's context is still the one saved.
; */
IMPORT rt_thread_switch_interrupt_flag
IMPORT rt_interrupt_from_thread
IMPORT rt_interrupt_to_thread
rt_hw_context_switch_interrupt PROC
EXPORT rt_hw_context_switch_interrupt
LDR r2, =rt_thread_switch_interrupt_flag
LDR r3, [r2]
CMP r3, #1 ; switch already pending?
BEQ _reswitch ; yes: keep original 'from'
MOV r3, #1 ; set rt_thread_switch_interrupt_flag to 1
STR r3, [r2]
LDR r2, =rt_interrupt_from_thread ; set rt_interrupt_from_thread
STR r0, [r2]
_reswitch
LDR r2, =rt_interrupt_to_thread ; set rt_interrupt_to_thread
STR r1, [r2]
BX lr
ENDP
END
|
vandercookking/h7_device_RTT
| 67,489
|
rt-thread/libcpu/arm/lpc24xx/start_rvds.S
|
;/*
; * Copyright (c) 2006-2022, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date Author Notes
; */
;
;/*****************************************************************************/
;/* LPC2400.S: Startup file for Philips LPC2400 device series */
;/*****************************************************************************/
;/* <<< Use Configuration Wizard in Context Menu >>> */
;/*****************************************************************************/
;/* This file is part of the uVision/ARM development tools. */
;/* Copyright (c) 2007-2008 Keil - An ARM Company. All rights reserved. */
;/* This software may only be used under the terms of a valid, current, */
;/* end user licence from KEIL for a compatible version of KEIL software */
;/* development tools. Nothing else gives you the right to use this software. */
;/*****************************************************************************/
;/*
; * The LPC2400.S code is executed after CPU Reset. This file may be
; * translated with the following SET symbols. In uVision these SET
; * symbols are entered under Options - ASM - Define.
; *
; * NO_CLOCK_SETUP: when set the startup code will not initialize Clock
; * (used mostly when clock is already initialized from script .ini
; * file).
; *
; * NO_EMC_SETUP: when set the startup code will not initialize
; * External Bus Controller.
; *
; * RAM_INTVEC: when set the startup code copies exception vectors
; * from on-chip Flash to on-chip RAM.
; *
; * REMAP: when set the startup code initializes the register MEMMAP
; * which overwrites the settings of the CPU configuration pins. The
; * startup and interrupt vectors are remapped from:
; * 0x00000000 default setting (not remapped)
; * 0x40000000 when RAM_MODE is used
; * 0x80000000 when EXTMEM_MODE is used
; *
; * EXTMEM_MODE: when set the device is configured for code execution
; * from external memory starting at address 0x80000000.
; *
; * RAM_MODE: when set the device is configured for code execution
; * from on-chip RAM starting at address 0x40000000.
; */
; Standard definitions of Mode bits and Interrupt (I & F) flags in PSRs
; (values for the low 5 mode bits of the ARM cpsr/spsr)
Mode_USR EQU 0x10 ; user mode (unprivileged)
Mode_FIQ EQU 0x11 ; fast interrupt mode
Mode_IRQ EQU 0x12 ; interrupt mode
Mode_SVC EQU 0x13 ; supervisor mode
Mode_ABT EQU 0x17 ; abort mode
Mode_UND EQU 0x1B ; undefined-instruction mode
Mode_SYS EQU 0x1F ; system mode (privileged, user registers)
I_Bit EQU 0x80 ; when I bit is set, IRQ is disabled
F_Bit EQU 0x40 ; when F bit is set, FIQ is disabled
;----------------------- Memory Definitions ------------------------------------
; Internal Memory Base Addresses
FLASH_BASE EQU 0x00000000
RAM_BASE EQU 0x40000000
EXTMEM_BASE EQU 0x80000000
; External Memory Base Addresses
STA_MEM0_BASE EQU 0x80000000
STA_MEM1_BASE EQU 0x81000000
STA_MEM2_BASE EQU 0x82000000
STA_MEM3_BASE EQU 0x83000000
DYN_MEM0_BASE EQU 0xA0000000
DYN_MEM1_BASE EQU 0xB0000000
DYN_MEM2_BASE EQU 0xC0000000
DYN_MEM3_BASE EQU 0xD0000000
;----------------------- Stack and Heap Definitions ----------------------------
;// <h> Stack Configuration (Stack Sizes in Bytes)
;// <o0> Undefined Mode <0x0-0xFFFFFFFF:8>
;// <o1> Supervisor Mode <0x0-0xFFFFFFFF:8>
;// <o2> Abort Mode <0x0-0xFFFFFFFF:8>
;// <o3> Fast Interrupt Mode <0x0-0xFFFFFFFF:8>
;// <o4> Interrupt Mode <0x0-0xFFFFFFFF:8>
;// <o5> User/System Mode <0x0-0xFFFFFFFF:8>
;// </h>
; Per-mode stack sizes (configured via the uVision wizard above).
UND_Stack_Size EQU 0x00000000 ; undefined mode: no stack reserved
SVC_Stack_Size EQU 0x00000100
ABT_Stack_Size EQU 0x00000000 ; abort mode: no stack reserved
FIQ_Stack_Size EQU 0x00000000 ; FIQ: no stack reserved
IRQ_Stack_Size EQU 0x00000100
USR_Stack_Size EQU 0x00000100
ISR_Stack_Size EQU (UND_Stack_Size + SVC_Stack_Size + ABT_Stack_Size + \
FIQ_Stack_Size + IRQ_Stack_Size)
; Full-descending stacks: user stack grows down from __initial_sp,
; the combined ISR stacks grow down from Stack_Top.
AREA STACK, NOINIT, READWRITE, ALIGN=3 ; 8-byte aligned, zero-less BSS
Stack_Mem SPACE USR_Stack_Size
__initial_sp SPACE ISR_Stack_Size
Stack_Top
;// <h> Heap Configuration
;// <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF>
;// </h>
Heap_Size EQU 0x00000000
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
;----------------------- Clock Definitions -------------------------------------
; System Control Block (SCB) Module Definitions
SCB_BASE EQU 0xE01FC000 ; SCB Base Address
PLLCON_OFS EQU 0x80 ; PLL Control Offset
PLLCFG_OFS EQU 0x84 ; PLL Configuration Offset
PLLSTAT_OFS EQU 0x88 ; PLL Status Offset
PLLFEED_OFS EQU 0x8C ; PLL Feed Offset
CCLKCFG_OFS EQU 0x104 ; CPU Clock Divider Reg Offset
USBCLKCFG_OFS EQU 0x108 ; USB Clock Divider Reg Offset
CLKSRCSEL_OFS EQU 0x10C ; Clock Source Sel Reg Offset
SCS_OFS EQU 0x1A0 ; Sys Control and Status Reg Offset
PCLKSEL0_OFS EQU 0x1A8 ; Periph Clock Sel Reg 0 Offset
PCLKSEL1_OFS EQU 0x1AC ; Periph Clock Sel Reg 0 Offset
PCON_OFS EQU 0x0C0 ; Power Mode Control Reg Offset
PCONP_OFS EQU 0x0C4 ; Power Control for Periphs Reg Offset
; Constants
OSCRANGE EQU (1<<4) ; Oscillator Range Select
OSCEN EQU (1<<5) ; Main oscillator Enable
OSCSTAT EQU (1<<6) ; Main Oscillator Status
PLLCON_PLLE EQU (1<<0) ; PLL Enable
PLLCON_PLLC EQU (1<<1) ; PLL Connect
PLLSTAT_M EQU (0x7FFF<<0) ; PLL M Value
PLLSTAT_N EQU (0xFF<<16) ; PLL N Value
PLLSTAT_PLOCK EQU (1<<26) ; PLL Lock Status
;// <e> Clock Setup
;// <h> System Controls and Status Register (SYS)
;// <o1.4> OSCRANGE: Main Oscillator Range Select
;// <0=> 1 MHz to 20 MHz
;// <1=> 15 MHz to 24 MHz
;// <e1.5> OSCEN: Main Oscillator Enable
;// </e>
;// </h>
;//
;// <h> PLL Clock Source Select Register (CLKSRCSEL)
;// <o2.0..1> CLKSRC: PLL Clock Source Selection
;// <0=> Internal RC oscillator
;// <1=> Main oscillator
;// <2=> RTC oscillator
;// </h>
;//
;// <h> PLL Configuration Register (PLLCFG)
;// <i> PLL_clk = (2* M * PLL_clk_src) / N
;// <o3.0..14> MSEL: PLL Multiplier Selection
;// <1-32768><#-1>
;// <i> M Value
;// <o3.16..23> NSEL: PLL Divider Selection
;// <1-256><#-1>
;// <i> N Value
;// </h>
;//
;// <h> CPU Clock Configuration Register (CCLKCFG)
;// <o4.0..7> CCLKSEL: Divide Value for CPU Clock from PLL
;// <1-256><#-1>
;// </h>
;//
;// <h> USB Clock Configuration Register (USBCLKCFG)
;// <o5.0..3> USBSEL: Divide Value for USB Clock from PLL
;// <1-16><#-1>
;// </h>
;//
;// <h> Peripheral Clock Selection Register 0 (PCLKSEL0)
;// <o6.0..1> PCLK_WDT: Peripheral Clock Selection for WDT
;// <0=> Pclk = Cclk / 4
;// <1=> Pclk = Cclk
;// <2=> Pclk = Cclk / 2
;// <3=> Pclk = Cclk / 8
;// <o6.2..3> PCLK_TIMER0: Peripheral Clock Selection for TIMER0
;// <0=> Pclk = Cclk / 4
;// <1=> Pclk = Cclk
;// <2=> Pclk = Cclk / 2
;// <3=> Pclk = Cclk / 8
;// <o6.4..5> PCLK_TIMER1: Peripheral Clock Selection for TIMER1
;// <0=> Pclk = Cclk / 4
;// <1=> Pclk = Cclk
;// <2=> Pclk = Cclk / 2
;// <3=> Pclk = Cclk / 8
;// <o6.6..7> PCLK_UART0: Peripheral Clock Selection for UART0
;// <0=> Pclk = Cclk / 4
;// <1=> Pclk = Cclk
;// <2=> Pclk = Cclk / 2
;// <3=> Pclk = Cclk / 8
;// <o6.8..9> PCLK_UART1: Peripheral Clock Selection for UART1
;// <0=> Pclk = Cclk / 4
;// <1=> Pclk = Cclk
;// <2=> Pclk = Cclk / 2
;// <3=> Pclk = Cclk / 8
;// <o6.10..11> PCLK_PWM0: Peripheral Clock Selection for PWM0
;// <0=> Pclk = Cclk / 4
;// <1=> Pclk = Cclk
;// <2=> Pclk = Cclk / 2
;// <3=> Pclk = Cclk / 8
;// <o6.12..13> PCLK_PWM1: Peripheral Clock Selection for PWM1
;// <0=> Pclk = Cclk / 4
;// <1=> Pclk = Cclk
;// <2=> Pclk = Cclk / 2
;// <3=> Pclk = Cclk / 8
;// <o6.14..15> PCLK_I2C0: Peripheral Clock Selection for I2C0
;// <0=> Pclk = Cclk / 4
;// <1=> Pclk = Cclk
;// <2=> Pclk = Cclk / 2
;// <3=> Pclk = Cclk / 8
;// <o6.16..17> PCLK_SPI: Peripheral Clock Selection for SPI
;// <0=> Pclk = Cclk / 4
;// <1=> Pclk = Cclk
;// <2=> Pclk = Cclk / 2
;// <3=> Pclk = Cclk / 8
;// <o6.18..19> PCLK_RTC: Peripheral Clock Selection for RTC
;// <0=> Pclk = Cclk / 4
;// <1=> Pclk = Cclk
;// <2=> Pclk = Cclk / 2
;// <3=> Pclk = Cclk / 8
;// <o6.20..21> PCLK_SSP1: Peripheral Clock Selection for SSP1
;// <0=> Pclk = Cclk / 4
;// <1=> Pclk = Cclk
;// <2=> Pclk = Cclk / 2
;// <3=> Pclk = Cclk / 8
;// <o6.22..23> PCLK_DAC: Peripheral Clock Selection for DAC
;// <0=> Pclk = Cclk / 4
;// <1=> Pclk = Cclk
;// <2=> Pclk = Cclk / 2
;// <3=> Pclk = Cclk / 8
;// <o6.24..25> PCLK_ADC: Peripheral Clock Selection for ADC
;// <0=> Pclk = Cclk / 4
;// <1=> Pclk = Cclk
;// <2=> Pclk = Cclk / 2
;// <3=> Pclk = Cclk / 8
;// <o6.26..27> PCLK_CAN1: Peripheral Clock Selection for CAN1
;// <0=> Pclk = Cclk / 4
;// <1=> Pclk = Cclk
;// <2=> Pclk = Cclk / 2
;// <3=> Pclk = Cclk / 6
;// <o6.28..29> PCLK_CAN2: Peripheral Clock Selection for CAN2
;// <0=> Pclk = Cclk / 4
;// <1=> Pclk = Cclk
;// <2=> Pclk = Cclk / 2
;// <3=> Pclk = Cclk / 6
;// <o6.30..31> PCLK_ACF: Peripheral Clock Selection for ACF
;// <0=> Pclk = Cclk / 4
;// <1=> Pclk = Cclk
;// <2=> Pclk = Cclk / 2
;// <3=> Pclk = Cclk / 6
;// </h>
;//
;// <h> Peripheral Clock Selection Register 1 (PCLKSEL1)
;// <o7.0..1> PCLK_BAT_RAM: Peripheral Clock Selection for the Battery Supported RAM
;// <0=> Pclk = Cclk / 4
;// <1=> Pclk = Cclk
;// <2=> Pclk = Cclk / 2
;// <3=> Pclk = Cclk / 8
;// <o7.2..3> PCLK_GPIO: Peripheral Clock Selection for GPIOs
;// <0=> Pclk = Cclk / 4
;// <1=> Pclk = Cclk
;// <2=> Pclk = Cclk / 2
;// <3=> Pclk = Cclk / 8
;// <o7.4..5> PCLK_PCB: Peripheral Clock Selection for Pin Connect Block
;// <0=> Pclk = Cclk / 4
;// <1=> Pclk = Cclk
;// <2=> Pclk = Cclk / 2
;// <3=> Pclk = Cclk / 8
;// <o7.6..7> PCLK_I2C1: Peripheral Clock Selection for I2C1
;// <0=> Pclk = Cclk / 4
;// <1=> Pclk = Cclk
;// <2=> Pclk = Cclk / 2
;// <3=> Pclk = Cclk / 8
;// <o7.10..11> PCLK_SSP0: Peripheral Clock Selection for SSP0
;// <0=> Pclk = Cclk / 4
;// <1=> Pclk = Cclk
;// <2=> Pclk = Cclk / 2
;// <3=> Pclk = Cclk / 8
;// <o7.12..13> PCLK_TIMER2: Peripheral Clock Selection for TIMER2
;// <0=> Pclk = Cclk / 4
;// <1=> Pclk = Cclk
;// <2=> Pclk = Cclk / 2
;// <3=> Pclk = Cclk / 8
;// <o7.14..15> PCLK_TIMER3: Peripheral Clock Selection for TIMER3
;// <0=> Pclk = Cclk / 4
;// <1=> Pclk = Cclk
;// <2=> Pclk = Cclk / 2
;// <3=> Pclk = Cclk / 8
;// <o7.16..17> PCLK_UART2: Peripheral Clock Selection for UART2
;// <0=> Pclk = Cclk / 4
;// <1=> Pclk = Cclk
;// <2=> Pclk = Cclk / 2
;// <3=> Pclk = Cclk / 8
;// <o7.18..19> PCLK_UART3: Peripheral Clock Selection for UART3
;// <0=> Pclk = Cclk / 4
;// <1=> Pclk = Cclk
;// <2=> Pclk = Cclk / 2
;// <3=> Pclk = Cclk / 8
;// <o7.20..21> PCLK_I2C2: Peripheral Clock Selection for I2C2
;// <0=> Pclk = Cclk / 4
;// <1=> Pclk = Cclk
;// <2=> Pclk = Cclk / 2
;// <3=> Pclk = Cclk / 8
;// <o7.22..23> PCLK_I2S: Peripheral Clock Selection for I2S
;// <0=> Pclk = Cclk / 4
;// <1=> Pclk = Cclk
;// <2=> Pclk = Cclk / 2
;// <3=> Pclk = Cclk / 8
;// <o7.24..25> PCLK_MCI: Peripheral Clock Selection for MCI
;// <0=> Pclk = Cclk / 4
;// <1=> Pclk = Cclk
;// <2=> Pclk = Cclk / 2
;// <3=> Pclk = Cclk / 8
;// <o7.28..29> PCLK_SYSCON: Peripheral Clock Selection for System Control Block
;// <0=> Pclk = Cclk / 4
;// <1=> Pclk = Cclk
;// <2=> Pclk = Cclk / 2
;// <3=> Pclk = Cclk / 8
;// </h>
;// </e>
CLOCK_SETUP EQU 1
SCS_Val EQU 0x00000020
CLKSRCSEL_Val EQU 0x00000001
PLLCFG_Val EQU 0x0000000B
CCLKCFG_Val EQU 0x00000004
USBCLKCFG_Val EQU 0x00000005
PCLKSEL0_Val EQU 0x00000000
PCLKSEL1_Val EQU 0x00000000
;----------------------- Memory Accelerator Module (MAM) Definitions -----------
MAM_BASE EQU 0xE01FC000 ; MAM Base Address
MAMCR_OFS EQU 0x00 ; MAM Control Offset
MAMTIM_OFS EQU 0x04 ; MAM Timing Offset
;// <e> MAM Setup
;// <o1.0..1> MAM Control
;// <0=> Disabled
;// <1=> Partially Enabled
;// <2=> Fully Enabled
;// <i> Mode
;// <o2.0..2> MAM Timing
;// <0=> Reserved <1=> 1 <2=> 2 <3=> 3
;// <4=> 4 <5=> 5 <6=> 6 <7=> 7
;// <i> Fetch Cycles
;// </e>
MAM_SETUP EQU 1
MAMCR_Val EQU 0x00000002
MAMTIM_Val EQU 0x00000004
;----------------------- Pin Connect Block Definitions -------------------------
PCB_BASE EQU 0xE002C000 ; PCB Base Address
PINSEL0_OFS EQU 0x00 ; PINSEL0 Address Offset
PINSEL1_OFS EQU 0x04 ; PINSEL1 Address Offset
PINSEL2_OFS EQU 0x08 ; PINSEL2 Address Offset
PINSEL3_OFS EQU 0x0C ; PINSEL3 Address Offset
PINSEL4_OFS EQU 0x10 ; PINSEL4 Address Offset
PINSEL5_OFS EQU 0x14 ; PINSEL5 Address Offset
PINSEL6_OFS EQU 0x18 ; PINSEL6 Address Offset
PINSEL7_OFS EQU 0x1C ; PINSEL7 Address Offset
PINSEL8_OFS EQU 0x20 ; PINSEL8 Address Offset
PINSEL9_OFS EQU 0x24 ; PINSEL9 Address Offset
PINSEL10_OFS EQU 0x28 ; PINSEL10 Address Offset
;----------------------- External Memory Controller (EMC) Definitons -----------
EMC_BASE EQU 0xFFE08000 ; EMC Base Address
EMC_CTRL_OFS EQU 0x000
EMC_STAT_OFS EQU 0x004
EMC_CONFIG_OFS EQU 0x008
EMC_DYN_CTRL_OFS EQU 0x020
EMC_DYN_RFSH_OFS EQU 0x024
EMC_DYN_RD_CFG_OFS EQU 0x028
EMC_DYN_RP_OFS EQU 0x030
EMC_DYN_RAS_OFS EQU 0x034
EMC_DYN_SREX_OFS EQU 0x038
EMC_DYN_APR_OFS EQU 0x03C
EMC_DYN_DAL_OFS EQU 0x040
EMC_DYN_WR_OFS EQU 0x044
EMC_DYN_RC_OFS EQU 0x048
EMC_DYN_RFC_OFS EQU 0x04C
EMC_DYN_XSR_OFS EQU 0x050
EMC_DYN_RRD_OFS EQU 0x054
EMC_DYN_MRD_OFS EQU 0x058
EMC_DYN_CFG0_OFS EQU 0x100
EMC_DYN_RASCAS0_OFS EQU 0x104
EMC_DYN_CFG1_OFS EQU 0x140
EMC_DYN_RASCAS1_OFS EQU 0x144
EMC_DYN_CFG2_OFS EQU 0x160
EMC_DYN_RASCAS2_OFS EQU 0x164
EMC_DYN_CFG3_OFS EQU 0x180
EMC_DYN_RASCAS3_OFS EQU 0x184
EMC_STA_CFG0_OFS EQU 0x200
EMC_STA_WWEN0_OFS EQU 0x204
EMC_STA_WOEN0_OFS EQU 0x208
EMC_STA_WRD0_OFS EQU 0x20C
EMC_STA_WPAGE0_OFS EQU 0x210
EMC_STA_WWR0_OFS EQU 0x214
EMC_STA_WTURN0_OFS EQU 0x218
EMC_STA_CFG1_OFS EQU 0x220
EMC_STA_WWEN1_OFS EQU 0x224
EMC_STA_WOEN1_OFS EQU 0x228
EMC_STA_WRD1_OFS EQU 0x22C
EMC_STA_WPAGE1_OFS EQU 0x230
EMC_STA_WWR1_OFS EQU 0x234
EMC_STA_WTURN1_OFS EQU 0x238
EMC_STA_CFG2_OFS EQU 0x240
EMC_STA_WWEN2_OFS EQU 0x244
EMC_STA_WOEN2_OFS EQU 0x248
EMC_STA_WRD2_OFS EQU 0x24C
EMC_STA_WPAGE2_OFS EQU 0x250
EMC_STA_WWR2_OFS EQU 0x254
EMC_STA_WTURN2_OFS EQU 0x258
EMC_STA_CFG3_OFS EQU 0x260
EMC_STA_WWEN3_OFS EQU 0x264
EMC_STA_WOEN3_OFS EQU 0x268
EMC_STA_WRD3_OFS EQU 0x26C
EMC_STA_WPAGE3_OFS EQU 0x270
EMC_STA_WWR3_OFS EQU 0x274
EMC_STA_WTURN3_OFS EQU 0x278
EMC_STA_EXT_W_OFS EQU 0x880
; Constants
NORMAL_CMD EQU (0x0 << 7) ; NORMAL Command
MODE_CMD EQU (0x1 << 7) ; MODE Command
PALL_CMD EQU (0x2 << 7) ; Precharge All Command
NOP_CMD EQU (0x3 << 7) ; NOP Command
BUFEN_Const EQU (1 << 19) ; Buffer enable bit
EMC_PCONP_Const EQU (1 << 11) ; PCONP val to enable power for EMC
; External Memory Pins definitions
; pin functions for SDRAM, NOR and NAND flash interfacing
EMC_PINSEL5_Val EQU 0x05010115 ; !CAS, !RAS, CLKOUT0, !DYCS0, DQMOUT0, DQMOUT1
EMC_PINSEL6_Val EQU 0x55555555 ; D0 .. D15
EMC_PINSEL8_Val EQU 0x55555555 ; A0 .. A15
EMC_PINSEL9_Val EQU 0x50055555; ; A16 .. A23, !OE, !WE, !CS0, !CS1
;// External Memory Controller Setup (EMC) ---------------------------------
;// <e> External Memory Controller Setup (EMC)
EMC_SETUP EQU 0
;// <h> EMC Control Register (EMCControl)
;// <i> Controls operation of the memory controller
;// <o0.2> L: Low-power mode enable
;// <o0.1> M: Address mirror enable
;// <o0.0> E: EMC enable
;// </h>
EMC_CTRL_Val EQU 0x00000001
;// <h> EMC Configuration Register (EMCConfig)
;// <i> Configures operation of the memory controller
;// <o0.8> CCLK: CLKOUT ratio
;// <0=> 1:1
;// <1=> 1:2
;// <o0.0> Endian mode
;// <0=> Little-endian
;// <1=> Big-endian
;// </h>
EMC_CONFIG_Val EQU 0x00000000
;// Dynamic Memory Interface Setup ---------------------------------------
;// <e> Dynamic Memory Interface Setup
EMC_DYNAMIC_SETUP EQU 1
;// <h> Dynamic Memory Refresh Timer Register (EMCDynamicRefresh)
;// <i> Configures dynamic memory refresh operation
;// <o0.0..10> REFRESH: Refresh timer <0x000-0x7FF>
;// <i> 0 = refresh disabled, 0x01-0x7FF: value * 16 CCLKS
;// </h>
EMC_DYN_RFSH_Val EQU 0x0000001C
;// <h> Dynamic Memory Read Configuration Register (EMCDynamicReadConfig)
;// <i> Configures the dynamic memory read strategy
;// <o0.0..1> RD: Read data strategy
;// <0=> Clock out delayed strategy
;// <1=> Command delayed strategy
;// <2=> Command delayed strategy plus one clock cycle
;// <3=> Command delayed strategy plus two clock cycles
;// </h>
EMC_DYN_RD_CFG_Val EQU 0x00000001
;// <h> Dynamic Memory Timings
;// <h> Dynamic Memory Percentage Command Period Register (EMCDynamictRP)
;// <o0.0..3> tRP: Precharge command period <1-16> <#-1>
;// <i> The delay is in EMCCLK cycles
;// <i> This value is normally found in SDRAM data sheets as tRP
;// </h>
;// <h> Dynamic Memory Active to Precharge Command Period Register (EMCDynamictRAS)
;// <o1.0..3> tRAS: Active to precharge command period <1-16> <#-1>
;// <i> The delay is in EMCCLK cycles
;// <i> This value is normally found in SDRAM data sheets as tRAS
;// </h>
;// <h> Dynamic Memory Self-refresh Exit Time Register (EMCDynamictSREX)
;// <o2.0..3> tSREX: Self-refresh exit time <1-16> <#-1>
;// <i> The delay is in CCLK cycles
;// <i> This value is normally found in SDRAM data sheets as tSREX,
;// <i> for devices without this parameter you use the same value as tXSR
;// </h>
;// <h> Dynamic Memory Last Data Out to Active Time Register (EMCDynamictAPR)
;// <o3.0..3> tAPR: Last-data-out to active command time <1-16> <#-1>
;// <i> The delay is in CCLK cycles
;// <i> This value is normally found in SDRAM data sheets as tAPR
;// </h>
;// <h> Dynamic Memory Data-in to Active Command Time Register (EMCDynamictDAL)
;// <o4.0..3> tDAL: Data-in to active command time <1-16> <#-1>
;// <i> The delay is in CCLK cycles
;// <i> This value is normally found in SDRAM data sheets as tDAL or tAPW
;// </h>
;// <h> Dynamic Memory Write Recovery Time Register (EMCDynamictWR)
;// <o5.0..3> tWR: Write recovery time <1-16> <#-1>
;// <i> The delay is in CCLK cycles
;// <i> This value is normally found in SDRAM data sheets as tWR, tDPL, tRWL, or tRDL
;// </h>
;// <h> Dynamic Memory Active to Active Command Period Register (EMCDynamictRC)
;// <o6.0..4> tRC: Active to active command period <1-32> <#-1>
;// <i> The delay is in CCLK cycles
;// <i> This value is normally found in SDRAM data sheets as tRC
;// </h>
;// <h> Dynamic Memory Auto-refresh Period Register (EMCDynamictRFC)
;// <o7.0..4> tRFC: Auto-refresh period and auto-refresh to active command period <1-32> <#-1>
;// <i> The delay is in CCLK cycles
;// <i> This value is normally found in SDRAM data sheets as tRFC or tRC
;// </h>
;// <h> Dynamic Memory Exit Self-refresh Register (EMCDynamictXSR)
;// <o8.0..4> tXSR: Exit self-refresh to active command time <1-32> <#-1>
;// <i> The delay is in CCLK cycles
;// <i> This value is normally found in SDRAM data sheets as tXSR
;// </h>
;// <h> Dynamic Memory Active Bank A to Active Bank B Time Register (EMCDynamicRRD)
;// <o9.0..3> tRRD: Active bank A to active bank B latency <1-16> <#-1>
;// <i> The delay is in CCLK cycles
;// <i> This value is normally found in SDRAM data sheets as tRRD
;// </h>
;// <h> Dynamic Memory Load Mode Register to Active Command Time (EMCDynamictMRD)
;// <o10.0..3> tMRD: Load mode register to active command time <1-16> <#-1>
;// <i> The delay is in CCLK cycles
;// <i> This value is normally found in SDRAM data sheets as tMRD or tRSA
;// </h>
;// </h>
EMC_DYN_RP_Val EQU 0x00000002
EMC_DYN_RAS_Val EQU 0x00000003
EMC_DYN_SREX_Val EQU 0x00000007
EMC_DYN_APR_Val EQU 0x00000002
EMC_DYN_DAL_Val EQU 0x00000005
EMC_DYN_WR_Val EQU 0x00000001
EMC_DYN_RC_Val EQU 0x00000005
EMC_DYN_RFC_Val EQU 0x00000005
EMC_DYN_XSR_Val EQU 0x00000007
EMC_DYN_RRD_Val EQU 0x00000001
EMC_DYN_MRD_Val EQU 0x00000002
;// <e> Configure External Bus Behaviour for Dynamic CS0 Area
EMC_DYNCS0_SETUP EQU 1
;// <h> Dynamic Memory Configuration Register (EMCDynamicConfig0)
;// <i> Defines the configuration information for the dynamic memory CS0
;// <o0.20> P: Write protect
;// <o0.19> B: Buffer enable
;// <o0.14> AM 14: External bus data width
;// <0=> 16 bit
;// <1=> 32 bit
;// <o0.12> AM 12: External bus memory type
;// <0=> High-performance
;// <1=> Low-power SDRAM
;// <o0.7..11> AM 11..7: External bus address mapping (Row, Bank, Column)
;// <0x00=> 16 Mb = 2MB (2Mx8), 2 banks, row length = 11, column length = 9
;// <0x01=> 16 Mb = 2MB (1Mx16), 2 banks, row length = 11, column length = 8
;// <0x04=> 64 Mb = 8MB (8Mx8), 4 banks, row length = 12, column length = 9
;// <0x05=> 64 Mb = 8MB (4Mx16), 4 banks, row length = 12, column length = 8
;// <0x08=> 128 Mb = 16MB (16Mx8), 4 banks, row length = 12, column length = 10
;// <0x09=> 128 Mb = 16MB (8Mx16), 4 banks, row length = 12, column length = 9
;// <0x0C=> 256 Mb = 32MB (32Mx8), 4 banks, row length = 13, column length = 10
;// <0x0D=> 256 Mb = 32MB (16Mx16), 4 banks, row length = 13, column length = 9
;// <0x10=> 512 Mb = 64MB (64Mx8), 4 banks, row length = 13, column length = 11
;// <0x11=> 512 Mb = 64MB (32Mx16), 4 banks, row length = 13, column length = 10
;// <o0.3..4> MD: Memory device
;// <0=> SDRAM
;// <1=> Low-power SDRAM
;// <2=> Micron SyncFlash
;// </h>
EMC_DYN_CFG0_Val EQU 0x00080680
;// <h> Dynamic Memory RAS & CAS Delay register (EMCDynamicRASCAS0)
;// <i> Controls the RAS and CAS latencies for the dynamic memory CS0
;// <o0.8..9> CAS: CAS latency
;// <1=> One CCLK cycle
;// <2=> Two CCLK cycles
;// <3=> Three CCLK cycles
;// <o0.0..1> RAS: RAS latency (active to read/write delay)
;// <1=> One CCLK cycle
;// <2=> Two CCLK cycles
;// <3=> Three CCLK cycles
;// </h>
EMC_DYN_RASCAS0_Val EQU 0x00000303
;// </e> End of Dynamic Setup for CS0 Area
;// <e> Configure External Bus Behaviour for Dynamic CS1 Area
EMC_DYNCS1_SETUP EQU 0
;// <h> Dynamic Memory Configuration Register (EMCDynamicConfig1)
;// <i> Defines the configuration information for the dynamic memory CS1
;// <o0.20> P: Write protect
;// <o0.19> B: Buffer enable
;// <o0.14> AM 14: External bus data width
;// <0=> 16 bit
;// <1=> 32 bit
;// <o0.12> AM 12: External bus memory type
;// <0=> High-performance
;// <1=> Low-power SDRAM
;// <o0.7..11> AM 11..7: External bus address mapping (Row, Bank, Column)
;// <0x00=> 16 Mb = 2MB (2Mx8), 2 banks, row length = 11, column length = 9
;// <0x01=> 16 Mb = 2MB (1Mx16), 2 banks, row length = 11, column length = 8
;// <0x04=> 64 Mb = 8MB (8Mx8), 4 banks, row length = 12, column length = 9
;// <0x05=> 64 Mb = 8MB (4Mx16), 4 banks, row length = 12, column length = 8
;// <0x08=> 128 Mb = 16MB (16Mx8), 4 banks, row length = 12, column length = 10
;// <0x09=> 128 Mb = 16MB (8Mx16), 4 banks, row length = 12, column length = 9
;// <0x0C=> 256 Mb = 32MB (32Mx8), 4 banks, row length = 13, column length = 10
;// <0x0D=> 256 Mb = 32MB (16Mx16), 4 banks, row length = 13, column length = 9
;// <0x10=> 512 Mb = 64MB (64Mx8), 4 banks, row length = 13, column length = 11
;// <0x11=> 512 Mb = 64MB (32Mx16), 4 banks, row length = 13, column length = 10
;// <o0.3..4> MD: Memory device
;// <0=> SDRAM
;// <1=> Low-power SDRAM
;// <2=> Micron SyncFlash
;// </h>
EMC_DYN_CFG1_Val EQU 0x00000000
;// <h> Dynamic Memory RAS & CAS Delay register (EMCDynamicRASCAS1)
;// <i> Controls the RAS and CAS latencies for the dynamic memory CS1
;// <o0.8..9> CAS: CAS latency
;// <1=> One CCLK cycle
;// <2=> Two CCLK cycles
;// <3=> Three CCLK cycles
;// <o0.0..1> RAS: RAS latency (active to read/write delay)
;// <1=> One CCLK cycle
;// <2=> Two CCLK cycles
;// <3=> Three CCLK cycles
;// </h>
EMC_DYN_RASCAS1_Val EQU 0x00000303
;// </e> End of Dynamic Setup for CS1 Area
;// <e> Configure External Bus Behaviour for Dynamic CS2 Area
EMC_DYNCS2_SETUP EQU 0
;// <h> Dynamic Memory Configuration Register (EMCDynamicConfig2)
;// <i> Defines the configuration information for the dynamic memory CS2
;// <o0.20> P: Write protect
;// <o0.19> B: Buffer enable
;// <o0.14> AM 14: External bus data width
;// <0=> 16 bit
;// <1=> 32 bit
;// <o0.12> AM 12: External bus memory type
;// <0=> High-performance
;// <1=> Low-power SDRAM
;// <o0.7..11> AM 11..7: External bus address mapping (Row, Bank, Column)
;// <0x00=> 16 Mb = 2MB (2Mx8), 2 banks, row length = 11, column length = 9
;// <0x01=> 16 Mb = 2MB (1Mx16), 2 banks, row length = 11, column length = 8
;// <0x04=> 64 Mb = 8MB (8Mx8), 4 banks, row length = 12, column length = 9
;// <0x05=> 64 Mb = 8MB (4Mx16), 4 banks, row length = 12, column length = 8
;// <0x08=> 128 Mb = 16MB (16Mx8), 4 banks, row length = 12, column length = 10
;// <0x09=> 128 Mb = 16MB (8Mx16), 4 banks, row length = 12, column length = 9
;// <0x0C=> 256 Mb = 32MB (32Mx8), 4 banks, row length = 13, column length = 10
;// <0x0D=> 256 Mb = 32MB (16Mx16), 4 banks, row length = 13, column length = 9
;// <0x10=> 512 Mb = 64MB (64Mx8), 4 banks, row length = 13, column length = 11
;// <0x11=> 512 Mb = 64MB (32Mx16), 4 banks, row length = 13, column length = 10
;// <o0.3..4> MD: Memory device
;// <0=> SDRAM
;// <1=> Low-power SDRAM
;// <2=> Micron SyncFlash
;// </h>
EMC_DYN_CFG2_Val EQU 0x00000000
;// <h> Dynamic Memory RAS & CAS Delay register (EMCDynamicRASCAS2)
;// <i> Controls the RAS and CAS latencies for the dynamic memory CS2
;// <o0.8..9> CAS: CAS latency
;// <1=> One CCLK cycle
;// <2=> Two CCLK cycles
;// <3=> Three CCLK cycles
;// <o0.0..1> RAS: RAS latency (active to read/write delay)
;// <1=> One CCLK cycle
;// <2=> Two CCLK cycles
;// <3=> Three CCLK cycles
;// </h>
EMC_DYN_RASCAS2_Val EQU 0x00000303
;// </e> End of Dynamic Setup for CS2 Area
;// <e> Configure External Bus Behaviour for Dynamic CS3 Area
EMC_DYNCS3_SETUP EQU 0
;// <h> Dynamic Memory Configuration Register (EMCDynamicConfig3)
;// <i> Defines the configuration information for the dynamic memory CS3
;// <o0.20> P: Write protect
;// <o0.19> B: Buffer enable
;// <o0.14> AM 14: External bus data width
;// <0=> 16 bit
;// <1=> 32 bit
;// <o0.12> AM 12: External bus memory type
;// <0=> High-performance
;// <1=> Low-power SDRAM
;// <o0.7..11> AM 11..7: External bus address mapping (Row, Bank, Column)
;// <0x00=> 16 Mb = 2MB (2Mx8), 2 banks, row length = 11, column length = 9
;// <0x01=> 16 Mb = 2MB (1Mx16), 2 banks, row length = 11, column length = 8
;// <0x04=> 64 Mb = 8MB (8Mx8), 4 banks, row length = 12, column length = 9
;// <0x05=> 64 Mb = 8MB (4Mx16), 4 banks, row length = 12, column length = 8
;// <0x08=> 128 Mb = 16MB (16Mx8), 4 banks, row length = 12, column length = 10
;// <0x09=> 128 Mb = 16MB (8Mx16), 4 banks, row length = 12, column length = 9
;// <0x0C=> 256 Mb = 32MB (32Mx8), 4 banks, row length = 13, column length = 10
;// <0x0D=> 256 Mb = 32MB (16Mx16), 4 banks, row length = 13, column length = 9
;// <0x10=> 512 Mb = 64MB (64Mx8), 4 banks, row length = 13, column length = 11
;// <0x11=> 512 Mb = 64MB (32Mx16), 4 banks, row length = 13, column length = 10
;// <o0.3..4> MD: Memory device
;// <0=> SDRAM
;// <1=> Low-power SDRAM
;// <2=> Micron SyncFlash
;// </h>
EMC_DYN_CFG3_Val EQU 0x00000000
;// <h> Dynamic Memory RAS & CAS Delay register (EMCDynamicRASCAS3)
;// <i> Controls the RAS and CAS latencies for the dynamic memory CS3
;// <o0.8..9> CAS: CAS latency
;// <1=> One CCLK cycle
;// <2=> Two CCLK cycles
;// <3=> Three CCLK cycles
;// <o0.0..1> RAS: RAS latency (active to read/write delay)
;// <1=> One CCLK cycle
;// <2=> Two CCLK cycles
;// <3=> Three CCLK cycles
;// </h>
EMC_DYN_RASCAS3_Val EQU 0x00000303
;// </e> End of Dynamic Setup for CS3 Area
;// </e> End of Dynamic Setup
;// Static Memory Interface Setup ----------------------------------------
;// <e> Static Memory Interface Setup
EMC_STATIC_SETUP EQU 1
;// Configure External Bus Behaviour for Static CS0 Area ---------------
;// <e> Configure External Bus Behaviour for Static CS0 Area
EMC_STACS0_SETUP EQU 1
;// <h> Static Memory Configuration Register (EMCStaticConfig0)
;// <i> Defines the configuration information for the static memory CS0
;// <o0.20> WP: Write protect
;// <o0.19> B: Buffer enable
;// <o0.8> EW: Extended wait enable
;// <o0.7> PB: Byte lane state
;// <0=> For reads BLSn are HIGH, for writes BLSn are LOW
;// <1=> For reads BLSn are LOW, for writes BLSn are LOW
;// <o0.6> PC: Chip select polarity
;// <0=> Active LOW chip select
;// <1=> Active HIGH chip select
;// <o0.3> PM: Page mode enable
;// <o0.0..1> MW: Memory width
;// <0=> 8 bit
;// <1=> 16 bit
;// <2=> 32 bit
;// </h>
EMC_STA_CFG0_Val EQU 0x00000081
;// <h> Static Memory Write Enable Delay Register (EMCStaticWaitWen0)
;// <i> Selects the delay from CS0 to write enable
;// <o.0..3> WAITWEN: Wait write enable <1-16> <#-1>
;// <i> The delay is in CCLK cycles
;// </h>
EMC_STA_WWEN0_Val EQU 0x00000002
;// <h> Static Memory Output Enable Delay register (EMCStaticWaitOen0)
;// <i> Selects the delay from CS0 or address change, whichever is later, to output enable
;// <o.0..3> WAITOEN: Wait output enable <0-15>
;// <i> The delay is in CCLK cycles
;// </h>
EMC_STA_WOEN0_Val EQU 0x00000002
;// <h> Static Memory Read Delay Register (EMCStaticWaitRd0)
;// <i> Selects the delay from CS0 to a read access
;// <o.0..4> WAITRD: Non-page mode read wait states or asynchronous page mode read first access wait states <1-32> <#-1>
;// <i> The delay is in CCLK cycles
;// </h>
EMC_STA_WRD0_Val EQU 0x0000001F
;// <h> Static Memory Page Mode Read Delay Register (EMCStaticWaitPage0)
;// <i> Selects the delay for asynchronous page mode sequential accesses for CS0
;// <o.0..4> WAITPAGE: Asynchronous page mode read after the first read wait states <1-32> <#-1>
;// <i> The delay is in CCLK cycles
;// </h>
EMC_STA_WPAGE0_Val EQU 0x0000001F
;// <h> Static Memory Write Delay Register (EMCStaticWaitWr0)
;// <i> Selects the delay from CS0 to a write access
;// <o.0..4> WAITWR: Write wait states <2-33> <#-2>
;// <i> The delay is in CCLK cycles
;// </h>
EMC_STA_WWR0_Val EQU 0x0000001F
;// <h> Static Memory Turn Round Delay Register (EMCStaticWaitTurn0)
;// <i> Selects the number of bus turnaround cycles for CS0
;// <o.0..4> WAITTURN: Bus turnaround cycles <1-16> <#-1>
;// <i> The delay is in CCLK cycles
;// </h>
EMC_STA_WTURN0_Val EQU 0x0000000F
;// </e> End of Static Setup for Static CS0 Area
;// Configure External Bus Behaviour for Static CS1 Area ---------------
;// <e> Configure External Bus Behaviour for Static CS1 Area
EMC_STACS1_SETUP EQU 0
;// <h> Static Memory Configuration Register (EMCStaticConfig1)
;// <i> Defines the configuration information for the static memory CS1
;// <o0.20> WP: Write protect
;// <o0.19> B: Buffer enable
;// <o0.8> EW: Extended wait enable
;// <o0.7> PB: Byte lane state
;// <0=> For reads BLSn are HIGH, for writes BLSn are LOW
;// <1=> For reads BLSn are LOW, for writes BLSn are LOW
;// <o0.6> PC: Chip select polarity
;// <0=> Active LOW chip select
;// <1=> Active HIGH chip select
;// <o0.3> PM: Page mode enable
;// <o0.0..1> MW: Memory width
;// <0=> 8 bit
;// <1=> 16 bit
;// <2=> 32 bit
;// </h>
EMC_STA_CFG1_Val EQU 0x00000000
;// <h> Static Memory Write Enable Delay Register (EMCStaticWaitWen1)
;// <i> Selects the delay from CS1 to write enable
;// <o.0..3> WAITWEN: Wait write enable <1-16> <#-1>
;// <i> The delay is in CCLK cycles
;// </h>
EMC_STA_WWEN1_Val EQU 0x00000000
;// <h> Static Memory Output Enable Delay register (EMCStaticWaitOen1)
;// <i> Selects the delay from CS1 or address change, whichever is later, to output enable
;// <o.0..3> WAITOEN: Wait output enable <0-15>
;// <i> The delay is in CCLK cycles
;// </h>
EMC_STA_WOEN1_Val EQU 0x00000000
;// <h> Static Memory Read Delay Register (EMCStaticWaitRd1)
;// <i> Selects the delay from CS1 to a read access
;// <o.0..4> WAITRD: Non-page mode read wait states or asynchronous page mode read first access wait states <1-32> <#-1>
;// <i> The delay is in CCLK cycles
;// </h>
EMC_STA_WRD1_Val EQU 0x0000001F
;// <h> Static Memory Page Mode Read Delay Register (EMCStaticWaitPage1)
;// <i> Selects the delay for asynchronous page mode sequential accesses for CS1
;// <o.0..4> WAITPAGE: Asynchronous page mode read after the first read wait states <1-32> <#-1>
;// <i> The delay is in CCLK cycles
;// </h>
EMC_STA_WPAGE1_Val EQU 0x0000001F
;// <h> Static Memory Write Delay Register (EMCStaticWaitWr1)
;// <i> Selects the delay from CS1 to a write access
;// <o.0..4> WAITWR: Write wait states <2-33> <#-2>
;// <i> The delay is in CCLK cycles
;// </h>
EMC_STA_WWR1_Val EQU 0x0000001F
;// <h> Static Memory Turn Round Delay Register (EMCStaticWaitTurn1)
;// <i> Selects the number of bus turnaround cycles for CS1
;// <o.0..4> WAITTURN: Bus turnaround cycles <1-16> <#-1>
;// <i> The delay is in CCLK cycles
;// </h>
EMC_STA_WTURN1_Val EQU 0x0000000F
;// </e> End of Static Setup for Static CS1 Area
;// Configure External Bus Behaviour for Static CS2 Area ---------------
;// <e> Configure External Bus Behaviour for Static CS2 Area
EMC_STACS2_SETUP EQU 0
;// <h> Static Memory Configuration Register (EMCStaticConfig2)
;// <i> Defines the configuration information for the static memory CS2
;// <o0.20> WP: Write protect
;// <o0.19> B: Buffer enable
;// <o0.8> EW: Extended wait enable
;// <o0.7> PB: Byte lane state
;// <0=> For reads BLSn are HIGH, for writes BLSn are LOW
;// <1=> For reads BLSn are LOW, for writes BLSn are LOW
;// <o0.6> PC: Chip select polarity
;// <0=> Active LOW chip select
;// <1=> Active HIGH chip select
;// <o0.3> PM: Page mode enable
;// <o0.0..1> MW: Memory width
;// <0=> 8 bit
;// <1=> 16 bit
;// <2=> 32 bit
;// </h>
EMC_STA_CFG2_Val EQU 0x00000000
;// <h> Static Memory Write Enable Delay Register (EMCStaticWaitWen2)
;// <i> Selects the delay from CS2 to write enable
;// <o.0..3> WAITWEN: Wait write enable <1-16> <#-1>
;// <i> The delay is in CCLK cycles
;// </h>
EMC_STA_WWEN2_Val EQU 0x00000000
;// <h> Static Memory Output Enable Delay register (EMCStaticWaitOen2)
;// <i> Selects the delay from CS2 or address change, whichever is later, to output enable
;// <o.0..3> WAITOEN: Wait output enable <0-15>
;// <i> The delay is in CCLK cycles
;// </h>
EMC_STA_WOEN2_Val EQU 0x00000000
;// <h> Static Memory Read Delay Register (EMCStaticWaitRd2)
;// <i> Selects the delay from CS2 to a read access
;// <o.0..4> WAITRD: Non-page mode read wait states or asynchronous page mode read first access wait states <1-32> <#-1>
;// <i> The delay is in CCLK cycles
;// </h>
EMC_STA_WRD2_Val EQU 0x0000001F
;// <h> Static Memory Page Mode Read Delay Register (EMCStaticWaitPage2)
;// <i> Selects the delay for asynchronous page mode sequential accesses for CS2
;// <o.0..4> WAITPAGE: Asynchronous page mode read after the first read wait states <1-32> <#-1>
;// <i> The delay is in CCLK cycles
;// </h>
EMC_STA_WPAGE2_Val EQU 0x0000001F
;// <h> Static Memory Write Delay Register (EMCStaticWaitWr2)
;// <i> Selects the delay from CS2 to a write access
;// <o.0..4> WAITWR: Write wait states <2-33> <#-2>
;// <i> The delay is in CCLK cycles
;// </h>
EMC_STA_WWR2_Val EQU 0x0000001F
;// <h> Static Memory Turn Round Delay Register (EMCStaticWaitTurn2)
;// <i> Selects the number of bus turnaround cycles for CS2
;// <o.0..4> WAITTURN: Bus turnaround cycles <1-16> <#-1>
;// <i> The delay is in CCLK cycles
;// </h>
EMC_STA_WTURN2_Val EQU 0x0000000F
;// </e> End of Static Setup for Static CS2 Area
;// Configure External Bus Behaviour for Static CS3 Area ---------------
;// <e> Configure External Bus Behaviour for Static CS3 Area
EMC_STACS3_SETUP EQU 0
;// <h> Static Memory Configuration Register (EMCStaticConfig3)
;// <i> Defines the configuration information for the static memory CS3
;// <o0.20> WP: Write protect
;// <o0.19> B: Buffer enable
;// <o0.8> EW: Extended wait enable
;// <o0.7> PB: Byte lane state
;// <0=> For reads BLSn are HIGH, for writes BLSn are LOW
;// <1=> For reads BLSn are LOW, for writes BLSn are LOW
;// <o0.6> PC: Chip select polarity
;// <0=> Active LOW chip select
;// <1=> Active HIGH chip select
;// <o0.3> PM: Page mode enable
;// <o0.0..1> MW: Memory width
;// <0=> 8 bit
;// <1=> 16 bit
;// <2=> 32 bit
;// </h>
EMC_STA_CFG3_Val EQU 0x00000000
;// <h> Static Memory Write Enable Delay Register (EMCStaticWaitWen3)
;// <i> Selects the delay from CS3 to write enable
;// <o.0..3> WAITWEN: Wait write enable <1-16> <#-1>
;// <i> The delay is in CCLK cycles
;// </h>
EMC_STA_WWEN3_Val EQU 0x00000000
;// <h> Static Memory Output Enable Delay register (EMCStaticWaitOen3)
;// <i> Selects the delay from CS3 or address change, whichever is later, to output enable
;// <o.0..3> WAITOEN: Wait output enable <0-15>
;// <i> The delay is in CCLK cycles
;// </h>
EMC_STA_WOEN3_Val EQU 0x00000000
;// <h> Static Memory Read Delay Register (EMCStaticWaitRd3)
;// <i> Selects the delay from CS3 to a read access
;// <o.0..4> WAITRD: Non-page mode read wait states or asynchronous page mode read first access wait states <1-32> <#-1>
;// <i> The delay is in CCLK cycles
;// </h>
EMC_STA_WRD3_Val EQU 0x0000001F
;// <h> Static Memory Page Mode Read Delay Register (EMCStaticWaitPage3)
;// <i> Selects the delay for asynchronous page mode sequential accesses for CS3
;// <o.0..4> WAITPAGE: Asynchronous page mode read after the first read wait states <1-32> <#-1>
;// <i> The delay is in CCLK cycles
;// </h>
EMC_STA_WPAGE3_Val EQU 0x0000001F
;// <h> Static Memory Write Delay Register (EMCStaticWaitWr3)
;// <i> Selects the delay from CS3 to a write access
;// <o.0..4> WAITWR: Write wait states <2-33> <#-2>
;// <i> The delay is in CCLK cycles
;// </h>
EMC_STA_WWR3_Val EQU 0x0000001F
;// <h> Static Memory Turn Round Delay Register (EMCStaticWaitTurn3)
;// <i> Selects the number of bus turnaround cycles for CS3
;// <o.0..4> WAITTURN: Bus turnaround cycles <1-16> <#-1>
;// <i> The delay is in CCLK cycles
;// </h>
EMC_STA_WTURN3_Val EQU 0x0000000F
;// </e> End of Static Setup for Static CS3 Area
;// <h> Static Memory Extended Wait Register (EMCStaticExtendedWait)
;// <i> Time long static memory read and write transfers
;// <o.0..9> EXTENDEDWAIT: Extended wait time out <0-1023>
;// <i> The delay is in (16 * CCLK) cycles
;// </h>
EMC_STA_EXT_W_Val EQU 0x00000000
;// </e> End of Static Setup
;// </e> End of EMC Setup
PRESERVE8
; Area Definition and Entry Point
; Startup Code must be linked first at Address at which it expects to run.
AREA RESET, CODE, READONLY
ARM
; Exception Vectors
; Mapped to Address 0.
; Absolute addressing mode must be used.
; Dummy Handlers are implemented as infinite loops which can be modified.
; ARM exception vector table: eight words placed at address 0, each an
; indirect branch through the literal pool below, so the table works
; unchanged wherever it is copied or remapped.
Vectors LDR PC, Reset_Addr ; 0x00 reset
LDR PC, Undef_Addr ; 0x04 undefined instruction
LDR PC, SWI_Addr ; 0x08 software interrupt
LDR PC, PAbt_Addr ; 0x0C prefetch abort
LDR PC, DAbt_Addr ; 0x10 data abort
NOP ; Reserved Vector
LDR PC, IRQ_Addr ; 0x18 IRQ
LDR PC, FIQ_Addr ; 0x1C FIQ
; Handler address literals.  These eight words must stay contiguous with
; the table above: the RAM_INTVEC copy loop later in this file copies
; exactly 16 words starting at 'Vectors'.
Reset_Addr DCD Reset_Handler
Undef_Addr DCD Undef_Handler
SWI_Addr DCD SWI_Handler
PAbt_Addr DCD PAbt_Handler
DAbt_Addr DCD DAbt_Handler
DCD 0 ; Reserved Address
IRQ_Addr DCD IRQ_Handler
FIQ_Addr DCD FIQ_Handler
; Exception Handler
IMPORT rt_hw_trap_udef
IMPORT rt_hw_trap_swi
IMPORT rt_hw_trap_pabt
IMPORT rt_hw_trap_dabt
IMPORT rt_hw_trap_fiq
; Prepare Fatal Context
; Macro used by the fatal exception handlers.  On entry (exception mode):
; lr = exception return address, spsr = CPSR of the interrupted code.
; It captures r0-r3 plus the faulting pc/cpsr, switches to SVC mode with
; IRQ/FIQ masked, and builds a full register frame on the SVC stack.
; On exit: r0 points at that frame (for the rt_hw_trap_* C routine).
; Clobbers r1-r5 of the interrupted context inside the frame build.
MACRO
prepare_fatal
STMFD sp!, {r0-r3} ; stash r0-r3 on the exception-mode stack
MOV r1, sp ; r1 = address of the stashed r0-r3
ADD sp, sp, #16 ; restore exception-mode sp
SUB r2, lr, #4 ; r2 = faulting pc (lr-4; assumes ARM-state offset - TODO confirm per vector)
MRS r3, spsr ; r3 = CPSR of the interrupted code
; switch to SVC mode and no interrupt
MSR cpsr_c, #I_Bit :OR: F_Bit :OR: Mode_SVC
STMFD sp!, {r0} ; old r0
; get sp
ADD r0, sp, #4 ; r0 = SVC sp value before this frame
STMFD sp!, {r3} ; cpsr
STMFD sp!, {r2} ; pc
STMFD sp!, {lr} ; lr
STMFD sp!, {r0} ; sp
STMFD sp!, {r4-r12}
MOV r4, r1 ; fetch the stashed r0-r3 back from the
LDMFD r4!, {r0-r3} ; exception-mode stack ...
STMFD sp!, {r0-r3} ; ... and complete the frame with them
MOV r0, sp ; r0 = frame pointer for the trap routine
MEND
; Undefined-instruction trap: capture the faulting context via
; prepare_fatal, then report it through the matching C trap routine.
; Fix: the original called rt_hw_trap_irq here; the undefined-instruction
; vector must call rt_hw_trap_udef (imported above and otherwise unused,
; and consistent with every sibling handler calling its own trap).
Undef_Handler
prepare_fatal
BL rt_hw_trap_udef ; was: BL rt_hw_trap_irq
B . ; trap is fatal; spin if it ever returns
; Fatal exception handlers: each builds a register frame with
; prepare_fatal (r0 = frame) and hands it to the matching C trap
; routine, then parks the CPU in an infinite loop.
SWI_Handler ; software interrupt (unused by this port)
prepare_fatal
BL rt_hw_trap_swi
B .
PAbt_Handler ; prefetch abort
prepare_fatal
BL rt_hw_trap_pabt
B .
DAbt_Handler ; data abort
prepare_fatal
BL rt_hw_trap_dabt
B .
FIQ_Handler ; FIQ treated as fatal in this port
prepare_fatal
BL rt_hw_trap_fiq
B .
; Reset Handler
; Entry point after reset: optionally configures the PLL and clock
; dividers (guarded by CLOCK_SETUP), then MAM, EMC, stacks, and finally
; jumps to the C runtime.  Register use in this section:
;   R0 = SCB_BASE, R1/R2 = 0xAA/0x55 PLL feed sequence, R3/R4 = scratch.
EXPORT Reset_Handler
Reset_Handler
; Clock Setup ------------------------------------------------------------------
IF (:LNOT:(:DEF:NO_CLOCK_SETUP)):LAND:(CLOCK_SETUP != 0)
LDR R0, =SCB_BASE
MOV R1, #0xAA ; PLL feed byte 1 (every PLLCON/PLLCFG write
MOV R2, #0x55 ; PLL feed byte 2  must be committed by 0xAA,0x55)
; Configure and Enable PLL
LDR R3, =SCS_Val ; Enable main oscillator
STR R3, [R0, #SCS_OFS]
IF (SCS_Val:AND:OSCEN) != 0
OSC_Loop LDR R3, [R0, #SCS_OFS] ; Wait for main osc stabilize
ANDS R3, R3, #OSCSTAT
BEQ OSC_Loop
ENDIF
LDR R3, =CLKSRCSEL_Val ; Select PLL source clock
STR R3, [R0, #CLKSRCSEL_OFS]
LDR R3, =PLLCFG_Val ; program M/N multiplier-divider
STR R3, [R0, #PLLCFG_OFS]
STR R1, [R0, #PLLFEED_OFS] ; feed sequence commits PLLCFG
STR R2, [R0, #PLLFEED_OFS]
MOV R3, #PLLCON_PLLE ; enable PLL (not yet connected)
STR R3, [R0, #PLLCON_OFS]
STR R1, [R0, #PLLFEED_OFS] ; feed sequence commits PLLCON
STR R2, [R0, #PLLFEED_OFS]
IF (CLKSRCSEL_Val:AND:3) != 2
; Wait until PLL Locked (if source is not RTC oscillator)
PLL_Loop LDR R3, [R0, #PLLSTAT_OFS]
ANDS R3, R3, #PLLSTAT_PLOCK
BEQ PLL_Loop
ELSE
; Wait at least 200 cycles (if source is RTC oscillator)
MOV R3, #(200/4)
PLL_Loop SUBS R3, R3, #1
BNE PLL_Loop
ENDIF
; Verify the PLL took the requested M/N values before connecting it.
M_N_Lock LDR R3, [R0, #PLLSTAT_OFS]
LDR R4, =(PLLSTAT_M:OR:PLLSTAT_N)
AND R3, R3, R4
LDR R4, =PLLCFG_Val
EORS R3, R3, R4
BNE M_N_Lock
; Setup CPU clock divider
MOV R3, #CCLKCFG_Val
STR R3, [R0, #CCLKCFG_OFS]
; Setup USB clock divider
LDR R3, =USBCLKCFG_Val
STR R3, [R0, #USBCLKCFG_OFS]
; Setup Peripheral Clock
LDR R3, =PCLKSEL0_Val
STR R3, [R0, #PCLKSEL0_OFS]
LDR R3, =PCLKSEL1_Val
STR R3, [R0, #PCLKSEL1_OFS]
; Switch to PLL Clock
MOV R3, #(PLLCON_PLLE:OR:PLLCON_PLLC) ; enable + connect
STR R3, [R0, #PLLCON_OFS]
STR R1, [R0, #PLLFEED_OFS] ; feed sequence commits the connect
STR R2, [R0, #PLLFEED_OFS]
ENDIF ; CLOCK_SETUP
; Setup Memory Accelerator Module ----------------------------------------------
; Program flash-accelerator timing first, then its mode register.
IF MAM_SETUP != 0
LDR R0, =MAM_BASE
MOV R1, #MAMTIM_Val
STR R1, [R0, #MAMTIM_OFS]
MOV R1, #MAMCR_Val
STR R1, [R0, #MAMCR_OFS]
ENDIF ; MAM_SETUP
; Setup External Memory Controller ---------------------------------------------
; Register use below: R0 = EMC_BASE, R1 = SCB_BASE, R2 = PCB_BASE,
; R4/R5 = scratch values being written.
IF (:LNOT:(:DEF:NO_EMC_SETUP)):LAND:(EMC_SETUP != 0)
LDR R0, =EMC_BASE
LDR R1, =SCB_BASE
LDR R2, =PCB_BASE
LDR R4, =EMC_PCONP_Const ; Enable EMC
LDR R3, [R1, #PCONP_OFS] ; read-modify-write PCONP to keep
ORR R4, R4, R3 ; other peripherals powered
STR R4, [R1, #PCONP_OFS]
LDR R4, =EMC_CTRL_Val
STR R4, [R0, #EMC_CTRL_OFS]
LDR R4, =EMC_CONFIG_Val
STR R4, [R0, #EMC_CONFIG_OFS]
; Setup pin functions for External Bus functionality
LDR R4, =EMC_PINSEL5_Val
STR R4, [R2, #PINSEL5_OFS]
LDR R4, =EMC_PINSEL6_Val
STR R4, [R2, #PINSEL6_OFS]
LDR R4, =EMC_PINSEL8_Val
STR R4, [R2, #PINSEL8_OFS]
LDR R4, =EMC_PINSEL9_Val
STR R4, [R2, #PINSEL9_OFS]
; Setup Dynamic Memory Interface
; Program the shared SDRAM timing registers, then per-chip-select
; RAS/CAS and configuration.
IF (EMC_DYNAMIC_SETUP != 0)
LDR R4, =EMC_DYN_RP_Val
STR R4, [R0, #EMC_DYN_RP_OFS]
LDR R4, =EMC_DYN_RAS_Val
STR R4, [R0, #EMC_DYN_RAS_OFS]
LDR R4, =EMC_DYN_SREX_Val
STR R4, [R0, #EMC_DYN_SREX_OFS]
LDR R4, =EMC_DYN_APR_Val
STR R4, [R0, #EMC_DYN_APR_OFS]
LDR R4, =EMC_DYN_DAL_Val
STR R4, [R0, #EMC_DYN_DAL_OFS]
LDR R4, =EMC_DYN_WR_Val
STR R4, [R0, #EMC_DYN_WR_OFS]
LDR R4, =EMC_DYN_RC_Val
STR R4, [R0, #EMC_DYN_RC_OFS]
LDR R4, =EMC_DYN_RFC_Val
STR R4, [R0, #EMC_DYN_RFC_OFS]
LDR R4, =EMC_DYN_XSR_Val
STR R4, [R0, #EMC_DYN_XSR_OFS]
LDR R4, =EMC_DYN_RRD_Val
STR R4, [R0, #EMC_DYN_RRD_OFS]
LDR R4, =EMC_DYN_MRD_Val
STR R4, [R0, #EMC_DYN_MRD_OFS]
LDR R4, =EMC_DYN_RD_CFG_Val
STR R4, [R0, #EMC_DYN_RD_CFG_OFS]
IF (EMC_DYNCS0_SETUP != 0)
LDR R4, =EMC_DYN_RASCAS0_Val
STR R4, [R0, #EMC_DYN_RASCAS0_OFS]
LDR R4, =EMC_DYN_CFG0_Val
MVN R5, #BUFEN_Const ; clear buffer-enable during init;
AND R4, R4, R5 ; re-enabled after NORMAL mode below
STR R4, [R0, #EMC_DYN_CFG0_OFS]
ENDIF
; Dynamic CS1: program RAS/CAS latency, then the configuration register
; with the buffer-enable bit cleared (re-enabled after NORMAL mode).
; Fix: "MVN R5, =BUFEN_Const" is invalid armasm -- the '=' literal form
; belongs to the LDR pseudo-instruction only; MVN takes '#imm' (matching
; the CS0 block above).  This failed to assemble when enabled.
IF (EMC_DYNCS1_SETUP != 0)
LDR R4, =EMC_DYN_RASCAS1_Val
STR R4, [R0, #EMC_DYN_RASCAS1_OFS]
LDR R4, =EMC_DYN_CFG1_Val
MVN R5, #BUFEN_Const ; was: MVN R5, =BUFEN_Const
AND R4, R4, R5
STR R4, [R0, #EMC_DYN_CFG1_OFS]
ENDIF
; Dynamic CS2: program RAS/CAS latency, then the configuration register
; with the buffer-enable bit cleared (re-enabled after NORMAL mode).
; Fix: MVN requires '#imm', not the LDR-only '=' literal syntax.
IF (EMC_DYNCS2_SETUP != 0)
LDR R4, =EMC_DYN_RASCAS2_Val
STR R4, [R0, #EMC_DYN_RASCAS2_OFS]
LDR R4, =EMC_DYN_CFG2_Val
MVN R5, #BUFEN_Const ; was: MVN R5, =BUFEN_Const
AND R4, R4, R5
STR R4, [R0, #EMC_DYN_CFG2_OFS]
ENDIF
; Dynamic CS3: program RAS/CAS latency, then the configuration register
; with the buffer-enable bit cleared (re-enabled after NORMAL mode).
; Fix: MVN requires '#imm', not the LDR-only '=' literal syntax.
IF (EMC_DYNCS3_SETUP != 0)
LDR R4, =EMC_DYN_RASCAS3_Val
STR R4, [R0, #EMC_DYN_RASCAS3_OFS]
LDR R4, =EMC_DYN_CFG3_Val
MVN R5, #BUFEN_Const ; was: MVN R5, =BUFEN_Const
AND R4, R4, R5
STR R4, [R0, #EMC_DYN_CFG3_OFS]
ENDIF
; SDRAM JEDEC initialisation: power-up delay, NOP, precharge-all,
; burst refresh, normal refresh rate, mode-register set (via dummy
; read), then NORMAL operation.  Delay counts are calibrated in the
; inline comments for a 57.6 MHz CCLK.
LDR R6, =1440000 ; Number of cycles to delay
Wait_0 SUBS R6, R6, #1 ; Delay ~100 ms proc clk 57.6 MHz
BNE Wait_0 ; BNE (3 cyc) + SUBS (1 cyc) = 4 cyc
LDR R4, =(NOP_CMD:OR:0x03) ; Write NOP Command
STR R4, [R0, #EMC_DYN_CTRL_OFS]
LDR R6, =2880000 ; Number of cycles to delay
Wait_1 SUBS R6, R6, #1 ; Delay ~200 ms proc clk 57.6 MHz
BNE Wait_1
LDR R4, =(PALL_CMD:OR:0x03) ; Write Precharge All Command
STR R4, [R0, #EMC_DYN_CTRL_OFS]
MOV R4, #2 ; very fast refresh while the device
STR R4, [R0, #EMC_DYN_RFSH_OFS] ; performs its initial refresh cycles
MOV R6, #64 ; Number of cycles to delay
Wait_2 SUBS R6, R6, #1 ; Delay
BNE Wait_2
LDR R4, =EMC_DYN_RFSH_Val ; switch to the operating refresh rate
STR R4, [R0, #EMC_DYN_RFSH_OFS]
LDR R4, =(MODE_CMD:OR:0x03) ; Write MODE Command
STR R4, [R0, #EMC_DYN_CTRL_OFS]
; Dummy read
; While MODE is active, a read from a crafted address latches the mode
; register value (0x33 in the address bits) into the SDRAM device.
IF (EMC_DYNCS0_SETUP != 0)
LDR R4, =DYN_MEM0_BASE
MOV R5, #(0x33 << 12)
ADD R4, R4, R5
LDR R4, [R4, #0]
ENDIF
IF (EMC_DYNCS1_SETUP != 0)
LDR R4, =DYN_MEM1_BASE
MOV R5, #(0x33 << 12)
ADD R4, R4, R5
LDR R4, [R4, #0]
ENDIF
IF (EMC_DYNCS2_SETUP != 0)
LDR R4, =DYN_MEM2_BASE
MOV R5, #(0x33 << 12)
ADD R4, R4, R5
LDR R4, [R4, #0]
ENDIF
IF (EMC_DYNCS3_SETUP != 0)
LDR R4, =DYN_MEM3_BASE
MOV R5, #(0x33 << 12)
ADD R4, R4, R5
LDR R4, [R4, #0]
ENDIF
LDR R4, =NORMAL_CMD ; Write NORMAL Command
STR R4, [R0, #EMC_DYN_CTRL_OFS]
; Enable buffer if requested by settings
; (the buffer-enable bit was deliberately masked off during init above)
IF (EMC_DYNCS0_SETUP != 0):LAND:((EMC_DYN_CFG0_Val:AND:BUFEN_Const) != 0)
LDR R4, =EMC_DYN_CFG0_Val
STR R4, [R0, #EMC_DYN_CFG0_OFS]
ENDIF
IF (EMC_DYNCS1_SETUP != 0):LAND:((EMC_DYN_CFG1_Val:AND:BUFEN_Const) != 0)
LDR R4, =EMC_DYN_CFG1_Val
STR R4, [R0, #EMC_DYN_CFG1_OFS]
ENDIF
IF (EMC_DYNCS2_SETUP != 0):LAND:((EMC_DYN_CFG2_Val:AND:BUFEN_Const) != 0)
LDR R4, =EMC_DYN_CFG2_Val
STR R4, [R0, #EMC_DYN_CFG2_OFS]
ENDIF
IF (EMC_DYNCS3_SETUP != 0):LAND:((EMC_DYN_CFG3_Val:AND:BUFEN_Const) != 0)
LDR R4, =EMC_DYN_CFG3_Val
STR R4, [R0, #EMC_DYN_CFG3_OFS]
ENDIF
LDR R6, =14400 ; Number of cycles to delay
Wait_3 SUBS R6, R6, #1 ; Delay ~1 ms @ proc clk 57.6 MHz
BNE Wait_3
ENDIF ; EMC_DYNAMIC_SETUP
; Setup Static Memory Interface
; Per chip-select: configuration, then the six wait-state registers
; (write-enable, output-enable, read, page read, write, turnaround).
IF (EMC_STATIC_SETUP != 0)
LDR R6, =1440000 ; Number of cycles to delay
Wait_4 SUBS R6, R6, #1 ; Delay ~100 ms @ proc clk 57.6 MHz
BNE Wait_4
IF (EMC_STACS0_SETUP != 0)
LDR R4, =EMC_STA_CFG0_Val
STR R4, [R0, #EMC_STA_CFG0_OFS]
LDR R4, =EMC_STA_WWEN0_Val
STR R4, [R0, #EMC_STA_WWEN0_OFS]
LDR R4, =EMC_STA_WOEN0_Val
STR R4, [R0, #EMC_STA_WOEN0_OFS]
LDR R4, =EMC_STA_WRD0_Val
STR R4, [R0, #EMC_STA_WRD0_OFS]
LDR R4, =EMC_STA_WPAGE0_Val
STR R4, [R0, #EMC_STA_WPAGE0_OFS]
LDR R4, =EMC_STA_WWR0_Val
STR R4, [R0, #EMC_STA_WWR0_OFS]
LDR R4, =EMC_STA_WTURN0_Val
STR R4, [R0, #EMC_STA_WTURN0_OFS]
ENDIF
IF (EMC_STACS1_SETUP != 0)
LDR R4, =EMC_STA_CFG1_Val
STR R4, [R0, #EMC_STA_CFG1_OFS]
LDR R4, =EMC_STA_WWEN1_Val
STR R4, [R0, #EMC_STA_WWEN1_OFS]
LDR R4, =EMC_STA_WOEN1_Val
STR R4, [R0, #EMC_STA_WOEN1_OFS]
LDR R4, =EMC_STA_WRD1_Val
STR R4, [R0, #EMC_STA_WRD1_OFS]
LDR R4, =EMC_STA_WPAGE1_Val
STR R4, [R0, #EMC_STA_WPAGE1_OFS]
LDR R4, =EMC_STA_WWR1_Val
STR R4, [R0, #EMC_STA_WWR1_OFS]
LDR R4, =EMC_STA_WTURN1_Val
STR R4, [R0, #EMC_STA_WTURN1_OFS]
ENDIF
IF (EMC_STACS2_SETUP != 0)
LDR R4, =EMC_STA_CFG2_Val
STR R4, [R0, #EMC_STA_CFG2_OFS]
LDR R4, =EMC_STA_WWEN2_Val
STR R4, [R0, #EMC_STA_WWEN2_OFS]
LDR R4, =EMC_STA_WOEN2_Val
STR R4, [R0, #EMC_STA_WOEN2_OFS]
LDR R4, =EMC_STA_WRD2_Val
STR R4, [R0, #EMC_STA_WRD2_OFS]
LDR R4, =EMC_STA_WPAGE2_Val
STR R4, [R0, #EMC_STA_WPAGE2_OFS]
LDR R4, =EMC_STA_WWR2_Val
STR R4, [R0, #EMC_STA_WWR2_OFS]
LDR R4, =EMC_STA_WTURN2_Val
STR R4, [R0, #EMC_STA_WTURN2_OFS]
ENDIF
IF (EMC_STACS3_SETUP != 0)
LDR R4, =EMC_STA_CFG3_Val
STR R4, [R0, #EMC_STA_CFG3_OFS]
LDR R4, =EMC_STA_WWEN3_Val
STR R4, [R0, #EMC_STA_WWEN3_OFS]
LDR R4, =EMC_STA_WOEN3_Val
STR R4, [R0, #EMC_STA_WOEN3_OFS]
LDR R4, =EMC_STA_WRD3_Val
STR R4, [R0, #EMC_STA_WRD3_OFS]
LDR R4, =EMC_STA_WPAGE3_Val
STR R4, [R0, #EMC_STA_WPAGE3_OFS]
LDR R4, =EMC_STA_WWR3_Val
STR R4, [R0, #EMC_STA_WWR3_OFS]
LDR R4, =EMC_STA_WTURN3_Val
STR R4, [R0, #EMC_STA_WTURN3_OFS]
ENDIF
LDR R6, =144000 ; Number of cycles to delay
Wait_5 SUBS R6, R6, #1 ; Delay ~10 ms @ proc clk 57.6 MHz
BNE Wait_5
; Extended-wait register is too far from EMC_BASE for an immediate
; offset, so form the full address in R5 first.
LDR R4, =EMC_STA_EXT_W_Val
LDR R5, =EMC_STA_EXT_W_OFS
ADD R5, R5, R0
STR R4, [R5, #0]
ENDIF ; EMC_STATIC_SETUP
ENDIF ; EMC_SETUP
; Copy Exception Vectors to Internal RAM ---------------------------------------
; Copies the 8 vector words plus the 8 handler-address words (16 words
; total) so the vectors can be remapped to RAM.
IF :DEF:RAM_INTVEC
ADR R8, Vectors ; Source
LDR R9, =RAM_BASE ; Destination
LDMIA R8!, {R0-R7} ; Load Vectors
STMIA R9!, {R0-R7} ; Store Vectors
LDMIA R8!, {R0-R7} ; Load Handler Addresses
STMIA R9!, {R0-R7} ; Store Handler Addresses
ENDIF
; Memory Mapping (when Interrupt Vectors are in RAM) ---------------------------
MEMMAP EQU 0xE01FC040 ; Memory Mapping Control
IF :DEF:REMAP
LDR R0, =MEMMAP
IF :DEF:EXTMEM_MODE
MOV R1, #3 ; vectors from external memory
ELIF :DEF:RAM_MODE
MOV R1, #2 ; vectors from internal RAM
ELSE
MOV R1, #1 ; vectors from flash (user mode)
ENDIF
STR R1, [R0]
ENDIF
; Setup Stack for each mode ----------------------------------------------------
; Stacks are carved top-down from Stack_Top; each MSR enters the mode
; with IRQ/FIQ masked, sets that mode's banked SP, then moves R0 down.
LDR R0, =Stack_Top
; Enter Undefined Instruction Mode and set its Stack Pointer
MSR CPSR_c, #Mode_UND:OR:I_Bit:OR:F_Bit
MOV SP, R0
SUB R0, R0, #UND_Stack_Size
; Enter Abort Mode and set its Stack Pointer
MSR CPSR_c, #Mode_ABT:OR:I_Bit:OR:F_Bit
MOV SP, R0
SUB R0, R0, #ABT_Stack_Size
; Enter FIQ Mode and set its Stack Pointer
MSR CPSR_c, #Mode_FIQ:OR:I_Bit:OR:F_Bit
MOV SP, R0
SUB R0, R0, #FIQ_Stack_Size
; Enter IRQ Mode and set its Stack Pointer
MSR CPSR_c, #Mode_IRQ:OR:I_Bit:OR:F_Bit
MOV SP, R0
SUB R0, R0, #IRQ_Stack_Size
; Enter Supervisor Mode and set its Stack Pointer
; (execution continues in SVC mode from here on)
MSR CPSR_c, #Mode_SVC:OR:I_Bit:OR:F_Bit
MOV SP, R0
SUB R0, R0, #SVC_Stack_Size
IF :DEF:__MICROLIB
EXPORT __initial_sp
ELSE
ENDIF
; Enter the C code -------------------------------------------------------------
; BX via register (rather than B) tolerates a Thumb __main entry.
IMPORT __main
LDR R0, =__main
BX R0
IMPORT rt_interrupt_enter
IMPORT rt_interrupt_leave
IMPORT rt_thread_switch_interrupt_flag
IMPORT rt_interrupt_from_thread
IMPORT rt_interrupt_to_thread
IMPORT rt_hw_trap_irq
; IRQ entry: saves the volatile context, dispatches the interrupt via
; rt_hw_trap_irq (bracketed by RT-Thread enter/leave accounting), and
; either returns to the interrupted thread or falls into the
; interrupt-triggered context switch when the scheduler requested one.
IRQ_Handler PROC
EXPORT IRQ_Handler
STMFD sp!, {r0-r12,lr} ; save interrupted context on IRQ stack
BL rt_interrupt_enter
BL rt_hw_trap_irq
BL rt_interrupt_leave
; if rt_thread_switch_interrupt_flag set, jump to
; rt_hw_context_switch_interrupt_do and don't return
LDR r0, =rt_thread_switch_interrupt_flag
LDR r1, [r0]
CMP r1, #1
BEQ rt_hw_context_switch_interrupt_do ; r0 still holds &flag
LDMFD sp!, {r0-r12,lr}
SUBS pc, lr, #4 ; return; SUBS pc restores CPSR from SPSR
ENDP
; /*
; * void rt_hw_context_switch_interrupt_do(rt_base_t flag)
; */
; Perform the context switch requested during an interrupt.  Entered
; from IRQ_Handler with r0 = &rt_thread_switch_interrupt_flag and the
; interrupted thread's r0-r12,lr still on the IRQ stack.  Rebuilds the
; thread's full frame on its own (SVC) stack, saves sp into the
; outgoing TCB, and resumes the incoming thread.  Does not return.
rt_hw_context_switch_interrupt_do PROC
EXPORT rt_hw_context_switch_interrupt_do
MOV r1, #0 ; clear flag
STR r1, [r0]
LDMFD sp!, {r0-r12,lr}; reload saved registers
STMFD sp!, {r0-r3} ; save r0-r3
MOV r1, sp ; r1 -> stashed r0-r3 on IRQ stack
ADD sp, sp, #16 ; restore sp
SUB r2, lr, #4 ; save old task's pc to r2
MRS r3, spsr ; get cpsr of interrupt thread
; switch to SVC mode and no interrupt
MSR cpsr_c, #I_Bit :OR: F_Bit :OR: Mode_SVC
STMFD sp!, {r2} ; push old task's pc
STMFD sp!, {r4-r12,lr}; push old task's lr,r12-r4
MOV r4, r1 ; Special optimised code below
MOV r5, r3
LDMFD r4!, {r0-r3} ; fetch stashed r0-r3 from IRQ stack
STMFD sp!, {r0-r3} ; push old task's r3-r0
STMFD sp!, {r5} ; push old task's cpsr
LDR r4, =rt_interrupt_from_thread
LDR r5, [r4]
STR sp, [r5] ; store sp in preempted tasks's TCB
LDR r6, =rt_interrupt_to_thread
LDR r6, [r6]
LDR sp, [r6] ; get new task's stack pointer
LDMFD sp!, {r4} ; pop new task's cpsr to spsr
MSR spsr_cxsf, r4
BIC r4, r4, #0x20 ; must be ARM mode
MSR cpsr_cxsf, r4
LDMFD sp!, {r0-r12,lr,pc}^ ; pop new task's r0-r12,lr & pc, copy spsr to cpsr
ENDP
; Heap/stack hand-off to the C library.  With MicroLIB the linker uses
; the exported symbols directly; otherwise the legacy two-region
; callback returns the bounds in r0-r3 (r0 = heap base, r1 = stack top,
; r2 = heap limit, r3 = stack base, per the ARM C library convention).
IF :DEF:__MICROLIB
EXPORT __heap_base
EXPORT __heap_limit
ELSE
; User Initial Stack & Heap
AREA |.text|, CODE, READONLY
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + USR_Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ENDIF
END
|
vandercookking/h7_device_RTT
| 2,529
|
rt-thread/libcpu/arm/am335x/context_iar.S
|
;/*
; * Copyright (c) 2006-2021, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date Author Notes
; * 2011-08-14 weety copy from mini2440
; * 2015-04-15 ArdaFu convert from context_gcc.s
; */
#define NOINT 0xc0
SECTION .text:CODE(6)
/*
* rt_base_t rt_hw_interrupt_disable();
*/
; rt_base_t rt_hw_interrupt_disable(void)
; Returns the current CPSR in R0, then masks IRQ and FIQ (NOINT sets
; the I and F bits).  The returned value is the restore token for
; rt_hw_interrupt_enable().
PUBLIC rt_hw_interrupt_disable
rt_hw_interrupt_disable:
MRS R0, CPSR ; R0 = previous interrupt state (return value)
ORR R1, R0, #NOINT ; set I|F
MSR CPSR_C, R1 ; control field only: mode bits unchanged
MOV PC, LR
/*
* void rt_hw_interrupt_enable(rt_base_t level);
*/
; void rt_hw_interrupt_enable(rt_base_t level)
; Restores the CPSR saved by rt_hw_interrupt_disable() (R0 = level),
; re-enabling interrupts only if they were enabled before.
PUBLIC rt_hw_interrupt_enable
rt_hw_interrupt_enable:
MSR CPSR_CXSF, R0
MOV PC, LR
/*
* void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
* r0 --> from
* r1 --> to
*/
; void rt_hw_context_switch(rt_uint32 from, rt_uint32 to)
; Cooperative (thread-level) switch.  R0/R1 point at the TCB slots
; holding the outgoing/incoming stack pointers.  Saves a full frame
; (cpsr, r0-r12, lr, pc) on the outgoing stack, then resumes the
; incoming thread from its saved frame.
PUBLIC rt_hw_context_switch
rt_hw_context_switch:
STMFD SP!, {LR} ; push pc (lr should be pushed in place of PC)
STMFD SP!, {R0-R12, LR} ; push lr & register file
MRS R4, CPSR
TST LR, #0x01 ; caller return address has Thumb bit?
ORRNE R4, R4, #0x20 ; it's thumb code
STMFD SP!, {R4} ; push cpsr
STR SP, [R0] ; store sp in preempted tasks TCB
LDR SP, [R1] ; get new task stack pointer
LDMFD SP!, {R4} ; pop new task spsr
MSR SPSR_cxsf, R4
LDMFD SP!, {R0-R12, LR, PC}^ ; pop new task r0-r12, lr & pc
/*
* void rt_hw_context_switch_to(rt_uint32 to);
* r0 --> to
*/
; void rt_hw_context_switch_to(rt_uint32 to)
; First-ever dispatch: no outgoing context to save.  R0 points at the
; TCB slot holding the new thread's stack pointer.  Does not return.
PUBLIC rt_hw_context_switch_to
rt_hw_context_switch_to:
LDR SP, [R0] ; get new task stack pointer
LDMFD SP!, {R4} ; pop new task spsr
MSR SPSR_cxsf, R4
BIC R4, R4, #0x20 ; must be ARM mode
MSR CPSR_CXSF, R4
LDMFD SP!, {R0-R12, LR, PC}^ ; pop new task r0-r12, lr & pc
/*
* void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
*/
IMPORT rt_thread_switch_interrupt_flag
IMPORT rt_interrupt_from_thread
IMPORT rt_interrupt_to_thread
; void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to)
; Called from interrupt context: only records the request.  The first
; call per interrupt latches 'from'; repeated calls just update 'to',
; so the switch performed at IRQ exit goes from the originally
; preempted thread to the most recently requested one.
PUBLIC rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
LDR R2, =rt_thread_switch_interrupt_flag
LDR R3, [R2]
CMP R3, #1
BEQ _reswitch ; already pending: keep original 'from'
MOV R3, #1 ; set flag to 1
STR R3, [R2]
LDR R2, =rt_interrupt_from_thread ; set rt_interrupt_from_thread
STR R0, [R2]
_reswitch:
LDR R2, =rt_interrupt_to_thread ; set rt_interrupt_to_thread
STR R1, [R2]
MOV PC, LR
END
|
vandercookking/h7_device_RTT
| 2,210
|
rt-thread/libcpu/arm/am335x/context_gcc.S
|
/*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-07-05 Bernard the first version
*/
/*
* rt_base_t rt_hw_interrupt_disable();
*/
/*
 * rt_base_t rt_hw_interrupt_disable(void)
 * Returns the current CPSR in r0 (the restore token), then masks both
 * IRQ and FIQ with cpsid.
 */
.globl rt_hw_interrupt_disable
rt_hw_interrupt_disable:
    mrs r0, cpsr            @ r0 = previous interrupt state (return value)
    cpsid if                @ mask IRQ and FIQ
    bx  lr
/*
* void rt_hw_interrupt_enable(rt_base_t level);
*/
/*
 * void rt_hw_interrupt_enable(rt_base_t level)
 * Restores the CPSR control field saved by rt_hw_interrupt_disable().
 */
.globl rt_hw_interrupt_enable
rt_hw_interrupt_enable:
    msr cpsr_c, r0
    bx  lr
/*
* void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
* r0 --> from
* r1 --> to
*/
/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to)
 * Cooperative switch: r0/r1 point at the TCB slots holding the
 * outgoing/incoming stack pointers.  Saves a full frame (cpsr, r0-r12,
 * lr, pc) on the outgoing stack, then resumes the incoming thread.
 */
.globl rt_hw_context_switch
rt_hw_context_switch:
    stmfd   sp!, {lr}           @ push pc (lr should be pushed in place of PC)
    stmfd   sp!, {r0-r12, lr}   @ push lr & register file
    mrs     r4, cpsr
    tst     lr, #0x01           @ caller return address has Thumb bit?
    orrne   r4, r4, #0x20       @ it's thumb code
    stmfd   sp!, {r4}           @ push cpsr
    str     sp, [r0]            @ store sp in preempted tasks TCB
    ldr     sp, [r1]            @ get new task stack pointer
    ldmfd   sp!, {r4}           @ pop new task cpsr to spsr
    msr     spsr_cxsf, r4
_do_switch:
    ldmfd   sp!, {r0-r12, lr, pc}^  @ pop new task r0-r12, lr & pc, copy spsr to cpsr
/*
* void rt_hw_context_switch_to(rt_uint32 to);
* r0 --> to
*/
/*
 * void rt_hw_context_switch_to(rt_uint32 to)
 * First-ever dispatch: no outgoing context to save.  r0 points at the
 * TCB slot holding the new thread's stack pointer.  Does not return.
 */
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
    ldr     sp, [r0]            @ get new task stack pointer
    ldmfd   sp!, {r4}           @ pop new task spsr
    msr     spsr_cxsf, r4
    bic     r4, r4, #0x20       @ must be ARM mode
    msr     cpsr_cxsf, r4
    ldmfd   sp!, {r0-r12, lr, pc}^  @ pop new task r0-r12, lr & pc
/*
* void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
*/
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
/*
 * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to)
 * Called from interrupt context: only records the request.  The first
 * call per interrupt latches 'from'; later calls just update 'to'.
 * The actual switch happens at IRQ exit.
 */
.globl rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
    ldr r2, =rt_thread_switch_interrupt_flag
    ldr r3, [r2]
    cmp r3, #1
    beq _reswitch               @ already pending: keep original 'from'
    mov r3, #1                  @ set rt_thread_switch_interrupt_flag to 1
    str r3, [r2]
    ldr r2, =rt_interrupt_from_thread   @ set rt_interrupt_from_thread
    str r0, [r2]
_reswitch:
    ldr r2, =rt_interrupt_to_thread     @ set rt_interrupt_to_thread
    str r1, [r2]
    bx  lr
|
vandercookking/h7_device_RTT
| 7,091
|
rt-thread/libcpu/arm/am335x/start_gcc.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-07-05 Bernard the first version
*/
@ ARM processor mode values for CPSR[4:0] and the interrupt mask bits.
.equ Mode_USR, 0x10
.equ Mode_FIQ, 0x11
.equ Mode_IRQ, 0x12
.equ Mode_SVC, 0x13
.equ Mode_ABT, 0x17
.equ Mode_UND, 0x1B
.equ Mode_SYS, 0x1F
.equ I_Bit, 0x80 @ when I bit is set, IRQ is disabled
.equ F_Bit, 0x40 @ when F bit is set, FIQ is disabled
@ Per-mode stack sizes carved top-down from stack_top by stack_setup.
.equ UND_Stack_Size, 0x00000200
.equ SVC_Stack_Size, 0x00000100
.equ ABT_Stack_Size, 0x00000000
.equ FIQ_Stack_Size, 0x00000000
.equ IRQ_Stack_Size, 0x00000100
.equ USR_Stack_Size, 0x00000100
@ Total exception-stack budget (sum of the sizes above).
#define ISR_Stack_Size (UND_Stack_Size + SVC_Stack_Size + ABT_Stack_Size + \
FIQ_Stack_Size + IRQ_Stack_Size)
/* stack */
/* stack */
@ Zero-initialised exception-stack region; stack_top is the first
@ address past it.
@ NOTE(review): .rept repeats a 4-byte .long ISR_Stack_Size times, so
@ this reserves ISR_Stack_Size *words* (4x the byte total implied by
@ the .equ names) -- confirm whether that oversizing is intentional.
.globl stack_start
.globl stack_top
.align 3
stack_start:
.rept ISR_Stack_Size
.long 0
.endr
stack_top:
/* reset entry */
@ Reset entry point: force SVC mode with the current interrupt mask,
@ set up per-mode stacks, clear .bss, run C++ static constructors,
@ then jump into rtthread_startup().  Does not return.
.globl _reset
_reset:
/* set the cpu to SVC32 mode and disable interrupt */
mrs r0, cpsr
bic r0, r0, #0x1f
orr r0, r0, #0x13
msr cpsr_c, r0
/* setup stack */
bl stack_setup
/* clear .bss */
mov r0,#0 /* get a zero */
ldr r1,=__bss_start /* bss start */
ldr r2,=__bss_end /* bss end */
bss_loop:
cmp r1,r2 /* check if data to clear */
strlo r0,[r1],#4 /* clear 4 bytes */
blo bss_loop /* loop until done */
/* call C++ constructors of global objects */
ldr r0, =__ctors_start__
ldr r1, =__ctors_end__
ctor_loop:
cmp r0, r1
beq ctor_end
ldr r2, [r0], #4 /* r2 = next constructor, advance cursor */
stmfd sp!, {r0-r1} /* ctor may clobber r0-r1: preserve cursor/end */
mov lr, pc /* manual BL through a register (ARMv4-safe) */
bx r2
ldmfd sp!, {r0-r1}
b ctor_loop
ctor_end:
/* start RT-Thread Kernel */
@ Indirect jump through a literal so the target can live anywhere.
ldr pc, _rtthread_startup
_rtthread_startup:
.word rtthread_startup
@ Carve the per-mode stacks top-down from stack_top: enter each
@ exception mode with IRQ/FIQ masked, set its banked sp, and move r0
@ down by that mode's size.  Ends in SVC mode; sl is left pointing
@ USR_Stack_Size below the final sp.
stack_setup:
ldr r0, =stack_top
@ Enter Undefined Instruction Mode and set its Stack Pointer
msr cpsr_c, #Mode_UND|I_Bit|F_Bit
mov sp, r0
sub r0, r0, #UND_Stack_Size
@ Enter Abort Mode and set its Stack Pointer
msr cpsr_c, #Mode_ABT|I_Bit|F_Bit
mov sp, r0
sub r0, r0, #ABT_Stack_Size
@ Enter FIQ Mode and set its Stack Pointer
msr cpsr_c, #Mode_FIQ|I_Bit|F_Bit
mov sp, r0
sub r0, r0, #FIQ_Stack_Size
@ Enter IRQ Mode and set its Stack Pointer
msr cpsr_c, #Mode_IRQ|I_Bit|F_Bit
mov sp, r0
sub r0, r0, #IRQ_Stack_Size
@ Enter Supervisor Mode and set its Stack Pointer
msr cpsr_c, #Mode_SVC|I_Bit|F_Bit
mov sp, r0
sub r0, r0, #SVC_Stack_Size
@ Enter User Mode and set its Stack Pointer
mov sp, r0
sub sl, sp, #USR_Stack_Size
bx lr
/* exception handlers: undef, swi, padt, dabt, resv, irq, fiq */
.align 5
.globl vector_undef
vector_undef:
sub sp, sp, #72
stmia sp, {r0 - r12} @/* Calling r0-r12 */
add r8, sp, #60
mrs r1, cpsr
mrs r2, spsr
orr r2,r2, #I_Bit|F_Bit
msr cpsr_c, r2
mov r0, r0
stmdb r8, {sp, lr} @/* Calling SP, LR */
msr cpsr_c, r1 @/* return to Undefined Instruction mode */
str lr, [r8, #0] @/* Save calling PC */
mrs r6, spsr
str r6, [r8, #4] @/* Save CPSR */
str r0, [r8, #8] @/* Save OLD_R0 */
mov r0, sp
bl rt_hw_trap_udef
ldmia sp, {r0 - r12} @/* Calling r0 - r2 */
mov r0, r0
ldr lr, [sp, #60] @/* Get PC */
add sp, sp, #72
movs pc, lr @/* return & move spsr_svc into cpsr */
.align 5
.globl vector_swi
vector_swi:
bl rt_hw_trap_swi
.align 5
.globl vector_pabt
vector_pabt:
bl rt_hw_trap_pabt
.align 5
.globl vector_dabt
vector_dabt:
sub sp, sp, #72
stmia sp, {r0 - r12} @/* Calling r0-r12 */
add r8, sp, #60
stmdb r8, {sp, lr} @/* Calling SP, LR */
str lr, [r8, #0] @/* Save calling PC */
mrs r6, spsr
str r6, [r8, #4] @/* Save CPSR */
str r0, [r8, #8] @/* Save OLD_R0 */
mov r0, sp
bl rt_hw_trap_dabt
ldmia sp, {r0 - r12} @/* Calling r0 - r2 */
mov r0, r0
ldr lr, [sp, #60] @/* Get PC */
add sp, sp, #72
movs pc, lr @/* return & move spsr_svc into cpsr */
.align 5
.globl vector_resv
vector_resv:
b .
.align 5
.globl vector_fiq
vector_fiq:
stmfd sp!,{r0-r7,lr}
bl rt_hw_trap_fiq
ldmfd sp!,{r0-r7,lr}
subs pc,lr,#4
.globl rt_interrupt_enter
.globl rt_interrupt_leave
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_current_thread
.globl vmm_thread
.globl vmm_virq_check
.globl vector_irq
vector_irq:
stmfd sp!, {r0-r12,lr}
bl rt_interrupt_enter
bl rt_hw_trap_irq
bl rt_interrupt_leave
@ if rt_thread_switch_interrupt_flag set, jump to
@ rt_hw_context_switch_interrupt_do and don't return
ldr r0, =rt_thread_switch_interrupt_flag
ldr r1, [r0]
cmp r1, #1
beq rt_hw_context_switch_interrupt_do
ldmfd sp!, {r0-r12,lr}
subs pc, lr, #4
rt_hw_context_switch_interrupt_do:
mov r1, #0 @ clear flag
str r1, [r0]
ldmfd sp!, {r0-r12,lr}@ reload saved registers
stmfd sp, {r0-r2} @ save r0-r2
mrs r0, spsr @ get cpsr of interrupt thread
sub r1, sp, #4*3
sub r2, lr, #4 @ save old task's pc to r2
@ switch to SVC mode with no interrupt
msr cpsr_c, #I_Bit|F_Bit|Mode_SVC
stmfd sp!, {r2} @ push old task's pc
stmfd sp!, {r3-r12,lr}@ push old task's lr,r12-r4
ldmfd r1, {r1-r3} @ restore r0-r2 of the interrupt thread
stmfd sp!, {r1-r3} @ push old task's r0-r2
stmfd sp!, {r0} @ push old task's cpsr
ldr r4, =rt_interrupt_from_thread
ldr r5, [r4]
str sp, [r5] @ store sp in preempted tasks's TCB
ldr r6, =rt_interrupt_to_thread
ldr r6, [r6]
ldr sp, [r6] @ get new task's stack pointer
ldmfd sp!, {r4} @ pop new task's cpsr to spsr
msr spsr_cxsf, r4
ldmfd sp!, {r0-r12,lr,pc}^ @ pop new task's r0-r12,lr & pc, copy spsr to cpsr
|
vandercookking/h7_device_RTT
| 9,497
|
rt-thread/libcpu/arm/am335x/start_iar.s
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2015-04-06 zchong the first version
*/
MODULE ?cstartup
; --------------------
; Mode, corresponds to bits 0-4 in CPSR
MODE_MSK DEFINE 0x1F ; Bit mask for mode bits in CPSR
I_Bit DEFINE 0x80 ; when I bit is set, IRQ is disabled
F_Bit DEFINE 0x40 ; when F bit is set, FIQ is disabled
USR_MODE DEFINE 0x10 ; User mode
FIQ_MODE DEFINE 0x11 ; Fast Interrupt Request mode
IRQ_MODE DEFINE 0x12 ; Interrupt Request mode
SVC_MODE DEFINE 0x13 ; Supervisor mode
ABT_MODE DEFINE 0x17 ; Abort mode
UND_MODE DEFINE 0x1B ; Undefined Instruction mode
SYS_MODE DEFINE 0x1F ; System mode
;; Forward declaration of sections.
;; Per-mode stack sections; sized and placed by the linker command file.
SECTION IRQ_STACK:DATA:NOROOT(3)
SECTION FIQ_STACK:DATA:NOROOT(3)
SECTION SVC_STACK:DATA:NOROOT(3)
SECTION ABT_STACK:DATA:NOROOT(3)
SECTION UND_STACK:DATA:NOROOT(3)
SECTION CSTACK:DATA:NOROOT(3)
SECTION .text:CODE
SECTION .intvec:CODE:NOROOT(5)
PUBLIC __vector
PUBLIC __iar_program_start
__iar_init$$done: ; The vector table is not needed
; until after copy initialization is done
__vector: ; Make this a DATA label, so that stack usage
; analysis doesn't consider it an uncalled fun
ARM
; ARM exception vector table: each slot loads pc from the literal
; pool below (DATA block), so handlers can live anywhere in memory.
; All default exception handlers (except reset) are
; defined as weak symbol definitions.
; If a handler is defined by the application it will take precedence.
LDR PC,Reset_Addr ; Reset
LDR PC,Undefined_Addr ; Undefined instructions
LDR PC,SWI_Addr ; Software interrupt (SWI/SVC)
LDR PC,Prefetch_Addr ; Prefetch abort
LDR PC,Abort_Addr ; Data abort
DCD 0 ; RESERVED
LDR PC,IRQ_Addr ; IRQ
LDR PC,FIQ_Addr ; FIQ
DATA
Reset_Addr: DCD __iar_program_start
Undefined_Addr: DCD Undefined_Handler
SWI_Addr: DCD SWI_Handler
Prefetch_Addr: DCD Prefetch_Handler
Abort_Addr: DCD Abort_Handler
IRQ_Addr: DCD IRQ_Handler
FIQ_Addr: DCD FIQ_Handler
; --------------------------------------------------
; ?cstartup -- low-level system initialization code.
;
; After a reset execution starts here, the mode is ARM, supervisor
; with interrupts disabled.
;
SECTION .text:CODE:NOROOT(2)
EXTERN rt_hw_trap_udef
EXTERN rt_hw_trap_swi
EXTERN rt_hw_trap_pabt
EXTERN rt_hw_trap_dabt
EXTERN rt_hw_trap_fiq
EXTERN rt_hw_trap_irq
EXTERN rt_interrupt_enter
EXTERN rt_interrupt_leave
EXTERN rt_thread_switch_interrupt_flag
EXTERN rt_interrupt_from_thread
EXTERN rt_interrupt_to_thread
EXTERN rt_current_thread
EXTERN vmm_thread
EXTERN vmm_virq_check
EXTERN __cmain
REQUIRE __vector
EXTWEAK __iar_init_core
EXTWEAK __iar_init_vfp
ARM
__iar_program_start:
?cstartup:
;
; Add initialization needed before setup of stackpointers here.
;
;
; Initialize the stack pointers.
; The pattern below can be used for any of the exception stacks:
; FIQ, IRQ, SVC, ABT, UND, SYS.
; The USR mode uses the same stack as SYS.
; The stack segments must be defined in the linker command file,
; and be declared above.
;
MRS r0, cpsr ; Original PSR value
;; Set up the interrupt stack pointer.
BIC r0, r0, #MODE_MSK ; Clear the mode bits
ORR r0, r0, #IRQ_MODE ; Set IRQ mode bits
MSR cpsr_c, r0 ; Change the mode
LDR sp, =SFE(IRQ_STACK) ; End of IRQ_STACK
BIC sp,sp,#0x7 ; Make sure SP is 8 aligned
;; Set up the fast interrupt stack pointer.
BIC r0, r0, #MODE_MSK ; Clear the mode bits
ORR r0, r0, #FIQ_MODE ; Set FIQ mode bits
MSR cpsr_c, r0 ; Change the mode
LDR sp, =SFE(FIQ_STACK) ; End of FIQ_STACK
BIC sp,sp,#0x7 ; Make sure SP is 8 aligned
;; Set up the abort stack pointer.
BIC r0,r0,#MODE_MSK ; Clear the mode bits
ORR r0,r0,#ABT_MODE ; Set Abort mode bits
MSR cpsr_c,r0 ; Change the mode
LDR sp,=SFE(ABT_STACK) ; End of ABT_STACK
BIC sp,sp,#0x7 ; Make sure SP is 8 aligned
;; Set up the undefined-instruction stack pointer.
BIC r0,r0,#MODE_MSK ; Clear the mode bits
ORR r0,r0,#UND_MODE ; Set Undefined mode bits
MSR cpsr_c,r0 ; Change the mode
LDR sp,=SFE(UND_STACK) ; End of UND_STACK
BIC sp,sp,#0x7 ; Make sure SP is 8 aligned
;; Set up the normal stack pointer.
BIC r0 ,r0, #MODE_MSK ; Clear the mode bits
ORR r0 ,r0, #SVC_MODE ; Set Supervisor mode bits
MSR cpsr_c, r0 ; Change the mode
LDR sp, =SFE(SVC_STACK) ; End of SVC_STACK
BIC sp,sp,#0x7 ; Make sure SP is 8 aligned
;; Turn on core features assumed to be enabled.
BL __iar_init_core
;; Initialize VFP (if needed).
BL __iar_init_vfp
;; Continue to __cmain for C-level initialization.
B __cmain
; Undefined_Handler: build a 72-byte exception frame on the UND stack
; ([0..48]=r0-r12, [52]=sp, [56]=lr, [60]=pc, [64]=cpsr, [68]=orig r0)
; and pass it to rt_hw_trap_udef(frame).
Undefined_Handler:
SUB sp, sp, #72
STMIA sp, {r0 - r12} ;/* Calling r0-r12 */
ADD r8, sp, #60
MRS r1, cpsr ; r1 = current (UND) mode for restoring below
MRS r2, spsr ; r2 = interrupted context's cpsr
ORR r2,r2, #I_Bit | F_Bit
MSR cpsr_c, r2 ; briefly enter interrupted mode to read banked sp/lr
MOV r0, r0 ; nop
STMDB r8, {sp, lr} ;/* Calling SP, LR */
MSR cpsr_c, r1 ;/* return to Undefined Instruction mode */
STR lr, [r8, #0] ;/* Save calling PC */
MRS r6, spsr
STR r6, [r8, #4] ;/* Save CPSR */
STR r0, [r8, #8] ;/* Save OLD_R0 */
MOV r0, sp ; r0 = pointer to the frame
BL rt_hw_trap_udef
LDMIA sp, {r0 - r12} ;/* Calling r0 - r2 */
MOV r0, r0 ; nop
LDR lr, [sp, #60] ;/* Get PC */
ADD sp, sp, #72
MOVS pc, lr ;/* return & move spsr_svc into cpsr */
; NOTE(review): SWI/Prefetch handlers do not return to the interrupted
; context; the trap functions are expected not to return.
SWI_Handler:
BL rt_hw_trap_swi
Prefetch_Handler:
BL rt_hw_trap_pabt
; Abort_Handler: same 72-byte frame layout as Undefined_Handler,
; passed to rt_hw_trap_dabt.
Abort_Handler:
SUB sp, sp, #72
STMIA sp, {r0 - r12} ;/* Calling r0-r12 */
ADD r8, sp, #60
STMDB r8, {sp, lr} ;/* Calling SP, LR */
STR lr, [r8, #0] ;/* Save calling PC */
MRS r6, spsr
STR r6, [r8, #4] ;/* Save CPSR */
STR r0, [r8, #8] ;/* Save OLD_R0 */
MOV r0, sp ; r0 = pointer to the frame
BL rt_hw_trap_dabt
LDMIA sp, {r0 - r12} ;/* Calling r0 - r2 */
MOV r0, r0 ; nop
LDR lr, [sp, #60] ;/* Get PC */
ADD sp, sp, #72
MOVS pc, lr ;/* return & move spsr_svc into cpsr */
FIQ_Handler:
STMFD sp!,{r0-r7,lr}
BL rt_hw_trap_fiq
LDMFD sp!,{r0-r7,lr}
SUBS pc,lr,#4 ; FIQ return: lr points 4 past interrupted instruction
; IRQ_Handler: dispatch via rt_hw_trap_irq; if the kernel requested a
; context switch during the ISR, fall into the switch path below.
IRQ_Handler:
STMFD sp!, {r0-r12,lr}
BL rt_interrupt_enter
BL rt_hw_trap_irq
BL rt_interrupt_leave
; if rt_thread_switch_interrupt_flag set, jump to
; rt_hw_context_switch_interrupt_do and don't return
LDR r0, =rt_thread_switch_interrupt_flag
LDR r1, [r0]
CMP r1, #1
BEQ rt_hw_context_switch_interrupt_do
LDMFD sp!, {r0-r12,lr}
SUBS pc, lr, #4
; rt_hw_context_switch_interrupt_do: rebuild the interrupted thread's
; context on its SVC stack (cpsr, r0-r12, lr, pc), save sp into its TCB,
; then restore the target thread's context and resume it.
rt_hw_context_switch_interrupt_do:
MOV r1, #0 ; clear flag
STR r1, [r0]
LDMFD sp!, {r0-r12,lr}; reload saved registers
STMFD sp, {r0-r2} ; save r0-r2
MRS r0, spsr ; get cpsr of interrupt thread
SUB r1, sp, #4*3 ; r1 -> the r0-r2 copy stashed just below IRQ sp
SUB r2, lr, #4 ; save old task's pc to r2
; switch to SVC mode with no interrupt
MSR cpsr_c, #I_Bit | F_Bit | SVC_MODE
STMFD sp!, {r2} ; push old task's pc
STMFD sp!, {r3-r12,lr}; push old task's lr and r12-r3
LDMFD r1, {r1-r3} ; restore r0-r2 of the interrupt thread
STMFD sp!, {r1-r3} ; push old task's r0-r2
STMFD sp!, {r0} ; push old task's cpsr
LDR r4, =rt_interrupt_from_thread
LDR r5, [r4]
STR sp, [r5] ; store sp in preempted task's TCB
LDR r6, =rt_interrupt_to_thread
LDR r6, [r6]
LDR sp, [r6] ; get new task's stack pointer
LDMFD sp!, {r4} ; pop new task's cpsr to spsr
MSR spsr_cxsf, r4
LDMFD sp!, {r0-r12,lr,pc}^ ; pop new task's r0-r12,lr & pc, copy spsr to cpsr
END
|
vandercookking/h7_device_RTT
| 2,914
|
rt-thread/libcpu/arm/am335x/cp15_gcc.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-07-05 Bernard the first version
*/
@ rt_cpu_vector_set_base(base): write r0 into the CP15 c12 vector base
@ register, with a dsb so the change is visible before returning.
.globl rt_cpu_vector_set_base
rt_cpu_vector_set_base:
mcr p15, #0, r0, c12, c0, #0
dsb
bx lr
@ rt_cpu_vector_get_base(): return the CP15 c12 vector base in r0.
.globl rt_cpu_vector_get_base
rt_cpu_vector_get_base:
mrc p15, #0, r0, c12, c0, #0
bx lr
@ rt_cpu_get_sctlr(): return the system control register (CP15 c1) in r0.
.globl rt_cpu_get_sctlr
rt_cpu_get_sctlr:
mrc p15, #0, r0, c1, c0, #0
bx lr
@ rt_cpu_dcache_enable(): set SCTLR bit 2 (C, data cache enable).
.globl rt_cpu_dcache_enable
rt_cpu_dcache_enable:
mrc p15, #0, r0, c1, c0, #0
orr r0, r0, #0x00000004
mcr p15, #0, r0, c1, c0, #0
bx lr
@ rt_cpu_icache_enable(): set SCTLR bit 12 (I, instruction cache enable).
.globl rt_cpu_icache_enable
rt_cpu_icache_enable:
mrc p15, #0, r0, c1, c0, #0
orr r0, r0, #0x00001000
mcr p15, #0, r0, c1, c0, #0
bx lr
@ literal-pool masks for the set/way loop below
_FLD_MAX_WAY:
.word 0x3ff
_FLD_MAX_IDX:
.word 0x7ff
@ rt_cpu_dcache_clean_flush(): clean+invalidate the entire data cache by
@ set/way, walking every cache level up to the level of coherency.
@ Register roles in the loop: r0=CLIDR, r3=2*LoC, r10=2*level,
@ r2=log2(line len), r4/r5=max way/way shift, r7=max set index,
@ r9=current way, r11=assembled set/way operand.
.globl rt_cpu_dcache_clean_flush
rt_cpu_dcache_clean_flush:
push {r4-r11}
dmb
mrc p15, #1, r0, c0, c0, #1 @ read clid register
ands r3, r0, #0x7000000 @ get level of coherency
mov r3, r3, lsr #23 @ r3 = LoC * 2 (level counter steps by 2)
beq finished
mov r10, #0
loop1:
add r2, r10, r10, lsr #1 @ r2 = 3 * level: bit offset of the level's type field
mov r1, r0, lsr r2
and r1, r1, #7 @ r1 = cache type for this level
cmp r1, #2
blt skip @ skip levels with no data cache
mcr p15, #2, r10, c0, c0, #0 @ select level in cache size selection reg
isb
mrc p15, #1, r1, c0, c0, #0 @ read cache size id register
and r2, r1, #7
add r2, r2, #4 @ r2 = log2(line length in bytes)
ldr r4, _FLD_MAX_WAY
ands r4, r4, r1, lsr #3 @ r4 = number of ways - 1
clz r5, r4 @ r5 = shift to place way bits at the top
ldr r7, _FLD_MAX_IDX
ands r7, r7, r1, lsr #13 @ r7 = number of sets - 1
loop2:
mov r9, r4 @ restart way counter for this set
loop3:
orr r11, r10, r9, lsl r5 @ level | way
orr r11, r11, r7, lsl r2 @ | set
mcr p15, #0, r11, c7, c14, #2 @ clean & invalidate by set/way
subs r9, r9, #1
bge loop3
subs r7, r7, #1
bge loop2
skip:
add r10, r10, #2 @ next level (counter steps by 2)
cmp r3, r10
bgt loop1
finished:
dsb
isb
pop {r4-r11}
bx lr
@ rt_cpu_dcache_disable(): clear SCTLR C bit, then clean+flush so no
@ dirty lines are stranded in the disabled cache.
.globl rt_cpu_dcache_disable
rt_cpu_dcache_disable:
push {r4-r11, lr}
mrc p15, #0, r0, c1, c0, #0
bic r0, r0, #0x00000004
mcr p15, #0, r0, c1, c0, #0
bl rt_cpu_dcache_clean_flush
pop {r4-r11, lr}
bx lr
@ rt_cpu_icache_disable(): clear SCTLR bit 12 (I).
.globl rt_cpu_icache_disable
rt_cpu_icache_disable:
mrc p15, #0, r0, c1, c0, #0
bic r0, r0, #0x00001000
mcr p15, #0, r0, c1, c0, #0
bx lr
@ rt_cpu_mmu_disable(): invalidate the TLB, then clear SCTLR bit 0 (M).
.globl rt_cpu_mmu_disable
rt_cpu_mmu_disable:
mcr p15, #0, r0, c8, c7, #0 @ invalidate tlb
mrc p15, #0, r0, c1, c0, #0
bic r0, r0, #1
mcr p15, #0, r0, c1, c0, #0 @ clear mmu bit
dsb
bx lr
@ rt_cpu_mmu_enable(): set SCTLR bit 0 (M, MMU enable).
.globl rt_cpu_mmu_enable
rt_cpu_mmu_enable:
mrc p15, #0, r0, c1, c0, #0
orr r0, r0, #0x001
mcr p15, #0, r0, c1, c0, #0 @ set mmu enable bit
dsb
bx lr
@ rt_cpu_tlb_set(base): write r0 (translation table base) to CP15 c2.
.globl rt_cpu_tlb_set
rt_cpu_tlb_set:
mcr p15, #0, r0, c2, c0, #0
dmb
bx lr
|
vandercookking/h7_device_RTT
| 3,415
|
rt-thread/libcpu/arm/am335x/cp15_iar.s
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2015-04-06 zchong change to iar compiler from convert from cp15_gcc.S
*/
SECTION .text:CODE:NOROOT(2)
ARM
; rt_cpu_vector_set_base(base): write r0 into the CP15 c12 vector base
; register, with a DSB so the change is visible before returning.
EXPORT rt_cpu_vector_set_base
rt_cpu_vector_set_base:
MCR p15, #0, r0, c12, c0, #0
DSB
BX lr
; rt_cpu_vector_get_base(): return the CP15 c12 vector base in r0.
EXPORT rt_cpu_vector_get_base
rt_cpu_vector_get_base:
MRC p15, #0, r0, c12, c0, #0
BX lr
; rt_cpu_get_sctlr(): return the system control register (CP15 c1) in r0.
EXPORT rt_cpu_get_sctlr
rt_cpu_get_sctlr:
MRC p15, #0, r0, c1, c0, #0
BX lr
; rt_cpu_dcache_enable(): set SCTLR bit 2 (C, data cache enable).
EXPORT rt_cpu_dcache_enable
rt_cpu_dcache_enable:
MRC p15, #0, r0, c1, c0, #0
ORR r0, r0, #0x00000004
MCR p15, #0, r0, c1, c0, #0
BX lr
; rt_cpu_icache_enable(): set SCTLR bit 12 (I, instruction cache enable).
EXPORT rt_cpu_icache_enable
rt_cpu_icache_enable:
MRC p15, #0, r0, c1, c0, #0
ORR r0, r0, #0x00001000
MCR p15, #0, r0, c1, c0, #0
BX lr
;_FLD_MAX_WAY DEFINE 0x3ff
;_FLD_MAX_IDX DEFINE 0x7ff
; rt_cpu_dcache_clean_flush(): clean+invalidate the entire data cache by
; set/way, walking every cache level up to the level of coherency.
; Register roles: r0=CLIDR, r3=2*LoC, r10=2*level, r2=log2(line len),
; r4/r5=max way/way shift, r7=max set index, r9=current way,
; r11=assembled set/way operand.
EXPORT rt_cpu_dcache_clean_flush
rt_cpu_dcache_clean_flush:
PUSH {r4-r11}
DMB
MRC p15, #1, r0, c0, c0, #1 ; read clid register
ANDS r3, r0, #0x7000000 ; get level of coherency
MOV r3, r3, lsr #23 ; r3 = LoC * 2 (level counter steps by 2)
BEQ finished
MOV r10, #0
loop1:
ADD r2, r10, r10, lsr #1 ; r2 = 3 * level: bit offset of level's type field
MOV r1, r0, lsr r2
AND r1, r1, #7 ; r1 = cache type for this level
CMP r1, #2
BLT skip ; skip levels with no data cache
MCR p15, #2, r10, c0, c0, #0 ; select level in cache size selection reg
ISB
MRC p15, #1, r1, c0, c0, #0 ; read cache size id register
AND r2, r1, #7
ADD r2, r2, #4 ; r2 = log2(line length in bytes)
;LDR r4, _FLD_MAX_WAY
LDR r4, =0x3FF
ANDS r4, r4, r1, lsr #3 ; r4 = number of ways - 1
CLZ r5, r4 ; r5 = shift to place way bits at the top
;LDR r7, _FLD_MAX_IDX
LDR r7, =0x7FF
ANDS r7, r7, r1, lsr #13 ; r7 = number of sets - 1
loop2:
MOV r9, r4 ; restart way counter for this set
loop3:
ORR r11, r10, r9, lsl r5 ; level | way
ORR r11, r11, r7, lsl r2 ; | set
MCR p15, #0, r11, c7, c14, #2 ; clean & invalidate by set/way
SUBS r9, r9, #1
BGE loop3
SUBS r7, r7, #1
BGE loop2
skip:
ADD r10, r10, #2 ; next level (counter steps by 2)
CMP r3, r10
BGT loop1
finished:
DSB
ISB
POP {r4-r11}
BX lr
; rt_cpu_dcache_disable(): clear SCTLR C bit, then clean+flush so no
; dirty lines are stranded in the disabled cache.
EXPORT rt_cpu_dcache_disable
rt_cpu_dcache_disable:
PUSH {r4-r11, lr}
MRC p15, #0, r0, c1, c0, #0
BIC r0, r0, #0x00000004
MCR p15, #0, r0, c1, c0, #0
BL rt_cpu_dcache_clean_flush
POP {r4-r11, lr}
BX lr
; rt_cpu_icache_disable(): clear SCTLR bit 12 (I).
EXPORT rt_cpu_icache_disable
rt_cpu_icache_disable:
MRC p15, #0, r0, c1, c0, #0
BIC r0, r0, #0x00001000
MCR p15, #0, r0, c1, c0, #0
BX lr
; rt_cpu_mmu_disable(): invalidate the TLB, then clear SCTLR bit 0 (M).
EXPORT rt_cpu_mmu_disable
rt_cpu_mmu_disable:
MCR p15, #0, r0, c8, c7, #0 ; invalidate tlb
MRC p15, #0, r0, c1, c0, #0
BIC r0, r0, #1
MCR p15, #0, r0, c1, c0, #0 ; clear mmu bit
DSB
BX lr
; rt_cpu_mmu_enable(): set SCTLR bit 0 (M, MMU enable).
EXPORT rt_cpu_mmu_enable
rt_cpu_mmu_enable:
MRC p15, #0, r0, c1, c0, #0
ORR r0, r0, #0x001
MCR p15, #0, r0, c1, c0, #0 ; set mmu enable bit
DSB
BX lr
; rt_cpu_tlb_set(base): write r0 (translation table base) to CP15 c2.
EXPORT rt_cpu_tlb_set
rt_cpu_tlb_set:
MCR p15, #0, r0, c2, c0, #0
DMB
BX lr
END
|
vandercookking/h7_device_RTT
| 5,832
|
rt-thread/libcpu/arm/cortex-m23/context_iar.S
|
;/*
; * Copyright (c) 2006-2019, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date Author Notes
; * 2010-01-25 Bernard first version
; * 2012-06-01 aozima set pendsv priority to 0xFF.
; * 2012-08-17 aozima fixed bug: store r8 - r11.
; * 2013-06-18 aozima add restore MSP feature.
; * 2019-03-31 xuzhuoyi port to Cortex-M23.
; */
;/**
; * @addtogroup CORTEX-M23
; */
;/*@{*/
SCB_VTOR EQU 0xE000ED08 ; Vector Table Offset Register
NVIC_INT_CTRL EQU 0xE000ED04 ; interrupt control state register
NVIC_SHPR3 EQU 0xE000ED20 ; system handler priority register (3)
NVIC_PENDSV_PRI EQU 0xFFFF0000 ; PendSV and SysTick priority value (lowest)
NVIC_PENDSVSET EQU 0x10000000 ; value to trigger PendSV exception
SECTION .text:CODE(2)
THUMB
REQUIRE8
PRESERVE8
IMPORT rt_thread_switch_interrupt_flag
IMPORT rt_interrupt_from_thread
IMPORT rt_interrupt_to_thread
;/*
; * rt_base_t rt_hw_interrupt_disable();
; * Returns the current PRIMASK in r0, then masks all interrupts.
; */
EXPORT rt_hw_interrupt_disable
rt_hw_interrupt_disable:
MRS r0, PRIMASK
CPSID I
BX LR
;/*
; * void rt_hw_interrupt_enable(rt_base_t level);
; * Restores the PRIMASK previously returned by rt_hw_interrupt_disable.
; */
EXPORT rt_hw_interrupt_enable
rt_hw_interrupt_enable:
MSR PRIMASK, r0
BX LR
;/*
; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
; * r0 --> from
; * r1 --> to
; * Records the from/to threads and pends PendSV; the actual register
; * save/restore happens in PendSV_Handler.
; */
EXPORT rt_hw_context_switch_interrupt
EXPORT rt_hw_context_switch
rt_hw_context_switch_interrupt:
rt_hw_context_switch:
; set rt_thread_switch_interrupt_flag to 1
LDR r2, =rt_thread_switch_interrupt_flag
LDR r3, [r2]
CMP r3, #1
BEQ _reswitch ; switch already pending: only update the "to" thread
MOVS r3, #0x1
STR r3, [r2]
LDR r2, =rt_interrupt_from_thread ; set rt_interrupt_from_thread
STR r0, [r2]
_reswitch
LDR r2, =rt_interrupt_to_thread ; set rt_interrupt_to_thread
STR r1, [r2]
LDR r0, =NVIC_INT_CTRL ; trigger the PendSV exception (causes context switch)
LDR r1, =NVIC_PENDSVSET
STR r1, [r0]
BX LR
; r0 --> switch from thread stack
; r1 --> switch to thread stack
; psr, pc, lr, r12, r3, r2, r1, r0 are pushed into [from] stack
; PendSV_Handler saves {r4-r11} of the outgoing thread onto its PSP stack
; (hardware already stacked the caller-saved frame), stores the stack
; pointer in the TCB, then unstacks the incoming thread the same way.
EXPORT PendSV_Handler
PendSV_Handler:
; disable interrupt to protect context switch
MRS r2, PRIMASK
CPSID I
; get rt_thread_switch_interrupt_flag
LDR r0, =rt_thread_switch_interrupt_flag
LDR r1, [r0]
CMP r1, #0x00
BEQ pendsv_exit ; pendsv already handled
; clear rt_thread_switch_interrupt_flag to 0
MOVS r1, #0x00
STR r1, [r0]
LDR r0, =rt_interrupt_from_thread
LDR r1, [r0]
CMP r1, #0x00
BEQ switch_to_thread ; skip register save at the first time
MRS r1, psp ; get from thread stack pointer
SUBS r1, r1, #0x20 ; space for {r4 - r7} and {r8 - r11}
LDR r0, [r0]
STR r1, [r0] ; update from thread stack pointer
STMIA r1!, {r4 - r7} ; push thread {r4 - r7} register to thread stack
MOV r4, r8 ; mov thread {r8 - r11} to {r4 - r7}
MOV r5, r9 ; (Armv8-M baseline STMIA only takes low regs,
MOV r6, r10 ; so high regs are copied down first)
MOV r7, r11
STMIA r1!, {r4 - r7} ; push thread {r8 - r11} high register to thread stack
switch_to_thread
LDR r1, =rt_interrupt_to_thread
LDR r1, [r1]
LDR r1, [r1] ; load thread stack pointer
LDMIA r1!, {r4 - r7} ; pop thread {r4 - r7} register from thread stack
PUSH {r4 - r7} ; push {r4 - r7} to MSP for copy {r8 - r11}
LDMIA r1!, {r4 - r7} ; pop thread {r8 - r11} high register from thread stack to {r4 - r7}
MOV r8, r4 ; mov {r4 - r7} to {r8 - r11}
MOV r9, r5
MOV r10, r6
MOV r11, r7
POP {r4 - r7} ; pop {r4 - r7} from MSP
MSR psp, r1 ; update stack pointer
pendsv_exit
; restore interrupt
MSR PRIMASK, r2
MOVS r0, #0x03 ; r0 = 0 - 3 = 0xFFFFFFFD: EXC_RETURN for
RSBS r0, r0, #0x00 ; "return to thread mode, use PSP"
BX r0
;/*
; * void rt_hw_context_switch_to(rt_uint32 to);
; * r0 --> to
; * this function is used to perform the first thread switch
; */
EXPORT rt_hw_context_switch_to
rt_hw_context_switch_to:
; set to thread
LDR r1, =rt_interrupt_to_thread
STR r0, [r1]
; set from thread to 0 (tells PendSV_Handler to skip the register save)
LDR r1, =rt_interrupt_from_thread
MOVS r0, #0x0
STR r0, [r1]
; set interrupt flag to 1
LDR r1, =rt_thread_switch_interrupt_flag
MOVS r0, #1
STR r0, [r1]
; set the PendSV and SysTick exception priority
LDR r0, =NVIC_SHPR3
LDR r1, =NVIC_PENDSV_PRI
LDR r2, [r0,#0x00] ; read
ORRS r1,r1,r2 ; modify
STR r1, [r0] ; write-back
; trigger the PendSV exception (causes context switch)
LDR r0, =NVIC_INT_CTRL
LDR r1, =NVIC_PENDSVSET
STR r1, [r0]
NOP
; restore MSP to the initial value from the vector table entry 0
LDR r0, =SCB_VTOR
LDR r0, [r0]
LDR r0, [r0]
NOP
MSR msp, r0
; enable interrupts at processor level
CPSIE I
; ensure PendSV exception taken place before subsequent operation
DSB
ISB
; never reach here!
; compatible with old version
EXPORT rt_hw_interrupt_thread_switch
rt_hw_interrupt_thread_switch:
BX lr
IMPORT rt_hw_hard_fault_exception
; HardFault_Handler: pass the faulting thread's stacked frame (PSP) to
; the C handler rt_hw_hard_fault_exception.
EXPORT HardFault_Handler
HardFault_Handler:
; get current context
MRS r0, psp ; get fault thread stack pointer
PUSH {lr}
BL rt_hw_hard_fault_exception
POP {pc}
END
|
vandercookking/h7_device_RTT
| 6,331
|
rt-thread/libcpu/arm/cortex-m23/context_gcc.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2010-01-25 Bernard first version
* 2012-06-01 aozima set pendsv priority to 0xFF.
* 2012-08-17 aozima fixed bug: store r8 - r11.
* 2013-02-20 aozima port to gcc.
* 2013-06-18 aozima add restore MSP feature.
* 2013-11-04 bright fixed hardfault bug for gcc.
* 2019-03-31 xuzhuoyi port to Cortex-M23.
*/
.cpu cortex-m23
.fpu softvfp
.syntax unified
.thumb
.text
.equ SCB_VTOR, 0xE000ED08 /* Vector Table Offset Register */
.equ NVIC_INT_CTRL, 0xE000ED04 /* interrupt control state register */
.equ NVIC_SHPR3, 0xE000ED20 /* system priority register (3) */
.equ NVIC_PENDSV_PRI, 0xFFFF0000 /* PendSV and SysTick priority value (lowest) */
.equ NVIC_PENDSVSET, 0x10000000 /* value to trigger PendSV exception */
/*
 * rt_base_t rt_hw_interrupt_disable();
 * Returns the current PRIMASK in R0, then masks all interrupts.
 */
.global rt_hw_interrupt_disable
.type rt_hw_interrupt_disable, %function
rt_hw_interrupt_disable:
MRS R0, PRIMASK
CPSID I
BX LR
/*
 * void rt_hw_interrupt_enable(rt_base_t level);
 * Restores the PRIMASK previously returned by rt_hw_interrupt_disable.
 */
.global rt_hw_interrupt_enable
.type rt_hw_interrupt_enable, %function
rt_hw_interrupt_enable:
MSR PRIMASK, R0
BX LR
/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
 * R0 --> from
 * R1 --> to
 * Records the from/to threads and pends PendSV; the actual register
 * save/restore happens in PendSV_Handler.
 */
.global rt_hw_context_switch_interrupt
.type rt_hw_context_switch_interrupt, %function
.global rt_hw_context_switch
.type rt_hw_context_switch, %function
rt_hw_context_switch_interrupt:
rt_hw_context_switch:
/* set rt_thread_switch_interrupt_flag to 1 */
LDR R2, =rt_thread_switch_interrupt_flag
LDR R3, [R2]
CMP R3, #1
BEQ _reswitch /* switch already pending: only update the "to" thread */
MOVS R3, #1
STR R3, [R2]
LDR R2, =rt_interrupt_from_thread /* set rt_interrupt_from_thread */
STR R0, [R2]
_reswitch:
LDR R2, =rt_interrupt_to_thread /* set rt_interrupt_to_thread */
STR R1, [R2]
LDR R0, =NVIC_INT_CTRL /* trigger the PendSV exception (causes context switch) */
LDR R1, =NVIC_PENDSVSET
STR R1, [R0]
BX LR
/* R0 --> switch from thread stack
 * R1 --> switch to thread stack
 * psr, pc, LR, R12, R3, R2, R1, R0 are pushed into [from] stack
 *
 * PendSV_Handler saves {R4-R11} of the outgoing thread onto its PSP stack
 * (hardware already stacked the caller-saved frame), stores the stack
 * pointer in the TCB, then unstacks the incoming thread the same way.
 */
.global PendSV_Handler
.type PendSV_Handler, %function
PendSV_Handler:
/* disable interrupt to protect context switch */
MRS R2, PRIMASK
CPSID I
/* get rt_thread_switch_interrupt_flag */
LDR R0, =rt_thread_switch_interrupt_flag
LDR R1, [R0]
CMP R1, #0x00
BEQ pendsv_exit /* pendsv already handled */
/* clear rt_thread_switch_interrupt_flag to 0 */
MOVS R1, #0
STR R1, [R0]
LDR R0, =rt_interrupt_from_thread
LDR R1, [R0]
CMP R1, #0x00
BEQ switch_to_thread /* skip register save at the first time */
MRS R1, PSP /* get from thread stack pointer */
SUBS R1, R1, #0x20 /* space for {R4 - R7} and {R8 - R11} */
LDR R0, [R0]
STR R1, [R0] /* update from thread stack pointer */
STMIA R1!, {R4 - R7} /* push thread {R4 - R7} register to thread stack */
MOV R4, R8 /* mov thread {R8 - R11} to {R4 - R7}: baseline */
MOV R5, R9 /* STMIA only takes low registers, so copy down */
MOV R6, R10
MOV R7, R11
STMIA R1!, {R4 - R7} /* push thread {R8 - R11} high register to thread stack */
switch_to_thread:
LDR R1, =rt_interrupt_to_thread
LDR R1, [R1]
LDR R1, [R1] /* load thread stack pointer */
LDMIA R1!, {R4 - R7} /* pop thread {R4 - R7} register from thread stack */
PUSH {R4 - R7} /* push {R4 - R7} to MSP for copy {R8 - R11} */
LDMIA R1!, {R4 - R7} /* pop thread {R8 - R11} high register from thread stack to {R4 - R7} */
MOV R8, R4 /* mov {R4 - R7} to {R8 - R11} */
MOV R9, R5
MOV R10, R6
MOV R11, R7
POP {R4 - R7} /* pop {R4 - R7} from MSP */
MSR PSP, R1 /* update stack pointer */
pendsv_exit:
/* restore interrupt */
MSR PRIMASK, R2
MOVS R0, #0x03 /* R0 = 0 - 3 = 0xFFFFFFFD: EXC_RETURN for */
RSBS R0, R0, #0x00 /* "return to thread mode, use PSP" */
BX R0
/*
 * void rt_hw_context_switch_to(rt_uint32 to);
 * R0 --> to
 * Performs the first thread switch: no "from" thread, so PendSV skips
 * the register save; MSP is reset from the vector table entry 0.
 */
.global rt_hw_context_switch_to
.type rt_hw_context_switch_to, %function
rt_hw_context_switch_to:
LDR R1, =rt_interrupt_to_thread
STR R0, [R1]
/* set from thread to 0 */
LDR R1, =rt_interrupt_from_thread
MOVS R0, #0
STR R0, [R1]
/* set interrupt flag to 1 */
LDR R1, =rt_thread_switch_interrupt_flag
MOVS R0, #1
STR R0, [R1]
/* set the PendSV and SysTick exception priority */
LDR R0, =NVIC_SHPR3
LDR R1, =NVIC_PENDSV_PRI
LDR R2, [R0,#0x00] /* read */
ORRS R1, R1, R2 /* modify */
STR R1, [R0] /* write-back */
LDR R0, =NVIC_INT_CTRL /* trigger the PendSV exception (causes context switch) */
LDR R1, =NVIC_PENDSVSET
STR R1, [R0]
NOP
/* restore MSP */
LDR R0, =SCB_VTOR
LDR R0, [R0]
LDR R0, [R0]
NOP
MSR MSP, R0
/* enable interrupts at processor level */
CPSIE I
/* ensure PendSV exception taken place before subsequent operation */
DSB
ISB
/* never reach here! */
/* compatible with old version */
.global rt_hw_interrupt_thread_switch
.type rt_hw_interrupt_thread_switch, %function
rt_hw_interrupt_thread_switch:
BX LR
NOP
/* HardFault_Handler: pass the faulting thread's stacked frame (PSP) to
 * the C handler rt_hw_hard_fault_exception. */
.global HardFault_Handler
.type HardFault_Handler, %function
HardFault_Handler:
/* get current context */
MRS R0, PSP /* get fault thread stack pointer */
PUSH {LR}
BL rt_hw_hard_fault_exception
POP {PC}
/*
 * rt_uint32_t rt_hw_interrupt_check(void);
 * R0 --> state
 * Returns IPSR: non-zero when executing inside an exception handler.
 */
.global rt_hw_interrupt_check
.type rt_hw_interrupt_check, %function
rt_hw_interrupt_check:
MRS R0, IPSR
BX LR
|
vandercookking/h7_device_RTT
| 5,956
|
rt-thread/libcpu/arm/cortex-m23/context_rvds.S
|
;/*
; * Copyright (c) 2006-2022, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date Author Notes
; * 2010-01-25 Bernard first version
; * 2012-06-01 aozima set pendsv priority to 0xFF.
; * 2012-08-17 aozima fixed bug: store r8 - r11.
; * 2013-06-18 aozima add restore MSP feature.
; * 2019-03-31 xuzhuoyi port to Cortex-M23.
; */
;/**
; * @addtogroup CORTEX-M23
; */
;/*@{*/
SCB_VTOR EQU 0xE000ED08 ; Vector Table Offset Register
NVIC_INT_CTRL EQU 0xE000ED04 ; interrupt control state register
NVIC_SHPR3 EQU 0xE000ED20 ; system handler priority register (3)
NVIC_PENDSV_PRI EQU 0xFFFF0000 ; PendSV and SysTick priority value (lowest)
NVIC_PENDSVSET EQU 0x10000000 ; value to trigger PendSV exception
AREA |.text|, CODE, READONLY, ALIGN=2
THUMB
REQUIRE8
PRESERVE8
IMPORT rt_thread_switch_interrupt_flag
IMPORT rt_interrupt_from_thread
IMPORT rt_interrupt_to_thread
;/*
; * rt_base_t rt_hw_interrupt_disable();
; * Returns the current PRIMASK in r0, then masks all interrupts.
; */
rt_hw_interrupt_disable PROC
EXPORT rt_hw_interrupt_disable
MRS r0, PRIMASK
CPSID I
BX LR
ENDP
;/*
; * void rt_hw_interrupt_enable(rt_base_t level);
; * Restores the PRIMASK previously returned by rt_hw_interrupt_disable.
; */
rt_hw_interrupt_enable PROC
EXPORT rt_hw_interrupt_enable
MSR PRIMASK, r0
BX LR
ENDP
;/*
; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
; * r0 --> from
; * r1 --> to
; * Records the from/to threads and pends PendSV; the actual register
; * save/restore happens in PendSV_Handler.
; */
rt_hw_context_switch_interrupt
EXPORT rt_hw_context_switch_interrupt
rt_hw_context_switch PROC
EXPORT rt_hw_context_switch
; set rt_thread_switch_interrupt_flag to 1
LDR r2, =rt_thread_switch_interrupt_flag
LDR r3, [r2]
CMP r3, #1
BEQ _reswitch ; switch already pending: only update the "to" thread
MOVS r3, #0x01
STR r3, [r2]
LDR r2, =rt_interrupt_from_thread ; set rt_interrupt_from_thread
STR r0, [r2]
_reswitch
LDR r2, =rt_interrupt_to_thread ; set rt_interrupt_to_thread
STR r1, [r2]
LDR r0, =NVIC_INT_CTRL ; trigger the PendSV exception (causes context switch)
LDR r1, =NVIC_PENDSVSET
STR r1, [r0]
BX LR
ENDP
; r0 --> switch from thread stack
; r1 --> switch to thread stack
; psr, pc, lr, r12, r3, r2, r1, r0 are pushed into [from] stack
; PendSV_Handler saves {r4-r11} of the outgoing thread onto its PSP stack
; (hardware already stacked the caller-saved frame), stores the stack
; pointer in the TCB, then unstacks the incoming thread the same way.
PendSV_Handler PROC
EXPORT PendSV_Handler
; disable interrupt to protect context switch
MRS r2, PRIMASK
CPSID I
; get rt_thread_switch_interrupt_flag
LDR r0, =rt_thread_switch_interrupt_flag
LDR r1, [r0]
CMP r1, #0x00
BEQ pendsv_exit ; pendsv already handled
; clear rt_thread_switch_interrupt_flag to 0
MOVS r1, #0x00
STR r1, [r0]
LDR r0, =rt_interrupt_from_thread
LDR r1, [r0]
CMP r1, #0x00
BEQ switch_to_thread ; skip register save at the first time
MRS r1, psp ; get from thread stack pointer
SUBS r1, r1, #0x20 ; space for {r4 - r7} and {r8 - r11}
LDR r0, [r0]
STR r1, [r0] ; update from thread stack pointer
STMIA r1!, {r4 - r7} ; push thread {r4 - r7} register to thread stack
MOV r4, r8 ; mov thread {r8 - r11} to {r4 - r7}: baseline
MOV r5, r9 ; STMIA only takes low registers, so copy down
MOV r6, r10
MOV r7, r11
STMIA r1!, {r4 - r7} ; push thread {r8 - r11} high register to thread stack
switch_to_thread
LDR r1, =rt_interrupt_to_thread
LDR r1, [r1]
LDR r1, [r1] ; load thread stack pointer
LDMIA r1!, {r4 - r7} ; pop thread {r4 - r7} register from thread stack
PUSH {r4 - r7} ; push {r4 - r7} to MSP for copy {r8 - r11}
LDMIA r1!, {r4 - r7} ; pop thread {r8 - r11} high register from thread stack to {r4 - r7}
MOV r8, r4 ; mov {r4 - r7} to {r8 - r11}
MOV r9, r5
MOV r10, r6
MOV r11, r7
POP {r4 - r7} ; pop {r4 - r7} from MSP
MSR psp, r1 ; update stack pointer
pendsv_exit
; restore interrupt
MSR PRIMASK, r2
MOVS r0, #0x03 ; r0 = 0 - 3 = 0xFFFFFFFD: EXC_RETURN for
RSBS r0, r0, #0x00 ; "return to thread mode, use PSP"
BX r0
ENDP
;/*
; * void rt_hw_context_switch_to(rt_uint32 to);
; * r0 --> to
; * this function is used to perform the first thread switch
; */
rt_hw_context_switch_to PROC
EXPORT rt_hw_context_switch_to
; set to thread
LDR r1, =rt_interrupt_to_thread
STR r0, [r1]
; set from thread to 0 (tells PendSV_Handler to skip the register save)
LDR r1, =rt_interrupt_from_thread
MOVS r0, #0x0
STR r0, [r1]
; set interrupt flag to 1
LDR r1, =rt_thread_switch_interrupt_flag
MOVS r0, #1
STR r0, [r1]
; set the PendSV and SysTick exception priority
LDR r0, =NVIC_SHPR3
LDR r1, =NVIC_PENDSV_PRI
LDR r2, [r0,#0x00] ; read
ORRS r1,r1,r2 ; modify
STR r1, [r0] ; write-back
; trigger the PendSV exception (causes context switch)
LDR r0, =NVIC_INT_CTRL
LDR r1, =NVIC_PENDSVSET
STR r1, [r0]
; restore MSP to the initial value from the vector table entry 0
LDR r0, =SCB_VTOR
LDR r0, [r0]
LDR r0, [r0]
MSR msp, r0
; enable interrupts at processor level
CPSIE I
; ensure PendSV exception taken place before subsequent operation
DSB
ISB
; never reach here!
ENDP
; compatible with old version
rt_hw_interrupt_thread_switch PROC
EXPORT rt_hw_interrupt_thread_switch
BX lr
ENDP
IMPORT rt_hw_hard_fault_exception
; HardFault_Handler: pass the faulting thread's stacked frame (PSP) to
; the C handler rt_hw_hard_fault_exception.
HardFault_Handler PROC
EXPORT HardFault_Handler
; get current context
MRS r0, psp ; get fault thread stack pointer
PUSH {lr}
BL rt_hw_hard_fault_exception
POP {pc}
ENDP
ALIGN 4
END
|
vandercookking/h7_device_RTT
| 5,584
|
rt-thread/libcpu/arm/cortex-m3/context_iar.S
|
;/*
; * Copyright (c) 2006-2018, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date Author Notes
; * 2009-01-17 Bernard first version
; * 2009-09-27 Bernard add protect when contex switch occurs
; * 2013-06-18 aozima add restore MSP feature.
; * 2013-07-09 aozima enhancement hard fault exception handler.
; */
;/**
; * @addtogroup cortex-m3
; */
;/*@{*/
SCB_VTOR EQU 0xE000ED08 ; Vector Table Offset Register
NVIC_INT_CTRL EQU 0xE000ED04 ; interrupt control state register
NVIC_SYSPRI2 EQU 0xE000ED20 ; system priority register (2)
NVIC_PENDSV_PRI EQU 0xFFFF0000 ; PendSV and SysTick priority value (lowest)
NVIC_PENDSVSET EQU 0x10000000 ; value to trigger PendSV exception
SECTION .text:CODE(2)
THUMB
REQUIRE8
PRESERVE8
IMPORT rt_thread_switch_interrupt_flag
IMPORT rt_interrupt_from_thread
IMPORT rt_interrupt_to_thread
;/*
; * rt_base_t rt_hw_interrupt_disable();
; */
EXPORT rt_hw_interrupt_disable
rt_hw_interrupt_disable:
MRS r0, PRIMASK
CPSID I
BX LR
;/*
; * void rt_hw_interrupt_enable(rt_base_t level);
; */
EXPORT rt_hw_interrupt_enable
rt_hw_interrupt_enable:
MSR PRIMASK, r0
BX LR
;/*
; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
; * r0 --> from
; * r1 --> to
; */
EXPORT rt_hw_context_switch_interrupt
EXPORT rt_hw_context_switch
rt_hw_context_switch_interrupt:
rt_hw_context_switch:
; set rt_thread_switch_interrupt_flag to 1
LDR r2, =rt_thread_switch_interrupt_flag
LDR r3, [r2]
CMP r3, #1
BEQ _reswitch
MOV r3, #1
STR r3, [r2]
LDR r2, =rt_interrupt_from_thread ; set rt_interrupt_from_thread
STR r0, [r2]
_reswitch
LDR r2, =rt_interrupt_to_thread ; set rt_interrupt_to_thread
STR r1, [r2]
LDR r0, =NVIC_INT_CTRL ; trigger the PendSV exception (causes context switch)
LDR r1, =NVIC_PENDSVSET
STR r1, [r0]
BX LR
; r0 --> switch from thread stack
; r1 --> switch to thread stack
; psr, pc, lr, r12, r3, r2, r1, r0 are pushed into [from] stack
EXPORT PendSV_Handler
; PendSV exception: performs the deferred thread context switch.
; Hardware has already stacked psr/pc/lr/r12/r3-r0 on the from-thread's
; PSP; this handler saves/restores the remaining r4-r11 and swaps PSP.
; If rt_interrupt_from_thread is 0 (first switch) the save is skipped.
PendSV_Handler:
; disable interrupt to protect context switch
MRS r2, PRIMASK
CPSID I
; get rt_thread_switch_interrupt_flag
LDR r0, =rt_thread_switch_interrupt_flag
LDR r1, [r0]
CBZ r1, pendsv_exit ; pendsv already handled
; clear rt_thread_switch_interrupt_flag to 0
MOV r1, #0x00
STR r1, [r0]
LDR r0, =rt_interrupt_from_thread
LDR r1, [r0]
CBZ r1, switch_to_thread ; skip register save at the first time
MRS r1, psp ; get from thread stack pointer
STMFD r1!, {r4 - r11} ; push r4 - r11 register
LDR r0, [r0] ; r0 = *rt_interrupt_from_thread (saved-SP slot)
STR r1, [r0] ; update from thread stack pointer
switch_to_thread
LDR r1, =rt_interrupt_to_thread
LDR r1, [r1]
LDR r1, [r1] ; load thread stack pointer
LDMFD r1!, {r4 - r11} ; pop r4 - r11 register
MSR psp, r1 ; update stack pointer
pendsv_exit
; restore interrupt
MSR PRIMASK, r2
ORR lr, lr, #0x04 ; EXC_RETURN bit2 = 1: return to thread using PSP
BX lr
;/*
; * void rt_hw_context_switch_to(rt_uint32 to);
; * r0 --> to
; */
EXPORT rt_hw_context_switch_to
; void rt_hw_context_switch_to(rt_uint32 to)
; Starts the very first thread: records "to", clears "from" (PendSV then
; skips the register save), sets PendSV/SysTick to lowest priority, pends
; PendSV, resets MSP from the vector table's initial-SP entry, and enables
; interrupts so the pended PendSV is taken. Does not return.
rt_hw_context_switch_to:
LDR r1, =rt_interrupt_to_thread
STR r0, [r1]
; set from thread to 0
LDR r1, =rt_interrupt_from_thread
MOV r0, #0x0
STR r0, [r1]
; set interrupt flag to 1
LDR r1, =rt_thread_switch_interrupt_flag
MOV r0, #1
STR r0, [r1]
; set the PendSV and SysTick exception priority
LDR r0, =NVIC_SYSPRI2
LDR r1, =NVIC_PENDSV_PRI
LDR.W r2, [r0,#0x00] ; read
ORR r1,r1,r2 ; modify
STR r1, [r0] ; write-back
LDR r0, =NVIC_INT_CTRL ; trigger the PendSV exception (causes context switch)
LDR r1, =NVIC_PENDSVSET
STR r1, [r0]
; restore MSP
LDR r0, =SCB_VTOR
LDR r0, [r0] ; r0 = vector table base
LDR r0, [r0] ; r0 = vector[0] = initial main stack pointer
NOP
MSR msp, r0
; enable interrupts at processor level
CPSIE F
CPSIE I
; ensure PendSV exception taken place before subsequent operation
DSB
ISB
; never reach here!
; never reach here!
; compatible with old version
EXPORT rt_hw_interrupt_thread_switch
; Kept for compatibility with older RT-Thread ports: a no-op, since
; switching is fully handled by PendSV on this architecture.
rt_hw_interrupt_thread_switch:
BX lr
IMPORT rt_hw_hard_fault_exception
EXPORT HardFault_Handler
; Hard fault entry: selects the faulting stack (MSP if EXC_RETURN bit2 is
; clear, PSP otherwise), pushes r4-r11 plus EXC_RETURN onto it so the C
; handler sees a full exception frame, then calls
; rt_hw_hard_fault_exception(context) with r0 = frame pointer.
HardFault_Handler:
; get current context
MRS r0, msp ; get fault context from handler.
TST lr, #0x04 ; if(!EXC_RETURN[2])
BEQ _get_sp_done
MRS r0, psp ; get fault context from thread.
_get_sp_done
STMFD r0!, {r4 - r11} ; push r4 - r11 register
;STMFD r0!, {lr} ; push exec_return register
SUB r0, r0, #0x04 ; manual push of lr (EXC_RETURN) — same effect as the
STR lr, [r0] ; commented-out STMFD above
TST lr, #0x04 ; if(!EXC_RETURN[2])
BEQ _update_msp
MSR psp, r0 ; update stack pointer to PSP.
B _update_done
_update_msp
MSR msp, r0 ; update stack pointer to MSP.
_update_done
PUSH {lr}
BL rt_hw_hard_fault_exception
POP {lr}
ORR lr, lr, #0x04 ; force thread/PSP return
BX lr
END
|
vandercookking/h7_device_RTT
| 6,133
|
rt-thread/libcpu/arm/cortex-m3/context_gcc.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2009-10-11 Bernard First version
* 2010-12-29 onelife Modify for EFM32
* 2011-06-17 onelife Merge all of the assembly source code into context_gcc.S
* 2011-07-12 onelife Add interrupt context check function
* 2013-06-18 aozima add restore MSP feature.
* 2013-07-09 aozima enhancement hard fault exception handler.
*/
.cpu cortex-m3
.fpu softvfp
.syntax unified
.thumb
.text
/* System Control Block register addresses and bit values used below. */
.equ SCB_VTOR, 0xE000ED08 /* Vector Table Offset Register */
.equ ICSR, 0xE000ED04 /* interrupt control state register */
.equ PENDSVSET_BIT, 0x10000000 /* value to trigger PendSV exception */
.equ SHPR3, 0xE000ED20 /* system priority register (3) */
.equ PENDSV_PRI_LOWEST, 0xFFFF0000 /* PendSV and SysTick priority value (lowest) */
/*
* rt_base_t rt_hw_interrupt_disable();
*/
.global rt_hw_interrupt_disable
.type rt_hw_interrupt_disable, %function
/* rt_base_t rt_hw_interrupt_disable(void)
 * Returns the current PRIMASK in R0, then masks interrupts; pair with
 * rt_hw_interrupt_enable() to restore (nestable). */
rt_hw_interrupt_disable:
MRS R0, PRIMASK /* R0 = saved interrupt mask (return value) */
CPSID I /* disable configurable-priority interrupts */
BX LR
/*
* void rt_hw_interrupt_enable(rt_base_t level);
*/
.global rt_hw_interrupt_enable
.type rt_hw_interrupt_enable, %function
/* void rt_hw_interrupt_enable(rt_base_t level)
 * Restores PRIMASK to the value saved by rt_hw_interrupt_disable(). */
rt_hw_interrupt_enable:
MSR PRIMASK, R0 /* restore saved interrupt mask */
BX LR
/*
* void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
* R0 --> from
* R1 --> to
*/
.global rt_hw_context_switch_interrupt
.type rt_hw_context_switch_interrupt, %function
.global rt_hw_context_switch
.type rt_hw_context_switch, %function
/* void rt_hw_context_switch(rt_uint32 from, rt_uint32 to)
 * R0/R1 are recorded in rt_interrupt_from_thread / rt_interrupt_to_thread
 * and the switch itself is deferred to PendSV. If a switch is already
 * pending only the "to" thread is updated. */
rt_hw_context_switch_interrupt:
rt_hw_context_switch:
/* set rt_thread_switch_interrupt_flag to 1 */
LDR R2, =rt_thread_switch_interrupt_flag
LDR R3, [R2]
CMP R3, #1
BEQ _reswitch /* switch already pending: keep original "from" */
MOV R3, #1
STR R3, [R2]
LDR R2, =rt_interrupt_from_thread /* set rt_interrupt_from_thread */
STR R0, [R2]
_reswitch:
LDR R2, =rt_interrupt_to_thread /* set rt_interrupt_to_thread */
STR R1, [R2]
LDR R0, =ICSR /* trigger the PendSV exception (causes context switch) */
LDR R1, =PENDSVSET_BIT
STR R1, [R0]
BX LR
/* R0 --> switch from thread stack
* R1 --> switch to thread stack
* psr, pc, LR, R12, R3, R2, R1, R0 are pushed into [from] stack
*/
.global PendSV_Handler
.type PendSV_Handler, %function
/* PendSV exception: performs the deferred thread context switch.
 * Hardware stacked psr/pc/LR/R12/R3-R0 on the from-thread's PSP; this
 * handler saves/restores the remaining R4-R11 and swaps PSP. When
 * rt_interrupt_from_thread is 0 (first switch) the save is skipped. */
PendSV_Handler:
/* disable interrupt to protect context switch */
MRS R2, PRIMASK
CPSID I
/* get rt_thread_switch_interrupt_flag */
LDR R0, =rt_thread_switch_interrupt_flag
LDR R1, [R0]
CBZ R1, pendsv_exit /* pendsv already handled */
/* clear rt_thread_switch_interrupt_flag to 0 */
MOV R1, #0
STR R1, [R0]
LDR R0, =rt_interrupt_from_thread
LDR R1, [R0]
CBZ R1, switch_to_thread /* skip register save at the first time */
MRS R1, PSP /* get from thread stack pointer */
STMFD R1!, {R4 - R11} /* push R4 - R11 register */
LDR R0, [R0] /* R0 = *rt_interrupt_from_thread (saved-SP slot) */
STR R1, [R0] /* update from thread stack pointer */
switch_to_thread:
LDR R1, =rt_interrupt_to_thread
LDR R1, [R1]
LDR R1, [R1] /* load thread stack pointer */
LDMFD R1!, {R4 - R11} /* pop R4 - R11 register */
MSR PSP, R1 /* update stack pointer */
pendsv_exit:
/* restore interrupt */
MSR PRIMASK, R2
ORR LR, LR, #0x04 /* EXC_RETURN bit2 = 1: return to thread using PSP */
BX LR
/*
* void rt_hw_context_switch_to(rt_uint32 to);
* R0 --> to
*/
.global rt_hw_context_switch_to
.type rt_hw_context_switch_to, %function
/* void rt_hw_context_switch_to(rt_uint32 to)
 * Starts the first thread: records "to", zeroes "from" so PendSV skips
 * the register save, gives PendSV/SysTick the lowest priority, pends
 * PendSV, resets MSP from vector[0], and enables interrupts so the
 * pended PendSV is taken. Never returns. */
rt_hw_context_switch_to:
LDR R1, =rt_interrupt_to_thread
STR R0, [R1]
/* set from thread to 0 */
LDR R1, =rt_interrupt_from_thread
MOV R0, #0
STR R0, [R1]
/* set interrupt flag to 1 */
LDR R1, =rt_thread_switch_interrupt_flag
MOV R0, #1
STR R0, [R1]
/* set the PendSV and SysTick exception priority */
LDR R0, =SHPR3
LDR R1, =PENDSV_PRI_LOWEST
LDR.W R2, [R0,#0] /* read */
ORR R1, R1, R2 /* modify */
STR R1, [R0] /* write-back */
LDR R0, =ICSR /* trigger the PendSV exception (causes context switch) */
LDR R1, =PENDSVSET_BIT
STR R1, [R0]
/* restore MSP */
LDR r0, =SCB_VTOR
LDR r0, [r0] /* r0 = vector table base */
LDR r0, [r0] /* r0 = vector[0] = initial main stack pointer */
NOP
MSR msp, r0
/* enable interrupts at processor level */
CPSIE F
CPSIE I
/* ensure PendSV exception taken place before subsequent operation */
DSB
ISB
/* never reach here! */
/* never reach here! */
/* compatible with old version */
.global rt_hw_interrupt_thread_switch
.type rt_hw_interrupt_thread_switch, %function
/* Compatibility stub for older ports: no-op, PendSV does the switching. */
rt_hw_interrupt_thread_switch:
BX LR
NOP
.global HardFault_Handler
.type HardFault_Handler, %function
/* Hard fault entry: picks the faulting stack (MSP if EXC_RETURN bit2 is
 * clear, PSP otherwise), pushes r4-r11 plus EXC_RETURN so the C handler
 * sees a full frame, then calls rt_hw_hard_fault_exception(context). */
HardFault_Handler:
/* get current context */
MRS r0, msp /* get fault context from handler. */
TST lr, #0x04 /* if(!EXC_RETURN[2]) */
BEQ _get_sp_done
MRS r0, psp /* get fault context from thread. */
_get_sp_done:
STMFD r0!, {r4 - r11} /* push r4 - r11 register */
STMFD r0!, {lr} /* push exec_return register */
TST lr, #0x04 /* if(!EXC_RETURN[2]) */
BEQ _update_msp
MSR psp, r0 /* update stack pointer to PSP. */
B _update_done
_update_msp:
MSR msp, r0 /* update stack pointer to MSP. */
_update_done:
PUSH {LR}
BL rt_hw_hard_fault_exception
POP {LR}
ORR LR, LR, #0x04 /* force thread/PSP return */
BX LR
/*
* rt_uint32_t rt_hw_interrupt_check(void);
* R0 --> state
*/
.global rt_hw_interrupt_check
.type rt_hw_interrupt_check, %function
/* rt_uint32_t rt_hw_interrupt_check(void)
 * Returns IPSR in R0: non-zero means we are running inside an exception
 * handler (interrupt context). */
rt_hw_interrupt_check:
MRS R0, IPSR
BX LR
|
vandercookking/h7_device_RTT
| 5,653
|
rt-thread/libcpu/arm/cortex-m3/context_rvds.S
|
;/*
; * Copyright (c) 2006-2022, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date Author Notes
; * 2009-01-17 Bernard first version
; * 2013-06-18 aozima add restore MSP feature.
; * 2013-07-09 aozima enhancement hard fault exception handler.
; */
;/**
; * @addtogroup CORTEX-M3
; */
;/*@{*/
; System Control Block register addresses and bit values used below.
SCB_VTOR EQU 0xE000ED08 ; Vector Table Offset Register
NVIC_INT_CTRL EQU 0xE000ED04 ; interrupt control state register
NVIC_SYSPRI2 EQU 0xE000ED20 ; system priority register (2)
NVIC_PENDSV_PRI EQU 0xFFFF0000 ; PendSV and SysTick priority value (lowest)
NVIC_PENDSVSET EQU 0x10000000 ; value to trigger PendSV exception
AREA |.text|, CODE, READONLY, ALIGN=2
THUMB
REQUIRE8
PRESERVE8
IMPORT rt_thread_switch_interrupt_flag
IMPORT rt_interrupt_from_thread
IMPORT rt_interrupt_to_thread
;/*
; * rt_base_t rt_hw_interrupt_disable();
; */
; rt_base_t rt_hw_interrupt_disable(void)
; Returns the current PRIMASK in r0, then masks interrupts; pair with
; rt_hw_interrupt_enable() to restore (nestable).
rt_hw_interrupt_disable PROC
EXPORT rt_hw_interrupt_disable
MRS r0, PRIMASK ; r0 = saved interrupt mask (return value)
CPSID I ; disable configurable-priority interrupts
BX LR
ENDP
;/*
; * void rt_hw_interrupt_enable(rt_base_t level);
; */
; void rt_hw_interrupt_enable(rt_base_t level)
; Restores PRIMASK to the value saved by rt_hw_interrupt_disable().
rt_hw_interrupt_enable PROC
EXPORT rt_hw_interrupt_enable
MSR PRIMASK, r0 ; restore saved interrupt mask
BX LR
ENDP
;/*
; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
; * r0 --> from
; * r1 --> to
; */
; void rt_hw_context_switch(rt_uint32 from, rt_uint32 to)
; r0/r1 are recorded in rt_interrupt_from_thread / rt_interrupt_to_thread
; and the switch itself is deferred to PendSV. If a switch is already
; pending only the "to" thread is updated.
rt_hw_context_switch_interrupt
EXPORT rt_hw_context_switch_interrupt
rt_hw_context_switch PROC
EXPORT rt_hw_context_switch
; set rt_thread_switch_interrupt_flag to 1
LDR r2, =rt_thread_switch_interrupt_flag
LDR r3, [r2]
CMP r3, #1
BEQ _reswitch ; switch already pending: keep original "from"
MOV r3, #1
STR r3, [r2]
LDR r2, =rt_interrupt_from_thread ; set rt_interrupt_from_thread
STR r0, [r2]
_reswitch
LDR r2, =rt_interrupt_to_thread ; set rt_interrupt_to_thread
STR r1, [r2]
LDR r0, =NVIC_INT_CTRL ; trigger the PendSV exception (causes context switch)
LDR r1, =NVIC_PENDSVSET
STR r1, [r0]
BX LR
ENDP
; r0 --> switch from thread stack
; r1 --> switch to thread stack
; psr, pc, lr, r12, r3, r2, r1, r0 are pushed into [from] stack
; PendSV exception: performs the deferred thread context switch.
; Hardware stacked psr/pc/lr/r12/r3-r0 on the from-thread's PSP; this
; handler saves/restores the remaining r4-r11 and swaps PSP. When
; rt_interrupt_from_thread is 0 (first switch) the save is skipped.
PendSV_Handler PROC
EXPORT PendSV_Handler
; disable interrupt to protect context switch
MRS r2, PRIMASK
CPSID I
; get rt_thread_switch_interrupt_flag
LDR r0, =rt_thread_switch_interrupt_flag
LDR r1, [r0]
CBZ r1, pendsv_exit ; pendsv already handled
; clear rt_thread_switch_interrupt_flag to 0
MOV r1, #0x00
STR r1, [r0]
LDR r0, =rt_interrupt_from_thread
LDR r1, [r0]
CBZ r1, switch_to_thread ; skip register save at the first time
MRS r1, psp ; get from thread stack pointer
STMFD r1!, {r4 - r11} ; push r4 - r11 register
LDR r0, [r0] ; r0 = *rt_interrupt_from_thread (saved-SP slot)
STR r1, [r0] ; update from thread stack pointer
switch_to_thread
LDR r1, =rt_interrupt_to_thread
LDR r1, [r1]
LDR r1, [r1] ; load thread stack pointer
LDMFD r1!, {r4 - r11} ; pop r4 - r11 register
MSR psp, r1 ; update stack pointer
pendsv_exit
; restore interrupt
MSR PRIMASK, r2
ORR lr, lr, #0x04 ; EXC_RETURN bit2 = 1: return to thread using PSP
BX lr
ENDP
;/*
; * void rt_hw_context_switch_to(rt_uint32 to);
; * r0 --> to
; * this function is used to perform the first thread switch
; */
; void rt_hw_context_switch_to(rt_uint32 to)
; Starts the first thread: records "to", zeroes "from" so PendSV skips the
; register save, gives PendSV/SysTick the lowest priority, pends PendSV,
; resets MSP from vector[0], and enables interrupts so the pended PendSV
; is taken. Never returns.
rt_hw_context_switch_to PROC
EXPORT rt_hw_context_switch_to
; set to thread
LDR r1, =rt_interrupt_to_thread
STR r0, [r1]
; set from thread to 0
LDR r1, =rt_interrupt_from_thread
MOV r0, #0x0
STR r0, [r1]
; set interrupt flag to 1
LDR r1, =rt_thread_switch_interrupt_flag
MOV r0, #1
STR r0, [r1]
; set the PendSV and SysTick exception priority
LDR r0, =NVIC_SYSPRI2
LDR r1, =NVIC_PENDSV_PRI
LDR.W r2, [r0,#0x00] ; read
ORR r1,r1,r2 ; modify
STR r1, [r0] ; write-back
; trigger the PendSV exception (causes context switch)
LDR r0, =NVIC_INT_CTRL
LDR r1, =NVIC_PENDSVSET
STR r1, [r0]
; restore MSP
LDR r0, =SCB_VTOR
LDR r0, [r0] ; r0 = vector table base
LDR r0, [r0] ; r0 = vector[0] = initial main stack pointer
MSR msp, r0
; enable interrupts at processor level
CPSIE F
CPSIE I
; ensure PendSV exception taken place before subsequent operation
DSB
ISB
; never reach here!
ENDP
; compatible with old version
; Compatibility stub for older ports: no-op, PendSV does the switching.
rt_hw_interrupt_thread_switch PROC
EXPORT rt_hw_interrupt_thread_switch
BX lr
ENDP
IMPORT rt_hw_hard_fault_exception
EXPORT HardFault_Handler
; Hard fault entry: picks the faulting stack via EXC_RETURN bit2, pushes
; r4-r11 plus EXC_RETURN so the C handler sees a full frame, then calls
; rt_hw_hard_fault_exception(context) with r0 = frame pointer.
HardFault_Handler PROC
; get current context
TST lr, #0x04 ; if(!EXC_RETURN[2])
ITE EQ
MRSEQ r0, msp ; [2]=0 ==> Z=1, get fault context from handler.
MRSNE r0, psp ; [2]=1 ==> Z=0, get fault context from thread.
STMFD r0!, {r4 - r11} ; push r4 - r11 register
STMFD r0!, {lr} ; push exec_return register
TST lr, #0x04 ; if(!EXC_RETURN[2])
ITE EQ
MSREQ msp, r0 ; [2]=0 ==> Z=1, update stack pointer to MSP.
MSRNE psp, r0 ; [2]=1 ==> Z=0, update stack pointer to PSP.
PUSH {lr}
BL rt_hw_hard_fault_exception
POP {lr}
ORR lr, lr, #0x04 ; force thread/PSP return
BX lr
ENDP
ALIGN 4
END
|
vandercookking/h7_device_RTT
| 2,277
|
rt-thread/libcpu/arm/dm36x/context_gcc.S
|
/*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2011-01-13 weety
*/
/*!
* \addtogroup DM36X
*/
/*@{*/
#define NOINT 0xc0 /* CPSR I-bit|F-bit: mask both IRQ and FIQ */
/*
* rt_base_t rt_hw_interrupt_disable();
*/
.globl rt_hw_interrupt_disable
@ rt_base_t rt_hw_interrupt_disable(void)
@ Returns the old CPSR in r0, then sets the I and F bits to mask IRQ/FIQ.
rt_hw_interrupt_disable:
mrs r0, cpsr @ r0 = old CPSR (return value)
orr r1, r0, #NOINT
msr cpsr_c, r1 @ mask IRQ and FIQ
bx lr
/*
* void rt_hw_interrupt_enable(rt_base_t level);
*/
.globl rt_hw_interrupt_enable
@ void rt_hw_interrupt_enable(rt_base_t level)
@ Restores the CPSR previously returned by rt_hw_interrupt_disable().
rt_hw_interrupt_enable:
msr cpsr, r0
bx lr
/*
* void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
* r0 --> from
* r1 --> to
*/
.globl rt_hw_context_switch
@ void rt_hw_context_switch(rt_uint32 from, rt_uint32 to)
@ r0 = &from-thread saved-SP slot, r1 = &to-thread saved-SP slot.
@ Builds the frame {cpsr, r0-r12, lr, pc} on the current stack (with the
@ Thumb bit folded into the saved cpsr), swaps SP, and returns into the
@ new thread via LDM ^ which copies SPSR back to CPSR.
rt_hw_context_switch:
stmfd sp!, {lr} @ push pc (lr should be pushed in place of PC)
stmfd sp!, {r0-r12, lr} @ push lr & register file
mrs r4, cpsr
tst lr, #0x01 @ bit0 of return address set?
orrne r4, r4, #0x20 @ it's thumb code
stmfd sp!, {r4} @ push cpsr
str sp, [r0] @ store sp in preempted tasks TCB
ldr sp, [r1] @ get new task stack pointer
ldmfd sp!, {r4} @ pop new task cpsr to spsr
msr spsr_cxsf, r4
_do_switch:
ldmfd sp!, {r0-r12, lr, pc}^ @ pop new task r0-r12, lr & pc, copy spsr to cpsr
/*
* void rt_hw_context_switch_to(rt_uint32 to);
* r0 --> to
*/
.globl rt_hw_context_switch_to
@ void rt_hw_context_switch_to(rt_uint32 to)
@ r0 = &to-thread saved-SP slot. Loads the first thread's stack and jumps
@ into it; the saved status word is forced to ARM state before use.
rt_hw_context_switch_to:
ldr sp, [r0] @ get new task stack pointer
ldmfd sp!, {r4} @ pop new task spsr
msr spsr_cxsf, r4
bic r4, r4, #0x20 @ must be ARM mode
msr cpsr_cxsf, r4
ldmfd sp!, {r0-r12, lr, pc}^ @ pop new task r0-r12, lr & pc
/*
* void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
*/
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_hw_context_switch_interrupt
@ void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to)
@ Records the pending switch for the IRQ exit path; if a switch is
@ already pending only the "to" thread is updated.
rt_hw_context_switch_interrupt:
ldr r2, =rt_thread_switch_interrupt_flag
ldr r3, [r2]
cmp r3, #1
beq _reswitch @ switch already pending: keep original "from"
mov r3, #1 @ set rt_thread_switch_interrupt_flag to 1
str r3, [r2]
ldr r2, =rt_interrupt_from_thread @ set rt_interrupt_from_thread
str r0, [r2]
_reswitch:
ldr r2, =rt_interrupt_to_thread @ set rt_interrupt_to_thread
str r1, [r2]
bx lr
|
vandercookking/h7_device_RTT
| 2,590
|
rt-thread/libcpu/arm/dm36x/context_rvds.S
|
;/*
; * Copyright (c) 2006-2022, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date Author Notes
; * 2011-08-14 weety copy from mini2440
; */
NOINT EQU 0xc0 ; disable interrupt in psr
AREA |.text|, CODE, READONLY, ALIGN=2
ARM
REQUIRE8
PRESERVE8
;/*
; * rt_base_t rt_hw_interrupt_disable();
; */
; rt_base_t rt_hw_interrupt_disable(void)
; Returns the old CPSR in r0, then sets the I and F bits to mask IRQ/FIQ.
rt_hw_interrupt_disable PROC
EXPORT rt_hw_interrupt_disable
MRS r0, cpsr ; r0 = old CPSR (return value)
ORR r1, r0, #NOINT
MSR cpsr_c, r1 ; mask IRQ and FIQ
BX lr
ENDP
;/*
; * void rt_hw_interrupt_enable(rt_base_t level);
; */
; void rt_hw_interrupt_enable(rt_base_t level)
; Restores the CPSR control field saved by rt_hw_interrupt_disable().
rt_hw_interrupt_enable PROC
EXPORT rt_hw_interrupt_enable
MSR cpsr_c, r0
BX lr
ENDP
;/*
; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
; * r0 --> from
; * r1 --> to
; */
; void rt_hw_context_switch(rt_uint32 from, rt_uint32 to)
; r0 = &from-thread saved-SP slot, r1 = &to-thread saved-SP slot.
; Frame layout (top to bottom): spsr, cpsr, r0-r12, lr, pc. Both status
; words are loaded into SPSR; the final LDM ^ copies SPSR into CPSR, so
; the "cpsr" slot (popped second) is the one that takes effect.
rt_hw_context_switch PROC
EXPORT rt_hw_context_switch
STMFD sp!, {lr} ; push pc (lr should be pushed in place of PC)
STMFD sp!, {r0-r12, lr} ; push lr & register file
MRS r4, cpsr
STMFD sp!, {r4} ; push cpsr
MRS r4, spsr
STMFD sp!, {r4} ; push spsr
STR sp, [r0] ; store sp in preempted tasks TCB
LDR sp, [r1] ; get new task stack pointer
LDMFD sp!, {r4} ; pop new task spsr
MSR spsr_cxsf, r4
LDMFD sp!, {r4} ; pop new task cpsr
MSR spsr_cxsf, r4
LDMFD sp!, {r0-r12, lr, pc}^ ; pop new task r0-r12, lr & pc
ENDP
;/*
; * void rt_hw_context_switch_to(rt_uint32 to);
; * r0 --> to
; */
; void rt_hw_context_switch_to(rt_uint32 to)
; r0 = &to-thread saved-SP slot. Restores the first thread's CPSR
; directly (no ^ on the final LDM since CPSR is already set here).
rt_hw_context_switch_to PROC
EXPORT rt_hw_context_switch_to
LDR sp, [r0] ; get new task stack pointer
LDMFD sp!, {r4} ; pop new task spsr
MSR spsr_cxsf, r4
LDMFD sp!, {r4} ; pop new task cpsr
MSR cpsr_cxsf, r4
LDMFD sp!, {r0-r12, lr, pc} ; pop new task r0-r12, lr & pc
ENDP
;/*
; * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
; */
IMPORT rt_thread_switch_interrupt_flag
IMPORT rt_interrupt_from_thread
IMPORT rt_interrupt_to_thread
; void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to)
; Records the pending switch for the IRQ exit path; if a switch is
; already pending only the "to" thread is updated.
rt_hw_context_switch_interrupt PROC
EXPORT rt_hw_context_switch_interrupt
LDR r2, =rt_thread_switch_interrupt_flag
LDR r3, [r2]
CMP r3, #1
BEQ _reswitch ; switch already pending: keep original "from"
MOV r3, #1 ; set rt_thread_switch_interrupt_flag to 1
STR r3, [r2]
LDR r2, =rt_interrupt_from_thread ; set rt_interrupt_from_thread
STR r0, [r2]
_reswitch
LDR r2, =rt_interrupt_to_thread ; set rt_interrupt_to_thread
STR r1, [r2]
BX lr
ENDP
END
|
vandercookking/h7_device_RTT
| 9,791
|
rt-thread/libcpu/arm/common/divsi3.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
/* $NetBSD: divsi3.S,v 1.5 2005/02/26 22:58:56 perry Exp $ */
/*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* stack is aligned as there's a possibility of branching to L_overflow
* which makes a C call
*/
.text
.align 0
.globl __umodsi3
.type __umodsi3 , function
/* unsigned __umodsi3(unsigned n, unsigned d)
 * Unsigned modulo: the shared divide core returns quotient in r0 and
 * remainder in r1, so move the remainder into the return register. */
__umodsi3:
stmfd sp!, {lr}
sub sp, sp, #4 /* align stack */
bl .L_udivide
add sp, sp, #4 /* unalign stack */
mov r0, r1 /* return the remainder */
ldmfd sp!, {pc}
.text
.align 0
.globl __modsi3
.type __modsi3 , function
/* int __modsi3(int n, int d)
 * Signed modulo via the signed divide core (remainder comes back in r1). */
__modsi3:
stmfd sp!, {lr}
sub sp, sp, #4 /* align stack */
bl .L_divide
add sp, sp, #4 /* unalign stack */
mov r0, r1 /* return the remainder */
ldmfd sp!, {pc}
/* Division by zero lands here: return all-ones. */
.L_overflow:
/* XXX should cause a fatal error */
mvn r0, #0
mov pc, lr
.text
.align 0
.globl __udivsi3
.type __udivsi3 , function
/* unsigned __udivsi3(unsigned n, unsigned d)
 * Entry swaps the operands with a 3-XOR swap so the core works with
 * r0 = divisor, r1 = dividend. ip accumulates adjustment flags:
 * 0x20000000 marks the unsigned path, 0x10000000 preserves the
 * dividend's low bit when it must be pre-shifted right. */
__udivsi3:
.L_udivide: /* r0 = r0 / r1; r1 = r0 % r1 */
eor r0, r1, r0 /* 3-XOR register swap */
eor r1, r0, r1
eor r0, r1, r0
/* r0 = r1 / r0; r1 = r1 % r0 */
cmp r0, #1
bcc .L_overflow /* divisor == 0 */
beq .L_divide_l0 /* divisor == 1: trivial */
mov ip, #0
movs r1, r1
bpl .L_divide_l1
orr ip, ip, #0x20000000 /* ip bit 0x20000000 = -ve r1 */
movs r1, r1, lsr #1 /* halve dividend so shifts can't overflow */
orrcs ip, ip, #0x10000000 /* ip bit 0x10000000 = bit 0 of r1 */
b .L_divide_l1
.L_divide_l0: /* r0 == 1 */
mov r0, r1
mov r1, #0
mov pc, lr
.text
.align 0
.globl __divsi3
.type __divsi3 , function
/* int __divsi3(int n, int d)
 * Signed divide entry: strips signs (recording them in ip for the final
 * fix-up), then falls into the fully-unrolled shift-subtract core shared
 * with __udivsi3. Core registers: r0 = divisor, r1 = remainder-in-progress,
 * r2 = 1 (quotient bit source), r3 = quotient accumulator. */
__divsi3:
.L_divide: /* r0 = r0 / r1; r1 = r0 % r1 */
eor r0, r1, r0 /* 3-XOR register swap */
eor r1, r0, r1
eor r0, r1, r0
/* r0 = r1 / r0; r1 = r1 % r0 */
cmp r0, #1
bcc .L_overflow /* divisor == 0 */
beq .L_divide_l0 /* divisor == 1: trivial */
ands ip, r0, #0x80000000
rsbmi r0, r0, #0 /* |divisor| */
ands r2, r1, #0x80000000
eor ip, ip, r2
rsbmi r1, r1, #0 /* |dividend| */
orr ip, r2, ip, lsr #1 /* ip bit 0x40000000 = -ve division */
/* ip bit 0x80000000 = -ve remainder */
.L_divide_l1:
mov r2, #1
mov r3, #0
/*
 * If the highest bit of the dividend is set, we have to be
 * careful when shifting the divisor. Test this.
 */
movs r1,r1
bpl .L_old_code
/*
 * At this point, the highest bit of r1 is known to be set.
 * We abuse this below in the tst instructions.
 * Find the first shift where divisor<<k reaches the dividend's
 * top bit, then enter the subtract ladder at the matching step.
 */
tst r1, r0 /*, lsl #0 */
bmi .L_divide_b1
tst r1, r0, lsl #1
bmi .L_divide_b2
tst r1, r0, lsl #2
bmi .L_divide_b3
tst r1, r0, lsl #3
bmi .L_divide_b4
tst r1, r0, lsl #4
bmi .L_divide_b5
tst r1, r0, lsl #5
bmi .L_divide_b6
tst r1, r0, lsl #6
bmi .L_divide_b7
tst r1, r0, lsl #7
bmi .L_divide_b8
tst r1, r0, lsl #8
bmi .L_divide_b9
tst r1, r0, lsl #9
bmi .L_divide_b10
tst r1, r0, lsl #10
bmi .L_divide_b11
tst r1, r0, lsl #11
bmi .L_divide_b12
tst r1, r0, lsl #12
bmi .L_divide_b13
tst r1, r0, lsl #13
bmi .L_divide_b14
tst r1, r0, lsl #14
bmi .L_divide_b15
tst r1, r0, lsl #15
bmi .L_divide_b16
tst r1, r0, lsl #16
bmi .L_divide_b17
tst r1, r0, lsl #17
bmi .L_divide_b18
tst r1, r0, lsl #18
bmi .L_divide_b19
tst r1, r0, lsl #19
bmi .L_divide_b20
tst r1, r0, lsl #20
bmi .L_divide_b21
tst r1, r0, lsl #21
bmi .L_divide_b22
tst r1, r0, lsl #22
bmi .L_divide_b23
tst r1, r0, lsl #23
bmi .L_divide_b24
tst r1, r0, lsl #24
bmi .L_divide_b25
tst r1, r0, lsl #25
bmi .L_divide_b26
tst r1, r0, lsl #26
bmi .L_divide_b27
tst r1, r0, lsl #27
bmi .L_divide_b28
tst r1, r0, lsl #28
bmi .L_divide_b29
tst r1, r0, lsl #29
bmi .L_divide_b30
tst r1, r0, lsl #30
bmi .L_divide_b31
/*
 * instead of:
 * tst r1, r0, lsl #31
 * bmi .L_divide_b32
 */
b .L_divide_b32
/* Dividend top bit clear: a plain unsigned compare scan is safe. */
.L_old_code:
cmp r1, r0
bcc .L_divide_b0
cmp r1, r0, lsl #1
bcc .L_divide_b1
cmp r1, r0, lsl #2
bcc .L_divide_b2
cmp r1, r0, lsl #3
bcc .L_divide_b3
cmp r1, r0, lsl #4
bcc .L_divide_b4
cmp r1, r0, lsl #5
bcc .L_divide_b5
cmp r1, r0, lsl #6
bcc .L_divide_b6
cmp r1, r0, lsl #7
bcc .L_divide_b7
cmp r1, r0, lsl #8
bcc .L_divide_b8
cmp r1, r0, lsl #9
bcc .L_divide_b9
cmp r1, r0, lsl #10
bcc .L_divide_b10
cmp r1, r0, lsl #11
bcc .L_divide_b11
cmp r1, r0, lsl #12
bcc .L_divide_b12
cmp r1, r0, lsl #13
bcc .L_divide_b13
cmp r1, r0, lsl #14
bcc .L_divide_b14
cmp r1, r0, lsl #15
bcc .L_divide_b15
cmp r1, r0, lsl #16
bcc .L_divide_b16
cmp r1, r0, lsl #17
bcc .L_divide_b17
cmp r1, r0, lsl #18
bcc .L_divide_b18
cmp r1, r0, lsl #19
bcc .L_divide_b19
cmp r1, r0, lsl #20
bcc .L_divide_b20
cmp r1, r0, lsl #21
bcc .L_divide_b21
cmp r1, r0, lsl #22
bcc .L_divide_b22
cmp r1, r0, lsl #23
bcc .L_divide_b23
cmp r1, r0, lsl #24
bcc .L_divide_b24
cmp r1, r0, lsl #25
bcc .L_divide_b25
cmp r1, r0, lsl #26
bcc .L_divide_b26
cmp r1, r0, lsl #27
bcc .L_divide_b27
cmp r1, r0, lsl #28
bcc .L_divide_b28
cmp r1, r0, lsl #29
bcc .L_divide_b29
cmp r1, r0, lsl #30
bcc .L_divide_b30
/* Unrolled restoring-division ladder: at step k, if remainder >=
 * divisor<<k subtract it and set quotient bit k. Fall-through walks
 * from the entry step down to bit 0. */
.L_divide_b32:
cmp r1, r0, lsl #31
subhs r1, r1,r0, lsl #31
addhs r3, r3,r2, lsl #31
.L_divide_b31:
cmp r1, r0, lsl #30
subhs r1, r1,r0, lsl #30
addhs r3, r3,r2, lsl #30
.L_divide_b30:
cmp r1, r0, lsl #29
subhs r1, r1,r0, lsl #29
addhs r3, r3,r2, lsl #29
.L_divide_b29:
cmp r1, r0, lsl #28
subhs r1, r1,r0, lsl #28
addhs r3, r3,r2, lsl #28
.L_divide_b28:
cmp r1, r0, lsl #27
subhs r1, r1,r0, lsl #27
addhs r3, r3,r2, lsl #27
.L_divide_b27:
cmp r1, r0, lsl #26
subhs r1, r1,r0, lsl #26
addhs r3, r3,r2, lsl #26
.L_divide_b26:
cmp r1, r0, lsl #25
subhs r1, r1,r0, lsl #25
addhs r3, r3,r2, lsl #25
.L_divide_b25:
cmp r1, r0, lsl #24
subhs r1, r1,r0, lsl #24
addhs r3, r3,r2, lsl #24
.L_divide_b24:
cmp r1, r0, lsl #23
subhs r1, r1,r0, lsl #23
addhs r3, r3,r2, lsl #23
.L_divide_b23:
cmp r1, r0, lsl #22
subhs r1, r1,r0, lsl #22
addhs r3, r3,r2, lsl #22
.L_divide_b22:
cmp r1, r0, lsl #21
subhs r1, r1,r0, lsl #21
addhs r3, r3,r2, lsl #21
.L_divide_b21:
cmp r1, r0, lsl #20
subhs r1, r1,r0, lsl #20
addhs r3, r3,r2, lsl #20
.L_divide_b20:
cmp r1, r0, lsl #19
subhs r1, r1,r0, lsl #19
addhs r3, r3,r2, lsl #19
.L_divide_b19:
cmp r1, r0, lsl #18
subhs r1, r1,r0, lsl #18
addhs r3, r3,r2, lsl #18
.L_divide_b18:
cmp r1, r0, lsl #17
subhs r1, r1,r0, lsl #17
addhs r3, r3,r2, lsl #17
.L_divide_b17:
cmp r1, r0, lsl #16
subhs r1, r1,r0, lsl #16
addhs r3, r3,r2, lsl #16
.L_divide_b16:
cmp r1, r0, lsl #15
subhs r1, r1,r0, lsl #15
addhs r3, r3,r2, lsl #15
.L_divide_b15:
cmp r1, r0, lsl #14
subhs r1, r1,r0, lsl #14
addhs r3, r3,r2, lsl #14
.L_divide_b14:
cmp r1, r0, lsl #13
subhs r1, r1,r0, lsl #13
addhs r3, r3,r2, lsl #13
.L_divide_b13:
cmp r1, r0, lsl #12
subhs r1, r1,r0, lsl #12
addhs r3, r3,r2, lsl #12
.L_divide_b12:
cmp r1, r0, lsl #11
subhs r1, r1,r0, lsl #11
addhs r3, r3,r2, lsl #11
.L_divide_b11:
cmp r1, r0, lsl #10
subhs r1, r1,r0, lsl #10
addhs r3, r3,r2, lsl #10
.L_divide_b10:
cmp r1, r0, lsl #9
subhs r1, r1,r0, lsl #9
addhs r3, r3,r2, lsl #9
.L_divide_b9:
cmp r1, r0, lsl #8
subhs r1, r1,r0, lsl #8
addhs r3, r3,r2, lsl #8
.L_divide_b8:
cmp r1, r0, lsl #7
subhs r1, r1,r0, lsl #7
addhs r3, r3,r2, lsl #7
.L_divide_b7:
cmp r1, r0, lsl #6
subhs r1, r1,r0, lsl #6
addhs r3, r3,r2, lsl #6
.L_divide_b6:
cmp r1, r0, lsl #5
subhs r1, r1,r0, lsl #5
addhs r3, r3,r2, lsl #5
.L_divide_b5:
cmp r1, r0, lsl #4
subhs r1, r1,r0, lsl #4
addhs r3, r3,r2, lsl #4
.L_divide_b4:
cmp r1, r0, lsl #3
subhs r1, r1,r0, lsl #3
addhs r3, r3,r2, lsl #3
.L_divide_b3:
cmp r1, r0, lsl #2
subhs r1, r1,r0, lsl #2
addhs r3, r3,r2, lsl #2
.L_divide_b2:
cmp r1, r0, lsl #1
subhs r1, r1,r0, lsl #1
addhs r3, r3,r2, lsl #1
.L_divide_b1:
cmp r1, r0
subhs r1, r1, r0
addhs r3, r3, r2
.L_divide_b0:
/* Exit: unsigned path (flag 0x20000000) undoes the pre-shift; signed
 * path applies the sign flags recorded in ip. Quotient -> r0, rem -> r1. */
tst ip, #0x20000000
bne .L_udivide_l1
mov r0, r3
cmp ip, #0
rsbmi r1, r1, #0 /* negate remainder if dividend was -ve */
movs ip, ip, lsl #1
bicmi r0, r0, #0x80000000 /* Fix incase we divided 0x80000000 */
rsbmi r0, r0, #0 /* negate quotient if signs differed */
mov pc, lr
.L_udivide_l1:
tst ip, #0x10000000
mov r1, r1, lsl #1 /* undo the pre-shift of the dividend */
orrne r1, r1, #1 /* restore its saved low bit */
mov r3, r3, lsl #1
cmp r1, r0
subhs r1, r1, r0
addhs r3, r3, r2
mov r0, r3
mov pc, lr
|
vandercookking/h7_device_RTT
| 2,490
|
rt-thread/libcpu/arm/AT91SAM7S/context_gcc.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2006-03-13 Bernard first version
*/
#define NOINT 0xc0 /* CPSR I-bit|F-bit: mask both IRQ and FIQ */
/*
* rt_base_t rt_hw_interrupt_disable();
*/
.globl rt_hw_interrupt_disable
/* rt_base_t rt_hw_interrupt_disable(void)
 * Returns the old CPSR in r0, then sets I and F bits to mask IRQ/FIQ. */
rt_hw_interrupt_disable:
mrs r0, cpsr /* r0 = old CPSR (return value) */
orr r1, r0, #NOINT
msr cpsr_c, r1 /* mask IRQ and FIQ */
mov pc, lr
/*
* void rt_hw_interrupt_enable(rt_base_t level);
*/
.globl rt_hw_interrupt_enable
/* void rt_hw_interrupt_enable(rt_base_t level)
 * Restores the CPSR saved by rt_hw_interrupt_disable(). */
rt_hw_interrupt_enable:
msr cpsr, r0
mov pc, lr
/*
* void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
* r0 --> from
* r1 --> to
*/
.globl rt_hw_context_switch
/* void rt_hw_context_switch(rt_uint32 from, rt_uint32 to)
 * r0 = &from-thread saved-SP slot, r1 = &to-thread saved-SP slot.
 * Frame layout (top to bottom): spsr, cpsr, r0-r12, lr, pc. CPSR is
 * restored directly, so the final LDM carries no ^. */
rt_hw_context_switch:
stmfd sp!, {lr} /* push pc (lr should be pushed in place of PC) */
stmfd sp!, {r0-r12, lr} /* push lr & register file */
mrs r4, cpsr
stmfd sp!, {r4} /* push cpsr */
mrs r4, spsr
stmfd sp!, {r4} /* push spsr */
str sp, [r0] /* store sp in preempted tasks TCB */
ldr sp, [r1] /* get new task stack pointer */
ldmfd sp!, {r4} /* pop new task spsr */
msr spsr_cxsf, r4
ldmfd sp!, {r4} /* pop new task cpsr */
msr cpsr_cxsf, r4
ldmfd sp!, {r0-r12, lr, pc} /* pop new task r0-r12, lr & pc */
/*
* void rt_hw_context_switch_to(rt_uint32 to);
* r0 --> to
*/
.globl rt_hw_context_switch_to
/* void rt_hw_context_switch_to(rt_uint32 to)
 * r0 = &to-thread saved-SP slot. Loads the first thread's stack frame
 * and jumps into it; CPSR is restored directly before the final LDM. */
rt_hw_context_switch_to:
ldr sp, [r0] /* get new task stack pointer */
ldmfd sp!, {r4} /* pop new task spsr */
msr spsr_cxsf, r4
ldmfd sp!, {r4} /* pop new task cpsr */
msr cpsr_cxsf, r4
ldmfd sp!, {r0-r12, lr, pc} /* pop new task r0-r12, lr & pc */
/*
* void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
*/
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_hw_context_switch_interrupt
/* void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to)
 * Records the pending switch for the IRQ exit path (vector_irq); if a
 * switch is already pending only the "to" thread is updated. */
rt_hw_context_switch_interrupt:
ldr r2, =rt_thread_switch_interrupt_flag
ldr r3, [r2]
cmp r3, #1
beq _reswitch /* switch already pending: keep original "from" */
mov r3, #1 /* set rt_thread_switch_interrupt_flag to 1 */
str r3, [r2]
ldr r2, =rt_interrupt_from_thread /* set rt_interrupt_from_thread */
str r0, [r2]
_reswitch:
ldr r2, =rt_interrupt_to_thread /* set rt_interrupt_to_thread */
str r1, [r2]
mov pc, lr
|
vandercookking/h7_device_RTT
| 5,853
|
rt-thread/libcpu/arm/AT91SAM7S/start_gcc.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2006-08-31 Bernard first version
*/
/* Internal Memory Base Addresses */
.equ FLASH_BASE, 0x00100000
.equ RAM_BASE, 0x00200000
/* Stack Configuration */
.equ TOP_STACK, 0x00204000
.equ UND_STACK_SIZE, 0x00000100
.equ SVC_STACK_SIZE, 0x00000400
.equ ABT_STACK_SIZE, 0x00000100
.equ FIQ_STACK_SIZE, 0x00000100
.equ IRQ_STACK_SIZE, 0x00000100
.equ USR_STACK_SIZE, 0x00000004
/* ARM architecture definitions */
.equ MODE_USR, 0x10
.equ MODE_FIQ, 0x11
.equ MODE_IRQ, 0x12
.equ MODE_SVC, 0x13
.equ MODE_ABT, 0x17
.equ MODE_UND, 0x1B
.equ MODE_SYS, 0x1F
.equ I_BIT, 0x80 /* when this bit is set, IRQ is disabled */
.equ F_BIT, 0x40 /* when this bit is set, FIQ is disabled */
.section .init, "ax"
.code 32
.align 0
.globl _start
/* ARM exception vector table placed at the image base: reset branches
 * directly; the other entries load their handler address from the
 * literal words below (reachable PC-relative). */
_start:
b reset
ldr pc, _vector_undef
ldr pc, _vector_swi
ldr pc, _vector_pabt
ldr pc, _vector_dabt
nop /* reserved vector */
ldr pc, _vector_irq
ldr pc, _vector_fiq
_vector_undef: .word vector_undef
_vector_swi: .word vector_swi
_vector_pabt: .word vector_pabt
_vector_dabt: .word vector_dabt
_vector_resv: .word vector_resv
_vector_irq: .word vector_irq
_vector_fiq: .word vector_fiq
/*
* rtthread bss start and end
* which are defined in linker script
*/
.globl _bss_start
_bss_start: .word __bss_start
.globl _bss_end
_bss_end: .word __bss_end
/* the system entry: clock/PLL bring-up, per-mode stack setup, data
 * relocation (flash build) or SRAM remap (RAM build), IRQ masking,
 * then jump to rtthread_startup(). Magic addresses below are AT91SAM7S
 * peripheral registers — presumably WDT/PMC/MC/AIC; confirm against the
 * datasheet before changing. */
reset:
/* disable watchdog */
ldr r0, =0xFFFFFD40
ldr r1, =0x00008000
str r1, [r0, #0x04]
/* enable the main oscillator */
ldr r0, =0xFFFFFC00
ldr r1, =0x00000601
str r1, [r0, #0x20]
/* wait for main oscillator to stabilize */
moscs_loop:
ldr r2, [r0, #0x68]
ands r2, r2, #1
beq moscs_loop
/* set up the PLL */
ldr r1, =0x00191C05
str r1, [r0, #0x2C]
/* wait for PLL to lock */
pll_loop:
ldr r2, [r0, #0x68]
ands r2, r2, #0x04
beq pll_loop
/* select clock */
ldr r1, =0x00000007
str r1, [r0, #0x30]
/* setup stack for each mode */
ldr r0, =TOP_STACK
/* set stack */
/* undefined instruction mode */
msr cpsr_c, #MODE_UND|I_BIT|F_BIT
mov sp, r0
sub r0, r0, #UND_STACK_SIZE
/* abort mode */
msr cpsr_c, #MODE_ABT|I_BIT|F_BIT
mov sp, r0
sub r0, r0, #ABT_STACK_SIZE
/* FIQ mode */
msr cpsr_c, #MODE_FIQ|I_BIT|F_BIT
mov sp, r0
sub r0, r0, #FIQ_STACK_SIZE
/* IRQ mode */
msr cpsr_c, #MODE_IRQ|I_BIT|F_BIT
mov sp, r0
sub r0, r0, #IRQ_STACK_SIZE
/* supervisor mode */
msr cpsr_c, #MODE_SVC
mov sp, r0
#ifdef __FLASH_BUILD__
/* Relocate .data section (Copy from ROM to RAM) */
ldr r1, =_etext
ldr r2, =_data
ldr r3, =_edata
data_loop:
cmp r2, r3
ldrlo r0, [r1], #4
strlo r0, [r2], #4
blo data_loop
#else
/* remap SRAM to 0x0000 */
ldr r0, =0xFFFFFF00
mov r1, #0x01
str r1, [r0]
#endif
/* mask all IRQs */
ldr r1, =0xFFFFF124
ldr r0, =0XFFFFFFFF
str r0, [r1]
/* start RT-Thread Kernel */
ldr pc, _rtthread_startup
_rtthread_startup: .word rtthread_startup
/* exception handlers */
/* unhandled exceptions spin in place so the fault state stays visible in a debugger */
vector_undef: b vector_undef
vector_swi : b vector_swi
vector_pabt : b vector_pabt
vector_dabt : b vector_dabt
vector_resv : b vector_resv
.globl rt_interrupt_enter
.globl rt_interrupt_leave
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
/*
 * IRQ entry: save the interrupted context, run the kernel IRQ hooks
 * and dispatcher, then either return to the interrupted code or fall
 * into the interrupt-time context switch if the scheduler asked for one.
 */
vector_irq:
stmfd sp!, {r0-r12,lr} /* save interrupted context on the IRQ-mode stack */
bl rt_interrupt_enter
bl rt_hw_trap_irq
bl rt_interrupt_leave
/*
 * if rt_thread_switch_interrupt_flag set, jump to
 * rt_hw_context_switch_interrupt_do and don't return
 */
ldr r0, =rt_thread_switch_interrupt_flag
ldr r1, [r0]
cmp r1, #1
beq rt_hw_context_switch_interrupt_do
ldmfd sp!, {r0-r12,lr}
subs pc, lr, #4 /* return from IRQ: lr holds the interrupted pc + 4 */
/* FIQ entry: save the scratch registers, run the C trap handler, return */
vector_fiq:
stmfd sp!,{r0-r7,lr}
bl rt_hw_trap_fiq
ldmfd sp!,{r0-r7,lr}
subs pc,lr,#4
/*
 * void rt_hw_context_switch_interrupt_do(rt_base_t flag)
 */
/*
 * Interrupt-time context switch.  Entered from vector_irq with r0
 * pointing at rt_thread_switch_interrupt_flag and the interrupted
 * context still on the IRQ stack.  Rebuilds that context on the
 * preempted thread's own stack, stores sp into its TCB, then restores
 * the context of the thread named by rt_interrupt_to_thread.
 */
rt_hw_context_switch_interrupt_do:
mov r1, #0 /* clear flag */
str r1, [r0]
ldmfd sp!, {r0-r12,lr} /* reload saved registers */
stmfd sp!, {r0-r3} /* save r0-r3 */
mov r1, sp /* r1 = where r0-r3 were just spilled */
add sp, sp, #16 /* restore sp */
sub r2, lr, #4 /* save old task's pc to r2 */
mrs r3, spsr /* disable interrupt */
orr r0, r3, #I_BIT|F_BIT
msr spsr_c, r0
ldr r0, =.+8 /* switch to interrupted task's stack */
movs pc, r0 /* movs pc copies spsr into cpsr, leaving IRQ mode */
stmfd sp!, {r2} /* push old task's pc */
stmfd sp!, {r4-r12,lr} /* push old task's lr,r12-r4 */
mov r4, r1 /* Special optimised code below */
mov r5, r3
ldmfd r4!, {r0-r3} /* fetch r0-r3 back from the IRQ stack */
stmfd sp!, {r0-r3} /* push old task's r3-r0 */
stmfd sp!, {r5} /* push old task's psr */
mrs r4, spsr
stmfd sp!, {r4} /* push old task's spsr */
ldr r4, =rt_interrupt_from_thread
ldr r5, [r4]
str sp, [r5] /* store sp in preempted tasks's TCB */
ldr r6, =rt_interrupt_to_thread
ldr r6, [r6]
ldr sp, [r6] /* get new task's stack pointer */
ldmfd sp!, {r4} /* pop new task's spsr */
msr SPSR_cxsf, r4
ldmfd sp!, {r4} /* pop new task's psr */
msr CPSR_cxsf, r4
ldmfd sp!, {r0-r12,lr,pc} /* pop new task's r0-r12,lr & pc */
|
vandercookking/h7_device_RTT
| 2,580
|
rt-thread/libcpu/arm/AT91SAM7S/context_rvds.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2009-01-20 Bernard first version
*/
NOINT EQU 0xc0 ; disable interrupt in psr (I and F bits together)
AREA |.text|, CODE, READONLY, ALIGN=2
ARM ; 32-bit ARM code, 8-byte stack alignment contract
REQUIRE8
PRESERVE8
;/*
; * rt_base_t rt_hw_interrupt_disable();
; * Returns the current cpsr in r0 and masks both IRQ and FIQ.
; */
rt_hw_interrupt_disable PROC
EXPORT rt_hw_interrupt_disable
MRS r0, cpsr ; r0 = previous interrupt level (return value)
ORR r1, r0, #NOINT
MSR cpsr_c, r1 ; mask IRQ and FIQ
BX lr
ENDP
;/*
; * void rt_hw_interrupt_enable(rt_base_t level);
; * Restores the control field of cpsr from the value previously
; * returned by rt_hw_interrupt_disable.
; */
rt_hw_interrupt_enable PROC
EXPORT rt_hw_interrupt_enable
MSR cpsr_c, r0
BX lr
ENDP
;/*
; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
; * r0 --> from
; * r1 --> to
; * Thread-to-thread switch: saves the full context (pc, lr, r0-r12,
; * cpsr, spsr) on the current stack, stores sp into *from, then
; * restores the context reached through *to.
; */
rt_hw_context_switch PROC
EXPORT rt_hw_context_switch
STMFD sp!, {lr} ; push pc (lr should be pushed in place of PC)
STMFD sp!, {r0-r12, lr} ; push lr & register file
MRS r4, cpsr
STMFD sp!, {r4} ; push cpsr
MRS r4, spsr
STMFD sp!, {r4} ; push spsr
STR sp, [r0] ; store sp in preempted tasks TCB
LDR sp, [r1] ; get new task stack pointer
LDMFD sp!, {r4} ; pop new task spsr
MSR spsr_cxsf, r4
LDMFD sp!, {r4} ; pop new task cpsr
MSR cpsr_cxsf, r4
LDMFD sp!, {r0-r12, lr, pc} ; pop new task r0-r12, lr & pc
ENDP
;/*
; * void rt_hw_context_switch_to(rt_uint32 to);
; * r0 --> to
; * First-ever switch: nothing is saved; simply restores the context
; * found through *to (used to start the scheduler).
; */
rt_hw_context_switch_to PROC
EXPORT rt_hw_context_switch_to
LDR sp, [r0] ; get new task stack pointer
LDMFD sp!, {r4} ; pop new task spsr
MSR spsr_cxsf, r4
LDMFD sp!, {r4} ; pop new task cpsr
MSR cpsr_cxsf, r4
LDMFD sp!, {r0-r12, lr, pc} ; pop new task r0-r12, lr & pc
ENDP
;/*
; * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
; * Defers the actual switch to IRQ exit: records from/to and raises
; * rt_thread_switch_interrupt_flag; only the first request in a nest
; * records "from", later requests just retarget "to".
; */
IMPORT rt_thread_switch_interrupt_flag
IMPORT rt_interrupt_from_thread
IMPORT rt_interrupt_to_thread
rt_hw_context_switch_interrupt PROC
EXPORT rt_hw_context_switch_interrupt
LDR r2, =rt_thread_switch_interrupt_flag
LDR r3, [r2]
CMP r3, #1
BEQ _reswitch ; a switch is already pending: only update destination
MOV r3, #1 ; set rt_thread_switch_interrupt_flag to 1
STR r3, [r2]
LDR r2, =rt_interrupt_from_thread ; set rt_interrupt_from_thread
STR r0, [r2]
_reswitch
LDR r2, =rt_interrupt_to_thread ; set rt_interrupt_to_thread
STR r1, [r2]
BX lr
ENDP
END
|
vandercookking/h7_device_RTT
| 16,691
|
rt-thread/libcpu/arm/AT91SAM7S/start_rvds.S
|
;/*****************************************************************************/
;/* SAM7.S: Startup file for Atmel AT91SAM7 device series */
;/*****************************************************************************/
;/* <<< Use Configuration Wizard in Context Menu >>> */
;/*****************************************************************************/
;/* This file is part of the uVision/ARM development tools. */
;/* Copyright (c) 2005-2006 Keil Software. All rights reserved. */
;/* This software may only be used under the terms of a valid, current, */
;/* end user licence from KEIL for a compatible version of KEIL software */
;/* development tools. Nothing else gives you the right to use this software. */
;/*****************************************************************************/
;/*
; * The SAM7.S code is executed after CPU Reset. This file may be
; * translated with the following SET symbols. In uVision these SET
; * symbols are entered under Options - ASM - Define.
; *
; * REMAP: when set the startup code remaps exception vectors from
; * on-chip RAM to address 0.
; *
; * RAM_INTVEC: when set the startup code copies exception vectors
; * from on-chip Flash to on-chip RAM.
; */
; Standard definitions of Mode bits and Interrupt (I & F) flags in PSRs
Mode_USR EQU 0x10
Mode_FIQ EQU 0x11
Mode_IRQ EQU 0x12
Mode_SVC EQU 0x13
Mode_ABT EQU 0x17
Mode_UND EQU 0x1B
Mode_SYS EQU 0x1F
I_Bit EQU 0x80 ; when I bit is set, IRQ is disabled
F_Bit EQU 0x40 ; when F bit is set, FIQ is disabled
; Internal Memory Base Addresses
FLASH_BASE EQU 0x00100000
RAM_BASE EQU 0x00200000
;// <h> Stack Configuration (Stack Sizes in Bytes)
;// <o0> Undefined Mode <0x0-0xFFFFFFFF:8>
;// <o1> Supervisor Mode <0x0-0xFFFFFFFF:8>
;// <o2> Abort Mode <0x0-0xFFFFFFFF:8>
;// <o3> Fast Interrupt Mode <0x0-0xFFFFFFFF:8>
;// <o4> Interrupt Mode <0x0-0xFFFFFFFF:8>
;// <o5> User/System Mode <0x0-0xFFFFFFFF:8>
;// </h>
UND_Stack_Size EQU 0x00000000
SVC_Stack_Size EQU 0x00000100
ABT_Stack_Size EQU 0x00000000
FIQ_Stack_Size EQU 0x00000000
IRQ_Stack_Size EQU 0x00000100
USR_Stack_Size EQU 0x00000100
ISR_Stack_Size EQU (UND_Stack_Size + SVC_Stack_Size + ABT_Stack_Size + \
FIQ_Stack_Size + IRQ_Stack_Size)
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE USR_Stack_Size
__initial_sp SPACE ISR_Stack_Size
Stack_Top
;// <h> Heap Configuration
;// <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF>
;// </h>
Heap_Size EQU 0x00000000
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
; Reset Controller (RSTC) definitions
RSTC_BASE EQU 0xFFFFFD00 ; RSTC Base Address
RSTC_MR EQU 0x08 ; RSTC_MR Offset
;/*
;// <e> Reset Controller (RSTC)
;// <o1.0> URSTEN: User Reset Enable
;// <i> Enables NRST Pin to generate Reset
;// <o1.8..11> ERSTL: External Reset Length <0-15>
;// <i> External Reset Time in 2^(ERSTL+1) Slow Clock Cycles
;// </e>
;*/
RSTC_SETUP EQU 1
RSTC_MR_Val EQU 0xA5000401
; Embedded Flash Controller (EFC) definitions
EFC_BASE EQU 0xFFFFFF00 ; EFC Base Address
EFC0_FMR EQU 0x60 ; EFC0_FMR Offset
EFC1_FMR EQU 0x70 ; EFC1_FMR Offset
;// <e> Embedded Flash Controller 0 (EFC0)
;// <o1.16..23> FMCN: Flash Microsecond Cycle Number <0-255>
;// <i> Number of Master Clock Cycles in 1us
;// <o1.8..9> FWS: Flash Wait State
;// <0=> Read: 1 cycle / Write: 2 cycles
;// <1=> Read: 2 cycle / Write: 3 cycles
;// <2=> Read: 3 cycle / Write: 4 cycles
;// <3=> Read: 4 cycle / Write: 4 cycles
;// </e>
EFC0_SETUP EQU 1
EFC0_FMR_Val EQU 0x00320100
;// <e> Embedded Flash Controller 1 (EFC1)
;// <o1.16..23> FMCN: Flash Microsecond Cycle Number <0-255>
;// <i> Number of Master Clock Cycles in 1us
;// <o1.8..9> FWS: Flash Wait State
;// <0=> Read: 1 cycle / Write: 2 cycles
;// <1=> Read: 2 cycle / Write: 3 cycles
;// <2=> Read: 3 cycle / Write: 4 cycles
;// <3=> Read: 4 cycle / Write: 4 cycles
;// </e>
EFC1_SETUP EQU 0
EFC1_FMR_Val EQU 0x00320100
; Watchdog Timer (WDT) definitions
WDT_BASE EQU 0xFFFFFD40 ; WDT Base Address
WDT_MR EQU 0x04 ; WDT_MR Offset
;// <e> Watchdog Timer (WDT)
;// <o1.0..11> WDV: Watchdog Counter Value <0-4095>
;// <o1.16..27> WDD: Watchdog Delta Value <0-4095>
;// <o1.12> WDFIEN: Watchdog Fault Interrupt Enable
;// <o1.13> WDRSTEN: Watchdog Reset Enable
;// <o1.14> WDRPROC: Watchdog Reset Processor
;// <o1.28> WDDBGHLT: Watchdog Debug Halt
;// <o1.29> WDIDLEHLT: Watchdog Idle Halt
;// <o1.15> WDDIS: Watchdog Disable
;// </e>
WDT_SETUP EQU 1
WDT_MR_Val EQU 0x00008000
; Power Management Controller (PMC) definitions
PMC_BASE EQU 0xFFFFFC00 ; PMC Base Address
PMC_MOR EQU 0x20 ; PMC_MOR Offset
PMC_MCFR EQU 0x24 ; PMC_MCFR Offset
PMC_PLLR EQU 0x2C ; PMC_PLLR Offset
PMC_MCKR EQU 0x30 ; PMC_MCKR Offset
PMC_SR EQU 0x68 ; PMC_SR Offset
PMC_MOSCEN EQU (1<<0) ; Main Oscillator Enable
PMC_OSCBYPASS EQU (1<<1) ; Main Oscillator Bypass
PMC_OSCOUNT EQU (0xFF<<8) ; Main Oscillator Start-up Time
PMC_DIV EQU (0xFF<<0) ; PLL Divider
PMC_PLLCOUNT EQU (0x3F<<8) ; PLL Lock Counter
PMC_OUT EQU (0x03<<14) ; PLL Clock Frequency Range
PMC_MUL EQU (0x7FF<<16) ; PLL Multiplier
PMC_USBDIV EQU (0x03<<28) ; USB Clock Divider
PMC_CSS EQU (3<<0) ; Clock Source Selection
PMC_PRES EQU (7<<2) ; Prescaler Selection
PMC_MOSCS EQU (1<<0) ; Main Oscillator Stable
PMC_LOCK EQU (1<<2) ; PLL Lock Status
PMC_MCKRDY EQU (1<<3) ; Master Clock Status
;// <e> Power Management Controller (PMC)
;// <h> Main Oscillator
;// <o1.0> MOSCEN: Main Oscillator Enable
;// <o1.1> OSCBYPASS: Oscillator Bypass
;// <o1.8..15> OSCCOUNT: Main Oscillator Startup Time <0-255>
;// </h>
;// <h> Phase Locked Loop (PLL)
;// <o2.0..7> DIV: PLL Divider <0-255>
;// <o2.16..26> MUL: PLL Multiplier <0-2047>
;// <i> PLL Output is multiplied by MUL+1
;// <o2.14..15> OUT: PLL Clock Frequency Range
;// <0=> 80..160MHz <1=> Reserved
;// <2=> 150..220MHz <3=> Reserved
;// <o2.8..13> PLLCOUNT: PLL Lock Counter <0-63>
;// <o2.28..29> USBDIV: USB Clock Divider
;// <0=> None <1=> 2 <2=> 4 <3=> Reserved
;// </h>
;// <o3.0..1> CSS: Clock Source Selection
;// <0=> Slow Clock
;// <1=> Main Clock
;// <2=> Reserved
;// <3=> PLL Clock
;// <o3.2..4> PRES: Prescaler
;// <0=> None
;// <1=> Clock / 2 <2=> Clock / 4
;// <3=> Clock / 8 <4=> Clock / 16
;// <5=> Clock / 32 <6=> Clock / 64
;// <7=> Reserved
;// </e>
PMC_SETUP EQU 1
PMC_MOR_Val EQU 0x00000601
PMC_PLLR_Val EQU 0x00191C05
PMC_MCKR_Val EQU 0x00000007
PRESERVE8
; Area Definition and Entry Point
; Startup Code must be linked first at Address at which it expects to run.
AREA RESET, CODE, READONLY
ARM
; Exception Vectors
; Mapped to Address 0.
; Absolute addressing mode must be used.
; Dummy Handlers are implemented as infinite loops which can be modified.
Vectors LDR PC,Reset_Addr ; each slot loads its handler address from the DCD table below
LDR PC,Undef_Addr
LDR PC,SWI_Addr
LDR PC,PAbt_Addr
LDR PC,DAbt_Addr
NOP ; Reserved Vector
LDR PC,IRQ_Addr
LDR PC,FIQ_Addr
Reset_Addr DCD Reset_Handler
Undef_Addr DCD Undef_Handler
SWI_Addr DCD SWI_Handler
PAbt_Addr DCD PAbt_Handler
DAbt_Addr DCD DAbt_Handler
DCD 0 ; Reserved Address
IRQ_Addr DCD IRQ_Handler
FIQ_Addr DCD FIQ_Handler
; dummy handlers: spin forever so a fault remains visible to a debugger
Undef_Handler B Undef_Handler
SWI_Handler B SWI_Handler
PAbt_Handler B PAbt_Handler
DAbt_Handler B DAbt_Handler
FIQ_Handler B FIQ_Handler
; Reset Handler
; Configures reset controller, flash wait states, watchdog and clocks
; (each guarded by its *_SETUP flag above), optionally copies the
; vectors to RAM / remaps RAM to 0, gives every ARM mode its stack,
; then branches to the C library entry __main.
EXPORT Reset_Handler
Reset_Handler
; Setup RSTC
IF RSTC_SETUP != 0
LDR R0, =RSTC_BASE
LDR R1, =RSTC_MR_Val
STR R1, [R0, #RSTC_MR]
ENDIF
; Setup EFC0
IF EFC0_SETUP != 0
LDR R0, =EFC_BASE
LDR R1, =EFC0_FMR_Val
STR R1, [R0, #EFC0_FMR]
ENDIF
; Setup EFC1
IF EFC1_SETUP != 0
LDR R0, =EFC_BASE
LDR R1, =EFC1_FMR_Val
STR R1, [R0, #EFC1_FMR]
ENDIF
; Setup WDT
IF WDT_SETUP != 0
LDR R0, =WDT_BASE
LDR R1, =WDT_MR_Val
STR R1, [R0, #WDT_MR]
ENDIF
; Setup PMC
IF PMC_SETUP != 0
LDR R0, =PMC_BASE
; Setup Main Oscillator
LDR R1, =PMC_MOR_Val
STR R1, [R0, #PMC_MOR]
; Wait until Main Oscillator is stabilized
IF (PMC_MOR_Val:AND:PMC_MOSCEN) != 0
MOSCS_Loop LDR R2, [R0, #PMC_SR]
ANDS R2, R2, #PMC_MOSCS
BEQ MOSCS_Loop
ENDIF
; Setup the PLL
IF (PMC_PLLR_Val:AND:PMC_MUL) != 0
LDR R1, =PMC_PLLR_Val
STR R1, [R0, #PMC_PLLR]
; Wait until PLL is stabilized
PLL_Loop LDR R2, [R0, #PMC_SR]
ANDS R2, R2, #PMC_LOCK
BEQ PLL_Loop
ENDIF
; Select Clock
; (one field is written first, MCKRDY is awaited, then the full value
; is written and MCKRDY awaited again, as the two-step PMC sequence requires)
IF (PMC_MCKR_Val:AND:PMC_CSS) == 1 ; Main Clock Selected
LDR R1, =PMC_MCKR_Val
AND R1, #PMC_CSS
STR R1, [R0, #PMC_MCKR]
WAIT_Rdy1 LDR R2, [R0, #PMC_SR]
ANDS R2, R2, #PMC_MCKRDY
BEQ WAIT_Rdy1
LDR R1, =PMC_MCKR_Val
STR R1, [R0, #PMC_MCKR]
WAIT_Rdy2 LDR R2, [R0, #PMC_SR]
ANDS R2, R2, #PMC_MCKRDY
BEQ WAIT_Rdy2
ELIF (PMC_MCKR_Val:AND:PMC_CSS) == 3 ; PLL Clock Selected
LDR R1, =PMC_MCKR_Val
AND R1, #PMC_PRES
STR R1, [R0, #PMC_MCKR]
WAIT_Rdy1 LDR R2, [R0, #PMC_SR]
ANDS R2, R2, #PMC_MCKRDY
BEQ WAIT_Rdy1
LDR R1, =PMC_MCKR_Val
STR R1, [R0, #PMC_MCKR]
WAIT_Rdy2 LDR R2, [R0, #PMC_SR]
ANDS R2, R2, #PMC_MCKRDY
BEQ WAIT_Rdy2
ENDIF ; Select Clock
ENDIF ; PMC_SETUP
; Copy Exception Vectors to Internal RAM
IF :DEF:RAM_INTVEC
ADR R8, Vectors ; Source
LDR R9, =RAM_BASE ; Destination
LDMIA R8!, {R0-R7} ; Load Vectors
STMIA R9!, {R0-R7} ; Store Vectors
LDMIA R8!, {R0-R7} ; Load Handler Addresses
STMIA R9!, {R0-R7} ; Store Handler Addresses
ENDIF
; Remap on-chip RAM to address 0
MC_BASE EQU 0xFFFFFF00 ; MC Base Address
MC_RCR EQU 0x00 ; MC_RCR Offset
IF :DEF:REMAP
LDR R0, =MC_BASE
MOV R1, #1
STR R1, [R0, #MC_RCR] ; Remap
ENDIF
; Setup Stack for each mode
; R0 walks downward: each mode gets SP, then R0 drops by that mode's size
LDR R0, =Stack_Top
; Enter Undefined Instruction Mode and set its Stack Pointer
MSR CPSR_c, #Mode_UND:OR:I_Bit:OR:F_Bit
MOV SP, R0
SUB R0, R0, #UND_Stack_Size
; Enter Abort Mode and set its Stack Pointer
MSR CPSR_c, #Mode_ABT:OR:I_Bit:OR:F_Bit
MOV SP, R0
SUB R0, R0, #ABT_Stack_Size
; Enter FIQ Mode and set its Stack Pointer
MSR CPSR_c, #Mode_FIQ:OR:I_Bit:OR:F_Bit
MOV SP, R0
SUB R0, R0, #FIQ_Stack_Size
; Enter IRQ Mode and set its Stack Pointer
MSR CPSR_c, #Mode_IRQ:OR:I_Bit:OR:F_Bit
MOV SP, R0
SUB R0, R0, #IRQ_Stack_Size
; Enter Supervisor Mode and set its Stack Pointer
MSR CPSR_c, #Mode_SVC:OR:I_Bit:OR:F_Bit
MOV SP, R0
SUB R0, R0, #SVC_Stack_Size
; Enter User Mode and set its Stack Pointer
; MSR CPSR_c, #Mode_USR
IF :DEF:__MICROLIB
EXPORT __initial_sp
ELSE
; No usr mode stack here.
;MOV SP, R0
;SUB SL, SP, #USR_Stack_Size
ENDIF
; Enter the C code
IMPORT __main
LDR R0, =__main
BX R0
IMPORT rt_interrupt_enter
IMPORT rt_interrupt_leave
IMPORT rt_thread_switch_interrupt_flag
IMPORT rt_interrupt_from_thread
IMPORT rt_interrupt_to_thread
IMPORT rt_hw_trap_irq
; IRQ entry: save the interrupted context, run the kernel IRQ hooks and
; dispatcher, then either return or fall into the interrupt-time switch.
IRQ_Handler PROC
EXPORT IRQ_Handler
STMFD sp!, {r0-r12,lr} ; save interrupted context on the IRQ-mode stack
BL rt_interrupt_enter
BL rt_hw_trap_irq
BL rt_interrupt_leave
; if rt_thread_switch_interrupt_flag set, jump to
; rt_hw_context_switch_interrupt_do and don't return
LDR r0, =rt_thread_switch_interrupt_flag
LDR r1, [r0]
CMP r1, #1
BEQ rt_hw_context_switch_interrupt_do
LDMFD sp!, {r0-r12,lr}
SUBS pc, lr, #4 ; return from IRQ: lr holds the interrupted pc + 4
ENDP
; /*
; * void rt_hw_context_switch_interrupt_do(rt_base_t flag)
; */
; Interrupt-time context switch: entered from IRQ_Handler with r0
; pointing at rt_thread_switch_interrupt_flag and the interrupted
; context still on the IRQ stack.  Rebuilds the context on the
; preempted thread's own stack and resumes rt_interrupt_to_thread.
rt_hw_context_switch_interrupt_do PROC
EXPORT rt_hw_context_switch_interrupt_do
MOV r1, #0 ; clear flag
STR r1, [r0]
LDMFD sp!, {r0-r12,lr}; reload saved registers
STMFD sp!, {r0-r3} ; save r0-r3
MOV r1, sp ; r1 = where r0-r3 were just spilled
ADD sp, sp, #16 ; restore sp
SUB r2, lr, #4 ; save old task's pc to r2
MRS r3, spsr ; get cpsr of interrupt thread
; switch to SVC mode and no interrupt
MSR cpsr_c, #I_Bit|F_Bit|Mode_SVC
STMFD sp!, {r2} ; push old task's pc
STMFD sp!, {r4-r12,lr}; push old task's lr,r12-r4
MOV r4, r1 ; Special optimised code below
MOV r5, r3
LDMFD r4!, {r0-r3} ; fetch r0-r3 back from the IRQ stack
STMFD sp!, {r0-r3} ; push old task's r3-r0
STMFD sp!, {r5} ; push old task's cpsr
MRS r4, spsr
STMFD sp!, {r4} ; push old task's spsr
LDR r4, =rt_interrupt_from_thread
LDR r5, [r4]
STR sp, [r5] ; store sp in preempted tasks's TCB
LDR r6, =rt_interrupt_to_thread
LDR r6, [r6]
LDR sp, [r6] ; get new task's stack pointer
LDMFD sp!, {r4} ; pop new task's spsr
MSR spsr_cxsf, r4
LDMFD sp!, {r4} ; pop new task's psr
MSR cpsr_cxsf, r4
LDMFD sp!, {r0-r12,lr,pc} ; pop new task's r0-r12,lr & pc
ENDP
IF :DEF:__MICROLIB
EXPORT __heap_base
EXPORT __heap_limit
ELSE
; User Initial Stack & Heap
AREA |.text|, CODE, READONLY
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
; Returns the initial heap/stack bounds to the ARM C library:
; R0 = heap base, R1 = stack base, R2 = heap limit, R3 = stack limit
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, = (Stack_Mem + IRQ_Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ENDIF
END
|
vandercookking/h7_device_RTT
| 2,336
|
rt-thread/libcpu/arm/s3c44b0/context_gcc.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2006-09-06 XuXinming first version
*/
/*!
* \addtogroup S3C44B0
*/
/*@{*/
#define NOINT 0xc0 /* CPSR I+F bits: mask both IRQ and FIQ */
/*
 * rt_base_t rt_hw_interrupt_disable();
 * Returns the previous cpsr in r0 and masks IRQ+FIQ.
 */
.globl rt_hw_interrupt_disable
rt_hw_interrupt_disable:
mrs r0, cpsr @ r0 = previous interrupt level (return value)
orr r1, r0, #NOINT
msr cpsr_c, r1 @ mask IRQ and FIQ
mov pc, lr
/*
 * void rt_hw_interrupt_enable(rt_base_t level);
 * Restores the cpsr previously returned by rt_hw_interrupt_disable.
 */
.globl rt_hw_interrupt_enable
rt_hw_interrupt_enable:
msr cpsr, r0
mov pc, lr
/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
 * r0 --> from
 * r1 --> to
 * Thread-to-thread switch: saves the full context (pc, lr, r0-r12,
 * cpsr, spsr) on the current stack, stores sp into *from, then
 * restores the context reached through *to.
 */
.globl rt_hw_context_switch
rt_hw_context_switch:
stmfd sp!, {lr} @ push pc (lr should be pushed in place of PC)
stmfd sp!, {r0-r12, lr} @ push lr & register file
mrs r4, cpsr
stmfd sp!, {r4} @ push cpsr
mrs r4, spsr
stmfd sp!, {r4} @ push spsr
str sp, [r0] @ store sp in preempted tasks TCB
ldr sp, [r1] @ get new task stack pointer
ldmfd sp!, {r4} @ pop new task spsr
msr spsr_cxsf, r4
ldmfd sp!, {r4} @ pop new task cpsr
msr cpsr_cxsf, r4
ldmfd sp!, {r0-r12, lr, pc} @ pop new task r0-r12, lr & pc
/*
 * void rt_hw_context_switch_to(rt_uint32 to);
 * r0 --> to
 * First-ever switch: nothing is saved; simply restores the context
 * found through *to (used to start the scheduler).
 */
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
ldr sp, [r0] @ get new task stack pointer
ldmfd sp!, {r4} @ pop new task spsr
msr spsr_cxsf, r4
ldmfd sp!, {r4} @ pop new task cpsr
msr cpsr_cxsf, r4
ldmfd sp!, {r0-r12, lr, pc} @ pop new task r0-r12, lr & pc
/*
 * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
 * Defers the actual switch to IRQ exit: records from/to and raises
 * rt_thread_switch_interrupt_flag; only the first request in a nest
 * records "from", later requests just retarget "to".
 */
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
ldr r2, =rt_thread_switch_interrupt_flag
ldr r3, [r2]
cmp r3, #1
beq _reswitch @ a switch is already pending: only update destination
mov r3, #1 @ set rt_thread_switch_interrupt_flag to 1
str r3, [r2]
ldr r2, =rt_interrupt_from_thread @ set rt_interrupt_from_thread
str r0, [r2]
_reswitch:
ldr r2, =rt_interrupt_to_thread @ set rt_interrupt_to_thread
str r1, [r2]
mov pc, lr
|
vandercookking/h7_device_RTT
| 6,112
|
rt-thread/libcpu/arm/s3c44b0/start_gcc.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2006-09-06 XuXinming first version
* 2006-09-20 Bernard clean the code
*/
/**
* @addtogroup S3C44B0
*/
/*@{*/
.section .init, "ax"
.code 32
/*
 * ARM exception vector table at the image entry point; each slot
 * loads PC from a literal word below so handlers may live anywhere
 * in the address space.
 */
.globl _start
_start:
b reset /* reset vector: fall through to the system entry */
ldr pc, _vector_undef
ldr pc, _vector_swi
ldr pc, _vector_pabt
ldr pc, _vector_dabt
ldr pc, _vector_resv
ldr pc, _vector_irq
ldr pc, _vector_fiq
/* literal words holding the handler addresses loaded above */
_vector_undef: .word vector_undef
_vector_swi: .word vector_swi
_vector_pabt: .word vector_pabt
_vector_dabt: .word vector_dabt
_vector_resv: .word vector_resv
_vector_irq: .word vector_irq
_vector_fiq: .word vector_fiq
.text
.code 32
/*
 * rtthread kernel start and end
 * which are defined in linker script
 */
.globl _rtthread_start
_rtthread_start:.word _start
.globl _rtthread_end
_rtthread_end: .word _end
/*
 * rtthread bss start and end
 * which are defined in linker script
 */
.globl _bss_start
_bss_start: .word __bss_start
.globl _bss_end
_bss_end: .word __bss_end
#if defined(__FLASH_BUILD__)
/*
 * TEXT_BASE,
 * which is defined in macro of make
 */
_TEXT_BASE: .word TEXT_BASE
#endif
/* S3C44B0 peripheral register addresses used by reset below */
.equ WTCON, 0x1d30000 /* watchdog timer control */
.equ INTCON, 0x1e00000 /* interrupt control */
.equ INTMSK, 0x1e0000c /* interrupt mask */
/* the system entry */
/*
 * Sequence: enter SVC mode with interrupts masked -> disable watchdog
 * -> mask all interrupt sources -> set every mode's stack -> (flash
 * build) copy the kernel image from flash to RAM -> jump to
 * rtthread_startup.
 */
reset:
/* enter svc mode */
msr cpsr_c, #SVCMODE|NOINT /* SVCMODE/NOINT are .equ'd later in this file */
/*watch dog disable */
ldr r0,=WTCON
ldr r1,=0x0
str r1,[r0]
/* all interrupt disable */
ldr r0,=INTMSK
ldr r1,=0x07ffffff
str r1,[r0]
ldr r1, =INTCON
ldr r0, =0x05 /* presumably non-vectored IRQ mode -- verify INTCON encoding */
str r0, [r1]
#if defined(__FLASH_BUILD__)
/* init lowlevel */
bl lowlevel_init
#endif
/* setup stack */
bl stack_setup
#if defined(__FLASH_BUILD__)
/* copy the kernel image (up to the start of .bss) from flash at 0x0
   to its RAM link address */
mov r0, #0x0 /* r0 <- flash base address */
ldr r1, _TEXT_BASE /* r1 <- the target address */
ldr r2, _rtthread_start
ldr r3, _bss_start
sub r2, r3, r2 /* r2 <- size of rtthread kernel */
add r2, r0, r2 /* r2 <- source end address */
copy_loop:
ldmia r0!, {r3-r10} /* copy from source address [r0] */
stmia r1!, {r3-r10} /* copy to target address [r1] */
cmp r0, r2 /* until source end address [r2] */
ble copy_loop
#endif
/* start RT-Thread Kernel */
ldr pc, _rtthread_startup
_rtthread_startup: .word rtthread_startup
/* ARM processor mode values and CPSR interrupt-mask bits */
.equ USERMODE, 0x10
.equ FIQMODE, 0x11
.equ IRQMODE, 0x12
.equ SVCMODE, 0x13
.equ ABORTMODE, 0x17
.equ UNDEFMODE, 0x1b
.equ MODEMASK, 0x1f
.equ NOINT, 0xc0 /* I+F bits: mask IRQ and FIQ */
/* exception handlers */
/* each trap branches straight into its C handler (no context saved here) */
vector_undef: bl rt_hw_trap_udef
vector_swi: bl rt_hw_trap_swi
vector_pabt: bl rt_hw_trap_pabt
vector_dabt: bl rt_hw_trap_dabt
vector_resv: bl rt_hw_trap_resv
.globl rt_interrupt_enter
.globl rt_interrupt_leave
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
/*
 * IRQ entry: save the interrupted context, run the kernel IRQ hooks
 * and dispatcher, then either return or fall into the interrupt-time
 * thread switch.
 */
vector_irq:
stmfd sp!, {r0-r12,lr} /* save interrupted context on the IRQ-mode stack */
bl led_off
bl rt_interrupt_enter
bl rt_hw_trap_irq
bl rt_interrupt_leave
/* if rt_thread_switch_interrupt_flag set, jump to _interrupt_thread_switch and don't return */
ldr r0, =rt_thread_switch_interrupt_flag
ldr r1, [r0]
cmp r1, #1
beq _interrupt_thread_switch
ldmfd sp!, {r0-r12,lr}
subs pc, lr, #4 /* return from IRQ: lr holds the interrupted pc + 4 */
.align 5
/* FIQ entry: save the scratch registers, run the C trap handler, return */
vector_fiq:
stmfd sp!,{r0-r7,lr}
bl rt_hw_trap_fiq
ldmfd sp!,{r0-r7,lr}
subs pc,lr,#4
/*
 * Interrupt-time context switch: entered from vector_irq with r0
 * pointing at rt_thread_switch_interrupt_flag and the interrupted
 * context still on the IRQ stack.  Rebuilds the context on the
 * preempted thread's own stack and resumes rt_interrupt_to_thread.
 */
_interrupt_thread_switch:
mov r1, #0 @ clear rt_thread_switch_interrupt_flag
str r1, [r0]
ldmfd sp!, {r0-r12,lr} @ reload saved registers
stmfd sp!, {r0-r3} @ save r0-r3
mov r1, sp @ r1 = where r0-r3 were just spilled
add sp, sp, #16 @ restore sp
sub r2, lr, #4 @ save old task's pc to r2
mrs r3, spsr @ disable interrupt
orr r0, r3, #NOINT
msr spsr_c, r0
ldr r0, =.+8 @ switch to interrupted task's stack
movs pc, r0 @ movs pc copies spsr into cpsr, leaving IRQ mode
stmfd sp!, {r2} @ push old task's pc
stmfd sp!, {r4-r12,lr} @ push old task's lr,r12-r4
mov r4, r1 @ Special optimised code below
mov r5, r3
ldmfd r4!, {r0-r3} @ fetch r0-r3 back from the IRQ stack
stmfd sp!, {r0-r3} @ push old task's r3-r0
stmfd sp!, {r5} @ push old task's psr
mrs r4, spsr
stmfd sp!, {r4} @ push old task's spsr
ldr r4, =rt_interrupt_from_thread
ldr r5, [r4]
str sp, [r5] @ store sp in preempted tasks's TCB
ldr r6, =rt_interrupt_to_thread
ldr r6, [r6]
ldr sp, [r6] @ get new task's stack pointer
ldmfd sp!, {r4} @ pop new task's spsr
msr SPSR_cxsf, r4
ldmfd sp!, {r4} @ pop new task's psr
msr CPSR_cxsf, r4
ldmfd sp!, {r0-r12,lr,pc} @ pop new task's r0-r12,lr & pc
/* each mode stack memory */
/* literal words: top-of-stack addresses (the base symbols presumably
   come from the linker script -- verify) */
UNDSTACK_START: .word _undefined_stack_start + 128
ABTSTACK_START: .word _abort_stack_start + 128
FIQSTACK_START: .word _fiq_stack_start + 1024
IRQSTACK_START: .word _irq_stack_start + 1024
SVCSTACK_START: .word _svc_stack_start + 4096
/* gives every ARM mode its stack pointer, ending in SVC mode */
stack_setup:
/* undefined instruction mode */
msr cpsr_c, #UNDEFMODE|NOINT
ldr sp, UNDSTACK_START
/* abort mode */
msr cpsr_c, #ABORTMODE|NOINT
ldr sp, ABTSTACK_START
/* FIQ mode */
msr cpsr_c, #FIQMODE|NOINT
ldr sp, FIQSTACK_START
/* IRQ mode */
msr cpsr_c, #IRQMODE|NOINT
ldr sp, IRQSTACK_START
/* supervisor mode */
msr cpsr_c, #SVCMODE|NOINT
ldr sp, SVCSTACK_START
mov pc,lr @ The LR register may be not valid for the mode changes.
/* drive the LED pins high via port C data register (PDATC) */
.globl led_on
led_on:
ldr r1, =0x1d20014 @ r1<-PDATC
ldr r0, [r1] @ r0<-[r1]
orr r0, r0, #0x0e @ r0=r0 or 0x0e
str r0, [r1] @ r0->[r1]
mov pc, lr
/* configure port C (PCONC) and clear the port C data register (PDATC) */
.globl led_off
led_off:
ldr r1, =0x1d20010 @ r1<-PCONC
ldr r0, =0x5f555555 @ r0<-0x5f555555
str r0, [r1] @ r0->[r1]
ldr r1, =0x1d20014 @ r1<-PDATC
ldr r0, =0x0 @ r0<-00
str r0, [r1] @ r0->[r1]
mov pc, lr
|
vandercookking/h7_device_RTT
| 2,589
|
rt-thread/libcpu/arm/s3c44b0/context_rvds.S
|
;/*
; * Copyright (c) 2006-2022, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date Author Notes
; * 2009-01-20 Bernard first version
; */
NOINT EQU 0xc0 ; disable interrupt in psr (I and F bits together)
AREA |.text|, CODE, READONLY, ALIGN=2
ARM ; 32-bit ARM code, 8-byte stack alignment contract
REQUIRE8
PRESERVE8
;/*
; * rt_base_t rt_hw_interrupt_disable();
; * Returns the current cpsr in r0 and masks both IRQ and FIQ.
; */
rt_hw_interrupt_disable PROC
EXPORT rt_hw_interrupt_disable
MRS r0, cpsr ; r0 = previous interrupt level (return value)
ORR r1, r0, #NOINT
MSR cpsr_c, r1 ; mask IRQ and FIQ
BX lr
ENDP
;/*
; * void rt_hw_interrupt_enable(rt_base_t level);
; * Restores the control field of cpsr from the value previously
; * returned by rt_hw_interrupt_disable.
; */
rt_hw_interrupt_enable PROC
EXPORT rt_hw_interrupt_enable
MSR cpsr_c, r0
BX lr
ENDP
;/*
; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
; * r0 --> from
; * r1 --> to
; * Thread-to-thread switch: saves the full context (pc, lr, r0-r12,
; * cpsr, spsr) on the current stack, stores sp into *from, then
; * restores the context reached through *to.
; */
rt_hw_context_switch PROC
EXPORT rt_hw_context_switch
STMFD sp!, {lr} ; push pc (lr should be pushed in place of PC)
STMFD sp!, {r0-r12, lr} ; push lr & register file
MRS r4, cpsr
STMFD sp!, {r4} ; push cpsr
MRS r4, spsr
STMFD sp!, {r4} ; push spsr
STR sp, [r0] ; store sp in preempted tasks TCB
LDR sp, [r1] ; get new task stack pointer
LDMFD sp!, {r4} ; pop new task spsr
MSR spsr_cxsf, r4
LDMFD sp!, {r4} ; pop new task cpsr
MSR cpsr_cxsf, r4
LDMFD sp!, {r0-r12, lr, pc} ; pop new task r0-r12, lr & pc
ENDP
;/*
; * void rt_hw_context_switch_to(rt_uint32 to);
; * r0 --> to
; * First-ever switch: nothing is saved; simply restores the context
; * found through *to (used to start the scheduler).
; */
rt_hw_context_switch_to PROC
EXPORT rt_hw_context_switch_to
LDR sp, [r0] ; get new task stack pointer
LDMFD sp!, {r4} ; pop new task spsr
MSR spsr_cxsf, r4
LDMFD sp!, {r4} ; pop new task cpsr
MSR cpsr_cxsf, r4
LDMFD sp!, {r0-r12, lr, pc} ; pop new task r0-r12, lr & pc
ENDP
;/*
; * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
; * Defers the actual switch to IRQ exit: records from/to and raises
; * rt_thread_switch_interrupt_flag; only the first request in a nest
; * records "from", later requests just retarget "to".
; */
IMPORT rt_thread_switch_interrupt_flag
IMPORT rt_interrupt_from_thread
IMPORT rt_interrupt_to_thread
rt_hw_context_switch_interrupt PROC
EXPORT rt_hw_context_switch_interrupt
LDR r2, =rt_thread_switch_interrupt_flag
LDR r3, [r2]
CMP r3, #1
BEQ _reswitch ; a switch is already pending: only update destination
MOV r3, #1 ; set rt_thread_switch_interrupt_flag to 1
STR r3, [r2]
LDR r2, =rt_interrupt_from_thread ; set rt_interrupt_from_thread
STR r0, [r2]
_reswitch
LDR r2, =rt_interrupt_to_thread ; set rt_interrupt_to_thread
STR r1, [r2]
BX lr
ENDP
END
|
vandercookking/h7_device_RTT
| 43,817
|
rt-thread/libcpu/arm/s3c44b0/start_rvds.S
|
;/*****************************************************************************/
;/* S3C44B0X.S: Startup file for Samsung S3C44B0X */
;/*****************************************************************************/
;/* <<< Use Configuration Wizard in Context Menu >>> */
;/*****************************************************************************/
;/* This file is part of the uVision/ARM development tools. */
;/* Copyright (c) 2005-2006 Keil Software. All rights reserved. */
;/* This software may only be used under the terms of a valid, current, */
;/* end user licence from KEIL for a compatible version of KEIL software */
;/* development tools. Nothing else gives you the right to use this software. */
;/*****************************************************************************/
; *** Startup Code (executed after Reset) ***
; Standard definitions of Mode bits and Interrupt (I & F) flags in PSRs
Mode_USR EQU 0x10
Mode_FIQ EQU 0x11
Mode_IRQ EQU 0x12
Mode_SVC EQU 0x13
Mode_ABT EQU 0x17
Mode_UND EQU 0x1B
Mode_SYS EQU 0x1F
I_Bit EQU 0x80 ; when I bit is set, IRQ is disabled
F_Bit EQU 0x40 ; when F bit is set, FIQ is disabled
;// <h> Stack Configuration (Stack Sizes in Bytes)
;// <o0> Undefined Mode <0x0-0xFFFFFFFF:8>
;// <o1> Supervisor Mode <0x0-0xFFFFFFFF:8>
;// <o2> Abort Mode <0x0-0xFFFFFFFF:8>
;// <o3> Fast Interrupt Mode <0x0-0xFFFFFFFF:8>
;// <o4> Interrupt Mode <0x0-0xFFFFFFFF:8>
;// <o5> User/System Mode <0x0-0xFFFFFFFF:8>
;// </h>
UND_Stack_Size EQU 0x00000000
SVC_Stack_Size EQU 0x00000100
ABT_Stack_Size EQU 0x00000000
FIQ_Stack_Size EQU 0x00000000
IRQ_Stack_Size EQU 0x00000100
USR_Stack_Size EQU 0x00000100
ISR_Stack_Size EQU (UND_Stack_Size + SVC_Stack_Size + ABT_Stack_Size + \
FIQ_Stack_Size + IRQ_Stack_Size)
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE USR_Stack_Size
__initial_sp SPACE ISR_Stack_Size
Stack_Top
;// <h> Heap Configuration
;// <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF>
;// </h>
Heap_Size EQU 0x00000000
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
; CPU Wrapper and Bus Priorities definitions
CPUW_BASE EQU 0x01C00000 ; CPU Wrapper Base Address
SYSCFG_OFS EQU 0x00 ; SYSCFG Offset
NCACHBE0_OFS EQU 0x04 ; NCACHBE0 Offset
NCACHBE1_OFS EQU 0x08 ; NCACHBE1 Offset
BUSP_BASE EQU 0x01C40000 ; Bus Priority Base Address
SBUSCON_OFS EQU 0x00 ; SBUSCON Offset
;// <e> CPU Wrapper and Bus Priorities
;// <h> CPU Wrapper
;// <o1.0> SE: Stall Enable
;// <o1.1..2> CM: Cache Mode
;// <0=> Disable Cache (8kB SRAM)
;// <1=> Half Cache Enable (4kB Cache, 4kB SRAM)
;// <2=> Reserved
;// <3=> Full Cache Enable (8kB Cache)
;// <o1.3> WE: Write Buffer Enable
;// <o1.4> RSE: Read Stall Enable
;// <o1.5> DA: Data Abort <0=> Enable <1=> Disable
;// <h> Non-cacheable Area 0
;// <o2.0..15> Start Address <0x0-0x0FFFF000:0x1000><#/0x1000>
;// <i> SA = (Start Address) / 4k
;// <o2.16..31> End Address + 1 <0x0-0x10000000:0x1000><#/0x1000>
;// <i> SE = (End Address + 1) / 4k
;// </h>
;// <h> Non-cacheable Area 1
;// <o3.0..15> Start Address <0x0-0x0FFFF000:0x1000><#/0x1000>
;// <i> SA = (Start Address) / 4k
;// <o3.16..31> End Address + 1 <0x0-0x10000000:0x1000><#/0x1000>
;// <i> SE = (End Address + 1) / 4k
;// </h>
;// </h>
;// <h> Bus Priorities
;// <o4.31> FIX: Fixed Priorities
;// <o4.6..7> LCD_DMA <0=> 1st <1=> 2nd <2=> 3rd <3=> 4th
;// <o4.4..5> ZDMA <0=> 1st <1=> 2nd <2=> 3rd <3=> 4th
;// <o4.2..3> BDMA <0=> 1st <1=> 2nd <2=> 3rd <3=> 4th
;// <o4.0..1> nBREQ <0=> 1st <1=> 2nd <2=> 3rd <3=> 4th
;// </h>
;// </e>
SYS_SETUP EQU 0
SYSCFG_Val EQU 0x00000001
NCACHBE0_Val EQU 0x00000000
NCACHBE1_Val EQU 0x00000000
SBUSCON_Val EQU 0x80001B1B
;// <e> Vectored Interrupt Mode (for IRQ)
;// <o1.25> EINT0 <i> External Interrupt 0
;// <o1.24> EINT1 <i> External Interrupt 1
;// <o1.23> EINT2 <i> External Interrupt 2
;// <o1.22> EINT3 <i> External Interrupt 3
;// <o1.21> EINT4567 <i> External Interrupt 4/5/6/7
;// <o1.20> TICK <i> RTC Time Tick Interrupt
;// <o1.19> ZDMA0 <i> General DMA0 Interrupt
;// <o1.18> ZDMA1 <i> General DMA1 Interrupt
;// <o1.17> BDMA0 <i> Bridge DMA0 Interrupt
;// <o1.16> BDMA1 <i> Bridge DMA1 Interrupt
;// <o1.15> WDT <i> Watchdog Timer Interrupt
;// <o1.14> UERR01 <i> UART0/1 Error Interrupt
;// <o1.13> TIMER0 <i> Timer0 Interrupt
;// <o1.12> TIMER1 <i> Timer1 Interrupt
;// <o1.11> TIMER2 <i> Timer2 Interrupt
;// <o1.10> TIMER3 <i> Timer3 Interrupt
;// <o1.9> TIMER4 <i> Timer4 Interrupt
;// <o1.8> TIMER5 <i> Timer5 Interrupt
;// <o1.7> URXD0 <i> UART0 Rx Interrupt
;// <o1.6> URXD1 <i> UART1 Rx Interrupt
;// <o1.5> IIC <i> IIC Interrupt
;// <o1.4> SIO <i> SIO Interrupt
;// <o1.3> UTXD0 <i> UART0 Tx Interrupt
;// <o1.2> UTXD1 <i> UART1 Tx Interrupt
;// <o1.1> RTC <i> RTC Alarm Interrupt
;// <o1.0> ADC <i> ADC EOC Interrupt
;// </e>
VIM_SETUP EQU 0
VIM_CFG EQU 0x00000000
; Clock Management definitions
CLK_BASE EQU 0x01D80000 ; Clock Base Address
PLLCON_OFS EQU 0x00 ; PLLCON Offset
CLKCON_OFS EQU 0x04 ; CLKCON Offset
CLKSLOW_OFS EQU 0x08 ; CLKSLOW Offset
LOCKTIME_OFS EQU 0x0C ; LOCKTIME Offset
;// <e> Clock Management
;// <h> PLL Settings
;// <i> Fpllo = (m * Fin) / (p * 2^s), 20MHz < Fpllo < 66MHz
;// <o1.12..19> MDIV: Main divider <0x0-0xFF>
;// <i> m = MDIV + 8
;// <o1.4..9> PDIV: Pre-divider <0x0-0x3F>
;// <i> p = PDIV + 2, 1MHz <= Fin/p < 2MHz
;// <o1.0..1> SDIV: Post Divider <0x0-0x03>
;// <i> s = SDIV, Fpllo * 2^s < 170MHz
;// <o4.0..11> LTIME CNT: PLL Lock Time Count <0x0-0x0FFF>
;// </h>
;// <h> Master Clock
;// <i> PLL Clock: Fout = Fpllo
;// <i> Slow Clock: Fout = Fin / (2 * SLOW_VAL), SLOW_VAL > 0
;// <i> Slow Clock: Fout = Fin, SLOW_VAL = 0
;// <o3.5> PLL_OFF: PLL Off
;// <i> PLL is turned Off only when SLOW_BIT = 1
;// <o3.4> SLOW_BIT: Slow Clock
;// <o3.0..3> SLOW_VAL: Slow Clock divider <0x0-0x0F>
;// </h>
;// <h> Clock Generation
;// <o2.14> IIS <0=> Disable <1=> Enable
;// <o2.13> IIC <0=> Disable <1=> Enable
;// <o2.12> ADC <0=> Disable <1=> Enable
;// <o2.11> RTC <0=> Disable <1=> Enable
;// <o2.10> GPIO <0=> Disable <1=> Enable
;// <o2.9> UART1 <0=> Disable <1=> Enable
;// <o2.8> UART0 <0=> Disable <1=> Enable
;// <o2.7> BDMA0,1 <0=> Disable <1=> Enable
;// <o2.6> LCDC <0=> Disable <1=> Enable
;// <o2.5> SIO <0=> Disable <1=> Enable
;// <o2.4> ZDMA0,1 <0=> Disable <1=> Enable
;// <o2.3> PWMTIMER <0=> Disable <1=> Enable
;// </h>
;// </e>
; CLK_SETUP = 1: Reset_Handler programs the clock controller with the four
; values below (see the Configuration Wizard annotations above for the
; bit-field meanings of each value).
CLK_SETUP EQU 1
PLLCON_Val EQU 0x00038080
CLKCON_Val EQU 0x00007FF8
CLKSLOW_Val EQU 0x00000009
LOCKTIME_Val EQU 0x00000FFF
; Watchdog Timer definitions
; Offsets are relative to WT_BASE; consumed by Reset_Handler's WT_SETUP block.
WT_BASE EQU 0x01D30000 ; WT Base Address
WTCON_OFS EQU 0x00 ; WTCON Offset
WTDAT_OFS EQU 0x04 ; WTDAT Offset
WTCNT_OFS EQU 0x08 ; WTCNT Offset
;// <e> Watchdog Timer
;// <o1.5> Watchdog Timer Enable/Disable
;// <o1.0> Reset Enable/Disable
;// <o1.2> Interrupt Enable/Disable
;// <o1.3..4> Clock Select
;// <0=> 1/16 <1=> 1/32 <2=> 1/64 <3=> 1/128
;// <i> Clock Division Factor
;// <o1.8..15> Prescaler Value <0x0-0xFF>
;// <o2.0..15> Time-out Value <0x0-0xFFFF>
;// </e>
; WT_SETUP = 1: Reset_Handler writes WTDAT_Val into both WTCNT and WTDAT,
; then WTCON_Val into WTCON (see the wizard annotations above for fields).
WT_SETUP EQU 1
WTCON_Val EQU 0x00008000
WTDAT_Val EQU 0x00008000
; Memory Controller definitions
MC_BASE EQU 0x01C80000 ; Memory Controller Base Address
;// <e> Memory Controller
; MC_SETUP = 1: the 13-word MC_CFG table below is block-copied to MC_BASE.
MC_SETUP EQU 1
;// <h> Bank 0
;// <o0.0..1> PMC: Page Mode Configuration
;// <0=> 1 Data <1=> 4 Data <2=> 8 Data <3=> 16 Data
;// <o0.2..3> Tpac: Page Mode Access Cycle
;// <0=> 2 clks <1=> 3 clks <2=> 4 clks <3=> 6 clks
;// <o0.4..5> Tcah: Address Holding Time after nGCSn
;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks
;// <o0.6..7> Toch: Chip Select Hold on nOE
;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks
;// <o0.8..10> Tacc: Access Cycle
;// <0=> 1 clk <1=> 2 clks <2=> 3 clks <3=> 4 clks
;// <4=> 6 clk <5=> 8 clks <6=> 10 clks <7=> 14 clks
;// <o0.11..12> Tcos: Chip Select Set-up nOE
;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks
;// <o0.13..14> Tacs: Address Set-up before nGCSn
;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks
;// </h>
;//
;// <h> Bank 1
;// <o8.4..5> DW: Data Bus Width
;// <0=> 8-bit <1=> 16-bit <2=> 32-bit <3=> Rsrvd
;// <o8.6> WS: WAIT Status
;// <0=> WAIT Disable
;// <1=> WAIT Enable
;// <o8.7> ST: SRAM Type
;// <0=> Not using UB/LB
;// <1=> Using UB/LB
;// <o1.0..1> PMC: Page Mode Configuration
;// <0=> 1 Data <1=> 4 Data <2=> 8 Data <3=> 16 Data
;// <o1.2..3> Tpac: Page Mode Access Cycle
;// <0=> 2 clks <1=> 3 clks <2=> 4 clks <3=> 6 clks
;// <o1.4..5> Tcah: Address Holding Time after nGCSn
;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks
;// <o1.6..7> Toch: Chip Select Hold on nOE
;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks
;// <o1.8..10> Tacc: Access Cycle
;// <0=> 1 clk <1=> 2 clks <2=> 3 clks <3=> 4 clks
;// <4=> 6 clk <5=> 8 clks <6=> 10 clks <7=> 14 clks
;// <o1.11..12> Tcos: Chip Select Set-up nOE
;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks
;// <o1.13..14> Tacs: Address Set-up before nGCSn
;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks
;// </h>
;//
;// <h> Bank 2
;// <o8.8..9> DW: Data Bus Width
;// <0=> 8-bit <1=> 16-bit <2=> 32-bit <3=> Rsrvd
;// <o8.10> WS: WAIT Status
;// <0=> WAIT Disable
;// <1=> WAIT Enable
;// <o8.11> ST: SRAM Type
;// <0=> Not using UB/LB
;// <1=> Using UB/LB
;// <o2.0..1> PMC: Page Mode Configuration
;// <0=> 1 Data <1=> 4 Data <2=> 8 Data <3=> 16 Data
;// <o2.2..3> Tpac: Page Mode Access Cycle
;// <0=> 2 clks <1=> 3 clks <2=> 4 clks <3=> 6 clks
;// <o2.4..5> Tcah: Address Holding Time after nGCSn
;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks
;// <o2.6..7> Toch: Chip Select Hold on nOE
;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks
;// <o2.8..10> Tacc: Access Cycle
;// <0=> 1 clk <1=> 2 clks <2=> 3 clks <3=> 4 clks
;// <4=> 6 clk <5=> 8 clks <6=> 10 clks <7=> 14 clks
;// <o2.11..12> Tcos: Chip Select Set-up nOE
;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks
;// <o2.13..14> Tacs: Address Set-up before nGCSn
;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks
;// </h>
;//
;// <h> Bank 3
;// <o8.12..13> DW: Data Bus Width
;// <0=> 8-bit <1=> 16-bit <2=> 32-bit <3=> Rsrvd
;// <o8.14> WS: WAIT Status
;// <0=> WAIT Disable
;// <1=> WAIT Enable
;// <o8.15> ST: SRAM Type
;// <0=> Not using UB/LB
;// <1=> Using UB/LB
;// <o3.0..1> PMC: Page Mode Configuration
;// <0=> 1 Data <1=> 4 Data <2=> 8 Data <3=> 16 Data
;// <o3.2..3> Tpac: Page Mode Access Cycle
;// <0=> 2 clks <1=> 3 clks <2=> 4 clks <3=> 6 clks
;// <o3.4..5> Tcah: Address Holding Time after nGCSn
;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks
;// <o3.6..7> Toch: Chip Select Hold on nOE
;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks
;// <o3.8..10> Tacc: Access Cycle
;// <0=> 1 clk <1=> 2 clks <2=> 3 clks <3=> 4 clks
;// <4=> 6 clk <5=> 8 clks <6=> 10 clks <7=> 14 clks
;// <o3.11..12> Tcos: Chip Select Set-up nOE
;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks
;// <o3.13..14> Tacs: Address Set-up before nGCSn
;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks
;// </h>
;//
;// <h> Bank 4
;// <o8.16..17> DW: Data Bus Width
;// <0=> 8-bit <1=> 16-bit <2=> 32-bit <3=> Rsrvd
;// <o8.18> WS: WAIT Status
;// <0=> WAIT Disable
;// <1=> WAIT Enable
;// <o8.19> ST: SRAM Type
;// <0=> Not using UB/LB
;// <1=> Using UB/LB
;// <o4.0..1> PMC: Page Mode Configuration
;// <0=> 1 Data <1=> 4 Data <2=> 8 Data <3=> 16 Data
;// <o4.2..3> Tpac: Page Mode Access Cycle
;// <0=> 2 clks <1=> 3 clks <2=> 4 clks <3=> 6 clks
;// <o4.4..5> Tcah: Address Holding Time after nGCSn
;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks
;// <o4.6..7> Toch: Chip Select Hold on nOE
;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks
;// <o4.8..10> Tacc: Access Cycle
;// <0=> 1 clk <1=> 2 clks <2=> 3 clks <3=> 4 clks
;// <4=> 6 clk <5=> 8 clks <6=> 10 clks <7=> 14 clks
;// <o4.11..12> Tcos: Chip Select Set-up nOE
;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks
;// <o4.13..14> Tacs: Address Set-up before nGCSn
;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks
;// </h>
;//
;// <h> Bank 5
;// <o8.20..21> DW: Data Bus Width
;// <0=> 8-bit <1=> 16-bit <2=> 32-bit <3=> Rsrvd
;// <o8.22> WS: WAIT Status
;// <0=> WAIT Disable
;// <1=> WAIT Enable
;// <o8.23> ST: SRAM Type
;// <0=> Not using UB/LB
;// <1=> Using UB/LB
;// <o5.0..1> PMC: Page Mode Configuration
;// <0=> 1 Data <1=> 4 Data <2=> 8 Data <3=> 16 Data
;// <o5.2..3> Tpac: Page Mode Access Cycle
;// <0=> 2 clks <1=> 3 clks <2=> 4 clks <3=> 6 clks
;// <o5.4..5> Tcah: Address Holding Time after nGCSn
;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks
;// <o5.6..7> Toch: Chip Select Hold on nOE
;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks
;// <o5.8..10> Tacc: Access Cycle
;// <0=> 1 clk <1=> 2 clks <2=> 3 clks <3=> 4 clks
;// <4=> 6 clk <5=> 8 clks <6=> 10 clks <7=> 14 clks
;// <o5.11..12> Tcos: Chip Select Set-up nOE
;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks
;// <o5.13..14> Tacs: Address Set-up before nGCSn
;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks
;// </h>
;//
;// <h> Bank 6
;// <o10.0..2> BK76MAP: Bank 6/7 Memory Map
;// <0=> 32M <4=> 2M <5=> 4M <6=> 8M <7=> 16M
;// <o8.24..25> DW: Data Bus Width
;// <0=> 8-bit <1=> 16-bit <2=> 32-bit <3=> Rsrvd
;// <o8.26> WS: WAIT Status
;// <0=> WAIT Disable
;// <1=> WAIT Enable
;// <o8.27> ST: SRAM Type
;// <0=> Not using UB/LB
;// <1=> Using UB/LB
;// <o6.15..16> MT: Memory Type
;// <0=> ROM or SRAM
;// <1=> FP DRAMP
;// <2=> EDO DRAM
;// <3=> SDRAM
;// <h> ROM or SRAM
;// <o6.0..1> PMC: Page Mode Configuration
;// <0=> 1 Data <1=> 4 Data <2=> 8 Data <3=> 16 Data
;// <o6.2..3> Tpac: Page Mode Access Cycle
;// <0=> 2 clks <1=> 3 clks <2=> 4 clks <3=> 6 clks
;// <o6.4..5> Tcah: Address Holding Time after nGCSn
;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks
;// <o6.6..7> Toch: Chip Select Hold on nOE
;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks
;// <o6.8..10> Tacc: Access Cycle
;// <0=> 1 clk <1=> 2 clks <2=> 3 clks <3=> 4 clks
;// <4=> 6 clk <5=> 8 clks <6=> 10 clks <7=> 14 clks
;// <o6.11..12> Tcos: Chip Select Set-up nOE
;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks
;// <o6.13..14> Tacs: Address Set-up before nGCSn
;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks
;// </h>
;// <h> FP DRAM or EDO DRAM
;// <o6.0..1> CAN: Columnn Address Number
;// <0=> 8-bit <1=> 9-bit <2=> 10-bit <3=> 11-bit
;// <o6.2> Tcp: CAS Pre-charge
;// <0=> 1 clk <1=> 2 clks
;// <o6.3> Tcas: CAS Pulse Width
;// <0=> 1 clk <1=> 2 clks
;// <o6.4..5> Trcd: RAS to CAS Delay
;// <0=> 1 clk <1=> 2 clks <2=> 3 clks <3=> 4 clks
;// </h>
;// <h> SDRAM
;// <o6.0..1> SCAN: Columnn Address Number
;// <0=> 8-bit <1=> 9-bit <2=> 10-bit <3=> Rsrvd
;// <o6.2..3> Trcd: RAS to CAS Delay
;// <0=> 2 clks <1=> 3 clks <2=> 4 clks <3=> Rsrvd
;// <o10.4> SCLKEN: SCLK Selection (Bank 6/7)
;// <0=> Normal
;// <1=> Reduced Power
;// <o11.0..2> BL: Burst Length
;// <0=> 1
;// <o11.3> BT: Burst Type
;// <0=> Sequential
;// <o11.4..6> CL: CAS Latency
;// <0=> 1 clk <1=> 2 clks <2=> 3 clks
;// <o11.7..8> TM: Test Mode
;// <0=> Mode Register Set
;// <o11.9> WBL: Write Burst Length
;// <0=> 0
;// </h>
;// </h>
;//
;// <h> Bank 7
;// <o10.0..2> BK76MAP: Bank 6/7 Memory Map
;// <0=> 32M <4=> 2M <5=> 4M <6=> 8M <7=> 16M
;// <o8.28..29> DW: Data Bus Width
;// <0=> 8-bit <1=> 16-bit <2=> 32-bit <3=> Rsrvd
;// <o8.30> WS: WAIT Status
;// <0=> WAIT Disable
;// <1=> WAIT Enable
;// <o8.31> ST: SRAM Type
;// <0=> Not using UB/LB
;// <1=> Using UB/LB
;// <o7.15..16> MT: Memory Type
;// <0=> ROM or SRAM
;// <1=> FP DRAMP
;// <2=> EDO DRAM
;// <3=> SDRAM
;// <h> ROM or SRAM
;// <o7.0..1> PMC: Page Mode Configuration
;// <0=> 1 Data <1=> 4 Data <2=> 8 Data <3=> 16 Data
;// <o7.2..3> Tpac: Page Mode Access Cycle
;// <0=> 2 clks <1=> 3 clks <2=> 4 clks <3=> 6 clks
;// <o7.4..5> Tcah: Address Holding Time after nGCSn
;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks
;// <o7.6..7> Toch: Chip Select Hold on nOE
;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks
;// <o7.8..10> Tacc: Access Cycle
;// <0=> 1 clk <1=> 2 clks <2=> 3 clks <3=> 4 clks
;// <4=> 6 clk <5=> 8 clks <6=> 10 clks <7=> 14 clks
;// <o7.11..12> Tcos: Chip Select Set-up nOE
;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks
;// <o7.13..14> Tacs: Address Set-up before nGCSn
;// <0=> 0 clk <1=> 1 clk <2=> 2 clks <3=> 4 clks
;// </h>
;// <h> FP DRAM or EDO DRAM
;// <o7.0..1> CAN: Columnn Address Number
;// <0=> 8-bit <1=> 9-bit <2=> 10-bit <3=> 11-bit
;// <o7.2> Tcp: CAS Pre-charge
;// <0=> 1 clk <1=> 2 clks
;// <o7.3> Tcas: CAS Pulse Width
;// <0=> 1 clk <1=> 2 clks
;// <o7.4..5> Trcd: RAS to CAS Delay
;// <0=> 1 clk <1=> 2 clks <2=> 3 clks <3=> 4 clks
;// </h>
;// <h> SDRAM
;// <o7.0..1> SCAN: Columnn Address Number
;// <0=> 8-bit <1=> 9-bit <2=> 10-bit <3=> Rsrvd
;// <o7.2..3> Trcd: RAS to CAS Delay
;// <0=> 2 clks <1=> 3 clks <2=> 4 clks <3=> Rsrvd
;// <o10.4> SCLKEN: SCLK Selection (Bank 6/7)
;// <0=> Normal
;// <1=> Reduced Power
;// <o12.0..2> BL: Burst Length
;// <0=> 1
;// <o12.3> BT: Burst Type
;// <0=> Sequential
;// <o12.4..6> CL: CAS Latency
;// <0=> 1 clk <1=> 2 clks <2=> 3 clks
;// <o12.7..8> TM: Test Mode
;// <0=> Mode Register Set
;// <o12.9> WBL: Write Burst Length
;// <0=> 0
;// </h>
;// </h>
;//
;// <h> Refresh
;// <o9.23> REFEN: DRAM/SDRAM Refresh
;// <0=> Disable <1=> Enable
;// <o9.22> TREFMD: DRAM/SDRAM Refresh Mode
;// <0=> CBR/Auto Refresh
;// <1=> Self Refresh
;// <o9.20..21> Trp: DRAM/SDRAM RAS Pre-charge Time
;// <0=> 1.5 clks (DRAM) / 2 clks (SDRAM)
;// <1=> 2.5 clks (DRAM) / 3 clks (SDRAM)
;// <2=> 3.5 clks (DRAM) / 4 clks (SDRAM)
;// <3=> 4.5 clks (DRAM) / Rsrvd (SDRAM)
;// <o9.18..19> Trc: SDRAM RC Min Time
;// <0=> 4 clks <1=> 5 clks <2=> 6 clks <3=> 7 clks
;// <o9.16..17> Tchr: DRAM CAS Hold Time
;// <0=> 1 clks <1=> 2 clks <2=> 3 clks <3=> 4 clks
;// <o9.0..10> Refresh Counter <0x0-0x07FF>
;// <i> Refresh Period = (2^11 - Refresh Count + 1) / MCLK
;// </h>
; Memory-controller register values. These 13 words are gathered into the
; MC_CFG table (in bus-register order: BWSCON, BANKCON0..7, REFRESH,
; BANKSIZE, MRSRB6, MRSRB7) and written to MC_BASE with a single STMIA.
; See the Configuration Wizard annotations above for per-bit meanings.
BANKCON0_Val EQU 0x00000700
BANKCON1_Val EQU 0x00000700
BANKCON2_Val EQU 0x00000700
BANKCON3_Val EQU 0x00000700
BANKCON4_Val EQU 0x00000700
BANKCON5_Val EQU 0x00000700
BANKCON6_Val EQU 0x00018008
BANKCON7_Val EQU 0x00018008
BWSCON_Val EQU 0x00000000
REFRESH_Val EQU 0x00AC0000
BANKSIZE_Val EQU 0x00000000
MRSRB6_Val EQU 0x00000000
MRSRB7_Val EQU 0x00000000
;// </e> End of MC
; I/O Ports definitions
; Offsets are relative to PIO_BASE; used by Reset_Handler's PIO_SETUP block.
PIO_BASE EQU 0x01D20000 ; PIO Base Address
PCONA_OFS EQU 0x00 ; PCONA Offset
PCONB_OFS EQU 0x08 ; PCONB Offset
PCONC_OFS EQU 0x10 ; PCONC Offset
PCOND_OFS EQU 0x1C ; PCOND Offset
PCONE_OFS EQU 0x28 ; PCONE Offset
PCONF_OFS EQU 0x34 ; PCONF Offset
PCONG_OFS EQU 0x40 ; PCONG Offset
PUPC_OFS EQU 0x18 ; PUPC Offset
PUPD_OFS EQU 0x24 ; PUPD Offset
PUPE_OFS EQU 0x30 ; PUPE Offset
PUPF_OFS EQU 0x3C ; PUPF Offset
PUPG_OFS EQU 0x48 ; PUPG Offset
SPUCR_OFS EQU 0x4C ; SPUCR Offset
;// <e> I/O Configuration
; PIO_SETUP = 0: the whole port-init section is compiled out, so the
; per-port PIOx_SETUP flags below have no effect until this is set to 1.
PIO_SETUP EQU 0
;// <e> Port A
;// <o1.0> PA0 <0=> Output <1=> ADDR0
;// <o1.1> PA1 <0=> Output <1=> ADDR16
;// <o1.2> PA2 <0=> Output <1=> ADDR17
;// <o1.3> PA3 <0=> Output <1=> ADDR18
;// <o1.4> PA4 <0=> Output <1=> ADDR19
;// <o1.5> PA5 <0=> Output <1=> ADDR20
;// <o1.6> PA6 <0=> Output <1=> ADDR21
;// <o1.7> PA7 <0=> Output <1=> ADDR22
;// <o1.8> PA8 <0=> Output <1=> ADDR23
;// <o1.9> PA9 <0=> Output <1=> ADDR24
;// </e>
; Port A: all 10 pins routed to their address-bus alternate function.
PIOA_SETUP EQU 1
PCONA_Val EQU 0x000003FF
;// <e> Port B
;// <o1.0> PB0 <0=> Output <1=> SCKE
;// <o1.1> PB1 <0=> Output <1=> CKLK
;// <o1.2> PB2 <0=> Output <1=> nSCAS/nCAS2
;// <o1.3> PB3 <0=> Output <1=> nSRAS/nCAS3
;// <o1.4> PB4 <0=> Output <1=> nWBE2/nBE2/DQM2
;// <o1.5> PB5 <0=> Output <1=> nWBE3/nBE3/DQM3
;// <o1.6> PB6 <0=> Output <1=> nGCS1
;// <o1.7> PB7 <0=> Output <1=> nGCS2
;// <o1.8> PB8 <0=> Output <1=> nGCS3
;// <o1.9> PB9 <0=> Output <1=> nGCS4
;// <o1.10> PB10 <0=> Output <1=> nGCS5
;// </e>
; Port B: all 11 pins routed to their memory-control alternate function.
PIOB_SETUP EQU 1
PCONB_Val EQU 0x000007FF
;// <e> Port C
;// <o1.0..1> PC0 <0=> Input <1=> Output <2=> DATA16 <3=> IISLRCK
;// <o1.2..3> PC1 <0=> Input <1=> Output <2=> DATA17 <3=> IISDO
;// <o1.4..5> PC2 <0=> Input <1=> Output <2=> DATA18 <3=> IISDI
;// <o1.6..7> PC3 <0=> Input <1=> Output <2=> DATA19 <3=> IISCLK
;// <o1.8..9> PC4 <0=> Input <1=> Output <2=> DATA20 <3=> VD7
;// <o1.10..11> PC5 <0=> Input <1=> Output <2=> DATA21 <3=> VD6
;// <o1.12..13> PC6 <0=> Input <1=> Output <2=> DATA22 <3=> VD5
;// <o1.14..15> PC7 <0=> Input <1=> Output <2=> DATA23 <3=> VD4
;// <o1.16..17> PC8 <0=> Input <1=> Output <2=> DATA24 <3=> nXDACK1
;// <o1.18..19> PC9 <0=> Input <1=> Output <2=> DATA25 <3=> nXDREQ1
;// <o1.20..21> PC10 <0=> Input <1=> Output <2=> DATA26 <3=> nRTS1
;// <o1.22..23> PC11 <0=> Input <1=> Output <2=> DATA27 <3=> nCTS1
;// <o1.24..25> PC12 <0=> Input <1=> Output <2=> DATA28 <3=> TxD1
;// <o1.26..27> PC13 <0=> Input <1=> Output <2=> DATA29 <3=> RxD1
;// <o1.28..29> PC14 <0=> Input <1=> Output <2=> DATA30 <3=> nRTS0
;// <o1.30..31> PC15 <0=> Input <1=> Output <2=> DATA31 <3=> nCTS0
;// <h> Pull-up Resistors
;// <o2.0> PC0 Pull-up <0=> Enabled <1=> Disabled
;// <o2.1> PC1 Pull-up <0=> Enabled <1=> Disabled
;// <o2.2> PC2 Pull-up <0=> Enabled <1=> Disabled
;// <o2.3> PC3 Pull-up <0=> Enabled <1=> Disabled
;// <o2.4> PC4 Pull-up <0=> Enabled <1=> Disabled
;// <o2.5> PC5 Pull-up <0=> Enabled <1=> Disabled
;// <o2.6> PC6 Pull-up <0=> Enabled <1=> Disabled
;// <o2.7> PC7 Pull-up <0=> Enabled <1=> Disabled
;// <o2.8> PC8 Pull-up <0=> Enabled <1=> Disabled
;// <o2.9> PC9 Pull-up <0=> Enabled <1=> Disabled
;// <o2.10> PC10 Pull-up <0=> Enabled <1=> Disabled
;// <o2.11> PC11 Pull-up <0=> Enabled <1=> Disabled
;// <o2.12> PC12 Pull-up <0=> Enabled <1=> Disabled
;// <o2.13> PC13 Pull-up <0=> Enabled <1=> Disabled
;// <o2.14> PC14 Pull-up <0=> Enabled <1=> Disabled
;// <o2.15> PC15 Pull-up <0=> Enabled <1=> Disabled
;// </h>
;// </e>
; Port C: function code 2 on every pin (per wizard table above), pull-ups on.
PIOC_SETUP EQU 1
PCONC_Val EQU 0xAAAAAAAA
PUPC_Val EQU 0x00000000
;// <e> Port D
;// <o1.0..1> PD0 <0=> Input <1=> Output <2=> VD0 <3=> Reserved
;// <o1.2..3> PD1 <0=> Input <1=> Output <2=> VD1 <3=> Reserved
;// <o1.4..5> PD2 <0=> Input <1=> Output <2=> VD2 <3=> Reserved
;// <o1.6..7> PD3 <0=> Input <1=> Output <2=> VD3 <3=> Reserved
;// <o1.8..9> PD4 <0=> Input <1=> Output <2=> VCLK <3=> Reserved
;// <o1.10..11> PD5 <0=> Input <1=> Output <2=> VLINE <3=> Reserved
;// <o1.12..13> PD6 <0=> Input <1=> Output <2=> VM <3=> Reserved
;// <o1.14..15> PD7 <0=> Input <1=> Output <2=> VFRAME <3=> Reserved
;// <h> Pull-up Resistors
;// <o2.0> PD0 Pull-up <0=> Enabled <1=> Disabled
;// <o2.1> PD1 Pull-up <0=> Enabled <1=> Disabled
;// <o2.2> PD2 Pull-up <0=> Enabled <1=> Disabled
;// <o2.3> PD3 Pull-up <0=> Enabled <1=> Disabled
;// <o2.4> PD4 Pull-up <0=> Enabled <1=> Disabled
;// <o2.5> PD5 Pull-up <0=> Enabled <1=> Disabled
;// <o2.6> PD6 Pull-up <0=> Enabled <1=> Disabled
;// <o2.7> PD7 Pull-up <0=> Enabled <1=> Disabled
;// </h>
;// </e>
; Port D: all pins configured as inputs (function code 0), pull-ups enabled.
PIOD_SETUP EQU 1
PCOND_Val EQU 0x00000000
PUPD_Val EQU 0x00000000
;// <e> Port E
;// <o1.0..1> PE0 <0=> Input <1=> Output <2=> Fpllo <3=> Fout
;// <o1.2..3> PE1 <0=> Input <1=> Output <2=> TxD0 <3=> Reserved
;// <o1.4..5> PE2 <0=> Input <1=> Output <2=> RxD0 <3=> Reserved
;// <o1.6..7> PE3 <0=> Input <1=> Output <2=> TOUT0 <3=> Reserved
;// <o1.8..9> PE4 <0=> Input <1=> Output <2=> TOUT1 <3=> TCLK
;// <o1.10..11> PE5 <0=> Input <1=> Output <2=> TOUT2 <3=> TCLK
;// <o1.12..13> PE6 <0=> Input <1=> Output <2=> TOUT3 <3=> VD6
;// <o1.14..15> PE7 <0=> Input <1=> Output <2=> TOUT4 <3=> VD7
;// <o1.16..17> PE8 <0=> Input <1=> Output <2=> CODECLK <3=> Reserved
;// <h> Pull-up Resistors
;// <o2.0> PE0 Pull-up <0=> Enabled <1=> Disabled
;// <o2.1> PE1 Pull-up <0=> Enabled <1=> Disabled
;// <o2.2> PE2 Pull-up <0=> Enabled <1=> Disabled
;// <o2.3> PE3 Pull-up <0=> Enabled <1=> Disabled
;// <o2.4> PE4 Pull-up <0=> Enabled <1=> Disabled
;// <o2.5> PE5 Pull-up <0=> Enabled <1=> Disabled
;// <o2.6> PE6 Pull-up <0=> Enabled <1=> Disabled
;// <o2.7> PE7 Pull-up <0=> Enabled <1=> Disabled
;// <o2.8> PE8 Pull-up <0=> Enabled <1=> Disabled
;// </h>
;// </e>
; Port E: all pins configured as inputs (function code 0), pull-ups enabled.
PIOE_SETUP EQU 1
PCONE_Val EQU 0x00000000
PUPE_Val EQU 0x00000000
;// <e> Port F
;// <o1.0..1> PF0 <0=> Input <1=> Output <2=> IICSCL <3=> Reserved
;// <o1.2..3> PF1 <0=> Input <1=> Output <2=> IICSDA <3=> Reserved
;// <o1.4..5> PF2 <0=> Input <1=> Output <2=> nWAIT <3=> Reserved
;// <o1.6..7> PF3 <0=> Input <1=> Output <2=> nXBACK <3=> nXDACK0
;// <o1.8..9> PF4 <0=> Input <1=> Output <2=> nXBREQ <3=> nXDREQ0
;// <o1.10..12> PF5 <0=> Input <1=> Output <2=> nRTS1 <3=> SIOTxD
;// <4=> IISLRCK <5=> Reserved <6=> Reserved <7=> Reserved
;// <o1.13..15> PF6 <0=> Input <1=> Output <2=> TxD1 <3=> SIORDY
;// <4=> IISDO <5=> Reserved <6=> Reserved <7=> Reserved
;// <o1.16..18> PF7 <0=> Input <1=> Output <2=> RxD1 <3=> SIORxD
;// <4=> IISDI <5=> Reserved <6=> Reserved <7=> Reserved
;// <o1.19..21> PF8 <0=> Input <1=> Output <2=> nCTS1 <3=> SIOCLK
;// <4=> IISCLK <5=> Reserved <6=> Reserved <7=> Reserved
;// <h> Pull-up Resistors
;// <o2.0> PF0 Pull-up <0=> Enabled <1=> Disabled
;// <o2.1> PF1 Pull-up <0=> Enabled <1=> Disabled
;// <o2.2> PF2 Pull-up <0=> Enabled <1=> Disabled
;// <o2.3> PF3 Pull-up <0=> Enabled <1=> Disabled
;// <o2.4> PF4 Pull-up <0=> Enabled <1=> Disabled
;// <o2.5> PF5 Pull-up <0=> Enabled <1=> Disabled
;// <o2.6> PF6 Pull-up <0=> Enabled <1=> Disabled
;// <o2.7> PF7 Pull-up <0=> Enabled <1=> Disabled
;// <o2.8> PF8 Pull-up <0=> Enabled <1=> Disabled
;// </h>
;// </e>
; Port F: all pins configured as inputs (function code 0), pull-ups enabled.
PIOF_SETUP EQU 1
PCONF_Val EQU 0x00000000
PUPF_Val EQU 0x00000000
;// <e> Port G
;// <o1.0..1> PG0 <0=> Input <1=> Output <2=> VD4 <3=> EINT0
;// <o1.2..3> PG1 <0=> Input <1=> Output <2=> VD5 <3=> EINT1
;// <o1.4..5> PG2 <0=> Input <1=> Output <2=> nCTS0 <3=> EINT2
;// <o1.6..7> PG3 <0=> Input <1=> Output <2=> nRTS0 <3=> EINT3
;// <o1.8..9> PG4 <0=> Input <1=> Output <2=> IISCLK <3=> EINT4
;// <o1.10..11> PG5 <0=> Input <1=> Output <2=> IISDI <3=> EINT5
;// <o1.12..13> PG6 <0=> Input <1=> Output <2=> IISDO <3=> EINT6
;// <o1.14..15> PG7 <0=> Input <1=> Output <2=> IISLRCK <3=> EINT7
;// <h> Pull-up Resistors
;// <o2.0> PG0 Pull-up <0=> Enabled <1=> Disabled
;// <o2.1> PG1 Pull-up <0=> Enabled <1=> Disabled
;// <o2.2> PG2 Pull-up <0=> Enabled <1=> Disabled
;// <o2.3> PG3 Pull-up <0=> Enabled <1=> Disabled
;// <o2.4> PG4 Pull-up <0=> Enabled <1=> Disabled
;// <o2.5> PG5 Pull-up <0=> Enabled <1=> Disabled
;// <o2.6> PG6 Pull-up <0=> Enabled <1=> Disabled
;// <o2.7> PG7 Pull-up <0=> Enabled <1=> Disabled
;// </h>
;// </e>
; Port G: all pins configured as inputs (function code 0), pull-ups enabled.
PIOG_SETUP EQU 1
PCONG_Val EQU 0x00000000
PUPG_Val EQU 0x00000000
;// <e> Special Pull-up
;// <o1.0> SPUCR0: DATA[7:0] Pull-up Resistor
;// <0=> Enabled <1=> Disabled
;// <o1.1> SPUCR1: DATA[15:8] Pull-up Resistor
;// <0=> Enabled <1=> Disabled
;// <o1.2> HZ@STOP
;// <0=> Prevoius state of PAD
;// <1=> HZ @ Stop
;// </e>
; Special pull-up control: bit 2 set selects HZ @ Stop (per wizard above).
PSPU_SETUP EQU 1
SPUCR_Val EQU 0x00000004
;// </e>
; Require 8-byte stack alignment from all code in this file (AAPCS).
PRESERVE8
; Area Definition and Entry Point
; Startup Code must be linked first at Address at which it expects to run.
AREA RESET, CODE, READONLY
ARM
; Exception Vectors
; Mapped to Address 0.
; Absolute addressing mode must be used.
; Dummy Handlers are implemented as infinite loops which can be modified.
; Exception vector table. Each entry loads PC from a literal word below,
; giving full 32-bit reach to any handler regardless of its link address.
Vectors LDR PC, Reset_Addr
LDR PC, Undef_Addr
LDR PC, SWI_Addr
LDR PC, PAbt_Addr
LDR PC, DAbt_Addr
NOP ; Reserved Vector
LDR PC, IRQ_Addr
LDR PC, FIQ_Addr
; Literal pool of handler addresses, laid out in the same order as the
; vectors above so each LDR PC picks up the matching word.
Reset_Addr DCD Reset_Handler
Undef_Addr DCD Undef_Handler
SWI_Addr DCD SWI_Handler
PAbt_Addr DCD PAbt_Handler
DAbt_Addr DCD DAbt_Handler
DCD 0 ; Reserved Address
IRQ_Addr DCD IRQ_Handler
FIQ_Addr DCD FIQ_Handler
; Default handlers: branch-to-self infinite loops. Replace with real
; handlers as the application requires. IRQ_Handler is defined further down.
Undef_Handler B Undef_Handler
SWI_Handler B SWI_Handler
PAbt_Handler B PAbt_Handler
DAbt_Handler B DAbt_Handler
FIQ_Handler B FIQ_Handler
; CPU Wrapper and Bus Priorities Configuration
; Each *_CFG label below is an in-ROM table that Reset_Handler reads with
; ADR + LDMIA; word order therefore matters and must match the register
; usage in the corresponding Reset_Handler section.
IF SYS_SETUP <> 0
SYS_CFG
; Word order -> R0..R5 in Reset_Handler:
; R0=CPUW_BASE, R1=BUSP_BASE, R2..R4=values stored at CPUW_BASE, R5 -> [R1]
DCD CPUW_BASE
DCD BUSP_BASE
DCD SYSCFG_Val
DCD NCACHBE0_Val
DCD NCACHBE1_Val
DCD SBUSCON_Val
ENDIF
; Memory Controller Configuration
IF MC_SETUP <> 0
MC_CFG
; 13 words (R0..R12) block-copied to MC_BASE in bus-register order.
DCD BWSCON_Val
DCD BANKCON0_Val
DCD BANKCON1_Val
DCD BANKCON2_Val
DCD BANKCON3_Val
DCD BANKCON4_Val
DCD BANKCON5_Val
DCD BANKCON6_Val
DCD BANKCON7_Val
DCD REFRESH_Val
DCD BANKSIZE_Val
DCD MRSRB6_Val
DCD MRSRB7_Val
ENDIF
; Clock Management Configuration
IF CLK_SETUP <> 0
CLK_CFG
; R0=CLK_BASE, R1=PLLCON, R2=CLKCON, R3=CLKSLOW, R4=LOCKTIME
DCD CLK_BASE
DCD PLLCON_Val
DCD CLKCON_Val
DCD CLKSLOW_Val
DCD LOCKTIME_Val
ENDIF
; I/O Configuration
IF PIO_SETUP <> 0
PIO_CFG
; 13 words (R0..R12): PCONA..PCONG, then PUPC..PUPG, then SPUCR.
DCD PCONA_Val
DCD PCONB_Val
DCD PCONC_Val
DCD PCOND_Val
DCD PCONE_Val
DCD PCONF_Val
DCD PCONG_Val
DCD PUPC_Val
DCD PUPD_Val
DCD PUPE_Val
DCD PUPF_Val
DCD PUPG_Val
DCD SPUCR_Val
ENDIF
; Reset Handler
; Brings the SoC up after reset: system/bus config, memory controller,
; clocks, watchdog, I/O ports, then per-mode stack pointers, then jumps to
; the C runtime entry (__main). Each section is conditionally assembled on
; its *_SETUP flag defined above.
EXPORT Reset_Handler
Reset_Handler
IF SYS_SETUP <> 0
; System config: R0=CPUW_BASE, R1=BUSP_BASE, R2..R4=values, R5 -> SBUSCON
ADR R8, SYS_CFG
LDMIA R8, {R0-R5}
STMIA R0, {R2-R4}
STR R5, [R1]
ENDIF
IF MC_SETUP <> 0
; Memory controller: copy all 13 config words to MC_BASE in one burst.
ADR R14, MC_CFG
LDMIA R14, {R0-R12}
LDR R14, =MC_BASE
STMIA R14, {R0-R12}
ENDIF
IF CLK_SETUP <> 0
; Clocks: program lock time first, then PLL, slow-clock, and clock gates,
; so the PLL lock counter is armed before PLLCON is written.
ADR R8, CLK_CFG
LDMIA R8, {R0-R4}
STR R4, [R0, #LOCKTIME_OFS]
STR R1, [R0, #PLLCON_OFS]
STR R3, [R0, #CLKSLOW_OFS]
STR R2, [R0, #CLKCON_OFS]
ENDIF
IF WT_SETUP <> 0
; Watchdog: load count and reload value before enabling via WTCON, so the
; timer starts from the configured period.
LDR R0, =WT_BASE
LDR R1, =WTCON_Val
LDR R2, =WTDAT_Val
STR R2, [R0, #WTCNT_OFS]
STR R2, [R0, #WTDAT_OFS]
STR R1, [R0, #WTCON_OFS]
ENDIF
IF PIO_SETUP <> 0
; I/O ports: R0..R6 = PCONA..PCONG, R7..R11 = PUPC..PUPG, R12 = SPUCR,
; each written only if the matching per-port flag is enabled.
ADR R14, PIO_CFG
LDMIA R14, {R0-R12}
LDR R14, =PIO_BASE
IF PIOA_SETUP <> 0
STR R0, [R14, #PCONA_OFS]
ENDIF
IF PIOB_SETUP <> 0
STR R1, [R14, #PCONB_OFS]
ENDIF
IF PIOC_SETUP <> 0
STR R2, [R14, #PCONC_OFS]
STR R7, [R14, #PUPC_OFS]
ENDIF
IF PIOD_SETUP <> 0
STR R3, [R14, #PCOND_OFS]
STR R8, [R14, #PUPD_OFS]
ENDIF
IF PIOE_SETUP <> 0
STR R4, [R14, #PCONE_OFS]
STR R9, [R14, #PUPE_OFS]
ENDIF
IF PIOF_SETUP <> 0
STR R5, [R14, #PCONF_OFS]
STR R10,[R14, #PUPF_OFS]
ENDIF
IF PIOG_SETUP <> 0
STR R6, [R14, #PCONG_OFS]
STR R11,[R14, #PUPG_OFS]
ENDIF
IF PSPU_SETUP <> 0
STR R12,[R14, #SPUCR_OFS]
ENDIF
ENDIF
; Setup Stack for each mode
; R0 walks down from Stack_Top; each mode is entered with IRQ/FIQ masked,
; given its stack pointer, then R0 is lowered by that mode's stack size.
; (Stack_Top and the *_Stack_Size symbols are defined earlier in this file.)
LDR R0, =Stack_Top
; Enter Undefined Instruction Mode and set its Stack Pointer
MSR CPSR_c, #Mode_UND:OR:I_Bit:OR:F_Bit
MOV SP, R0
SUB R0, R0, #UND_Stack_Size
; Enter Abort Mode and set its Stack Pointer
MSR CPSR_c, #Mode_ABT:OR:I_Bit:OR:F_Bit
MOV SP, R0
SUB R0, R0, #ABT_Stack_Size
; Enter FIQ Mode and set its Stack Pointer
MSR CPSR_c, #Mode_FIQ:OR:I_Bit:OR:F_Bit
MOV SP, R0
SUB R0, R0, #FIQ_Stack_Size
; Enter IRQ Mode and set its Stack Pointer
MSR CPSR_c, #Mode_IRQ:OR:I_Bit:OR:F_Bit
MOV SP, R0
SUB R0, R0, #IRQ_Stack_Size
; Enter Supervisor Mode and set its Stack Pointer
; Execution continues in SVC mode; RT-Thread threads run in SVC.
MSR CPSR_c, #Mode_SVC:OR:I_Bit:OR:F_Bit
MOV SP, R0
SUB R0, R0, #SVC_Stack_Size
; Enter User Mode and set its Stack Pointer
; MSR CPSR_c, #Mode_USR
IF :DEF:__MICROLIB
EXPORT __initial_sp
ELSE
; MOV SP, R0
; SUB SL, SP, #USR_Stack_Size
ENDIF
; Enter the C code
; BX (not BL): __main never returns, no link register needed.
IMPORT __main
LDR R0, =__main
BX R0
; RT-Thread kernel hooks used by the IRQ path below.
IMPORT rt_interrupt_enter
IMPORT rt_interrupt_leave
IMPORT rt_thread_switch_interrupt_flag
IMPORT rt_interrupt_from_thread
IMPORT rt_interrupt_to_thread
IMPORT rt_hw_trap_irq
; IRQ entry: save the interrupted context, bracket the C dispatcher with
; rt_interrupt_enter/leave, then either return to the interrupted thread
; or divert into the context-switch path if the scheduler requested a
; switch while we were at interrupt level.
IRQ_Handler PROC
EXPORT IRQ_Handler
STMFD sp!, {r0-r12,lr} ; save volatile regs + IRQ-mode lr on IRQ stack
BL rt_interrupt_enter
BL rt_hw_trap_irq ; C-level IRQ dispatcher
BL rt_interrupt_leave
; if rt_thread_switch_interrupt_flag set, jump to
; rt_hw_context_switch_interrupt_do and don't return
LDR r0, =rt_thread_switch_interrupt_flag
LDR r1, [r0]
CMP r1, #1
BEQ rt_hw_context_switch_interrupt_do ; r0 still = &flag, used there
LDMFD sp!, {r0-r12,lr}
SUBS pc, lr, #4 ; IRQ return: also restores CPSR from SPSR
ENDP
; /*
; * void rt_hw_context_switch_interrupt_do(rt_base_t flag)
; *
; * Entered only from IRQ_Handler (r0 = &rt_thread_switch_interrupt_flag,
; * IRQ stack holds the interrupted thread's r0-r12,lr). Rebuilds the
; * interrupted thread's full context frame on its own (SVC) stack, saves
; * the stack pointer into the outgoing thread's TCB, then restores the
; * incoming thread's frame and resumes it.
; */
rt_hw_context_switch_interrupt_do PROC
EXPORT rt_hw_context_switch_interrupt_do
MOV r1, #0 ; clear flag
STR r1, [r0]
LDMFD sp!, {r0-r12,lr}; reload saved registers
STMFD sp!, {r0-r3} ; save r0-r3
MOV r1, sp ; r1 -> the four saved words (still IRQ stack)
ADD sp, sp, #16 ; restore sp
SUB r2, lr, #4 ; save old task's pc to r2
MRS r3, spsr ; get cpsr of interrupt thread
; switch to SVC mode and no interrupt
MSR cpsr_c, #I_Bit|F_Bit|Mode_SVC
STMFD sp!, {r2} ; push old task's pc
STMFD sp!, {r4-r12,lr}; push old task's lr,r12-r4
MOV r4, r1 ; Special optimised code below
MOV r5, r3
LDMFD r4!, {r0-r3} ; fetch r0-r3 back from the IRQ stack via r4
STMFD sp!, {r0-r3} ; push old task's r3-r0
STMFD sp!, {r5} ; push old task's cpsr
MRS r4, spsr
STMFD sp!, {r4} ; push old task's spsr
LDR r4, =rt_interrupt_from_thread
LDR r5, [r4]
STR sp, [r5] ; store sp in preempted tasks's TCB
LDR r6, =rt_interrupt_to_thread
LDR r6, [r6]
LDR sp, [r6] ; get new task's stack pointer
LDMFD sp!, {r4} ; pop new task's spsr
MSR spsr_cxsf, r4
LDMFD sp!, {r4} ; pop new task's psr
MSR cpsr_cxsf, r4
LDMFD sp!, {r0-r12,lr,pc} ; pop new task's r0-r12,lr & pc
ENDP
; Heap/stack handoff to the C library.
; With MicroLib, the linker-visible __heap_base/__heap_limit symbols are
; exported; otherwise the full ARM C library calls __user_initial_stackheap
; at startup to learn the two-region memory layout.
IF :DEF:__MICROLIB
EXPORT __heap_base
EXPORT __heap_limit
ELSE
; User Initial Stack & Heap
AREA |.text|, CODE, READONLY
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
; Returns (per ARM two-region model):
;   R0 = heap base, R1 = stack top, R2 = heap limit, R3 = stack base.
; Heap_Mem / Stack_Mem / sizes are defined earlier in this file.
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + USR_Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ENDIF
END
|
vandercookking/h7_device_RTT
| 2,819
|
rt-thread/libcpu/arm/zynqmp-r5/context_gcc.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2020-03-19 WangHuachen first version
*/
.section .text, "ax"
/*
 * rt_base_t rt_hw_interrupt_disable();
 *
 * Returns the current CPSR in r0 (the "level" token the caller later
 * passes back to rt_hw_interrupt_enable), then masks both IRQ and FIQ.
 */
.globl rt_hw_interrupt_disable
rt_hw_interrupt_disable:
mrs r0, cpsr @ r0 = current CPSR (saved interrupt state)
cpsid if @ mask IRQ and FIQ
bx lr
/*
 * void rt_hw_interrupt_enable(rt_base_t level);
 *
 * Restores the CPSR previously returned by rt_hw_interrupt_disable;
 * this re-enables interrupts only if they were enabled at save time.
 */
.globl rt_hw_interrupt_enable
rt_hw_interrupt_enable:
msr cpsr, r0 @ restore saved CPSR (r0 = level from disable)
bx lr
/*
 * void rt_hw_context_switch_to(rt_uint32 to);
 * r0 --> to
 *
 * First switch into a thread: r0 points at the new thread's saved stack
 * pointer. There is no "from" thread, so nothing is saved — we simply
 * adopt the new stack and unwind the context frame laid out by the
 * scheduler / rt_hw_context_switch.
 */
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
ldr sp, [r0] @ get new task stack pointer
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
/* VFP frame (only when built with hard-float): FPEXC, FPSCR, d0-d15 */
ldmfd sp!, {r1} /* Restore floating point registers */
vmsr FPEXC, r1
ldmfd sp!, {r1}
vmsr FPSCR, r1
vldmia sp!, {d0-d15}
#endif
ldmfd sp!, {r4} @ pop new task spsr
msr spsr_cxsf, r4
ldmfd sp!, {r0-r12, lr, pc}^ @ pop new task r0-r12, lr & pc; ^ copies SPSR->CPSR
.section .text.isr, "ax"
/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
 * r0 --> from
 * r1 --> to
 *
 * Thread-to-thread switch (called with the scheduler lock held, not from
 * an ISR). Saves the full context of the current thread on its own stack
 * in the frame layout expected by rt_hw_context_switch_to, records sp in
 * the outgoing TCB, then restores the incoming thread's frame.
 */
.globl rt_hw_context_switch
rt_hw_context_switch:
stmfd sp!, {lr} @ push pc (lr should be pushed in place of PC)
stmfd sp!, {r0-r12, lr} @ push lr & register file
mrs r4, cpsr
tst lr, #0x01 @ bit 0 of return address = Thumb caller
beq _ARM_MODE
orr r4, r4, #0x20 @ it's thumb code: resume with T bit set
_ARM_MODE:
stmfd sp!, {r4} @ push cpsr
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
/* VFP frame (hard-float builds only): d0-d15, FPSCR, FPEXC */
vstmdb sp!, {d0-d15} /* Store floating point registers */
vmrs r4, FPSCR
stmfd sp!,{r4}
vmrs r4, FPEXC
stmfd sp!,{r4}
#endif
str sp, [r0] @ store sp in preempted tasks TCB
ldr sp, [r1] @ get new task stack pointer
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
ldmfd sp!, {r1} /* Restore floating point registers */
vmsr FPEXC, r1
ldmfd sp!, {r1}
vmsr FPSCR, r1
vldmia sp!, {d0-d15}
#endif
ldmfd sp!, {r4} @ pop new task cpsr to spsr
msr spsr_cxsf, r4
ldmfd sp!, {r0-r12, lr, pc}^ @ pop new task r0-r12, lr & pc, copy spsr to cpsr
/*
 * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
 *
 * Deferred switch requested from interrupt context: instead of switching
 * immediately, record from/to and set a flag; the IRQ exit path performs
 * the actual switch. If a switch is already pending, only the "to" thread
 * is updated (the original "from" is kept).
 */
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
ldr r2, =rt_thread_switch_interrupt_flag
ldr r3, [r2]
cmp r3, #1
beq _reswitch @ switch already pending: keep original "from"
mov r3, #1 @ set rt_thread_switch_interrupt_flag to 1
str r3, [r2]
ldr r2, =rt_interrupt_from_thread @ set rt_interrupt_from_thread
str r0, [r2]
_reswitch:
ldr r2, =rt_interrupt_to_thread @ set rt_interrupt_to_thread
str r1, [r2]
bx lr
|
vandercookking/h7_device_RTT
| 12,636
|
rt-thread/libcpu/arm/zynqmp-r5/start_gcc.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2020-03-19 WangHuachen first version
* 2021-05-11 WangHuachen Added call to Xil_InitializeExistingMPURegConfig to
* initialize the MPU configuration table with the MPU
* configurations already set in Init_Mpu function.
*/
/* ARM processor mode encodings (CPSR[4:0]) */
.equ Mode_USR, 0x10
.equ Mode_FIQ, 0x11
.equ Mode_IRQ, 0x12
.equ Mode_SVC, 0x13
.equ Mode_ABT, 0x17
.equ Mode_UND, 0x1B
.equ Mode_SYS, 0x1F
.equ I_Bit, 0x80 @ when I bit is set, IRQ is disabled
.equ F_Bit, 0x40 @ when F bit is set, FIQ is disabled
/* Per-mode stack sizes; only FIQ and IRQ get dedicated stacks here. */
.equ UND_Stack_Size, 0x00000000
.equ SVC_Stack_Size, 0x00000000
.equ ABT_Stack_Size, 0x00000000
.equ FIQ_Stack_Size, 0x00000200
.equ IRQ_Stack_Size, 0x00000200
.equ USR_Stack_Size, 0x00000000
/* ZynqMP RPU / LPD register addresses used during boot (lock-step setup) */
.set RPU_GLBL_CNTL, 0xFF9A0000
.set RPU_ERR_INJ, 0xFF9A0020
.set RPU_0_CFG, 0xFF9A0100
.set RPU_1_CFG, 0xFF9A0200
.set RST_LPD_DBG, 0xFF5E0240
.set BOOT_MODE_USER, 0xFF5E0200
.set fault_log_enable, 0x101
/* Total exception-stack budget (sum of the per-mode sizes above) */
#define ISR_Stack_Size (UND_Stack_Size + SVC_Stack_Size + ABT_Stack_Size + \
FIQ_Stack_Size + IRQ_Stack_Size)
.section .data.share.isr
/* stack */
/* Zero-initialized exception stack; stack_top is the initial SP value.
 * NOTE(review): .rept ISR_Stack_Size with .long emits ISR_Stack_Size
 * 4-byte words, i.e. 4x ISR_Stack_Size bytes — looks intended as a byte
 * count; confirm against upstream before changing. The .bss directive
 * below places this region in BSS. */
.globl stack_start
.globl stack_top
.align 3
.bss
stack_start:
.rept ISR_Stack_Size
.long 0
.endr
stack_top:
.section .boot,"axS"
/* reset entry */
/*
 * _reset: Cortex-R5 cold-boot entry point.
 * Sequence: zero the GP register file, enter SVC with IRQ/FIQ masked,
 * carve per-mode stacks, enable CP10/CP11 + VFP and zero d0-d15, disable
 * MPU/caches/branch prediction, invalidate caches, configure RPU fault
 * logging (lock-step mode only), initialize the MPU, re-enable caches and
 * branch prediction, select the low vector base, enable async aborts,
 * clear .bss, run C++ constructors, then jump to the kernel entry.
 * Never returns.
 */
.globl _reset
_reset:
/* Initialize processor registers to 0 */
mov r0,#0
mov r1,#0
mov r2,#0
mov r3,#0
mov r4,#0
mov r5,#0
mov r6,#0
mov r7,#0
mov r8,#0
mov r9,#0
mov r10,#0
mov r11,#0
mov r12,#0
/* set the cpu to SVC32 mode and disable interrupt */
cpsid if, #Mode_SVC
/* setup stack */
bl stack_setup
/*
 * Enable access to VFP by enabling access to Coprocessors 10 and 11.
 * Enables Full Access i.e. in both privileged and non privileged modes
 */
mrc p15, 0, r0, c1, c0, 2 /* Read Coprocessor Access Control Register (CPACR) */
orr r0, r0, #(0xF << 20) /* Enable access to CP 10 & 11 */
mcr p15, 0, r0, c1, c0, 2 /* Write Coprocessor Access Control Register (CPACR) */
isb
/* enable fpu access */
vmrs r3, FPEXC
orr r1, r3, #(1<<30) /* FPEXC.EN */
vmsr FPEXC, r1
/* clear the floating point register*/
mov r1,#0
vmov d0,r1,r1
vmov d1,r1,r1
vmov d2,r1,r1
vmov d3,r1,r1
vmov d4,r1,r1
vmov d5,r1,r1
vmov d6,r1,r1
vmov d7,r1,r1
vmov d8,r1,r1
vmov d9,r1,r1
vmov d10,r1,r1
vmov d11,r1,r1
vmov d12,r1,r1
vmov d13,r1,r1
vmov d14,r1,r1
vmov d15,r1,r1
#ifdef __SOFTFP__
/* Disable the FPU if SOFTFP is defined (restore the pre-enable FPEXC) */
vmsr FPEXC,r3
#endif
/* Disable MPU and caches */
mrc p15, 0, r0, c1, c0, 0 /* Read CP15 Control Register*/
bic r0, r0, #0x05 /* Disable MPU (M bit) and data cache (C bit) */
bic r0, r0, #0x1000 /* Disable instruction cache (I bit) */
dsb /* Ensure all previous loads/stores have completed */
mcr p15, 0, r0, c1, c0, 0 /* Write CP15 Control Register */
isb /* Ensure subsequent insts execute wrt new MPU settings */
/* Disable Branch prediction, TCM ECC checks */
mrc p15, 0, r0, c1, c0, 1 /* Read ACTLR */
orr r0, r0, #(0x1 << 17) /* Enable RSDIS bit 17 to disable the return stack */
orr r0, r0, #(0x1 << 16) /* Clear BP bit 15 and set BP bit 16:*/
bic r0, r0, #(0x1 << 15) /* Branch always not taken and history table updates disabled*/
orr r0, r0, #(0x1 << 27) /* Enable B1TCM ECC check */
orr r0, r0, #(0x1 << 26) /* Enable B0TCM ECC check */
orr r0, r0, #(0x1 << 25) /* Enable ATCM ECC check */
bic r0, r0, #(0x1 << 5) /* Generate abort on parity errors, with [5:3]=b 000*/
bic r0, r0, #(0x1 << 4)
bic r0, r0, #(0x1 << 3)
mcr p15, 0, r0, c1, c0, 1 /* Write ACTLR*/
dsb /* Complete all outstanding explicit memory operations*/
/* Invalidate caches */
mov r0,#0 /* r0 = 0 */
dsb
mcr p15, 0, r0, c7, c5, 0 /* invalidate icache */
mcr p15, 0, r0, c15, c5, 0 /* Invalidate entire data cache (implementation-defined op on R5) */
isb
/* enable fault log for lock step */
ldr r0,=RPU_GLBL_CNTL
ldr r1, [r0]
ands r1, r1, #0x8 /* split/lock-step mode bit */
/* branch to initialization if split mode*/
bne init
/* check for boot mode if in lock step, branch to init if JTAG boot mode*/
ldr r0,=BOOT_MODE_USER
ldr r1, [r0]
ands r1, r1, #0xF
beq init
/* reset the debug logic */
ldr r0,=RST_LPD_DBG
ldr r1, [r0]
orr r1, r1, #(0x1 << 4)
orr r1, r1, #(0x1 << 5)
str r1, [r0]
/* enable fault log */
ldr r0,=RPU_ERR_INJ
ldr r1,=fault_log_enable
ldr r2, [r0]
orr r2, r2, r1
str r2, [r0]
nop
nop
init:
bl Init_MPU /* Initialize MPU */
/* Enable Branch prediction */
mrc p15, 0, r0, c1, c0, 1 /* Read ACTLR*/
bic r0, r0, #(0x1 << 17) /* Clear RSDIS bit 17 to enable return stack*/
bic r0, r0, #(0x1 << 16) /* Clear BP bit 15 and BP bit 16:*/
bic r0, r0, #(0x1 << 15) /* Normal operation, BP is taken from the global history table.*/
orr r0, r0, #(0x1 << 14) /* Disable DBWR for errata 780125 */
mcr p15, 0, r0, c1, c0, 1 /* Write ACTLR*/
/* Enable icache and dcache (SCTLR I and C bits plus MPU enable = 0x1005) */
mrc p15,0,r1,c1,c0,0
ldr r0, =0x1005
orr r1,r1,r0
dsb
mcr p15,0,r1,c1,c0,0 /* Enable cache */
isb /* isb flush prefetch buffer */
/* Set vector table in TCM/LOVEC: clear SCTLR.V (bit 13) */
mrc p15, 0, r0, c1, c0, 0
mvn r1, #0x2000
and r0, r0, r1
mcr p15, 0, r0, c1, c0, 0
/* Clear VINITHI to enable LOVEC on reset */
#if 1
ldr r0, =RPU_0_CFG
#else
ldr r0, =RPU_1_CFG
#endif
ldr r1, [r0]
bic r1, r1, #(0x1 << 2)
str r1, [r0]
/* enable asynchronous abort exception (clear CPSR.A, bit 8) */
mrs r0, cpsr
bic r0, r0, #0x100
msr cpsr_xsf, r0
/* clear .bss (while-loop: safe when __bss_start == __bss_end) */
mov r0,#0 /* get a zero */
ldr r1,=__bss_start /* bss start */
ldr r2,=__bss_end /* bss end */
bss_loop:
cmp r1,r2 /* check if data to clear */
strlo r0,[r1],#4 /* clear 4 bytes */
blo bss_loop /* loop until done */
/* call C++ constructors of global objects */
ldr r0, =__ctors_start__
ldr r1, =__ctors_end__
ctor_loop:
cmp r0, r1
beq ctor_end
ldr r2, [r0], #4 /* r2 = next constructor pointer */
stmfd sp!, {r0-r1} /* ctor may clobber r0/r1 */
mov lr, pc
bx r2
ldmfd sp!, {r0-r1}
b ctor_loop
ctor_end:
bl Xil_InitializeExistingMPURegConfig /* Initialize MPU config */
/* start RT-Thread Kernel */
ldr pc, _entry
_entry:
.word entry
/*
 * stack_setup: carve the region below stack_top into per-mode stacks
 * (SVC at the top, then UND, ABT, FIQ, IRQ working downward by each
 * mode's declared size).  Returns in SVC mode with IRQ/FIQ masked.
 * Clobbers: r0.
 */
stack_setup:
ldr r0, =stack_top
@ Set the startup stack for svc
mov sp, r0
@ Enter Undefined Instruction Mode and set its Stack Pointer
msr cpsr_c, #Mode_UND|I_Bit|F_Bit
mov sp, r0
sub r0, r0, #UND_Stack_Size
@ Enter Abort Mode and set its Stack Pointer
msr cpsr_c, #Mode_ABT|I_Bit|F_Bit
mov sp, r0
sub r0, r0, #ABT_Stack_Size
@ Enter FIQ Mode and set its Stack Pointer
msr cpsr_c, #Mode_FIQ|I_Bit|F_Bit
mov sp, r0
sub r0, r0, #FIQ_Stack_Size
@ Enter IRQ Mode and set its Stack Pointer
msr cpsr_c, #Mode_IRQ|I_Bit|F_Bit
mov sp, r0
sub r0, r0, #IRQ_Stack_Size
@ Switch back to SVC
msr cpsr_c, #Mode_SVC|I_Bit|F_Bit
bx lr
.section .text.isr, "ax"
/* exception handlers: undef, swi, padt, dabt, resv, irq, fiq */
.align 5
/*
 * vector_fiq: FIQ exception handler.  Saves the registers the C handler
 * may clobber, calls rt_hw_trap_fiq, and returns to the interrupted
 * instruction (lr - 4 is the standard FIQ return adjustment).
 */
.globl vector_fiq
vector_fiq:
stmfd sp!,{r0-r7,lr}
bl rt_hw_trap_fiq
ldmfd sp!,{r0-r7,lr}
subs pc,lr,#4
.globl rt_interrupt_enter
.globl rt_interrupt_leave
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.align 5
/*
 * vector_irq: IRQ exception handler.
 * Saves r0-r12/lr (plus the VFP context when hard-float is enabled),
 * brackets the C dispatcher rt_hw_trap_irq with
 * rt_interrupt_enter/leave, then either returns to the interrupted
 * context or falls into the deferred context switch when the scheduler
 * set rt_thread_switch_interrupt_flag during the ISR.
 */
.globl vector_irq
vector_irq:
stmfd sp!, {r0-r12,lr}
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
vstmdb sp!, {d0-d15} /* Store floating point registers */
vmrs r1, FPSCR
stmfd sp!,{r1}
vmrs r1, FPEXC
stmfd sp!,{r1}
#endif
bl rt_interrupt_enter
bl rt_hw_trap_irq
bl rt_interrupt_leave
@ if rt_thread_switch_interrupt_flag set, jump to
@ rt_hw_context_switch_interrupt_do and don't return
ldr r0, =rt_thread_switch_interrupt_flag
ldr r1, [r0]
cmp r1, #1
beq rt_hw_context_switch_interrupt_do
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
ldmfd sp!, {r1} /* Restore floating point registers */
vmsr FPEXC, r1
ldmfd sp!, {r1}
vmsr FPSCR, r1
vldmia sp!, {d0-d15}
#endif
ldmfd sp!, {r0-r12,lr}
subs pc, lr, #4 @ return to interrupted instruction, restore SPSR->CPSR
/*
 * rt_hw_context_switch_interrupt_do: finish a context switch requested
 * from inside vector_irq.  Entered in IRQ mode with r0 pointing at
 * rt_thread_switch_interrupt_flag and the IRQ frame (VFP + r0-r12,lr)
 * still on the IRQ stack.  Rebuilds the preempted thread's frame on its
 * own SVC stack, stores sp into the from-thread TCB, then restores the
 * to-thread's frame and resumes it.  Does not return to the caller.
 */
rt_hw_context_switch_interrupt_do:
mov r1, #0 @ clear flag
str r1, [r0]
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
ldmfd sp!, {r1} /* Restore floating point registers */
vmsr FPEXC, r1
ldmfd sp!, {r1}
vmsr FPSCR, r1
vldmia sp!, {d0-d15}
#endif
mov r1, sp @ r1 point to {r0-r3} in stack
add sp, sp, #4*4 @ skip saved r0-r3; they are re-read via r1 below
ldmfd sp!, {r4-r12,lr}@ reload saved registers
mrs r0, spsr @ get cpsr of interrupt thread
sub r2, lr, #4 @ save old task's pc to r2
@ Switch to SVC mode with no interrupt.
msr cpsr_c, #I_Bit|F_Bit|Mode_SVC
stmfd sp!, {r2} @ push old task's pc
stmfd sp!, {r4-r12,lr}@ push old task's lr,r12-r4
ldmfd r1, {r1-r4} @ restore r0-r3 of the interrupt thread
stmfd sp!, {r1-r4} @ push old task's r0-r3
stmfd sp!, {r0} @ push old task's cpsr
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
vstmdb sp!, {d0-d15} /* Store floating point registers */
vmrs r1, FPSCR
stmfd sp!,{r1}
vmrs r1, FPEXC
stmfd sp!,{r1}
#endif
ldr r4, =rt_interrupt_from_thread
ldr r5, [r4]
str sp, [r5] @ store sp in preempted tasks's TCB
ldr r6, =rt_interrupt_to_thread
ldr r7, [r6]
ldr sp, [r7] @ get new task's stack pointer
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
ldmfd sp!, {r1} /* Restore floating point registers */
vmsr FPEXC, r1
ldmfd sp!, {r1}
vmsr FPSCR, r1
vldmia sp!, {d0-d15}
#endif
ldmfd sp!, {r4} @ pop new task's cpsr to spsr
msr spsr_cxsf, r4
ldmfd sp!, {r0-r12,lr,pc}^ @ pop new task's r0-r12,lr & pc, copy spsr to cpsr
/*
 * push_svc_reg: build a struct rt_hw_exp_stack snapshot of the faulting
 * context on the current exception-mode stack, then switch to SVC mode.
 * On exit r0 points at the frame (r0-r12, SVC sp/lr, exception pc, cpsr).
 * Used by the fatal-trap vectors below; clobbers r0 and r6.
 */
.macro push_svc_reg
sub sp, sp, #17 * 4 @/* Sizeof(struct rt_hw_exp_stack) */
stmia sp, {r0 - r12} @/* Calling r0-r12 */
mov r0, sp
mrs r6, spsr @/* Save CPSR */
str lr, [r0, #15*4] @/* Push PC */
str r6, [r0, #16*4] @/* Push CPSR */
cps #Mode_SVC
str sp, [r0, #13*4] @/* Save calling SP */
str lr, [r0, #14*4] @/* Save calling PC */
.endm
.align 5
/* vector_swi: software interrupt -- capture the context and hang in the
 * C trap reporter (these traps are treated as fatal: `b .` never returns). */
.globl vector_swi
vector_swi:
push_svc_reg
bl rt_hw_trap_swi
b .
.align 5
/* vector_undef: undefined-instruction trap (fatal). */
.globl vector_undef
vector_undef:
push_svc_reg
bl rt_hw_trap_undef
b .
.align 5
/* vector_pabt: prefetch abort (fatal). */
.globl vector_pabt
vector_pabt:
push_svc_reg
bl rt_hw_trap_pabt
b .
.align 5
/* vector_dabt: data abort (fatal). */
.globl vector_dabt
vector_dabt:
push_svc_reg
bl rt_hw_trap_dabt
b .
.align 5
/* vector_resv: reserved vector (fatal). */
.globl vector_resv
vector_resv:
push_svc_reg
bl rt_hw_trap_resv
b .
|
vandercookking/h7_device_RTT
| 2,348
|
rt-thread/libcpu/arm/armv6/context_gcc.S
|
/*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2011-01-13 weety copy from mini2440
*/
/*!
* \addtogroup ARMv6
*/
/*@{*/
#include <rtconfig.h>
#define NOINT 0xc0
#define FPEXC_EN (1 << 30) /* VFP enable bit */
/*
 * rt_base_t rt_hw_interrupt_disable();
 * Returns the caller's CPSR in r0 and masks both IRQ and FIQ (cpsid if).
 * Pair with rt_hw_interrupt_enable(level) to restore.
 */
.globl rt_hw_interrupt_disable
rt_hw_interrupt_disable:
mrs r0, cpsr @ r0 = previous CPSR (the "level" handed back to the caller)
cpsid if @ atomically mask IRQ + FIQ
bx lr
/*
 * void rt_hw_interrupt_enable(rt_base_t level);
 * Restores the control bits of CPSR from a value previously returned by
 * rt_hw_interrupt_disable (cpsr_c writes only the low control byte).
 */
.globl rt_hw_interrupt_enable
rt_hw_interrupt_enable:
msr cpsr_c, r0
bx lr
/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
 * r0 --> from (address of the outgoing thread's saved-sp slot)
 * r1 --> to   (address of the incoming thread's saved-sp slot)
 * Voluntary switch: pushes a full frame (cpsr, r0-r12, lr, pc) on the
 * outgoing stack, saves sp into *from, then unwinds *to's frame.
 * The Thumb-bit fixup below records the return ISA state in the saved cpsr.
 */
.globl rt_hw_context_switch
rt_hw_context_switch:
stmfd sp!, {lr} @ push pc (lr should be pushed in place of PC)
stmfd sp!, {r0-r12, lr} @ push lr & register file
mrs r4, cpsr
tst lr, #0x01
orrne r4, r4, #0x20 @ it's thumb code
stmfd sp!, {r4} @ push cpsr
str sp, [r0] @ store sp in preempted tasks TCB
ldr sp, [r1] @ get new task stack pointer
ldmfd sp!, {r4} @ pop new task cpsr to spsr
msr spsr_cxsf, r4
_do_switch:
ldmfd sp!, {r0-r12, lr, pc}^ @ pop new task r0-r12, lr & pc, copy spsr to cpsr
/*
 * void rt_hw_context_switch_to(rt_uint32 to);
 * r0 --> to (address of the first thread's saved-sp slot)
 * First-ever switch: nothing to save, just adopt the target stack and
 * unwind its initial frame.  Forces ARM state before the final msr so
 * the ldmfd ^ form executes predictably.
 */
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
ldr sp, [r0] @ get new task stack pointer
ldmfd sp!, {r4} @ pop new task spsr
msr spsr_cxsf, r4
bic r4, r4, #0x20 @ must be ARM mode
msr cpsr_cxsf, r4
ldmfd sp!, {r0-r12, lr, pc}^ @ pop new task r0-r12, lr & pc
/*
 * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
 * Requests a deferred switch: records from/to thread pointers and raises
 * rt_thread_switch_interrupt_flag so the IRQ exit path performs the real
 * switch.  If a request is already pending, only the destination is
 * updated (the original "from" thread is kept).
 */
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
ldr r2, =rt_thread_switch_interrupt_flag
ldr r3, [r2]
cmp r3, #1
beq _reswitch
mov r3, #1 @ set rt_thread_switch_interrupt_flag to 1
str r3, [r2]
ldr r2, =rt_interrupt_from_thread @ set rt_interrupt_from_thread
str r0, [r2]
_reswitch:
ldr r2, =rt_interrupt_to_thread @ set rt_interrupt_to_thread
str r1, [r2]
bx lr
|
vandercookking/h7_device_RTT
| 3,005
|
rt-thread/libcpu/arm/armv6/arm_entry_gcc.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2014-11-07 weety first version
*/
#include <rtconfig.h>
#include "armv6.h"
//#define DEBUG
/*
 * Debug-print macros, compiled only under -DDEBUG.  Each preserves the
 * caller-saved registers, points r0 at an inline "UNDEF: <str>" format
 * string (the string is embedded right after the bl and skipped with a
 * branch), calls rt_kprintf, and restores the registers.
 */
/* PRINT: print a plain string. */
.macro PRINT, str
#ifdef DEBUG
stmfd sp!, {r0-r3, ip, lr}
add r0, pc, #4 @ r0 = address of the .asciz literal below
bl rt_kprintf
b 1f
.asciz "UNDEF: \str\n"
.balign 4
1: ldmfd sp!, {r0-r3, ip, lr}
#endif
.endm
/* PRINT1: print a format string with one argument. */
.macro PRINT1, str, arg
#ifdef DEBUG
stmfd sp!, {r0-r3, ip, lr}
mov r1, \arg
add r0, pc, #4
bl rt_kprintf
b 1f
.asciz "UNDEF: \str\n"
.balign 4
1: ldmfd sp!, {r0-r3, ip, lr}
#endif
.endm
/* PRINT3: print a format string with three arguments. */
.macro PRINT3, str, arg1, arg2, arg3
#ifdef DEBUG
stmfd sp!, {r0-r3, ip, lr}
mov r3, \arg3
mov r2, \arg2
mov r1, \arg1
add r0, pc, #4
bl rt_kprintf
b 1f
.asciz "UNDEF: \str\n"
.balign 4
1: ldmfd sp!, {r0-r3, ip, lr}
#endif
.endm
/* get_current_thread: load the value of rt_current_thread into \rd
 * (double indirection through the literal pool entry below). */
.macro get_current_thread, rd
ldr \rd, .current_thread
ldr \rd, [\rd]
.endm
.current_thread:
.word rt_current_thread
#ifdef RT_USING_NEON
.align 6
/* is the neon instuction on arm mode? */
/* Table of (mask, opcode) pairs used to recognise ARM-mode NEON
 * instructions; terminated by a zero mask/opcode pair. */
.neon_opcode:
.word 0xfe000000 @ mask
.word 0xf2000000 @ opcode
.word 0xff100000 @ mask
.word 0xf4000000 @ opcode
.word 0x00000000 @ end mask
.word 0x00000000 @ end opcode
#endif
/* undefined instruction exception processing */
/*
 * undef_entry: classify a faulting (undefined) instruction.
 * Expects r0 = the faulting instruction word (set up by the caller --
 * not visible in this excerpt; TODO confirm).  NEON instructions are
 * detected via the .neon_opcode table and routed to vfp_entry; otherwise
 * the coprocessor number is extracted from bits [11:8] and dispatched
 * through the computed-goto table below (add pc, pc, cp*4 -- the pc
 * read is this instruction + 8, which lands on the CP0 row).
 * NOTE(review): every row currently just returns (mov pc, lr); rows for
 * CP10/CP11 presumably branch to a VFP handler in the full file.
 */
.globl undef_entry
undef_entry:
PRINT1 "r0=0x%08x", r0
PRINT1 "r2=0x%08x", r2
PRINT1 "r9=0x%08x", r9
PRINT1 "sp=0x%08x", sp
#ifdef RT_USING_NEON
ldr r6, .neon_opcode
__check_neon_instruction:
ldr r7, [r6], #4 @ load mask value
cmp r7, #0 @ end mask?
beq __check_vfp_instruction
and r8, r0, r7
ldr r7, [r6], #4 @ load opcode value
cmp r8, r7 @ is NEON instruction?
bne __check_neon_instruction
b vfp_entry
__check_vfp_instruction:
#endif
tst r0, #0x08000000 @ only CDP/CPRT/LDC/STC instruction has bit 27
tstne r0, #0x04000000 @ bit 26 set on both ARM and Thumb-2 instruction
moveq pc, lr @ no vfp coprocessor instruction, return
get_current_thread r10
and r8, r0, #0x00000f00 @ get coprocessor number
PRINT1 "CP=0x%08x", r8
add pc, pc, r8, lsr #6 @ jump table: (cp << 8) >> 6 == cp * 4
nop
mov pc, lr @ CP0
mov pc, lr @ CP1
mov pc, lr @ CP2
mov pc, lr @ CP3
mov pc, lr @ CP4
mov pc, lr @ CP5
mov pc, lr @ CP6
mov pc, lr @ CP7
mov pc, lr @ CP8
mov pc, lr @ CP9
mov pc, lr @ CP10 VFP
mov pc, lr @ CP11 VFP
mov pc, lr @ CP12
mov pc, lr @ CP13
mov pc, lr @ CP14 DEBUG
mov pc, lr @ CP15 SYS CONTROL
|
vandercookking/h7_device_RTT
| 2,338
|
rt-thread/libcpu/arm/AT91SAM7X/context_gcc.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2006-03-13 Bernard first version
*/
/*!
* \addtogroup xgs3c4510
*/
/*@{*/
#define NOINT 0xc0
/*
 * rt_base_t rt_hw_interrupt_disable();
 * Returns the previous CPSR in r0 and sets the I and F bits (NOINT = 0xc0)
 * to mask IRQ and FIQ.
 */
.globl rt_hw_interrupt_disable
rt_hw_interrupt_disable:
mrs r0, cpsr @ r0 = previous CPSR, returned to the caller
orr r1, r0, #NOINT
msr cpsr_c, r1 @ write control byte with IRQ+FIQ masked
mov pc, lr
/*
 * void rt_hw_interrupt_enable(rt_base_t level);
 * Restores the full CPSR from a value previously returned by
 * rt_hw_interrupt_disable.
 */
.globl rt_hw_interrupt_enable
rt_hw_interrupt_enable:
msr cpsr, r0
mov pc, lr
/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
 * r0 --> from (address of outgoing thread's saved-sp slot)
 * r1 --> to   (address of incoming thread's saved-sp slot)
 * Voluntary switch: frame layout is spsr, cpsr, r0-r12, lr, pc (pc slot
 * holds the return address, i.e. this call's lr).
 */
.globl rt_hw_context_switch
rt_hw_context_switch:
stmfd sp!, {lr} @ push pc (lr should be pushed in place of PC)
stmfd sp!, {r0-r12, lr} @ push lr & register file
mrs r4, cpsr
stmfd sp!, {r4} @ push cpsr
mrs r4, spsr
stmfd sp!, {r4} @ push spsr
str sp, [r0] @ store sp in preempted tasks TCB
ldr sp, [r1] @ get new task stack pointer
ldmfd sp!, {r4} @ pop new task spsr
msr spsr_cxsf, r4
ldmfd sp!, {r4} @ pop new task cpsr
msr cpsr_cxsf, r4
ldmfd sp!, {r0-r12, lr, pc} @ pop new task r0-r12, lr & pc
/*
 * void rt_hw_context_switch_to(rt_uint32 to);
 * r0 --> to (address of the first thread's saved-sp slot)
 * First-ever switch: nothing to save; adopt the target stack and unwind
 * its initial frame (spsr, cpsr, r0-r12, lr, pc).
 */
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
ldr sp, [r0] @ get new task stack pointer
ldmfd sp!, {r4} @ pop new task spsr
msr spsr_cxsf, r4
ldmfd sp!, {r4} @ pop new task cpsr
msr cpsr_cxsf, r4
ldmfd sp!, {r0-r12, lr, pc} @ pop new task r0-r12, lr & pc
/*
 * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
 * Requests a deferred switch from interrupt context: raise the flag and
 * record from/to; the IRQ exit path performs the actual switch.  If a
 * request is already pending, only the destination is updated.
 */
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
ldr r2, =rt_thread_switch_interrupt_flag
ldr r3, [r2]
cmp r3, #1
beq _reswitch
mov r3, #1 @ set rt_thread_switch_interrupt_flag to 1
str r3, [r2]
ldr r2, =rt_interrupt_from_thread @ set rt_interrupt_from_thread
str r0, [r2]
_reswitch:
ldr r2, =rt_interrupt_to_thread @ set rt_interrupt_to_thread
str r1, [r2]
mov pc, lr
|
vandercookking/h7_device_RTT
| 6,809
|
rt-thread/libcpu/arm/AT91SAM7X/start_gcc.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2006-08-31 Bernard first version
*/
/* Internal Memory Base Addresses */
.equ FLASH_BASE, 0x00100000
.equ RAM_BASE, 0x00200000
/* Stack Configuration */
.equ TOP_STACK, 0x00204000
.equ UND_STACK_SIZE, 0x00000100
.equ SVC_STACK_SIZE, 0x00000400
.equ ABT_STACK_SIZE, 0x00000100
.equ FIQ_STACK_SIZE, 0x00000100
.equ IRQ_STACK_SIZE, 0x00000100
.equ USR_STACK_SIZE, 0x00000004
/* ARM architecture definitions */
.equ MODE_USR, 0x10
.equ MODE_FIQ, 0x11
.equ MODE_IRQ, 0x12
.equ MODE_SVC, 0x13
.equ MODE_ABT, 0x17
.equ MODE_UND, 0x1B
.equ MODE_SYS, 0x1F
.equ I_BIT, 0x80 /* when this bit is set, IRQ is disabled */
.equ F_BIT, 0x40 /* when this bit is set, FIQ is disabled */
.section .init, "ax"
.code 32
.align 0
/*
 * _start: ARM exception vector table placed at the image base.
 * Each slot is a pc-relative load through the literal words below so the
 * table keeps working if copied to address 0.
 */
.globl _start
_start:
b reset
ldr pc, _vector_undef
ldr pc, _vector_swi
ldr pc, _vector_pabt
ldr pc, _vector_dabt
nop /* reserved vector */
ldr pc, _vector_irq
ldr pc, _vector_fiq
/* Literal pool of handler addresses referenced by the vector slots. */
_vector_undef: .word vector_undef
_vector_swi: .word vector_swi
_vector_pabt: .word vector_pabt
_vector_dabt: .word vector_dabt
_vector_resv: .word vector_resv
_vector_irq: .word vector_irq
_vector_fiq: .word vector_fiq
/*
 * rtthread bss start and end
 * which are defined in linker script
 */
.globl _bss_start
_bss_start: .word __bss_start
.globl _bss_end
_bss_end: .word __bss_end
/* the system entry */
/*
 * reset: AT91SAM7X cold-boot entry.
 * Disables the watchdog, brings up the main oscillator and PLL, selects
 * the system clock, sets up the per-mode stacks, masks all AIC
 * interrupts, copies .data from flash to SRAM, clears .bss, runs C++
 * constructors, then jumps into rtthread_startup.  Never returns.
 */
reset:
/* disable watchdog (WDT_MR at 0xFFFFFD44) */
ldr r0, =0xFFFFFD40
ldr r1, =0x00008000
str r1, [r0, #0x04]
/* enable the main oscillator (PMC_MOR) */
ldr r0, =0xFFFFFC00
ldr r1, =0x00000601
str r1, [r0, #0x20]
/* wait for main oscillator to stabilize (PMC_SR.MOSCS) */
moscs_loop:
ldr r2, [r0, #0x68]
ands r2, r2, #1
beq moscs_loop
/* set up the PLL (PMC_PLLR) */
ldr r1, =0x00191C05
str r1, [r0, #0x2C]
/* wait for PLL to lock (PMC_SR.LOCK) */
pll_loop:
ldr r2, [r0, #0x68]
ands r2, r2, #0x04
beq pll_loop
/* select clock (PMC_MCKR) */
ldr r1, =0x00000007
str r1, [r0, #0x30]
#ifdef __FLASH_BUILD__
/* copy exception vectors into internal sram */
/*
mov r8, #RAM_BASE
ldr r9, =_start
ldmia r9!, {r0-r7}
stmia r8!, {r0-r7}
ldmia r9!, {r0-r6}
stmia r8!, {r0-r6}
*/
#endif
/* setup stack for each mode */
ldr r0, =TOP_STACK
/* set stack */
/* undefined instruction mode */
msr cpsr_c, #MODE_UND|I_BIT|F_BIT
mov sp, r0
sub r0, r0, #UND_STACK_SIZE
/* abort mode */
msr cpsr_c, #MODE_ABT|I_BIT|F_BIT
mov sp, r0
sub r0, r0, #ABT_STACK_SIZE
/* FIQ mode */
msr cpsr_c, #MODE_FIQ|I_BIT|F_BIT
mov sp, r0
sub r0, r0, #FIQ_STACK_SIZE
/* IRQ mode */
msr cpsr_c, #MODE_IRQ|I_BIT|F_BIT
mov sp, r0
sub r0, r0, #IRQ_STACK_SIZE
/* supervisor mode */
msr cpsr_c, #MODE_SVC|I_BIT|F_BIT
mov sp, r0
/* remap SRAM to 0x0000 */
/*
ldr r0, =0xFFFFFF00
mov r1, #0x01
str r1, [r0]
*/
/* mask all IRQs (AIC interrupt disable register) */
ldr r1, =0xFFFFF124
ldr r0, =0XFFFFFFFF
str r0, [r1]
/* copy .data to SRAM.
 * NOTE(review): this is a do-while loop -- the end test runs only after
 * the first copy, so one word is copied even when .data is empty.
 * Harmless for a non-empty .data section; confirm the linker symbols. */
ldr r1, =_sidata /* .data start in image */
ldr r2, =_edata /* .data end in image */
ldr r3, =_sdata /* sram data start */
data_loop:
ldr r0, [r1, #0]
str r0, [r3]
add r1, r1, #4
add r3, r3, #4
cmp r3, r2 /* check if data to clear */
blo data_loop /* loop until done */
/* clear .bss (while-loop: safe for an empty section) */
mov r0,#0 /* get a zero */
ldr r1,=__bss_start /* bss start */
ldr r2,=__bss_end /* bss end */
bss_loop:
cmp r1,r2 /* check if data to clear */
strlo r0,[r1],#4 /* clear 4 bytes */
blo bss_loop /* loop until done */
/* call C++ constructors of global objects */
ldr r0, =__ctors_start__
ldr r1, =__ctors_end__
ctor_loop:
cmp r0, r1
beq ctor_end
ldr r2, [r0], #4 /* r2 = next constructor pointer */
stmfd sp!, {r0-r1} /* ctor may clobber r0/r1 */
mov lr, pc
bx r2
ldmfd sp!, {r0-r1}
b ctor_loop
ctor_end:
/* start RT-Thread Kernel */
ldr pc, _rtthread_startup
_rtthread_startup: .word rtthread_startup
/* exception handlers */
/* Fatal exceptions spin in place (no trap reporting in this port). */
vector_undef: b vector_undef
vector_swi : b vector_swi
vector_pabt : b vector_pabt
vector_dabt : b vector_dabt
vector_resv : b vector_resv
.globl rt_interrupt_enter
.globl rt_interrupt_leave
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
/*
 * vector_irq: IRQ handler.  Saves r0-r12/lr, brackets the C dispatcher
 * with rt_interrupt_enter/leave, then either returns to the interrupted
 * context or falls into rt_hw_context_switch_interrupt_do when the
 * scheduler requested a switch during the ISR.
 */
vector_irq:
stmfd sp!, {r0-r12,lr}
bl rt_interrupt_enter
bl rt_hw_trap_irq
bl rt_interrupt_leave
/*
 * if rt_thread_switch_interrupt_flag set, jump to
 * rt_hw_context_switch_interrupt_do and don't return
 */
ldr r0, =rt_thread_switch_interrupt_flag
ldr r1, [r0]
cmp r1, #1
beq rt_hw_context_switch_interrupt_do
ldmfd sp!, {r0-r12,lr}
subs pc, lr, #4
/* vector_fiq: FIQ handler -- save, call the C trap handler, return. */
vector_fiq:
stmfd sp!,{r0-r7,lr}
bl rt_hw_trap_fiq
ldmfd sp!,{r0-r7,lr}
subs pc,lr,#4
/*
 * void rt_hw_context_switch_interrupt_do(rt_base_t flag)
 */
/*
 * Entered from vector_irq in IRQ mode with r0 pointing at the switch
 * flag and {r0-r12,lr} still on the IRQ stack.  Rebuilds the preempted
 * thread's frame on its own stack (via an spsr-mediated mode switch:
 * "ldr r0,=.+8 / movs pc,r0" jumps to the next instruction while copying
 * the modified spsr into cpsr), stores sp in the from-thread TCB, then
 * restores the to-thread's frame.  Does not return.
 */
rt_hw_context_switch_interrupt_do:
mov r1, #0 @ clear flag
str r1, [r0]
ldmfd sp!, {r0-r12,lr}@ reload saved registers
stmfd sp!, {r0-r3} @ save r0-r3
mov r1, sp
add sp, sp, #16 @ restore sp
sub r2, lr, #4 @ save old task's pc to r2
mrs r3, spsr @ disable interrupt
orr r0, r3, #I_BIT|F_BIT
msr spsr_c, r0
ldr r0, =.+8 @ switch to interrupted task's stack
movs pc, r0
stmfd sp!, {r2} @ push old task's pc
stmfd sp!, {r4-r12,lr}@ push old task's lr,r12-r4
mov r4, r1 @ Special optimised code below
mov r5, r3
ldmfd r4!, {r0-r3}
stmfd sp!, {r0-r3} @ push old task's r3-r0
stmfd sp!, {r5} @ push old task's psr
mrs r4, spsr
stmfd sp!, {r4} @ push old task's spsr
ldr r4, =rt_interrupt_from_thread
ldr r5, [r4]
str sp, [r5] @ store sp in preempted tasks's TCB
ldr r6, =rt_interrupt_to_thread
ldr r6, [r6]
ldr sp, [r6] @ get new task's stack pointer
ldmfd sp!, {r4} @ pop new task's spsr
msr SPSR_cxsf, r4
ldmfd sp!, {r4} @ pop new task's psr
msr CPSR_cxsf, r4
ldmfd sp!, {r0-r12,lr,pc} @ pop new task's r0-r12,lr & pc
|
vandercookking/h7_device_RTT
| 2,580
|
rt-thread/libcpu/arm/AT91SAM7X/context_rvds.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2009-01-20 Bernard first version
*/
NOINT EQU 0xc0 ; disable interrupt in psr
AREA |.text|, CODE, READONLY, ALIGN=2
ARM
REQUIRE8
PRESERVE8
;/*
; * rt_base_t rt_hw_interrupt_disable();
; */
;/* Returns the previous CPSR in r0 and masks IRQ+FIQ (NOINT = I|F bits). */
rt_hw_interrupt_disable PROC
EXPORT rt_hw_interrupt_disable
MRS r0, cpsr ; r0 = previous CPSR, returned to the caller
ORR r1, r0, #NOINT
MSR cpsr_c, r1 ; write control byte with IRQ+FIQ masked
BX lr
ENDP
;/*
; * void rt_hw_interrupt_enable(rt_base_t level);
; */
;/* Restores the CPSR control byte saved by rt_hw_interrupt_disable. */
rt_hw_interrupt_enable PROC
EXPORT rt_hw_interrupt_enable
MSR cpsr_c, r0
BX lr
ENDP
;/*
; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
; * r0 --> from
; * r1 --> to
; */
;/* Voluntary switch: frame layout is spsr, cpsr, r0-r12, lr, pc (the pc
; * slot holds this call's lr).  Saves sp into *from, unwinds *to. */
rt_hw_context_switch PROC
EXPORT rt_hw_context_switch
STMFD sp!, {lr} ; push pc (lr should be pushed in place of PC)
STMFD sp!, {r0-r12, lr} ; push lr & register file
MRS r4, cpsr
STMFD sp!, {r4} ; push cpsr
MRS r4, spsr
STMFD sp!, {r4} ; push spsr
STR sp, [r0] ; store sp in preempted tasks TCB
LDR sp, [r1] ; get new task stack pointer
LDMFD sp!, {r4} ; pop new task spsr
MSR spsr_cxsf, r4
LDMFD sp!, {r4} ; pop new task cpsr
MSR cpsr_cxsf, r4
LDMFD sp!, {r0-r12, lr, pc} ; pop new task r0-r12, lr & pc
ENDP
;/*
; * void rt_hw_context_switch_to(rt_uint32 to);
; * r0 --> to
; */
;/* First-ever switch: nothing to save; adopt the target stack and
; * unwind its initial frame. */
rt_hw_context_switch_to PROC
EXPORT rt_hw_context_switch_to
LDR sp, [r0] ; get new task stack pointer
LDMFD sp!, {r4} ; pop new task spsr
MSR spsr_cxsf, r4
LDMFD sp!, {r4} ; pop new task cpsr
MSR cpsr_cxsf, r4
LDMFD sp!, {r0-r12, lr, pc} ; pop new task r0-r12, lr & pc
ENDP
;/*
; * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
; */
;/* Requests a deferred switch: raise the flag and record from/to; the
; * IRQ exit path performs the actual switch.  A pending request only
; * has its destination updated. */
IMPORT rt_thread_switch_interrupt_flag
IMPORT rt_interrupt_from_thread
IMPORT rt_interrupt_to_thread
rt_hw_context_switch_interrupt PROC
EXPORT rt_hw_context_switch_interrupt
LDR r2, =rt_thread_switch_interrupt_flag
LDR r3, [r2]
CMP r3, #1
BEQ _reswitch
MOV r3, #1 ; set rt_thread_switch_interrupt_flag to 1
STR r3, [r2]
LDR r2, =rt_interrupt_from_thread ; set rt_interrupt_from_thread
STR r0, [r2]
_reswitch
LDR r2, =rt_interrupt_to_thread ; set rt_interrupt_to_thread
STR r1, [r2]
BX lr
ENDP
END
|
vandercookking/h7_device_RTT
| 17,179
|
rt-thread/libcpu/arm/AT91SAM7X/start_rvds.S
|
;/*****************************************************************************/
;/* SAM7.S: Startup file for Atmel AT91SAM7 device series */
;/*****************************************************************************/
;/* <<< Use Configuration Wizard in Context Menu >>> */
;/*****************************************************************************/
;/* This file is part of the uVision/ARM development tools. */
;/* Copyright (c) 2005-2006 Keil Software. All rights reserved. */
;/* This software may only be used under the terms of a valid, current, */
;/* end user licence from KEIL for a compatible version of KEIL software */
;/* development tools. Nothing else gives you the right to use this software. */
;/*****************************************************************************/
;/*
; * The SAM7.S code is executed after CPU Reset. This file may be
; * translated with the following SET symbols. In uVision these SET
; * symbols are entered under Options - ASM - Define.
; *
; * REMAP: when set the startup code remaps exception vectors from
; * on-chip RAM to address 0.
; *
; * RAM_INTVEC: when set the startup code copies exception vectors
; * from on-chip Flash to on-chip RAM.
; */
; Standard definitions of Mode bits and Interrupt (I & F) flags in PSRs
; 2009-12-28 MingBai Bug fix (USR mode stack removed).
; 2009-12-29 MingBai Merge svc and irq stack, add abort handler.
Mode_USR EQU 0x10
Mode_FIQ EQU 0x11
Mode_IRQ EQU 0x12
Mode_SVC EQU 0x13
Mode_ABT EQU 0x17
Mode_UND EQU 0x1B
Mode_SYS EQU 0x1F
I_Bit EQU 0x80 ; when I bit is set, IRQ is disabled
F_Bit EQU 0x40 ; when F bit is set, FIQ is disabled
; Internal Memory Base Addresses
FLASH_BASE EQU 0x00100000
RAM_BASE EQU 0x00200000
;// <h> Stack Configuration (Stack Sizes in Bytes)
;// <o0> Undefined Mode <0x0-0xFFFFFFFF:8>
;// <o1> Supervisor Mode <0x0-0xFFFFFFFF:8>
;// <o2> Abort Mode <0x0-0xFFFFFFFF:8>
;// <o3> Fast Interrupt Mode <0x0-0xFFFFFFFF:8>
;// <o4> Interrupt Mode <0x0-0xFFFFFFFF:8>
;// <o5> User/System Mode <0x0-0xFFFFFFFF:8>
;// </h>
UND_Stack_Size EQU 0x00000000
SVC_Stack_Size EQU 0x00000000
ABT_Stack_Size EQU 0x00000000
FIQ_Stack_Size EQU 0x00000000
IRQ_Stack_Size EQU 0x00000100
USR_Stack_Size EQU 0x00000000
ISR_Stack_Size EQU (UND_Stack_Size + SVC_Stack_Size + ABT_Stack_Size + \
FIQ_Stack_Size + IRQ_Stack_Size)
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE USR_Stack_Size
__initial_sp SPACE ISR_Stack_Size
Stack_Top
;// <h> Heap Configuration
;// <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF>
;// </h>
Heap_Size EQU 0x00000000
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
; Reset Controller (RSTC) definitions
RSTC_BASE EQU 0xFFFFFD00 ; RSTC Base Address
RSTC_MR EQU 0x08 ; RSTC_MR Offset
;/*
;// <e> Reset Controller (RSTC)
;// <o1.0> URSTEN: User Reset Enable
;// <i> Enables NRST Pin to generate Reset
;// <o1.8..11> ERSTL: External Reset Length <0-15>
;// <i> External Reset Time in 2^(ERSTL+1) Slow Clock Cycles
;// </e>
;*/
RSTC_SETUP EQU 1
RSTC_MR_Val EQU 0xA5000401
; Embedded Flash Controller (EFC) definitions
EFC_BASE EQU 0xFFFFFF00 ; EFC Base Address
EFC0_FMR EQU 0x60 ; EFC0_FMR Offset
EFC1_FMR EQU 0x70 ; EFC1_FMR Offset
;// <e> Embedded Flash Controller 0 (EFC0)
;// <o1.16..23> FMCN: Flash Microsecond Cycle Number <0-255>
;// <i> Number of Master Clock Cycles in 1us
;// <o1.8..9> FWS: Flash Wait State
;// <0=> Read: 1 cycle / Write: 2 cycles
;// <1=> Read: 2 cycle / Write: 3 cycles
;// <2=> Read: 3 cycle / Write: 4 cycles
;// <3=> Read: 4 cycle / Write: 4 cycles
;// </e>
EFC0_SETUP EQU 1
EFC0_FMR_Val EQU 0x00320100
;// <e> Embedded Flash Controller 1 (EFC1)
;// <o1.16..23> FMCN: Flash Microsecond Cycle Number <0-255>
;// <i> Number of Master Clock Cycles in 1us
;// <o1.8..9> FWS: Flash Wait State
;// <0=> Read: 1 cycle / Write: 2 cycles
;// <1=> Read: 2 cycle / Write: 3 cycles
;// <2=> Read: 3 cycle / Write: 4 cycles
;// <3=> Read: 4 cycle / Write: 4 cycles
;// </e>
EFC1_SETUP EQU 0
EFC1_FMR_Val EQU 0x00320100
; Watchdog Timer (WDT) definitions
WDT_BASE EQU 0xFFFFFD40 ; WDT Base Address
WDT_MR EQU 0x04 ; WDT_MR Offset
;// <e> Watchdog Timer (WDT)
;// <o1.0..11> WDV: Watchdog Counter Value <0-4095>
;// <o1.16..27> WDD: Watchdog Delta Value <0-4095>
;// <o1.12> WDFIEN: Watchdog Fault Interrupt Enable
;// <o1.13> WDRSTEN: Watchdog Reset Enable
;// <o1.14> WDRPROC: Watchdog Reset Processor
;// <o1.28> WDDBGHLT: Watchdog Debug Halt
;// <o1.29> WDIDLEHLT: Watchdog Idle Halt
;// <o1.15> WDDIS: Watchdog Disable
;// </e>
WDT_SETUP EQU 1
WDT_MR_Val EQU 0x00008000
; Power Mangement Controller (PMC) definitions
PMC_BASE EQU 0xFFFFFC00 ; PMC Base Address
PMC_MOR EQU 0x20 ; PMC_MOR Offset
PMC_MCFR EQU 0x24 ; PMC_MCFR Offset
PMC_PLLR EQU 0x2C ; PMC_PLLR Offset
PMC_MCKR EQU 0x30 ; PMC_MCKR Offset
PMC_SR EQU 0x68 ; PMC_SR Offset
PMC_MOSCEN EQU (1<<0) ; Main Oscillator Enable
PMC_OSCBYPASS EQU (1<<1) ; Main Oscillator Bypass
PMC_OSCOUNT EQU (0xFF<<8) ; Main OScillator Start-up Time
PMC_DIV EQU (0xFF<<0) ; PLL Divider
PMC_PLLCOUNT EQU (0x3F<<8) ; PLL Lock Counter
PMC_OUT EQU (0x03<<14) ; PLL Clock Frequency Range
PMC_MUL EQU (0x7FF<<16) ; PLL Multiplier
PMC_USBDIV EQU (0x03<<28) ; USB Clock Divider
PMC_CSS EQU (3<<0) ; Clock Source Selection
PMC_PRES EQU (7<<2) ; Prescaler Selection
PMC_MOSCS EQU (1<<0) ; Main Oscillator Stable
PMC_LOCK EQU (1<<2) ; PLL Lock Status
PMC_MCKRDY EQU (1<<3) ; Master Clock Status
;// <e> Power Mangement Controller (PMC)
;// <h> Main Oscillator
;// <o1.0> MOSCEN: Main Oscillator Enable
;// <o1.1> OSCBYPASS: Oscillator Bypass
;// <o1.8..15> OSCCOUNT: Main Oscillator Startup Time <0-255>
;// </h>
;// <h> Phase Locked Loop (PLL)
;// <o2.0..7> DIV: PLL Divider <0-255>
;// <o2.16..26> MUL: PLL Multiplier <0-2047>
;// <i> PLL Output is multiplied by MUL+1
;// <o2.14..15> OUT: PLL Clock Frequency Range
;// <0=> 80..160MHz <1=> Reserved
;// <2=> 150..220MHz <3=> Reserved
;// <o2.8..13> PLLCOUNT: PLL Lock Counter <0-63>
;// <o2.28..29> USBDIV: USB Clock Divider
;// <0=> None <1=> 2 <2=> 4 <3=> Reserved
;// </h>
;// <o3.0..1> CSS: Clock Source Selection
;// <0=> Slow Clock
;// <1=> Main Clock
;// <2=> Reserved
;// <3=> PLL Clock
;// <o3.2..4> PRES: Prescaler
;// <0=> None
;// <1=> Clock / 2 <2=> Clock / 4
;// <3=> Clock / 8 <4=> Clock / 16
;// <5=> Clock / 32 <6=> Clock / 64
;// <7=> Reserved
;// </e>
PMC_SETUP EQU 1
PMC_MOR_Val EQU 0x00000601
PMC_PLLR_Val EQU 0x00191C05
PMC_MCKR_Val EQU 0x00000007
PRESERVE8
; Area Definition and Entry Point
; Startup Code must be linked first at Address at which it expects to run.
AREA RESET, CODE, READONLY
ARM
; Exception Vectors
; Mapped to Address 0.
; Absolute addressing mode must be used.
; Dummy Handlers are implemented as infinite loops which can be modified.
; Each slot loads pc from the DCD literal table below so the table still
; works after being copied to RAM.  Prefetch/data aborts share one
; handler; undef/SWI/FIQ spin in place.
Vectors LDR PC,Reset_Addr
LDR PC,Undef_Addr
LDR PC,SWI_Addr
LDR PC,PAbt_Addr
LDR PC,DAbt_Addr
NOP ; Reserved Vector
LDR PC,IRQ_Addr
LDR PC,FIQ_Addr
Reset_Addr DCD Reset_Handler
Undef_Addr DCD Undef_Handler
SWI_Addr DCD SWI_Handler
PAbt_Addr DCD PAbt_Handler
DAbt_Addr DCD DAbt_Handler
DCD 0 ; Reserved Address
IRQ_Addr DCD IRQ_Handler
FIQ_Addr DCD FIQ_Handler
Undef_Handler B Undef_Handler
SWI_Handler B SWI_Handler
PAbt_Handler B Abort_Handler
DAbt_Handler B Abort_Handler
FIQ_Handler B FIQ_Handler
; Reset Handler
; Configures RSTC/EFC/WDT/PMC per the EQU values above, optionally copies
; the vectors to RAM and remaps RAM to 0, sets up the per-mode stacks
; (all sizes 0 except IRQ in this configuration, so the modes share
; Stack_Top), then branches into the C runtime entry __main.
EXPORT Reset_Handler
Reset_Handler
; Setup RSTC
IF RSTC_SETUP != 0
LDR R0, =RSTC_BASE
LDR R1, =RSTC_MR_Val
STR R1, [R0, #RSTC_MR]
ENDIF
; Setup EFC0
IF EFC0_SETUP != 0
LDR R0, =EFC_BASE
LDR R1, =EFC0_FMR_Val
STR R1, [R0, #EFC0_FMR]
ENDIF
; Setup EFC1
IF EFC1_SETUP != 0
LDR R0, =EFC_BASE
LDR R1, =EFC1_FMR_Val
STR R1, [R0, #EFC1_FMR]
ENDIF
; Setup WDT
IF WDT_SETUP != 0
LDR R0, =WDT_BASE
LDR R1, =WDT_MR_Val
STR R1, [R0, #WDT_MR]
ENDIF
; Setup PMC
IF PMC_SETUP != 0
LDR R0, =PMC_BASE
; Setup Main Oscillator
LDR R1, =PMC_MOR_Val
STR R1, [R0, #PMC_MOR]
; Wait until Main Oscillator is stablilized
IF (PMC_MOR_Val:AND:PMC_MOSCEN) != 0
MOSCS_Loop LDR R2, [R0, #PMC_SR]
ANDS R2, R2, #PMC_MOSCS
BEQ MOSCS_Loop
ENDIF
; Setup the PLL
IF (PMC_PLLR_Val:AND:PMC_MUL) != 0
LDR R1, =PMC_PLLR_Val
STR R1, [R0, #PMC_PLLR]
; Wait until PLL is stabilized
PLL_Loop LDR R2, [R0, #PMC_SR]
ANDS R2, R2, #PMC_LOCK
BEQ PLL_Loop
ENDIF
; Select Clock (two-step write: source first, then full MCKR value,
; waiting for MCKRDY after each write as the datasheet requires)
IF (PMC_MCKR_Val:AND:PMC_CSS) == 1 ; Main Clock Selected
LDR R1, =PMC_MCKR_Val
AND R1, #PMC_CSS
STR R1, [R0, #PMC_MCKR]
WAIT_Rdy1 LDR R2, [R0, #PMC_SR]
ANDS R2, R2, #PMC_MCKRDY
BEQ WAIT_Rdy1
LDR R1, =PMC_MCKR_Val
STR R1, [R0, #PMC_MCKR]
WAIT_Rdy2 LDR R2, [R0, #PMC_SR]
ANDS R2, R2, #PMC_MCKRDY
BEQ WAIT_Rdy2
ELIF (PMC_MCKR_Val:AND:PMC_CSS) == 3 ; PLL Clock Selected
LDR R1, =PMC_MCKR_Val
AND R1, #PMC_PRES
STR R1, [R0, #PMC_MCKR]
WAIT_Rdy1 LDR R2, [R0, #PMC_SR]
ANDS R2, R2, #PMC_MCKRDY
BEQ WAIT_Rdy1
LDR R1, =PMC_MCKR_Val
STR R1, [R0, #PMC_MCKR]
WAIT_Rdy2 LDR R2, [R0, #PMC_SR]
ANDS R2, R2, #PMC_MCKRDY
BEQ WAIT_Rdy2
ENDIF ; Select Clock
ENDIF ; PMC_SETUP
; Copy Exception Vectors to Internal RAM
IF :DEF:RAM_INTVEC
ADR R8, Vectors ; Source
LDR R9, =RAM_BASE ; Destination
LDMIA R8!, {R0-R7} ; Load Vectors
STMIA R9!, {R0-R7} ; Store Vectors
LDMIA R8!, {R0-R7} ; Load Handler Addresses
STMIA R9!, {R0-R7} ; Store Handler Addresses
ENDIF
; Remap on-chip RAM to address 0
MC_BASE EQU 0xFFFFFF00 ; MC Base Address
MC_RCR EQU 0x00 ; MC_RCR Offset
IF :DEF:REMAP
LDR R0, =MC_BASE
MOV R1, #1
STR R1, [R0, #MC_RCR] ; Remap
ENDIF
; Setup Stack for each mode
LDR R0, =Stack_Top
; Enter Undefined Instruction Mode and set its Stack Pointer
MSR CPSR_c, #Mode_UND:OR:I_Bit:OR:F_Bit
MOV SP, R0
;SUB R0, R0, #UND_Stack_Size
; Enter Abort Mode and set its Stack Pointer
MSR CPSR_c, #Mode_ABT:OR:I_Bit:OR:F_Bit
MOV SP, R0
;SUB R0, R0, #ABT_Stack_Size
; Enter FIQ Mode and set its Stack Pointer
MSR CPSR_c, #Mode_FIQ:OR:I_Bit:OR:F_Bit
MOV SP, R0
;SUB R0, R0, #FIQ_Stack_Size
; Enter IRQ Mode and set its Stack Pointer
MSR CPSR_c, #Mode_IRQ:OR:I_Bit:OR:F_Bit
MOV SP, R0
;SUB R0, R0, #IRQ_Stack_Size
; Enter Supervisor Mode and set its Stack Pointer
MSR CPSR_c, #Mode_SVC:OR:I_Bit:OR:F_Bit
MOV SP, R0
; SUB R0, R0, #SVC_Stack_Size
; Enter User Mode and set its Stack Pointer
; MSR CPSR_c, #Mode_USR
IF :DEF:__MICROLIB
EXPORT __initial_sp
ELSE
; No usr mode stack here.
;MOV SP, R0
;SUB SL, SP, #USR_Stack_Size
ENDIF
; Enter the C code
IMPORT __main
LDR R0, =__main
BX R0
IMPORT rt_interrupt_enter
IMPORT rt_interrupt_leave
IMPORT rt_thread_switch_interrupt_flag
IMPORT rt_interrupt_from_thread
IMPORT rt_interrupt_to_thread
IMPORT rt_hw_trap_irq
IMPORT rt_hw_trap_abort
IMPORT rt_interrupt_nest
;-----------------------------------------------------------------------
; Abort_Handler: common prefetch/data abort entry.
; Saves the volatile context; if the abort happened while already inside
; an IRQ (rt_interrupt_nest > 0) the system cannot recover, so it halts.
; Otherwise it reports the abort through the C trap handler and falls
; into the common SWITCH path in IRQ_Handler.
;-----------------------------------------------------------------------
Abort_Handler   PROC
                EXPORT Abort_Handler
                stmfd sp!, {r0-r12,lr}          ; save r0-r12 and lr of the aborted context
                LDR r0, =rt_interrupt_nest
                LDR r1, [r0]
                CMP r1, #0                      ; nested inside an interrupt?
DeadLoop        BHI DeadLoop ; Abort happened in irq mode, halt system.
                bl rt_interrupt_enter           ; tell RT-Thread we entered interrupt context
                bl rt_hw_trap_abort             ; C-level abort report
                bl rt_interrupt_leave
                b SWITCH                        ; reuse IRQ_Handler's switch/return path
                ENDP
;-----------------------------------------------------------------------
; IRQ_Handler: top-level IRQ entry.
; Saves the volatile context, dispatches through RT-Thread's interrupt
; bookkeeping and the C dispatcher, then either returns to the
; interrupted thread or performs a deferred context switch.
;-----------------------------------------------------------------------
IRQ_Handler     PROC
                EXPORT IRQ_Handler
                STMFD sp!, {r0-r12,lr}          ; save interrupted context
                BL rt_interrupt_enter           ; nest-count++
                BL rt_hw_trap_irq               ; C-level IRQ dispatch
                BL rt_interrupt_leave           ; nest-count--
                ; if rt_thread_switch_interrupt_flag set, jump to
                ; rt_hw_context_switch_interrupt_do and don't return
SWITCH          LDR r0, =rt_thread_switch_interrupt_flag
                LDR r1, [r0]
                CMP r1, #1
                BEQ rt_hw_context_switch_interrupt_do
                LDMFD sp!, {r0-r12,lr}          ; no switch requested: restore context
                SUBS pc, lr, #4                 ; return from IRQ (also restores CPSR from SPSR)
                ENDP
; /*
; * void rt_hw_context_switch_interrupt_do(rt_base_t flag)
; */
;-----------------------------------------------------------------------
; void rt_hw_context_switch_interrupt_do(rt_base_t flag)
; Entered from IRQ/abort handling with r0 = &rt_thread_switch_interrupt_flag.
; Clears the flag, rebuilds the preempted thread's context on its own
; (SVC) stack, stores sp into its TCB, then restores the target thread.
; Fix: armasm's bitwise OR operator is ':OR:' -- the original
; '#I_Bit|F_Bit|Mode_SVC' is not valid armasm expression syntax
; ('|' delimits symbol names); sibling ports in this tree use ':OR:'.
;-----------------------------------------------------------------------
rt_hw_context_switch_interrupt_do PROC
                EXPORT rt_hw_context_switch_interrupt_do
                MOV r1, #0                      ; clear flag
                STR r1, [r0]
                LDMFD sp!, {r0-r12,lr}          ; reload saved registers
                STMFD sp!, {r0-r3}              ; save r0-r3 (needed as scratch below)
                MOV r1, sp                      ; r1 = IRQ stack slot holding r0-r3
                ADD sp, sp, #16                 ; restore IRQ sp
                SUB r2, lr, #4                  ; save old task's pc to r2
                MRS r3, spsr                    ; get cpsr of interrupted thread
                ; switch to SVC mode with IRQ/FIQ disabled
                MSR cpsr_c, #I_Bit:OR:F_Bit:OR:Mode_SVC
                STMFD sp!, {r2}                 ; push old task's pc
                STMFD sp!, {r4-r12,lr}          ; push old task's lr,r12-r4
                MOV r4, r1                      ; Special optimised code below
                MOV r5, r3
                LDMFD r4!, {r0-r3}              ; fetch old task's r0-r3 from IRQ stack
                STMFD sp!, {r0-r3}              ; push old task's r3-r0
                STMFD sp!, {r5}                 ; push old task's cpsr
                MRS r4, spsr
                STMFD sp!, {r4}                 ; push old task's spsr
                LDR r4, =rt_interrupt_from_thread
                LDR r5, [r4]
                STR sp, [r5]                    ; store sp in preempted task's TCB
                LDR r6, =rt_interrupt_to_thread
                LDR r6, [r6]
                LDR sp, [r6]                    ; get new task's stack pointer
                LDMFD sp!, {r4}                 ; pop new task's spsr
                MSR spsr_cxsf, r4
                LDMFD sp!, {r4}                 ; pop new task's psr
                MSR cpsr_cxsf, r4
                LDMFD sp!, {r0-r12,lr,pc}      ; pop new task's r0-r12,lr & pc
                ENDP
IF :DEF:__MICROLIB
EXPORT __heap_base
EXPORT __heap_limit
ELSE
; User Initial Stack & Heap
AREA |.text|, CODE, READONLY
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, = (Stack_Mem + IRQ_Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ENDIF
END
|
vandercookking/h7_device_RTT
| 2,589
|
rt-thread/libcpu/arm/sep4020/context_rvds.S
|
;/*
; * Copyright (c) 2006-2022, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date Author Notes
; * 2009-01-20 Bernard first version
; */
NOINT EQU 0xc0 ; disable interrupt in psr
AREA |.text|, CODE, READONLY, ALIGN=2
ARM
REQUIRE8
PRESERVE8
;/*
; * rt_base_t rt_hw_interrupt_disable();
; */
rt_hw_interrupt_disable PROC
                EXPORT rt_hw_interrupt_disable
                ; Return the current CPSR in r0 and disable IRQ+FIQ
                ; (NOINT = 0xc0 sets the I and F bits).
                MRS r0, cpsr
                ORR r1, r0, #NOINT
                MSR cpsr_c, r1
                BX lr
                ENDP
;/*
; * void rt_hw_interrupt_enable(rt_base_t level);
; */
rt_hw_interrupt_enable PROC
                EXPORT rt_hw_interrupt_enable
                ; Restore the interrupt state: r0 is the CPSR value previously
                ; returned by rt_hw_interrupt_disable.
                MSR cpsr_c, r0
                BX lr
                ENDP
;/*
; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
; * r0 --> from
; * r1 --> to
; */
rt_hw_context_switch PROC
                EXPORT rt_hw_context_switch
                ; Save the outgoing thread's context on its own stack, record
                ; sp through *r0 (from-thread TCB slot), then restore the
                ; incoming thread's context from *r1 (to-thread TCB slot).
                STMFD sp!, {lr}                 ; push pc (lr should be pushed in place of PC)
                STMFD sp!, {r0-r12, lr}         ; push lr & register file
                MRS r4, cpsr
                STMFD sp!, {r4}                 ; push cpsr
                MRS r4, spsr
                STMFD sp!, {r4}                 ; push spsr
                STR sp, [r0]                    ; store sp in preempted tasks TCB
                LDR sp, [r1]                    ; get new task stack pointer
                LDMFD sp!, {r4}                 ; pop new task spsr
                MSR spsr_cxsf, r4
                LDMFD sp!, {r4}                 ; pop new task cpsr
                MSR cpsr_cxsf, r4
                LDMFD sp!, {r0-r12, lr, pc}     ; pop new task r0-r12, lr & pc
                ENDP
;/*
; * void rt_hw_context_switch_to(rt_uint32 to);
; * r0 --> to
; */
rt_hw_context_switch_to PROC
                EXPORT rt_hw_context_switch_to
                ; First switch at scheduler start: no thread to save.
                ; r0 points at the to-thread's saved sp; restore its full
                ; context (spsr, cpsr, r0-r12, lr, pc) and never return.
                LDR sp, [r0]                    ; get new task stack pointer
                LDMFD sp!, {r4}                 ; pop new task spsr
                MSR spsr_cxsf, r4
                LDMFD sp!, {r4}                 ; pop new task cpsr
                MSR cpsr_cxsf, r4
                LDMFD sp!, {r0-r12, lr, pc}     ; pop new task r0-r12, lr & pc
                ENDP
;/*
; * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
; */
IMPORT rt_thread_switch_interrupt_flag
IMPORT rt_interrupt_from_thread
IMPORT rt_interrupt_to_thread
rt_hw_context_switch_interrupt PROC
                EXPORT rt_hw_context_switch_interrupt
                ; Called from interrupt context: do not switch now, only record
                ; the request. The real switch is performed later by
                ; rt_hw_context_switch_interrupt_do on IRQ exit.
                ; r0 = from-thread sp slot, r1 = to-thread sp slot.
                LDR r2, =rt_thread_switch_interrupt_flag
                LDR r3, [r2]
                CMP r3, #1
                BEQ _reswitch                   ; already pending: keep original from-thread
                MOV r3, #1                      ; set rt_thread_switch_interrupt_flag to 1
                STR r3, [r2]
                LDR r2, =rt_interrupt_from_thread ; set rt_interrupt_from_thread
                STR r0, [r2]
_reswitch
                LDR r2, =rt_interrupt_to_thread ; set rt_interrupt_to_thread
                STR r1, [r2]
                BX lr
                ENDP
END
|
vandercookking/h7_device_RTT
| 11,690
|
rt-thread/libcpu/arm/sep4020/start_rvds.S
|
;==============================================================================================
; star_rvds.s for Keil MDK 4.10
;
; SEP4020 start up code
;
; Change Logs:
; Date Author Notes
; 2010-03-17 zchong
;=============================================================================================
; SEP4020 register map used by this startup file.
; NOTE(review): the original comments were GBK-encoded and arrived as
; mojibake; the English descriptions below are reconstructed from the
; register mnemonics -- confirm against the SEP4020 datasheet.
PMU_PLTR        EQU     0x10001000      ; PLL stabilization-time register
PMU_PMCR        EQU     0x10001004      ; system-clock PLL control register
PMU_PUCR        EQU     0x10001008      ; USB-clock PLL control register
PMU_PCSR        EQU     0x1000100C      ; internal module clock-source control register
PMU_PDSLOW      EQU     0x10001010      ; clock divider used in SLOW state
PMU_PMDR        EQU     0x10001014      ; chip power-mode register
PMU_RCTR        EQU     0x10001018      ; reset control register
PMU_CLRWAKUP    EQU     0x1000101C      ; wake-up clear register
RTC_CTR         EQU     0x1000200C      ; RTC control register (watchdog control lives here)
INTC_IER        EQU     0x10000000      ; IRQ interrupt enable register
INTC_IMR        EQU     0x10000008      ; IRQ interrupt mask register
INTC_IFSR       EQU     0x10000030      ; IRQ interrupt status register
INTC_FIER       EQU     0x100000C0      ; FIQ interrupt enable register
INTC_FIMR       EQU     0x100000C4      ; FIQ interrupt mask register
EMI_CSACONF     EQU     0x11000000      ; chip-select A configuration register
EMI_CSECONF     EQU     0x11000010      ; chip-select E configuration register
EMI_CSFCONF     EQU     0x11000014      ; chip-select F configuration register
EMI_SDCONF1     EQU     0x11000018      ; SDRAM timing configuration register 1
EMI_SDCONF2     EQU     0x1100001C      ; SDRAM timing configuration register 2 (init settings)
EMI_REMAPCONF   EQU     0x11000020      ; chip-select space / address-remap (REMAP) configuration
Mode_USR EQU 0x10
Mode_FIQ EQU 0x11
Mode_IRQ EQU 0x12
Mode_SVC EQU 0x13
Mode_ABT EQU 0x17
Mode_UND EQU 0x1B
Mode_SYS EQU 0x1F
I_Bit EQU 0x80 ; when I bit is set, IRQ is disabled
F_Bit EQU 0x40 ; when F bit is set, FIQ is disabled
NOINT EQU 0xc0
MASK_MODE EQU 0x0000003F
MODE_SVC32 EQU 0x00000013
; Internal Memory Base Addresses
FLASH_BASE EQU 0x20000000
RAM_BASE EQU 0x04000000
SDRAM_BASE EQU 0x30000000
; Stack
Unused_Stack_Size EQU 0x00000100
Svc_Stack_Size EQU 0x00001000
Abt_Stack_Size EQU 0x00000000
Fiq_Stack_Size EQU 0x00000000
Irq_Stack_Size EQU 0x00001000
Usr_Stack_Size EQU 0x00000000
;SVC STACK
AREA STACK, NOINIT, READWRITE, ALIGN=3
Svc_Stack SPACE Svc_Stack_Size
__initial_sp
Svc_Stack_Top
;IRQ STACK
AREA STACK, NOINIT, READWRITE, ALIGN=3
Irq_Stack SPACE Irq_Stack_Size
Irq_Stack_Top
;UNUSED STACK
AREA STACK, NOINIT, READWRITE, ALIGN=3
Unused_Stack SPACE Unused_Stack_Size
Unused_Stack_Top
; Heap
Heap_Size EQU 0x0000100
AREA HEAP, NOINIT, READWRITE, ALIGN=3
EXPORT Heap_Mem
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
; Area Definition and Entry Point
; Startup Code must be linked first at Address at which it expects to run.
AREA RESET, CODE, READONLY
ARM
; Exception Vectors
; Mapped to Address 0.
; Absolute addressing mode must be used.
; Dummy Handlers are implemented as infinite loops which can be modified.
EXPORT Entry_Point
Entry_Point
Vectors LDR PC,Reset_Addr
LDR PC,Undef_Addr
LDR PC,SWI_Addr
LDR PC,PAbt_Addr
LDR PC,DAbt_Addr
NOP ; Reserved Vector
LDR PC,IRQ_Addr
LDR PC,FIQ_Addr
Reset_Addr DCD Reset_Handler
Undef_Addr DCD Undef_Handler
SWI_Addr DCD SWI_Handler
PAbt_Addr DCD PAbt_Handler
DAbt_Addr DCD DAbt_Handler
DCD 0 ; Reserved Address
IRQ_Addr DCD IRQ_Handler
FIQ_Addr DCD FIQ_Handler
Undef_Handler B Undef_Handler
SWI_Handler B SWI_Handler
PAbt_Handler B Abort_Handler
DAbt_Handler B Abort_Handler
FIQ_Handler B FIQ_Handler
Abort_Handler   PROC
                ARM
                EXPORT Abort_Handler
                ; Aborts are fatal on this port: spin forever.
                ; NOTE(review): BHI is conditional on whatever flags the
                ; aborting context left behind; if the condition is false the
                ; first pass falls through past ENDP instead of halting --
                ; an unconditional B would be safer. Left as-is.
DeadLoop        BHI DeadLoop ; Abort happened in irq mode, halt system.
                ENDP
; Reset Handler
;IMPORT __user_initial_stackheap
                EXPORT Reset_Handler
;-----------------------------------------------------------------------
; Reset_Handler: SEP4020 cold-boot sequence.
; Disables the watchdog and all interrupts, sets up a stack for every
; ARM mode, programs the PMU/PLL system clock, optionally configures
; EMI/SDRAM and remapping, re-enables IRQ, then jumps to C __main.
; (Original GBK comments were mojibake; English ones below are
; reconstructed from the code and register names.)
;-----------------------------------------------------------------------
Reset_Handler
;****************************************************************
;* Shutdown watchdog
;****************************************************************
                LDR R0,=RTC_CTR
                LDR R1,=0x0
                STR R1,[R0]
;****************************************************************
;* shutdown interrupts
;****************************************************************
                MRS R0, CPSR
                BIC R0, R0, #MASK_MODE          ; clear mode bits
                ORR R0, R0, #MODE_SVC32         ; SVC mode
                ORR R0, R0, #I_Bit              ; IRQ off
                ORR R0, R0, #F_Bit              ; FIQ off
                MSR CPSR_c, r0
                LDR R0,=INTC_IER                ; disable all IRQ sources
                LDR R1,=0x0
                STR R1,[R0]
                LDR R0,=INTC_IMR                ; mask all IRQ sources
                LDR R1,=0xFFFFFFFF
                STR R1,[R0]
                LDR R0,=INTC_FIER               ; disable all FIQ sources
                LDR R1,=0x0
                STR R1,[R0]
                LDR R0,=INTC_FIMR               ; FIQ mask register
                LDR R1,=0x0F
                STR R1,[R0]
;****************************************************************
;* Initialize Stack Pointer
;****************************************************************
                LDR SP, =Svc_Stack_Top          ;init SP_svc
                MOV R4, #0xD2                   ;chmod to irq and init SP_irq
                MSR cpsr_c, R4
                LDR SP, =Irq_Stack_Top
                MOV R4, #0XD1                   ;chmod to fiq and init SP_fiq
                MSR cpsr_c, R4
                LDR SP, =Unused_Stack_Top
                MOV R4, #0XD7                   ;chmod to abt and init SP_ABT
                MSR cpsr_c, R4
                LDR SP, =Unused_Stack_Top
                MOV R4, #0XDB                   ;chmod to undef and init SP_UNDF
                MSR cpsr_c, R4
                LDR SP, =Unused_Stack_Top
                ;chmod to sys and init SP_sys
                MOV R4, #0xDF                   ;all interrupts disabled
                MSR cpsr_c, R4                  ;SYSTEM mode, @32-bit code mode
                LDR SP, =Unused_Stack_Top
                MOV R4, #0XD3                   ;chmod to svc mode, CPSR IRQ bit is disabled
                MSR cpsr_c, R4
;****************************************************************
;* Initialize PMU & System Clock
;****************************************************************
                LDR R4, =PMU_PCSR               ; enable internal module clocks
                LDR R5, =0x0001ffff
                STR R5, [ R4 ]
                LDR R4, =PMU_PLTR               ; PLL stabilization time (approx. 50us @ 100MHz)
                LDR R5, =0x00fa00fa
                STR R5, [ R4 ]
                LDR R4, =PMU_PMDR               ; switch from SLOW mode to NORMAL mode
                LDR R5, =0x00000001
                STR R5, [ R4 ]
                LDR R4, =PMU_PMCR               ; program system-clock PLL
                LDR R5, =0x00004009             ; 400b -- 88M
                STR R5, [ R4 ]
                ; bit 15 of PMU_PMCR must see a low-to-high transition for the
                ; new PLL setting to take effect, hence the second write:
                LDR R4, =PMU_PMCR
                LDR R5, =0x0000c009
                STR R5, [ R4 ]
;****************************************************************
;* Initialize EMI (external memory interface)
;****************************************************************
                IF :DEF:INIT_EMI
                LDR R4, =EMI_CSACONF            ; chip-select A timing
                LDR R5, =0x08a6a6a1
                STR R5, [ R4 ]
                LDR R4, =EMI_CSECONF            ; chip-select E timing
                LDR R5, =0x8cfffff1
                STR R5, [ R4 ]
                LDR R4, =EMI_SDCONF1            ; SDRAM configuration 1
                LDR R5, =0x1E104177
                STR R5, [ R4 ]
                LDR R4, =EMI_SDCONF2            ; SDRAM configuration 2
                LDR R5, =0x80001860
                STR R5, [ R4 ]
                ENDIF
; Copy Exception Vectors to Internal RAM
                IF :DEF:RAM_INTVEC
                ADR R8, Vectors                 ; Source
                LDR R9, =RAM_BASE               ; Destination
                LDMIA R8!, {R0-R7}              ; Load Vectors
                STMIA R9!, {R0-R7}              ; Store Vectors
                LDMIA R8!, {R0-R7}              ; Load Handler Addresses
                STMIA R9!, {R0-R7}              ; Store Handler Addresses
                ENDIF
; Remap on-chip RAM to address 0
                IF :DEF:REMAP
                LDR R0, =EMI_REMAPCONF
                IF :DEF:RAM_INTVEC
                MOV R1, #0x80000000
                ELSE
                MOV R1, #0x0000000b
                ENDIF
                STR R1, [R0, #0]                ; Remap
                ENDIF
;***************************************************************
;* Open irq interrupt
;***************************************************************
                MRS R4, cpsr
                BIC R4, R4, #0x80               ; clear I bit (enable IRQ)
                MSR cpsr_c, R4
; Enter the C code
                IMPORT __main
                LDR R0,=__main
                BX R0
IMPORT rt_interrupt_enter
IMPORT rt_interrupt_leave
IMPORT rt_thread_switch_interrupt_flag
IMPORT rt_interrupt_from_thread
IMPORT rt_interrupt_to_thread
IMPORT rt_hw_trap_irq
IRQ_Handler     PROC
                EXPORT IRQ_Handler
                ; Save volatile context, dispatch through RT-Thread's
                ; interrupt bookkeeping and the C dispatcher, then either
                ; return or perform the deferred context switch.
                STMFD sp!, {r0-r12,lr}
                BL rt_interrupt_enter
                BL rt_hw_trap_irq
                BL rt_interrupt_leave
                ; if rt_thread_switch_interrupt_flag set, jump to
                ; rt_hw_context_switch_interrupt_do and don't return
                LDR r0, =rt_thread_switch_interrupt_flag
                LDR r1, [r0]
                CMP r1, #1
                BEQ rt_hw_context_switch_interrupt_do
                LDMFD sp!, {r0-r12,lr}
                SUBS pc, lr, #4                 ; return from IRQ (restores CPSR from SPSR)
                ENDP
; /*
; * void rt_hw_context_switch_interrupt_do(rt_base_t flag)
; */
;-----------------------------------------------------------------------
; void rt_hw_context_switch_interrupt_do(rt_base_t flag)
; Entered from IRQ exit with r0 = &rt_thread_switch_interrupt_flag.
; Clears the flag, rebuilds the preempted thread's context on its own
; (SVC) stack, stores sp into its TCB, then restores the target thread.
; Fix: the original 'MSR cpsr_c, #I_Bit :OR F_Bit :OR Mode_SVC' is
; malformed armasm -- the bitwise-OR operator is ':OR:' (both colons);
; the lpc214x port in this tree writes it correctly as
; '#I_Bit:OR:F_Bit:OR:Mode_SVC'.
;-----------------------------------------------------------------------
rt_hw_context_switch_interrupt_do PROC
                EXPORT rt_hw_context_switch_interrupt_do
                MOV r1, #0                      ; clear flag
                STR r1, [r0]
                LDMFD sp!, {r0-r12,lr}          ; reload saved registers
                STMFD sp!, {r0-r3}              ; save r0-r3 (needed as scratch below)
                MOV r1, sp                      ; r1 = IRQ stack slot holding r0-r3
                ADD sp, sp, #16                 ; restore IRQ sp
                SUB r2, lr, #4                  ; save old task's pc to r2
                MRS r3, spsr                    ; get cpsr of interrupted thread
                ; switch to SVC mode with IRQ/FIQ disabled
                MSR cpsr_c, #I_Bit:OR:F_Bit:OR:Mode_SVC
                STMFD sp!, {r2}                 ; push old task's pc
                STMFD sp!, {r4-r12,lr}          ; push old task's lr,r12-r4
                MOV r4, r1                      ; Special optimised code below
                MOV r5, r3
                LDMFD r4!, {r0-r3}              ; fetch old task's r0-r3 from IRQ stack
                STMFD sp!, {r0-r3}              ; push old task's r3-r0
                STMFD sp!, {r5}                 ; push old task's cpsr
                MRS r4, spsr
                STMFD sp!, {r4}                 ; push old task's spsr
                LDR r4, =rt_interrupt_from_thread
                LDR r5, [r4]
                STR sp, [r5]                    ; store sp in preempted task's TCB
                LDR r6, =rt_interrupt_to_thread
                LDR r6, [r6]
                LDR sp, [r6]                    ; get new task's stack pointer
                LDMFD sp!, {r4}                 ; pop new task's spsr
                MSR spsr_cxsf, r4
                LDMFD sp!, {r4}                 ; pop new task's psr
                MSR cpsr_cxsf, r4
                LDMFD sp!, {r0-r12,lr,pc}      ; pop new task's r0-r12,lr & pc
                ENDP
ALIGN
IF :DEF:__MICROLIB
EXPORT __heap_base
EXPORT __heap_limit
EXPORT __initial_sp
ELSE ;__MICROLIB
; User Initial Stack & Heap
AREA |.text|, CODE, READONLY
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, = (Svc_Stack + Svc_Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Svc_Stack
BX LR
ALIGN
ENDIF
END
|
vandercookking/h7_device_RTT
| 12,045
|
rt-thread/libcpu/arm/lpc214x/startup_gcc.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
.extern main /* 引入外部C入口 */
.extern rt_interrupt_enter
.extern rt_interrupt_leave
.extern rt_thread_switch_interrupt_flag
.extern rt_interrupt_from_thread
.extern rt_interrupt_to_thread
.extern rt_hw_trap_irq
.global start
.global endless_loop
.global rt_hw_context_switch_interrupt_do
/* Standard definitions of Mode bits and Interrupt (I & F) flags in PSRs */
.set MODE_USR, 0x10 /* User Mode */
.set MODE_FIQ, 0x11 /* FIQ Mode */
.set MODE_IRQ, 0x12 /* IRQ Mode */
.set MODE_SVC, 0x13 /* Supervisor Mode */
.set MODE_ABT, 0x17 /* Abort Mode */
.set MODE_UND, 0x1B /* Undefined Mode */
.set MODE_SYS, 0x1F /* System Mode */
.equ I_BIT, 0x80 /* when I bit is set, IRQ is disabled */
.equ F_BIT, 0x40 /* when F bit is set, FIQ is disabled */
.equ I_Bit, 0x80 /* when I bit is set, IRQ is disabled */
.equ F_Bit, 0x40 /* when F bit is set, FIQ is disabled */
/* VPBDIV definitions*/
.equ VPBDIV, 0xE01FC100
.set VPBDIV_VALUE, 0x00000000
/* Phase Locked Loop (PLL) definitions*/
.equ PLL_BASE, 0xE01FC080 /* PLL Base Address */
.equ PLLCON_OFS, 0x00 /* PLL Control Offset */
.equ PLLCFG_OFS, 0x04 /* PLL Configuration Offset */
.equ PLLSTAT_OFS, 0x08 /* PLL Status Offset */
.equ PLLFEED_OFS, 0x0C /* PLL Feed Offset */
.equ PLLCON_PLLE, (1<<0) /* PLL Enable */
.equ PLLCON_PLLC, (1<<1) /* PLL Connect */
.equ PLLCFG_MSEL, (0x1F<<0) /* PLL Multiplier */
.equ PLLCFG_PSEL, (0x03<<5) /* PLL Divider */
.equ PLLSTAT_PLOCK, (1<<10) /* PLL Lock Status */
.equ PLLCFG_Val, 0x00000024 /* <o1.0..4> MSEL: PLL Multiplier Selection,<o1.5..6> PSEL: PLL Divider Selection */
.equ MEMMAP, 0xE01FC040 /*Memory Mapping Control*/
/* Memory Accelerator Module (MAM) definitions*/
.equ MAM_BASE, 0xE01FC000
.equ MAMCR_OFS, 0x00
.equ MAMTIM_OFS, 0x04
.equ MAMCR_Val, 0x00000002
.equ MAMTIM_Val, 0x00000004
.equ VICIntEnClr, 0xFFFFF014
.equ VICIntSelect, 0xFFFFF00C
/************* 目标配置结束 *************/
/* Setup the operating mode & stack.*/
/* --------------------------------- */
/*
 * _reset: LPC214x cold-boot sequence (GNU as, ARM state).
 * Configures and locks the PLL, sets VPBDIV and the MAM, gives every
 * ARM mode its own stack, copies .data from flash to SRAM, clears .bss,
 * runs C++ static constructors, then enters C main().
 */
.global _reset
_reset:
.code 32
.align 0
/************************* PLL_SETUP **********************************/
        ldr     r0, =PLL_BASE
        mov     r1, #0xAA               /* PLL feed sequence values: */
        mov     r2, #0x55               /* writing 0xAA,0x55 latches PLL regs */
/* Configure and Enable PLL */
        mov     r3, #PLLCFG_Val
        str     r3, [r0, #PLLCFG_OFS]
        mov     r3, #PLLCON_PLLE
        str     r3, [r0, #PLLCON_OFS]
        str     r1, [r0, #PLLFEED_OFS]
        str     r2, [r0, #PLLFEED_OFS]
/* Wait until PLL Locked */
PLL_Locked_loop:
        ldr     r3, [r0, #PLLSTAT_OFS]
        ands    r3, r3, #PLLSTAT_PLOCK
        beq     PLL_Locked_loop
/* Switch to PLL Clock */
        mov     r3, #(PLLCON_PLLE|PLLCON_PLLC)
        str     r3, [r0, #PLLCON_OFS]
        str     r1, [r0, #PLLFEED_OFS]
        str     R2, [r0, #PLLFEED_OFS]
/************************* PLL_SETUP **********************************/
/************************ Setup VPBDIV ********************************/
        ldr     r0, =VPBDIV
        ldr     r1, =VPBDIV_VALUE
        str     r1, [r0]
/************************ Setup VPBDIV ********************************/
/************** Setup MAM **************/
        ldr     r0, =MAM_BASE
        mov     r1, #MAMTIM_Val
        str     r1, [r0, #MAMTIM_OFS]
        mov     r1, #MAMCR_Val
        str     r1, [r0, #MAMCR_OFS]
/************** Setup MAM **************/
/************************ setup stack *********************************/
        ldr     r0, .undefined_stack_top
        sub     r0, r0, #4
        msr     CPSR_c, #MODE_UND|I_BIT|F_BIT   /* Undefined Instruction Mode */
        mov     sp, r0
        ldr     r0, .abort_stack_top
        sub     r0, r0, #4
        msr     CPSR_c, #MODE_ABT|I_BIT|F_BIT   /* Abort Mode */
        mov     sp, r0
        ldr     r0, .fiq_stack_top
        sub     r0, r0, #4
        msr     CPSR_c, #MODE_FIQ|I_BIT|F_BIT   /* FIQ Mode */
        mov     sp, r0
        ldr     r0, .irq_stack_top
        sub     r0, r0, #4
        msr     CPSR_c, #MODE_IRQ|I_BIT|F_BIT   /* IRQ Mode */
        mov     sp, r0
        ldr     r0, .svc_stack_top
        sub     r0, r0, #4
        msr     CPSR_c, #MODE_SVC|I_BIT|F_BIT   /* Supervisor Mode */
        mov     sp, r0
/************************ setup stack ********************************/
/* copy .data to SRAM */
        ldr     r1, =_sidata            /* .data start in image */
        ldr     r2, =_edata             /* .data end in image */
        ldr     r3, =_sdata             /* sram data start */
data_loop:
        ldr     r0, [r1, #0]
        str     r0, [r3]
        add     r1, r1, #4
        add     r3, r3, #4
        cmp     r3, r2                  /* check if data to copy remains */
        blo     data_loop               /* loop until done */
/* clear .bss */
        mov     r0,#0                   /* get a zero */
        ldr     r1,=__bss_start         /* bss start */
        ldr     r2,=__bss_end           /* bss end */
bss_loop:
        cmp     r1,r2                   /* check if data to clear */
        strlo   r0,[r1],#4              /* clear 4 bytes */
        blo     bss_loop                /* loop until done */
/* call C++ constructors of global objects */
        ldr     r0, =__ctors_start__
        ldr     r1, =__ctors_end__
ctor_loop:
        cmp     r0, r1
        beq     ctor_end
        ldr     r2, [r0], #4            /* r2 = next constructor address */
        stmfd   sp!, {r0-r1}
        mov     lr, pc
        bx      r2                      /* call constructor */
        ldmfd   sp!, {r0-r1}
        b       ctor_loop
ctor_end:
/* enter C code */
        bl      main
/* literal pool: per-mode stack-top addresses provided by the linker script */
.align 0
.undefined_stack_top:
        .word _undefined_stack_top
.abort_stack_top:
        .word _abort_stack_top
.fiq_stack_top:
        .word _fiq_stack_top
.irq_stack_top:
        .word _irq_stack_top
.svc_stack_top:
        .word _svc_stack_top
/*********************** END Clear BSS ******************************/
.section .init,"ax"
.code 32
.align 0
.globl _start
_start:
ldr pc, __start /* reset - _start */
ldr pc, _undf /* undefined - _undf */
ldr pc, _swi /* SWI - _swi */
ldr pc, _pabt /* program abort - _pabt */
ldr pc, _dabt /* data abort - _dabt */
.word 0xB8A06F58 /* reserved */
ldr pc, __IRQ_Handler /* IRQ - read the VIC */
ldr pc, _fiq /* FIQ - _fiq */
__start:.word _reset
_undf: .word __undf /* undefined */
_swi: .word __swi /* SWI */
_pabt: .word __pabt /* program abort */
_dabt: .word __dabt /* data abort */
temp1: .word 0
__IRQ_Handler: .word IRQ_Handler
_fiq: .word __fiq /* FIQ */
__undf: b . /* undefined */
__swi : b .
__pabt: b . /* program abort */
__dabt: b . /* data abort */
__fiq : b . /* FIQ */
/* IRQ entry: save context, dispatch through RT-Thread, then either
 * return to the interrupted thread or do the deferred context switch. */
IRQ_Handler :
        stmfd   sp!, {r0-r12,lr}        /* push r0-r12 and lr */
        bl      rt_interrupt_enter      /* tell RT-Thread we entered interrupt mode */
        bl      rt_hw_trap_irq          /* dispatch to the interrupt service routine */
        bl      rt_interrupt_leave      /* tell RT-Thread we are leaving interrupt mode */
        /* if rt_thread_switch_interrupt_flag is set, perform the
         * in-interrupt thread context switch */
        ldr     r0, =rt_thread_switch_interrupt_flag
        ldr     r1, [r0]
        cmp     r1, #1
        beq     rt_hw_context_switch_interrupt_do /* switch requested from interrupt */
        /* if that branch is taken we never come back */
        ldmfd   sp!, {r0-r12,lr}        /* restore stacked registers */
        subs    pc, lr, #4              /* return from IRQ */
/*
 * void rt_hw_context_switch_interrupt_do(rt_base_t flag)
 * Context switch performed at the end of interrupt handling.
 * Entered with r0 = &rt_thread_switch_interrupt_flag; rebuilds the
 * preempted thread's context on its own (SVC) stack, stores sp into its
 * TCB, then restores the target thread.
 */
rt_hw_context_switch_interrupt_do:
        mov     r1, #0                  /* clear flag */
        /* clear the in-interrupt switch flag */
        str     r1, [r0]
        ldmfd   sp!, {r0-r12,lr}        /* reload saved registers */
        /* first restore the interrupted thread's context */
        stmfd   sp!, {r0-r3}            /* save r0-r3 */
        /* push r0-r3 because they are needed as scratch below */
        mov     r1, sp                  /* remember this stack position in r1 */
        add     sp, sp, #16             /* restore sp */
        /* restore the IRQ stack; we leave IRQ mode below */
        sub     r2, lr, #4              /* save old task's pc to r2 */
        /* r2 = pc of the thread being switched out */
        mrs     r3, spsr                /* save pre-interrupt CPSR into r3 */
        /* r3 = SPSR value */
        orr     r0, r3, #I_BIT|F_BIT
        msr     spsr_c, r0              /* disable IRQ/FIQ in SPSR */
        ldr     r0, =.+8                /* r0 = address 8 bytes ahead; switch to interrupted task's stack */
        movs    pc, r0                  /* leave IRQ mode; SPSR (interrupts off) becomes CPSR */
        /* After the mode return, interrupts are still disabled.
         * Execution continues at the next instruction, now in the
         * pre-interrupt (SVC) mode with the SVC stack pointer.
         * r1 = IRQ-mode stack pointer (holding r0-r3)
         * r2 = pc of the switched-out thread
         * r3 = CPSR of the switched-out thread */
        stmfd   sp!, {r2}               /* push old task's pc */
        /* save the switched-out task's pc */
        stmfd   sp!, {r4-r12,lr}        /* push old task's lr,r12-r4 */
        /* save r4-r12 and lr */
        mov     r4, r1                  /* Special optimised code below */
        /* r1 holds the IRQ stack slot where r0-r3 were pushed */
        mov     r5, r3                  /* r3 = switched-out thread's CPSR */
        ldmfd   r4!, {r0-r3}            /* recover r0-r3 */
        stmfd   sp!, {r0-r3}            /* push old task's r3-r0 */
        /* push r0-r3 onto the switched-out thread's stack */
        stmfd   sp!, {r5}               /* push old task's psr */
        /* push the switched-out thread's CPSR */
        mrs     r4, spsr
        stmfd   sp!, {r4}               /* push old task's spsr */
        /* push the switched-out thread's SPSR */
        ldr     r4, =rt_interrupt_from_thread
        ldr     r5, [r4]
        str     sp, [r5]                /* store sp in preempted task's TCB */
        /* record the switched-out thread's sp */
        ldr     r6, =rt_interrupt_to_thread
        ldr     r6, [r6]
        ldr     sp, [r6]                /* get new task's stack pointer */
        /* load the switched-in thread's stack */
        ldmfd   sp!, {r4}               /* pop new task's spsr */
        /* restore SPSR */
        msr     SPSR_cxsf, r4
        ldmfd   sp!, {r4}               /* pop new task's psr */
        /* restore CPSR */
        msr     CPSR_cxsf, r4
        ldmfd   sp!, {r0-r12,lr,pc}     /* pop new task's r0-r12,lr & pc */
        /* restore r0-r12, lr and pc */
/* 代码加密功能 */
#if defined(CODE_PROTECTION)
.org 0x01FC
.word 0x87654321
#endif
|
vandercookking/h7_device_RTT
| 3,765
|
rt-thread/libcpu/arm/lpc214x/context_gcc.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
.global rt_hw_interrupt_disable
.global rt_hw_interrupt_enable
.global rt_hw_context_switch
.global rt_hw_context_switch_to
.global rt_hw_context_switch_interrupt
.equ NOINT, 0xc0
/*
 * rt_base_t rt_hw_interrupt_disable();
 * Disable IRQ+FIQ; return the previous CPSR value in r0.
 */
rt_hw_interrupt_disable:
        //EXPORT rt_hw_interrupt_disable
        MRS     r0, cpsr                /* r0 = current CPSR (returned to caller) */
        ORR     r1, r0, #NOINT          /* set I and F bits (NOINT = 0xc0) */
        MSR     cpsr_c, r1
        BX      lr
        //ENDP
/*
 * void rt_hw_interrupt_enable(rt_base_t level);
 * Restore the interrupt state previously returned by
 * rt_hw_interrupt_disable (r0 = saved CPSR control bits).
 */
rt_hw_interrupt_enable:
        //EXPORT rt_hw_interrupt_enable
        MSR     cpsr_c, r0
        BX      lr
        //ENDP
/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
 * r0 --> from
 * r1 --> to
 * Thread-to-thread context switch: save the outgoing thread's context
 * on its own stack, store sp through *r0 (from-TCB slot), then restore
 * the incoming thread from *r1 (to-TCB slot).
 */
rt_hw_context_switch:
        //EXPORT rt_hw_context_switch
        STMFD   sp!, {lr}               /* push pc (lr should be pushed in place of PC) */
        /* lr = return address of this call; it becomes the resumed pc */
        STMFD   sp!, {r0-r12, lr}       /* push lr & register file */
        /* push r0-r12 and lr */
        MRS     r4, cpsr                /* read CPSR into r4 */
        STMFD   sp!, {r4}               /* push cpsr */
        MRS     r4, spsr                /* read SPSR into r4 */
        STMFD   sp!, {r4}               /* push spsr */
        STR     sp, [r0]                /* store sp in preempted tasks TCB */
        /* record sp into the from-thread's TCB slot (passed in r0) */
        /* at this point the outgoing thread's full context is stacked */
        LDR     sp, [r1]                /* get new task stack pointer */
        /* load the to-thread's saved sp */
        /* restore context in the reverse order of saving */
        LDMFD   sp!, {r4}               /* pop new task spsr */
        MSR     spsr_cxsf, r4           /* restore SPSR */
        LDMFD   sp!, {r4}               /* pop new task cpsr */
        MSR     cpsr_cxsf, r4           /* restore CPSR */
        LDMFD   sp!, {r0-r12, lr, pc}   /* pop new task r0-r12, lr & pc */
        /* restore r0-r12, lr and pc */
        //ENDP
/* void rt_hw_context_switch_to(rt_uint32 to); r0 --> to
 * First switch at scheduler start: nothing to save, just restore the
 * to-thread's context from its saved stack. Never returns. */
rt_hw_context_switch_to:
        //EXPORT rt_hw_context_switch_to
        LDR     sp, [r0]                /* get new task stack pointer */
        /* load the to-thread's saved sp */
        LDMFD   sp!, {r4}               /* pop new task spsr */
        /* pop the saved SPSR value */
        MSR     spsr_cxsf, r4           /* restore SPSR */
        LDMFD   sp!, {r4}               /* pop new task cpsr */
        /* pop the saved CPSR value */
        MSR     cpsr_cxsf, r4           /* restore CPSR */
        LDMFD   sp!, {r0-r12, lr, pc}   /* pop new task r0-r12, lr & pc */
        /* restore r0-r12, lr and pc */
        //ENDP
/* void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
 * Called from interrupt context: only records the switch request; the
 * actual switch happens in rt_hw_context_switch_interrupt_do on IRQ exit. */
rt_hw_context_switch_interrupt:
        //EXPORT rt_hw_context_switch_interrupt
        LDR     r2, =rt_thread_switch_interrupt_flag
        LDR     r3, [r2]                /* load the in-interrupt switch flag */
        CMP     r3, #1                  /* already set? */
        BEQ     _reswitch               /* yes: keep the original from-thread */
        MOV     r3, #1                  /* set rt_thread_switch_interrupt_flag to 1 */
        /* mark a switch as pending */
        STR     r3, [r2]
        LDR     r2, =rt_interrupt_from_thread /* set rt_interrupt_from_thread */
        STR     r0, [r2]                /* record the switched-out thread's sp slot */
_reswitch:
        LDR     r2, =rt_interrupt_to_thread /* set rt_interrupt_to_thread */
        STR     r1, [r2]                /* record the switched-in thread's sp slot */
        BX      lr
        //ENDP
//END
|
vandercookking/h7_device_RTT
| 4,535
|
rt-thread/libcpu/arm/lpc214x/context_rvds.S
|
;/*
; * Copyright (c) 2006-2022, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date Author Notes
; * 2009-01-20 Bernard first version
; * 2011-07-22 Bernard added thumb mode porting
; */
Mode_USR EQU 0x10
Mode_FIQ EQU 0x11
Mode_IRQ EQU 0x12
Mode_SVC EQU 0x13
Mode_ABT EQU 0x17
Mode_UND EQU 0x1B
Mode_SYS EQU 0x1F
I_Bit EQU 0x80 ; when I bit is set, IRQ is disabled
F_Bit EQU 0x40 ; when F bit is set, FIQ is disabled
NOINT EQU 0xc0 ; disable interrupt in psr
AREA |.text|, CODE, READONLY, ALIGN=2
ARM
REQUIRE8
PRESERVE8
;/*
; * rt_base_t rt_hw_interrupt_disable();
; */
rt_hw_interrupt_disable PROC
                EXPORT rt_hw_interrupt_disable
                ; Return the current CPSR in r0 and disable IRQ+FIQ
                ; (NOINT = 0xc0 sets the I and F bits).
                MRS r0, cpsr
                ORR r1, r0, #NOINT
                MSR cpsr_c, r1
                BX lr
                ENDP
;/*
; * void rt_hw_interrupt_enable(rt_base_t level);
; */
rt_hw_interrupt_enable PROC
                EXPORT rt_hw_interrupt_enable
                ; Restore the interrupt state: r0 is the CPSR value previously
                ; returned by rt_hw_interrupt_disable.
                MSR cpsr_c, r0
                BX lr
                ENDP
;/*
; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
; * r0 --> from
; * r1 --> to
; */
rt_hw_context_switch PROC
                EXPORT rt_hw_context_switch
                ; Thread-to-thread switch with Thumb support: bit 0 of lr
                ; indicates the caller was Thumb, recorded as the T bit in
                ; the saved CPSR so the thread resumes in the right state.
                STMFD sp!, {lr}                 ; push pc (lr should be pushed in place of PC)
                STMFD sp!, {r0-r12, lr}         ; push lr & register file
                MRS r4, cpsr
                TST lr, #0x01                   ; caller in Thumb state?
                BEQ _ARM_MODE
                ORR r4, r4, #0x20               ; it's thumb code: set T bit in saved cpsr
_ARM_MODE
                STMFD sp!, {r4}                 ; push cpsr
                STR sp, [r0]                    ; store sp in preempted tasks TCB
                LDR sp, [r1]                    ; get new task stack pointer
                LDMFD sp!, {r4}                 ; pop new task cpsr to spsr
                MSR spsr_cxsf, r4
                BIC r4, r4, #0x20               ; must be ARM mode
                MSR cpsr_cxsf, r4
                LDMFD sp!, {r0-r12, lr, pc}^    ; pop new task r0-r12, lr & pc, copy spsr to cpsr
                ENDP
;/*
; * void rt_hw_context_switch_to(rt_uint32 to);
; * r0 --> to
; */
rt_hw_context_switch_to PROC
                EXPORT rt_hw_context_switch_to
                ; First switch at scheduler start: nothing to save, restore
                ; the to-thread's context (in ARM state; its saved CPSR,
                ; possibly with the T bit, goes into SPSR for the LDM^).
                LDR sp, [r0]                    ; get new task stack pointer
                LDMFD sp!, {r4}                 ; pop new task cpsr to spsr
                MSR spsr_cxsf, r4
                BIC r4, r4, #0x20               ; must be ARM mode
                MSR cpsr_cxsf, r4
                LDMFD sp!, {r0-r12, lr, pc}^    ; pop new task r0-r12, lr & pc, copy spsr to cpsr
                ENDP
;/*
; * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
; */
IMPORT rt_thread_switch_interrupt_flag
IMPORT rt_interrupt_from_thread
IMPORT rt_interrupt_to_thread
rt_hw_context_switch_interrupt PROC
                EXPORT rt_hw_context_switch_interrupt
                ; Called from interrupt context: record the from/to thread
                ; sp slots only; the actual switch is done on IRQ exit by
                ; rt_hw_context_switch_interrupt_do.
                LDR r2, =rt_thread_switch_interrupt_flag
                LDR r3, [r2]
                CMP r3, #1
                BEQ _reswitch                   ; already pending: keep original from-thread
                MOV r3, #1                      ; set rt_thread_switch_interrupt_flag to 1
                STR r3, [r2]
                LDR r2, =rt_interrupt_from_thread ; set rt_interrupt_from_thread
                STR r0, [r2]
_reswitch
                LDR r2, =rt_interrupt_to_thread ; set rt_interrupt_to_thread
                STR r1, [r2]
                BX lr
                ENDP
; /*
; * void rt_hw_context_switch_interrupt_do(rt_base_t flag)
; */
;-----------------------------------------------------------------------
; void rt_hw_context_switch_interrupt_do(rt_base_t flag)
; Entered from IRQ exit with r0 = &rt_thread_switch_interrupt_flag.
; Clears the flag, rebuilds the preempted thread's context on its own
; (SVC) stack, stores sp into its TCB, then restores the target thread
; (Thumb-aware: T bit is restored via the saved CPSR and the LDM^).
;-----------------------------------------------------------------------
rt_hw_context_switch_interrupt_do PROC
                EXPORT rt_hw_context_switch_interrupt_do
                MOV r1, #0                      ; clear flag
                STR r1, [r0]
                LDMFD sp!, {r0-r12,lr}          ; reload saved registers
                STMFD sp!, {r0-r3}              ; save r0-r3 (needed as scratch below)
                MOV r1, sp                      ; r1 = IRQ stack slot holding r0-r3
                ADD sp, sp, #16                 ; restore sp
                SUB r2, lr, #4                  ; save old task's pc to r2
                MRS r3, spsr                    ; get cpsr of interrupt thread
                ; switch to SVC mode and no interrupt
                MSR cpsr_c, #I_Bit:OR:F_Bit:OR:Mode_SVC
                STMFD sp!, {r2}                 ; push old task's pc
                STMFD sp!, {r4-r12,lr}          ; push old task's lr,r12-r4
                MOV r4, r1                      ; Special optimised code below
                MOV r5, r3
                LDMFD r4!, {r0-r3}              ; fetch old task's r0-r3 from IRQ stack
                STMFD sp!, {r0-r3}              ; push old task's r3-r0
                STMFD sp!, {r5}                 ; push old task's cpsr
                LDR r4, =rt_interrupt_from_thread
                LDR r5, [r4]
                STR sp, [r5]                    ; store sp in preempted task's TCB
                LDR r6, =rt_interrupt_to_thread
                LDR r6, [r6]
                LDR sp, [r6]                    ; get new task's stack pointer
                LDMFD sp!, {r4}                 ; pop new task's cpsr to spsr
                MSR spsr_cxsf, r4
                BIC r4, r4, #0x20               ; must be ARM mode
                MSR cpsr_cxsf, r4
                LDMFD sp!, {r0-r12,lr,pc}^     ; pop new task's r0-r12,lr & pc, copy spsr to cpsr
                ENDP
END
|
vandercookking/h7_device_RTT
| 15,055
|
rt-thread/libcpu/arm/lpc214x/start_rvds.S
|
;/*****************************************************************************/
;/* STARTUP.S: Startup file for Philips LPC2000 */
;/*****************************************************************************/
;/* <<< Use Configuration Wizard in Context Menu >>> */
;/*****************************************************************************/
;/* This file is part of the uVision/ARM development tools. */
;/* Copyright (c) 2005-2007 Keil Software. All rights reserved. */
;/* This software may only be used under the terms of a valid, current, */
;/* end user licence from KEIL for a compatible version of KEIL software */
;/* development tools. Nothing else gives you the right to use this software. */
;/*****************************************************************************/
;/*
; * The STARTUP.S code is executed after CPU Reset. This file may be
; * translated with the following SET symbols. In uVision these SET
; * symbols are entered under Options - ASM - Define.
; *
; * REMAP: when set the startup code initializes the register MEMMAP
; * which overwrites the settings of the CPU configuration pins. The
; * startup and interrupt vectors are remapped from:
; * 0x00000000 default setting (not remapped)
; * 0x80000000 when EXTMEM_MODE is used
; * 0x40000000 when RAM_MODE is used
; *
; * EXTMEM_MODE: when set the device is configured for code execution
; * from external memory starting at address 0x80000000.
; *
; * RAM_MODE: when set the device is configured for code execution
; * from on-chip RAM starting at address 0x40000000.
; *
; * EXTERNAL_MODE: when set the PIN2SEL values are written that enable
; * the external BUS at startup.
; */
; Standard definitions of Mode bits and Interrupt (I & F) flags in PSRs
Mode_USR EQU 0x10
Mode_FIQ EQU 0x11
Mode_IRQ EQU 0x12
Mode_SVC EQU 0x13
Mode_ABT EQU 0x17
Mode_UND EQU 0x1B
Mode_SYS EQU 0x1F
I_Bit EQU 0x80 ; when I bit is set, IRQ is disabled
F_Bit EQU 0x40 ; when F bit is set, FIQ is disabled
;// <h> Stack Configuration (Stack Sizes in Bytes)
;// <o0> Undefined Mode <0x0-0xFFFFFFFF:8>
;// <o1> Supervisor Mode <0x0-0xFFFFFFFF:8>
;// <o2> Abort Mode <0x0-0xFFFFFFFF:8>
;// <o3> Fast Interrupt Mode <0x0-0xFFFFFFFF:8>
;// <o4> Interrupt Mode <0x0-0xFFFFFFFF:8>
;// <o5> User/System Mode <0x0-0xFFFFFFFF:8>
;// </h>
UND_Stack_Size EQU 0x00000000
SVC_Stack_Size EQU 0x00000100
ABT_Stack_Size EQU 0x00000000
FIQ_Stack_Size EQU 0x00000000
IRQ_Stack_Size EQU 0x00000100
USR_Stack_Size EQU 0x00000100
ISR_Stack_Size EQU (UND_Stack_Size + SVC_Stack_Size + ABT_Stack_Size + \
FIQ_Stack_Size + IRQ_Stack_Size)
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE USR_Stack_Size
__initial_sp SPACE ISR_Stack_Size
Stack_Top
;// <h> Heap Configuration
;// <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF>
;// </h>
Heap_Size EQU 0x00000000
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
; VPBDIV definitions
VPBDIV EQU 0xE01FC100 ; VPBDIV Address
;// <e> VPBDIV Setup
;// <i> Peripheral Bus Clock Rate
;// <o1.0..1> VPBDIV: VPB Clock
;// <0=> VPB Clock = CPU Clock / 4
;// <1=> VPB Clock = CPU Clock
;// <2=> VPB Clock = CPU Clock / 2
;// <o1.4..5> XCLKDIV: XCLK Pin
;// <0=> XCLK Pin = CPU Clock / 4
;// <1=> XCLK Pin = CPU Clock
;// <2=> XCLK Pin = CPU Clock / 2
;// </e>
VPBDIV_SETUP EQU 0
VPBDIV_Val EQU 0x00000000
; Phase Locked Loop (PLL) definitions
PLL_BASE EQU 0xE01FC080 ; PLL Base Address
PLLCON_OFS EQU 0x00 ; PLL Control Offset
PLLCFG_OFS EQU 0x04 ; PLL Configuration Offset
PLLSTAT_OFS EQU 0x08 ; PLL Status Offset
PLLFEED_OFS EQU 0x0C ; PLL Feed Offset
PLLCON_PLLE EQU (1<<0) ; PLL Enable
PLLCON_PLLC EQU (1<<1) ; PLL Connect
PLLCFG_MSEL EQU (0x1F<<0) ; PLL Multiplier
PLLCFG_PSEL EQU (0x03<<5) ; PLL Divider
PLLSTAT_PLOCK EQU (1<<10) ; PLL Lock Status
;// <e> PLL Setup
;// <o1.0..4> MSEL: PLL Multiplier Selection
;// <1-32><#-1>
;// <i> M Value
;// <o1.5..6> PSEL: PLL Divider Selection
;// <0=> 1 <1=> 2 <2=> 4 <3=> 8
;// <i> P Value
;// </e>
PLL_SETUP EQU 1
PLLCFG_Val EQU 0x00000024
; Memory Accelerator Module (MAM) definitions
MAM_BASE EQU 0xE01FC000 ; MAM Base Address
MAMCR_OFS EQU 0x00 ; MAM Control Offset
MAMTIM_OFS EQU 0x04 ; MAM Timing Offset
;// <e> MAM Setup
;// <o1.0..1> MAM Control
;// <0=> Disabled
;// <1=> Partially Enabled
;// <2=> Fully Enabled
;// <i> Mode
;// <o2.0..2> MAM Timing
;// <0=> Reserved <1=> 1 <2=> 2 <3=> 3
;// <4=> 4 <5=> 5 <6=> 6 <7=> 7
;// <i> Fetch Cycles
;// </e>
MAM_SETUP EQU 1
MAMCR_Val EQU 0x00000002
MAMTIM_Val EQU 0x00000004
; External Memory Controller (EMC) definitions
EMC_BASE EQU 0xFFE00000 ; EMC Base Address
BCFG0_OFS EQU 0x00 ; BCFG0 Offset
BCFG1_OFS EQU 0x04 ; BCFG1 Offset
BCFG2_OFS EQU 0x08 ; BCFG2 Offset
BCFG3_OFS EQU 0x0C ; BCFG3 Offset
;// <e> External Memory Controller (EMC)
EMC_SETUP EQU 0
;// <e> Bank Configuration 0 (BCFG0)
;// <o1.0..3> IDCY: Idle Cycles <0-15>
;// <o1.5..9> WST1: Wait States 1 <0-31>
;// <o1.11..15> WST2: Wait States 2 <0-31>
;// <o1.10> RBLE: Read Byte Lane Enable
;// <o1.26> WP: Write Protect
;// <o1.27> BM: Burst ROM
;// <o1.28..29> MW: Memory Width <0=> 8-bit <1=> 16-bit
;// <2=> 32-bit <3=> Reserved
;// </e>
BCFG0_SETUP EQU 0
BCFG0_Val EQU 0x0000FBEF
;// <e> Bank Configuration 1 (BCFG1)
;// <o1.0..3> IDCY: Idle Cycles <0-15>
;// <o1.5..9> WST1: Wait States 1 <0-31>
;// <o1.11..15> WST2: Wait States 2 <0-31>
;// <o1.10> RBLE: Read Byte Lane Enable
;// <o1.26> WP: Write Protect
;// <o1.27> BM: Burst ROM
;// <o1.28..29> MW: Memory Width <0=> 8-bit <1=> 16-bit
;// <2=> 32-bit <3=> Reserved
;// </e>
BCFG1_SETUP EQU 0
BCFG1_Val EQU 0x0000FBEF
;// <e> Bank Configuration 2 (BCFG2)
;// <o1.0..3> IDCY: Idle Cycles <0-15>
;// <o1.5..9> WST1: Wait States 1 <0-31>
;// <o1.11..15> WST2: Wait States 2 <0-31>
;// <o1.10> RBLE: Read Byte Lane Enable
;// <o1.26> WP: Write Protect
;// <o1.27> BM: Burst ROM
;// <o1.28..29> MW: Memory Width <0=> 8-bit <1=> 16-bit
;// <2=> 32-bit <3=> Reserved
;// </e>
BCFG2_SETUP EQU 0
BCFG2_Val EQU 0x0000FBEF
;// <e> Bank Configuration 3 (BCFG3)
;// <o1.0..3> IDCY: Idle Cycles <0-15>
;// <o1.5..9> WST1: Wait States 1 <0-31>
;// <o1.11..15> WST2: Wait States 2 <0-31>
;// <o1.10> RBLE: Read Byte Lane Enable
;// <o1.26> WP: Write Protect
;// <o1.27> BM: Burst ROM
;// <o1.28..29> MW: Memory Width <0=> 8-bit <1=> 16-bit
;// <2=> 32-bit <3=> Reserved
;// </e>
BCFG3_SETUP EQU 0
BCFG3_Val EQU 0x0000FBEF
;// </e> End of EMC
; External Memory Pins definitions
PINSEL2 EQU 0xE002C014 ; PINSEL2 Address
PINSEL2_Val EQU 0x0E6149E4 ; CS0..3, OE, WE, BLS0..3,
; D0..31, A2..23, JTAG Pins
PRESERVE8
; Area Definition and Entry Point
; Startup Code must be linked first at Address at which it expects to run.
AREA RESET, CODE, READONLY
ARM
; Exception Vectors
; Mapped to Address 0.
; Absolute addressing mode must be used.
; Dummy Handlers are implemented as infinite loops which can be modified.
Vectors LDR PC, Reset_Addr
LDR PC, Undef_Addr
LDR PC, SWI_Addr
LDR PC, PAbt_Addr
LDR PC, DAbt_Addr
NOP ; Reserved Vector
LDR PC, IRQ_Addr
LDR PC, FIQ_Addr
Reset_Addr DCD Reset_Handler
Undef_Addr DCD Undef_Handler
SWI_Addr DCD SWI_Handler
PAbt_Addr DCD PAbt_Handler
DAbt_Addr DCD DAbt_Handler
DCD 0 ; Reserved Address
IRQ_Addr DCD IRQ_Handler
FIQ_Addr DCD FIQ_Handler
Undef_Handler B Undef_Handler
SWI_Handler B SWI_Handler
PAbt_Handler B PAbt_Handler
DAbt_Handler B DAbt_Handler
FIQ_Handler B FIQ_Handler
; Reset Handler
EXPORT Reset_Handler
; Reset entry point for LPC21xx (ARM7TDMI, RVDS/armasm syntax).
; Configures external bus pins, EMC banks, VPB divider, PLL, MAM and
; memory remap (all gated by the IF/ENDIF build-time switches above),
; then seeds a stack pointer for every ARM processor mode and jumps to
; the C library entry __main. Never returns.
Reset_Handler
; Setup External Memory Pins
IF :DEF:EXTERNAL_MODE
LDR R0, =PINSEL2
LDR R1, =PINSEL2_Val
STR R1, [R0]
ENDIF
; Setup External Memory Controller
IF EMC_SETUP <> 0
LDR R0, =EMC_BASE
IF BCFG0_SETUP <> 0
LDR R1, =BCFG0_Val
STR R1, [R0, #BCFG0_OFS]
ENDIF
IF BCFG1_SETUP <> 0
LDR R1, =BCFG1_Val
STR R1, [R0, #BCFG1_OFS]
ENDIF
IF BCFG2_SETUP <> 0
LDR R1, =BCFG2_Val
STR R1, [R0, #BCFG2_OFS]
ENDIF
IF BCFG3_SETUP <> 0
LDR R1, =BCFG3_Val
STR R1, [R0, #BCFG3_OFS]
ENDIF
ENDIF ; EMC_SETUP
; Setup VPBDIV
IF VPBDIV_SETUP <> 0
LDR R0, =VPBDIV
LDR R1, =VPBDIV_Val
STR R1, [R0]
ENDIF
; Setup PLL
IF PLL_SETUP <> 0
LDR R0, =PLL_BASE
MOV R1, #0xAA ; PLL feed sequence byte 1 (hardware-mandated unlock value)
MOV R2, #0x55 ; PLL feed sequence byte 2
; Configure and Enable PLL
MOV R3, #PLLCFG_Val
STR R3, [R0, #PLLCFG_OFS]
MOV R3, #PLLCON_PLLE
STR R3, [R0, #PLLCON_OFS]
STR R1, [R0, #PLLFEED_OFS] ; writing 0xAA then 0x55 commits PLLCON/PLLCFG
STR R2, [R0, #PLLFEED_OFS]
; Wait until PLL Locked
PLL_Loop LDR R3, [R0, #PLLSTAT_OFS]
ANDS R3, R3, #PLLSTAT_PLOCK
BEQ PLL_Loop ; spin until PLOCK bit is set
; Switch to PLL Clock
MOV R3, #(PLLCON_PLLE:OR:PLLCON_PLLC) ; keep enabled AND connect as CPU clock
STR R3, [R0, #PLLCON_OFS]
STR R1, [R0, #PLLFEED_OFS] ; second feed sequence commits the connect
STR R2, [R0, #PLLFEED_OFS]
ENDIF ; PLL_SETUP
; Setup MAM
IF MAM_SETUP <> 0
LDR R0, =MAM_BASE
MOV R1, #MAMTIM_Val ; program fetch timing before enabling the accelerator
STR R1, [R0, #MAMTIM_OFS]
MOV R1, #MAMCR_Val
STR R1, [R0, #MAMCR_OFS]
ENDIF ; MAM_SETUP
; Memory Mapping (when Interrupt Vectors are in RAM)
MEMMAP EQU 0xE01FC040 ; Memory Mapping Control
IF :DEF:REMAP
LDR R0, =MEMMAP
IF :DEF:EXTMEM_MODE
MOV R1, #3 ; vectors mapped from external memory
ELIF :DEF:RAM_MODE
MOV R1, #2 ; vectors mapped from internal RAM
ELSE
MOV R1, #1 ; vectors from internal flash (user mode)
ENDIF
STR R1, [R0]
ENDIF
; Initialise Interrupt System
; ...
; Setup Stack for each mode
; R0 walks downward from Stack_Top; each mode gets its own slice whose
; size is the *_Stack_Size EQU. IRQ/FIQ stay disabled (I_Bit/F_Bit)
; throughout mode switching.
LDR R0, =Stack_Top
; Enter Undefined Instruction Mode and set its Stack Pointer
MSR CPSR_c, #Mode_UND:OR:I_Bit:OR:F_Bit
MOV SP, R0
SUB R0, R0, #UND_Stack_Size
; Enter Abort Mode and set its Stack Pointer
MSR CPSR_c, #Mode_ABT:OR:I_Bit:OR:F_Bit
MOV SP, R0
SUB R0, R0, #ABT_Stack_Size
; Enter FIQ Mode and set its Stack Pointer
MSR CPSR_c, #Mode_FIQ:OR:I_Bit:OR:F_Bit
MOV SP, R0
SUB R0, R0, #FIQ_Stack_Size
; Enter IRQ Mode and set its Stack Pointer
MSR CPSR_c, #Mode_IRQ:OR:I_Bit:OR:F_Bit
MOV SP, R0
SUB R0, R0, #IRQ_Stack_Size
; Enter Supervisor Mode and set its Stack Pointer
; Execution continues (and enters __main) in SVC mode.
MSR CPSR_c, #Mode_SVC:OR:I_Bit:OR:F_Bit
MOV SP, R0
; SUB R0, R0, #SVC_Stack_Size
; Enter User Mode and set its Stack Pointer
; RT-Thread does not use user mode
; MSR CPSR_c, #Mode_USR
IF :DEF:__MICROLIB
EXPORT __initial_sp
ELSE
; MOV SP, R0
; SUB SL, SP, #USR_Stack_Size
ENDIF
; Enter the C code
IMPORT __main
LDR R0, =__main
BX R0 ; tail-jump to C library startup; never returns
IMPORT rt_interrupt_enter
IMPORT rt_interrupt_leave
IMPORT rt_thread_switch_interrupt_flag
IMPORT rt_interrupt_from_thread
IMPORT rt_interrupt_to_thread
IMPORT rt_hw_trap_irq
IMPORT rt_hw_context_switch_interrupt_do
IRQ_Handler PROC
EXPORT IRQ_Handler
; Common IRQ entry for RT-Thread on ARM7 (ARM state).
; Saves caller context, dispatches to the C trap handler, then either
; returns to the interrupted code or diverts into the scheduler's
; context-switch path if a switch was requested during the ISR.
STMFD sp!, {r0-r12,lr} ; save all AACPS-volatile regs + IRQ lr on IRQ stack
BL rt_interrupt_enter ; tell kernel we are in interrupt context
BL rt_hw_trap_irq ; C-level IRQ dispatch
BL rt_interrupt_leave
; if rt_thread_switch_interrupt_flag set, jump to
; rt_hw_context_switch_interrupt_do and don't return
LDR r0, =rt_thread_switch_interrupt_flag
LDR r1, [r0]
CMP r1, #1
BEQ rt_hw_context_switch_interrupt_do ; tail-jump; saved frame is consumed there
LDMFD sp!, {r0-r12,lr}
SUBS pc, lr, #4 ; IRQ return: lr-4 is the interrupted insn; S-form restores CPSR from SPSR
ENDP
IF :DEF:__MICROLIB
EXPORT __heap_base
EXPORT __heap_limit
ELSE
; User Initial Stack & Heap
AREA |.text|, CODE, READONLY
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
; ARM C library two-region memory model callback.
; Returns: R0 = heap base, R1 = initial stack pointer (top of user stack),
;          R2 = heap limit, R3 = stack limit (bottom of stack region).
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + USR_Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ENDIF
END
|
vandercookking/h7_device_RTT
| 7,011
|
rt-thread/libcpu/arm/cortex-m7/context_iar.S
|
;/*
; * Copyright (c) 2006-2018, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date Author Notes
; * 2009-01-17 Bernard first version
; * 2009-09-27     Bernard      add protect when context switch occurs
; * 2012-01-01 aozima support context switch load/store FPU register.
; * 2013-06-18 aozima add restore MSP feature.
; * 2013-06-23 aozima support lazy stack optimized.
; * 2018-07-24 aozima enhancement hard fault exception handler.
; */
;/**
; * @addtogroup cortex-m4
; */
;/*@{*/
SCB_VTOR EQU 0xE000ED08 ; Vector Table Offset Register
NVIC_INT_CTRL EQU 0xE000ED04 ; interrupt control state register
NVIC_SYSPRI2 EQU 0xE000ED20 ; system priority register (2)
NVIC_PENDSV_PRI EQU 0xFFFF0000 ; PendSV and SysTick priority value (lowest)
NVIC_PENDSVSET EQU 0x10000000 ; value to trigger PendSV exception
SECTION .text:CODE(2)
THUMB
REQUIRE8
PRESERVE8
IMPORT rt_thread_switch_interrupt_flag
IMPORT rt_interrupt_from_thread
IMPORT rt_interrupt_to_thread
;/*
; * rt_base_t rt_hw_interrupt_disable();
; */
EXPORT rt_hw_interrupt_disable
rt_hw_interrupt_disable:
MRS r0, PRIMASK
CPSID I
BX LR
;/*
; * void rt_hw_interrupt_enable(rt_base_t level);
; */
EXPORT rt_hw_interrupt_enable
rt_hw_interrupt_enable:
MSR PRIMASK, r0
BX LR
;/*
; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
; * r0 --> from
; * r1 --> to
; */
EXPORT rt_hw_context_switch_interrupt
EXPORT rt_hw_context_switch
rt_hw_context_switch_interrupt:
rt_hw_context_switch:
; set rt_thread_switch_interrupt_flag to 1
LDR r2, =rt_thread_switch_interrupt_flag
LDR r3, [r2]
CMP r3, #1
BEQ _reswitch
MOV r3, #1
STR r3, [r2]
LDR r2, =rt_interrupt_from_thread ; set rt_interrupt_from_thread
STR r0, [r2]
_reswitch
LDR r2, =rt_interrupt_to_thread ; set rt_interrupt_to_thread
STR r1, [r2]
LDR r0, =NVIC_INT_CTRL ; trigger the PendSV exception (causes context switch)
LDR r1, =NVIC_PENDSVSET
STR r1, [r0]
BX LR
; r0 --> switch from thread stack
; r1 --> switch to thread stack
; psr, pc, lr, r12, r3, r2, r1, r0 are pushed into [from] stack
EXPORT PendSV_Handler
; PendSV context switch (Cortex-M7, IAR syntax).
; Hardware has already stacked psr,pc,lr,r12,r3-r0 on the *from* thread's
; PSP. This handler saves r4-r11 (and the FPU s16-s31 + a "used FPU" flag
; when built with VFP), stores the resulting SP into the from-thread,
; then restores the same layout from the to-thread's saved SP.
PendSV_Handler:
; disable interrupt to protect context switch
MRS r2, PRIMASK ; r2 = previous interrupt mask, restored at exit
CPSID I
; get rt_thread_switch_interrupt_flag
LDR r0, =rt_thread_switch_interrupt_flag
LDR r1, [r0]
CBZ r1, pendsv_exit ; pendsv already handled
; clear rt_thread_switch_interrupt_flag to 0
MOV r1, #0x00
STR r1, [r0]
LDR r0, =rt_interrupt_from_thread
LDR r1, [r0]
CBZ r1, switch_to_thread ; skip register save at the first time
MRS r1, psp ; get from thread stack pointer
#if defined ( __ARMVFP__ )
TST lr, #0x10 ; if(!EXC_RETURN[4]) -> thread used the FPU
BNE skip_push_fpu
VSTMDB r1!, {d8 - d15} ; push FPU register s16~s31
skip_push_fpu
#endif
STMFD r1!, {r4 - r11} ; push r4 - r11 register
#if defined ( __ARMVFP__ )
MOV r4, #0x00 ; flag = 0
TST lr, #0x10 ; if(!EXC_RETURN[4])
BNE push_flag
MOV r4, #0x01 ; flag = 1 (FPU state is on this stack frame)
push_flag
;STMFD r1!, {r4} ; push flag
SUB r1, r1, #0x04
STR r4, [r1]
#endif
LDR r0, [r0] ; r0 = from-thread object (first field is its saved SP)
STR r1, [r0] ; update from thread stack pointer
switch_to_thread
LDR r1, =rt_interrupt_to_thread
LDR r1, [r1]
LDR r1, [r1] ; load thread stack pointer
#if defined ( __ARMVFP__ )
LDMFD r1!, {r3} ; pop flag
#endif
LDMFD r1!, {r4 - r11} ; pop r4 - r11 register
#if defined ( __ARMVFP__ )
CBZ r3, skip_pop_fpu ; only restore s16-s31 if the thread had pushed them
VLDMIA r1!, {d8 - d15} ; pop FPU register s16~s31
skip_pop_fpu
#endif
MSR psp, r1 ; update stack pointer
#if defined ( __ARMVFP__ )
ORR lr, lr, #0x10 ; lr |= (1 << 4), clean FPCA.
CBZ r3, return_without_fpu ; if(flag_r3 != 0)
BIC lr, lr, #0x10 ; lr &= ~(1 << 4), set FPCA.
return_without_fpu
#endif
pendsv_exit
; restore interrupt
MSR PRIMASK, r2
ORR lr, lr, #0x04 ; force return to thread mode using PSP
BX lr
;/*
; * void rt_hw_context_switch_to(rt_uint32 to);
; * r0 --> to
; */
EXPORT rt_hw_context_switch_to
rt_hw_context_switch_to:
LDR r1, =rt_interrupt_to_thread
STR r0, [r1]
#if defined ( __ARMVFP__ )
; CLEAR CONTROL.FPCA
MRS r2, CONTROL ; read
BIC r2, r2, #0x04 ; modify
MSR CONTROL, r2 ; write-back
#endif
; set from thread to 0
LDR r1, =rt_interrupt_from_thread
MOV r0, #0x0
STR r0, [r1]
; set interrupt flag to 1
LDR r1, =rt_thread_switch_interrupt_flag
MOV r0, #1
STR r0, [r1]
; set the PendSV and SysTick exception priority
LDR r0, =NVIC_SYSPRI2
LDR r1, =NVIC_PENDSV_PRI
LDR.W r2, [r0,#0x00] ; read
ORR r1,r1,r2 ; modify
STR r1, [r0] ; write-back
LDR r0, =NVIC_INT_CTRL ; trigger the PendSV exception (causes context switch)
LDR r1, =NVIC_PENDSVSET
STR r1, [r0]
; restore MSP
LDR r0, =SCB_VTOR
LDR r0, [r0]
LDR r0, [r0]
NOP
MSR msp, r0
; enable interrupts at processor level
CPSIE F
CPSIE I
; ensure PendSV exception taken place before subsequent operation
DSB
ISB
; never reach here!
; compatible with old version
EXPORT rt_hw_interrupt_thread_switch
rt_hw_interrupt_thread_switch:
BX lr
IMPORT rt_hw_hard_fault_exception
EXPORT HardFault_Handler
; Hard fault entry (Cortex-M7, IAR syntax).
; Builds a full exception frame (r4-r11 + exec-return, plus a dummy
; word mirroring the context-switch frame layout under VFP) on whichever
; stack faulted, then hands its address to the C diagnostic handler.
HardFault_Handler:
; get current context
MRS r0, msp ; get fault context from handler.
TST lr, #0x04 ; if(!EXC_RETURN[2])
BEQ _get_sp_done
MRS r0, psp ; get fault context from thread.
_get_sp_done
STMFD r0!, {r4 - r11} ; push r4 - r11 register
;STMFD r0!, {lr} ; push exec_return register
#if defined ( __ARMVFP__ )
SUB r0, r0, #0x04 ; push dummy for flag
STR lr, [r0]
#endif
SUB r0, r0, #0x04
STR lr, [r0] ; push exec_return so C code can decode the frame
TST lr, #0x04 ; if(!EXC_RETURN[2])
BEQ _update_msp
MSR psp, r0 ; update stack pointer to PSP.
B _update_done
_update_msp
MSR msp, r0 ; update stack pointer to MSP.
_update_done
PUSH {lr}
BL rt_hw_hard_fault_exception ; C handler; r0 = pointer to saved context
POP {lr}
ORR lr, lr, #0x04 ; return to thread mode on PSP
BX lr
END
|
vandercookking/h7_device_RTT
| 7,122
|
rt-thread/libcpu/arm/cortex-m7/context_gcc.S
|
/*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2009-10-11 Bernard first version
* 2012-01-01 aozima support context switch load/store FPU register.
* 2013-06-18 aozima add restore MSP feature.
* 2013-06-23 aozima support lazy stack optimized.
* 2018-07-24 aozima enhancement hard fault exception handler.
*/
/**
* @addtogroup cortex-m4
*/
/*@{*/
.cpu cortex-m4
.syntax unified
.thumb
.text
.equ SCB_VTOR, 0xE000ED08 /* Vector Table Offset Register */
.equ NVIC_INT_CTRL, 0xE000ED04 /* interrupt control state register */
.equ NVIC_SYSPRI2, 0xE000ED20 /* system priority register (2) */
.equ NVIC_PENDSV_PRI, 0xFFFF0000 /* PendSV and SysTick priority value (lowest) */
.equ NVIC_PENDSVSET, 0x10000000 /* value to trigger PendSV exception */
/*
* rt_base_t rt_hw_interrupt_disable();
*/
.global rt_hw_interrupt_disable
.type rt_hw_interrupt_disable, %function
rt_hw_interrupt_disable:
MRS r0, PRIMASK
CPSID I
BX LR
/*
* void rt_hw_interrupt_enable(rt_base_t level);
*/
.global rt_hw_interrupt_enable
.type rt_hw_interrupt_enable, %function
rt_hw_interrupt_enable:
MSR PRIMASK, r0
BX LR
/*
* void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
* r0 --> from
* r1 --> to
*/
.global rt_hw_context_switch_interrupt
.type rt_hw_context_switch_interrupt, %function
.global rt_hw_context_switch
.type rt_hw_context_switch, %function
rt_hw_context_switch_interrupt:
rt_hw_context_switch:
/* set rt_thread_switch_interrupt_flag to 1 */
LDR r2, =rt_thread_switch_interrupt_flag
LDR r3, [r2]
CMP r3, #1
BEQ _reswitch
MOV r3, #1
STR r3, [r2]
LDR r2, =rt_interrupt_from_thread /* set rt_interrupt_from_thread */
STR r0, [r2]
_reswitch:
LDR r2, =rt_interrupt_to_thread /* set rt_interrupt_to_thread */
STR r1, [r2]
LDR r0, =NVIC_INT_CTRL /* trigger the PendSV exception (causes context switch) */
LDR r1, =NVIC_PENDSVSET
STR r1, [r0]
BX LR
/* r0 --> switch from thread stack
* r1 --> switch to thread stack
* psr, pc, lr, r12, r3, r2, r1, r0 are pushed into [from] stack
*/
.global PendSV_Handler
.type PendSV_Handler, %function
/* PendSV context switch (Cortex-M7, GNU as, unified syntax).
 * Hardware has stacked psr,pc,lr,r12,r3-r0 on the from-thread's PSP.
 * This saves r4-r11 (plus s16-s31 and a "used FPU" flag under VFP),
 * records the SP into the from-thread, and restores the to-thread.
 * Functionally identical to the IAR/RVDS variants of this file. */
PendSV_Handler:
    /* disable interrupt to protect context switch */
    MRS r2, PRIMASK                     /* r2 = saved interrupt mask, restored at exit */
    CPSID   I
    /* get rt_thread_switch_interrupt_flag */
    LDR r0, =rt_thread_switch_interrupt_flag
    LDR r1, [r0]
    CBZ r1, pendsv_exit         /* pendsv already handled */
    /* clear rt_thread_switch_interrupt_flag to 0 */
    MOV r1, #0x00
    STR r1, [r0]
    LDR r0, =rt_interrupt_from_thread
    LDR r1, [r0]
    CBZ r1, switch_to_thread    /* skip register save at the first time */
    MRS r1, psp                 /* get from thread stack pointer */
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    TST     lr, #0x10           /* if(!EXC_RETURN[4]) -> thread used the FPU */
    IT      EQ
    VSTMDBEQ r1!, {d8 - d15}    /* push FPU register s16~s31 */
#endif
    STMFD   r1!, {r4 - r11}     /* push r4 - r11 register */
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    MOV     r4, #0x00           /* flag = 0 */
    TST     lr, #0x10           /* if(!EXC_RETURN[4]) */
    IT      EQ
    MOVEQ   r4, #0x01           /* flag = 1: FPU state is on this frame */
    STMFD   r1!, {r4}           /* push flag */
#endif
    LDR r0, [r0]                /* r0 = from-thread object (first field = saved SP) */
    STR r1, [r0]                /* update from thread stack pointer */
switch_to_thread:
    LDR r1, =rt_interrupt_to_thread
    LDR r1, [r1]
    LDR r1, [r1]                /* load thread stack pointer */
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    LDMFD   r1!, {r3}           /* pop flag */
#endif
    LDMFD   r1!, {r4 - r11}     /* pop r4 - r11 register */
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    CMP     r3,  #0             /* if(flag_r3 != 0) */
    IT      NE
    VLDMIANE r1!, {d8 - d15}    /* pop FPU register s16~s31 */
#endif
    MSR psp, r1                 /* update stack pointer */
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    ORR     lr, lr, #0x10       /* lr |=  (1 << 4), clean FPCA. */
    CMP     r3,  #0             /* if(flag_r3 != 0) */
    IT      NE
    BICNE   lr, lr, #0x10       /* lr &= ~(1 << 4), set FPCA. */
#endif
pendsv_exit:
    /* restore interrupt */
    MSR PRIMASK, r2
    ORR lr, lr, #0x04           /* return to thread mode using PSP */
    BX  lr
/*
* void rt_hw_context_switch_to(rt_uint32 to);
* r0 --> to
*/
.global rt_hw_context_switch_to
.type rt_hw_context_switch_to, %function
rt_hw_context_switch_to:
LDR r1, =rt_interrupt_to_thread
STR r0, [r1]
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
/* CLEAR CONTROL.FPCA */
MRS r2, CONTROL /* read */
BIC r2, #0x04 /* modify */
MSR CONTROL, r2 /* write-back */
#endif
/* set from thread to 0 */
LDR r1, =rt_interrupt_from_thread
MOV r0, #0x0
STR r0, [r1]
/* set interrupt flag to 1 */
LDR r1, =rt_thread_switch_interrupt_flag
MOV r0, #1
STR r0, [r1]
/* set the PendSV and SysTick exception priority */
LDR r0, =NVIC_SYSPRI2
LDR r1, =NVIC_PENDSV_PRI
LDR.W r2, [r0,#0x00] /* read */
ORR r1,r1,r2 /* modify */
STR r1, [r0] /* write-back */
LDR r0, =NVIC_INT_CTRL /* trigger the PendSV exception (causes context switch) */
LDR r1, =NVIC_PENDSVSET
STR r1, [r0]
/* restore MSP */
LDR r0, =SCB_VTOR
LDR r0, [r0]
LDR r0, [r0]
NOP
MSR msp, r0
/* enable interrupts at processor level */
CPSIE F
CPSIE I
/* ensure PendSV exception taken place before subsequent operation */
DSB
ISB
/* never reach here! */
/* compatible with old version */
.global rt_hw_interrupt_thread_switch
.type rt_hw_interrupt_thread_switch, %function
rt_hw_interrupt_thread_switch:
BX lr
NOP
.global HardFault_Handler
.type HardFault_Handler, %function
HardFault_Handler:
/* get current context */
MRS r0, msp /* get fault context from handler. */
TST lr, #0x04 /* if(!EXC_RETURN[2]) */
BEQ _get_sp_done
MRS r0, psp /* get fault context from thread. */
_get_sp_done:
STMFD r0!, {r4 - r11} /* push r4 - r11 register */
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
STMFD r0!, {lr} /* push dummy for flag */
#endif
STMFD r0!, {lr} /* push exec_return register */
TST lr, #0x04 /* if(!EXC_RETURN[2]) */
BEQ _update_msp
MSR psp, r0 /* update stack pointer to PSP. */
B _update_done
_update_msp:
MSR msp, r0 /* update stack pointer to MSP. */
_update_done:
PUSH {LR}
BL rt_hw_hard_fault_exception
POP {LR}
ORR lr, lr, #0x04
BX lr
|
vandercookking/h7_device_RTT
| 7,033
|
rt-thread/libcpu/arm/cortex-m7/context_rvds.S
|
;/*
; * Copyright (c) 2006-2018, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date Author Notes
; * 2009-01-17 Bernard first version.
; * 2012-01-01 aozima support context switch load/store FPU register.
; * 2013-06-18 aozima add restore MSP feature.
; * 2013-06-23 aozima support lazy stack optimized.
; * 2018-07-24 aozima enhancement hard fault exception handler.
; */
;/**
; * @addtogroup cortex-m4
; */
;/*@{*/
SCB_VTOR EQU 0xE000ED08 ; Vector Table Offset Register
NVIC_INT_CTRL EQU 0xE000ED04 ; interrupt control state register
NVIC_SYSPRI2 EQU 0xE000ED20 ; system priority register (2)
NVIC_PENDSV_PRI EQU 0xFFFF0000 ; PendSV and SysTick priority value (lowest)
NVIC_PENDSVSET EQU 0x10000000 ; value to trigger PendSV exception
AREA |.text|, CODE, READONLY, ALIGN=2
THUMB
REQUIRE8
PRESERVE8
IMPORT rt_thread_switch_interrupt_flag
IMPORT rt_interrupt_from_thread
IMPORT rt_interrupt_to_thread
;/*
; * rt_base_t rt_hw_interrupt_disable();
; */
rt_hw_interrupt_disable PROC
EXPORT rt_hw_interrupt_disable
; rt_base_t rt_hw_interrupt_disable(void)
; Returns the previous PRIMASK in r0, then masks all configurable
; interrupts; pair with rt_hw_interrupt_enable(level) to restore.
MRS r0, PRIMASK
CPSID I
BX LR
ENDP
;/*
; * void rt_hw_interrupt_enable(rt_base_t level);
; */
rt_hw_interrupt_enable PROC
EXPORT rt_hw_interrupt_enable
; void rt_hw_interrupt_enable(rt_base_t level)
; Restores PRIMASK from r0 (the value returned by rt_hw_interrupt_disable).
MSR PRIMASK, r0
BX LR
ENDP
;/*
; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
; * r0 --> from
; * r1 --> to
; */
rt_hw_context_switch_interrupt
EXPORT rt_hw_context_switch_interrupt
rt_hw_context_switch PROC
EXPORT rt_hw_context_switch
; set rt_thread_switch_interrupt_flag to 1
LDR r2, =rt_thread_switch_interrupt_flag
LDR r3, [r2]
CMP r3, #1
BEQ _reswitch
MOV r3, #1
STR r3, [r2]
LDR r2, =rt_interrupt_from_thread ; set rt_interrupt_from_thread
STR r0, [r2]
_reswitch
LDR r2, =rt_interrupt_to_thread ; set rt_interrupt_to_thread
STR r1, [r2]
LDR r0, =NVIC_INT_CTRL ; trigger the PendSV exception (causes context switch)
LDR r1, =NVIC_PENDSVSET
STR r1, [r0]
BX LR
ENDP
; r0 --> switch from thread stack
; r1 --> switch to thread stack
; psr, pc, lr, r12, r3, r2, r1, r0 are pushed into [from] stack
PendSV_Handler PROC
EXPORT PendSV_Handler
; PendSV context switch (Cortex-M7, RVDS/armasm syntax).
; Same frame layout as the IAR/GCC variants: hardware-stacked frame on
; PSP, then [flag,] r4-r11 [, s16-s31] pushed by software.
; disable interrupt to protect context switch
MRS r2, PRIMASK ; r2 = saved interrupt mask, restored at exit
CPSID I
; get rt_thread_switch_interrupt_flag
LDR r0, =rt_thread_switch_interrupt_flag
LDR r1, [r0]
CBZ r1, pendsv_exit ; pendsv already handled
; clear rt_thread_switch_interrupt_flag to 0
MOV r1, #0x00
STR r1, [r0]
LDR r0, =rt_interrupt_from_thread
LDR r1, [r0]
CBZ r1, switch_to_thread ; skip register save at the first time
MRS r1, psp ; get from thread stack pointer
IF {FPU} != "SoftVFP"
TST lr, #0x10 ; if(!EXC_RETURN[4]) -> thread used the FPU
VSTMFDEQ r1!, {d8 - d15} ; push FPU register s16~s31
ENDIF
STMFD r1!, {r4 - r11} ; push r4 - r11 register
IF {FPU} != "SoftVFP"
MOV r4, #0x00 ; flag = 0
TST lr, #0x10 ; if(!EXC_RETURN[4])
MOVEQ r4, #0x01 ; flag = 1 (FPU state is on this frame)
STMFD r1!, {r4} ; push flag
ENDIF
LDR r0, [r0] ; r0 = from-thread object (first field = saved SP)
STR r1, [r0] ; update from thread stack pointer
switch_to_thread
LDR r1, =rt_interrupt_to_thread
LDR r1, [r1]
LDR r1, [r1] ; load thread stack pointer
IF {FPU} != "SoftVFP"
LDMFD r1!, {r3} ; pop flag
ENDIF
LDMFD r1!, {r4 - r11} ; pop r4 - r11 register
IF {FPU} != "SoftVFP"
CMP r3, #0 ; if(flag_r3 != 0)
VLDMFDNE r1!, {d8 - d15} ; pop FPU register s16~s31
ENDIF
MSR psp, r1 ; update stack pointer
IF {FPU} != "SoftVFP"
ORR lr, lr, #0x10 ; lr |= (1 << 4), clean FPCA.
CMP r3, #0 ; if(flag_r3 != 0)
BICNE lr, lr, #0x10 ; lr &= ~(1 << 4), set FPCA.
ENDIF
pendsv_exit
; restore interrupt
MSR PRIMASK, r2
ORR lr, lr, #0x04 ; return to thread mode using PSP
BX lr
ENDP
;/*
; * void rt_hw_context_switch_to(rt_uint32 to);
; * r0 --> to
; * this function is used to perform the first thread switch
; */
rt_hw_context_switch_to PROC
EXPORT rt_hw_context_switch_to
; set to thread
LDR r1, =rt_interrupt_to_thread
STR r0, [r1]
IF {FPU} != "SoftVFP"
; CLEAR CONTROL.FPCA
MRS r2, CONTROL ; read
BIC r2, #0x04 ; modify
MSR CONTROL, r2 ; write-back
ENDIF
; set from thread to 0
LDR r1, =rt_interrupt_from_thread
MOV r0, #0x0
STR r0, [r1]
; set interrupt flag to 1
LDR r1, =rt_thread_switch_interrupt_flag
MOV r0, #1
STR r0, [r1]
; set the PendSV and SysTick exception priority
LDR r0, =NVIC_SYSPRI2
LDR r1, =NVIC_PENDSV_PRI
LDR.W r2, [r0,#0x00] ; read
ORR r1,r1,r2 ; modify
STR r1, [r0] ; write-back
; trigger the PendSV exception (causes context switch)
LDR r0, =NVIC_INT_CTRL
LDR r1, =NVIC_PENDSVSET
STR r1, [r0]
; restore MSP
LDR r0, =SCB_VTOR
LDR r0, [r0]
LDR r0, [r0]
MSR msp, r0
; enable interrupts at processor level
CPSIE F
CPSIE I
; ensure PendSV exception taken place before subsequent operation
DSB
ISB
; never reach here!
ENDP
; compatible with old version
rt_hw_interrupt_thread_switch PROC
EXPORT rt_hw_interrupt_thread_switch
BX lr
ENDP
IMPORT rt_hw_hard_fault_exception
EXPORT HardFault_Handler
EXPORT MemManage_Handler
HardFault_Handler PROC
MemManage_Handler
; get current context
TST lr, #0x04 ; if(!EXC_RETURN[2])
ITE EQ
MRSEQ r0, msp ; [2]=0 ==> Z=1, get fault context from handler.
MRSNE r0, psp ; [2]=1 ==> Z=0, get fault context from thread.
STMFD r0!, {r4 - r11} ; push r4 - r11 register
IF {FPU} != "SoftVFP"
STMFD r0!, {lr} ; push dummy for flag
ENDIF
STMFD r0!, {lr} ; push exec_return register
TST lr, #0x04 ; if(!EXC_RETURN[2])
ITE EQ
MSREQ msp, r0 ; [2]=0 ==> Z=1, update stack pointer to MSP.
MSRNE psp, r0 ; [2]=1 ==> Z=0, update stack pointer to PSP.
PUSH {lr}
BL rt_hw_hard_fault_exception
POP {lr}
ORR lr, lr, #0x04
BX lr
ENDP
ALIGN 4
END
|
vandercookking/h7_device_RTT
| 10,762
|
rt-thread/libcpu/arm/cortex-m33/context_iar.S
|
;/*
; * Copyright (c) 2006-2018, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date Author Notes
; * 2009-01-17 Bernard first version
; * 2009-09-27     Bernard      add protect when context switch occurs
; * 2012-01-01 aozima support context switch load/store FPU register.
; * 2013-06-18 aozima add restore MSP feature.
; * 2013-06-23 aozima support lazy stack optimized.
; * 2018-07-24 aozima enhancement hard fault exception handler.
; */
;/**
; * @addtogroup cortex-m33
; */
;/*@{*/
SCB_VTOR EQU 0xE000ED08 ; Vector Table Offset Register
NVIC_INT_CTRL EQU 0xE000ED04 ; interrupt control state register
NVIC_SYSPRI2 EQU 0xE000ED20 ; system priority register (2)
NVIC_PENDSV_PRI EQU 0xFFFF0000 ; PendSV and SysTick priority value (lowest)
NVIC_PENDSVSET EQU 0x10000000 ; value to trigger PendSV exception
SECTION .text:CODE(2)
THUMB
REQUIRE8
PRESERVE8
IMPORT rt_thread_switch_interrupt_flag
IMPORT rt_interrupt_from_thread
IMPORT rt_interrupt_to_thread
IMPORT rt_trustzone_current_context
IMPORT rt_trustzone_context_load
IMPORT rt_trustzone_context_store
;/*
; * rt_base_t rt_hw_interrupt_disable();
; */
EXPORT rt_hw_interrupt_disable
rt_hw_interrupt_disable:
MRS r0, PRIMASK
CPSID I
BX LR
;/*
; * void rt_hw_interrupt_enable(rt_base_t level);
; */
EXPORT rt_hw_interrupt_enable
rt_hw_interrupt_enable:
MSR PRIMASK, r0
BX LR
;/*
; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
; * r0 --> from
; * r1 --> to
; */
EXPORT rt_hw_context_switch_interrupt
EXPORT rt_hw_context_switch
rt_hw_context_switch_interrupt:
rt_hw_context_switch:
; set rt_thread_switch_interrupt_flag to 1
LDR r2, =rt_thread_switch_interrupt_flag
LDR r3, [r2]
CMP r3, #1
BEQ _reswitch
MOV r3, #1
STR r3, [r2]
LDR r2, =rt_interrupt_from_thread ; set rt_interrupt_from_thread
STR r0, [r2]
_reswitch
LDR r2, =rt_interrupt_to_thread ; set rt_interrupt_to_thread
STR r1, [r2]
LDR r0, =NVIC_INT_CTRL ; trigger the PendSV exception (causes context switch)
LDR r1, =NVIC_PENDSVSET
STR r1, [r0]
BX LR
; r0 --> switch from thread stack
; r1 --> switch to thread stack
; psr, pc, lr, r12, r3, r2, r1, r0 are pushed into [from] stack
EXPORT PendSV_Handler
; PendSV context switch (Cortex-M33 with TrustZone, IAR syntax).
; In addition to r4-r11 (and optionally s16-s31), each thread frame
; stores {current secure context, EXC_RETURN, PSPLIM, CONTROL}. When the
; outgoing thread was executing secure code (EXC_RETURN[6] set), the
; secure-side state is saved/restored through rt_trustzone_context_*.
PendSV_Handler:
; disable interrupt to protect context switch
MRS r2, PRIMASK
CPSID I
; get rt_thread_switch_interrupt_flag
LDR r0, =rt_thread_switch_interrupt_flag ; r0 = &rt_thread_switch_interrupt_flag
LDR r1, [r0] ; r1 = *r0
CMP r1, #0x00 ; compare r1 == 0x00
BNE schedule
MSR PRIMASK, r2 ; if r1 == 0x00, do msr PRIMASK, r2
BX lr ; if r1 == 0x00, do bx lr (nothing to switch)
schedule
PUSH {r2} ; store interrupt state
; clear rt_thread_switch_interrupt_flag to 0
MOV r1, #0x00 ; r1 = 0x00
STR r1, [r0] ; *r0 = r1
; skip register save at the first time
LDR r0, =rt_interrupt_from_thread ; r0 = &rt_interrupt_from_thread
LDR r1, [r0] ; r1 = *r0
CBZ r1, switch_to_thread ; if r1 == 0, goto switch_to_thread
; Whether TrustZone thread stack exists
LDR r1, =rt_trustzone_current_context ; r1 = &rt_secure_current_context
LDR r1, [r1] ; r1 = *r1
CBZ r1, contex_ns_store ; if r1 == 0, goto contex_ns_store
;call TrustZone fun, Save TrustZone stack
STMFD sp!, {r0-r1, lr} ; push register
MOV r0, r1 ; r0 = rt_secure_current_context
BL rt_trustzone_context_store ; call TrustZone store fun
LDMFD sp!, {r0-r1, lr} ; pop register
; check break from TrustZone
MOV r2, lr ; r2 = lr
TST r2, #0x40 ; if EXC_RETURN[6] is 1, TrustZone stack was used
BEQ contex_ns_store ; if r2 & 0x40 == 0, goto contex_ns_store
; push PSPLIM CONTROL PSP LR current_context to stack
MRS r3, psplim ; r3 = psplim
MRS r4, control ; r4 = control
MRS r5, psp ; r5 = psp
STMFD r5!, {r1-r4} ; push to thread stack
; update from thread stack pointer
LDR r0, [r0] ; r0 = from-thread object (first field = saved SP)
STR r5, [r0] ; *r0 = r5
b switch_to_thread ; goto switch_to_thread
contex_ns_store
MRS r1, psp ; get from thread stack pointer
#if defined ( __ARMVFP__ )
TST lr, #0x10 ; if(!EXC_RETURN[4]) -> thread used the FPU
BNE skip_push_fpu
VSTMDB r1!, {d8 - d15} ; push FPU register s16~s31
skip_push_fpu
#endif
STMFD r1!, {r4 - r11} ; push r4 - r11 register
LDR r2, =rt_trustzone_current_context ; r2 = &rt_secure_current_context
LDR r2, [r2] ; r2 = *r2
MOV r3, lr ; r3 = lr
MRS r4, psplim ; r4 = psplim
MRS r5, control ; r5 = control
STMFD r1!, {r2-r5} ; push to thread stack
LDR r0, [r0]
STR r1, [r0] ; update from thread stack pointer
switch_to_thread
LDR r1, =rt_interrupt_to_thread
LDR r1, [r1]
LDR r1, [r1] ; load thread stack pointer
; update current TrustZone context
LDMFD r1!, {r2-r5} ; pop {context, lr, psplim, control} from thread stack
MSR psplim, r4 ; psplim = r4
MSR control, r5 ; control = r5
MOV lr, r3 ; lr = r3 (restored EXC_RETURN)
LDR r6, =rt_trustzone_current_context ; r6 = &rt_secure_current_context
STR r2, [r6] ; *r6 = r2
MOV r0, r2 ; r0 = r2
; Whether TrustZone thread stack exists
CBZ r0, contex_ns_load ; if r0 == 0, goto contex_ns_load
PUSH {r1, r3} ; push lr, thread_stack
BL rt_trustzone_context_load ; call TrustZone load fun
POP {r1, r3} ; pop lr, thread_stack
MOV lr, r3 ; lr = r3
TST r3, #0x40 ; if EXC_RETURN[6] is 1, TrustZone stack was used
BEQ contex_ns_load ; if r3 & 0x40 == 0, goto contex_ns_load
B pendsv_exit
contex_ns_load
LDMFD r1!, {r4 - r11} ; pop r4 - r11 register
#if defined ( __ARMVFP__ )
TST lr, #0x10 ; if(!EXC_RETURN[4])
BNE skip_pop_fpu
VLDMIA r1!, {d8 - d15} ; pop FPU register s16~s31
skip_pop_fpu
#endif
pendsv_exit
MSR psp, r1 ; update stack pointer
; restore interrupt
POP {r2}
MSR PRIMASK, r2
BX lr
;/*
; * void rt_hw_context_switch_to(rt_uint32 to);
; * r0 --> to
; */
EXPORT rt_hw_context_switch_to
rt_hw_context_switch_to:
LDR r1, =rt_interrupt_to_thread
STR r0, [r1]
#if defined ( __ARMVFP__ )
; CLEAR CONTROL.FPCA
MRS r2, CONTROL ; read
BIC r2, r2, #0x04 ; modify
MSR CONTROL, r2 ; write-back
#endif
; set from thread to 0
LDR r1, =rt_interrupt_from_thread
MOV r0, #0x0
STR r0, [r1]
; set interrupt flag to 1
LDR r1, =rt_thread_switch_interrupt_flag
MOV r0, #1
STR r0, [r1]
; set the PendSV and SysTick exception priority
LDR r0, =NVIC_SYSPRI2
LDR r1, =NVIC_PENDSV_PRI
LDR.W r2, [r0,#0x00] ; read
ORR r1,r1,r2 ; modify
STR r1, [r0] ; write-back
LDR r0, =NVIC_INT_CTRL ; trigger the PendSV exception (causes context switch)
LDR r1, =NVIC_PENDSVSET
STR r1, [r0]
; restore MSP
LDR r0, =SCB_VTOR
LDR r0, [r0]
LDR r0, [r0]
NOP
MSR msp, r0
; enable interrupts at processor level
CPSIE F
CPSIE I
; ensure PendSV exception taken place before subsequent operation
DSB
ISB
; never reach here!
; compatible with old version
EXPORT rt_hw_interrupt_thread_switch
rt_hw_interrupt_thread_switch:
BX lr
IMPORT rt_hw_hard_fault_exception
EXPORT HardFault_Handler
HardFault_Handler:
; get current context
MRS r0, msp ; get fault context from handler.
TST lr, #0x04 ; if(!EXC_RETURN[2])
BEQ get_sp_done
MRS r0, psp ; get fault context from thread.
get_sp_done
STMFD r0!, {r4 - r11} ; push r4 - r11 register
LDR r2, =rt_trustzone_current_context ; r2 = &rt_secure_current_context
LDR r2, [r2] ; r2 = *r2
MOV r3, lr ; r3 = lr
MRS r4, psplim ; r4 = psplim
MRS r5, control ; r5 = control
STMFD r0!, {r2-r5} ; push to thread stack
STMFD r0!, {lr} ; push exec_return register
TST lr, #0x04 ; if(!EXC_RETURN[2])
BEQ update_msp
MSR psp, r0 ; update stack pointer to PSP.
B update_done
update_msp
MSR msp, r0 ; update stack pointer to MSP.
update_done
PUSH {lr}
BL rt_hw_hard_fault_exception
POP {lr}
ORR lr, lr, #0x04
BX lr
END
|
vandercookking/h7_device_RTT
| 1,658
|
rt-thread/libcpu/arm/cortex-m33/syscall_rvds.S
|
;/*
; * Copyright (c) 2006-2022, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date Author Notes
; * 2019-10-25 tyx first version
; */
AREA |.text|, CODE, READONLY, ALIGN=2
THUMB
REQUIRE8
PRESERVE8
IMPORT rt_secure_svc_handle
;/*
; * int tzcall(int id, rt_ubase_t arg0, rt_ubase_t arg1, rt_ubase_t arg2);
; */
tzcall PROC
EXPORT tzcall
SVC 1 ;call SVC 1
BX LR
ENDP
tzcall_entry PROC
PUSH {R1, R4, LR}
MOV R4, R1 ; copy thread SP to R4
LDMFD R4!, {r0 - r3} ; pop user stack, get input arg0, arg1, arg2
STMFD R4!, {r0 - r3} ; push stack, user stack recovery
BL rt_secure_svc_handle ; call fun
POP {R1, R4, LR}
STR R0, [R1] ; update return value
BX LR ; return to thread
ENDP
syscall_entry PROC
BX LR ; return to user app
ENDP
;/*
; * void SVC_Handler(void);
; */
SVC_Handler PROC
EXPORT SVC_Handler
; SVC dispatcher (Cortex-M33, RVDS syntax).
; Decodes the SVC immediate from the stacked return address and routes:
; SVC 0 -> syscall_entry, SVC 1 -> tzcall_entry (TrustZone call).
; NOTE(review): for SVC numbers > 1 execution falls through past ENDP
; with no explicit handler — confirm this is intentional.
; get SP, save to R1
MRS R1, MSP ;get fault context from handler
TST LR, #0x04 ;if(!EXC_RETURN[2])
BEQ get_sp_done
MRS R1, PSP ;get fault context from thread
get_sp_done
; get svc index
LDR R0, [R1, #24] ; stacked PC (offset 24 in the exception frame)
LDRB R0, [R0, #-2] ; SVC immediate lives in the low byte of the SVC insn (PC-2)
;if svc == 0, do system call
CMP R0, #0x0
BEQ syscall_entry
;if svc == 1, do TrustZone call
CMP R0, #0x1
BEQ tzcall_entry
ENDP
ALIGN
END
|
vandercookking/h7_device_RTT
| 11,119
|
rt-thread/libcpu/arm/cortex-m33/context_gcc.S
|
/*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2009-10-11 Bernard first version
* 2012-01-01 aozima support context switch load/store FPU register.
* 2013-06-18 aozima add restore MSP feature.
* 2013-06-23 aozima support lazy stack optimized.
* 2018-07-24 aozima enhancement hard fault exception handler.
*/
/**
* @addtogroup cortex-m4
*/
/*@{*/
.cpu cortex-m4
.syntax unified
.thumb
.text
.equ SCB_VTOR, 0xE000ED08 /* Vector Table Offset Register */
.equ NVIC_INT_CTRL, 0xE000ED04 /* interrupt control state register */
.equ NVIC_SYSPRI2, 0xE000ED20 /* system priority register (2) */
.equ NVIC_PENDSV_PRI, 0xFFFF0000 /* PendSV and SysTick priority value (lowest) */
.equ NVIC_PENDSVSET, 0x10000000 /* value to trigger PendSV exception */
/*
* rt_base_t rt_hw_interrupt_disable();
*/
.global rt_hw_interrupt_disable
.type rt_hw_interrupt_disable, %function
rt_hw_interrupt_disable:
MRS r0, PRIMASK
CPSID I
BX LR
/*
* void rt_hw_interrupt_enable(rt_base_t level);
*/
.global rt_hw_interrupt_enable
.type rt_hw_interrupt_enable, %function
rt_hw_interrupt_enable:
MSR PRIMASK, r0
BX LR
/*
* void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
* r0 --> from
* r1 --> to
*/
.global rt_hw_context_switch_interrupt
.type rt_hw_context_switch_interrupt, %function
.global rt_hw_context_switch
.type rt_hw_context_switch, %function
rt_hw_context_switch_interrupt:
rt_hw_context_switch:
/* set rt_thread_switch_interrupt_flag to 1 */
LDR r2, =rt_thread_switch_interrupt_flag
LDR r3, [r2]
CMP r3, #1
BEQ _reswitch
MOV r3, #1
STR r3, [r2]
LDR r2, =rt_interrupt_from_thread /* set rt_interrupt_from_thread */
STR r0, [r2]
_reswitch:
LDR r2, =rt_interrupt_to_thread /* set rt_interrupt_to_thread */
STR r1, [r2]
LDR r0, =NVIC_INT_CTRL /* trigger the PendSV exception (causes context switch) */
LDR r1, =NVIC_PENDSVSET
STR r1, [r0]
BX LR
/* r0 --> switch from thread stack
* r1 --> switch to thread stack
* psr, pc, lr, r12, r3, r2, r1, r0 are pushed into [from] stack
*/
.global PendSV_Handler
.type PendSV_Handler, %function
/* PendSV context switch (Cortex-M33 with TrustZone, GNU as).
 * Thread frame = [s16-s31,] r4-r11, {secure context, EXC_RETURN,
 * PSPLIM, CONTROL}. Secure-side state is saved/restored through
 * rt_trustzone_context_store/load when EXC_RETURN[6] indicates the
 * thread was running secure code. Twin of the IAR variant. */
PendSV_Handler:
    /* disable interrupt to protect context switch */
    MRS     r2, PRIMASK
    CPSID   I
    /* get rt_thread_switch_interrupt_flag */
    LDR     r0, =rt_thread_switch_interrupt_flag /* r0 = &rt_thread_switch_interrupt_flag */
    LDR     r1, [r0]                    /* r1 = *r0 */
    CMP     r1, #0x00                   /* compare r1 == 0x00 */
    BNE     schedule
    MSR     PRIMASK, r2                 /* if r1 == 0x00, do msr PRIMASK, r2 */
    BX      lr                          /* if r1 == 0x00, do bx lr (nothing to switch) */
schedule:
    PUSH    {r2}                        /* store interrupt state */
    /* clear rt_thread_switch_interrupt_flag to 0 */
    MOV     r1, #0x00                   /* r1 = 0x00 */
    STR     r1, [r0]                    /* *r0 = r1 */
    /* skip register save at the first time */
    LDR     r0, =rt_interrupt_from_thread /* r0 = &rt_interrupt_from_thread */
    LDR     r1, [r0]                    /* r1 = *r0 */
    CBZ     r1, switch_to_thread        /* if r1 == 0, goto switch_to_thread */
    /* Whether TrustZone thread stack exists */
    LDR     r1, =rt_trustzone_current_context  /* r1 = &rt_secure_current_context */
    LDR     r1, [r1]                    /* r1 = *r1 */
    CBZ     r1, contex_ns_store         /* if r1 == 0, goto contex_ns_store */
    /* call TrustZone fun, Save TrustZone stack */
    STMFD   sp!, {r0-r1, lr}            /* push register */
    MOV     r0, r1                      /* r0 = rt_secure_current_context */
    BL      rt_trustzone_context_store  /* call TrustZone store fun */
    LDMFD   sp!, {r0-r1, lr}            /* pop register */
    /* check break from TrustZone */
    MOV     r2, lr                      /* r2 = lr */
    TST     r2, #0x40                   /* if EXC_RETURN[6] is 1, TrustZone stack was used */
    BEQ     contex_ns_store             /* if r2 & 0x40 == 0, goto contex_ns_store */
    /* push PSPLIM CONTROL PSP LR current_context to stack */
    MRS     r3, psplim                  /* r3 = psplim */
    MRS     r4, control                 /* r4 = control */
    MRS     r5, psp                     /* r5 = psp */
    STMFD   r5!, {r1-r4}                /* push to thread stack */
    /* update from thread stack pointer */
    LDR     r0, [r0]                    /* r0 = from-thread object (first field = saved SP) */
    STR     r5, [r0]                    /* *r0 = r5 */
    b       switch_to_thread            /* goto switch_to_thread */
contex_ns_store:
    MRS     r1, psp                     /* get from thread stack pointer */
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    TST     lr, #0x10                   /* if(!EXC_RETURN[4]) -> thread used the FPU */
    IT      EQ
    VSTMDBEQ r1!, {d8 - d15}            /* push FPU register s16~s31 */
#endif
    STMFD   r1!, {r4 - r11}             /* push r4 - r11 register */
    LDR     r2, =rt_trustzone_current_context  /* r2 = &rt_secure_current_context */
    LDR     r2, [r2]                    /* r2 = *r2 */
    MOV     r3, lr                      /* r3 = lr */
    MRS     r4, psplim                  /* r4 = psplim */
    MRS     r5, control                 /* r5 = control */
    STMFD   r1!, {r2-r5}                /* push to thread stack */
    LDR     r0, [r0]
    STR     r1, [r0]                    /* update from thread stack pointer */
switch_to_thread:
    LDR     r1, =rt_interrupt_to_thread
    LDR     r1, [r1]
    LDR     r1, [r1]                    /* load thread stack pointer */
    /* update current TrustZone context */
    LDMFD   r1!, {r2-r5}                /* pop {context, lr, psplim, control} */
    MSR     psplim, r4                  /* psplim = r4 */
    MSR     control, r5                 /* control = r5 */
    MOV     lr, r3                      /* lr = r3 (restored EXC_RETURN) */
    LDR     r6, =rt_trustzone_current_context  /* r6 = &rt_secure_current_context */
    STR     r2, [r6]                    /* *r6 = r2 */
    MOV     r0, r2                      /* r0 = r2 */
    /* Whether TrustZone thread stack exists */
    CBZ     r0, contex_ns_load          /* if r0 == 0, goto contex_ns_load */
    PUSH    {r1, r3}                    /* push lr, thread_stack */
    BL      rt_trustzone_context_load   /* call TrustZone load fun */
    POP     {r1, r3}                    /* pop lr, thread_stack */
    MOV     lr, r3                      /* lr = r3 */
    TST     r3, #0x40                   /* if EXC_RETURN[6] is 1, TrustZone stack was used */
    BEQ     contex_ns_load              /* if r3 & 0x40 == 0, goto contex_ns_load */
    B       pendsv_exit
contex_ns_load:
    LDMFD   r1!, {r4 - r11}             /* pop r4 - r11 register */
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
    TST     lr, #0x10                   /* if(!EXC_RETURN[4]) */
    IT      EQ
    VLDMIAEQ r1!, {d8 - d15}            /* pop FPU register s16~s31 */
#endif
pendsv_exit:
    MSR     psp, r1                     /* update stack pointer */
    /* restore interrupt */
    POP     {r2}
    MSR     PRIMASK, r2
    BX      lr
/*
 * void rt_hw_context_switch_to(rt_uint32 to);
 * r0 --> to
 * Performs the very first thread switch: primes the scheduler variables
 * (from = 0 so PendSV skips the register save), triggers PendSV, resets
 * MSP from the vector table, then unmasks interrupts so PendSV can run.
 */
.global rt_hw_context_switch_to
.type rt_hw_context_switch_to, %function
rt_hw_context_switch_to:
LDR r1, =rt_interrupt_to_thread
STR r0, [r1]
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
/* CLEAR CONTROL.FPCA */
MRS r2, CONTROL /* read */
BIC r2, #0x04 /* modify */
MSR CONTROL, r2 /* write-back */
#endif
/* set from thread to 0 */
LDR r1, =rt_interrupt_from_thread
MOV r0, #0x0
STR r0, [r1]
/* set interrupt flag to 1 */
LDR r1, =rt_thread_switch_interrupt_flag
MOV r0, #1
STR r0, [r1]
/* set the PendSV and SysTick exception priority (lowest) */
LDR r0, =NVIC_SYSPRI2
LDR r1, =NVIC_PENDSV_PRI
LDR.W r2, [r0,#0x00] /* read */
ORR r1,r1,r2 /* modify */
STR r1, [r0] /* write-back */
LDR r0, =NVIC_INT_CTRL /* trigger the PendSV exception (causes context switch) */
LDR r1, =NVIC_PENDSVSET
STR r1, [r0]
/* restore MSP from vector table entry 0 (initial stack pointer) */
LDR r0, =SCB_VTOR
LDR r0, [r0]
LDR r0, [r0]
NOP
MSR msp, r0
/* enable interrupts at processor level */
CPSIE F
CPSIE I
/* ensure PendSV exception taken place before subsequent operation */
DSB
ISB
/* never reach here! */
/* compatible with old version: kept as a no-op stub for legacy callers */
.global rt_hw_interrupt_thread_switch
.type rt_hw_interrupt_thread_switch, %function
rt_hw_interrupt_thread_switch:
BX lr
NOP
/* HardFault_Handler — capture the faulting context and hand it to
 * rt_hw_hard_fault_exception (r0 = pointer to the saved frame). */
.global HardFault_Handler
.type HardFault_Handler, %function
HardFault_Handler:
/* get current context */
MRS r0, msp /* get fault context from handler. */
TST lr, #0x04 /* if(!EXC_RETURN[2]) the MSP was in use */
BEQ get_sp_done
MRS r0, psp /* get fault context from thread. */
get_sp_done:
STMFD r0!, {r4 - r11} /* push r4 - r11 register */
LDR r2, =rt_trustzone_current_context /* r2 = &rt_trustzone_current_context */
LDR r2, [r2] /* r2 = *r2 */
MOV r3, lr /* r3 = lr (EXC_RETURN) */
MRS r4, psplim /* r4 = psplim */
MRS r5, control /* r5 = control */
STMFD r0!, {r2-r5} /* push {context, EXC_RETURN, psplim, control} */
STMFD r0!, {lr} /* push exec_return register */
TST lr, #0x04 /* if(!EXC_RETURN[2]) */
BEQ update_msp
MSR psp, r0 /* update stack pointer to PSP. */
B update_done
update_msp:
MSR msp, r0 /* update stack pointer to MSP. */
update_done:
PUSH {LR}
BL rt_hw_hard_fault_exception
POP {LR}
ORR lr, lr, #0x04 /* force return to thread mode / process stack */
BX lr
|
vandercookking/h7_device_RTT
| 1,570
|
rt-thread/libcpu/arm/cortex-m33/syscall_gcc.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-10-25 tyx first version
*/
.cpu cortex-m4
.syntax unified
.thumb
.text
/*
 * int tzcall(int id, rt_ubase_t arg0, rt_ubase_t arg1, rt_ubase_t arg2);
 * Thread-mode entry point: traps into SVC_Handler with service number 1,
 * which dispatches to tzcall_entry in handler mode.
 */
.global tzcall
.type tzcall, %function
tzcall:
SVC 1 /* call SVC 1 */
BX LR
/* tzcall_entry — handler-mode tail of tzcall, entered from SVC_Handler.
 * R1 = caller's exception stack frame; the stacked r0-r3 hold
 * id/arg0/arg1/arg2. The result is written back into the stacked r0. */
tzcall_entry:
PUSH {R1, R4, LR}
MOV R4, R1 /* copy thread SP to R4 */
LDMFD R4!, {r0 - r3} /* pop user stack, get input arg0, arg1, arg2 */
STMFD R4!, {r0 - r3} /* push stack, user stack recovery */
BL rt_secure_svc_handle /* call secure-world service handler */
POP {R1, R4, LR}
STR R0, [R1] /* update return value in the stacked r0 slot */
BX LR /* return to thread */
syscall_entry:
BX LR /* svc #0: no-op, return to user app */
.global SVC_Handler
.type SVC_Handler, %function
SVC_Handler:
/* get SP, save to R1 */
MRS R1, MSP /* get fault context from handler. */
TST LR, #0x04 /* if(!EXC_RETURN[2]) the MSP was in use */
BEQ get_sp_done
MRS R1, PSP /* get fault context from thread. */
get_sp_done:
/* get svc index */
LDR R0, [R1, #24] /* stacked PC (offset 24 in the exception frame) */
LDRB R0, [R0, #-2] /* immediate byte of the SVC instruction */
/* if svc == 0, do system call */
CMP R0, #0x0
BEQ syscall_entry
/* if svc == 1, do TrustZone call */
CMP R0, #0x1
BEQ tzcall_entry
|
vandercookking/h7_device_RTT
| 1,707
|
rt-thread/libcpu/arm/cortex-m33/syscall_iar.S
|
;/*
; * Copyright (c) 2006-2022, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date Author Notes
; * 2019-10-25 tyx first version
; * 2021-03-26 lxf modify bad instruction
; */
;/*
; * @addtogroup cortex-m33
; */
SECTION .text:CODE(2)
THUMB
REQUIRE8
PRESERVE8
IMPORT rt_secure_svc_handle
;/*
; * int tzcall(int id, rt_ubase_t arg0, rt_ubase_t arg1, rt_ubase_t arg2);
; */
;/* thread-mode entry: traps into SVC_Handler with service number 1 */
EXPORT tzcall
tzcall:
SVC 1 ;/* call SVC 1 */
BX LR
;/* handler-mode tail of tzcall; R1 = caller's exception stack frame,
; * stacked r0-r3 hold id/arg0/arg1/arg2; result written to stacked r0 */
tzcall_entry:
PUSH {R1, R4, LR}
MOV R4, R1 ;/* copy thread SP to R4 */
LDMFD R4!, {r0 - r3} ;/* pop user stack, get input arg0, arg1, arg2 */
STMFD R4!, {r0 - r3} ;/* push stack, user stack recovery */
BL rt_secure_svc_handle ;/* call secure-world service handler */
POP {R1, R4, LR}
STR R0, [R1] ;/* update return value in stacked r0 */
BX LR ;/* return to thread */
syscall_entry:
BX LR ;/* svc #0: no-op, return to user app */
EXPORT SVC_Handler
SVC_Handler:
;/* get SP, save to R1 */
MRS R1, MSP ;/* get fault context from handler. */
TST LR, #0x04 ;/* if(!EXC_RETURN[2]) the MSP was in use */
BEQ get_sp_done
MRS R1, PSP ;/* get fault context from thread. */
get_sp_done:
;/* get svc index */
LDR R0, [R1, #24] ;/* stacked PC (offset 24 in exception frame) */
LDRB R0, [R0, #-2] ;/* immediate byte of the SVC instruction */
;/* if svc == 0, do system call */
CMP R0, #0x0
BEQ syscall_entry
;/* if svc == 1, do TrustZone call */
CMP R0, #0x1
BEQ tzcall_entry
END
|
vandercookking/h7_device_RTT
| 10,842
|
rt-thread/libcpu/arm/cortex-m33/context_rvds.S
|
;/*
;* Copyright (c) 2006-2018, RT-Thread Development Team
;*
;* SPDX-License-Identifier: Apache-2.0
;*
; * Change Logs:
; * Date Author Notes
; * 2009-01-17 Bernard first version.
; * 2012-01-01 aozima support context switch load/store FPU register.
; * 2013-06-18 aozima add restore MSP feature.
; * 2013-06-23 aozima support lazy stack optimized.
; * 2018-07-24 aozima enhancement hard fault exception handler.
; */
;/**
; * @addtogroup cortex-m33
; */
;/*@{*/
;/* Cortex-M system control block registers and values used below */
SCB_VTOR EQU 0xE000ED08 ; Vector Table Offset Register
NVIC_INT_CTRL EQU 0xE000ED04 ; interrupt control state register (ICSR)
NVIC_SYSPRI2 EQU 0xE000ED20 ; system priority register (2)
NVIC_PENDSV_PRI EQU 0xFFFF0000 ; PendSV and SysTick priority value (lowest)
NVIC_PENDSVSET EQU 0x10000000 ; value to trigger PendSV exception
AREA |.text|, CODE, READONLY, ALIGN=2
THUMB
REQUIRE8
PRESERVE8
IMPORT rt_thread_switch_interrupt_flag
IMPORT rt_interrupt_from_thread
IMPORT rt_interrupt_to_thread
IMPORT rt_trustzone_current_context
IMPORT rt_trustzone_context_load
IMPORT rt_trustzone_context_store
;/*
; * rt_base_t rt_hw_interrupt_disable();
; */
rt_hw_interrupt_disable PROC
EXPORT rt_hw_interrupt_disable
MRS r0, PRIMASK ; return the previous PRIMASK as the saved level
CPSID I ; mask interrupts
BX LR
ENDP
;/*
; * void rt_hw_interrupt_enable(rt_base_t level);
; */
rt_hw_interrupt_enable PROC
EXPORT rt_hw_interrupt_enable
MSR PRIMASK, r0 ; restore the level returned by rt_hw_interrupt_disable
BX LR
ENDP
;/*
; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
; * r0 --> from
; * r1 --> to
; */
rt_hw_context_switch_interrupt
EXPORT rt_hw_context_switch_interrupt
rt_hw_context_switch PROC
EXPORT rt_hw_context_switch
; set rt_thread_switch_interrupt_flag to 1
LDR r2, =rt_thread_switch_interrupt_flag
LDR r3, [r2]
CMP r3, #1
BEQ _reswitch ; switch already pending: only update the destination
MOV r3, #1
STR r3, [r2]
LDR r2, =rt_interrupt_from_thread ; set rt_interrupt_from_thread
STR r0, [r2]
_reswitch
LDR r2, =rt_interrupt_to_thread ; set rt_interrupt_to_thread
STR r1, [r2]
LDR r0, =NVIC_INT_CTRL ; trigger the PendSV exception (causes context switch)
LDR r1, =NVIC_PENDSVSET
STR r1, [r0]
BX LR
ENDP
; PendSV_Handler — performs the actual thread context switch
; (Cortex-M33, TrustZone-aware, RVDS syntax).
; r0 --> switch from thread stack
; r1 --> switch to thread stack
; psr, pc, lr, r12, r3, r2, r1, r0 are pushed into [from] stack
; Saved software frame layout (low to high address):
;   [tz_context, EXC_RETURN, psplim, control], r4-r11, (optional d8-d15)
PendSV_Handler PROC
EXPORT PendSV_Handler
; disable interrupt to protect context switch
MRS r2, PRIMASK ; R2 = PRIMASK
CPSID I ; disable all interrupt
; get rt_thread_switch_interrupt_flag
LDR r0, =rt_thread_switch_interrupt_flag ; r0 = &rt_thread_switch_interrupt_flag
LDR r1, [r0] ; r1 = *r0
CMP r1, #0x00 ; compare r1 == 0x00
BNE schedule
MSR PRIMASK, r2 ; if r1 == 0x00, restore PRIMASK
BX lr ; if r1 == 0x00, no switch pending: return
schedule
PUSH {r2} ; store interrupt state
; clear rt_thread_switch_interrupt_flag to 0
MOV r1, #0x00 ; r1 = 0x00
STR r1, [r0] ; *r0 = r1
; skip register save at the first time (from == 0 on first switch)
LDR r0, =rt_interrupt_from_thread ; r0 = &rt_interrupt_from_thread
LDR r1, [r0] ; r1 = *r0
CBZ r1, switch_to_thread ; if r1 == 0, goto switch_to_thread
; Whether TrustZone thread stack exists
LDR r1, =rt_trustzone_current_context ; r1 = &rt_trustzone_current_context
LDR r1, [r1] ; r1 = *r1
CBZ r1, contex_ns_store ; if r1 == 0, goto contex_ns_store
;call TrustZone fun, Save TrustZone stack
STMFD sp!, {r0-r1, lr} ; push registers live across the call
MOV r0, r1 ; r0 = current TrustZone context
BL rt_trustzone_context_store ; call TrustZone store fun
LDMFD sp!, {r0-r1, lr} ; pop registers
; check break from TrustZone
MOV r2, lr ; r2 = lr (EXC_RETURN)
TST r2, #0x40 ; if EXC_RETURN[6] is 1, TrustZone stack was used
BEQ contex_ns_store ; if r2 & 0x40 == 0, goto contex_ns_store
; push PSPLIM CONTROL PSP LR current_context to stack
MRS r3, psplim ; r3 = psplim
MRS r4, control ; r4 = control
MRS r5, psp ; r5 = psp
STMFD r5!, {r1-r4} ; push {context, EXC_RETURN, psplim, control}
; update from thread stack pointer
LDR r0, [r0] ; r0 = rt_interrupt_from_thread (thread sp slot)
STR r5, [r0] ; *r0 = r5
b switch_to_thread ; goto switch_to_thread
contex_ns_store
MRS r1, psp ; get from thread stack pointer
IF {FPU} != "SoftVFP"
TST lr, #0x10 ; if(!EXC_RETURN[4]) FPU context is active
VSTMFDEQ r1!, {d8 - d15} ; push FPU register s16~s31
ENDIF
STMFD r1!, {r4 - r11} ; push r4 - r11 register
LDR r2, =rt_trustzone_current_context ; r2 = &rt_trustzone_current_context
LDR r2, [r2] ; r2 = *r2
MOV r3, lr ; r3 = lr (EXC_RETURN)
MRS r4, psplim ; r4 = psplim
MRS r5, control ; r5 = control
STMFD r1!, {r2-r5} ; push {context, EXC_RETURN, psplim, control}
LDR r0, [r0] ; r0 = rt_interrupt_from_thread (thread sp slot)
STR r1, [r0] ; update from thread stack pointer
switch_to_thread
LDR r1, =rt_interrupt_to_thread
LDR r1, [r1]
LDR r1, [r1] ; load thread stack pointer
; update current TrustZone context
LDMFD r1!, {r2-r5} ; pop {context, EXC_RETURN, psplim, control}
MSR psplim, r4 ; psplim = r4
MSR control, r5 ; control = r5
MOV lr, r3 ; lr = r3 (EXC_RETURN)
LDR r6, =rt_trustzone_current_context ; r6 = &rt_trustzone_current_context
STR r2, [r6] ; *r6 = r2
MOV r0, r2 ; r0 = r2
; Whether TrustZone thread stack exists
CBZ r0, contex_ns_load ; if r0 == 0, goto contex_ns_load
PUSH {r1, r3} ; save thread stack pointer and EXC_RETURN
BL rt_trustzone_context_load ; call TrustZone load fun
POP {r1, r3} ; restore thread stack pointer and EXC_RETURN
MOV lr, r3 ; lr = r3
TST r3, #0x40 ; if EXC_RETURN[6] is 1, TrustZone stack was used
BEQ contex_ns_load ; if r3 & 0x40 == 0, goto contex_ns_load
B pendsv_exit
contex_ns_load
LDMFD r1!, {r4 - r11} ; pop r4 - r11 register
IF {FPU} != "SoftVFP"
TST lr, #0x10 ; if(!EXC_RETURN[4]) FPU context is active
VLDMFDEQ r1!, {d8 - d15} ; pop FPU register s16~s31
ENDIF
pendsv_exit
MSR psp, r1 ; update stack pointer
; restore interrupt
POP {r2}
MSR PRIMASK, r2
BX lr
ENDP
;/*
; * void rt_hw_context_switch_to(rt_uint32 to);
; * r0 --> to
; * this function is used to perform the first thread switch:
; * primes the scheduler variables (from = 0 so PendSV skips the
; * register save), triggers PendSV, resets MSP, unmasks interrupts
; */
rt_hw_context_switch_to PROC
EXPORT rt_hw_context_switch_to
; set to thread
LDR r1, =rt_interrupt_to_thread
STR r0, [r1]
IF {FPU} != "SoftVFP"
; CLEAR CONTROL.FPCA
MRS r2, CONTROL ; read
BIC r2, #0x04 ; modify
MSR CONTROL, r2 ; write-back
ENDIF
; set from thread to 0
LDR r1, =rt_interrupt_from_thread
MOV r0, #0x0
STR r0, [r1]
; set interrupt flag to 1
LDR r1, =rt_thread_switch_interrupt_flag
MOV r0, #1
STR r0, [r1]
; set the PendSV and SysTick exception priority (lowest)
LDR r0, =NVIC_SYSPRI2
LDR r1, =NVIC_PENDSV_PRI
LDR.W r2, [r0,#0x00] ; read
ORR r1,r1,r2 ; modify
STR r1, [r0] ; write-back
; trigger the PendSV exception (causes context switch)
LDR r0, =NVIC_INT_CTRL
LDR r1, =NVIC_PENDSVSET
STR r1, [r0]
; restore MSP from vector table entry 0 (initial stack pointer)
LDR r0, =SCB_VTOR
LDR r0, [r0]
LDR r0, [r0]
MSR msp, r0
; enable interrupts at processor level
CPSIE F
CPSIE I
; ensure PendSV exception taken place before subsequent operation
DSB
ISB
; never reach here!
ENDP
; compatible with old version: kept as a no-op stub for legacy callers
rt_hw_interrupt_thread_switch PROC
EXPORT rt_hw_interrupt_thread_switch
BX lr
ENDP
IMPORT rt_hw_hard_fault_exception
EXPORT HardFault_Handler
; HardFault_Handler — capture the faulting context and hand it to
; rt_hw_hard_fault_exception (r0 = pointer to the saved frame)
HardFault_Handler PROC
; get current context
MRS r0, msp ;get fault context from handler
TST lr, #0x04 ;if(!EXC_RETURN[2]) the MSP was in use
BEQ get_sp_done
MRS r0, psp ;get fault context from thread
get_sp_done
STMFD r0!, {r4 - r11} ; push r4 - r11 register
LDR r2, =rt_trustzone_current_context ; r2 = &rt_trustzone_current_context
LDR r2, [r2] ; r2 = *r2
MOV r3, lr ; r3 = lr (EXC_RETURN)
MRS r4, psplim ; r4 = psplim
MRS r5, control ; r5 = control
STMFD r0!, {r2-r5} ; push {context, EXC_RETURN, psplim, control}
STMFD r0!, {lr} ; push exec_return register
TST lr, #0x04 ; if(!EXC_RETURN[2])
BEQ update_msp
MSR psp, r0 ; update stack pointer to PSP
B update_done
update_msp
MSR msp, r0 ; update stack pointer to MSP
update_done
PUSH {lr}
BL rt_hw_hard_fault_exception
POP {lr}
ORR lr, lr, #0x04 ; force return to thread mode / process stack
BX lr
ENDP
ALIGN 4
END
|
vandercookking/h7_device_RTT
| 2,352
|
rt-thread/libcpu/arm/arm926/context_iar.S
|
/*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2011-08-14 weety copy from mini2440
* 2015-04-15 ArdaFu convert from context_gcc.s
*/
#define NOINT 0xc0
SECTION .text:CODE(6)
/*
* rt_base_t rt_hw_interrupt_disable();
*/
PUBLIC rt_hw_interrupt_disable
rt_hw_interrupt_disable:
MRS R0, CPSR ; return the previous CPSR as the saved level
ORR R1, R0, #NOINT
MSR CPSR_C, R1 ; mask IRQ and FIQ
MOV PC, LR
/*
* void rt_hw_interrupt_enable(rt_base_t level);
*/
PUBLIC rt_hw_interrupt_enable
rt_hw_interrupt_enable:
MSR CPSR_CXSF, R0 ; restore the level returned by rt_hw_interrupt_disable
MOV PC, LR
/*
* void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
* r0 --> from
* r1 --> to
*/
PUBLIC rt_hw_context_switch
rt_hw_context_switch:
STMFD SP!, {LR} ; push pc (lr should be pushed in place of PC)
STMFD SP!, {R0-R12, LR} ; push lr & register file
MRS R4, CPSR
STMFD SP!, {R4} ; push cpsr
STR SP, [R0] ; store sp in preempted tasks TCB
LDR SP, [R1] ; get new task stack pointer
LDMFD SP!, {R4} ; pop new task spsr
MSR SPSR_cxsf, R4
LDMFD SP!, {R0-R12, LR, PC}^ ; pop new task r0-r12, lr & pc
/*
* void rt_hw_context_switch_to(rt_uint32 to);
* r0 --> to
*/
PUBLIC rt_hw_context_switch_to
rt_hw_context_switch_to:
LDR SP, [R0] ; get new task stack pointer
LDMFD SP!, {R4} ; pop new task spsr
MSR SPSR_cxsf, R4
LDMFD SP!, {R0-R12, LR, PC}^ ; pop new task r0-r12, lr & pc
/*
* void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
*/
IMPORT rt_thread_switch_interrupt_flag
IMPORT rt_interrupt_from_thread
IMPORT rt_interrupt_to_thread
PUBLIC rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
LDR R2, =rt_thread_switch_interrupt_flag
LDR R3, [R2]
CMP R3, #1
BEQ _reswitch ; switch already pending: only update the destination
MOV R3, #1 ; set flag to 1
STR R3, [R2]
LDR R2, =rt_interrupt_from_thread ; set rt_interrupt_from_thread
STR R0, [R2]
_reswitch:
LDR R2, =rt_interrupt_to_thread ; set rt_interrupt_to_thread
STR R1, [R2]
MOV PC, LR
END
|
vandercookking/h7_device_RTT
| 7,817
|
rt-thread/libcpu/arm/arm926/start_iar.S
|
;/*
; * Copyright (c) 2006-2018, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date Author Notes
; * 2011-01-13 weety first version
; * 2015-04-15 ArdaFu Split from AT91SAM9260 BSP
; * 2015-04-21 ArdaFu Remove remap code. Using mmu to map vector table
; * 2015-06-04 aozima Align stack address to 8 byte.
; */
#include "rt_low_level_init.h"
#define S_FRAME_SIZE (18*4) ;72
;#define S_SPSR (17*4) ;SPSR
;#define S_CPSR (16*4) ;CPSR
#define S_PC (15*4) ;R15
;#define S_LR (14*4) ;R14
;#define S_SP (13*4) ;R13
;#define S_IP (12*4) ;R12
;#define S_FP (11*4) ;R11
;#define S_R10 (10*4)
;#define S_R9 (9*4)
;#define S_R8 (8*4)
;#define S_R7 (7*4)
;#define S_R6 (6*4)
;#define S_R5 (5*4)
;#define S_R4 (4*4)
;#define S_R3 (3*4)
;#define S_R2 (2*4)
;#define S_R1 (1*4)
;#define S_R0 (0*4)
#define MODE_SYS 0x1F
#define MODE_FIQ 0x11
#define MODE_IRQ 0x12
#define MODE_SVC 0x13
#define MODE_ABT 0x17
#define MODE_UND 0x1B
#define MODEMASK 0x1F
#define NOINT 0xC0
;----------------------- Stack and Heap Definitions ----------------------------
MODULE ?cstartup
SECTION .noinit:DATA:NOROOT(3)
DATA
; Each label marks the TOP (highest address) of its stack; stacks grow
; downwards, so SP for each mode is loaded with the label itself.
ALIGNRAM 3
DS8 UND_STK_SIZE
PUBLIC UND_STACK_START
UND_STACK_START:
ALIGNRAM 3
DS8 ABT_STK_SIZE
PUBLIC ABT_STACK_START
ABT_STACK_START:
ALIGNRAM 3
DS8 FIQ_STK_SIZE
PUBLIC FIQ_STACK_START
FIQ_STACK_START:
ALIGNRAM 3
DS8 IRQ_STK_SIZE
PUBLIC IRQ_STACK_START
IRQ_STACK_START:
ALIGNRAM 3
DS8 SYS_STK_SIZE
PUBLIC SYS_STACK_START
SYS_STACK_START:
ALIGNRAM 3
DS8 SVC_STK_SIZE
PUBLIC SVC_STACK_START
SVC_STACK_START:
;--------------Jump vector table------------------------------------------------
SECTION .intvec:CODE:ROOT(2)
ARM
; ARM exception vector table: eight LDR PC slots, each loading its
; handler address from the literal words that follow.
PUBLIC Entry_Point
Entry_Point:
__iar_init$$done: ; The interrupt vector is not needed
; until after copy initialization is done
LDR PC, vector_reset
LDR PC, vector_undef
LDR PC, vector_swi
LDR PC, vector_pabt
LDR PC, vector_dabt
LDR PC, vector_resv
LDR PC, vector_irq
LDR PC, vector_fiq
vector_reset:
DC32 Reset_Handler
vector_undef:
DC32 Undef_Handler
vector_swi:
DC32 SWI_Handler
vector_pabt:
DC32 PAbt_Handler
vector_dabt:
DC32 DAbt_Handler
vector_resv:
DC32 Resv_Handler
vector_irq:
DC32 IRQ_Handler
vector_fiq:
DC32 FIQ_Handler
;----------------- Reset Handler -----------------------------------------------
EXTERN rt_low_level_init
EXTERN ?main
PUBLIC __iar_program_start
; Reset_Handler — enter SVC mode with interrupts masked, configure CP15,
; run board low-level init, set up the per-mode stacks, then jump to main.
__iar_program_start:
Reset_Handler:
; Set the cpu to SVC32 mode
MRS R0, CPSR
BIC R0, R0, #MODEMASK
ORR R0, R0, #MODE_SVC|NOINT
MSR CPSR_cxsf, R0
; Set CO-Processor
; little-end,disbale I/D Cache MMU, vector table is 0x00000000
MRC P15, 0, R0, C1, C0, 0 ; Read CP15
LDR R1, =0x00003085 ; set clear bits
BIC R0, R0, R1
MCR P15, 0, R0, C1, C0, 0 ; Write CP15
; Call low level init function,
; disable and clear all IRQs, Init MMU, Init interrupt controller, etc.
LDR SP, =SVC_STACK_START
LDR R0, =rt_low_level_init
BLX R0
Setup_Stack:
; Setup Stack for each mode
MRS R0, CPSR
BIC R0, R0, #MODEMASK
ORR R1, R0, #MODE_UND|NOINT
MSR CPSR_cxsf, R1 ; Undef mode
LDR SP, =UND_STACK_START
ORR R1,R0,#MODE_ABT|NOINT
MSR CPSR_cxsf,R1 ; Abort mode
LDR SP, =ABT_STACK_START
ORR R1,R0,#MODE_IRQ|NOINT
MSR CPSR_cxsf,R1 ; IRQ mode
LDR SP, =IRQ_STACK_START
ORR R1,R0,#MODE_FIQ|NOINT
MSR CPSR_cxsf,R1 ; FIQ mode
LDR SP, =FIQ_STACK_START
ORR R1,R0,#MODE_SYS|NOINT
MSR CPSR_cxsf,R1 ; SYS/User mode
LDR SP, =SYS_STACK_START
ORR R1,R0,#MODE_SVC|NOINT
MSR CPSR_cxsf,R1 ; SVC mode
LDR SP, =SVC_STACK_START
; Enter the C code
LDR R0, =?main
BLX R0
;----------------- Exception Handler -------------------------------------------
IMPORT rt_hw_trap_udef
IMPORT rt_hw_trap_swi
IMPORT rt_hw_trap_pabt
IMPORT rt_hw_trap_dabt
IMPORT rt_hw_trap_resv
IMPORT rt_hw_trap_irq
IMPORT rt_hw_trap_fiq
IMPORT rt_interrupt_enter
IMPORT rt_interrupt_leave
IMPORT rt_thread_switch_interrupt_flag
IMPORT rt_interrupt_from_thread
IMPORT rt_interrupt_to_thread
SECTION .text:CODE:ROOT(2)
ARM
; Undef_Handler — build an S_FRAME_SIZE trap frame and call the C trap
; handler with r0 = frame pointer. Does not return (handler hangs/reboots).
Undef_Handler:
SUB SP, SP, #S_FRAME_SIZE
STMIA SP, {R0 - R12} ; Calling R0-R12
ADD R8, SP, #S_PC
STMDB R8, {SP, LR} ; Calling SP, LR
STR LR, [R8, #0] ; Save calling PC
MRS R6, SPSR
STR R6, [R8, #4] ; Save CPSR
STR R0, [R8, #8] ; NOTE(review): stores R0 into the SPSR slot; looks suspect — confirm expected frame layout
MOV R0, SP
BL rt_hw_trap_udef
SWI_Handler:
BL rt_hw_trap_swi
PAbt_Handler:
BL rt_hw_trap_pabt
DAbt_Handler:
SUB SP, SP, #S_FRAME_SIZE
STMIA SP, {R0 - R12} ; Calling R0-R12
ADD R8, SP, #S_PC
STMDB R8, {SP, LR} ; Calling SP, LR
STR LR, [R8, #0] ; Save calling PC
MRS R6, SPSR
STR R6, [R8, #4] ; Save CPSR
STR R0, [R8, #8] ; NOTE(review): stores R0 into the SPSR slot; looks suspect — confirm expected frame layout
MOV R0, SP
BL rt_hw_trap_dabt
Resv_Handler:
BL rt_hw_trap_resv
; IRQ_Handler — dispatch the interrupt through the kernel; if the ISR
; requested a thread switch, divert to rt_hw_context_switch_interrupt_do
; instead of returning to the interrupted thread.
IRQ_Handler:
STMFD SP!, {R0-R12,LR}
BL rt_interrupt_enter
BL rt_hw_trap_irq
BL rt_interrupt_leave
; If rt_thread_switch_interrupt_flag set,
; jump to rt_hw_context_switch_interrupt_do and don't return
LDR R0, =rt_thread_switch_interrupt_flag
LDR R1, [R0]
CMP R1, #1
BEQ rt_hw_context_switch_interrupt_do
LDMFD SP!, {R0-R12,LR}
SUBS PC, LR, #4 ; return to interrupted instruction, restoring CPSR
FIQ_Handler:
STMFD SP!, {R0-R7,LR}
BL rt_hw_trap_fiq
LDMFD SP!, {R0-R7,LR}
SUBS PC, LR, #4 ; return to interrupted instruction, restoring CPSR
;------ void rt_hw_context_switch_interrupt_do(rt_base_t flag) -----------------
; rt_hw_context_switch_interrupt_do — rebuild the interrupted thread's
; context on its own (SVC-mode) stack, then resume the "to" thread.
; Entered from IRQ_Handler with R0 = &rt_thread_switch_interrupt_flag and
; the IRQ stack still holding the interrupted thread's R0-R12,LR.
rt_hw_context_switch_interrupt_do:
MOV R1, #0 ; Clear flag
STR R1, [R0] ; Save to flag variable
LDMFD SP!, {R0-R12,LR} ; Reload saved registers
STMFD SP, {R0-R2} ; Save R0-R2
SUB R1, SP, #4*3 ; Save old task's SP to R1
SUB R2, LR, #4 ; Save old task's PC to R2
MRS R0, SPSR ; Get CPSR of interrupt thread
MSR CPSR_c, #MODE_SVC|NOINT ; Switch to SVC mode and no interrupt
STMFD SP!, {R2} ; Push old task's PC
STMFD SP!, {R3-R12,LR} ; Push old task's LR,R12-R3
LDMFD R1, {R1-R3} ; fetch the R0-R2 saved on the IRQ stack
STMFD SP!, {R1-R3} ; Push old task's R2-R0
STMFD SP!, {R0} ; Push old task's CPSR
LDR R4, =rt_interrupt_from_thread
LDR R5, [R4] ; R5 = stack ptr in old tasks's TCB
STR SP, [R5] ; Store SP in preempted tasks's TCB
LDR R6, =rt_interrupt_to_thread
LDR R6, [R6] ; R6 = stack ptr in new tasks's TCB
LDR SP, [R6] ; Get new task's stack pointer
LDMFD SP!, {R4} ; Pop new task's SPSR
MSR SPSR_cxsf, R4
LDMFD SP!, {R0-R12,LR,PC}^ ; pop new task's R0-R12,LR & PC SPSR to CPSR
END
|
vandercookking/h7_device_RTT
| 2,263
|
rt-thread/libcpu/arm/arm926/context_gcc.S
|
;/*
; * Copyright (c) 2006-2018, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date Author Notes
; * 2011-08-14 weety copy from mini2440
; */
#define NOINT 0xC0
.text
;/*
; * rt_base_t rt_hw_interrupt_disable();
; */
.globl rt_hw_interrupt_disable
rt_hw_interrupt_disable:
MRS R0, CPSR @ return the previous CPSR as the saved level
ORR R1, R0, #NOINT
MSR CPSR_c, R1 @ mask IRQ and FIQ
BX LR
/*
* void rt_hw_interrupt_enable(rt_base_t level);
*/
.globl rt_hw_interrupt_enable
rt_hw_interrupt_enable:
MSR CPSR, R0 @ restore the level returned by rt_hw_interrupt_disable
BX LR
/*
* void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
* r0 --> from
* r1 --> to
*/
.globl rt_hw_context_switch
rt_hw_context_switch:
STMFD SP!, {LR} @; push pc (lr should be pushed in place of pc)
STMFD SP!, {R0-R12, LR} @; push lr & register file
MRS R4, CPSR
STMFD SP!, {R4} @; push cpsr
STR SP, [R0] @; store sp in preempted tasks tcb
LDR SP, [R1] @; get new task stack pointer
LDMFD SP!, {R4} @; pop new task spsr
MSR SPSR_cxsf, R4
LDMFD SP!, {R0-R12, LR, PC}^ @; pop new task r0-r12, lr & pc
/*
* void rt_hw_context_switch_to(rt_uint32 to);
* r0 --> to
*/
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
LDR SP, [R0] @; get new task stack pointer
LDMFD SP!, {R4} @; pop new task cpsr
MSR SPSR_cxsf, R4
LDMFD SP!, {R0-R12, LR, PC}^ @; pop new task r0-r12, lr & pc
/*
* void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
*/
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
LDR R2, =rt_thread_switch_interrupt_flag
LDR R3, [R2]
CMP R3, #1
BEQ _reswitch @; switch already pending: only update the destination
MOV R3, #1 @; set flag to 1
STR R3, [R2]
LDR R2, =rt_interrupt_from_thread @; set rt_interrupt_from_thread
STR R0, [R2]
_reswitch:
LDR R2, =rt_interrupt_to_thread @; set rt_interrupt_to_thread
STR R1, [R2]
BX LR
|
vandercookking/h7_device_RTT
| 6,203
|
rt-thread/libcpu/arm/arm926/start_gcc.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2011-01-13 weety first version
* 2015-04-15 ArdaFu Split from AT91SAM9260 BSP
* 2015-04-21 ArdaFu Remove remap code. Using mmu to map vector table
* 2015-06-04 aozima Align stack address to 8 byte.
*/
/* ARM processor modes (CPSR[4:0]) */
.equ MODE_USR, 0x10
.equ MODE_FIQ, 0x11
.equ MODE_IRQ, 0x12
.equ MODE_SVC, 0x13
.equ MODE_ABT, 0x17
.equ MODE_UND, 0x1B
.equ MODE_SYS, 0x1F
.equ MODEMASK, 0x1F
/* CPSR interrupt-mask bits: I (IRQ) and F (FIQ) */
.equ NOINT, 0xC0
.equ I_BIT, 0x80
.equ F_BIT, 0x40
/* per-mode stack sizes (bytes) */
.equ UND_STACK_SIZE, 0x00000100
.equ SVC_STACK_SIZE, 0x00000100
.equ ABT_STACK_SIZE, 0x00000100
.equ FIQ_STACK_SIZE, 0x00000100
.equ IRQ_STACK_SIZE, 0x00000100
.equ SYS_STACK_SIZE, 0x00000100
/*
***************************************
* Interrupt vector table
***************************************
*/
.section .vectors
.code 32
/* ARM exception vector table: eight ldr-pc slots, each loading its
 * handler address from the literal words that follow. */
.global system_vectors
system_vectors:
ldr pc, _vector_reset
ldr pc, _vector_undef
ldr pc, _vector_swi
ldr pc, _vector_pabt
ldr pc, _vector_dabt
ldr pc, _vector_resv
ldr pc, _vector_irq
ldr pc, _vector_fiq
_vector_reset:
.word reset
_vector_undef:
.word vector_undef
_vector_swi:
.word vector_swi
_vector_pabt:
.word vector_pabt
_vector_dabt:
.word vector_dabt
_vector_resv:
.word vector_resv
_vector_irq:
.word vector_irq
_vector_fiq:
.word vector_fiq
.balignl 16,0xdeadbeef
/*
***************************************
* Stack and Heap Definitions
***************************************
*/
.section .data
/* Each label marks the TOP (highest address) of its stack; stacks grow
 * downwards, so SP for each mode is loaded with the label itself. */
.space UND_STACK_SIZE
.align 3
.global und_stack_start
und_stack_start:
.space ABT_STACK_SIZE
.align 3
.global abt_stack_start
abt_stack_start:
.space FIQ_STACK_SIZE
.align 3
.global fiq_stack_start
fiq_stack_start:
.space IRQ_STACK_SIZE
.align 3
.global irq_stack_start
irq_stack_start:
.skip SYS_STACK_SIZE /* .skip is an alias of .space */
.align 3
.global sys_stack_start
sys_stack_start:
.space SVC_STACK_SIZE
.align 3
.global svc_stack_start
svc_stack_start:
/*
***************************************
* Startup Code
***************************************
*/
.section .text
/* reset — enter SVC mode with interrupts masked, init CPU caches/MMU,
 * run board low-level init, set up per-mode stacks, clear .bss, then
 * jump to rtthread_startup. */
.global reset
reset:
/* Enter svc mode and mask interrupts */
mrs r0, cpsr
bic r0, r0, #MODEMASK
orr r0, r0, #MODE_SVC|NOINT
msr cpsr_cxsf, r0
/* init cpu */
bl cpu_init_crit
/* Call low level init function */
ldr sp, =svc_stack_start
ldr r0, =rt_low_level_init
blx r0
/* init stack */
bl stack_setup
/* clear bss */
mov r0, #0
ldr r1, =__bss_start
ldr r2, =__bss_end
bss_clear_loop:
cmp r1, r2
strlo r0, [r1], #4
blo bss_clear_loop
/* call c++ constructors of global objects */
/*
ldr r0, =__ctors_start__
ldr r1, =__ctors_end__
ctor_loop:
cmp r0, r1
beq ctor_end
ldr r2, [r0], #4
stmfd sp!, {r0-r1}
mov lr, pc
bx r2
ldmfd sp!, {r0-r1}
b ctor_loop
ctor_end:
*/
/* start RT-Thread Kernel */
ldr pc, _rtthread_startup
_rtthread_startup:
.word rtthread_startup
/* cpu_init_crit — invalidate caches/TLB and set a known CP15 control
 * state (MMU and caches off, alignment check and high-vectors bits per
 * the masks below). */
cpu_init_crit:
/* invalidate I/D caches */
mov r0, #0
mcr p15, 0, r0, c7, c7, 0
mcr p15, 0, r0, c8, c7, 0 /* invalidate TLB */
/* disable MMU stuff and caches */
mrc p15, 0, r0, c1, c0, 0
bic r0, r0, #0x00002300
bic r0, r0, #0x00000087
orr r0, r0, #0x00000002
orr r0, r0, #0x00001000
mcr p15, 0, r0, c1, c0, 0
bx lr
/* stack_setup — visit each processor mode with interrupts masked and
 * load its banked SP; ends back in SVC mode. */
stack_setup:
/* Setup Stack for each mode */
mrs r0, cpsr
bic r0, r0, #MODEMASK
orr r1, r0, #MODE_UND|NOINT
msr cpsr_cxsf, r1 /* undefined-instruction mode */
ldr sp, =und_stack_start
orr r1, r0, #MODE_ABT|NOINT
msr cpsr_cxsf, r1 /* abort mode */
ldr sp, =abt_stack_start
orr r1, r0, #MODE_IRQ|NOINT
msr cpsr_cxsf, r1 /* IRQ mode */
ldr sp, =irq_stack_start
orr r1, r0, #MODE_FIQ|NOINT
msr cpsr_cxsf, r1 /* FIQ mode */
ldr sp, =fiq_stack_start
orr r1, r0, #MODE_SYS|NOINT
msr cpsr_cxsf,r1 /* SYS/User mode */
ldr sp, =sys_stack_start
orr r1, r0, #MODE_SVC|NOINT
msr cpsr_cxsf, r1 /* SVC mode (final state) */
ldr sp, =svc_stack_start
bx lr
/*
***************************************
* exception handlers
***************************************
*/
/* Interrupt */
/* vector_fiq — plain FIQ dispatch, no thread switching */
vector_fiq:
stmfd sp!,{r0-r7,lr}
bl rt_hw_trap_fiq
ldmfd sp!,{r0-r7,lr}
subs pc, lr, #4 /* return to interrupted instruction, restoring CPSR */
/* vector_irq — dispatch through the kernel; if the ISR requested a
 * thread switch, divert to rt_hw_context_switch_interrupt_do */
vector_irq:
stmfd sp!, {r0-r12,lr}
bl rt_interrupt_enter
bl rt_hw_trap_irq
bl rt_interrupt_leave
ldr r0, =rt_thread_switch_interrupt_flag
ldr r1, [r0]
cmp r1, #1
beq rt_hw_context_switch_interrupt_do
ldmfd sp!, {r0-r12,lr}
subs pc, lr, #4 /* return to interrupted instruction, restoring CPSR */
/* rt_hw_context_switch_interrupt_do — rebuild the interrupted thread's
 * context on its own (SVC-mode) stack, then resume the "to" thread.
 * Entered from vector_irq with r0 = &rt_thread_switch_interrupt_flag and
 * the IRQ stack still holding the interrupted thread's r0-r12,lr. */
rt_hw_context_switch_interrupt_do:
mov r1, #0 /* clear the switch flag */
str r1, [r0]
mov r1, sp /* r1 -> saved r0-r3 on the IRQ stack */
add sp, sp, #4*4 /* skip over saved r0-r3 */
ldmfd sp!, {r4-r12,lr} /* reload the rest of the saved registers */
mrs r0, spsr /* r0 = interrupted thread's CPSR */
sub r2, lr, #4 /* r2 = interrupted thread's PC */
msr cpsr_c, #I_BIT|F_BIT|MODE_SVC /* switch to SVC mode, no interrupts */
stmfd sp!, {r2} /* push old task's PC */
stmfd sp!, {r4-r12,lr} /* push old task's lr, r12-r4 */
ldmfd r1, {r1-r4} /* fetch old task's r0-r3 from IRQ stack */
stmfd sp!, {r1-r4} /* push old task's r3-r0 */
stmfd sp!, {r0} /* push old task's CPSR */
ldr r4, =rt_interrupt_from_thread
ldr r5, [r4] /* r5 = sp slot in old task's TCB */
str sp, [r5] /* store SP in preempted task's TCB */
ldr r6, =rt_interrupt_to_thread
ldr r6, [r6] /* r6 = sp slot in new task's TCB */
ldr sp, [r6] /* get new task's stack pointer */
ldmfd sp!, {r4} /* pop new task's SPSR */
msr spsr_cxsf, r4
ldmfd sp!, {r0-r12,lr,pc}^ /* pop r0-r12,lr,pc; SPSR -> CPSR */
/* Exception */
/* push_svc_reg — build a 17-word trap frame (r0-r12, sp, lr, pc, cpsr)
 * on the current stack and leave r0 pointing at it for the C trap
 * handlers below. */
.macro push_svc_reg
sub sp, sp, #17 * 4
stmia sp, {r0 - r12} /* save r0-r12 */
mov r0, sp /* r0 -> frame base */
mrs r6, spsr /* pre-exception CPSR */
str lr, [r0, #15*4] /* frame[15] = faulting PC (lr) */
str r6, [r0, #16*4] /* frame[16] = CPSR */
str sp, [r0, #13*4] /* frame[13] = sp */
str lr, [r0, #14*4] /* frame[14] = lr */
.endm
/* Each handler builds a frame and calls its C trap handler; the traps
 * do not return (b . loops forever). */
vector_swi:
push_svc_reg
bl rt_hw_trap_swi
b .
vector_undef:
push_svc_reg
bl rt_hw_trap_udef
b .
vector_pabt:
push_svc_reg
bl rt_hw_trap_pabt
b .
vector_dabt:
push_svc_reg
bl rt_hw_trap_dabt
b .
vector_resv:
push_svc_reg
bl rt_hw_trap_resv
b .
|
vandercookking/h7_device_RTT
| 2,463
|
rt-thread/libcpu/arm/arm926/context_rvds.S
|
;/*
; * Copyright (c) 2006-2018, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date Author Notes
; * 2011-08-14 weety copy from mini2440
; */
NOINT EQU 0XC0 ; disable interrupt in psr
AREA |.TEXT|, CODE, READONLY, ALIGN=2
ARM
REQUIRE8
PRESERVE8
;/*
; * rt_base_t rt_hw_interrupt_disable();
; */
rt_hw_interrupt_disable PROC
EXPORT rt_hw_interrupt_disable
MRS R0, CPSR ; return the previous CPSR as the saved level
ORR R1, R0, #NOINT
MSR CPSR_C, R1 ; mask IRQ and FIQ
BX LR
ENDP
;/*
; * void rt_hw_interrupt_enable(rt_base_t level);
; */
rt_hw_interrupt_enable proc
export rt_hw_interrupt_enable
msr cpsr_c, r0 ; restore the level returned by rt_hw_interrupt_disable
bx lr
endp
;/*
; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
; * r0 --> from
; * r1 --> to
; */
rt_hw_context_switch proc
export rt_hw_context_switch
stmfd sp!, {lr} ; push pc (lr should be pushed in place of pc)
stmfd sp!, {r0-r12, lr} ; push lr & register file
mrs r4, cpsr
stmfd sp!, {r4} ; push cpsr
str sp, [r0] ; store sp in preempted tasks tcb
ldr sp, [r1] ; get new task stack pointer
ldmfd sp!, {r4} ; pop new task spsr
msr spsr_cxsf, r4
ldmfd sp!, {r0-r12, lr, pc}^ ; pop new task r0-r12, lr & pc
endp
;/*
; * void rt_hw_context_switch_to(rt_uint32 to);
; * r0 --> to
; */
rt_hw_context_switch_to proc
export rt_hw_context_switch_to
ldr sp, [r0] ; get new task stack pointer
ldmfd sp!, {r4} ; pop new task spsr
msr spsr_cxsf, r4
ldmfd sp!, {r0-r12, lr, pc}^ ; pop new task r0-r12, lr & pc
endp
;/*
; * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
; */
import rt_thread_switch_interrupt_flag
import rt_interrupt_from_thread
import rt_interrupt_to_thread
rt_hw_context_switch_interrupt proc
export rt_hw_context_switch_interrupt
ldr r2, =rt_thread_switch_interrupt_flag
ldr r3, [r2]
cmp r3, #1
beq _reswitch ; switch already pending: only update the destination
mov r3, #1 ; set flag to 1
str r3, [r2]
ldr r2, =rt_interrupt_from_thread ; set rt_interrupt_from_thread
str r0, [r2]
_reswitch
ldr r2, =rt_interrupt_to_thread ; set rt_interrupt_to_thread
str r1, [r2]
bx lr
endp
end
|
vandercookking/h7_device_RTT
| 8,379
|
rt-thread/libcpu/arm/arm926/start_rvds.S
|
;/*
; * Copyright (c) 2006-2018, RT-Thread Development Team
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Change Logs:
; * Date Author Notes
; * 2011-08-14 weety first version
; * 2015-04-15 ArdaFu Split from AT91SAM9260 BSP
; * 2015-04-21 ArdaFu Remove remap code. Using mmu to map vector table
; * 2015-06-04 aozima Align stack address to 8 byte.
; */
UND_STK_SIZE EQU 512 ; undefined-instruction mode stack size (bytes)
SVC_STK_SIZE EQU 4096 ; supervisor mode stack size
ABT_STK_SIZE EQU 512 ; abort mode stack size
IRQ_STK_SIZE EQU 1024 ; IRQ mode stack size
FIQ_STK_SIZE EQU 1024 ; FIQ mode stack size
SYS_STK_SIZE EQU 512 ; system/user mode stack size
Heap_Size EQU 512 ; C heap size (bytes)
S_FRAME_SIZE EQU (18*4) ;72 bytes: r0-r12, sp, lr, pc, psr, old_r0
S_PC EQU (15*4) ;R15 -- offset of the saved PC within the trap frame
; ARM processor mode encodings (CPSR[4:0])
MODE_USR EQU 0X10
MODE_FIQ EQU 0X11
MODE_IRQ EQU 0X12
MODE_SVC EQU 0X13
MODE_ABT EQU 0X17
MODE_UND EQU 0X1B
MODE_SYS EQU 0X1F
MODEMASK EQU 0X1F ; mask covering the CPSR mode field
NOINT EQU 0xC0 ; I and F bits: mask IRQ and FIQ
;----------------------- Stack and Heap Definitions ----------------------------
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem
; Stacks grow downwards, so each XXX_STACK_START label placed *after*
; its SPACE block is the initial (highest) SP value for that mode.
SPACE UND_STK_SIZE
EXPORT UND_STACK_START
UND_STACK_START
ALIGN 8 ; keep every stack top 8-byte aligned (AAPCS)
SPACE ABT_STK_SIZE
EXPORT ABT_STACK_START
ABT_STACK_START
ALIGN 8
SPACE FIQ_STK_SIZE
EXPORT FIQ_STACK_START
FIQ_STACK_START
ALIGN 8
SPACE IRQ_STK_SIZE
EXPORT IRQ_STACK_START
IRQ_STACK_START
ALIGN 8
SPACE SYS_STK_SIZE
EXPORT SYS_STACK_START
SYS_STACK_START
ALIGN 8
SPACE SVC_STK_SIZE
EXPORT SVC_STACK_START
SVC_STACK_START
Stack_Top ; highest stack address
__initial_sp
__heap_base
Heap_Mem SPACE Heap_Size ; C heap placed directly above the stacks
__heap_limit
PRESERVE8
;--------------Jump vector table------------------------------------------------
EXPORT Entry_Point
AREA RESET, CODE, READONLY
ARM
; ARM exception vector table: eight LDR PC instructions that load the
; handler addresses from the literal words below, so each vector can
; reach a handler placed anywhere in the 32-bit address space.
Entry_Point
LDR PC, vector_reset
LDR PC, vector_undef
LDR PC, vector_swi
LDR PC, vector_pabt
LDR PC, vector_dabt
LDR PC, vector_resv ; reserved vector slot
LDR PC, vector_irq
LDR PC, vector_fiq
; Literal pool of handler addresses, one per vector above.
vector_reset
DCD Reset_Handler
vector_undef
DCD Undef_Handler
vector_swi
DCD SWI_Handler
vector_pabt
DCD PAbt_Handler
vector_dabt
DCD DAbt_Handler
vector_resv
DCD Resv_Handler
vector_irq
DCD IRQ_Handler
vector_fiq
DCD FIQ_Handler
;----------------- Reset Handler -----------------------------------------------
IMPORT rt_low_level_init
IMPORT __main
EXPORT Reset_Handler
Reset_Handler
; set the cpu to SVC32 mode with IRQ/FIQ masked
MRS R0,CPSR
BIC R0,R0,#MODEMASK
ORR R0,R0,#MODE_SVC:OR:NOINT
MSR CPSR_cxsf,R0
; Set CO-Processor
; little-endian, disable I/D cache and MMU; vector table at 0x00000000
MRC p15, 0, R0, c1, c0, 0 ; Read CP15 control register
LDR R1, =0x00003085 ; bits to clear: M(MMU), C(D-cache), B(endian), I(I-cache), V(high vectors)
BIC R0, R0, R1
MCR p15, 0, R0, c1, c0, 0 ; Write CP15
; Call low level init function,
; disable and clear all IRQs, Init MMU, Init interrupt controller, etc.
LDR SP, =SVC_STACK_START ; temporary SVC stack for the C init call
LDR R0, =rt_low_level_init
BLX R0
Setup_Stack
; Setup Stack for each mode: enter the mode (interrupts masked),
; load its SP, move on to the next mode.
MRS R0, CPSR
BIC R0, R0, #MODEMASK
ORR R1, R0, #MODE_UND:OR:NOINT
MSR CPSR_cxsf, R1 ; Undef mode
LDR SP, =UND_STACK_START
ORR R1,R0,#MODE_ABT:OR:NOINT
MSR CPSR_cxsf,R1 ; Abort mode
LDR SP, =ABT_STACK_START
ORR R1,R0,#MODE_IRQ:OR:NOINT
MSR CPSR_cxsf,R1 ; IRQ mode
LDR SP, =IRQ_STACK_START
ORR R1,R0,#MODE_FIQ:OR:NOINT
MSR CPSR_cxsf,R1 ; FIQ mode
LDR SP, =FIQ_STACK_START
ORR R1,R0,#MODE_SYS:OR:NOINT
MSR CPSR_cxsf,R1 ; SYS/User mode
LDR SP, =SYS_STACK_START
ORR R1,R0,#MODE_SVC:OR:NOINT
MSR CPSR_cxsf,R1 ; SVC mode -- final mode for kernel startup
LDR SP, =SVC_STACK_START
; Enter the C code (ARM C library startup)
LDR R0, =__main
BLX R0
;----------------- Exception Handler -------------------------------------------
IMPORT rt_hw_trap_udef
IMPORT rt_hw_trap_swi
IMPORT rt_hw_trap_pabt
IMPORT rt_hw_trap_dabt
IMPORT rt_hw_trap_resv
IMPORT rt_hw_trap_irq
IMPORT rt_hw_trap_fiq
IMPORT rt_interrupt_enter
IMPORT rt_interrupt_leave
IMPORT rt_thread_switch_interrupt_flag
IMPORT rt_interrupt_from_thread
IMPORT rt_interrupt_to_thread
Undef_Handler PROC
; Build a full trap frame (S_FRAME_SIZE words) on this mode's stack
; and pass its address to the C handler so it can dump the context.
SUB SP, SP, #S_FRAME_SIZE
STMIA SP, {R0 - R12} ; Calling R0-R12
ADD R8, SP, #S_PC ; R8 -> saved-PC slot of the frame
STMDB R8, {SP, LR} ; Calling SP, LR
; NOTE(review): the store above captures the *exception-mode* SP/LR;
; the GCC port of this handler uses "STMDB R8, {SP, LR}^" to capture
; the interrupted mode's banked registers -- confirm which is intended.
STR LR, [R8, #0] ; Save calling PC (LR holds the faulting address here)
MRS R6, SPSR
STR R6, [R8, #4] ; Save interrupted mode's CPSR (from SPSR)
STR R0, [R8, #8] ; Save OLD_R0 (R0 is still the caller's value)
MOV R0, SP ; R0 = pointer to the trap frame
BL rt_hw_trap_udef
ENDP
; Minimal stubs: delegate straight to the C trap handlers.
SWI_Handler PROC
BL rt_hw_trap_swi
ENDP
PAbt_Handler PROC
BL rt_hw_trap_pabt
ENDP
DAbt_Handler PROC
; Build a full trap frame (S_FRAME_SIZE words) on this mode's stack
; and pass its address to the C handler so it can dump the context.
SUB SP, SP, #S_FRAME_SIZE
STMIA SP, {R0 - R12} ; Calling R0-R12
ADD R8, SP, #S_PC ; R8 -> saved-PC slot of the frame
STMDB R8, {SP, LR} ; Calling SP, LR
; NOTE(review): stores the exception-mode SP/LR; the GCC port uses the
; "^" form to capture the interrupted mode's banked SP/LR -- confirm.
STR LR, [R8, #0] ; Save calling PC
MRS R6, SPSR
STR R6, [R8, #4] ; Save interrupted mode's CPSR (from SPSR)
STR R0, [R8, #8] ; Save OLD_R0 (R0 is still the caller's value)
MOV R0, SP ; R0 = pointer to the trap frame
BL rt_hw_trap_dabt
ENDP
; Reserved vector: delegate to the C trap handler.
Resv_Handler PROC
BL rt_hw_trap_resv
ENDP
FIQ_Handler PROC
STMFD SP!, {R0-R7,LR} ; R8-R12 are banked in FIQ mode, so only R0-R7/LR need saving
BL rt_hw_trap_fiq
LDMFD SP!, {R0-R7,LR}
SUBS PC, LR, #4 ; return to the interrupted instruction; SUBS also restores CPSR from SPSR
ENDP
IRQ_Handler PROC
STMFD SP!, {R0-R12,LR} ; save caller context on the IRQ stack
BL rt_interrupt_enter ; kernel bookkeeping: interrupt nesting++
BL rt_hw_trap_irq ; C-level IRQ dispatch
BL rt_interrupt_leave ; interrupt nesting--
; If rt_thread_switch_interrupt_flag set,
; jump to rt_hw_context_switch_interrupt_do and don't return
LDR R0, =rt_thread_switch_interrupt_flag
LDR R1, [R0]
CMP R1, #1
BEQ rt_hw_context_switch_interrupt_do
LDMFD SP!, {R0-R12,LR}
SUBS PC, LR, #4 ; normal IRQ exit: PC = LR-4, SPSR -> CPSR
ENDP
;------ void rt_hw_context_switch_interrupt_do(rt_base_t flag) -----------------
rt_hw_context_switch_interrupt_do PROC
; Entered from IRQ_Handler (R0 = &rt_thread_switch_interrupt_flag)
; when a switch was requested from ISR context.  Rebuilds the
; interrupted thread's stack frame on its own (SVC) stack, saves its
; SP into the "from" TCB, then restores the "to" thread.
MOV R1, #0 ; Clear flag
STR R1, [R0] ; Save to flag variable
LDMFD SP!, {R0-R12,LR} ; Reload saved registers
STMFD SP, {R0-R2} ; Save R0-R2 below the IRQ SP (no writeback)
SUB R1, SP, #4*3 ; R1 -> stashed R0-R2 on the IRQ stack
SUB R2, LR, #4 ; Save old task's PC to R2
MRS R0, SPSR ; Get CPSR of interrupted thread
MSR CPSR_c, #MODE_SVC:OR:NOINT ; Switch to SVC mode and no interrupt
STMFD SP!, {R2} ; Push old task's PC
STMFD SP!, {R3-R12,LR} ; Push old task's LR,R12-R3
LDMFD R1, {R1-R3} ; Fetch R0-R2 stashed on the IRQ stack
STMFD SP!, {R1-R3} ; Push old task's R2-R0
STMFD SP!, {R0} ; Push old task's CPSR
LDR R4, =rt_interrupt_from_thread
LDR R5, [R4] ; R5 = &sp field in old task's TCB
STR SP, [R5] ; Store SP in preempted task's TCB
LDR R6, =rt_interrupt_to_thread
LDR R6, [R6] ; R6 = &sp field in new task's TCB
LDR SP, [R6] ; Get new task's stack pointer
LDMFD SP!, {R4} ; Pop new task's saved PSR
MSR SPSR_cxsf, R4 ; stage it for the ^ restore below
LDMFD SP!, {R0-R12,LR,PC}^ ; pop new task's R0-R12,LR & PC; SPSR -> CPSR
ENDP
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
; MicroLIB reads these symbols directly.
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
; Standard C library: provide the two-region memory model callback
; that reports heap and stack bounds.
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem ; heap base
LDR R1, = SVC_STACK_START ; stack base (top-address)
LDR R2, = (Heap_Mem + Heap_Size) ; heap limit
LDR R3, = (SVC_STACK_START - SVC_STK_SIZE) ; stack limit (low-address)
BX LR
ALIGN
ENDIF
END
|
vandercookking/h7_device_RTT
| 2,328
|
rt-thread/libcpu/arm/s3c24x0/context_gcc.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2006-09-06 XuXinming first version
*/
/*!
* \addtogroup S3C24X0
*/
/*@{*/
#define NOINT 0xc0
/*
* rt_base_t rt_hw_interrupt_disable();
*/
.globl rt_hw_interrupt_disable
rt_hw_interrupt_disable:
mrs r0, cpsr @ r0 = current cpsr, returned so the caller can restore it
orr r1, r0, #NOINT @ set I and F bits to mask IRQ and FIQ
msr cpsr_c, r1 @ control field only; mode unchanged
mov pc, lr
/*
* void rt_hw_interrupt_enable(rt_base_t level);
*/
.globl rt_hw_interrupt_enable
rt_hw_interrupt_enable:
msr cpsr, r0 @ restore the cpsr saved by rt_hw_interrupt_disable
mov pc, lr
/*
* void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
* r0 --> from
* r1 --> to
*/
.globl rt_hw_context_switch
rt_hw_context_switch:
@ Thread frame, low to high address: spsr, cpsr, r0-r12, lr, pc
stmfd sp!, {lr} @ push pc (lr should be pushed in place of PC)
stmfd sp!, {r0-r12, lr} @ push lr & register file
mrs r4, cpsr
stmfd sp!, {r4} @ push cpsr
mrs r4, spsr
stmfd sp!, {r4} @ push spsr
str sp, [r0] @ store sp in preempted task's TCB (r0 = &from->sp)
ldr sp, [r1] @ get new task stack pointer (r1 = &to->sp)
ldmfd sp!, {r4} @ pop new task spsr
msr spsr_cxsf, r4
ldmfd sp!, {r4} @ pop new task cpsr
msr spsr_cxsf, r4 @ staged in SPSR; the ^ below copies SPSR into CPSR
ldmfd sp!, {r0-r12, lr, pc}^ @ pop r0-r12, lr & pc; SPSR -> CPSR
/*
* void rt_hw_context_switch_to(rt_uint32 to);
* r0 --> to
*/
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
@ First-ever switch: no current thread to save, just restore `to`.
ldr sp, [r0] @ get new task stack pointer (r0 = &to->sp)
ldmfd sp!, {r4} @ pop new task spsr
msr spsr_cxsf, r4
ldmfd sp!, {r4} @ pop new task cpsr
msr cpsr_cxsf, r4 @ restore CPSR directly (no ^ form on the ldm below)
ldmfd sp!, {r0-r12, lr, pc} @ pop new task r0-r12, lr & pc
/*
* void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
*/
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
@ Deferred switch requested from ISR context: only record the
@ from/to threads here; the real switch happens on IRQ exit.
ldr r2, =rt_thread_switch_interrupt_flag
ldr r3, [r2]
cmp r3, #1 @ a switch already pending? then only update the destination
beq _reswitch
mov r3, #1 @ set rt_thread_switch_interrupt_flag to 1
str r3, [r2]
ldr r2, =rt_interrupt_from_thread @ set rt_interrupt_from_thread
str r0, [r2]
_reswitch:
ldr r2, =rt_interrupt_to_thread @ set rt_interrupt_to_thread
str r1, [r2]
mov pc, lr
|
vandercookking/h7_device_RTT
| 11,723
|
rt-thread/libcpu/arm/s3c24x0/start_gcc.S
|
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2006-03-13 Bernard first version
* 2006-10-05 Alsor.Z for s3c2440 initialize
* 2008-01-29 Yi.Qiu for QEMU emulator
*/
#define CONFIG_STACKSIZE 512
/* Trap frame layout: offsets of each saved register within the
 * S_FRAME_SIZE-byte frame built by the exception handlers below. */
#define S_FRAME_SIZE 72
#define S_OLD_R0 68
#define S_PSR 64
#define S_PC 60
#define S_LR 56
#define S_SP 52
#define S_IP 48
#define S_FP 44
#define S_R10 40
#define S_R9 36
#define S_R8 32
#define S_R7 28
#define S_R6 24
#define S_R5 20
#define S_R4 16
#define S_R3 12
#define S_R2 8
#define S_R1 4
#define S_R0 0
/* ARM processor mode encodings (CPSR[4:0]) */
.equ USERMODE, 0x10
.equ FIQMODE, 0x11
.equ IRQMODE, 0x12
.equ SVCMODE, 0x13
.equ ABORTMODE, 0x17
.equ UNDEFMODE, 0x1b
.equ MODEMASK, 0x1f
.equ NOINT, 0xc0 /* I and F bits: mask IRQ and FIQ */
/* S3C24x0 memory map and peripheral register addresses */
.equ RAM_BASE, 0x00000000 /*Start address of RAM */
.equ ROM_BASE, 0x30000000 /*Start address of Flash */
.equ MPLLCON, 0x4c000004 /*Mpll control register */
.equ M_MDIV, 0x20 /* MPLL divider settings */
.equ M_PDIV, 0x4
.equ M_SDIV, 0x2
.equ INTMSK, 0x4a000008 /*Interrupt mask register */
.equ INTSUBMSK, 0x4a00001c /*Interrupt sub-mask register */
.equ WTCON, 0x53000000 /*Watchdog timer control */
.equ LOCKTIME, 0x4c000000 /*PLL lock time counter */
.equ CLKDIVN, 0x4c000014 /*Clock divider control */
.equ GPHCON, 0x56000070 /*Port H control */
.equ GPHUP, 0x56000078 /*Pull-up control H */
.equ BWSCON, 0x48000000 /*Bus width & wait status */
.equ BANKCON0, 0x48000004 /*Boot ROM control */
.equ BANKCON1, 0x48000008 /*BANK1 control */
.equ BANKCON2, 0x4800000c /*BANK2 control */
.equ BANKCON3, 0x48000010 /*BANK3 control */
.equ BANKCON4, 0x48000014 /*BANK4 control */
.equ BANKCON5, 0x48000018 /*BANK5 control */
.equ BANKCON6, 0x4800001c /*BANK6 control */
.equ BANKCON7, 0x48000020 /*BANK7 control */
.equ REFRESH, 0x48000024 /*DRAM/SDRAM refresh */
.equ BANKSIZE, 0x48000028 /*Flexible Bank Size */
.equ MRSRB6, 0x4800002c /*Mode register set for SDRAM*/
.equ MRSRB7, 0x48000030 /*Mode register set for SDRAM*/
/*
*************************************************************************
*
* Jump vector table
*
*************************************************************************
*/
.section .init, "ax"
.code 32
.globl _start
/* ARM exception vector table.  The reset vector branches directly;
 * the other vectors load their handler address from the literal
 * words that follow, so handlers can live anywhere in memory. */
_start:
b reset
ldr pc, _vector_undef
ldr pc, _vector_swi
ldr pc, _vector_pabt
ldr pc, _vector_dabt
ldr pc, _vector_resv
ldr pc, _vector_irq
ldr pc, _vector_fiq
/* Literal pool of handler addresses, one per vector above. */
_vector_undef: .word vector_undef
_vector_swi: .word vector_swi
_vector_pabt: .word vector_pabt
_vector_dabt: .word vector_dabt
_vector_resv: .word vector_resv
_vector_irq: .word vector_irq
_vector_fiq: .word vector_fiq
.balignl 16,0xdeadbeef /* pad to 16 bytes with a recognizable sentinel */
/*
*************************************************************************
*
* Startup Code (reset vector)
* relocate armboot to ram
* setup stack
* jump to second stage
*
*************************************************************************
*/
/* Literal words exposing linker-script symbols to C and to the
 * position-independent startup code below. */
_TEXT_BASE:
.word TEXT_BASE
/*
 * rtthread kernel start and end
 * which are defined in linker script
 */
.globl _rtthread_start
_rtthread_start:
.word _start
.globl _rtthread_end
_rtthread_end:
.word _end
/*
 * rtthread bss start and end which are defined in linker script
 */
.globl _bss_start
_bss_start:
.word __bss_start
.globl _bss_end
_bss_end:
.word __bss_end
/* Initial SP values per mode: each word is the top (highest address)
 * of that mode's stack region, since ARM stacks grow downwards. */
.globl IRQ_STACK_START
IRQ_STACK_START:
.word _irq_stack_start + 1024
.globl FIQ_STACK_START
FIQ_STACK_START:
.word _fiq_stack_start + 1024
.globl UNDEFINED_STACK_START
UNDEFINED_STACK_START:
.word _undefined_stack_start + CONFIG_STACKSIZE
.globl ABORT_STACK_START
ABORT_STACK_START:
.word _abort_stack_start + CONFIG_STACKSIZE
.globl _STACK_START
_STACK_START:
.word _svc_stack_start + 4096
/* ----------------------------------entry------------------------------*/
reset:
/* set the cpu to SVC32 mode */
mrs r0,cpsr
bic r0,r0,#MODEMASK
orr r0,r0,#SVCMODE
msr cpsr,r0
/* watch dog disable */
ldr r0,=WTCON
ldr r1,=0x0
str r1,[r0]
/* mask all IRQs by clearing all bits in the INTMRs */
ldr r1, =INTMSK
ldr r0, =0xffffffff
str r0, [r1]
ldr r1, =INTSUBMSK
ldr r0, =0x7fff /*all sub interrupt disable */
str r0, [r1]
/* set interrupt vector: copy the vector table (and trailing literal
 * pool) from the load address to address 0 */
ldr r0, _load_address
mov r1, #0x0 /* target address */
add r2, r0, #0x20 /* size, 32bytes */
copy_loop:
ldmia r0!, {r3-r10} /* copy from source address [r0] */
stmia r1!, {r3-r10} /* copy to target address [r1] */
cmp r0, r2 /* until source end address [r2] */
ble copy_loop
/* NOTE(review): `ble` is a signed compare and still branches when
 * r0 == r2, so 64 bytes are copied in total (vectors plus literal
 * pool) -- appears intentional, but `blo` would be the unsigned
 * address compare; confirm. */
/* setup stack */
bl stack_setup
/* clear .bss */
mov r0,#0 /* get a zero */
ldr r1,=__bss_start /* bss start */
ldr r2,=__bss_end /* bss end */
bss_loop:
cmp r1,r2 /* check if data to clear */
strlo r0,[r1],#4 /* clear 4 bytes */
blo bss_loop /* loop until done */
/* call C++ constructors of global objects */
ldr r0, =__ctors_start__
ldr r1, =__ctors_end__
ctor_loop:
cmp r0, r1
beq ctor_end
ldr r2, [r0], #4 /* r2 = next constructor pointer */
stmfd sp!, {r0-r1}
mov lr, pc /* manual call: lr = address after the bx below */
bx r2
ldmfd sp!, {r0-r1}
b ctor_loop
ctor_end:
/* start RT-Thread Kernel */
ldr pc, _rtthread_startup
_rtthread_startup:
.word rtthread_startup
#if defined (__FLASH_BUILD__)
_load_address:
.word ROM_BASE + _TEXT_BASE
#else
_load_address:
.word RAM_BASE + _TEXT_BASE
#endif
/*
*************************************************************************
*
* Interrupt handling
*
*************************************************************************
*/
/* exception handlers */
/* exception handlers */
.align 5
vector_undef:
/* Build a full trap frame (S_FRAME_SIZE bytes) and hand its address
 * to the C handler so it can dump the faulting context. */
sub sp, sp, #S_FRAME_SIZE
stmia sp, {r0 - r12} /* Calling r0-r12 */
add r8, sp, #S_PC /* r8 -> saved-PC slot of the frame */
stmdb r8, {sp, lr}^ /* Calling SP, LR (banked user-mode registers) */
str lr, [r8, #0] /* Save calling PC */
mrs r6, spsr
str r6, [r8, #4] /* Save CPSR (interrupted mode's, from SPSR) */
str r0, [r8, #8] /* Save OLD_R0 (r0 still holds the caller's value) */
mov r0, sp /* r0 = trap frame pointer for the C handler */
bl rt_hw_trap_udef
/* Minimal stubs: delegate straight to the C trap handlers. */
.align 5
vector_swi:
bl rt_hw_trap_swi
.align 5
vector_pabt:
bl rt_hw_trap_pabt
.align 5
vector_dabt:
/* Build a full trap frame (S_FRAME_SIZE bytes) and hand its address
 * to the C handler so it can dump the faulting context. */
sub sp, sp, #S_FRAME_SIZE
stmia sp, {r0 - r12} /* Calling r0-r12 */
add r8, sp, #S_PC /* r8 -> saved-PC slot of the frame */
stmdb r8, {sp, lr}^ /* Calling SP, LR (banked user-mode registers) */
str lr, [r8, #0] /* Save calling PC */
mrs r6, spsr
str r6, [r8, #4] /* Save CPSR (interrupted mode's, from SPSR) */
str r0, [r8, #8] /* Save OLD_R0 (r0 still holds the caller's value) */
mov r0, sp /* r0 = trap frame pointer for the C handler */
bl rt_hw_trap_dabt
.align 5
vector_resv: /* reserved vector: delegate to the C trap handler */
bl rt_hw_trap_resv
.globl rt_interrupt_enter
.globl rt_interrupt_leave
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
vector_irq:
stmfd sp!, {r0-r12,lr} /* save caller context on the IRQ stack */
bl rt_interrupt_enter /* kernel bookkeeping: interrupt nesting++ */
bl rt_hw_trap_irq /* C-level IRQ dispatch */
bl rt_interrupt_leave /* interrupt nesting-- */
/* if rt_thread_switch_interrupt_flag set, jump to _interrupt_thread_switch and don't return */
ldr r0, =rt_thread_switch_interrupt_flag
ldr r1, [r0]
cmp r1, #1
beq _interrupt_thread_switch
ldmfd sp!, {r0-r12,lr}
subs pc, lr, #4 /* normal IRQ exit: pc = lr-4, SPSR -> CPSR */
.align 5
vector_fiq:
stmfd sp!,{r0-r7,lr} /* r8-r12 are banked in FIQ mode; only r0-r7/lr need saving */
bl rt_hw_trap_fiq
ldmfd sp!,{r0-r7,lr}
subs pc,lr,#4 /* return to interrupted instruction; SUBS restores CPSR from SPSR */
_interrupt_thread_switch:
/* Entered from vector_irq (r0 = &rt_thread_switch_interrupt_flag)
 * when a switch was requested from ISR context.  Rebuilds the
 * interrupted thread's stack frame on its own stack, saves its SP
 * into the "from" TCB, then restores the "to" thread. */
mov r1, #0 /* clear rt_thread_switch_interrupt_flag*/
str r1, [r0]
ldmfd sp!, {r0-r12,lr} /* reload saved registers */
stmfd sp!, {r0-r3} /* save r0-r3 */
mov r1, sp /* r1 -> stashed r0-r3 on the IRQ stack */
add sp, sp, #16 /* restore sp */
sub r2, lr, #4 /* save old task's pc to r2 */
mrs r3, spsr /* r3 = interrupted task's psr */
orr r0, r3, #NOINT /* same psr with interrupts masked */
msr spsr_c, r0
ldr r0, =.+8 /* switch to interrupted task's stack*/
movs pc, r0 /* movs pc copies SPSR->CPSR: falls through to the next
 * instruction but now in the interrupted task's mode */
stmfd sp!, {r2} /* push old task's pc */
stmfd sp!, {r4-r12,lr} /* push old task's lr,r12-r4 */
mov r4, r1 /* Special optimised code below */
mov r5, r3
ldmfd r4!, {r0-r3} /* fetch r0-r3 stashed on the IRQ stack */
stmfd sp!, {r0-r3} /* push old task's r3-r0 */
stmfd sp!, {r5} /* push old task's psr */
mrs r4, spsr
stmfd sp!, {r4} /* push old task's spsr */
ldr r4, =rt_interrupt_from_thread
ldr r5, [r4] /* r5 = &sp field in old task's TCB */
str sp, [r5] /* store sp in preempted tasks's TCB*/
ldr r6, =rt_interrupt_to_thread
ldr r6, [r6] /* r6 = &sp field in new task's TCB */
ldr sp, [r6] /* get new task's stack pointer */
ldmfd sp!, {r4} /* pop new task's spsr */
msr SPSR_cxsf, r4
ldmfd sp!, {r4} /* pop new task's psr */
msr CPSR_cxsf, r4
ldmfd sp!, {r0-r12,lr,pc} /* pop new task's r0-r12,lr & pc */
stack_setup:
/* Give every ARM exception mode its own stack pointer, then return
 * in SVC mode with IRQ/FIQ masked. */
mrs r0, cpsr
bic r0, r0, #MODEMASK
orr r1, r0, #UNDEFMODE|NOINT
msr cpsr_cxsf, r1 /* undef mode */
ldr sp, UNDEFINED_STACK_START
orr r1,r0,#ABORTMODE|NOINT
msr cpsr_cxsf,r1 /* abort mode */
ldr sp, ABORT_STACK_START
orr r1,r0,#IRQMODE|NOINT
msr cpsr_cxsf,r1 /* IRQ mode */
ldr sp, IRQ_STACK_START
orr r1,r0,#FIQMODE|NOINT
msr cpsr_cxsf,r1 /* FIQ mode */
ldr sp, FIQ_STACK_START
bic r0,r0,#MODEMASK
orr r1,r0,#SVCMODE|NOINT
msr cpsr_cxsf,r1 /* SVC mode -- final mode: lr is banked here, so the
 * return address from the SVC-mode caller is intact */
ldr sp, _STACK_START
/* USER mode is not initialized. */
mov pc,lr /* The LR register may be not valid for the mode changes.*/
/*@}*/
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.