repo_id
stringlengths 5
115
| size
int64 590
5.01M
| file_path
stringlengths 4
212
| content
stringlengths 590
5.01M
|
|---|---|---|---|
a3f/bareDOOM
| 1,458
|
arch/arm/lib32/lshrdi3.S
|
/* Copyright 1995, 1996, 1998, 1999, 2000, 2003, 2004, 2005
Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.
In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file. (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
*/
#include <linux/linkage.h>
#ifdef __ARMEB__
#define al r1
#define ah r0
#else
#define al r0
#define ah r1
#endif
.section .text.__lshrdi3
/*
 * 64-bit logical shift right: {ah:al} >>= r2.
 * al/ah map to r0/r1 (or swapped on big-endian) per the #defines above.
 * r2 = shift count; clobbers r3, ip.  Dual entry: libgcc name and EABI name.
 */
ENTRY(__lshrdi3)
ENTRY(__aeabi_llsr)
subs r3, r2, #32 @ r3 = shift - 32; MI if shift < 32, PL otherwise
rsb ip, r2, #32 @ ip = 32 - shift (bits crossing from high word)
movmi al, al, lsr r2 @ shift < 32: shift low word
movpl al, ah, lsr r3 @ shift >= 32: low word comes entirely from high word
ARM( orrmi al, al, ah, lsl ip ) @ shift < 32: merge in bits dropped from high word
THUMB( lslmi r3, ah, ip ) @ Thumb-2 has no shifted orr operand; use r3 as scratch
THUMB( orrmi al, al, r3 )
mov ah, ah, lsr r2 @ high word: plain shift (becomes 0 when r2 >= 32)
mov pc, lr
ENDPROC(__lshrdi3)
ENDPROC(__aeabi_llsr)
|
a3f/bareDOOM
| 4,853
|
arch/arm/lib32/findbit.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/* SPDX-FileCopyrightText: 1995-2000 Russell King */
/*
* Originally from Linux kernel
* arch/arm/lib/findbit.S
*
* 16th March 2001 - John Ripley <jripley@sonicblue.com>
* Fixed so that "size" is an exclusive not an inclusive quantity.
* All users of these functions expect exclusive sizes, and may
* also call with zero size.
* Reworked by rmk.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
.text
/*
* Purpose : Find a 'zero' bit
* Prototype: int find_first_zero_bit(void *addr, unsigned int maxbit);
* In: r0 = bitmap base, r1 = maxbit (exclusive).  Returns bit index in r0,
* or maxbit if no zero bit found.  Scans one byte at a time.
*/
ENTRY(_find_first_zero_bit_le)
teq r1, #0 @ empty bitmap?
beq 3f
mov r2, #0 @ r2 = current bit index
1:
ARM( ldrb r3, [r0, r2, lsr #3] ) @ load byte containing bit r2
THUMB( lsr r3, r2, #3 )
THUMB( ldrb r3, [r0, r3] )
eors r3, r3, #0xff @ invert bits
bne .L_found @ any now set - found zero bit
add r2, r2, #8 @ next bit pointer
2: cmp r2, r1 @ any more?
blo 1b
3: mov r0, r1 @ no free bits
mov pc, lr
ENDPROC(_find_first_zero_bit_le)
/*
* Purpose : Find next 'zero' bit
* Prototype: int find_next_zero_bit(void *addr, unsigned int maxbit, int offset)
* r2 = offset.  Handles a partial first byte here, then falls back into
* the labels (1b/2b/3b) of _find_first_zero_bit_le above for whole bytes.
*/
ENTRY(_find_next_zero_bit_le)
teq r1, #0
beq 3b
ands ip, r2, #7 @ ip = bit offset within first byte
beq 1b @ If new byte, goto old routine
ARM( ldrb r3, [r0, r2, lsr #3] )
THUMB( lsr r3, r2, #3 )
THUMB( ldrb r3, [r0, r3] )
eor r3, r3, #0xff @ now looking for a 1 bit
movs r3, r3, lsr ip @ shift off unused bits
bne .L_found
orr r2, r2, #7 @ if zero, then no bits here
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
ENDPROC(_find_next_zero_bit_le)
/*
* Purpose : Find a 'one' bit
* Prototype: int find_first_bit(const unsigned long *addr, unsigned int maxbit);
* Returns maxbit if no set bit is found.
*/
ENTRY(_find_first_bit_le)
teq r1, #0
beq 3f
mov r2, #0
1:
ARM( ldrb r3, [r0, r2, lsr #3] )
THUMB( lsr r3, r2, #3 )
THUMB( ldrb r3, [r0, r3] )
movs r3, r3 @ any bit set in this byte?
bne .L_found @ any now set - found set bit
add r2, r2, #8 @ next bit pointer
2: cmp r2, r1 @ any more?
blo 1b
3: mov r0, r1 @ no free bits
mov pc, lr
ENDPROC(_find_first_bit_le)
/*
* Purpose : Find next 'one' bit
* Prototype: int find_next_bit(const unsigned long *addr, unsigned int maxbit, int offset)
* Falls back into _find_first_bit_le's labels (1b/2b/3b) above.
*/
ENTRY(_find_next_bit_le)
teq r1, #0
beq 3b
ands ip, r2, #7
beq 1b @ If new byte, goto old routine
ARM( ldrb r3, [r0, r2, lsr #3] )
THUMB( lsr r3, r2, #3 )
THUMB( ldrb r3, [r0, r3] )
movs r3, r3, lsr ip @ shift off unused bits
bne .L_found
orr r2, r2, #7 @ if zero, then no bits here
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
ENDPROC(_find_next_bit_le)
/*
 * Big-endian variants of the four routines above.  Identical structure;
 * the only difference is 'eor r3, r2, #0x18', which flips the byte index
 * within each 32-bit word so bit numbering matches little-endian layout.
 */
#ifdef __ARMEB__
ENTRY(_find_first_zero_bit_be)
teq r1, #0
beq 3f
mov r2, #0
1: eor r3, r2, #0x18 @ big endian byte ordering
ARM( ldrb r3, [r0, r3, lsr #3] )
THUMB( lsr r3, #3 )
THUMB( ldrb r3, [r0, r3] )
eors r3, r3, #0xff @ invert bits
bne .L_found @ any now set - found zero bit
add r2, r2, #8 @ next bit pointer
2: cmp r2, r1 @ any more?
blo 1b
3: mov r0, r1 @ no free bits
mov pc, lr
ENDPROC(_find_first_zero_bit_be)
ENTRY(_find_next_zero_bit_be)
teq r1, #0
beq 3b
ands ip, r2, #7
beq 1b @ If new byte, goto old routine
eor r3, r2, #0x18 @ big endian byte ordering
ARM( ldrb r3, [r0, r3, lsr #3] )
THUMB( lsr r3, #3 )
THUMB( ldrb r3, [r0, r3] )
eor r3, r3, #0xff @ now looking for a 1 bit
movs r3, r3, lsr ip @ shift off unused bits
bne .L_found
orr r2, r2, #7 @ if zero, then no bits here
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
ENDPROC(_find_next_zero_bit_be)
ENTRY(_find_first_bit_be)
teq r1, #0
beq 3f
mov r2, #0
1: eor r3, r2, #0x18 @ big endian byte ordering
ARM( ldrb r3, [r0, r3, lsr #3] )
THUMB( lsr r3, #3 )
THUMB( ldrb r3, [r0, r3] )
movs r3, r3 @ any bit set in this byte?
bne .L_found @ any now set - found set bit
add r2, r2, #8 @ next bit pointer
2: cmp r2, r1 @ any more?
blo 1b
3: mov r0, r1 @ no free bits
mov pc, lr
ENDPROC(_find_first_bit_be)
ENTRY(_find_next_bit_be)
teq r1, #0
beq 3b
ands ip, r2, #7
beq 1b @ If new byte, goto old routine
eor r3, r2, #0x18 @ big endian byte ordering
ARM( ldrb r3, [r0, r3, lsr #3] )
THUMB( lsr r3, #3 )
THUMB( ldrb r3, [r0, r3] )
movs r3, r3, lsr ip @ shift off unused bits
bne .L_found
orr r2, r2, #7 @ if zero, then no bits here
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
ENDPROC(_find_next_bit_be)
#endif
/*
* One or more bits in the LSB of r3 are assumed to be set.
* Shared tail for all routines above: r2 = bit index of the byte's bit 0,
* r1 = maxbit.  Computes r0 = r2 + (index of lowest set bit in r3),
* clamped to maxbit.
*/
.L_found:
#if __LINUX_ARM_ARCH__ >= 5
rsb r0, r3, #0 @ r0 = -r3
and r3, r3, r0 @ r3 & -r3: isolate lowest set bit
clz r3, r3
rsb r3, r3, #31 @ 31 - clz = bit position
add r0, r2, r3
#else
@ No clz before ARMv5: binary-search the low 7 bits, accumulating into r2.
tst r3, #0x0f
addeq r2, r2, #4 @ nothing in low nibble: skip 4 bits
movne r3, r3, lsl #4
tst r3, #0x30
addeq r2, r2, #2
movne r3, r3, lsl #2
tst r3, #0x40
addeq r2, r2, #1
mov r0, r2
#endif
cmp r1, r0 @ Clamp to maxbit
movlo r0, r1
mov pc, lr
|
a3f/bareDOOM
| 1,458
|
arch/arm/lib32/ashldi3.S
|
/* Copyright 1995, 1996, 1998, 1999, 2000, 2003, 2004, 2005
Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.
In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file. (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
*/
#include <linux/linkage.h>
#ifdef __ARMEB__
#define al r1
#define ah r0
#else
#define al r0
#define ah r1
#endif
.section .text.__ashldi3
/*
 * 64-bit shift left: {ah:al} <<= r2.  Mirror image of __lshrdi3.
 * al/ah map to r0/r1 (or swapped on big-endian) per the #defines above.
 * r2 = shift count; clobbers r3, ip.  Dual entry: libgcc name and EABI name.
 */
ENTRY(__ashldi3)
ENTRY(__aeabi_llsl)
subs r3, r2, #32 @ r3 = shift - 32; MI if shift < 32
rsb ip, r2, #32 @ ip = 32 - shift
movmi ah, ah, lsl r2 @ shift < 32: shift high word
movpl ah, al, lsl r3 @ shift >= 32: high word comes entirely from low word
ARM( orrmi ah, ah, al, lsr ip ) @ shift < 32: merge in bits carried out of low word
THUMB( lsrmi r3, al, ip ) @ Thumb-2 has no shifted orr operand; use r3 as scratch
THUMB( orrmi ah, ah, r3 )
mov al, al, lsl r2 @ low word: plain shift (becomes 0 when r2 >= 32)
mov pc, lr
ENDPROC(__ashldi3)
ENDPROC(__aeabi_llsl)
|
a3f/bareDOOM
| 2,383
|
arch/arm/lib32/io-readsb.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/* SPDX-FileCopyrightText: 1995-2000 Russell King */
/*
* linux/arch/arm/lib/io-readsb.S
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
.section .text.readsb
/*
 * readsb(r0 = I/O address, r1 = buffer, r2 = byte count)
 * Reads r2 bytes from the fixed (non-incrementing) address r0 into the
 * incrementing buffer r1.  The put_byte_N operand macros come from
 * <asm/assembler.h> and position a byte within a word — presumably for
 * endian-correct packing (NOTE: defined outside this file).
 */
/* Align buffer to a word boundary first: copy 1..3 bytes singly.
 * On entry ip = r1 & 3; after this r2 has been reduced accordingly. */
.Linsb_align: rsb ip, ip, #4 @ ip = bytes needed to reach alignment
cmp ip, r2
movgt ip, r2 @ don't copy more than requested
cmp ip, #2 @ condition flags drive the ge/gt copies below
ldrb r3, [r0]
strb r3, [r1], #1
ldrgeb r3, [r0]
strgeb r3, [r1], #1
ldrgtb r3, [r0]
strgtb r3, [r1], #1
subs r2, r2, ip
bne .Linsb_aligned
ENTRY(readsb)
teq r2, #0 @ do we have to check for the zero len?
moveq pc, lr
ands ip, r1, #3
bne .Linsb_align
.Linsb_aligned: stmfd sp!, {r4 - r6, lr}
subs r2, r2, #16
bmi .Linsb_no_16
/* Main loop: read 16 bytes, pack into 4 words, store with one stmia.
 * Loads are interleaved with the packing to overlap I/O latency. */
.Linsb_16_lp: ldrb r3, [r0]
ldrb r4, [r0]
ldrb r5, [r0]
mov r3, r3, put_byte_0
ldrb r6, [r0]
orr r3, r3, r4, put_byte_1
ldrb r4, [r0]
orr r3, r3, r5, put_byte_2
ldrb r5, [r0]
orr r3, r3, r6, put_byte_3
ldrb r6, [r0]
mov r4, r4, put_byte_0
ldrb ip, [r0]
orr r4, r4, r5, put_byte_1
ldrb r5, [r0]
orr r4, r4, r6, put_byte_2
ldrb r6, [r0]
orr r4, r4, ip, put_byte_3
ldrb ip, [r0]
mov r5, r5, put_byte_0
ldrb lr, [r0]
orr r5, r5, r6, put_byte_1
ldrb r6, [r0]
orr r5, r5, ip, put_byte_2
ldrb ip, [r0]
orr r5, r5, lr, put_byte_3
ldrb lr, [r0]
mov r6, r6, put_byte_0
orr r6, r6, ip, put_byte_1
ldrb ip, [r0]
orr r6, r6, lr, put_byte_2
orr r6, r6, ip, put_byte_3
stmia r1!, {r3 - r6}
subs r2, r2, #16
bpl .Linsb_16_lp
tst r2, #15 @ anything left at all?
ldmeqfd sp!, {r4 - r6, pc}
/* Tail: 8 bytes, then 4 bytes, then 0..3 single bytes. */
.Linsb_no_16: tst r2, #8
beq .Linsb_no_8
ldrb r3, [r0]
ldrb r4, [r0]
ldrb r5, [r0]
mov r3, r3, put_byte_0
ldrb r6, [r0]
orr r3, r3, r4, put_byte_1
ldrb r4, [r0]
orr r3, r3, r5, put_byte_2
ldrb r5, [r0]
orr r3, r3, r6, put_byte_3
ldrb r6, [r0]
mov r4, r4, put_byte_0
ldrb ip, [r0]
orr r4, r4, r5, put_byte_1
orr r4, r4, r6, put_byte_2
orr r4, r4, ip, put_byte_3
stmia r1!, {r3, r4}
.Linsb_no_8: tst r2, #4
beq .Linsb_no_4
ldrb r3, [r0]
ldrb r4, [r0]
ldrb r5, [r0]
ldrb r6, [r0]
mov r3, r3, put_byte_0
orr r3, r3, r4, put_byte_1
orr r3, r3, r5, put_byte_2
orr r3, r3, r6, put_byte_3
str r3, [r1], #4
.Linsb_no_4: ands r2, r2, #3
ldmeqfd sp!, {r4 - r6, pc}
cmp r2, #2 @ flags drive the ge/gt copies below
ldrb r3, [r0]
strb r3, [r1], #1
ldrgeb r3, [r0]
strgeb r3, [r1], #1
ldrgtb r3, [r0]
strgtb r3, [r1]
ldmfd sp!, {r4 - r6, pc}
ENDPROC(readsb)
|
a3f/bareDOOM
| 1,634
|
arch/arm/lib32/io-writesw-armv4.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/* SPDX-FileCopyrightText: 1995-2000 Russell King */
/*
* linux/arch/arm/lib/io-writesw-armv4.S
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
/*
 * outword: write one 32-bit word in \rd to the halfword port at [r0]
 * as two 16-bit stores, low halfword first on little-endian
 * (high halfword first on big-endian, via scratch lr).
 */
.macro outword, rd
#ifndef __ARMEB__
strh \rd, [r0]
mov \rd, \rd, lsr #16
strh \rd, [r0]
#else
mov lr, \rd, lsr #16
strh lr, [r0]
strh \rd, [r0]
#endif
.endm
.section .text.__raw_writesw
/*
 * __raw_writesw(r0 = I/O address, r1 = buffer, r2 = halfword count)
 * Writes r2 halfwords from buffer r1 to the fixed address r0.
 */
/* Buffer is 2-byte but not 4-byte aligned: emit one halfword, then
 * fall through into the aligned path.  Odd addresses go to noalign. */
.Loutsw_align: movs ip, r1, lsl #31 @ C = bit1, N = bit0 of buffer address
bne .Loutsw_noalign
ldrh r3, [r1], #2
sub r2, r2, #1
strh r3, [r0]
ENTRY(__raw_writesw)
teq r2, #0 @ zero length: nothing to do
moveq pc, lr
ands r3, r1, #3
bne .Loutsw_align
stmfd sp!, {r4, r5, lr}
subs r2, r2, #8
bmi .Lno_outsw_8
/* Main loop: load 8 halfwords (4 words) at once, emit via outword. */
.Loutsw_8_lp: ldmia r1!, {r3, r4, r5, ip}
subs r2, r2, #8
outword r3
outword r4
outword r5
outword ip
bpl .Loutsw_8_lp
.Lno_outsw_8: tst r2, #4
beq .Lno_outsw_4
ldmia r1!, {r3, ip}
outword r3
outword ip
.Lno_outsw_4: movs r2, r2, lsl #31 @ C = "2 left", N = "1 left"
bcc .Lno_outsw_2
ldr r3, [r1], #4
outword r3
.Lno_outsw_2: ldrneh r3, [r1]
strneh r3, [r0]
ldmfd sp!, {r4, r5, pc}
#ifdef __ARMEB__
#define pull_hbyte0 lsl #8
#define push_hbyte1 lsr #24
#else
#define pull_hbyte0 lsr #24
#define push_hbyte1 lsl #8
#endif
/* Odd (byte-aligned) buffer: reassemble halfwords across word loads.
 * On entry from .Loutsw_align: r3 = r1 & 3, flags still set from movs. */
.Loutsw_noalign:
ARM( ldr r3, [r1, -r3]! ) @ word-align r1 and load first word
THUMB( rsb r3, r3, #0 )
THUMB( ldr r3, [r1, r3] )
THUMB( sub r1, r3 )
subcs r2, r2, #1
bcs 2f
subs r2, r2, #2
bmi 3f
1: mov ip, r3, lsr #8
strh ip, [r0]
2: mov ip, r3, pull_hbyte0 @ carry top byte into next halfword
ldr r3, [r1, #4]!
subs r2, r2, #2
orr ip, ip, r3, push_hbyte1
strh ip, [r0]
bpl 1b
tst r2, #1 @ one trailing halfword?
3: movne ip, r3, lsr #8
strneh ip, [r0]
mov pc, lr
ENDPROC(__raw_writesw)
|
a3f/bareDOOM
| 2,299
|
arch/arm/lib32/memset.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/* SPDX-FileCopyrightText: 1995-2000 Russell King */
/*
* linux/arch/arm/lib/memset.S
* ASM optimised string functions
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
.text
.align 5
.weak memset
/*
 * void *memset(void *s, int c, size_t n)
 * In:  r0 = s, r1 = c (byte value), r2 = n.  Returns r0 = s.
 * ip is used as the running store pointer so r0 survives as the
 * return value.  r1 is replicated to all four bytes for word stores.
 */
ENTRY(__memset)
ENTRY(memset)
ands r3, r0, #3 @ 1 unaligned?
mov ip, r0 @ preserve r0 as return value
bne 6f @ 1
/*
* we know that the pointer in ip is aligned to a word boundary.
*/
1: orr r1, r1, r1, lsl #8 @ replicate byte to all 4 byte lanes
orr r1, r1, r1, lsl #16
mov r3, r1
cmp r2, #16
blt 4f
#if ! CALGN(1)+0
/*
* We need 2 extra registers for this loop - use r8 and the LR
*/
stmfd sp!, {r8, lr}
mov r8, r1
mov lr, r1
2: subs r2, r2, #64
stmgeia ip!, {r1, r3, r8, lr} @ 64 bytes at a time.
stmgeia ip!, {r1, r3, r8, lr}
stmgeia ip!, {r1, r3, r8, lr}
stmgeia ip!, {r1, r3, r8, lr}
bgt 2b
ldmeqfd sp!, {r8, pc} @ Now <64 bytes to go.
/*
* No need to correct the count; we're only testing bits from now on
*/
tst r2, #32
stmneia ip!, {r1, r3, r8, lr}
stmneia ip!, {r1, r3, r8, lr}
tst r2, #16
stmneia ip!, {r1, r3, r8, lr}
ldmfd sp!, {r8, lr}
#else
/*
* This version aligns the destination pointer in order to write
* whole cache lines at once.
*/
stmfd sp!, {r4-r8, lr}
mov r4, r1
mov r5, r1
mov r6, r1
mov r7, r1
mov r8, r1
mov lr, r1
cmp r2, #96
tstgt ip, #31
ble 3f
@ Align ip to a 32-byte boundary: store r8 = 32 - (ip & 31) bytes.
and r8, ip, #31
rsb r8, r8, #32
sub r2, r2, r8
movs r8, r8, lsl #(32 - 4) @ move byte-count bits into C/N/bit30
stmcsia ip!, {r4, r5, r6, r7} @ C set: 16 bytes
stmmiia ip!, {r4, r5} @ N set: 8 bytes
tst r8, #(1 << 30) @ 4 bytes?
mov r8, r1
strne r1, [ip], #4
3: subs r2, r2, #64
stmgeia ip!, {r1, r3-r8, lr}
stmgeia ip!, {r1, r3-r8, lr}
bgt 3b
ldmeqfd sp!, {r4-r8, pc}
tst r2, #32
stmneia ip!, {r1, r3-r8, lr}
tst r2, #16
stmneia ip!, {r4-r7}
ldmfd sp!, {r4-r8, lr}
#endif
@ Tail: 8, 4, then 0..3 bytes.
4: tst r2, #8
stmneia ip!, {r1, r3}
tst r2, #4
strne r1, [ip], #4
/*
* When we get here, we've got less than 4 bytes to zero. We
* may have an unaligned pointer as well.
*/
5: tst r2, #2
strneb r1, [ip], #1
strneb r1, [ip], #1
tst r2, #1
strneb r1, [ip], #1
mov pc, lr
@ Unaligned entry: store 1..3 bytes (r3 = s & 3) to reach alignment.
6: subs r2, r2, #4 @ 1 do we have enough
blt 5b @ 1 bytes to align with?
cmp r3, #2 @ 1
strltb r1, [ip], #1 @ 1
strleb r1, [ip], #1 @ 1
strb r1, [ip], #1 @ 1
add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3))
b 1b
ENDPROC(memset)
ENDPROC(__memset)
|
a3f/bareDOOM
| 1,149
|
arch/arm/lib32/io-writesl.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/* SPDX-FileCopyrightText: 1995-2000 Russell King */
/*
* linux/arch/arm/lib/io-writesl.S
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
.section .text.writesl
/*
 * writesl(r0 = I/O address, r1 = buffer, r2 = word count)
 * Writes r2 32-bit words from buffer r1 to the fixed address r0.
 * The pull/push operand macros come from <asm/assembler.h> —
 * presumably endian-dependent lsr/lsl pairs for reassembling words
 * from an unaligned buffer (defined outside this file).
 */
ENTRY(writesl)
teq r2, #0 @ do we have to check for the zero len?
moveq pc, lr
ands ip, r1, #3
bne 3f @ unaligned buffer
subs r2, r2, #4
bmi 2f
stmfd sp!, {r4, lr}
1: ldmia r1!, {r3, r4, ip, lr} @ 4 words per iteration
subs r2, r2, #4
str r3, [r0, #0]
str r4, [r0, #0]
str ip, [r0, #0]
str lr, [r0, #0]
bpl 1b
ldmfd sp!, {r4, lr}
2: movs r2, r2, lsl #31 @ C = "2 left", N = "1 left"
ldmcsia r1!, {r3, ip}
strcs r3, [r0, #0]
ldrne r3, [r1, #0]
strcs ip, [r0, #0]
strne r3, [r0, #0]
mov pc, lr
@ Unaligned buffer: ip = r1 & 3 selects the shift pair; each loop
@ iteration merges the tail of one word with the head of the next.
3: bic r1, r1, #3 @ word-align the pointer
ldr r3, [r1], #4
cmp ip, #2
blt 5f @ offset 1
bgt 6f @ offset 3
4: mov ip, r3, pull #16 @ offset 2
ldr r3, [r1], #4
subs r2, r2, #1
orr ip, ip, r3, push #16
str ip, [r0]
bne 4b
mov pc, lr
5: mov ip, r3, pull #8
ldr r3, [r1], #4
subs r2, r2, #1
orr ip, ip, r3, push #24
str ip, [r0]
bne 5b
mov pc, lr
6: mov ip, r3, pull #24
ldr r3, [r1], #4
subs r2, r2, #1
orr ip, ip, r3, push #8
str ip, [r0]
bne 6b
mov pc, lr
ENDPROC(writesl)
|
a3f/bareDOOM
| 1,405
|
arch/arm/lib32/io-readsl.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/* SPDX-FileCopyrightText: 1995-2000 Russell King */
/*
* linux/arch/arm/lib/io-readsl.S
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
.section .text.readsl
/*
 * readsl(r0 = I/O address, r1 = buffer, r2 = word count)
 * Reads r2 32-bit words from the fixed address r0 into buffer r1.
 * get_byte_N / pull / push operand macros come from <asm/assembler.h> —
 * presumably endian-dependent byte-extract and shift pairs for the
 * unaligned-buffer path (defined outside this file).
 */
ENTRY(readsl)
teq r2, #0 @ do we have to check for the zero len?
moveq pc, lr
ands ip, r1, #3
bne 3f @ unaligned buffer
subs r2, r2, #4
bmi 2f
stmfd sp!, {r4, lr}
1: ldr r3, [r0, #0] @ 4 words per iteration
ldr r4, [r0, #0]
ldr ip, [r0, #0]
ldr lr, [r0, #0]
subs r2, r2, #4
stmia r1!, {r3, r4, ip, lr}
bpl 1b
ldmfd sp!, {r4, lr}
2: movs r2, r2, lsl #31 @ C = "2 left", N = "1 left"
ldrcs r3, [r0, #0]
ldrcs ip, [r0, #0]
stmcsia r1!, {r3, ip}
ldrne r3, [r0, #0]
strne r3, [r1, #0]
mov pc, lr
@ Unaligned buffer: store 4-ip leading bytes singly, then run the
@ matching word loop (4/5/6) that splits each word across stores.
3: ldr r3, [r0]
cmp ip, #2
mov ip, r3, get_byte_0
strb ip, [r1], #1
bgt 6f @ offset 3: 1 leading byte
mov ip, r3, get_byte_1
strb ip, [r1], #1
beq 5f @ offset 2: 2 leading bytes
mov ip, r3, get_byte_2
strb ip, [r1], #1 @ offset 1: 3 leading bytes
4: subs r2, r2, #1
mov ip, r3, pull #24
ldrne r3, [r0]
orrne ip, ip, r3, push #8
strne ip, [r1], #4
bne 4b
b 8f @ 1 trailing byte
5: subs r2, r2, #1
mov ip, r3, pull #16
ldrne r3, [r0]
orrne ip, ip, r3, push #16
strne ip, [r1], #4
bne 5b
b 7f @ 2 trailing bytes
6: subs r2, r2, #1
mov ip, r3, pull #8
ldrne r3, [r0]
orrne ip, ip, r3, push #24
strne ip, [r1], #4
bne 6b
mov r3, ip, get_byte_2 @ 3 trailing bytes
strb r3, [r1, #2]
7: mov r3, ip, get_byte_1
strb r3, [r1, #1]
8: mov r3, ip, get_byte_0
strb r3, [r1, #0]
mov pc, lr
ENDPROC(readsl)
|
a3f/bareDOOM
| 7,809
|
arch/arm/lib32/lib1funcs.S
|
/*
* linux/arch/arm/lib/lib1funcs.S: Optimized ARM division routines
*
* Author: Nicolas Pitre <nico@fluxnic.net>
* - contributed to gcc-3.4 on Sep 30, 2003
* - adapted for the Linux kernel on Oct 2, 2003
*/
/* Copyright 1995, 1996, 1998, 1999, 2000, 2003 Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.
In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file. (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
/*
 * ARM_DIV_BODY: unsigned division core.
 * In:  \dividend, \divisor (both non-zero; divisor not a power of two —
 *      callers handle those cases separately).
 * Out: \result = quotient, \dividend = remainder.  \curbit is scratch.
 * Aligns the divisor with the dividend, then subtracts 4 shifted copies
 * per iteration.
 */
.macro ARM_DIV_BODY dividend, divisor, result, curbit
#if __LINUX_ARM_ARCH__ >= 5
@ With clz the alignment shift is computed directly.
clz \curbit, \divisor
clz \result, \dividend
sub \result, \curbit, \result
mov \curbit, #1
mov \divisor, \divisor, lsl \result
mov \curbit, \curbit, lsl \result
mov \result, #0
#else
@ Initially shift the divisor left 3 bits if possible,
@ set curbit accordingly. This allows for curbit to be located
@ at the left end of each 4 bit nibbles in the division loop
@ to save one loop in most cases.
tst \divisor, #0xe0000000
moveq \divisor, \divisor, lsl #3
moveq \curbit, #8
movne \curbit, #1
@ Unless the divisor is very big, shift it up in multiples of
@ four bits, since this is the amount of unwinding in the main
@ division loop. Continue shifting until the divisor is
@ larger than the dividend.
1: cmp \divisor, #0x10000000
cmplo \divisor, \dividend
movlo \divisor, \divisor, lsl #4
movlo \curbit, \curbit, lsl #4
blo 1b
@ For very big divisors, we must shift it a bit at a time, or
@ we will be in danger of overflowing.
1: cmp \divisor, #0x80000000
cmplo \divisor, \dividend
movlo \divisor, \divisor, lsl #1
movlo \curbit, \curbit, lsl #1
blo 1b
mov \result, #0
#endif
@ Division loop: try divisor, divisor/2, divisor/4, divisor/8 per pass.
1: cmp \dividend, \divisor
subhs \dividend, \dividend, \divisor
orrhs \result, \result, \curbit
cmp \dividend, \divisor, lsr #1
subhs \dividend, \dividend, \divisor, lsr #1
orrhs \result, \result, \curbit, lsr #1
cmp \dividend, \divisor, lsr #2
subhs \dividend, \dividend, \divisor, lsr #2
orrhs \result, \result, \curbit, lsr #2
cmp \dividend, \divisor, lsr #3
subhs \dividend, \dividend, \divisor, lsr #3
orrhs \result, \result, \curbit, lsr #3
cmp \dividend, #0 @ Early termination?
movnes \curbit, \curbit, lsr #4 @ No, any more bits to do?
movne \divisor, \divisor, lsr #4
bne 1b
.endm
/*
 * ARM_DIV2_ORDER: \order = log2(\divisor) for a power-of-two divisor.
 */
.macro ARM_DIV2_ORDER divisor, order
#if __LINUX_ARM_ARCH__ >= 5
clz \order, \divisor
rsb \order, \order, #31
#else
@ Binary search on the bit position, 16/8/4 then the last two bits.
cmp \divisor, #(1 << 16)
movhs \divisor, \divisor, lsr #16
movhs \order, #16
movlo \order, #0
cmp \divisor, #(1 << 8)
movhs \divisor, \divisor, lsr #8
addhs \order, \order, #8
cmp \divisor, #(1 << 4)
movhs \divisor, \divisor, lsr #4
addhs \order, \order, #4
cmp \divisor, #(1 << 2)
addhi \order, \order, #3
addls \order, \order, \divisor, lsr #1
#endif
.endm
/*
 * ARM_MOD_BODY: unsigned modulus core.
 * In:  \dividend, \divisor (same preconditions as ARM_DIV_BODY).
 * Out: \dividend = remainder.  \order and \spare are scratch.
 */
.macro ARM_MOD_BODY dividend, divisor, order, spare
#if __LINUX_ARM_ARCH__ >= 5
clz \order, \divisor
clz \spare, \dividend
sub \order, \order, \spare
mov \divisor, \divisor, lsl \order
#else
mov \order, #0
@ Unless the divisor is very big, shift it up in multiples of
@ four bits, since this is the amount of unwinding in the main
@ division loop. Continue shifting until the divisor is
@ larger than the dividend.
1: cmp \divisor, #0x10000000
cmplo \divisor, \dividend
movlo \divisor, \divisor, lsl #4
addlo \order, \order, #4
blo 1b
@ For very big divisors, we must shift it a bit at a time, or
@ we will be in danger of overflowing.
1: cmp \divisor, #0x80000000
cmplo \divisor, \dividend
movlo \divisor, \divisor, lsl #1
addlo \order, \order, #1
blo 1b
#endif
@ Perform all needed subtractions to keep only the remainder.
@ Do comparisons in batch of 4 first.
subs \order, \order, #3 @ yes, 3 is intended here
blt 2f
1: cmp \dividend, \divisor
subhs \dividend, \dividend, \divisor
cmp \dividend, \divisor, lsr #1
subhs \dividend, \dividend, \divisor, lsr #1
cmp \dividend, \divisor, lsr #2
subhs \dividend, \dividend, \divisor, lsr #2
cmp \dividend, \divisor, lsr #3
subhs \dividend, \dividend, \divisor, lsr #3
cmp \dividend, #1
mov \divisor, \divisor, lsr #4
subges \order, \order, #4
bge 1b
tst \order, #3
teqne \dividend, #0
beq 5f
@ Either 1, 2 or 3 comparison/subtractions are left.
2: cmn \order, #2
blt 4f
beq 3f
cmp \dividend, \divisor
subhs \dividend, \dividend, \divisor
mov \divisor, \divisor, lsr #1
3: cmp \dividend, \divisor
subhs \dividend, \dividend, \divisor
mov \divisor, \divisor, lsr #1
4: cmp \dividend, \divisor
subhs \dividend, \dividend, \divisor
5:
.endm
.section .text.__udivsi3
/*
 * unsigned __udivsi3(r0 = dividend, r1 = divisor)
 * Returns quotient in r0.  Division by zero branches to Ldiv0.
 * Fast paths: divisor == 1, dividend <= divisor, power-of-two divisor.
 */
ENTRY(__udivsi3)
ENTRY(__aeabi_uidiv)
subs r2, r1, #1 @ r2 = divisor - 1; EQ if divisor == 1
moveq pc, lr @ /1: result already in r0
bcc Ldiv0 @ carry clear => divisor was 0
cmp r0, r1
bls 11f @ dividend <= divisor
tst r1, r2 @ power of 2? ((d & (d-1)) == 0)
beq 12f
ARM_DIV_BODY r0, r1, r2, r3
mov r0, r2
mov pc, lr
11: moveq r0, #1 @ dividend == divisor: 1
movne r0, #0 @ dividend < divisor: 0
mov pc, lr
12: ARM_DIV2_ORDER r1, r2 @ power of 2: just shift
mov r0, r0, lsr r2
mov pc, lr
ENDPROC(__udivsi3)
ENDPROC(__aeabi_uidiv)
.section .text.__umodsi3
/*
 * unsigned __umodsi3(r0 = dividend, r1 = divisor)
 * Returns remainder in r0.  Division by zero branches to Ldiv0.
 */
ENTRY(__umodsi3)
subs r2, r1, #1 @ compare divisor with 1
bcc Ldiv0
cmpne r0, r1 @ compare dividend with divisor
moveq r0, #0
tsthi r1, r2 @ see if divisor is power of 2
andeq r0, r0, r2
movls pc, lr
ARM_MOD_BODY r0, r1, r2, r3
mov pc, lr
ENDPROC(__umodsi3)
.section .text.__divsi3
/*
 * int __divsi3(r0 = dividend, r1 = divisor)
 * Signed division: operate on magnitudes, re-apply the sign (kept in ip
 * as dividend^divisor).  Returns quotient in r0.
 */
ENTRY(__divsi3)
ENTRY(__aeabi_idiv)
cmp r1, #0
eor ip, r0, r1 @ save the sign of the result.
beq Ldiv0
rsbmi r1, r1, #0 @ loops below use unsigned.
subs r2, r1, #1 @ division by 1 or -1 ?
beq 10f
movs r3, r0
rsbmi r3, r0, #0 @ positive dividend value
cmp r3, r1
bls 11f
tst r1, r2 @ divisor is power of 2 ?
beq 12f
ARM_DIV_BODY r3, r1, r0, r2
cmp ip, #0
rsbmi r0, r0, #0 @ negate if signs differed
mov pc, lr
10: teq ip, r0 @ same sign ?
rsbmi r0, r0, #0
mov pc, lr
11: movlo r0, #0 @ |dividend| < |divisor|: 0
moveq r0, ip, asr #31 @ equal magnitudes: +/-1 from sign
orreq r0, r0, #1
mov pc, lr
12: ARM_DIV2_ORDER r1, r2
cmp ip, #0
mov r0, r3, lsr r2
rsbmi r0, r0, #0
mov pc, lr
ENDPROC(__divsi3)
ENDPROC(__aeabi_idiv)
.section .text.__modsi3
/*
 * int __modsi3(r0 = dividend, r1 = divisor)
 * Signed remainder; takes the sign of the dividend (saved in ip).
 */
ENTRY(__modsi3)
cmp r1, #0
beq Ldiv0
rsbmi r1, r1, #0 @ loops below use unsigned.
movs ip, r0 @ preserve sign of dividend
rsbmi r0, r0, #0 @ if negative make positive
subs r2, r1, #1 @ compare divisor with 1
cmpne r0, r1 @ compare dividend with divisor
moveq r0, #0
tsthi r1, r2 @ see if divisor is power of 2
andeq r0, r0, r2
bls 10f
ARM_MOD_BODY r0, r1, r2, r3
10: cmp ip, #0
rsbmi r0, r0, #0
mov pc, lr
ENDPROC(__modsi3)
#ifdef CONFIG_AEABI
/*
 * EABI combined divide/modulo: quotient in r0, remainder in r1.
 * Saves the operands, calls the divide helper, then computes
 * remainder = dividend - quotient * divisor.
 */
.section .text.__aeabi_uidivmod
ENTRY(__aeabi_uidivmod)
stmfd sp!, {r0, r1, ip, lr}
bl __aeabi_uidiv
ldmfd sp!, {r1, r2, ip, lr} @ r1 = dividend, r2 = divisor
mul r3, r0, r2 @ r3 = quotient * divisor
sub r1, r1, r3 @ r1 = remainder
mov pc, lr
ENDPROC(__aeabi_uidivmod)
.section .text.__aeabi_idivmod
ENTRY(__aeabi_idivmod)
stmfd sp!, {r0, r1, ip, lr}
bl __aeabi_idiv
ldmfd sp!, {r1, r2, ip, lr} @ r1 = dividend, r2 = divisor
mul r3, r0, r2
sub r1, r1, r3
mov pc, lr
ENDPROC(__aeabi_idivmod)
#endif
.section .text.Ldiv0
/* Division-by-zero trap: report via __div0, then return 0. */
Ldiv0:
str lr, [sp, #-8]!
bl __div0
mov r0, #0 @ About as wrong as it could be.
ldr pc, [sp], #8
|
a3f/bareDOOM
| 2,256
|
arch/arm/lib32/io-readsw-armv4.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/* SPDX-FileCopyrightText: 1995-2000 Russell King */
/*
* linux/arch/arm/lib/io-readsw-armv4.S
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
/*
 * pack: combine two halfwords into one word, \hw1 in the low half on
 * little-endian (\hw2 low on big-endian).
 */
.macro pack, rd, hw1, hw2
#ifndef __ARMEB__
orr \rd, \hw1, \hw2, lsl #16
#else
orr \rd, \hw2, \hw1, lsl #16
#endif
.endm
.section .text.readsw
/*
 * readsw(r0 = I/O address, r1 = buffer, r2 = halfword count)
 * Reads r2 halfwords from the fixed address r0 into buffer r1.
 */
/* Buffer is 2-byte but not 4-byte aligned: read one halfword, then
 * fall through into the aligned path.  Odd addresses go to noalign. */
.Linsw_align: movs ip, r1, lsl #31 @ C = bit1, N = bit0 of buffer address
bne .Linsw_noalign
ldrh ip, [r0]
sub r2, r2, #1
strh ip, [r1], #2
ENTRY(readsw)
teq r2, #0 @ zero length: nothing to do
moveq pc, lr
tst r1, #3
bne .Linsw_align
stmfd sp!, {r4, r5, lr}
subs r2, r2, #8
bmi .Lno_insw_8
/* Main loop: 8 halfwords packed into 4 words, stored with one stmia. */
.Linsw_8_lp: ldrh r3, [r0]
ldrh r4, [r0]
pack r3, r3, r4
ldrh r4, [r0]
ldrh r5, [r0]
pack r4, r4, r5
ldrh r5, [r0]
ldrh ip, [r0]
pack r5, r5, ip
ldrh ip, [r0]
ldrh lr, [r0]
pack ip, ip, lr
subs r2, r2, #8
stmia r1!, {r3 - r5, ip}
bpl .Linsw_8_lp
.Lno_insw_8: tst r2, #4
beq .Lno_insw_4
ldrh r3, [r0]
ldrh r4, [r0]
pack r3, r3, r4
ldrh r4, [r0]
ldrh ip, [r0]
pack r4, r4, ip
stmia r1!, {r3, r4}
.Lno_insw_4: movs r2, r2, lsl #31 @ C = "2 left", N = "1 left"
bcc .Lno_insw_2
ldrh r3, [r0]
ldrh ip, [r0]
pack r3, r3, ip
str r3, [r1], #4
.Lno_insw_2: ldrneh r3, [r0]
strneh r3, [r1]
ldmfd sp!, {r4, r5, pc}
#ifdef __ARMEB__
#define _BE_ONLY_(code...) code
#define _LE_ONLY_(code...)
#define push_hbyte0 lsr #8
#define pull_hbyte1 lsl #24
#else
#define _BE_ONLY_(code...)
#define _LE_ONLY_(code...) code
#define push_hbyte0 lsl #24
#define pull_hbyte1 lsr #8
#endif
/* Odd (byte-aligned) buffer: split each halfword across byte/word
 * stores.  Entered with C clear from the movs in .Linsw_align. */
.Linsw_noalign: stmfd sp!, {r4, lr}
ldrccb ip, [r1, #-1]! @ back up to the word boundary
bcc 1f
ldrh ip, [r0]
sub r2, r2, #1
_BE_ONLY_( mov ip, ip, ror #8 )
strb ip, [r1], #1 @ store low byte of first halfword
_LE_ONLY_( mov ip, ip, lsr #8 ) @ keep high byte for merging
_BE_ONLY_( mov ip, ip, lsr #24 )
1: subs r2, r2, #2
bmi 3f
_BE_ONLY_( mov ip, ip, lsl #24 )
2: ldrh r3, [r0] @ merge carried byte + 2 halfwords into a word
ldrh r4, [r0]
subs r2, r2, #2
orr ip, ip, r3, lsl #8
orr ip, ip, r4, push_hbyte0
str ip, [r1], #4
mov ip, r4, pull_hbyte1 @ carry leftover byte into next word
bpl 2b
_BE_ONLY_( mov ip, ip, lsr #24 )
3: tst r2, #1
strb ip, [r1], #1 @ flush carried byte
ldrneh ip, [r0] @ one trailing halfword?
_BE_ONLY_( movne ip, ip, ror #8 )
strneb ip, [r1], #1
_LE_ONLY_( movne ip, ip, lsr #8 )
_BE_ONLY_( movne ip, ip, lsr #24 )
strneb ip, [r1]
ldmfd sp!, {r4, pc}
ENDPROC(readsw)
|
a3f/bareDOOM
| 12,265
|
arch/arm/boards/phytec-phycore-pxa270/lowlevel_init.S
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* This was originally from the Lubbock u-boot port.
*
* Most of this taken from Redboot hal_platform_setup.h with cleanup
*
* NOTE: I haven't clean this up considerably, just enough to get it
* running. See hal_platform_setup.h for the source. See
* board/cradle/lowlevel_init.S for another PXA250 setup that is
* much cleaner.
*/
#include <config.h>
#include <linux/sizes.h>
#include <mach/pxa-regs.h>
#include <mach/regs-ost.h>
#include <mach/regs-intc.h>
#include <asm/barebox-arm-head.h>
#define GPSR0 0x40E00018 /* GPIO Pin Output Set Register GPIO <31:00> */
#define GPSR1 0x40E0001C /* GPIO Pin Output Set Register GPIO <63:32> */
#define GPSR2 0x40E00020 /* GPIO Pin Output Set Register GPIO <80:64> */
#define GPCR0 0x40E00024 /* GPIO Pin Output Clear Register GPIO <31:00> */
#define GPCR1 0x40E00028 /* GPIO Pin Output Clear Register GPIO <63:32> */
#define GPCR2 0x40E0002C /* GPIO Pin Output Clear Register GPIO <80:64> */
#define GPDR0 0x40E0000C /* GPIO Pin Direction Register GPIO <31:0o> */
#define GPDR1 0x40E00010 /* GPIO Pin Direction Register GPIO <63:32> */
#define GPDR2 0x40E00014 /* GPIO Pin Direction Register GPIO <80:64> */
#define GAFR0_L 0x40E00054 /* GPIO Alternate Function Select Register GPIO <15:00> */
#define GAFR0_U 0x40E00058 /* GPIO Alternate Function Select Register GPIO <31:16> */
#define GAFR1_L 0x40E0005C /* GPIO Alternate Function Select Register GPIO <47:32> */
#define GAFR1_U 0x40E00060 /* GPIO Alternate Function Select Register GPIO <63:48> */
#define GAFR2_L 0x40E00064 /* GPIO Alternate Function Select Register GPIO <79:64> */
#define GAFR2_U 0x40E00068 /* GPIO Alternate Function Select Register GPIO <95:80> */
/*
* Memory setup
*/
.globl barebox_arm_reset_vector
barebox_arm_reset_vector:
bl arm_cpu_lowlevel_init
@ Preserve r8/r7 i.e. kernel entry values
@ Data cache might be active.
@ Be sure to flush kernel binary out of the cache,
@ whatever state it is, before it is turned off.
@ This is done by fetching through currently executed
@ memory to be sure we hit the same cache.
bic r2, pc, #0x1f
add r3, r2, #0x10000 @ 64 kb is quite enough...
1: ldr r0, [r2], #32
teq r2, r3
bne 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mcr p15, 0, r0, c7, c7, 0 @ flush I & D caches
@ disabling MMU and caches
mrc p15, 0, r0, c1, c0, 0 @ read control reg
bic r0, r0, #0x05 @ clear DC, MMU
bic r0, r0, #0x1000 @ clear Icache
mcr p15, 0, r0, c1, c0, 0
/* set output */
ldr r0, =GPSR0
ldr r1, =CONFIG_GPSR0_VAL
str r1, [r0]
ldr r0, =GPSR1
ldr r1, =CONFIG_GPSR1_VAL
str r1, [r0]
ldr r0, =GPSR2
ldr r1, =CONFIG_GPSR2_VAL
str r1, [r0]
/* set direction */
ldr r0, =GPDR0
ldr r1, =CONFIG_GPDR0_VAL
str r1, [r0]
ldr r0, =GPDR1
ldr r1, =CONFIG_GPDR1_VAL
str r1, [r0]
ldr r0, =GPDR2
ldr r1, =CONFIG_GPDR2_VAL
str r1, [r0]
/* alternate function */
ldr r0, =GAFR0_L
ldr r1, =CONFIG_GAFR0_L_VAL
str r1, [r0]
ldr r0, =GAFR0_U
ldr r1, =CONFIG_GAFR0_U_VAL
str r1, [r0]
ldr r0, =GAFR1_L
ldr r1, =CONFIG_GAFR1_L_VAL
str r1, [r0]
ldr r0, =GAFR1_U
ldr r1, =CONFIG_GAFR1_U_VAL
str r1, [r0]
ldr r0, =GAFR2_L
ldr r1, =CONFIG_GAFR2_L_VAL
str r1, [r0]
ldr r0, =GAFR2_U
ldr r1, =CONFIG_GAFR2_U_VAL
str r1, [r0]
/* enable GPIO pins */
ldr r0, =PSSR
ldr r1, =CONFIG_PSSR_VAL
str r1, [r0]
/* -------------------------------------------------------------------- */
/* Enable memory interface */
/* */
/* The sequence below is based on the recommended init steps */
/* detailed in the Intel PXA250 Operating Systems Developers Guide, */
/* Chapter 10. */
/* -------------------------------------------------------------------- */
/* -------------------------------------------------------------------- */
/* Step 1: Wait for at least 200 microsedonds to allow internal */
/* clocks to settle. Only necessary after hard reset... */
/* FIXME: can be optimized later */
/* -------------------------------------------------------------------- */
ldr r3, =OSCR /* reset the OS Timer Count to zero */
mov r2, #0
str r2, [r3]
ldr r4, =0x300 /* really 0x2E1 is about 200usec, */
/* so 0x300 should be plenty */
1:
ldr r2, [r3]
cmp r4, r2
bgt 1b
cmp pc, #0xa0000000
bls mem_init
cmp pc, #0xb0000000
bhi mem_init
b skip_mem_init
mem_init:
ldr r1, =MDCNFG /* get memory controller base addr. */
/* -------------------------------------------------------------------- */
/* Step 2a: Initialize Asynchronous static memory controller */
/* -------------------------------------------------------------------- */
/* MSC registers: timing, bus width, mem type */
/* MSC0: nCS(0,1) */
ldr r2, =CONFIG_MSC0_VAL
str r2, [r1, #MSC0_OFFSET]
ldr r2, [r1, #MSC0_OFFSET] /* read back to ensure */
/* that data latches */
/* MSC1: nCS(2,3) */
ldr r2, =CONFIG_MSC1_VAL
str r2, [r1, #MSC1_OFFSET]
ldr r2, [r1, #MSC1_OFFSET]
/* MSC2: nCS(4,5) */
ldr r2, =CONFIG_MSC2_VAL
str r2, [r1, #MSC2_OFFSET]
ldr r2, [r1, #MSC2_OFFSET]
/* -------------------------------------------------------------------- */
/* Step 2b: Initialize Card Interface */
/* -------------------------------------------------------------------- */
/* MECR: Memory Expansion Card Register */
ldr r2, =CONFIG_MECR_VAL
str r2, [r1, #MECR_OFFSET]
ldr r2, [r1, #MECR_OFFSET]
/* MCMEM0: Card Interface slot 0 timing */
ldr r2, =CONFIG_MCMEM0_VAL
str r2, [r1, #MCMEM0_OFFSET]
ldr r2, [r1, #MCMEM0_OFFSET]
/* MCMEM1: Card Interface slot 1 timing */
ldr r2, =CONFIG_MCMEM1_VAL
str r2, [r1, #MCMEM1_OFFSET]
ldr r2, [r1, #MCMEM1_OFFSET]
/* MCATT0: Card Interface Attribute Space Timing, slot 0 */
ldr r2, =CONFIG_MCATT0_VAL
str r2, [r1, #MCATT0_OFFSET]
ldr r2, [r1, #MCATT0_OFFSET]
/* MCATT1: Card Interface Attribute Space Timing, slot 1 */
ldr r2, =CONFIG_MCATT1_VAL
str r2, [r1, #MCATT1_OFFSET]
ldr r2, [r1, #MCATT1_OFFSET]
/* MCIO0: Card Interface I/O Space Timing, slot 0 */
ldr r2, =CONFIG_MCIO0_VAL
str r2, [r1, #MCIO0_OFFSET]
ldr r2, [r1, #MCIO0_OFFSET]
/* MCIO1: Card Interface I/O Space Timing, slot 1 */
ldr r2, =CONFIG_MCIO1_VAL
str r2, [r1, #MCIO1_OFFSET]
ldr r2, [r1, #MCIO1_OFFSET]
/* -------------------------------------------------------------------- */
/* Step 2c: Write FLYCNFG FIXME: what's that??? */
/* -------------------------------------------------------------------- */
ldr r2, =CONFIG_FLYCNFG_VAL
str r2, [r1, #FLYCNFG_OFFSET]
str r2, [r1, #FLYCNFG_OFFSET]
/* -------------------------------------------------------------------- */
/* Step 2d: Initialize Timing for Sync Memory (SDCLK0) */
/* -------------------------------------------------------------------- */
/* Before accessing MDREFR we need a valid DRI field, so we set */
/* this to power on defaults + DRI field. */
ldr r4, [r1, #MDREFR_OFFSET]
ldr r2, =0xFFF
bic r4, r4, r2
ldr r3, =CONFIG_MDREFR_VAL
and r3, r3, r2
orr r4, r4, r3
str r4, [r1, #MDREFR_OFFSET] /* write back MDREFR */
orr r4, r4, #MDREFR_K0RUN
orr r4, r4, #MDREFR_K0DB4
orr r4, r4, #MDREFR_K0FREE
orr r4, r4, #MDREFR_K2FREE
orr r4, r4, #MDREFR_K0DB2
orr r4, r4, #MDREFR_K1DB2
bic r4, r4, #MDREFR_K1FREE
str r4, [r1, #MDREFR_OFFSET] /* write back MDREFR */
ldr r4, [r1, #MDREFR_OFFSET]
/* Note: preserve the mdrefr value in r4 */
/* -------------------------------------------------------------------- */
/* Step 3: Initialize Synchronous Static Memory (Flash/Peripherals) */
/* -------------------------------------------------------------------- */
/* Initialize SXCNFG register. Assert the enable bits */
/*
* Write SXMRS to cause an MRS command to all enabled banks of
* synchronous static memory. Note that SXLCR need not be
* written at this time.
*/
ldr r2, =CONFIG_SXCNFG_VAL
str r2, [r1, #SXCNFG_OFFSET]
/* -------------------------------------------------------------------- */
/* Step 4: Initialize SDRAM */
/* -------------------------------------------------------------------- */
bic r4, r4, #(MDREFR_K1FREE | MDREFR_K0FREE)
orr r4, r4, #MDREFR_K1RUN
orr r4, r4, #MDREFR_K2FREE
bic r4, r4, #MDREFR_K2DB2
str r4, [r1, #MDREFR_OFFSET]
ldr r4, [r1, #MDREFR_OFFSET]
bic r4, r4, #MDREFR_SLFRSH
str r4, [r1, #MDREFR_OFFSET]
ldr r4, [r1, #MDREFR_OFFSET]
orr r4, r4, #MDREFR_E1PIN
str r4, [r1, #MDREFR_OFFSET]
ldr r4, [r1, #MDREFR_OFFSET]
nop
nop
/*
* Step 4d: write MDCNFG with MDCNFG:DEx deasserted
* (set to 0), to configure but not enable each SDRAM
* partition pair.
*/
ldr r4, =CONFIG_MDCNFG_VAL
bic r4, r4, #(MDCNFG_DE0|MDCNFG_DE1)
bic r4, r4, #(MDCNFG_DE2|MDCNFG_DE3)
str r4, [r1, #MDCNFG_OFFSET] /* write back MDCNFG */
ldr r4, [r1, #MDCNFG_OFFSET]
/*
* Step 4e: Wait for the clock to the SDRAMs to stabilize,
* 100..200 usec.
*/
ldr r3, =OSCR /* reset the OS Timer Count to zero */
mov r2, #0
str r2, [r3]
ldr r4, =0x300 /* really 0x2E1 is about 200 usec, */
/* so 0x300 should be plenty */
1:
ldr r2, [r3]
cmp r4, r2
bgt 1b
/* Step 4f: Trigger a number (usually 8) refresh cycles by */
/* attempting non-burst read or write accesses to disabled */
/* SDRAM, as commonly specified in the power up sequence */
/* documented in SDRAM data sheets. The address(es) used */
/* for this purpose must not be cacheable. */
ldr r3, =CONFIG_DRAM_BASE
str r2, [r3]
str r2, [r3]
str r2, [r3]
str r2, [r3]
str r2, [r3]
str r2, [r3]
str r2, [r3]
str r2, [r3]
/*
* Step 4g: Write MDCNFG with enable bits asserted
* (MDCNFG:DEx set to 1)
*/
ldr r3, [r1, #MDCNFG_OFFSET]
mov r4, r3
orr r3, r3, #MDCNFG_DE0
str r3, [r1, #MDCNFG_OFFSET]
mov r0, r3
/* Step 4h: Write MDMRS. */
ldr r2, =CONFIG_MDMRS_VAL
str r2, [r1, #MDMRS_OFFSET]
/* enable APD */
ldr r3, [r1, #MDREFR_OFFSET]
orr r3, r3, #MDREFR_APD
str r3, [r1, #MDREFR_OFFSET]
/* We are finished with Intel's memory controller initialisation */
skip_mem_init:
wakeup:
/* Are we waking from sleep? */
ldr r0, =RCSR
ldr r1, [r0]
and r1, r1, #(RCSR_GPR | RCSR_SMR | RCSR_WDR | RCSR_HWR)
str r1, [r0]
teq r1, #RCSR_SMR
bne initirqs
ldr r0, =PSSR
mov r1, #PSSR_PH
str r1, [r0]
/* if so, resume at PSPR */
ldr r0, =PSPR
ldr r1, [r0]
mov pc, r1
/* -------------------------------------------------------------------- */
/* Disable (mask) all interrupts at interrupt controller */
/* -------------------------------------------------------------------- */
initirqs:
mov r1, #0 /* clear int. level register (IRQ, not FIQ) */
ldr r2, =ICLR
str r1, [r2]
ldr r2, =ICMR /* mask all interrupts at the controller */
str r1, [r2]
/* -------------------------------------------------------------------- */
/* Clock initialisation */
/* -------------------------------------------------------------------- */
initclks:
/* Disable the peripheral clocks, and set the core clock frequency */
/* Turn Off on-chip peripheral clocks (except for memory) */
/* for re-configuration. */
ldr r1, =CKEN
ldr r2, =CONFIG_CKEN
str r2, [r1]
/* ... and write the core clock config register */
ldr r2, =CONFIG_CCCR
ldr r1, =CCCR
str r2, [r1]
/* Turn on turbo mode */
mrc p14, 0, r2, c6, c0, 0
orr r2, r2, #0xB /* Turbo, Fast-Bus, Freq change */
mcr p14, 0, r2, c6, c0, 0
/* Re-write MDREFR */
ldr r1, =MDCNFG
ldr r2, [r1, #MDREFR_OFFSET]
str r2, [r1, #MDREFR_OFFSET]
/* enable the 32Khz oscillator for RTC and PowerManager */
ldr r1, =OSCC
mov r2, #OSCC_OON
str r2, [r1]
/* Interrupt init: Mask all interrupts */
ldr r0, =ICMR /* enable no sources */
mov r1, #0
str r1, [r0]
/* FIXME */
#ifdef NODEBUG
/* Disable software and data breakpoints */
mov r0, #0
mcr p15, 0, r0, c14, c8, 0 /* ibcr0 */
mcr p15, 0, r0, c14, c9, 0 /* ibcr1 */
mcr p15, 0, r0, c14, c4, 0 /* dbcon */
/* Enable all debug functionality */
mov r0, #0x80000000
mcr p14, 0, r0, c10, c0, 0 /* dcsr */
#endif
/* -------------------------------------------------------------------- */
/* End lowlevel_init */
/* -------------------------------------------------------------------- */
endlowlevel_init:
mov r0, #0xa0000000
mov r1, #SZ_64M
mov r2, #0
b barebox_arm_entry
|
a3f/bareDOOM
| 3,064
|
arch/arm/boards/freescale-mx27-ads/lowlevel_init.S
|
/*
* For clock initialization, see chapter 3 of the "MCIMX27 Multimedia
* Applications Processor Reference Manual, Rev. 0.2".
*
*/
#include <config.h>
#include <mach/imx27-regs.h>
#include <asm/barebox-arm-head.h>
#define writel(val, reg) \
ldr r0, =reg; \
ldr r1, =val; \
str r1, [r0];
#define CRM_PLL_PCTL_PARAM(pd, fd, fi, fn) (((pd-1)<<26) + ((fd-1)<<16) + (fi<<10) + (fn<<0))
.macro sdram_init
/*
* DDR on CSD0
*/
writel(0x00000008, 0xD8001010)
writel(0x55555555, 0x10027828)
writel(0x55555555, 0x10027830)
writel(0x55555555, 0x10027834)
writel(0x00005005, 0x10027838)
writel(0x15555555, 0x1002783C)
writel(0x00000004, 0xD8001010)
writel(0x006ac73a, 0xD8001004)
writel(0x92100000, 0xD8001000)
writel(0x00000000, 0xA0000F00)
writel(0xA2100000, 0xD8001000)
writel(0x00000000, 0xA0000F00)
writel(0x00000000, 0xA0000F00)
writel(0x00000000, 0xA0000F00)
writel(0x00000000, 0xA0000F00)
writel(0xA2200000, 0xD8001000)
writel(0x00000000, 0xA0000F00)
writel(0x00000000, 0xA0000F00)
writel(0x00000000, 0xA0000F00)
writel(0x00000000, 0xA0000F00)
writel(0xb2100000, 0xD8001000)
ldr r0, =0xA0000033
mov r1, #0xda
strb r1, [r0]
ldr r0, =0xA1000000
mov r1, #0xff
strb r1, [r0]
writel(0x82226080, 0xD8001000)
.endm
.globl barebox_arm_reset_vector
barebox_arm_reset_vector:
bl arm_cpu_lowlevel_init
ldr sp, =MX27_IRAM_BASE_ADDR + MX27_IRAM_SIZE - 4;
/* ahb lite ip interface */
writel(0x20040304, MX27_AIPI_BASE_ADDR + MX27_AIPI1_PSR0)
writel(0xDFFBFCFB, MX27_AIPI_BASE_ADDR + MX27_AIPI1_PSR1)
writel(0x00000000, MX27_AIPI_BASE_ADDR + MX27_AIPI2_PSR0)
writel(0xFFFFFFFF, MX27_AIPI_BASE_ADDR + MX27_AIPI2_PSR1)
/* disable mpll/spll */
ldr r0, =MX27_CCM_BASE_ADDR + MX27_CSCR
ldr r1, [r0]
bic r1, r1, #0x03
str r1, [r0]
/*
* pll clock initialization - see section 3.4.3 of the i.MX27 manual
*
* FIXME: Using the 399*2 MHz values from table 3-8 doens't work
* with 1.2 V core voltage! Find out if this is
* documented somewhere.
*/
writel(0x00191403, MX27_CCM_BASE_ADDR + MX27_MPCTL0) /* MPLL = 199.5*2 MHz */
writel(0x040C2403, MX27_CCM_BASE_ADDR + MX27_SPCTL0) /* SPLL = FIXME (needs review) */
/*
* ARM clock = (399 MHz / 2) / (ARM divider = 1) = 200 MHz
* AHB clock = (399 MHz / 3) / (AHB divider = 2) = 66.5 MHz
* System clock (HCLK) = 133 MHz
*/
writel(0x33F30307 | MX27_CSCR_MPLL_RESTART | MX27_CSCR_SPLL_RESTART,
MX27_CCM_BASE_ADDR + MX27_CSCR)
/* add some delay here */
mov r1, #0x1000
1: subs r1, r1, #0x1
bne 1b
/* clock gating enable */
writel(0x00050f08, MX27_SYSCTRL_BASE_ADDR + MX27_GPCR)
/* peripheral clock divider */
/* FIXME */
writel(0x23C8F403, MX27_CCM_BASE_ADDR + MX27_PCDR0)
/* PERDIV1=08 @133 MHz */
/* PERDIV1=04 @266 MHz */
writel(0x09030913, MX27_CCM_BASE_ADDR + MX27_PCDR1)
/* skip sdram initialization if we run from ram */
cmp pc, #0xa0000000
bls 1f
cmp pc, #0xc0000000
bhi 1f
b imx27_barebox_entry
1:
sdram_init
b imx27_barebox_entry
|
a3f/bareDOOM
| 2,959
|
arch/arm/boards/scb9328/lowlevel_init.S
|
// SPDX-License-Identifier: GPL-2.0-or-later
// SPDX-FileCopyrightText: 2004 Sascha Hauer, Synertronixx GmbH
#include <mach/imx1-regs.h>
#include <asm/barebox-arm-head.h>
#define CFG_MPCTL0_VAL 0x00321431
#define CFG_SPCTL0_VAL 0x04002400
#define CFG_CSCR_VAL 0x2f030403
#define CFG_PCDR_VAL 0x000b00b8
#define writel(val, reg) \
ldr r0, =reg; \
ldr r1, =val; \
str r1, [r0];
.section .text_head_entry_start_scb9328
.globl start_scb9328
start_scb9328:
bl arm_cpu_lowlevel_init
/* Change PERCLK1DIV to 14 ie 14+1 */
writel(CFG_PCDR_VAL, MX1_CCM_BASE_ADDR + MX1_PCDR)
/* set MCU PLL Control Register 0 */
writel(CFG_MPCTL0_VAL, MX1_CCM_BASE_ADDR + MX1_MPCTL0)
/* set mpll restart bit */
ldr r0, =MX1_CCM_BASE_ADDR + MX1_CSCR
ldr r1, [r0]
orr r1,r1,#(1<<21)
str r1, [r0]
mov r2,#0x10
1:
mov r3,#0x2000
2:
subs r3,r3,#1
bne 2b
subs r2,r2,#1
bne 1b
/* set System PLL Control Register 0 */
writel(CFG_SPCTL0_VAL, MX1_CCM_BASE_ADDR + MX1_SPCTL0)
/* set spll restart bit */
ldr r0, =MX1_CCM_BASE_ADDR + MX1_CSCR
ldr r1, [r0]
orr r1,r1,#(1<<22)
str r1, [r0]
mov r2,#0x10
1:
mov r3,#0x2000
2:
subs r3,r3,#1
bne 2b
subs r2,r2,#1
bne 1b
writel(CFG_CSCR_VAL, MX1_CCM_BASE_ADDR + MX1_CSCR)
/* I have now read the ARM920 DataSheet back-to-Back, and have stumbled upon
*this.....
*
* It would appear that from a Cold-Boot the ARM920T enters "FastBus" mode CP15
* register 1, this stops it using the output of the PLL and thus runs at the
* slow rate. Unless you place the Core into "Asynch" mode, the CPU will never
* use the value set in the CM_OSC registers...regardless of what you set it
* too! Thus, although i thought i was running at 140MHz, i'm actually running
* at 40!..
* Slapping this into my bootloader does the trick...
* MRC p15,0,r0,c1,c0,0 ; read core configuration register
* ORR r0,r0,#0xC0000000 ; set asynchronous clocks and not fastbus mode
* MCR p15,0,r0,c1,c0,0 ; write modified value to core configuration
* register
*/
MRC p15,0,r0,c1,c0,0
ORR r0,r0,#0xC0000000
MCR p15,0,r0,c1,c0,0
/* Skip SDRAM initialization if we run from RAM */
cmp pc, #0x08000000
bls 1f
cmp pc, #0x09000000
bhi 1f
b 2f
1:
/* SDRAM Setup */
/* Precharge cmd, CAS = 2 */
writel(0x910a8200, MX1_SDRAMC_BASE_ADDR + MX1_SDCTL0)
/* Issue Precharge all Command */
writel(0x0, 0x08200000)
/* Autorefresh cmd, CAS = 2 */
writel(0xa10a8200, MX1_SDRAMC_BASE_ADDR + MX1_SDCTL0)
ldr r0, =0x08000000
ldr r1, =0x0 /* Issue AutoRefresh Command */
str r1, [r0]
str r1, [r0]
str r1, [r0]
str r1, [r0]
str r1, [r0]
str r1, [r0]
str r1, [r0]
str r1, [r0]
writel(0xb10a8300, MX1_SDRAMC_BASE_ADDR + MX1_SDCTL0)
/* CAS Latency 2, issue Mode Register Command, Burst Length = 8 */
writel(0x0, 0x08223000)
/* Set to Normal Mode CAS 2 */
writel(0x810a8200, MX1_SDRAMC_BASE_ADDR + MX1_SDCTL0)
2:
ldr sp, =0x08100000 - 4;
b scb9328_start
|
a3f/bareDOOM
| 5,094
|
arch/arm/boards/freescale-mx35-3ds/lowlevel_init.S
|
// SPDX-License-Identifier: GPL-2.0-or-later
// SPDX-FileCopyrightText: 2007 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
#include <mach/imx35-regs.h>
#include <mach/imx-pll.h>
#include <mach/esdctl.h>
#include <asm/cache-l2x0.h>
#include <asm-generic/memory_layout.h>
#include <asm/barebox-arm-head.h>
#include "board-mx35_3stack.h"
#define CSD0_BASE_ADDR 0x80000000
#define CSD1_BASE_ADDR 0x90000000
#define ESDCTL_BASE_ADDR 0xB8001000
#define writel(val, reg) \
ldr r0, =reg; \
ldr r1, =val; \
str r1, [r0];
#define writeb(val, reg) \
ldr r0, =reg; \
ldr r1, =val; \
strb r1, [r0];
.section ".text_bare_init","ax"
ARM_PPMRR: .word 0x40000015
L2CACHE_PARAM: .word 0x00030024
CCM_CCMR_W: .word 0x003F4208
CCM_PDR0_W: .word 0x00001000
MPCTL_PARAM_399_W: .word MPCTL_PARAM_399
MPCTL_PARAM_532_W: .word MPCTL_PARAM_532
PPCTL_PARAM_W: .word PPCTL_PARAM_300
CCM_BASE_ADDR_W: .word MX35_CCM_BASE_ADDR
.globl barebox_arm_reset_vector
barebox_arm_reset_vector:
bl arm_cpu_lowlevel_init
/* Setup a temporary stack in internal SRAM */
ldr sp, =MX35_IRAM_BASE_ADDR + MX35_IRAM_SIZE - 4
mrc 15, 0, r1, c1, c0, 0
mrc 15, 0, r0, c1, c0, 1
orr r0, r0, #7
mcr 15, 0, r0, c1, c0, 1
orr r1, r1, #(1 << 11) /* Flow prediction (Z) */
orr r1, r1, #(1 << 22) /* unaligned accesses */
orr r1, r1, #(1 << 21) /* Low Int Latency */
mcr 15, 0, r1, c1, c0, 0
mov r0, #0
mcr 15, 0, r0, c15, c2, 4
/*
* Branch predicition is now enabled. Flush the BTAC to ensure a valid
* starting point. Don't flush BTAC while it is disabled to avoid
* ARM1136 erratum 408023.
*/
mov r0, #0
mcr p15, 0, r0, c7, c5, 6 /* flush entire BTAC */
mov r0, #0
mcr 15, 0, r0, c7, c7, 0 /* invalidate I cache and D cache */
mcr 15, 0, r0, c8, c7, 0 /* invalidate TLBs */
mcr 15, 0, r0, c7, c10, 4 /* Drain the write buffer */
/* Also setup the Peripheral Port Remap register inside the core */
ldr r0, ARM_PPMRR /* start from AIPS 2GB region */
mcr p15, 0, r0, c15, c2, 4
/*
* End of ARM1136 init
*/
ldr r0, CCM_BASE_ADDR_W
ldr r2, CCM_CCMR_W
str r2, [r0, #MX35_CCM_CCMR]
ldr r3, MPCTL_PARAM_532_W /* consumer path*/
/* Set MPLL, arm clock and ahb clock */
str r3, [r0, #MX35_CCM_MPCTL]
ldr r1, PPCTL_PARAM_W
str r1, [r0, #MX35_CCM_PPCTL]
ldr r1, CCM_PDR0_W
str r1, [r0, #MX35_CCM_PDR0]
ldr r1, [r0, #MX35_CCM_CGR0]
orr r1, r1, #0x00300000
str r1, [r0, #MX35_CCM_CGR0]
ldr r1, [r0, #MX35_CCM_CGR1]
orr r1, r1, #0x00000C00
orr r1, r1, #0x00000003
str r1, [r0, #MX35_CCM_CGR1]
/* Skip SDRAM initialization if we run from RAM */
cmp pc, #CSD0_BASE_ADDR
bls 1f
cmp pc, #CSD1_BASE_ADDR
bhi 1f
b imx35_barebox_entry
1:
ldr r0, =ESDCTL_BASE_ADDR
mov r3, #0x2000
str r3, [r0, #0x0]
str r3, [r0, #0x8]
/* ip(r12) has used to save lr register in upper calling */
mov fp, lr
/* setup bank 0 */
mov r5, #0x00
mov r2, #0x00
mov r1, #MX35_CSD0_BASE_ADDR
bl setup_sdram_bank
/* setup bank 1 */
mov r5, #0x00
mov r2, #0x00
mov r1, #MX35_CSD1_BASE_ADDR
bl setup_sdram_bank
mov lr, fp
ldr r3, =ESDCTL_DELAY_LINE5
str r3, [r0, #0x30]
#ifdef CONFIG_ARCH_IMX_EXTERNAL_BOOT_NAND
mov r0, #0
b imx35_barebox_boot_nand_external
#endif /* CONFIG_ARCH_IMX_EXTERNAL_BOOT_NAND */
b imx35_barebox_entry
/*
* r0: ESDCTL control base, r1: sdram slot base
* r2: DDR type (0: DDR2, 1: MDDR) r3, r4: working base
*/
setup_sdram_bank:
mov r3, #0xE /* 0xA + 0x4 */
tst r2, #0x1
orreq r3, r3, #0x300 /* DDR2 */
str r3, [r0, #0x10]
bic r3, r3, #0x00A
str r3, [r0, #0x10]
beq 2f
mov r3, #0x20000
1: subs r3, r3, #1
bne 1b
2: tst r2, #0x1
ldreq r3, =ESDCTL_DDR2_CONFIG
ldrne r3, =ESDCTL_MDDR_CONFIG
cmp r1, #CSD1_BASE_ADDR
strlo r3, [r0, #0x4]
strhs r3, [r0, #0xC]
ldr r3, =ESDCTL_0x92220000
strlo r3, [r0, #0x0]
strhs r3, [r0, #0x8]
mov r3, #0xDA
ldr r4, =ESDCTL_PRECHARGE
strb r3, [r1, r4]
tst r2, #0x1
bne skip_set_mode
cmp r1, #CSD1_BASE_ADDR
ldr r3, =ESDCTL_0xB2220000
strlo r3, [r0, #0x0]
strhs r3, [r0, #0x8]
mov r3, #0xDA
ldr r4, =ESDCTL_DDR2_EMR2
strb r3, [r1, r4]
ldr r4, =ESDCTL_DDR2_EMR3
strb r3, [r1, r4]
ldr r4, =ESDCTL_DDR2_EN_DLL
strb r3, [r1, r4]
ldr r4, =ESDCTL_DDR2_RESET_DLL
strb r3, [r1, r4]
ldr r3, =ESDCTL_0x92220000
strlo r3, [r0, #0x0]
strhs r3, [r0, #0x8]
mov r3, #0xDA
ldr r4, =ESDCTL_PRECHARGE
strb r3, [r1, r4]
skip_set_mode:
cmp r1, #CSD1_BASE_ADDR
ldr r3, =ESDCTL_0xA2220000
strlo r3, [r0, #0x0]
strhs r3, [r0, #0x8]
mov r3, #0xDA
strb r3, [r1]
strb r3, [r1]
ldr r3, =ESDCTL_0xB2220000
strlo r3, [r0, #0x0]
strhs r3, [r0, #0x8]
tst r2, #0x1
ldreq r4, =ESDCTL_DDR2_MR
ldrne r4, =ESDCTL_MDDR_MR
mov r3, #0xDA
strb r3, [r1, r4]
ldreq r4, =ESDCTL_DDR2_OCD_DEFAULT
streqb r3, [r1, r4]
ldreq r4, =ESDCTL_DDR2_EN_DLL
ldrne r4, =ESDCTL_MDDR_EMR
strb r3, [r1, r4]
cmp r1, #CSD1_BASE_ADDR
ldr r3, =ESDCTL_0x82228080
strlo r3, [r0, #0x0]
strhs r3, [r0, #0x8]
tst r2, #0x1
moveq r4, #0x20000
movne r4, #0x200
1: subs r4, r4, #1
bne 1b
str r3, [r1, #0x100]
ldr r4, [r1, #0x100]
cmp r3, r4
movne r3, #1
moveq r3, #0
mov pc, lr
|
a3f/bareDOOM
| 1,081
|
arch/arm/boards/guf-neso/pll_init.S
|
#include <config.h>
#include <mach/imx27-regs.h>
#include <mach/imx-pll.h>
#include <linux/linkage.h>
#define writel(val, reg) \
ldr r0, =reg; \
ldr r1, =val; \
str r1, [r0];
#define CSCR_VAL MX27_CSCR_USB_DIV(3) | \
MX27_CSCR_SD_CNT(3) | \
MX27_CSCR_MSHC_SEL | \
MX27_CSCR_H264_SEL | \
MX27_CSCR_SSI1_SEL | \
MX27_CSCR_SSI2_SEL | \
MX27_CSCR_MCU_SEL | \
MX27_CSCR_ARM_SRC_MPLL | \
MX27_CSCR_SP_SEL | \
MX27_CSCR_ARM_DIV(0) | \
MX27_CSCR_FPM_EN | \
MX27_CSCR_SPEN | \
MX27_CSCR_MPEN | \
MX27_CSCR_AHB_DIV(1)
ENTRY(neso_pll_init)
/* 399 MHz */
writel(IMX_PLL_PD(0) |
IMX_PLL_MFD(51) |
IMX_PLL_MFI(7) |
IMX_PLL_MFN(35), MX27_CCM_BASE_ADDR + MX27_MPCTL0)
/* SPLL = 2 * 26 * 4.61538 MHz = 240 MHz */
writel(IMX_PLL_PD(1) |
IMX_PLL_MFD(12) |
IMX_PLL_MFI(9) |
IMX_PLL_MFN(3), MX27_CCM_BASE_ADDR + MX27_SPCTL0)
writel(CSCR_VAL | MX27_CSCR_MPLL_RESTART | MX27_CSCR_SPLL_RESTART,
MX27_CCM_BASE_ADDR + MX27_CSCR)
ldr r2, =16000
1:
subs r2, r2, #1
nop
bcs 1b
mov pc, lr
ENDPROC(neso_pll_init)
|
a3f/bareDOOM
| 4,691
|
arch/arm/boards/freescale-mx25-3ds/lowlevel_init.S
|
// SPDX-License-Identifier: GPL-2.0-or-later
// SPDX-FileCopyrightText: 2007 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
#include <linux/sizes.h>
#include <asm-generic/memory_layout.h>
#include <mach/imx25-regs.h>
#include <mach/imx-pll.h>
#include <mach/esdctl.h>
#include <asm/barebox-arm-head.h>
#define writel(val, reg) \
ldr r0, =reg; \
ldr r1, =val; \
str r1, [r0];
#define writeb(val, reg) \
ldr r0, =reg; \
ldr r1, =val; \
strb r1, [r0];
/* Assuming 24MHz input clock */
#define MPCTL_PARAM_532_MX25 \
(IMX_PLL_PD(1) | IMX_PLL_MFD(0) | IMX_PLL_MFI(11) | IMX_PLL_MFN(1))
.section ".text_bare_init","ax"
ARM_PPMRR: .word 0x40000015
L2CACHE_PARAM: .word 0x00030024
CCM_CCMR_W: .word 0x003F4208
CCM_PDR0_W: .word 0x00801000
MPCTL_PARAM_399_W: .word MPCTL_PARAM_399
MPCTL_PARAM_532_W: .word MPCTL_PARAM_532_MX25
PPCTL_PARAM_W: .word PPCTL_PARAM_300
CCM_BASE_ADDR_W: .word MX25_CCM_BASE_ADDR
.globl barebox_arm_reset_vector
barebox_arm_reset_vector:
bl arm_cpu_lowlevel_init
#define MX25_CCM_MCR 0x64
ldr r0, CCM_BASE_ADDR_W
/* default CLKO to 1/32 of the ARM core */
ldr r1, [r0, #MX25_CCM_MCR]
bic r1, r1, #0x00F00000
bic r1, r1, #0x7F000000
mov r2, #0x5F000000
add r2, r2, #0x00200000
orr r1, r1, r2
str r1, [r0, #MX25_CCM_MCR]
/* enable all the clocks */
writel(0x1FFFFFFF, MX25_CCM_BASE_ADDR + MX25_CCM_CGCR0)
writel(0xFFFFFFFF, MX25_CCM_BASE_ADDR + MX25_CCM_CGCR1)
writel(0x000FDFFF, MX25_CCM_BASE_ADDR + MX25_CCM_CGCR2)
writel(0x0000FEFF, MX25_CCM_BASE_ADDR + MX25_CCM_MCR)
/* Setup a temporary stack in SRAM */
ldr sp, =MX25_IRAM_BASE_ADDR + MX25_IRAM_SIZE - 4
/* Skip SDRAM initialization if we run from RAM */
cmp pc, #0x80000000
bls 1f
cmp pc, #0x90000000
bhi 1f
b imx25_barebox_entry
1:
ldr r0, ESDCTL_BASE_W
mov r3, #0x2000
str r3, [r0, #0x0]
str r3, [r0, #0x8]
mov r12, #0x00
mov r2, #0x1 /* mDDR */
mov r1, #MX25_CSD0_BASE_ADDR
bl setup_sdram_bank
// cmp r3, #0x0
// orreq r12, r12, #1
// eorne r2, r2, #0x1
// blne setup_sdram_bank
ldr r3, ESDCTL_DELAY5
str r3, [r0, #0x30]
#ifdef CONFIG_ARCH_IMX_EXTERNAL_BOOT_NAND
mov r0, #0
b imx25_barebox_boot_nand_external
#endif /* CONFIG_ARCH_IMX_EXTERNAL_BOOT_NAND */
ret:
b imx25_barebox_entry
/*
* r0: control base, r1: ram bank base
* r2: ddr type(0:DDR2, 1:MDDR) r3, r4: working
*/
setup_sdram_bank:
mov r3, #0xE /* 0xA + 0x4 */
tst r2, #0x1
orreq r3, r3, #0x300 /* DDR2 */
str r3, [r0, #0x10]
bic r3, r3, #0x00A
str r3, [r0, #0x10]
beq 2f
mov r3, #0x20000
1: subs r3, r3, #1
bne 1b
2: adr r4, ESDCTL_CONFIG
tst r2, #0x1
ldreq r3, [r4, #0x0]
ldrne r3, [r4, #0x4]
cmp r1, #MX25_CSD1_BASE_ADDR
strlo r3, [r0, #0x4]
strhs r3, [r0, #0xC]
ldr r3, ESDCTL_0x92220000
strlo r3, [r0, #0x0]
strhs r3, [r0, #0x8]
mov r3, #0xDA
ldr r4, RAM_PARAM1_MDDR
strb r3, [r1, r4]
tst r2, #0x1
bne skip_set_mode
cmp r1, #MX25_CSD1_BASE_ADDR
ldr r3, ESDCTL_0xB2220000
strlo r3, [r0, #0x0]
strhs r3, [r0, #0x8]
mov r3, #0xDA
ldr r4, RAM_PARAM4_MDDR
strb r3, [r1, r4]
ldr r4, RAM_PARAM5_MDDR
strb r3, [r1, r4]
ldr r4, RAM_PARAM3_MDDR
strb r3, [r1, r4]
ldr r4, RAM_PARAM2_MDDR
strb r3, [r1, r4]
ldr r3, ESDCTL_0x92220000
strlo r3, [r0, #0x0]
strhs r3, [r0, #0x8]
mov r3, #0xDA
ldr r4, RAM_PARAM1_MDDR
strb r3, [r1, r4]
skip_set_mode:
cmp r1, #MX25_CSD1_BASE_ADDR
ldr r3, ESDCTL_0xA2220000
strlo r3, [r0, #0x0]
strhs r3, [r0, #0x8]
mov r3, #0xDA
strb r3, [r1]
strb r3, [r1]
ldr r3, ESDCTL_0xB2220000
strlo r3, [r0, #0x0]
strhs r3, [r0, #0x8]
adr r4, RAM_PARAM6_MDDR
tst r2, #0x1
ldreq r4, [r4, #0x0]
ldrne r4, [r4, #0x4]
mov r3, #0xDA
strb r3, [r1, r4]
ldreq r4, RAM_PARAM7_MDDR
streqb r3, [r1, r4]
adr r4, RAM_PARAM3_MDDR
ldreq r4, [r4, #0x0]
ldrne r4, [r4, #0x4]
strb r3, [r1, r4]
cmp r1, #MX25_CSD1_BASE_ADDR
ldr r3, ESDCTL_0x82226080
strlo r3, [r0, #0x0]
strhs r3, [r0, #0x8]
tst r2, #0x1
moveq r4, #0x20000
movne r4, #0x200
1: subs r4, r4, #1
bne 1b
str r3, [r1, #0x100]
ldr r4, [r1, #0x100]
cmp r3, r4
movne r3, #1
moveq r3, #0
mov pc, lr
RAM_PARAM1_MDDR: .word 0x00000400
RAM_PARAM2_MDDR: .word 0x00000333
RAM_PARAM3_MDDR: .word 0x02000400
.word 0x02000000
RAM_PARAM4_MDDR: .word 0x04000000
RAM_PARAM5_MDDR: .word 0x06000000
RAM_PARAM6_MDDR: .word 0x00000233
.word 0x00000033
RAM_PARAM7_MDDR: .word 0x02000780
ESDCTL_0x92220000: .word 0x92210000
ESDCTL_0xA2220000: .word 0xA2210000
ESDCTL_0xB2220000: .word 0xB2210000
ESDCTL_0x82226080: .word 0x82216080
ESDCTL_CONFIG: .word 0x007FFC3F
.word 0x007FFC3F
ESDCTL_DELAY5: .word 0x00F49F00
ESDCTL_BASE_W: .word MX25_ESDCTL_BASE_ADDR
|
a3f/bareDOOM
| 6,780
|
arch/arm/boards/a9m2440/lowlevel_init.S
|
/*
*
*/
#include <config.h>
#include <linux/sizes.h>
#include <mach/s3c-iomap.h>
#include <mach/s3c24xx-gpio.h>
#include <asm/barebox-arm-head.h>
.section ".text_bare_init.barebox_arm_reset_vector","ax"
/*
* To be able to setup the SDRAM interface correctly, we need some
* external information about the connected SDRAM devices.
*
* When we set GPH8, we can read at GPB:
* Bit 0..1: Memory device size -> 00=16M, 01=64M, 10=32M, 11=128M
* Bit 2: CL setting
*
* Some remarks: The CL setting seems useless. It always signals a CL3
* requirement, but the SDRAM types I found on the cards are supporting
 * CL2 @ 100 MHz. But these SDRAM types also support only 105 MHz max.
* So, we never need CL3 because we can't run the CPU at 533 MHz (which
* implies an 133 MHz SDRAM clock).
* All devices are connected via 32 bit databus
*
* Note: I was able to check the 32 MiB and 64 MiB configuration only. I didn't
* had access to a 16 MiB nor 128 MiB config.
*
*/
sdram_init:
/*
* Read the configuration. After reset until any GPIO port is
* configured yet, these pins show external settings, to detect
* the SDRAM size.
*/
ldr r1, =S3C_GPBDAT
ldr r4, [r1]
and r4, r4, #0x3
ldr r1, =S3C_MEMCTL_BASE
/* configure both SDRAM areas with 32 bit data bus width */
ldr r0, =((0x2 << 24) + (0x2 << 28))
str r0, [r1], #0x1c /* post add register offset for bank6 */
/*
* With the configuration we simply need to calculate an offset into
* our table with the predefined SDRAM settings
*/
adr r0, SDRAMDATA
mov r2, #6*4 /* # of bytes per table entry */
mul r3, r4, r2
add r0, r0, r3 /* start address of the entry */
/*
* store the table entry data into the registers
*/
1:
ldr r3, [r0], #4
str r3, [r1], #4
subs r2, r2, #4
bne 1b
/* TODO: Check if the second bank is populated, and switch it off if not */
mov pc, lr
/*
* we need 4 sets of memory settings per main CPU clock speed
*
* 400MHz main speed:
* - 16 MiB in the first bank, maybe 16 MiB in the second bank (untested!)
* - 32 MiB in the first bank, maybe 32 MiB in the second bank (CL=2)
* - 64 MiB in the first bank, maybe 64 MiB in the second bank (CL=2)
* - 128 MiB in the first bank, maybe 128 MiB in the second bank (untested!)
*
* Note: SDRAM clock runs at 100MHz
*/
SDRAMDATA:
/* --------------------------- 16 MiB @ 100MHz --------------------------- */
/*
* - MT = 11 (= sync dram type)
* - Trcd = 01 (= CL3)
* - SCAN = 00 (= 8 bit columns)
*/
.word ((0x3 << 15) + (0x1 << 2) + (0x0))
.word ((0x3 << 15) + (0x1 << 2) + (0x0))
/*
* SDRAM refresh settings
* - REFEN = 1 (= refresh enabled)
* - TREFMD = 0 (= auto refresh)
* - Trp = 00 (= 2 RAS precharge clocks)
* - Tsrc = 11 (= 7 clocks -> row cycle time @100MHz 2+5=7 -> 70ns)
* - Refrsh = 2^11 + 1 - 100 * 15.6 = 2049 - 1560 = FIXME
*/
.word ((0x1 << 23) + (0x0 << 22) + (0x0 << 20) + (0x3 << 18) + 468)
/*
* SDRAM banksize
* - BURST_EN = 0 (= burst mode disabled)
* - SCKE_EN = 1 (= SDRAM SCKE enabled)
* - SCLK_EN = 1 (= clock active only during accesses)
* - BK67MAP = 010 (= 128MiB) FIXME?????
*/
.word ((0 << 7) + (1 << 5) + (1 << 4) + 2)
/*
* SDRAM mode register
* CL = 010 (= 2 clocks)
*/
.word (0x2 << 4)
.word (0x2 << 4)
/* ------------- one or two banks with 64 MiB @ 100MHz -------------------- */
/*
* - MT = 11 (= sync dram type)
* - Trcd = 00 (= CL2)
* - SCAN = 01 (= 9 bit columns)
*/
.word ((0x3 << 15) + (0x0 << 2) + (0x1))
.word ((0x3 << 15) + (0x0 << 2) + (0x1))
/*
* SDRAM refresh settings
* - REFEN = 1 (= refresh enabled)
* - TREFMD = 0 (= auto refresh)
* - Trp = 00 (= 2 RAS precharge clocks)
* - Tsrc = 01 (= 5 clocks -> row cycle time @100MHz 2+5=7 -> 70ns)
* - Refrsh = 2^11 + 1 - 100 * 15.6 = 2049 - 1560 = 489
*/
.word ((0x1 << 23) + (0x0 << 22) + (0x0 << 20) + (0x1 << 18) + 489)
/*
* SDRAM banksize
* - BURST_EN = 1 (= burst mode enabled)
* - SCKE_EN = 1 (= SDRAM SCKE enabled)
* - SCLK_EN = 1 (= clock active only during accesses)
* - BK67MAP = 001 (= 64 MiB)
*/
.word ((1 << 7) + (1 << 5) + (1 << 4) + 1)
/*
* SDRAM mode register
* CL = 010 (= 2 clocks)
*/
.word (0x2 << 4)
.word (0x2 << 4)
/* ------------- one or two banks with 32 MiB @ 100MHz -------------------- */
/*
* - MT = 11 (= sync dram type)
* - Trcd = 00 (= CL2)
* - SCAN = 01 (= 9 bit columns)
*/
.word ((0x3 << 15) + (0x0 << 2) + (0x1))
.word ((0x3 << 15) + (0x0 << 2) + (0x1))
/*
* SDRAM refresh settings
* - REFEN = 1 (= refresh enabled)
* - TREFMD = 0 (= auto refresh)
* - Trp = 00 (= 2 RAS precharge clocks)
* - Tsrc = 01 (= 5 clocks -> row cycle time @100MHz 2+5=7 -> 70ns)
* - Refrsh = 2^11 + 1 - 100 * 15.6 = 2049 - 1560 = 489
*/
.word ((0x1 << 23) + (0x0 << 22) + (0x0 << 20) + (0x1 << 18) + 489)
/*
* SDRAM banksize
* - BURST_EN = 1 (= burst mode enabled)
* - SCKE_EN = 1 (= SDRAM SCKE enabled)
* - SCLK_EN = 1 (= clock active only during accesses)
* - BK67MAP = 000 (= 32 MiB)
*/
.word ((1 << 7) + (1 << 5) + (1 << 4) + 0)
/*
* SDRAM mode register
* CL = 010 (= 2 clocks)
*/
.word (0x2 << 4)
.word (0x2 << 4)
/* ------------ one or two banks with 128 MiB @ 100MHz -------------------- */
/*
* - MT = 11 (= sync dram type)
* - Trcd = 00 (= CL2)
* - SCAN = 01 (= 9 bit columns)
*/
.word ((0x3 << 15) + (0x0 << 2) + (0x1))
.word ((0x3 << 15) + (0x0 << 2) + (0x1))
/*
* SDRAM refresh settings
* - REFEN = 1 (= refresh enabled)
* - TREFMD = 0 (= auto refresh)
* - Trp = 00 (= 2 RAS precharge clocks)
* - Tsrc = 01 (= 5 clocks -> row cycle time @100MHz 2+5=7 -> 70ns)
* - Refrsh = 2^11 + 1 - 100 * 7.5 = 2049 - FIXME = 1259
*/
.word ((0x1 << 23) + (0x0 << 22) + (0x1 << 20) + (0x3 << 18) + 1259)
/*
* SDRAM banksize
* - BURST_EN = 0 (= burst mode disabled)
* - SCKE_EN = 1 (= SDRAM SCKE enabled)
* - SCLK_EN = 1 (= clock active only during accesses)
* - BK67MAP = 010 (= 128MiB)
*/
.word (0x32)
/*
* SDRAM mode register
* CL = 010 (= 2 clocks)
*/
.word (0x2 << 4)
.word (0x2 << 4)
/* ------------------------------------------------------------------------ */
/*
 * Reset entry for the S3C24x0 board.
 * Performs generic CPU init, disables the watchdog, then sets up PLLs and
 * SDRAM — but only when not already executing from SDRAM (warm start /
 * second-stage case is detected from the current PC).
 * Ends by tailing into barebox_arm_entry(membase, memsize, boarddata).
 */
.globl barebox_arm_reset_vector
barebox_arm_reset_vector:
	bl arm_cpu_lowlevel_init
	bl s3c24x0_disable_wd		/* stop watchdog before the slow init below */

	/* skip everything here if we are already running from SDRAM */
	cmp pc, #S3C_SDRAM_BASE
	blo 1f				/* PC below SDRAM window -> must init */
	cmp pc, #S3C_SDRAM_END
	bhs 1f				/* PC above SDRAM window -> must init */
	b out				/* PC inside SDRAM: skip PLL/SDRAM setup */

/* we are running from NOR or NAND/SRAM memory. Do further initialisation */
1:
	bl s3c24x0_pll_init
	bl sdram_init

#ifdef CONFIG_S3C_NAND_BOOT
	/* up to here we are running from the internal SRAM area */
	bl s3c24x0_nand_boot
#endif

out:
	/* barebox_arm_entry(base = SDRAM start, size = 32 MiB, boarddata = 0) */
	mov r0, #S3C_SDRAM_BASE
	mov r1, #SZ_32M
	mov r2, #0
	b barebox_arm_entry
|
a3f/bareDOOM
| 3,544
|
arch/arm/boards/eukrea_cpuimx27/lowlevel_init.S
|
#include <config.h>
#include <asm-generic/memory_layout.h>
#include <mach/imx27-regs.h>
#include <mach/esdctl.h>
#include <asm/barebox-arm-head.h>
#define writel(val, reg) \
ldr r0, =reg; \
ldr r1, =val; \
str r1, [r0];
#if defined CONFIG_EUKREA_CPUIMX27_SDRAM_256MB
#define ROWS0 ESDCTL0_ROW14
#define CFG0 0x0029572D
#elif defined CONFIG_EUKREA_CPUIMX27_SDRAM_128MB
#define ROWS0 ESDCTL0_ROW13
#define CFG0 0x00095728
#endif
#define ESDCTL0_VAL (ESDCTL0_SDE | ROWS0 | ESDCTL0_COL10)
/*
 * sdram_init: JEDEC initialisation sequence for the DDR SDRAM on CSD0
 * (i.MX27 ESDCTL controller). The sequence is order-critical:
 * enable -> drive strength -> reset -> config -> precharge-all ->
 * auto-refresh x8 -> load mode register -> extended mode -> normal mode.
 * Clobbers r0-r2 (via the writel macro and the refresh loop).
 */
.macro sdram_init
	/*
	 * DDR on CSD0
	 */
	/* Enable DDR SDRAM operation */
	writel(0x0000000C, MX27_ESDCTL_BASE_ADDR + IMX_ESDMISC)
	/* Set the driving strength */
	writel(0x55555555, MX27_SYSCTRL_BASE_ADDR + MX27_DSCR(3))
	writel(0x55555555, MX27_SYSCTRL_BASE_ADDR + MX27_DSCR(5))
	writel(0x55555555, MX27_SYSCTRL_BASE_ADDR + MX27_DSCR(6))
	writel(0x00005005, MX27_SYSCTRL_BASE_ADDR + MX27_DSCR(7))
	writel(0x15555555, MX27_SYSCTRL_BASE_ADDR + MX27_DSCR(8))
	/* Initial reset */
	writel(0x00000004, MX27_ESDCTL_BASE_ADDR + IMX_ESDMISC)
	writel(CFG0, MX27_ESDCTL_BASE_ADDR + IMX_ESDCFG0)
	/* precharge CSD0 all banks */
	writel(ESDCTL0_VAL | ESDCTL0_SMODE_PRECHARGE,
		MX27_ESDCTL_BASE_ADDR + IMX_ESDCTL0)
	writel(0x00000000, 0xA0000F00) /* CSD0 precharge address (A10 = 1) */
	writel(ESDCTL0_VAL | ESDCTL0_SMODE_AUTO_REFRESH,
		MX27_ESDCTL_BASE_ADDR + IMX_ESDCTL0)

	/* issue 8 auto-refresh cycles by reading/writing the special address */
	ldr r0, =0xa0000f00
	mov r1, #0
	mov r2, #8
1:
	str r1, [r0]
	subs r2, #1
	bne 1b

	writel(ESDCTL0_VAL | ESDCTL0_SMODE_LOAD_MODE,
		MX27_ESDCTL_BASE_ADDR + IMX_ESDCTL0)
	/* mode-register write: address bits encode the MR value (0x33) */
	ldr r0, =0xA0000033
	mov r1, #0xda
	strb r1, [r0]
	/* extended mode register write: target address depends on size */
#if defined CONFIG_EUKREA_CPUIMX27_SDRAM_256MB
	ldr r0, =0xA2000000
#elif defined CONFIG_EUKREA_CPUIMX27_SDRAM_128MB
	ldr r0, =0xA1000000
#endif
	mov r1, #0xff
	strb r1, [r0]
	/* back to normal operation with refresh and burst settings */
	writel(ESDCTL0_VAL | ESDCTL0_DSIZ_31_0 | ESDCTL0_REF4 |
		ESDCTL0_BL | ESDCTL0_SMODE_NORMAL,
		MX27_ESDCTL_BASE_ADDR + IMX_ESDCTL0)
.endm
.section ".text_bare_init","ax"

/*
 * Reset entry for the Eukrea CPUIMX27 board.
 * Sets up AIPI, PLLs, clock gating and dividers, then initialises SDRAM
 * unless already running from it, and branches into the common i.MX27
 * barebox entry (or the external NAND boot path).
 */
.globl barebox_arm_reset_vector
barebox_arm_reset_vector:
	bl arm_cpu_lowlevel_init

	/* temporary stack at the top of internal SRAM */
	ldr sp, =MX27_IRAM_BASE_ADDR + MX27_IRAM_SIZE - 4;

	/* ahb lite ip interface */
	writel(0x20040304, MX27_AIPI_BASE_ADDR + MX27_AIPI1_PSR0)
	writel(0xDFFBFCFB, MX27_AIPI_BASE_ADDR + MX27_AIPI1_PSR1)
	writel(0x00000000, MX27_AIPI_BASE_ADDR + MX27_AIPI2_PSR0)
	writel(0xFFFFFFFF, MX27_AIPI_BASE_ADDR + MX27_AIPI2_PSR1)

	/* disable mpll/spll */
	ldr r0, =MX27_CCM_BASE_ADDR + MX27_CSCR
	ldr r1, [r0]
	bic r1, r1, #0x03
	str r1, [r0]

	/*
	 * pll clock initialization - see section 3.4.3 of the i.MX27 manual
	 */
	/* MPLL = 399 MHz */
	writel(0x00331C23, MX27_CCM_BASE_ADDR + MX27_MPCTL0)
	/* SPLL = 240 MHz */
	writel(0x040C2403, MX27_CCM_BASE_ADDR + MX27_SPCTL0)
	writel(0x33F38107 | MX27_CSCR_MPLL_RESTART | MX27_CSCR_SPLL_RESTART,
		MX27_CCM_BASE_ADDR + MX27_CSCR)

	/* add some delay here */
	mov r1, #0x1000
1:	subs r1, r1, #0x1
	bne 1b

	/* clock gating enable */
	writel(0x00050f08, MX27_SYSCTRL_BASE_ADDR + MX27_GPCR)

	/* peripheral clock divider */
	/* FIXME */
	writel(0x130400c3, MX27_CCM_BASE_ADDR + MX27_PCDR0)
	/* PERDIV1=08 @133 MHz */
	writel(0x09030208, MX27_CCM_BASE_ADDR + MX27_PCDR1)
	/* PERDIV1=04 @266 MHz */

	/* skip sdram initialization if we run from ram */
	cmp pc, #0xa0000000
	bls 1f				/* below SDRAM window -> init */
	cmp pc, #0xc0000000
	bhi 1f				/* above SDRAM window -> init */
	b imx27_barebox_entry

1:
	sdram_init

#ifdef CONFIG_ARCH_IMX_EXTERNAL_BOOT_NAND
	mov r0, #0
	b imx27_barebox_boot_nand_external
#endif /* CONFIG_ARCH_IMX_EXTERNAL_BOOT_NAND */

ret:
	b imx27_barebox_entry
|
a3f/bareDOOM
| 2,612
|
arch/arm/boards/freescale-mx21-ads/lowlevel_init.S
|
// SPDX-License-Identifier: GPL-2.0-or-later
// SPDX-FileCopyrightText: 2010 Jaccon Bastiaansen <jaccon.bastiaansen@gmail.com>
#include <config.h>
#include <linux/sizes.h>
#include <asm-generic/memory_layout.h>
#include <mach/imx21-regs.h>
#include <asm/barebox-arm-head.h>
.section ".text_bare_init","ax"

/*
 * Reset entry for the Freescale MX21ADS board.
 * Configures the AIPI bridges, core/peripheral clocks, the SDRAM
 * controller (precharge -> auto-refresh -> mode register -> normal),
 * then hands over to barebox_arm_entry with 64 MiB at 0xc0000000.
 */
.globl barebox_arm_reset_vector
barebox_arm_reset_vector:
	bl arm_cpu_lowlevel_init

	/*
	 * Initialize the AHB-Lite IP Interface (AIPI) module (to enable access to
	 * on chip peripherals) as described in section 7.2 of rev3 of the i.MX21
	 * reference manual.
	 */
	ldr r0, =MX21_AIPI_BASE_ADDR + MX21_AIPI1_PSR0
	ldr r1, =0x00040304
	str r1, [r0]
	ldr r0, =MX21_AIPI_BASE_ADDR + MX21_AIPI1_PSR1
	ldr r1, =0xfffbfcfb
	str r1, [r0]
	ldr r0, =MX21_AIPI_BASE_ADDR + MX21_AIPI2_PSR0
	ldr r1, =0x3ffc0000
	str r1, [r0]
	ldr r0, =MX21_AIPI_BASE_ADDR + MX21_AIPI2_PSR1
	ldr r1, =0xffffffff
	str r1, [r0]

	/*
	 * Configure CPU core clock (266MHz), peripheral clock (133MHz) and enable
	 * the clock to peripherals.
	 */
	ldr r0, =MX21_CCM_BASE_ADDR + MX21_CSCR
	ldr r1, =0x17180607
	str r1, [r0]
	ldr r0, =MX21_CCM_BASE_ADDR + MX21_PCCR1
	ldr r1, =0x0e000000
	str r1, [r0]

	/*
	 * SDRAM and SDRAM controller configuration
	 */
	/*
	 * CSD1 not required, because the MX21ADS board only contains 64Mbyte.
	 * CS3 can therefore be made available.
	 */
	ldr r0, =MX21_SYSCTRL_BASE_ADDR + MX21_FMCR
	ldr r1, =0xffffffc9
	str r1, [r0]

	/* Skip SDRAM initialization if we run from RAM */
	cmp pc, #0xc0000000
	bls 1f				/* below SDRAM window -> init */
	cmp pc, #0xc8000000
	bhi 1f				/* above SDRAM window -> init */
	b ret

1:
	/* Precharge */
	ldr r0, =MX21_X_MEMC_BASE_ADDR + MX21_SDCTL0
	ldr r1, =0x92120300
	str r1, [r0]
	ldr r2, =0xc0200000		/* precharge-all trigger address (A10 set) */
	ldr r1, [r2]

	bl mem_delay			/* NOTE: clobbers r4 and lr; we never return to caller */

	/* Auto refresh: eight dummy reads issue eight refresh cycles */
	ldr r1, =0xa2120300
	str r1, [r0]
	ldr r2, =0xc0000000
	ldr r1, [r2]
	ldr r1, [r2]
	ldr r1, [r2]
	ldr r1, [r2]
	ldr r1, [r2]
	ldr r1, [r2]
	ldr r1, [r2]
	ldr r1, [r2]

	/* Set mode register */
	ldr r1, =0xB2120300
	str r1, [r0]
	ldr r1, =0xC0119800		/* address bits encode the mode-register value */
	ldr r2, [r1]

	bl mem_delay

	/* Back to Normal Mode */
	ldr r1, =0x8212F339
	str r1, [r0]

	/* Set NFC_CLK to 24MHz */
	ldr r0, =MX21_CCM_BASE_ADDR + MX21_PCDR0
	ldr r1, =0x6419a007
	str r1, [r0]

#ifdef CONFIG_ARCH_IMX_EXTERNAL_BOOT_NAND
	/* Setup a temporary stack in SRAM */
	ldr sp, =MX21_IRAM_BASE_ADDR + MX21_IRAM_SIZE - 4
	b imx21_barebox_boot_nand_external
#endif /* CONFIG_ARCH_IMX_EXTERNAL_BOOT_NAND */

ret:
	/* barebox_arm_entry(base = 0xc0000000, size = 64 MiB, boarddata = 0) */
	mov r0, #0xc0000000
	mov r1, #SZ_64M
	mov r2, #0
	b barebox_arm_entry
/*
 * spin for a while. we need to wait at least 200 usecs.
 */
/*
 * NOTE(review): calibrated busy-wait (0x4000 iterations) — actual duration
 * depends on core clock; confirm it still covers 200us at the configured
 * frequency. Clobbers r4; returns via mov pc, lr (ARM state only).
 */
mem_delay:
	mov r4, #0x4000
spin:	subs r4, r4, #1
	bne spin
	mov pc, lr
|
aadomn/aes
| 39,006
|
armcortexm/barrel_shiftrows/aes_encrypt.s
|
/******************************************************************************
* Bitsliced implementations of AES-128 and AES-256 (encryption only) in C using
* the barrel-shiftrows representation.
*
* See the paper at https://eprint.iacr.org/2020/1123.pdf for more details.
*
* @author Alexandre Adomnicai, Nanyang Technological University, Singapore
* alexandre.adomnicai@ntu.edu.sg
*
* @date October 2020
******************************************************************************/
.syntax unified
.thumb
/******************************************************************************
* Macro to compute the SWAPMOVE technique: swap the bits in 'in1' masked by 'm'
* by the bits in 'in0' masked by 'm << n' and put the results in 'out0', 'out1'
******************************************************************************/
/* swpmv: SWAPMOVE(out0/out1 <- in0/in1): exchange the bits of in1 selected
 * by mask m with the bits of in0 selected by (m << n). tmp is scratch. */
.macro swpmv out0, out1, in0, in1, m, n, tmp
	eor \tmp, \in1, \in0, lsr \n	// tmp = in1 ^ (in0 >> n)
	and \tmp, \m			// keep only the masked bit positions
	eor \out1, \in1, \tmp		// flip selected bits in in1
	eor \out0, \in0, \tmp, lsl \n	// flip matching bits in in0
.endm
/******************************************************************************
* AddRoundKey on a quarter state (i.e. 1024/4 = 256 bits).
******************************************************************************/
.align 2
/*
 * In:  r12 -> 8-word quarter state; [sp, #180] -> current round-key pointer.
 * Out: r4-r11 = state ^ round key; round-key pointer advanced by 32 bytes
 *      and written back to [sp, #180].
 * Clobbers: r0-r3, r12.
 */
add_round_key:
	ldm r12, {r4-r11} //state[0] ... state[7]
	ldr.w r12, [sp, #180] //load rkeys' address
	ldmia.w r12!, {r0-r3} //rkeys for state[0]...state[3]
	eor r4, r0, r4
	eor r5, r1, r5
	eor r6, r2, r6
	eor r7, r3, r7
	ldmia.w r12!, {r0-r3} //rkeys for state[4]...state[7]
	eor r8, r0, r8
	eor r9, r1, r9
	eor r10, r2, r10
	eor r11, r3, r11
	str r12, [sp, #180] //save rkeys' address
	bx lr
/******************************************************************************
* Bitsliced implementation of the AES Sbox based on Boyar, Peralta and Calik.
* See http://www.cs.yale.edu/homes/peralta/CircuitStuff/SLP_AES_113.txt
* Note that the 4 NOT (^= 0xffffffff) are moved to the key schedule.
* Updates only a quarter of the state (i.e. 256 bits) => need to be applied 4
* times per round when considering the barrel-shiftrows representation.
******************************************************************************/
.align 2
/*
 * In:  r4-r11 = eight bitsliced input words U0..U7 (one quarter state).
 * Out: see the register/output map in the trailing comment below.
 * Uses [sp, #128..#176] as scratch; [sp, #176] holds the saved lr.
 * The instruction order realises the 113-gate straight-line program and
 * must not be reordered.
 */
sbox:
	str.w r14, [sp, #176] //save link register
	eor r1, r7, r9 //Exec y14 = U3 ^ U5; into r1
	eor r3, r4, r10 //Exec y13 = U0 ^ U6; into r3
	eor r2, r3, r1 //Exec y12 = y13 ^ y14; into r2
	eor r0, r8, r2 //Exec t1 = U4 ^ y12; into r0
	eor r14, r0, r9 //Exec y15 = t1 ^ U5; into r14
	and r12, r2, r14 //Exec t2 = y12 & y15; into r12
	eor r8, r14, r11 //Exec y6 = y15 ^ U7; into r8
	eor r0, r0, r5 //Exec y20 = t1 ^ U1; into r0
	str.w r2, [sp, #172] //Store r2/y12 on stack
	eor r2, r4, r7 //Exec y9 = U0 ^ U3; into r2
	str.w r0, [sp, #168] //Store r0/y20 on stack
	eor r0, r0, r2 //Exec y11 = y20 ^ y9; into r0
	str.w r2, [sp, #164] //Store r2/y9 on stack
	and r2, r2, r0 //Exec t12 = y9 & y11; into r2
	str.w r8, [sp, #160] //Store r8/y6 on stack
	eor r8, r11, r0 //Exec y7 = U7 ^ y11; into r8
	eor r9, r4, r9 //Exec y8 = U0 ^ U5; into r9
	eor r6, r5, r6 //Exec t0 = U1 ^ U2; into r6
	eor r5, r14, r6 //Exec y10 = y15 ^ t0; into r5
	str.w r14, [sp, #156] //Store r14/y15 on stack
	eor r14, r5, r0 //Exec y17 = y10 ^ y11; into r14
	str.w r1, [sp, #152] //Store r1/y14 on stack
	and r1, r1, r14 //Exec t13 = y14 & y17; into r1
	eor r1, r1, r2 //Exec t14 = t13 ^ t12; into r1
	str.w r14, [sp, #148] //Store r14/y17 on stack
	eor r14, r5, r9 //Exec y19 = y10 ^ y8; into r14
	str.w r5, [sp, #144] //Store r5/y10 on stack
	and r5, r9, r5 //Exec t15 = y8 & y10; into r5
	eor r2, r5, r2 //Exec t16 = t15 ^ t12; into r2
	eor r5, r6, r0 //Exec y16 = t0 ^ y11; into r5
	str.w r0, [sp, #140] //Store r0/y11 on stack
	eor r0, r3, r5 //Exec y21 = y13 ^ y16; into r0
	str.w r3, [sp, #136] //Store r3/y13 on stack
	and r3, r3, r5 //Exec t7 = y13 & y16; into r3
	str.w r5, [sp, #132] //Store r5/y16 on stack
	str.w r11, [sp, #128] //Store r11/U7 on stack
	eor r5, r4, r5 //Exec y18 = U0 ^ y16; into r5
	eor r6, r6, r11 //Exec y1 = t0 ^ U7; into r6
	eor r7, r6, r7 //Exec y4 = y1 ^ U3; into r7
	and r11, r7, r11 //Exec t5 = y4 & U7; into r11
	eor r11, r11, r12 //Exec t6 = t5 ^ t2; into r11
	eor r11, r11, r2 //Exec t18 = t6 ^ t16; into r11
	eor r14, r11, r14 //Exec t22 = t18 ^ y19; into r14
	eor r4, r6, r4 //Exec y2 = y1 ^ U0; into r4
	and r11, r4, r8 //Exec t10 = y2 & y7; into r11
	eor r11, r11, r3 //Exec t11 = t10 ^ t7; into r11
	eor r2, r11, r2 //Exec t20 = t11 ^ t16; into r2
	eor r2, r2, r5 //Exec t24 = t20 ^ y18; into r2
	eor r10, r6, r10 //Exec y5 = y1 ^ U6; into r10
	and r11, r10, r6 //Exec t8 = y5 & y1; into r11
	eor r3, r11, r3 //Exec t9 = t8 ^ t7; into r3
	eor r3, r3, r1 //Exec t19 = t9 ^ t14; into r3
	eor r3, r3, r0 //Exec t23 = t19 ^ y21; into r3
	eor r0, r10, r9 //Exec y3 = y5 ^ y8; into r0
	ldr.w r11, [sp, #160] //Load y6 into r11
	and r5, r0, r11 //Exec t3 = y3 & y6; into r5
	eor r12, r5, r12 //Exec t4 = t3 ^ t2; into r12
	ldr.w r5, [sp, #168] //Load y20 into r5
	str.w r7, [sp, #160] //Store r7/y4 on stack
	eor r12, r12, r5 //Exec t17 = t4 ^ y20; into r12
	eor r1, r12, r1 //Exec t21 = t17 ^ t14; into r1
	and r12, r1, r3 //Exec t26 = t21 & t23; into r12
	eor r5, r2, r12 //Exec t27 = t24 ^ t26; into r5
	eor r12, r14, r12 //Exec t31 = t22 ^ t26; into r12
	eor r1, r1, r14 //Exec t25 = t21 ^ t22; into r1
	and r7, r1, r5 //Exec t28 = t25 & t27; into r7
	eor r14, r7, r14 //Exec t29 = t28 ^ t22; into r14
	and r4, r14, r4 //Exec z14 = t29 & y2; into r4
	and r8, r14, r8 //Exec z5 = t29 & y7; into r8
	eor r7, r3, r2 //Exec t30 = t23 ^ t24; into r7
	and r12, r12, r7 //Exec t32 = t31 & t30; into r12
	eor r12, r12, r2 //Exec t33 = t32 ^ t24; into r12
	eor r7, r5, r12 //Exec t35 = t27 ^ t33; into r7
	and r2, r2, r7 //Exec t36 = t24 & t35; into r2
	eor r5, r5, r2 //Exec t38 = t27 ^ t36; into r5
	and r5, r14, r5 //Exec t39 = t29 & t38; into r5
	eor r1, r1, r5 //Exec t40 = t25 ^ t39; into r1
	eor r5, r14, r1 //Exec t43 = t29 ^ t40; into r5
	ldr.w r7, [sp, #132] //Load y16 into r7
	and r7, r5, r7 //Exec z3 = t43 & y16; into r7
	eor r8, r7, r8 //Exec tc12 = z3 ^ z5; into r8
	str.w r8, [sp, #168] //Store r8/tc12 on stack
	and r10, r1, r10 //Exec z13 = t40 & y5; into r10
	ldr.w r8, [sp, #136] //Load y13 into r8
	and r8, r5, r8 //Exec z12 = t43 & y13; into r8
	and r6, r1, r6 //Exec z4 = t40 & y1; into r6
	eor r6, r7, r6 //Exec tc6 = z3 ^ z4; into r6
	eor r3, r3, r12 //Exec t34 = t23 ^ t33; into r3
	eor r3, r2, r3 //Exec t37 = t36 ^ t34; into r3
	eor r1, r1, r3 //Exec t41 = t40 ^ t37; into r1
	ldr.w r5, [sp, #144] //Load y10 into r5
	and r2, r1, r5 //Exec z8 = t41 & y10; into r2
	and r9, r1, r9 //Exec z17 = t41 & y8; into r9
	str.w r9, [sp, #144] //Store r9/z17 on stack
	eor r5, r12, r3 //Exec t44 = t33 ^ t37; into r5
	ldr.w r7, [sp, #156] //Load y15 into r7
	ldr.w r9, [sp, #172] //Load y12 into r9
	and r7, r5, r7 //Exec z0 = t44 & y15; into r7
	and r9, r5, r9 //Exec z9 = t44 & y12; into r9
	and r0, r3, r0 //Exec z10 = t37 & y3; into r0
	and r3, r3, r11 //Exec z1 = t37 & y6; into r3
	eor r3, r3, r7 //Exec tc5 = z1 ^ z0; into r3
	eor r3, r6, r3 //Exec tc11 = tc6 ^ tc5; into r3
	ldr.w r11, [sp, #160] //Load y4 into r11
	ldr.w r5, [sp, #148] //Load y17 into r5
	and r11, r12, r11 //Exec z11 = t33 & y4; into r11
	eor r14, r14, r12 //Exec t42 = t29 ^ t33; into r14
	eor r1, r14, r1 //Exec t45 = t42 ^ t41; into r1
	and r5, r1, r5 //Exec z7 = t45 & y17; into r5
	eor r6, r5, r6 //Exec tc8 = z7 ^ tc6; into r6
	ldr.w r5, [sp, #152] //Load y14 into r5
	str.w r4, [sp, #160] //Store r4/z14 on stack
	and r1, r1, r5 //Exec z16 = t45 & y14; into r1
	ldr.w r5, [sp, #140] //Load y11 into r5
	ldr.w r4, [sp, #164] //Load y9 into r4
	and r5, r14, r5 //Exec z6 = t42 & y11; into r5
	eor r5, r5, r6 //Exec tc16 = z6 ^ tc8; into r5
	and r4, r14, r4 //Exec z15 = t42 & y9; into r4
	eor r14, r4, r5 //Exec tc20 = z15 ^ tc16; into r14
	eor r4, r4, r1 //Exec tc1 = z15 ^ z16; into r4
	eor r1, r0, r4 //Exec tc2 = z10 ^ tc1; into r1
	eor r0, r1, r11 //Exec tc21 = tc2 ^ z11; into r0
	eor r9, r9, r1 //Exec tc3 = z9 ^ tc2; into r9
	eor r1, r9, r5 //Exec S0 = tc3 ^ tc16; into r1
	eor r9, r9, r3 //Exec S3 = tc3 ^ tc11; into r9
	eor r3, r9, r5 //Exec S1 = S3 ^ tc16 ^ 1; into r3
	eor r11, r10, r4 //Exec tc13 = z13 ^ tc1; into r11
	ldr.w r4, [sp, #128] //Load U7 into r4
	and r12, r12, r4 //Exec z2 = t33 & U7; into r12
	eor r7, r7, r12 //Exec tc4 = z0 ^ z2; into r7
	eor r12, r8, r7 //Exec tc7 = z12 ^ tc4; into r12
	eor r2, r2, r12 //Exec tc9 = z8 ^ tc7; into r2
	eor r2, r6, r2 //Exec tc10 = tc8 ^ tc9; into r2
	ldr.w r4, [sp, #160] //Load z14 into r4
	eor r12, r4, r2 //Exec tc17 = z14 ^ tc10; into r12
	eor r0, r0, r12 //Exec S5 = tc21 ^ tc17; into r0
	eor r6, r12, r14 //Exec tc26 = tc17 ^ tc20; into r6
	ldr.w r4, [sp, #144] //Load z17 into r4
	ldr.w r12, [sp, #168] //Load tc12 into r12
	eor r6, r6, r4 //Exec S2 = tc26 ^ z17 ^ 1; into r6
	eor r12, r7, r12 //Exec tc14 = tc4 ^ tc12; into r12
	eor r14, r11, r12 //Exec tc18 = tc13 ^ tc14; into r14
	eor r2, r2, r14 //Exec S6 = tc10 ^ tc18 ^ 1; into r2
	eor r11, r8, r14 //Exec S7 = z12 ^ tc18 ^ 1; into r11
	ldr r14, [sp, #176] //restore link register
	eor r4, r12, r9 //Exec S4 = tc14 ^ S3; into r4
	bx lr
//[('r0', 'S5'), ('r1', 'S0'), ('r2', 'S6'), ('r3', 'S1'),
// ('r4', 'S4'), ('r6', 'S2'), ('r9', 'S3'), ('r11', 'S7')]
/******************************************************************************
* Shifts the second row.
* Note that one can take advantage of the inline barrel-shiftrows to compute
* the rotations for free by doing some rework.
******************************************************************************/
.align 2
/*
 * Rotate the 2nd-row quarter state (sbox outputs, see the output map after
 * sbox) right by 8 and store it back to [sp, #32..#63] in ascending output
 * order S8..S15. Clobbers no registers beyond the rotated ones.
 */
shiftrows_1:
	ror r0, r0, #8
	ror r1, r1, #8
	ror r2, r2, #8
	ror r3, r3, #8
	ror r4, r4, #8
	ror r6, r6, #8
	ror r9, r9, #8
	ror r11, r11, #8
	strd r1, r3, [sp, #32]	// S8,  S9
	strd r6, r9, [sp, #40]	// S10, S11
	strd r4, r0, [sp, #48]	// S12, S13
	strd r2, r11, [sp, #56]	// S14, S15
	bx lr
/******************************************************************************
* Shifts the third row.
* Note that one can take advantage of the inline barrel-shiftrows to compute
* the rotations for free by doing some rework.
******************************************************************************/
.align 2
/*
 * Rotate the 3rd-row quarter state right by 16 and store it back to
 * [sp, #64..#95] in ascending output order S16..S23.
 */
shiftrows_2:
	ror r0, r0, #16
	ror r1, r1, #16
	ror r2, r2, #16
	ror r3, r3, #16
	ror r4, r4, #16
	ror r6, r6, #16
	ror r9, r9, #16
	ror r11, r11, #16
	strd r1, r3, [sp, #64]	// S16, S17
	strd r6, r9, [sp, #72]	// S18, S19
	strd r4, r0, [sp, #80]	// S20, S21
	strd r2, r11, [sp, #88]	// S22, S23
	bx lr
/******************************************************************************
* Shifts the fourth row.
* Note that one can take advantage of the inline barrel-shiftrows to compute
* the rotations for free by doing some rework.
******************************************************************************/
.align 2
/*
 * Rotate every word of the 4th-row quarter state right by 24 bits, in
 * registers only (the caller stores the results itself). Each rotation
 * is independent, so the order below is irrelevant to the result.
 */
shiftrows_3:
	ror r11, r11, #24
	ror r9, r9, #24
	ror r6, r6, #24
	ror r4, r4, #24
	ror r3, r3, #24
	ror r2, r2, #24
	ror r1, r1, #24
	ror r0, r0, #24
	bx lr
/******************************************************************************
* MixColumns according to the barrel-shiftrows representation.
******************************************************************************/
.align 2
/*
 * MixColumns over the full 1024-bit state held at [sp, #0..#127]
 * (barrel-shiftrows layout: S0..S31 at word offsets 0..124).
 * On entry r0..r11 still carry the 4th-row sbox outputs; r4 (S28) and
 * r11 (S31) are consumed directly, the rest are reloaded from the stack.
 * [sp, #176] is used to save/restore lr. Clobbers r0-r12, r14.
 */
mixcolumns:
	str.w r14, [sp, #176] //save link register
	ldr.w r0, [sp] //load S0 in r0
	ldr.w r2, [sp, #32] //load s8 in r2
	ldr.w r3, [sp, #64] //load s16 in r3
	ldr.w r12, [sp, #28] //load S7 in r12
	ldr.w r10, [sp, #60] //load S15 in r10 (comment fixed: was "r12")
	ldr.w r14, [sp, #92] //load S23 in r14
	str.w r4, [sp, #112] //store S28 (interleaving saves some cycles)
	eor r6, r0, r2 //r6 <- S0 ^ S8
	eor r7, r2, r3 //r7 <- S8 ^ S16
	eor r8, r3, r1 //r8 <- S16 ^ S24
	eor r9, r1, r0 //r9 <- S24 ^ S0
	eor r0, r12, r10 //r0 <- S7 ^ S15
	eor r1, r10, r14 //r1 <- S15 ^ S23
	eor r2, r14, r11 //r2 <- S23 ^ S31
	eor r3, r11, r12 //r3 <- S31 ^ S7
	eor r4, r6, r2 //r4 <- S0 ^ S8 ^ S23 ^ S31
	eor r4, r4, r10 //r4 <- S'7 = S0 ^ S8 ^ S23 ^ S31 ^ S15
	ldr.w r10, [sp, #24] //load S6 in r10
	str.w r4, [sp, #28] //store S'7
	eor r4, r7, r2 //r4 <- S8 ^ S16 ^ S23 ^ S31
	eor r4, r4, r12 //r4 <- S'15 = S8 ^ S16 ^ S23 ^ S31 ^ S7
	ldr.w r12, [sp, #56] //load S14 in r12
	str.w r4, [sp, #60] //store S'15
	eor r4, r8, r0 //r4 <- S16 ^ S24 ^ S7 ^ S15
	eor r4, r4, r11 //r4 <- S'23 = S16 ^ S24 ^ S7 ^ S15 ^ S31
	ldr.w r11, [sp, #88] //load S22 in r11
	str.w r4, [sp,#92] //store S'23
	eor r4, r9, r0 //r4 <- S24 ^ S0 ^ S7 ^ S15
	eor r4, r4, r14 //r4 <- S'31 = S24 ^ S0 ^ S7 ^ S15 ^ S23
	ldr.w r14, [sp, #120] //load S30 in r14
	str.w r4, [sp, #124] //store S'31
	eor r4, r11, r14 //r4 <- S22 ^ S30
	eor r5, r14, r10 //r5 <- S30 ^ S6
	eor r0, r0, r6 //r0 <- S7 ^ S15 ^ S0 ^ S8
	eor r0, r0, r4 //r0 <- S7 ^ S15 ^ S0 ^ S8 ^ S22 ^ S30
	eor r0, r0, r12 //r0 <- S'6 = S7 ^ S15 ^ S0 ^ S8 ^ S22 ^ S30 ^ S14
	str.w r0, [sp, #24] //store S'6
	eor r1, r1, r7 //r1 <- S15 ^ S23 ^ S8 ^ S16 (comment fixed: was "S7 ^ S15")
	eor r1, r1, r4 //r1 <- S15 ^ S23 ^ S8 ^ S16 ^ S22 ^ S30
	eor r1, r1, r10 //r1 <- S'14 = S15 ^ S23 ^ S8 ^ S16 ^ S22 ^ S30 ^ S6
	str.w r1, [sp, #56] //store S'14
	eor r0, r10, r12 //r0 <- S6 ^ S14
	eor r1, r12, r11 //r1 <- S14 ^ S22
	eor r2, r2, r8 //r2 <- S23 ^ S31 ^ S16 ^ S24
	eor r2, r2, r5 //r2 <- S23 ^ S31 ^ S16 ^ S24 ^ S30 ^ S6
	eor r2, r2, r12 //r2 <- S'22 = S23 ^ S31 ^ S16 ^ S24 ^ S30 ^ S6 ^ S14
	ldr.w r10, [sp, #20] //load S5 in r10
	ldr.w r12, [sp, #52] //load S13 in r12
	str.w r2, [sp, #88] //store S'22
	eor r3, r3, r9 //r3 <- S31 ^ S7 ^ S24 ^ S0
	eor r3, r3, r0 //r3 <- S31 ^ S7 ^ S24 ^ S0 ^ S6 ^ S14
	eor r3, r3, r11 //r3 <- S'30 = S31 ^ S7 ^ S24 ^ S0 ^ S6 ^ S14 ^ S22
	ldr.w r11, [sp, #84] //load S21 in r11
	ldr.w r14, [sp, #116] //load S29 in r14
	str.w r3, [sp, #120] //store S'30
	eor r2, r12, r11 //r2 <- S13 ^ S21
	eor r3, r11, r14 //r3 <- S21 ^ S29
	eor r0, r0, r2 //r0 <- S6 ^ S14 ^ S13 ^ S21
	eor r0, r0, r14 //r0 <- S'5 = S6 ^ S14 ^ S13 ^ S21 ^ S29
	str.w r0, [sp, #20] //store S'5
	eor r1, r1, r3 //r1 <- S14 ^ S22 ^ S21 ^ S29
	eor r1, r1, r10 //r1 <- S'13 = S14 ^ S22 ^ S21 ^ S29 ^ S5
	str.w r1, [sp, #52] //store S'13
	eor r0, r14, r10 //r0 <- S29 ^ S5
	eor r1, r10, r12 //r1 <- S5 ^ S13
	eor r4, r4, r0 //r4 <- S22 ^ S30 ^ S29 ^ S5
	eor r4, r4, r12 //r4 <- S'21 = S22 ^ S30 ^ S29 ^ S5 ^ S13
	ldr.w r10, [sp, #16] //load S4 in r10
	ldr.w r12, [sp, #48] //load S12 in r12
	str.w r4, [sp, #84] //store S'21
	eor r5, r5, r1 //r5 <- S30 ^ S6 ^ S5 ^ S13
	eor r5, r5, r11 //r5 <- S'29 = S30 ^ S6 ^ S5 ^ S13 ^ S21
	ldr.w r11, [sp, #80] //load S20 in r11
	ldr.w r14, [sp, #112] //load S28 in r14
	str.w r5, [sp, #116] //store S'29
	eor r4, r12, r11 //r4 <- S12 ^ S20
	eor r5, r11, r14 //r5 <- S20 ^ S28
	eor r1, r1, r6 //r1 <- S5 ^ S13 ^ S0 ^ S8
	eor r1, r1, r4 //r1 <- S5 ^ S13 ^ S0 ^ S8 ^ S12 ^ S20
	eor r1, r1, r14 //r1 <- S'4 = S5 ^ S13 ^ S0 ^ S8 ^ S12 ^ S20 ^ S28
	str.w r1, [sp, #16] //store S'4
	eor r2, r2, r7 //r2 <- S13 ^ S21 ^ S8 ^ S16
	eor r2, r2, r5 //r2 <- S13 ^ S21 ^ S8 ^ S16 ^ S20 ^ S28
	eor r2, r2, r10 //r2 <- S'12 = S13 ^ S21 ^ S8 ^ S16 ^ S20 ^ S28 ^ S4
	str.w r2, [sp, #48] //store S'12
	eor r1, r14, r10 //r1 <- S28 ^ S4
	eor r2, r10, r12 //r2 <- S4 ^ S12
	eor r3, r3, r8 //r3 <- S21 ^ S29 ^ S16 ^ S24
	eor r3, r3, r1 //r3 <- S21 ^ S29 ^ S16 ^ S24 ^ S28 ^ S4
	eor r3, r3, r12 //r3 <- S'20 = S21 ^ S29 ^ S16 ^ S24 ^ S28 ^ S4 ^ S12
	ldr.w r10, [sp, #12] //load S3 in r10
	ldr.w r12, [sp, #44] //load S11 in r12
	str.w r3, [sp, #80] //store S'20
	eor r0, r0, r9 //r0 <- S29 ^ S5 ^ S24 ^ S0
	eor r0, r0, r2 //r0 <- S29 ^ S5 ^ S24 ^ S0 ^ S4 ^ S12
	eor r0, r0, r11 //r0 <- S'28 = S29 ^ S5 ^ S24 ^ S0 ^ S4 ^ S12 ^ S20
	ldr.w r11, [sp, #76] //load S19 in r11
	ldr.w r14, [sp, #108] //load S27 in r14
	str.w r0, [sp, #112] //store S'28
	eor r0, r12, r11 //r0 <- S11 ^ S19
	eor r3, r11, r14 //r3 <- S19 ^ S27
	eor r2, r2, r6 //r2 <- S4 ^ S12 ^ S0 ^ S8
	eor r2, r2, r0 //r2 <- S4 ^ S12 ^ S0 ^ S8 ^ S11 ^ S19
	eor r2, r2, r14 //r2 <- S'3 = S4 ^ S12 ^ S0 ^ S8 ^ S11 ^ S19 ^ S27
	str.w r2, [sp, #12] //store S'3
	eor r4, r4, r7 //r4 <- S12 ^ S20 ^ S8 ^ S16
	eor r4, r4, r3 //r4 <- S12 ^ S20 ^ S8 ^ S16 ^ S19 ^ S27
	eor r4, r4, r10 //r4 <- S'11 = S12 ^ S20 ^ S8 ^ S16 ^ S19 ^ S27 ^ S3
	str.w r4, [sp, #44] //store S'11
	eor r4, r14, r10 //r4 <- S27 ^ S3
	eor r2, r10, r12 //r2 <- S3 ^ S11
	eor r5, r5, r8 //r5 <- S20 ^ S28 ^ S16 ^ S24
	eor r5, r5, r4 //r5 <- S20 ^ S28 ^ S16 ^ S24 ^ S27 ^ S3
	eor r5, r5, r12 //r5 <- S'19 = S20 ^ S28 ^ S16 ^ S24 ^ S27 ^ S3 ^ S11
	ldr.w r10, [sp, #8] //load S2 in r10
	ldr.w r12, [sp, #40] //load S10 in r12
	str.w r5, [sp, #76] //store S'19
	eor r1, r1, r9 //r1 <- S28 ^ S4 ^ S24 ^ S0
	eor r1, r1, r2 //r1 <- S28 ^ S4 ^ S24 ^ S0 ^ S3 ^ S11
	eor r1, r1, r11 //r1 <- S'27 = S28 ^ S4 ^ S24 ^ S0 ^ S3 ^ S11 ^ S19
	ldr.w r11, [sp, #72] //load S18 in r11
	ldr.w r14, [sp, #104] //load S26 in r14
	str.w r1, [sp, #108] //store S'27
	eor r1, r12, r11 //r1 <- S10 ^ S18
	eor r5, r11, r14 //r5 <- S18 ^ S26
	eor r2, r2, r1 //r2 <- S3 ^ S11 ^ S10 ^ S18
	eor r2, r2, r14 //r2 <- S'2 = S3 ^ S11 ^ S10 ^ S18 ^ S26
	str.w r2, [sp, #8] //store S'2
	eor r0, r0, r5 //r0 <- S11 ^ S19 ^ S18 ^ S26
	eor r0, r0, r10 //r0 <- S'10 = S11 ^ S19 ^ S18 ^ S26 ^ S2
	str.w r0, [sp, #40] //store S'10
	eor r2, r14, r10 //r2 <- S26 ^ S2
	eor r0, r10, r12 //r0 <- S2 ^ S10
	eor r3, r3, r2 //r3 <- S19 ^ S27 ^ S26 ^ S2
	eor r3, r3, r12 //r3 <- S'18 = S19 ^ S27 ^ S26 ^ S2 ^S10
	ldr.w r10, [sp, #4] //load S1 in r10
	ldr.w r12, [sp, #36] //load S9 in r12
	str.w r3, [sp, #72] //store S'18
	eor r4, r4, r0 //r4 <- S27 ^ S3 ^ S2 ^ S10
	eor r4, r4, r11 //r4 <- S'26 = S27 ^ S3 ^ S2 ^ S10 ^ S18
	ldr.w r11, [sp, #68] //load S17 in r11
	ldr.w r14, [sp, #100] //load S25 in r14
	str.w r4, [sp, #104] //store S'26
	eor r3, r12, r11 //r3 <- S9 ^ S17
	eor r4, r11, r14 //r4 <- S17 ^ S25
	eor r0, r0, r3 //r0 <- S2 ^ S10 ^ S9 ^ S17
	eor r0, r0, r14 //r0 <- S'1 = S2 ^ S10 ^ S9 ^ S17 ^ S25
	str.w r0, [sp, #4] //store S'1
	eor r1, r1, r4 //r1 <- S10 ^ S18 ^ S17 ^ S25
	eor r1, r1, r10 //r1 <- S'9 = S10 ^ S18 ^ S17 ^ S25 ^ S1
	str.w r1, [sp, #36] //store S'9
	eor r0, r14, r10 //r0 <- S25 ^ S1
	eor r1, r10, r12 //r1 <- S1 ^ S9
	eor r5, r5, r0 //r5 <- S18 ^ S26 ^ S25 ^ S1
	eor r5, r5, r12 //r5 <- S'17 = S18 ^ S26 ^ S25 ^ S1 ^ S9
	ldr.w r10, [sp] //load S0 in r10
	ldr.w r12, [sp, #32] //load S8 in r12
	str.w r5, [sp, #68] //store S'17
	eor r2, r2, r1 //r2 <- S26 ^ S2 ^ S1 ^ S9
	eor r2, r2, r11 //r2 <- S'25 = S26 ^ S2 ^ S1 ^ S9 ^ S17
	ldr.w r11, [sp, #64] //load S16 in r11
	ldr.w r14, [sp, #96] //load S24 in r14
	str.w r2, [sp, #100] //store S'25
	eor r1, r1, r7 //r1 <- S1 ^ S9 ^ S8 ^ S16
	eor r1, r1, r14 //r1 <- S'0 = S1 ^ S9 ^ S8 ^ S16 ^ S24
	str.w r1, [sp] //store S'0
	eor r3, r3, r8 //r3 <- S9 ^ S17 ^ S16 ^ S24
	eor r3, r3, r10 //r3 <- S'8 = S9 ^ S17 ^ S16 ^ S24 ^ S0
	str.w r3, [sp, #32] //store S'8
	eor r4, r4, r9 //r4 <- S17 ^ S25 ^ S24 ^ S0
	eor r4, r4, r12 //r4 <- S'16 = S17 ^ S25 ^ S24 ^ S0 ^ S8
	ldr.w r14, [sp, #176] //restore link register
	str.w r4, [sp, #64] //store S'16
	eor r0, r0, r6 //r0 <- S25 ^ S1 ^ S0 ^ S8
	eor r0, r0, r11 //r0 <- S'24 = S25 ^ S1 ^ S0 ^ S8 ^ S16
	str r0, [sp, #96] //store S'24
	bx lr
/******************************************************************************
* Subroutine for the first layer of packing.
******************************************************************************/
.align 2
/*
 * 1st packing layer: reads 8 iterations x 4 words from the input pointer
 * in r2 (post-incremented), byte-interleaves each pair with SWAPMOVE
 * (mask 0x00ff00ff, shift 8) and scatters the 4 results across the stack
 * at strides of 32 bytes. sp advances by 4 per iteration (32 total).
 * Clobbers r0, r3-r7, r12.
 */
packing_0:
	movw r0, #0x00ff
	movt r0, #0x00ff // mask for SWAPMOVE
	mov r3, #0 // loop counter
loop_p0:
	ldmia r2!, {r4-r7} // load input
	add.w r3, r3, 1 // increment loop counter
	swpmv r4, r5, r4, r5, r0, 8, r12
	swpmv r6, r7, r6, r7, r0, 8, r12
	str.w r4, [sp], #4 // store state words on the stack (sp advances)
	str.w r5, [sp, #28] // store state words on the stack
	str.w r6, [sp, #60] // store state words on the stack
	str.w r7, [sp, #92] // store state words on the stack
	cmp r3, #7
	ble loop_p0 // loop until r3 <= 7
	bx lr
/******************************************************************************
* Subroutine for the second layer of packing.
******************************************************************************/
.align 2
/*
 * 2nd packing layer: halfword-interleaves word pairs 64 bytes apart on
 * the stack with SWAPMOVE (mask 0x0000ffff, shift 16), 8 iterations.
 * sp advances by 8 per iteration (64 total). Clobbers r0, r3-r7, r12.
 */
packing_1:
	movw r0, #0xffff // mask for SWAPMOVE
	mov r3, #0 // loop counter
loop_p1:
	ldrd r4, r5, [sp] // load state from the stack
	ldrd r6, r7, [sp, #64] // load state from the stack
	add.w r3, r3, 1 // increment loop counter
	swpmv r4, r6, r4, r6, r0, 16, r12
	swpmv r5, r7, r5, r7, r0, 16, r12
	strd r4, r5, [sp], #8 // store state words on the stack (sp advances)
	strd r6, r7, [sp, #56] // store state words on the stack
	cmp r3, #7
	ble loop_p1
	bx lr
/******************************************************************************
* Subroutine for the third layer of packing.
******************************************************************************/
.align 2
/*
 * 3rd packing layer: full bit-transpose within each group of 8 stack
 * words using SWAPMOVE at shifts 1 (mask 0x55...), 2 (0x33...) and
 * 4 (0x0f...), 4 iterations of 8 words. sp advances by 32 per iteration
 * (128 total). Clobbers r0-r3 (masks/counter), r4-r12.
 */
packing_2:
	movw r2, #0x0f0f
	movt r2, #0x0f0f // mask for SWAPMOVE
	eor r1, r2, r2, lsl #2 // mask for SWAPMOVE (0x33333333)
	eor r0, r1, r1, lsl #1 // mask for SWAPMOVE (0x55555555)
	mov r3, #0 // loop counter
loop_p2:
	ldm sp, {r4-r11}
	add.w r3, r3, 1
	swpmv r5, r4, r5, r4, r0, 1, r12
	swpmv r7, r6, r7, r6, r0, 1, r12
	swpmv r9, r8, r9, r8, r0, 1, r12
	swpmv r11, r10, r11, r10, r0, 1, r12
	swpmv r6, r4, r6, r4, r1, 2, r12
	swpmv r7, r5, r7, r5, r1, 2, r12
	swpmv r10, r8, r10, r8, r1, 2, r12
	swpmv r11, r9, r11, r9, r1, 2, r12
	swpmv r8, r4, r8, r4, r2, 4, r12
	swpmv r9, r5, r9, r5, r2, 4, r12
	swpmv r10, r6, r10, r6, r2, 4, r12
	swpmv r11, r7, r11, r7, r2, 4, r12
	stmia sp!, {r4-r11} // store the state words on the stack
	cmp r3, #3
	ble loop_p2 //loop until r3 <= 3
	bx lr
/******************************************************************************
* Subroutine for the first layer of unpacking.
******************************************************************************/
.align 2
/*
 * Inverse of packing_0: gathers 4 words from stack strides of 32 bytes,
 * undoes the byte-level SWAPMOVE and writes them sequentially to the
 * output pointer in r2 (post-incremented). sp advances by 4 per
 * iteration (32 total). Clobbers r0, r3-r7, r12.
 */
unpacking_0:
	movw r0, #0x00ff
	movt r0, #0x00ff // mask for SWAPMOVE
	mov r3, #0 // loop counter
loop_up0:
	ldr.w r4, [sp], #4 // load state words (sp advances)
	ldr.w r5, [sp, #28]
	ldr.w r6, [sp, #60]
	ldr.w r7, [sp, #92]
	add.w r3, r3, 1 // increment loop counter
	swpmv r4, r5, r4, r5, r0, 8, r12
	swpmv r6, r7, r6, r7, r0, 8, r12
	stmia r2!, {r4-r7} // store to output array
	cmp r3, #7
	ble loop_up0 // loop until r3 <= 7
	bx lr
/******************************************************************************
* Subroutine for the third layer of unpacking.
******************************************************************************/
.align 2
/*
 * Inverse of packing_2: same three SWAPMOVE stages but applied in the
 * reverse order (shift 4, then 2, then 1) so the bit-transpose is
 * undone. 4 iterations of 8 stack words; sp advances 32 per iteration
 * (128 total). Clobbers r0-r3, r4-r12.
 */
unpacking_2:
	movw r2, #0x0f0f
	movt r2, #0x0f0f // mask for SWAPMOVE
	eor r1, r2, r2, lsl #2 // mask for SWAPMOVE (0x33333333)
	eor r0, r1, r1, lsl #1 // mask for SWAPMOVE (0x55555555)
	mov r3, #0 // loop counter
loop_up2:
	ldm sp, {r4-r11}
	add.w r3, r3, 1
	swpmv r8, r4, r8, r4, r2, 4, r12
	swpmv r9, r5, r9, r5, r2, 4, r12
	swpmv r10, r6, r10, r6, r2, 4, r12
	swpmv r11, r7, r11, r7, r2, 4, r12
	swpmv r6, r4, r6, r4, r1, 2, r12
	swpmv r7, r5, r7, r5, r1, 2, r12
	swpmv r10, r8, r10, r8, r1, 2, r12
	swpmv r11, r9, r11, r9, r1, 2, r12
	swpmv r5, r4, r5, r4, r0, 1, r12
	swpmv r7, r6, r7, r6, r0, 1, r12
	swpmv r9, r8, r9, r8, r0, 1, r12
	swpmv r11, r10, r11, r10, r0, 1, r12
	stmia sp!, {r4-r11} // store the state words on the stack
	cmp r3, #3
	ble loop_up2 //loop until r3 <= 3
	bx lr
/******************************************************************************
* Encryption of 8 128-bit blocks of data in parallel using AES-128 with the
* barrel-shiftrows representation.
* The round keys are assumed to be pre-computed.
******************************************************************************/
.align 2
@ void aes128_encrypt(param* ctext, u32* rkey, const u8* ptext)
@ Encrypts 8 blocks in parallel. Stack frame: 128-byte packed state below
@ sp, rkey pointer at [sp, #180], round counter at [sp, #184], saved r0
@ (output pointer) reachable at [sp, #188] after the final sp adjustment.
@ Runs 9 full rounds (counter 1..9, cmp #8 / ble) plus the final round
@ without mixcolumns.
.global aes128_encrypt
.type aes128_encrypt,%function
aes128_encrypt:
	push {r0-r12,r14}
	sub.w sp, sp, #188
	str.w r1, [sp, #180] // store pointer to rkey on the stack
	mov r1, #0 // init loop counter
	str.w r1, [sp, #184] // store loop counter on the stack
	bl packing_0 // 1st packing layer
	sub.w sp, sp, #32 // undo packing_0's sp advance
	bl packing_1 // 2nd packing layer
	sub.w sp, sp, #64 // undo packing_1's sp advance
	bl packing_2 // 3rd packing layer
	sub.w sp, sp, #128 // undo packing_2's sp advance
loop_aes128_core:
	mov r12, sp // r12 points to 1st quarter state
	bl add_round_key // addroundkey on 1st quarter state
	bl sbox // sbox on 1st quarter state
	stm sp, {r1,r3,r6,r9} // row 0 needs no rotation: store S0..S3
	strd r4, r0, [sp, #16] // S4, S5
	strd r2, r11, [sp, #24] // S6, S7
	add.w r12, sp, #32 // r12 points to 2nd quarter state
	bl add_round_key // addroundkey on 2nd quarter state
	bl sbox // sbox on 2nd quarter state
	bl shiftrows_1 // shiftrows on 2nd quarter state
	add.w r12, sp, #64 // r12 points to 3rd quarter state
	bl add_round_key // addroundkey on 3rd quarter state
	bl sbox // sbox on 3rd quarter state
	bl shiftrows_2 // shiftrows on 3rd quarter state
	add.w r12, sp, #96 // r12 points to 4th quarter state
	bl add_round_key // addroundkey on 4th quarter state
	bl sbox // sbox on 4th quarter state
	bl shiftrows_3 // shiftrows on 4th quarter state
	strd r1, r3, [sp, #96] // S24, S25
	strd r6, r9, [sp, #104] // S26, S27
	strd r0, r2, [sp, #116] // S29, S30 (S28/S31 stay in r4/r11 for mixcolumns)
	bl mixcolumns // mixcolumns on the entire state
	ldr.w r1, [sp, #184] // load loop counter
	add.w r1, r1, #1 // increment loop counter
	str.w r1, [sp, #184] // store loop counter on the stack
	cmp r1, #8
	ble loop_aes128_core // loop until r1 <= 8 (9 full rounds)
	// Last round
	mov r12, sp // r12 points to 1st quarter state
	bl add_round_key // addroundkey on 1st quarter state
	bl sbox // sbox on 1st quarter state
	stm sp, {r1,r3,r6,r9}
	strd r4, r0, [sp, #16]
	strd r2, r11, [sp, #24]
	add.w r12, sp, #32 // r12 points to 2nd quarter state
	bl add_round_key // addroundkey on 2nd quarter state
	bl sbox // sbox on 2nd quarter state
	bl shiftrows_1 // shiftrows on 2nd quarter state
	add.w r12, sp, #64 // r12 points to 3rd quarter state
	bl add_round_key // addroundkey on 3rd quarter state
	bl sbox // sbox on 3rd quarter state
	bl shiftrows_2 // shiftrows on 3rd quarter state
	add.w r12, sp, #96 // r12 points to 4th quarter state
	bl add_round_key // addroundkey on 4th quarter state
	bl sbox // sbox on 4th quarter state
	bl shiftrows_3 // shiftrows on 4th quarter state
	strd r1, r3, [sp, #96]
	strd r6, r9, [sp, #104]
	strd r4, r0, [sp, #112] // no mixcolumns in last round: store all 8
	strd r2, r11, [sp, #120]
	mov r12, sp // r12 points to 1st quarter state
	bl add_round_key // last addroundkey on 1st quarter state
	strd r4, r5, [sp]
	strd r6, r7, [sp, #8]
	strd r8, r9, [sp, #16]
	strd r10, r11, [sp, #24]
	add.w r12, sp, #32 // r12 points to 2nd quarter state
	bl add_round_key // last addroundkey on 2nd quarter state
	strd r4, r5, [sp, #32]
	strd r6, r7, [sp, #40]
	strd r8, r9, [sp, #48]
	strd r10, r11, [sp, #56]
	add.w r12, sp, #64 // r12 points to 3rd quarter state
	bl add_round_key // last addroundkey on 3rd quarter state
	strd r4, r5, [sp, #64]
	strd r6, r7, [sp, #72]
	strd r8, r9, [sp, #80]
	strd r10, r11, [sp, #88]
	add.w r12, sp, #96 // r12 points to 4th quarter state
	bl add_round_key // last addroundkey on 4th quarter state
	strd r4, r5, [sp, #96]
	strd r6, r7, [sp, #104]
	strd r8, r9, [sp, #112]
	strd r10, r11, [sp, #120]
	bl unpacking_2 // order matters, have to use another routine
	sub.w sp, sp, #128 // undo unpacking_2's sp advance
	bl packing_1 // order does not matter, can reuse packing routine
	sub.w sp, sp, #64 // undo packing_1's sp advance
	ldr r2, [sp, #188] // restore output address in r2
	bl unpacking_0 // use another routine for input/output arrays
	add.w sp, sp, #156 // drop remaining frame (unpacking_0 advanced sp by 32)
	pop {r0-r12, r14}
	bx lr
/******************************************************************************
* Encryption of 8 128-bit blocks of data in parallel using AES-256 with the
* barrel-shiftrows representation.
* The round keys are assumed to be pre-computed.
* The 1024-bit packed state lives on the stack as four 256-bit quarter
* states at sp+0, sp+32, sp+64 and sp+96; r12 selects the quarter state
* each subroutine operates on.
******************************************************************************/
.align 2
@ void aes256_encrypt(param* ctext, u32* rkey, const u8* ptext)
.global aes256_encrypt
.type aes256_encrypt,%function
aes256_encrypt:
push {r0-r12,r14}
sub.w sp, sp, #188 // allocate scratch/state space on the stack
str.w r1, [sp, #180] // store pointer to rkey on the stack
mov r1, #0 // init loop counter
str.w r1, [sp, #184] // store loop counter on the stack
bl packing_0 // 1st packing layer
sub.w sp, sp, #32
bl packing_1 // 2nd packing layer
sub.w sp, sp, #64
bl packing_2 // 3rd packing layer
sub.w sp, sp, #128
loop_aes256_core:
mov r12, sp // r12 points to 1st quarter state
bl add_round_key // addroundkey on 1st quarter state
bl sbox // sbox on 1st quarter state
stm sp, {r1,r3,r6,r9} // store 1st quarter state (shiftrows_0 = identity)
strd r4, r0, [sp, #16]
strd r2, r11, [sp, #24]
add.w r12, sp, #32 // r12 points to 2nd quarter state
bl add_round_key // addroundkey on 2nd quarter state
bl sbox // sbox on 2nd quarter state
bl shiftrows_1 // shiftrows on 2nd quarter state
add.w r12, sp, #64 // r12 points to 3rd quarter state
bl add_round_key // addroundkey on 3rd quarter state
bl sbox // sbox on 3rd quarter state
bl shiftrows_2 // shiftrows on 3rd quarter state
add.w r12, sp, #96 // r12 points to 4th quarter state
bl add_round_key // addroundkey on 4th quarter state
bl sbox // sbox on 4th quarter state
bl shiftrows_3 // shiftrows on 4th quarter state
strd r1, r3, [sp, #96]
strd r6, r9, [sp, #104]
// NOTE(review): only 6 of the 8 words are stored here (offsets #112/#124
// are skipped and r4/r11 are not written), unlike the final round below
// which stores all 8 — presumably mixcolumns reads the remaining words
// straight from registers; confirm against the reference implementation.
strd r0, r2, [sp, #116]
bl mixcolumns // mixcolumns on the entire state
ldr.w r1, [sp, #184] // load loop counter
add.w r1, r1, #1 // increment loop counter
str.w r1, [sp, #184] // store loop counter on the stack
cmp r1, #12
ble loop_aes256_core // loop while r1 <= 12 (13 rounds; final round below)
// Last round
mov r12, sp // r12 points to 1st quarter state
bl add_round_key // addroundkey on 1st quarter state
bl sbox // sbox on 1st quarter state
stm sp, {r1,r3,r6,r9} // store 1st quarter state (shiftrows_0 = identity)
strd r4, r0, [sp, #16]
strd r2, r11, [sp, #24]
add.w r12, sp, #32 // r12 points to 2nd quarter state
bl add_round_key // addroundkey on 2nd quarter state
bl sbox // sbox on 2nd quarter state
bl shiftrows_1 // shiftrows on 2nd quarter state
add.w r12, sp, #64 // r12 points to 3rd quarter state
bl add_round_key // addroundkey on 3rd quarter state
bl sbox // sbox on 3rd quarter state
bl shiftrows_2 // shiftrows on 3rd quarter state
add.w r12, sp, #96 // r12 points to 4th quarter state
bl add_round_key // addroundkey on 4th quarter state
bl sbox // sbox on 4th quarter state
bl shiftrows_3 // shiftrows on 4th quarter state
strd r1, r3, [sp, #96]
strd r6, r9, [sp, #104]
strd r4, r0, [sp, #112]
strd r2, r11, [sp, #120]
mov r12, sp // r12 points to 1st quarter state
bl add_round_key // last addroundkey on 1st quarter state
strd r4, r5, [sp]
strd r6, r7, [sp, #8]
strd r8, r9, [sp, #16]
strd r10, r11, [sp, #24]
add.w r12, sp, #32 // r12 points to 2nd quarter state
bl add_round_key // last addroundkey on 2nd quarter state
strd r4, r5, [sp, #32]
strd r6, r7, [sp, #40]
strd r8, r9, [sp, #48]
strd r10, r11, [sp, #56]
add.w r12, sp, #64 // r12 points to 3rd quarter state
bl add_round_key // last addroundkey on 3rd quarter state
strd r4, r5, [sp, #64]
strd r6, r7, [sp, #72]
strd r8, r9, [sp, #80]
strd r10, r11, [sp, #88]
add.w r12, sp, #96 // r12 points to 4th quarter state
bl add_round_key // last addroundkey on 4th quarter state
strd r4, r5, [sp, #96]
strd r6, r7, [sp, #104]
strd r8, r9, [sp, #112]
strd r10, r11, [sp, #120]
bl unpacking_2 // order matters, have to use another routine
sub.w sp, sp, #128
bl packing_1 // order does not matter, can reuse packing routine
sub.w sp, sp, #64
ldr r2, [sp, #188] // restore output address in r2
bl unpacking_0 // use another routine for input/output arrays
add.w sp, sp, #156
pop {r0-r12, r14}
bx lr
|
aadomn/aes
| 19,475
|
armcortexm/barrel_shiftrows/aes_keyschedule_lut.s
|
/******************************************************************************
* Assembly implementations of the AES-128 and AES-256 key schedules to match
* the barrel-shiftrows representation. Note that those implementations rely on
* Look-Up Tables (LUT).
*
* See the paper at https://eprint.iacr.org/2020/1123.pdf for more details.
*
* @author Alexandre Adomnicai, Nanyang Technological University, Singapore
* alexandre.adomnicai@ntu.edu.sg
*
* @date August 2020
******************************************************************************/
.syntax unified
.thumb
/******************************************************************************
* LUT of the AES Sbox used in the key schedule.
* The 256 S-box bytes are packed little-endian into 64 32-bit words: a byte b
* is looked up by loading word [table + (b & 0xfc)] and shifting right by
* (b & 3)*8 (see the indexing code in the round functions below).
******************************************************************************/
.align 2
.type AES_Sbox_compact,%object
AES_Sbox_compact:
.word 0x7b777c63, 0xc56f6bf2, 0x2b670130, 0x76abd7fe
.word 0x7dc982ca, 0xf04759fa, 0xafa2d4ad, 0xc072a49c
.word 0x2693fdb7, 0xccf73f36, 0xf1e5a534, 0x1531d871
.word 0xc323c704, 0x9a059618, 0xe2801207, 0x75b227eb
.word 0x1a2c8309, 0xa05a6e1b, 0xb3d63b52, 0x842fe329
.word 0xed00d153, 0x5bb1fc20, 0x39becb6a, 0xcf584c4a
.word 0xfbaaefd0, 0x85334d43, 0x7f02f945, 0xa89f3c50
.word 0x8f40a351, 0xf5389d92, 0x21dab6bc, 0xd2f3ff10
.word 0xec130ccd, 0x1744975f, 0x3d7ea7c4, 0x73195d64
.word 0xdc4f8160, 0x88902a22, 0x14b8ee46, 0xdb0b5ede
.word 0x0a3a32e0, 0x5c240649, 0x62acd3c2, 0x79e49591
.word 0x6d37c8e7, 0xa94ed58d, 0xeaf4566c, 0x08ae7a65
.word 0x2e2578ba, 0xc6b4a61c, 0x1f74dde8, 0x8a8bbd4b
.word 0x66b53e70, 0x0ef60348, 0xb9573561, 0x9e1dc186
.word 0x1198f8e1, 0x948ed969, 0xe9871e9b, 0xdf2855ce
.word 0x0d89a18c, 0x6842e6bf, 0x0f2d9941, 0x16bb54b0
/******************************************************************************
* Round function of the AES-128 key expansion.
* In: r4-r7 = current round key words rk[0..3], r2 = round constant,
* r3 = address of AES_Sbox_compact.
* Out: r4-r7 = next round key words, also stored at [sp] (sp advances by 16).
* Clobbers r1, r8-r12.
* LUT indexing trick: for a byte b of r7, (b & 0xfc) is the word offset into
* the compact table and (b & 3)*8 (built via the 0x18 mask) is the shift
* that extracts the S-box byte from the loaded word.
******************************************************************************/
.align 2
keyschedule_round_func:
movw r1, #0xfc
and r8, r1, r7, lsr #8 // word offset for byte 1 of rk[3]
and r9, r1, r7, lsr #16 // word offset for byte 2 of rk[3]
and r10, r1, r7, lsr #24 // word offset for byte 3 of rk[3]
and r11, r1, r7 // word offset for byte 0 of rk[3]
ldr r8, [r3, r8] // computes the sbox using the LUT
ldr r9, [r3, r9] // computes the sbox using the LUT
ldr r10, [r3, r10] // computes the sbox using the LUT
ldr r11, [r3, r11] // computes the sbox using the LUT
movw r1, #0x18
and r12, r1, r7, lsr #5 // (byte1 & 3)*8 = shift within the word
lsr r8, r8, r12
and r8, #0xff
and r12, r1, r7, lsr #13 // (byte2 & 3)*8
lsr r9, r9, r12
and r9, #0xff
and r12, r1, r7, lsr #21 // (byte3 & 3)*8
lsr r10, r10, r12
and r10, #0xff
and r12, r1, r7, lsl #3 // (byte0 & 3)*8
lsr r11, r11, r12
and r11, #0xff
eor r4, r2 // adds the rconst
eor r4, r8 // xor the columns (RotWord+SubWord bytes ...
eor r4, r4, r9, ror #24 // xor the columns
eor r4, r4, r10, ror #16 // xor the columns
eor r4, r4, r11, ror #8 // r4 <- rk[4]
eor r5, r4 // r5 <- rk[5]
eor r6, r5 // r6 <- rk[6]
eor r7, r6 // r7 <- rk[7]
strd r4, r5, [sp], #8 // store on the stack for bitslicing
strd r6, r7, [sp], #8 // store on the stack for bitslicing
bx lr
/******************************************************************************
* First half of a double round of the AES-256 key expansion (the half that
* applies RotWord, SubWord and the round constant).
* In: r4-r7 = rk[i..i+3], r11 = rk[i+7], r2 = round constant,
* r3 = address of AES_Sbox_compact.
* Out: r4-r7 = rk[i+8..i+11], also stored at [sp] (sp advances by 16).
* Clobbers r0, r1, r2, r12 (r1/r2 are left holding the 0xfc/0x18 masks,
* which 'aes256_keyschedule_rfunc_1' relies on).
* Operates slightly differently than 'keyschedule_round_func' as 8 words have
* to be maintained in registers (instead of 4).
******************************************************************************/
.align 2
aes256_keyschedule_rfunc_0:
eor r4, r2 // adds the first rconst
movw r1, #0xfc // word-offset mask for the compact LUT
movw r2, #0x18 // (byte & 3)*8 shift mask
and r12, r1, r11, lsr #8
ldr r12, [r3, r12] // computes the sbox using the LUT
and r0, r2, r11, lsr #5
lsr r12, r12, r0
and r12, #0xff
eor r4, r12 // xor the columns (sbox output byte)
and r12, r1, r11, lsr #16
ldr r12, [r3, r12] // computes the sbox using the LUT
and r0, r2, r11, lsr #13
lsr r12, r12, r0
and r12, #0xff
eor r4, r4, r12, ror #24 // xor the columns (sbox output byte)
and r12, r1, r11, lsr #24
ldr r12, [r3, r12] // computes the sbox using the LUT
and r0, r2, r11, lsr #21
lsr r12, r12, r0
and r12, #0xff
eor r4, r4, r12, ror #16 // xor the columns (sbox output byte)
and r12, r1, r11
ldr r12, [r3, r12] // computes the sbox using the LUT
and r0, r2, r11, lsl #3
lsr r12, r12, r0
and r12, #0xff
eor r4, r4, r12, ror #8 // xor the columns (sbox output byte)
eor r5, r4 // xor the columns
eor r6, r5 // xor the columns
eor r7, r6 // xor the columns
strd r4, r5, [sp], #8 // store on the stack for bitslicing
strd r6, r7, [sp], #8 // store on the stack for bitslicing
bx lr
/******************************************************************************
* Double round function of the AES-256 key expansion.
* Note that it expects r2 to contain the corresponding round constant and r3 to
* contain the S-box address.
* Unlike 'aes256_keyschedule_rfunc_0' it doesnt compute the RotWord operation.
******************************************************************************/
aes256_keyschedule_rfunc_1:
and r12, r1, r7, lsr #8
ldr r12, [r3, r12] // computes the sbox using the LUT
and r0, r2, r7, lsr #5
lsr r12, r12, r0
and r12, #0xff
eor r8, r8, r12, lsl #8 // xor the columns (sbox output byte)
and r12, r1, r7, lsr #16
ldr r12, [r3, r12] // computes the sbox using the LUT
and r0, r2, r7, lsr #13
lsr r12, r12, r0
and r12, #0xff
eor r8, r8, r12, lsl #16 // xor the columns (sbox output byte)
and r12, r1, r7, lsr #24
ldr r12, [r3, r12] // computes the sbox using the LUT
and r0, r2, r7, lsr #21
lsr r12, r12, r0
and r12, #0xff
eor r8, r8, r12, lsl #24 // xor the columns (sbox output byte)
and r12, r1, r7
ldr r12, [r3, r12] // computes the sbox using the LUT
and r0, r2, r7, lsl #3
lsr r12, r12, r0
and r12, #0xff
eor r8, r8, r12 // xor the columns (sbox output byte)
eor r9, r8 // xor the columns
eor r10, r9 // xor the columns
eor r11, r10 // xor the columns
strd r8, r9, [sp], #8 // store on the stack for bitslicing
strd r10, r11, [sp], #8 // store on the stack for bitslicing
bx lr
/******************************************************************************
* SWAPMOVE calls used in the packing_rkey subroutine.
* In: r4-r7 = four round key words; r2 = 0x0000ffff and r3 = 0x00ff00ff
* (SWAPMOVE masks, expected to be pre-loaded by the caller).
* Out: r4-r7 byte-transposed in place.
* NOTE(review): lr is parked at [sp, #-4], i.e. below the stack pointer,
* without adjusting sp — safe only if nothing else (e.g. an interrupt
* handler sharing this stack) can write there; confirm for the target.
******************************************************************************/
.align 2
swapmove_rkey:
str r14, [sp, #-4] // store link register on the stack
eor r14, r5, r4, lsr #8 // SWAPMOVE(r4, r5, 0x00ff00ff, 8) ...
and r14, r14, r3
eor r5, r5, r14
eor r4, r4, r14, lsl #8 // ... SWAPMOVE(r4, r5, 0x00ff00ff, 8)
eor r14, r7, r6, lsr #8 // SWAPMOVE(r6, r7, 0x00ff00ff, 8) ...
and r14, r14, r3
eor r7, r7, r14
eor r6, r6, r14, lsl #8 // ... SWAPMOVE(r6, r7, 0x00ff00ff, 8)
eor r14, r6, r4, lsr #16 // SWAPMOVE(r4, r6, 0x0000ffff, 16) ...
and r14, r14, r2
eor r6, r6, r14
eor r4, r4, r14, lsl #16 // ... SWAPMOVE(r4, r6, 0x0000ffff, 16)
eor r14, r7, r5, lsr #16 // SWAPMOVE(r5, r7, 0x0000ffff, 16) ...
and r14, r14, r2
eor r7, r7, r14
eor r5, r5, r14, lsl #16 // ... SWAPMOVE(r5, r7, 0x0000ffff, 16)
ldr r14, [sp, #-4] // restore link register
bx lr
/******************************************************************************
* Packing subroutine used to rearrange the rkeys to match the barrel-shiftrows.
* It is about twice more efficient than the 'packing' func. This optimization
* is possible because the 8 16-byte blocks to pack are all equal.
* In: r4 = word to expand, r1 = 0x80808080, r0 = output pointer.
* Out: 8 words written through r0 (r0 advances by 32).
* Each bit b_i of r4 is replicated across a full byte: isolate the bit in
* the top position of its byte with the 0x80808080 mask, then smear it
* right with the or/shift cascade. Clobbers r8-r11.
******************************************************************************/
.align 2
packing_rkey:
and r8, r4, r1 // r8 <- r4 & 0x80808080
orr r8, r8, r8, lsr #1 // r8 <- r8 | r8 >> 1
orr r8, r8, r8, lsr #2 // r8 <- r8 | r8 >> 2
orr r8, r8, r8, lsr #4 // r8 <- r8 | r8 >> 4
and r9, r1, r4, lsl #1 // r9 <- r4 << 1 & 0x80808080
orr r9, r9, r9, lsr #1 // r9 <- r9 | r9 >> 1
orr r9, r9, r9, lsr #2 // r9 <- r9 | r9 >> 2
orr r9, r9, r9, lsr #4 // r9 <- r9 | r9 >> 4
and r10, r1, r4, lsl #2 // r10<- r4 << 2 & 0x80808080
orr r10, r10, r10, lsr #1 // r10<- r10 | r10 >> 1
orr r10, r10, r10, lsr #2 // r10<- r10 | r10 >> 2
orr r10, r10, r10, lsr #4 // r10<- r10 | r10 >> 4
and r11, r1, r4, lsl #3 // r11<- r4 << 3 & 0x80808080
orr r11, r11, r11, lsr #1 // r11<- r11 | r11 >> 1
orr r11, r11, r11, lsr #2 // r11<- r11 | r11 >> 2
orr r11, r11, r11, lsr #4 // r11<- r11 | r11 >> 4
stmia r0!, {r8-r11}
and r8, r1, r4, lsl #4 // r8 <- r4 << 4 & 0x80808080
orr r8, r8, r8, lsr #1 // r8 <- r8 | r8 >> 1
orr r8, r8, r8, lsr #2 // r8 <- r8 | r8 >> 2
orr r8, r8, r8, lsr #4 // r8 <- r8 | r8 >> 4
and r9, r1, r4, lsl #5 // r9 <- r4 << 5 & 0x80808080
orr r9, r9, r9, lsr #1 // r9 <- r9 | r9 >> 1
orr r9, r9, r9, lsr #2 // r9 <- r9 | r9 >> 2
orr r9, r9, r9, lsr #4 // r9 <- r9 | r9 >> 4
and r10, r1, r4, lsl #6 // r10<- r4 << 6 & 0x80808080
orr r10, r10, r10, lsr #1 // r10<- r10 | r10 >> 1
orr r10, r10, r10, lsr #2 // r10<- r10 | r10 >> 2
orr r10, r10, r10, lsr #4 // r10<- r10 | r10 >> 4
and r11, r1, r4, lsl #7 // r11<- r4 << 7 & 0x80808080
orr r11, r11, r11, lsr #1 // r11<- r11 | r11 >> 1
orr r11, r11, r11, lsr #2 // r11<- r11 | r11 >> 2
orr r11, r11, r11, lsr #4 // r11<- r11 | r11 >> 4
stmia r0!, {r8-r11}
bx lr
/******************************************************************************
* Packing subroutine used to rearrange the rkeys to match the barrel-shiftrows.
* Same as 'packing_rkey' but includes NOT to speed up SBox calculations in the
* encryption function. Could be removed by adding 'mvn' instructions manually
* instead of taking advantage of the 'bic' instruction to save some cycles.
* The NOT is applied to bit slices 1, 2, 6 and 7 (the 'bic' lines): bic
* computes mask & ~(r4 << n), so those slices are stored complemented.
******************************************************************************/
.align 2
packing_rkey_not:
and r8, r4, r1 // r8 <- r4 & 0x80808080
orr r8, r8, r8, lsr #1 // r8 <- r8 | r8 >> 1
orr r8, r8, r8, lsr #2 // r8 <- r8 | r8 >> 2
orr r8, r8, r8, lsr #4 // r8 <- r8 | r8 >> 4
bic r9, r1, r4, lsl #1 // r9 <- ~(r4 << 1) & 0x80808080
orr r9, r9, r9, lsr #1 // r9 <- r9 | r9 >> 1
orr r9, r9, r9, lsr #2 // r9 <- r9 | r9 >> 2
orr r9, r9, r9, lsr #4 // r9 <- r9 | r9 >> 4
bic r10, r1, r4, lsl #2 // r10<- ~(r4 << 2) & 0x80808080
orr r10, r10, r10, lsr #1 // r10<- r10 | r10 >> 1
orr r10, r10, r10, lsr #2 // r10<- r10 | r10 >> 2
orr r10, r10, r10, lsr #4 // r10<- r10 | r10 >> 4
and r11, r1, r4, lsl #3 // r11<- r4 << 3 & 0x80808080
orr r11, r11, r11, lsr #1 // r11<- r11 | r11 >> 1
orr r11, r11, r11, lsr #2 // r11<- r11 | r11 >> 2
orr r11, r11, r11, lsr #4 // r11<- r11 | r11 >> 4
stmia r0!, {r8-r11}
and r8, r1, r4, lsl #4 // r8 <- r4 << 4 & 0x80808080
orr r8, r8, r8, lsr #1 // r8 <- r8 | r8 >> 1
orr r8, r8, r8, lsr #2 // r8 <- r8 | r8 >> 2
orr r8, r8, r8, lsr #4 // r8 <- r8 | r8 >> 4
and r9, r1, r4, lsl #5 // r9 <- r4 << 5 & 0x80808080
orr r9, r9, r9, lsr #1 // r9 <- r9 | r9 >> 1
orr r9, r9, r9, lsr #2 // r9 <- r9 | r9 >> 2
orr r9, r9, r9, lsr #4 // r9 <- r9 | r9 >> 4
bic r10, r1, r4, lsl #6 // r10<- ~(r4 << 6) & 0x80808080
orr r10, r10, r10, lsr #1 // r10<- r10 | r10 >> 1
orr r10, r10, r10, lsr #2 // r10<- r10 | r10 >> 2
orr r10, r10, r10, lsr #4 // r10<- r10 | r10 >> 4
bic r11, r1, r4, lsl #7 // r11<- ~(r4 << 7) & 0x80808080
orr r11, r11, r11, lsr #1 // r11<- r11 | r11 >> 1
orr r11, r11, r11, lsr #2 // r11<- r11 | r11 >> 2
orr r11, r11, r11, lsr #4 // r11<- r11 | r11 >> 4
stmia r0!, {r8-r11}
bx lr
/******************************************************************************
* Pre-computes all the AES-128 round keys according to the barrel-shiftrows
* representation. Note that additional NOTs are incorporated to speed up SBox
* calculations in the encryption function.
* Phase 1 expands the 10 round keys onto the stack (each round function
* advances sp by 16); phase 2 packs rk0 (reloaded from the key) without
* NOTs, then the 10 expanded keys with NOTs. r0 (output pointer) is never
* clobbered by the round functions, so it is live across phase 1.
******************************************************************************/
@ void aes128_keyschedule_lut(u32* rkeys, const u8* key);
.global aes128_keyschedule_lut
.type aes128_keyschedule_lut,%function
.align 2
aes128_keyschedule_lut:
push {r0-r12,r14}
sub.w sp, #160 // allocate space to store the 10 rkeys
ldm r1, {r4-r7} // load the encryption key
adr r3, AES_Sbox_compact // load the sbox LUT address in r3
movw r2, #0x01 // 1st rconst
bl keyschedule_round_func // 1st round
movw r2, #0x02 // 2nd rconst
bl keyschedule_round_func // 2nd round
movw r2, #0x04 // 3rd rconst
bl keyschedule_round_func // 3rd round
movw r2, #0x08 // 4th rconst
bl keyschedule_round_func // 4th round
movw r2, #0x10 // 5th rconst
bl keyschedule_round_func // 5th round
movw r2, #0x20 // 6th rconst
bl keyschedule_round_func // 6th round
movw r2, #0x40 // 7th rconst
bl keyschedule_round_func // 7th round
movw r2, #0x80 // 8th rconst
bl keyschedule_round_func // 8th round
movw r2, #0x1b // 9th rconst
bl keyschedule_round_func // 9th round
movw r2, #0x36 // 10th rconst
bl keyschedule_round_func // 10th round
//done expanding, now start bitslicing
ldr.w r1, [sp, #4] // [sp,#4] = r1 saved by push = key pointer
ldm r1, {r4-r7} // reload the original encryption key (rk0)
sub.w sp, #160 // stack now points to the 1st key
movw r1, #0x8080
movt r1, #0x8080 // r1 <- 0x80808080
movw r2, #0xffff // r2 <- 0x0000ffff
eor r3, r2, r2, lsl #8 // r3 <- 0x00ff00ff
bl swapmove_rkey
bl packing_rkey // do not apply NOT on the 1st key
mov r4, r5
bl packing_rkey // do not apply NOT on the 1st key
mov r4, r6
bl packing_rkey // do not apply NOT on the 1st key
mov r4, r7
bl packing_rkey // do not apply NOT on the 1st key
movw r12, #10 // 10 rkeys left to pack
loop_aes128_keyschedule:
ldmia sp!, {r4-r7}
bl swapmove_rkey
bl packing_rkey_not
mov r4, r5
bl packing_rkey_not
mov r4, r6
bl packing_rkey_not
mov r4, r7
bl packing_rkey_not
subs r12, #1
bne loop_aes128_keyschedule
pop {r0-r12, r14} // restore context
bx lr
/******************************************************************************
* Pre-computes all the AES-256 round keys according to the barrel-shiftrows
* representation. Note that additional NOTs are incorporated to speed up SBox
* calculations in the encryption function.
* Stack accounting: 224 bytes hold rk1 (2nd half of the 256-bit key, stored
* first) plus the 13 round keys produced by the 13 round-function calls
* (16 + 13*16 = 224), after which sp is back at the push frame. rk0 is
* packed from the original key without NOTs, then the 14 stacked keys with
* NOTs. The round functions clobber r0/r1, hence the ldrd restore below.
******************************************************************************/
@ void aes256_keyschedule_lut(u32* rkeys, const u8* key);
.global aes256_keyschedule_lut
.type aes256_keyschedule_lut,%function
.align 2
aes256_keyschedule_lut:
push {r0-r12,r14}
sub.w sp, #224 // allocate space to store the 14 rkeys
ldm r1, {r4-r11} // load the encryption key
stmia sp!, {r8-r11} // store the 128 last key bits on stack
adr r3, AES_Sbox_compact // load the sbox LUT address in r3
movw r2, #0x01 // 1st rconst
bl aes256_keyschedule_rfunc_0 // 1st round
bl aes256_keyschedule_rfunc_1 // 2nd round
movw r2, #0x02 // 2nd rconst
bl aes256_keyschedule_rfunc_0 // 3rd round
bl aes256_keyschedule_rfunc_1 // 4th round
movw r2, #0x04 // 3rd rconst
bl aes256_keyschedule_rfunc_0 // 5th round
bl aes256_keyschedule_rfunc_1 // 6th round
movw r2, #0x08 // 4th rconst
bl aes256_keyschedule_rfunc_0 // 7th round
bl aes256_keyschedule_rfunc_1 // 8th round
movw r2, #0x10 // 5th rconst
bl aes256_keyschedule_rfunc_0 // 9th round
bl aes256_keyschedule_rfunc_1 // 10th round
movw r2, #0x20 // 6th rconst
bl aes256_keyschedule_rfunc_0 // 11th round
bl aes256_keyschedule_rfunc_1 // 12th round
movw r2, #0x40 // 7th rconst
bl aes256_keyschedule_rfunc_0 // 13th round
//done expanding, now start bitslicing
ldrd r0, r1, [sp] // restore rkeys ptr (r0) and key ptr (r1) saved by push
ldm r1, {r4-r7} // reload the first 128 key bits (rk0)
sub.w sp, #224 // stack now points to the 1st key
movw r1, #0x8080
movt r1, #0x8080 // r1 <- 0x80808080
movw r2, #0xffff // r2 <- 0x0000ffff
eor r3, r2, r2, lsl #8 // r3 <- 0x00ff00ff
bl swapmove_rkey
bl packing_rkey // do not apply NOT on the 1st key
mov r4, r5
bl packing_rkey // do not apply NOT on the 1st key
mov r4, r6
bl packing_rkey // do not apply NOT on the 1st key
mov r4, r7
bl packing_rkey // do not apply NOT on the 1st key
movw r12, #14 // 14 rkeys left to pack
loop_aes256_keyschedule:
ldmia sp!, {r4-r7}
bl swapmove_rkey
bl packing_rkey_not
mov r4, r5
bl packing_rkey_not
mov r4, r6
bl packing_rkey_not
mov r4, r7
bl packing_rkey_not
subs r12, #1
bne loop_aes256_keyschedule
pop {r0-r12, r14} // restore context
bx lr
|
aadomn/aes
| 41,958
|
armcortexm/fixslicing/aes_encrypt.s
|
/******************************************************************************
* Assembly fixsliced implementation of AES-128 and AES-256 (encryption only).
*
* Fully-fixsliced implementation runs faster than the semi-fixsliced variant
* at the cost of a larger code size.
*
* See the paper at https://eprint.iacr.org/2020/1123.pdf for more details.
*
* @author Alexandre Adomnicai, Nanyang Technological University, Singapore
* alexandre.adomnicai@ntu.edu.sg
*
* @date October 2020
******************************************************************************/
.syntax unified
.thumb
/******************************************************************************
* Macro to compute the SWAPMOVE technique: swap the bits in 'in1' masked by 'm'
* by the bits in 'in0' masked by 'm << n' and put the results in 'out0', 'out1'
* 'tmp' is clobbered; 'out0'/'out1' may alias 'in0'/'in1'.
******************************************************************************/
.macro swpmv out0, out1, in0, in1, m, n, tmp
eor \tmp, \in1, \in0, lsr \n
and \tmp, \m
eor \out1, \in1, \tmp
eor \out0, \in0, \tmp, lsl \n
.endm
/******************************************************************************
* Rotate all bytes in 'in' by 'n0' bits to the right and put the results in
* 'out'. 'm' refers to the appropriate bitmask and 'n1' = 8-'n0'.
* 'tmp' is clobbered; 'out' must not alias 'in'.
******************************************************************************/
.macro byteror out, in, m, n0, n1, tmp
and \out, \m, \in, lsr \n0
bic \tmp, \in, \m, ror \n1
orr \out, \out, \tmp, lsl \n1
.endm
/******************************************************************************
* Compute the MixColumns for rounds i st i%4 == 0 or 2.
* Between the two versions, only the masks and the shifts for the 'byteror' are
* differing.
* In: sbox output in r0=S5, r1=S0, r2=S6, r3=S1, r6=S2, r7=S3, r8=S4,
* r11=S7 (see the mapping comment after ark_sbox).
* Out: new state in r4=S'0, r5=S'1, r6=S'2, r7=S'3, r8=S'4, r9=S'5,
* r10=S'6, r11=S'7. r1 is reused as the 0x0f0f0f0f mask once S0 has
* been consumed. NOTE(review): parameters \n2 and \n3 are not referenced
* in this macro body — presumably kept for symmetry with a variant;
* confirm before removing.
******************************************************************************/
.macro mc_0_2 m, n0, n1, n2, n3
byteror r14, r1, \m, \n0, \n1, r9 // r14 <- BYTE_ROR_n0(S0)
eor r4, r1, r14, ror #8 // r4 <- S0 ^ (BYTE_ROR_n0(S0) >>> 8)
movw r1, #0x0f0f
movt r1, #0x0f0f // r1 <- 0x0f0f0f0f (for BYTE_ROR)
byteror r5, r11, \m, \n0, \n1, r9 // r5 <- BYTE_ROR_n0(S7)
eor r10, r11, r5, ror #8 // r10<- S7 ^ BYTE_ROR_n0(S7 >>> 8)
byteror r11, r10, r1, 4, 4, r9 // r11<- BYTE_ROR_4(r10)
eor r11, r4, r11, ror #16 // r11<- BYTE_ROR_4(r10) ^ (r10 >>> 16)
eor r11, r11, r5, ror #8 // r11<- S'7
byteror r5, r2, \m, \n0, \n1, r9 // r5 <- BYTE_ROR_n0(S6)
eor r2, r2, r5, ror #8 // r2 <- S6 ^ BYTE_ROR_n0(S6 >>> 8)
eor r10, r10, r5, ror #8 // r10<- r10 ^ (BYTE_ROR_n0(S6) >>> 8)
byteror r5, r2, r1, 4, 4, r9 // r5 <- BYTE_ROR_4(r2)
eor r10, r10, r5, ror #16 // r10<- r10 ^ (r5 >>> 16)
eor r10, r10, r4 // r10<- S'6
byteror r5, r0, \m, \n0, \n1, r9 // r5 <- BYTE_ROR_n0(S5)
eor r0, r0, r5, ror #8 // r0 <- S5 ^ BYTE_ROR_n0(S5 >>> 8)
eor r9, r2, r5, ror #8 // r9 <- r2 ^ (BYTE_ROR_n0(S5) >>> 8)
byteror r5, r0, r1, 4, 4, r2 // r5 <- BYTE_ROR_4(r0)
eor r9, r9, r5, ror #16 // r9 <- S'5
byteror r5, r8, \m, \n0, \n1, r2 // r5 <- BYTE_ROR_n0(S4)
eor r2, r8, r5, ror #8 // r2 <- S4 ^ BYTE_ROR_n0(S4 >>> 8)
eor r8, r0, r5, ror #8 // r8 <- r0 ^ (BYTE_ROR_n0(S4) >>> 8)
byteror r5, r2, r1, 4, 4, r0 // r5 <- BYTE_ROR_4(r2)
eor r8, r8, r5, ror #16 // r8 <- r8 ^ (r5 >>> 16)
eor r8, r8, r4 // r8 <- S'4
byteror r5, r7, \m, \n0, \n1, r0 // r5 <- BYTE_ROR_n0(S3)
eor r0, r7, r5, ror #8 // r0 <- S3 ^ BYTE_ROR_n0(S3 >>> 8)
eor r7, r2, r5, ror #8 // r7 <- r2 ^ (BYTE_ROR_n0(S3) >>> 8)
byteror r5, r0, r1, 4, 4, r2 // r5 <- BYTE_ROR_4(r0)
eor r7, r7, r5, ror #16 // r7 <- r7 ^ (r5 >>> 16)
eor r7, r7, r4 // r7 <- S'3
byteror r5, r6, \m, \n0, \n1, r2 // r5 <- BYTE_ROR_n0(S2)
eor r2, r6, r5, ror #8 // r2 <- S2 ^ BYTE_ROR_n0(S2 >>> 8)
eor r6, r0, r5, ror #8 // r6 <- r0 ^ (BYTE_ROR_n0(S2) >>> 8)
byteror r5, r2, r1, 4, 4, r0 // r5 <- BYTE_ROR_4(r2)
eor r6, r6, r5, ror #16 // r6 <- S'2
byteror r5, r3, \m, \n0, \n1, r0 // r5 <- BYTE_ROR_n0(S1)
eor r0, r3, r5, ror #8 // r0 <- S1 ^ BYTE_ROR_n0(S1 >>> 8)
eor r3, r2, r5, ror #8 // r3 <- r2 ^ (BYTE_ROR_n0(S1) >>> 8)
byteror r5, r0, r1, 4, 4, r2 // r5 <- BYTE_ROR_4(r0)
eor r5, r3, r5, ror #16 // r5 <- S'1
eor r14, r0, r14, ror #8 // r14<- r0 ^ (BYTE_ROR_n0(S0) >>> 8)
byteror r0, r4, r1, 4, 4, r2 // r0 <- BYTE_ROR_4(r4)
eor r4, r14, r0, ror #16 // r4 <- S'0
.endm
/******************************************************************************
* Packs two 128-bit input blocs stored in r4-r7 and r8-r11, respectively, into
* the 256-bit internal state where the bits are packed as follows:
* r4 = b_24 b_56 b_88 b_120 || ... || b_0 b_32 b_64 b_96
* r5 = b_25 b_57 b_89 b_121 || ... || b_1 b_33 b_65 b_97
* r6 = b_26 b_58 b_90 b_122 || ... || b_2 b_34 b_66 b_98
* r7 = b_27 b_59 b_91 b_123 || ... || b_3 b_35 b_67 b_99
* r8 = b_28 b_60 b_92 b_124 || ... || b_4 b_36 b_68 b_100
* r9 = b_29 b_61 b_93 b_125 || ... || b_5 b_37 b_69 b_101
* r10 = b_30 b_62 b_94 b_126 || ... || b_6 b_38 b_70 b_102
* r11 = b_31 b_63 b_95 b_127 || ... || b_7 b_39 b_71 b_103
* Implemented as three layers of SWAPMOVE (strides 1, 2, 4).
* Clobbers r0-r3 and r12.
******************************************************************************/
.align 2
packing:
movw r3, #0x0f0f
movt r3, #0x0f0f // r3 <- 0x0f0f0f0f (mask for SWAPMOVE)
eor r2, r3, r3, lsl #2 // r2 <- 0x33333333 (mask for SWAPMOVE)
eor r1, r2, r2, lsl #1 // r1 <- 0x55555555 (mask for SWAPMOVE)
swpmv r8, r4, r8, r4, r1, #1, r12
swpmv r9, r5, r9, r5, r1, #1, r12
swpmv r10, r6, r10, r6, r1, #1, r12
swpmv r11, r7, r11, r7, r1, #1, r12
swpmv r0, r4, r5, r4, r2, #2, r12
swpmv r9, r5, r9, r8, r2, #2, r12
swpmv r7, r8, r7, r6, r2, #2, r12
swpmv r11, r2, r11, r10, r2, #2, r12
swpmv r8, r4, r8, r4, r3, #4, r12
swpmv r10, r6, r7, r0, r3, #4, r12
swpmv r11, r7, r11, r9, r3, #4, r12
swpmv r9, r5, r2, r5, r3, #4, r12
bx lr
/******************************************************************************
* Unpacks the 256-bit internal state in two 128-bit blocs.
* Inverse of 'packing': the three SWAPMOVE layers are applied in reverse
* order (strides 4, 2, 1). Clobbers r1-r3 and r12.
******************************************************************************/
.align 2
unpacking:
movw r3, #0x0f0f
movt r3, #0x0f0f // r3 <- 0x0f0f0f0f (mask for SWAPMOVE)
swpmv r2, r5, r9, r5, r3, #4, r12
swpmv r11, r9, r11, r7, r3, #4, r12
swpmv r7, r1, r10, r6, r3, #4, r12
swpmv r8, r4, r8, r4, r3, #4, r12
eor r3, r3, r3, lsl #2 // r3 <- 0x33333333 (mask for SWAPMOVE)
swpmv r11, r10,r11, r2, r3, #2, r12
swpmv r7, r6, r7, r8, r3, #2, r12
swpmv r9, r8, r9, r5, r3, #2, r12
swpmv r5, r4, r1, r4, r3, #2, r12
eor r1, r3, r3, lsl #1 // r1 <- 0x55555555 (mask for SWAPMOVE)
swpmv r8, r4, r8, r4, r1, #1, r12
swpmv r9, r5,r9, r5, r1, #1, r12
swpmv r10, r6, r10, r6, r1, #1, r12
swpmv r11, r7, r11, r7, r1, #1, r12
bx lr
/******************************************************************************
* Subroutine that computes the AddRoundKey and the S-box.
* In: r4-r11 = the eight bitsliced state slices; [sp, #48] = pointer to
* the current round key (advanced by 32 bytes on return); [sp, #52]
* is used to save/restore the link register.
* Out: the S-box output is spread over r0,r1,r2,r3,r6,r7,r8,r11 holding
* S5,S0,S6,S1,S2,S3,S4,S7 respectively (mapping repeated after bx lr).
* Clobbers r12 and the stack scratch area [sp, #0..#47].
* Credits to https://github.com/Ko-/aes-armcortexm for the S-box implementation
******************************************************************************/
.align 2
ark_sbox:
// add round key
ldr.w r1, [sp, #48]
ldmia r1!, {r0,r2,r3,r12}
eor r4, r0
eor r5, r2
eor r6, r3
eor r7, r12
ldmia r1!, {r0,r2,r3,r12}
eor r8, r0
eor r9, r2
eor r10, r3
eor r11, r12
str.w r1, [sp, #48]
str r14, [sp, #52]
// sbox: credits to https://github.com/Ko-/aes-armcortexm
eor r1, r7, r9 //Exec y14 = U3 ^ U5; into r1
eor r3, r4, r10 //Exec y13 = U0 ^ U6; into r3
eor r2, r3, r1 //Exec y12 = y13 ^ y14; into r2
eor r0, r8, r2 //Exec t1 = U4 ^ y12; into r0
eor r14, r0, r9 //Exec y15 = t1 ^ U5; into r14
and r12, r2, r14 //Exec t2 = y12 & y15; into r12
eor r8, r14, r11 //Exec y6 = y15 ^ U7; into r8
eor r0, r0, r5 //Exec y20 = t1 ^ U1; into r0
str.w r2, [sp, #44] //Store r2/y12 on stack
eor r2, r4, r7 //Exec y9 = U0 ^ U3; into r2
str r0, [sp, #40] //Store r0/y20 on stack
eor r0, r0, r2 //Exec y11 = y20 ^ y9; into r0
str r2, [sp, #36] //Store r2/y9 on stack
and r2, r2, r0 //Exec t12 = y9 & y11; into r2
str r8, [sp, #32] //Store r8/y6 on stack
eor r8, r11, r0 //Exec y7 = U7 ^ y11; into r8
eor r9, r4, r9 //Exec y8 = U0 ^ U5; into r9
eor r6, r5, r6 //Exec t0 = U1 ^ U2; into r6
eor r5, r14, r6 //Exec y10 = y15 ^ t0; into r5
str r14, [sp, #28] //Store r14/y15 on stack
eor r14, r5, r0 //Exec y17 = y10 ^ y11; into r14
str.w r1, [sp, #24] //Store r1/y14 on stack
and r1, r1, r14 //Exec t13 = y14 & y17; into r1
eor r1, r1, r2 //Exec t14 = t13 ^ t12; into r1
str r14, [sp, #20] //Store r14/y17 on stack
eor r14, r5, r9 //Exec y19 = y10 ^ y8; into r14
str.w r5, [sp, #16] //Store r5/y10 on stack
and r5, r9, r5 //Exec t15 = y8 & y10; into r5
eor r2, r5, r2 //Exec t16 = t15 ^ t12; into r2
eor r5, r6, r0 //Exec y16 = t0 ^ y11; into r5
str.w r0, [sp, #12] //Store r0/y11 on stack
eor r0, r3, r5 //Exec y21 = y13 ^ y16; into r0
str r3, [sp, #8] //Store r3/y13 on stack
and r3, r3, r5 //Exec t7 = y13 & y16; into r3
str r5, [sp, #4] //Store r5/y16 on stack
str r11, [sp, #0] //Store r11/U7 on stack
eor r5, r4, r5 //Exec y18 = U0 ^ y16; into r5
eor r6, r6, r11 //Exec y1 = t0 ^ U7; into r6
eor r7, r6, r7 //Exec y4 = y1 ^ U3; into r7
and r11, r7, r11 //Exec t5 = y4 & U7; into r11
eor r11, r11, r12 //Exec t6 = t5 ^ t2; into r11
eor r11, r11, r2 //Exec t18 = t6 ^ t16; into r11
eor r14, r11, r14 //Exec t22 = t18 ^ y19; into r14
eor r4, r6, r4 //Exec y2 = y1 ^ U0; into r4
and r11, r4, r8 //Exec t10 = y2 & y7; into r11
eor r11, r11, r3 //Exec t11 = t10 ^ t7; into r11
eor r2, r11, r2 //Exec t20 = t11 ^ t16; into r2
eor r2, r2, r5 //Exec t24 = t20 ^ y18; into r2
eor r10, r6, r10 //Exec y5 = y1 ^ U6; into r10
and r11, r10, r6 //Exec t8 = y5 & y1; into r11
eor r3, r11, r3 //Exec t9 = t8 ^ t7; into r3
eor r3, r3, r1 //Exec t19 = t9 ^ t14; into r3
eor r3, r3, r0 //Exec t23 = t19 ^ y21; into r3
eor r0, r10, r9 //Exec y3 = y5 ^ y8; into r0
ldr r11, [sp, #32] //Load y6 into r11
and r5, r0, r11 //Exec t3 = y3 & y6; into r5
eor r12, r5, r12 //Exec t4 = t3 ^ t2; into r12
ldr r5, [sp, #40] //Load y20 into r5
str r7, [sp, #32] //Store r7/y4 on stack
eor r12, r12, r5 //Exec t17 = t4 ^ y20; into r12
eor r1, r12, r1 //Exec t21 = t17 ^ t14; into r1
and r12, r1, r3 //Exec t26 = t21 & t23; into r12
eor r5, r2, r12 //Exec t27 = t24 ^ t26; into r5
eor r12, r14, r12 //Exec t31 = t22 ^ t26; into r12
eor r1, r1, r14 //Exec t25 = t21 ^ t22; into r1
and r7, r1, r5 //Exec t28 = t25 & t27; into r7
eor r14, r7, r14 //Exec t29 = t28 ^ t22; into r14
and r4, r14, r4 //Exec z14 = t29 & y2; into r4
and r8, r14, r8 //Exec z5 = t29 & y7; into r8
eor r7, r3, r2 //Exec t30 = t23 ^ t24; into r7
and r12, r12, r7 //Exec t32 = t31 & t30; into r12
eor r12, r12, r2 //Exec t33 = t32 ^ t24; into r12
eor r7, r5, r12 //Exec t35 = t27 ^ t33; into r7
and r2, r2, r7 //Exec t36 = t24 & t35; into r2
eor r5, r5, r2 //Exec t38 = t27 ^ t36; into r5
and r5, r14, r5 //Exec t39 = t29 & t38; into r5
eor r1, r1, r5 //Exec t40 = t25 ^ t39; into r1
eor r5, r14, r1 //Exec t43 = t29 ^ t40; into r5
ldr.w r7, [sp, #4] //Load y16 into r7
and r7, r5, r7 //Exec z3 = t43 & y16; into r7
eor r8, r7, r8 //Exec tc12 = z3 ^ z5; into r8
str r8, [sp, #40] //Store r8/tc12 on stack
ldr r8, [sp, #8] //Load y13 into r8
and r8, r5, r8 //Exec z12 = t43 & y13; into r8
and r10, r1, r10 //Exec z13 = t40 & y5; into r10
and r6, r1, r6 //Exec z4 = t40 & y1; into r6
eor r6, r7, r6 //Exec tc6 = z3 ^ z4; into r6
eor r3, r3, r12 //Exec t34 = t23 ^ t33; into r3
eor r3, r2, r3 //Exec t37 = t36 ^ t34; into r3
eor r1, r1, r3 //Exec t41 = t40 ^ t37; into r1
ldr.w r5, [sp, #16] //Load y10 into r5
and r2, r1, r5 //Exec z8 = t41 & y10; into r2
and r9, r1, r9 //Exec z17 = t41 & y8; into r9
str r9, [sp, #16] //Store r9/z17 on stack
eor r5, r12, r3 //Exec t44 = t33 ^ t37; into r5
ldr r9, [sp, #28] //Load y15 into r9
ldr.w r7, [sp, #44] //Load y12 into r7
and r9, r5, r9 //Exec z0 = t44 & y15; into r9
and r7, r5, r7 //Exec z9 = t44 & y12; into r7
and r0, r3, r0 //Exec z10 = t37 & y3; into r0
and r3, r3, r11 //Exec z1 = t37 & y6; into r3
eor r3, r3, r9 //Exec tc5 = z1 ^ z0; into r3
eor r3, r6, r3 //Exec tc11 = tc6 ^ tc5; into r3
ldr r11, [sp, #32] //Load y4 into r11
ldr.w r5, [sp, #20] //Load y17 into r5
and r11, r12, r11 //Exec z11 = t33 & y4; into r11
eor r14, r14, r12 //Exec t42 = t29 ^ t33; into r14
eor r1, r14, r1 //Exec t45 = t42 ^ t41; into r1
and r5, r1, r5 //Exec z7 = t45 & y17; into r5
eor r6, r5, r6 //Exec tc8 = z7 ^ tc6; into r6
ldr r5, [sp, #24] //Load y14 into r5
str r4, [sp, #32] //Store r4/z14 on stack
and r1, r1, r5 //Exec z16 = t45 & y14; into r1
ldr r5, [sp, #12] //Load y11 into r5
ldr r4, [sp, #36] //Load y9 into r4
and r5, r14, r5 //Exec z6 = t42 & y11; into r5
eor r5, r5, r6 //Exec tc16 = z6 ^ tc8; into r5
and r4, r14, r4 //Exec z15 = t42 & y9; into r4
eor r14, r4, r5 //Exec tc20 = z15 ^ tc16; into r14
eor r4, r4, r1 //Exec tc1 = z15 ^ z16; into r4
eor r1, r0, r4 //Exec tc2 = z10 ^ tc1; into r1
eor r0, r1, r11 //Exec tc21 = tc2 ^ z11; into r0
eor r7, r7, r1 //Exec tc3 = z9 ^ tc2; into r7
eor r1, r7, r5 //Exec S0 = tc3 ^ tc16; into r1
eor r7, r7, r3 //Exec S3 = tc3 ^ tc11; into r7
eor r3, r7, r5 //Exec S1 = S3 ^ tc16 ^ 1; into r3
eor r11, r10, r4 //Exec tc13 = z13 ^ tc1; into r11
ldr.w r4, [sp, #0] //Load U7 into r4
and r12, r12, r4 //Exec z2 = t33 & U7; into r12
eor r9, r9, r12 //Exec tc4 = z0 ^ z2; into r9
eor r12, r8, r9 //Exec tc7 = z12 ^ tc4; into r12
eor r2, r2, r12 //Exec tc9 = z8 ^ tc7; into r2
eor r2, r6, r2 //Exec tc10 = tc8 ^ tc9; into r2
ldr.w r4, [sp, #32] //Load z14 into r4
eor r12, r4, r2 //Exec tc17 = z14 ^ tc10; into r12
eor r0, r0, r12 //Exec S5 = tc21 ^ tc17; into r0
eor r6, r12, r14 //Exec tc26 = tc17 ^ tc20; into r6
ldr.w r4, [sp, #16] //Load z17 into r4
ldr r12, [sp, #40] //Load tc12 into r12
eor r6, r6, r4 //Exec S2 = tc26 ^ z17 ^ 1; into r6
eor r12, r9, r12 //Exec tc14 = tc4 ^ tc12; into r12
eor r14, r11, r12 //Exec tc18 = tc13 ^ tc14; into r14
eor r2, r2, r14 //Exec S6 = tc10 ^ tc18 ^ 1; into r2
eor r11, r8, r14 //Exec S7 = z12 ^ tc18 ^ 1; into r11
ldr r14, [sp, #52] // restore link register
eor r8, r12, r7 //Exec S4 = tc14 ^ S3; into r8
bx lr
// [('r0', 'S5'), ('r1', 'S0'), ('r2', 'S6'), ('r3', 'S1'),
// ('r6', 'S2'),('r7', 'S3'), ('r8', 'S4'), ('r11', 'S7')]
/******************************************************************************
* Computation of the MixColumns transformation in the fixsliced representation.
* For fully-fixsliced implementations, it is used for rounds i s.t. (i%4) == 0.
* For semi-fixsliced implementations, it is used for rounds i s.t. (i%2) == 0.
******************************************************************************/
.align 2
mixcolumns_0:
// Wrapper around the mc_0_2 macro (defined earlier in this file) with the
// mask/rotation parameters for rounds i s.t. (i%4) == 0.
// lr is spilled to sp+52 because the macro body may use r14.
str r14, [sp, #52] // store link register
movw r12, #0x0303
movt r12, #0x0303 // r12<- 0x03030303 (mask passed to mc_0_2)
mc_0_2 r12, 6, 2, 26, 18 // MixColumns for rounds with (i%4) == 0
ldr r14, [sp, #52] // restore link register
bx lr
/******************************************************************************
* Computation of the MixColumns transformation in the fixsliced representation.
* For fully-fixsliced implementations only, for round i s.t. (i%4) == 1.
******************************************************************************/
.align 2
mixcolumns_1:
// In: S0-S7 in r1,r3,r6,r7,r8,r0,r2,r11 (the sbox output mapping).
// Out: updated state in r4-r11 (the register layout expected by ark_sbox).
// BYTE_ROR_4(x) rotates each byte of x right by 4 bits, built with the
// 0x0f0f0f0f mask held in r14; lr is spilled to sp+52 meanwhile.
str r14, [sp, #52] // store link register
movw r14, #0x0f0f
movt r14, #0x0f0f // r14<- 0x0f0f0f0f (mask for BYTE_ROR_4)
and r5, r14, r1, lsr #4 // r5 <- (S0 >> 4) & 0x0f0f0f0f
and r9, r14, r1 // r9 <- S0 & 0x0f0f0f0f
orr r5, r5, r9, lsl #4 // r5 <- BYTE_ROR_4(S0)
eor r4, r1, r5, ror #8 // r4 <- S0 ^ (BYTE_ROR_4(S0) >>> 8)
mov.w r1, r5, ror #8 // r1 <- (BYTE_ROR_4(S0) >>> 8)
and r5, r14, r11, lsr #4 // r5 <- (S7 >> 4) & 0x0f0f0f0f
and r9, r14, r11 // r9 <- S7 & 0x0f0f0f0f
orr r5, r5, r9, lsl #4 // r5 <- BYTE_ROR_4(S7)
eor r12, r11, r5, ror #8 // r12<- S7 ^ (BYTE_ROR_4(S7) >>> 8)
eor r10, r4, r12 // r10<- r4 ^ r12
eor r11, r10 // r11<- S7 ^ r4 ^ r12
eor r11, r11, r12, ror #16 // r11<- r11 ^ (r12 >>> 16)
and r5, r14, r2, lsr #4 // r5 <- (S6 >> 4) & 0x0f0f0f0f
and r9, r14, r2 // r9 <- S6 & 0x0f0f0f0f
orr r5, r5, r9, lsl #4 // r5 <- BYTE_ROR_4(S6)
eor r10, r10, r5, ror #8 // r10<- r10 ^ (BYTE_ROR_4(S6) >>> 8)
eor r12, r2, r5, ror #8 // r12<- S6 ^ (BYTE_ROR_4(S6) >>> 8)
eor r10, r10, r12, ror #16 // r10<- r10 ^ (r12 >>> 16)
and r5, r14, r0, lsr #4 // r5 <- (S5 >> 4) & 0x0f0f0f0f
and r9, r14, r0 // r9 <- S5 & 0x0f0f0f0f
orr r5, r5, r9, lsl #4 // r5 <- BYTE_ROR_4(S5)
eor r9, r12, r5, ror #8 // r9 <- r12 ^ (BYTE_ROR_4(S5) >>> 8)
eor r12, r0, r5, ror #8 // r12<- S5 ^ (BYTE_ROR_4(S5) >>> 8)
eor r9, r9, r12, ror #16 // r9 <- r9 ^ (r12 >>> 16)
eor r0, r4, r12 // r0 <- r12 ^ S0 ^ (BYTE_ROR_4(S0) >>> 8)
and r5, r14, r8, lsr #4 // r5 <- (S4 >> 4) & 0x0f0f0f0f
and r2, r14, r8 // r2 <- S4 & 0x0f0f0f0f
orr r2, r5, r2, lsl #4 // r2 <- BYTE_ROR_4(S4)
eor r0, r0, r2, ror #8 // r0 <- r0 ^ (BYTE_ROR_4(S4) >>> 8)
eor r2, r8, r2, ror #8 // r2 <- S4 ^ (BYTE_ROR_4(S4) >>> 8)
eor r8, r0, r2, ror #16 // r8 <- r0 ^ (r2 >>> 16)
eor r2, r4 // r2 <- r2 ^ S0 ^ (BYTE_ROR_4(S0) >>> 8)
and r5, r14, r7, lsr #4 // r5 <- (S3 >> 4) & 0x0f0f0f0f
and r0, r14, r7 // r0 <- S3 & 0x0f0f0f0f
orr r0, r5, r0, lsl #4 // r0 <- BYTE_ROR_4(S3)
eor r2, r2, r0, ror #8 // r2 <- r2 ^ (BYTE_ROR_4(S3) >>> 8)
eor r0, r7, r0, ror #8 // r0 <- S3 ^ (BYTE_ROR_4(S3) >>> 8)
eor r7, r2, r0, ror #16 // r7 <- r2 ^ (r0 >>> 16)
and r5, r14, r6, lsr #4 // r5 <- (S2 >> 4) & 0x0f0f0f0f
and r2, r14, r6 // r2 <- S2 & 0x0f0f0f0f
orr r2, r5, r2, lsl #4 // r2 <- BYTE_ROR_4(S2)
eor r0, r0, r2, ror #8 // r0 <- r0 ^ (BYTE_ROR_4(S2) >>> 8)
eor r2, r6, r2, ror #8 // r2 <- S2 ^ (BYTE_ROR_4(S2) >>> 8)
eor r6, r0, r2, ror #16 // r6 <- r0 ^ (r2 >>> 16)
and r5, r14, r3, lsr #4 // r5 <- (S1 >> 4) & 0x0f0f0f0f
and r0, r14, r3 // r0 <- S1 & 0x0f0f0f0f
orr r0, r5, r0, lsl #4 // r0 <- BYTE_ROR_4(S1)
ldr r14, [sp, #52] // restore link register
eor r2, r2, r0, ror #8 // r2 <- r2 ^ (BYTE_ROR_4(S1) >>> 8)
eor r0, r3, r0, ror #8 // r0 <- S1 ^ (BYTE_ROR_4(S1) >>> 8)
eor r5, r2, r0, ror #16 // r5 <- r2 ^ (r0 >>> 16)
eor r1, r0, r1 // r1 <- r0 ^ (BYTE_ROR_4(S0) >>> 8)
eor r4, r1, r4, ror #16 // r4 <- r1 ^ (r4 >>> 16)
bx lr
/******************************************************************************
* Computation of the MixColumns transformation in the fixsliced representation.
* For fully-fixsliced implementations only, for rounds i s.t. (i%4) == 2.
******************************************************************************/
.align 2
mixcolumns_2:
// Wrapper around the mc_0_2 macro (defined earlier in this file) with the
// mask/rotation parameters for rounds i s.t. (i%4) == 2.
// lr is spilled to sp+52 because the macro body may use r14.
str r14, [sp, #52] // store link register
movw r12, #0x3f3f
movt r12, #0x3f3f // r12<- 0x3f3f3f3f (mask passed to mc_0_2)
mc_0_2 r12, 2, 6, 30, 22 // MixColumns for rounds with (i%4) == 2
ldr r14, [sp, #52] // restore link register
bx lr
/******************************************************************************
* Computation of the MixColumns transformation in the fixsliced representation.
* For fully-fixsliced implementations, it is used for rounds i s.t. (i%4) == 3.
* For semi-fixsliced implementations, it is used for rounds i s.t. (i%2) == 1.
* Based on Käsper-Schwabe, similar to https://github.com/Ko-/aes-armcortexm.
******************************************************************************/
.align 2
mixcolumns_3:
// In: S0-S7 in r1,r3,r6,r7,r8,r0,r2,r11 (the sbox output mapping).
// Out: updated state in r4-r11 (the register layout expected by ark_sbox).
// Leaf routine: no stack usage, lr is untouched (unlike mixcolumns_0/1/2).
eor r12, r11, r11, ror #8 // r12<- S7 ^ (S7 >>> 8)
eor r4, r1, r1, ror #8 // r4 <- S0 ^ (S0 >>> 8)
eor r11, r4, r11, ror #8 // r11<- S0 ^ (S0 >>> 8) ^ (S7 >>> 8)
eor r11, r11, r12, ror #16 // r11<- r11 ^ (S7 >>> 16) ^ (S7 >>> 24)
eor r10, r12, r2, ror #8 // r10<- S7 ^ (S7 >>> 8) ^ (S6 >>> 8)
eor r12, r2, r2, ror #8 // r12<- S6 ^ (S6 >>> 8)
eor r10, r10, r12, ror #16 // r10<- r10 ^ (S6 >>> 16) ^ (S6 >>> 24)
eor r10, r4 // r10<- r10 ^ S0 ^ (S0 >>> 8)
eor r9, r12, r0, ror #8 // r9 <- S6 ^ (S6 >>> 8) ^ (S5 >>> 8)
eor r12, r0, r0, ror #8 // r12<- S5 ^ (S5 >>> 8)
eor r9, r9, r12, ror #16 // r9 <- r9 ^ (S5 >>> 16) ^ (S5 >>> 24)
eor r2, r8, r8, ror #8 // r2 <- S4 ^ (S4 >>> 8)
eor r8, r12, r8, ror #8 // r8 <- S5 ^ (S5 >>> 8) ^ (S4 >>> 8)
eor r8, r4 // r8 <- r8 ^ S0 ^ (S0 >>> 8)
eor r8, r8, r2, ror #16 // r8 <- r8 ^ (S4 >>> 16) ^ (S4 >>> 24)
eor r12, r7, r7, ror #8 // r12<- S3 ^ (S3 >>> 8)
eor r7, r2, r7, ror #8 // r7 <- S4 ^ (S4 >>> 8) ^ (S3 >>> 8)
eor r7, r4 // r7 <- r7 ^ S0 ^ (S0 >>> 8)
eor r7, r7, r12, ror #16 // r7 <- r7 ^ (S3 >>> 16) ^ (S3 >>> 24)
eor r2, r6, r6, ror #8 // r2 <- S2 ^ (S2 >>> 8)
eor r6, r12, r6, ror #8 // r6 <- S3 ^ (S3 >>> 8) ^ (S2 >>> 8)
eor r6, r6, r2, ror #16 // r6 <- r6 ^ (S2 >>> 16) ^ (S2 >>> 24)
eor r12, r3, r3, ror #8 // r12<- S1 ^ (S1 >>> 8)
eor r5, r2, r3, ror #8 // r5 <- S2 ^ (S2 >>> 8) ^ (S1 >>> 8)
eor r5, r5, r12, ror #16 // r5 <- r5 ^ (S1 >>> 16) ^ (S1 >>> 24)
eor r4, r12, r4, ror #16 // r4 <- S1 ^ (S1 >>> 8) ^ (r4 >>> 16)
eor r4, r4, r1, ror #8 // r4 <- r4 ^ (S0 >>> 8)
bx lr
/******************************************************************************
* Applies the ShiftRows transformation twice (i.e. SR^2) on the internal state.
******************************************************************************/
.align 2
double_shiftrows:
// Applies SWAPMOVE with mask 0x0f000f00 and shift 4 to each state register,
// which realizes ShiftRows applied twice (SR^2) on the packed representation.
// Leaf routine. Clobbers: r10 (mask) and r12 (swpmv temporary).
movw r10, #0x0f00
movt r10, #0x0f00 // r10<- 0x0f000f00 (mask)
swpmv r0, r0, r0, r0, r10, #4, r12
swpmv r1, r1, r1, r1, r10, #4, r12
swpmv r2, r2, r2, r2, r10, #4, r12
swpmv r3, r3, r3, r3, r10, #4, r12
swpmv r6, r6, r6, r6, r10, #4, r12
swpmv r7, r7, r7, r7, r10, #4, r12
swpmv r8, r8, r8, r8, r10, #4, r12
swpmv r11, r11, r11, r11, r10, #4, r12
bx lr
/******************************************************************************
* Fully-fixsliced implementation of AES-128.
*
* Two blocks are encrypted in parallel, without any operating mode.
*
* Note that additional 4 bytes are allocated on the stack as the function takes
* 5 arguments as input.
******************************************************************************/
@ void aes128_encrypt_ffs(u8* ctext, u8* ctext_bis, const u8* ptext,
@ const u8* ptext_bis, const u32* rkey);
.global aes128_encrypt_ffs
.type aes128_encrypt_ffs,%function
.align 2
aes128_encrypt_ffs:
// In:  r0/r1 = output addresses for the 2 ciphertext blocks
//      r2/r3 = addresses of the 2 input 128-bit plaintext blocks
//      5th argument = pointer to the fixsliced round keys, passed on the
//      caller's stack (reached at sp+112 = 56 tmp bytes + 56 saved regs).
push {r0-r12,r14} // save context (14 words = 56 bytes)
sub.w sp, #56 // allow space on the stack for tmp var
ldr.w r4, [r2] // load the 1st 128-bit blocks in r4-r7
ldr r5, [r2, #4]
ldr r6, [r2, #8]
ldr r7, [r2, #12]
ldr.w r8, [r3] // load the 2nd 128-bit blocks in r8-r11
ldr r9, [r3, #4]
ldr r10,[r3, #8]
ldr r11,[r3, #12]
ldr.w r1, [sp, #112] // load 'rkey' argument from the stack
str.w r1, [sp, #48] // store it there for 'add_round_key'
bl packing // pack the 2 input blocks
bl ark_sbox // ark + sbox (round 0)
bl mixcolumns_0 // mixcolumns (round 0)
bl ark_sbox // ark + sbox (round 1)
bl mixcolumns_1 // mixcolumns (round 1)
bl ark_sbox // ark + sbox (round 2)
bl mixcolumns_2 // mixcolumns (round 2)
bl ark_sbox // ark + sbox (round 3)
bl mixcolumns_3 // mixcolumns (round 3)
bl ark_sbox // ark + sbox (round 4)
bl mixcolumns_0 // mixcolumns (round 4)
bl ark_sbox // ark + sbox (round 5)
bl mixcolumns_1 // mixcolumns (round 5)
bl ark_sbox // ark + sbox (round 6)
bl mixcolumns_2 // mixcolumns (round 6)
bl ark_sbox // ark + sbox (round 7)
bl mixcolumns_3 // mixcolumns (round 7)
bl ark_sbox // ark + sbox (round 8)
bl mixcolumns_0 // mixcolumns (round 8)
bl ark_sbox // ark + sbox (round 9)
bl double_shiftrows // to resynchronize with the classical rep
ldr r14, [sp, #48] // ---------------------------------------
ldmia r14!, {r4,r5,r10,r12} //
eor r4, r1 //
eor r5, r3 //
eor r6, r10 //
eor r7, r12 // Last add_round_key
ldmia r14!, {r1,r3,r10,r12} //
eor r8, r1 //
eor r9, r0, r3 //
eor r10, r2 //
eor r11, r12 // ---------------------------------------
bl unpacking // unpack the internal state
ldrd r0, r1, [sp, #56] // restore the addr to store the ciphertext
add.w sp, #64 // 56 tmp bytes + 8 (saved r0/r1 just reloaded)
str.w r4, [r0] // store the ciphertext
str r5, [r0, #4]
str r6, [r0, #8]
str r7, [r0, #12]
str.w r8, [r1] // store the ciphertext
str r9, [r1, #4]
str r10,[r1, #8]
str r11,[r1, #12]
pop {r2-r12, r14} // restore context
bx lr
/******************************************************************************
* Fully-fixsliced implementation of AES-256.
*
* Two blocks are encrypted in parallel, without any operating mode.
*
* Note that additional 4 bytes are allocated on the stack as the function takes
* 5 arguments as input.
******************************************************************************/
@ void aes256_encrypt_ffs(u8* ctext, u8* ctext_bis, const u8* ptext,
@ const u8* ptext_bis, const u32* rkey);
.global aes256_encrypt_ffs
.type aes256_encrypt_ffs,%function
.align 2
aes256_encrypt_ffs:
// In:  r0/r1 = output addresses for the 2 ciphertext blocks
//      r2/r3 = addresses of the 2 input 128-bit plaintext blocks
//      5th argument = pointer to the fixsliced round keys, passed on the
//      caller's stack (reached at sp+112 = 56 tmp bytes + 56 saved regs).
// Same structure as aes128_encrypt_ffs but with 14 rounds (AES-256).
push {r0-r12,r14} // save context (14 words = 56 bytes)
sub.w sp, #56 // allow space on the stack for tmp var
ldr.w r4, [r2] // load the 1st 128-bit blocks in r4-r7
ldr r5, [r2, #4]
ldr r6, [r2, #8]
ldr r7, [r2, #12]
ldr.w r8, [r3] // load the 2nd 128-bit blocks in r8-r11
ldr r9, [r3, #4]
ldr r10,[r3, #8]
ldr r11,[r3, #12]
ldr.w r1, [sp, #112] // load 'rkey' argument from the stack
str.w r1, [sp, #48] // store it there for 'add_round_key'
bl packing // pack the 2 input blocks
bl ark_sbox // ark + sbox (round 0)
bl mixcolumns_0 // mixcolumns (round 0)
bl ark_sbox // ark + sbox (round 1)
bl mixcolumns_1 // mixcolumns (round 1)
bl ark_sbox // ark + sbox (round 2)
bl mixcolumns_2 // mixcolumns (round 2)
bl ark_sbox // ark + sbox (round 3)
bl mixcolumns_3 // mixcolumns (round 3)
bl ark_sbox // ark + sbox (round 4)
bl mixcolumns_0 // mixcolumns (round 4)
bl ark_sbox // ark + sbox (round 5)
bl mixcolumns_1 // mixcolumns (round 5)
bl ark_sbox // ark + sbox (round 6)
bl mixcolumns_2 // mixcolumns (round 6)
bl ark_sbox // ark + sbox (round 7)
bl mixcolumns_3 // mixcolumns (round 7)
bl ark_sbox // ark + sbox (round 8)
bl mixcolumns_0 // mixcolumns (round 8)
bl ark_sbox // ark + sbox (round 9)
bl mixcolumns_1 // mixcolumns (round 9)
bl ark_sbox // ark + sbox (round 10)
bl mixcolumns_2 // mixcolumns (round 10)
bl ark_sbox // ark + sbox (round 11)
bl mixcolumns_3 // mixcolumns (round 11)
bl ark_sbox // ark + sbox (round 12)
bl mixcolumns_0 // mixcolumns (round 12)
bl ark_sbox // ark + sbox (round 13)
bl double_shiftrows // to resynchronize with the classical rep
ldr r14, [sp, #48] // ---------------------------------------
ldmia r14!, {r4,r5,r10,r12} //
eor r4, r1 //
eor r5, r3 //
eor r6, r10 //
eor r7, r12 // Last add_round_key
ldmia r14!, {r1,r3,r10,r12} //
eor r8, r1 //
eor r9, r0, r3 //
eor r10, r2 //
eor r11, r12 // ---------------------------------------
bl unpacking // unpack the internal state
ldrd r0, r1, [sp, #56] // restore the addr to store the ciphertext
add.w sp, #64 // 56 tmp bytes + 8 (saved r0/r1 just reloaded)
str.w r4, [r0] // store the ciphertext
str r5, [r0, #4]
str r6, [r0, #8]
str r7, [r0, #12]
str.w r8, [r1] // store the ciphertext
str r9, [r1, #4]
str r10,[r1, #8]
str r11,[r1, #12]
pop {r2-r12, r14} // restore context
bx lr
/******************************************************************************
* Semi-fixsliced implementation of AES-128.
*
* Two blocks are encrypted in parallel.
*
* Note that additional 4 bytes are allocated on the stack as the function takes
* 5 arguments as input.
******************************************************************************/
@ void aes128_encrypt_sfs(u8* ctext, u8* ctext_bis, const u8* ptext,
@ const u8* ptext_bis, const u32* rkey);
.global aes128_encrypt_sfs
.type aes128_encrypt_sfs,%function
.align 2
aes128_encrypt_sfs:
// In:  r0/r1 = output addresses for the 2 ciphertext blocks
//      r2/r3 = addresses of the 2 input 128-bit plaintext blocks
//      5th argument = pointer to the round keys, passed on the caller's
//      stack (reached at sp+112 = 56 tmp bytes + 56 saved regs).
// Semi-fixsliced variant: odd rounds use double_shiftrows + mixcolumns_3
// instead of the per-round mixcolumns_1/2 used by the ffs variant.
push {r0-r12,r14} // save context (14 words = 56 bytes)
sub.w sp, #56 // allow space on the stack for tmp var
ldr.w r4, [r2] // load the 1st 128-bit blocks in r4-r7
ldr r5, [r2, #4]
ldr r6, [r2, #8]
ldr r7, [r2, #12]
ldr.w r8, [r3] // load the 2nd 128-bit blocks in r8-r11
ldr r9, [r3, #4]
ldr r10,[r3, #8]
ldr r11,[r3, #12]
ldr.w r1, [sp, #112] // load 'rkey' argument from the stack
str.w r1, [sp, #48] // store it there for 'add_round_key'
bl packing // pack the 2 input blocks
bl ark_sbox // ark + sbox (round 0)
bl mixcolumns_0 // mixcolumns (round 0)
bl ark_sbox // ark + sbox (round 1)
bl double_shiftrows // to resynchronize with the classical rep
bl mixcolumns_3 // mixcolumns (round 1)
bl ark_sbox // ark + sbox (round 2)
bl mixcolumns_0 // mixcolumns (round 2)
bl ark_sbox // ark + sbox (round 3)
bl double_shiftrows // to resynchronize with the classical rep
bl mixcolumns_3 // mixcolumns (round 3)
bl ark_sbox // ark + sbox (round 4)
bl mixcolumns_0 // mixcolumns (round 4)
bl ark_sbox // ark + sbox (round 5)
bl double_shiftrows // to resynchronize with the classical rep
bl mixcolumns_3 // mixcolumns (round 5)
bl ark_sbox // ark + sbox (round 6)
bl mixcolumns_0 // mixcolumns (round 6)
bl ark_sbox // ark + sbox (round 7)
bl double_shiftrows // to resynchronize with the classical rep
bl mixcolumns_3 // mixcolumns (round 7)
bl ark_sbox // ark + sbox (round 8)
bl mixcolumns_0 // mixcolumns (round 8)
bl ark_sbox // ark + sbox (round 9)
bl double_shiftrows // to resynchronize with the classical rep
ldr r14, [sp, #48] // ---------------------------------------
ldmia r14!, {r4,r5,r10,r12} //
eor r4, r1 //
eor r5, r3 //
eor r6, r10 //
eor r7, r12 // Last add_round_key
ldmia r14!, {r1,r3,r10,r12} //
eor r8, r1 //
eor r9, r0, r3 //
eor r10, r2 //
eor r11, r12 // ---------------------------------------
bl unpacking // unpack the internal state
ldrd r0, r1, [sp, #56] // restore the addr to store the ciphertext
add.w sp, #64 // 56 tmp bytes + 8 (saved r0/r1 just reloaded)
str.w r4, [r0] // store the ciphertext
str r5, [r0, #4]
str r6, [r0, #8]
str r7, [r0, #12]
str.w r8, [r1] // store the ciphertext
str r9, [r1, #4]
str r10,[r1, #8]
str r11,[r1, #12]
pop {r2-r12, r14} // restore context
bx lr
/******************************************************************************
* Semi-fixsliced implementation of AES-256.
*
* Two blocks are encrypted in parallel.
*
* Note that additional 4 bytes are allocated on the stack as the function takes
* 5 arguments as input.
******************************************************************************/
@ void aes256_encrypt_sfs(u8* ctext, u8* ctext_bis, const u8* ptext,
@ const u8* ptext_bis, const u32* rkey);
.global aes256_encrypt_sfs
.type aes256_encrypt_sfs,%function
.align 2
aes256_encrypt_sfs:
// In:  r0/r1 = output addresses for the 2 ciphertext blocks
//      r2/r3 = addresses of the 2 input 128-bit plaintext blocks
//      5th argument = pointer to the round keys, passed on the caller's
//      stack (reached at sp+112 = 56 tmp bytes + 56 saved regs).
// Semi-fixsliced AES-256 (14 rounds): odd rounds use double_shiftrows +
// mixcolumns_3 instead of the per-round mixcolumns_1/2 of the ffs variant.
push {r0-r12,r14} // save context (14 words = 56 bytes)
sub.w sp, #56 // allow space on the stack for tmp var
ldr.w r4, [r2] // load the 1st 128-bit blocks in r4-r7
ldr r5, [r2, #4]
ldr r6, [r2, #8]
ldr r7, [r2, #12]
ldr.w r8, [r3] // load the 2nd 128-bit blocks in r8-r11
ldr r9, [r3, #4]
ldr r10,[r3, #8]
ldr r11,[r3, #12]
ldr.w r1, [sp, #112] // load 'rkey' argument from the stack
str.w r1, [sp, #48] // store it there for 'add_round_key'
bl packing // pack the 2 input blocks
bl ark_sbox // ark + sbox (round 0)
bl mixcolumns_0 // mixcolumns (round 0)
bl ark_sbox // ark + sbox (round 1)
bl double_shiftrows // to resynchronize with the classical rep
bl mixcolumns_3 // mixcolumns (round 1)
bl ark_sbox // ark + sbox (round 2)
bl mixcolumns_0 // mixcolumns (round 2)
bl ark_sbox // ark + sbox (round 3)
bl double_shiftrows // to resynchronize with the classical rep
bl mixcolumns_3 // mixcolumns (round 3)
bl ark_sbox // ark + sbox (round 4)
bl mixcolumns_0 // mixcolumns (round 4)
bl ark_sbox // ark + sbox (round 5)
bl double_shiftrows // to resynchronize with the classical rep
bl mixcolumns_3 // mixcolumns (round 5)
bl ark_sbox // ark + sbox (round 6)
bl mixcolumns_0 // mixcolumns (round 6)
bl ark_sbox // ark + sbox (round 7)
bl double_shiftrows // to resynchronize with the classical rep
bl mixcolumns_3 // mixcolumns (round 7)
bl ark_sbox // ark + sbox (round 8)
bl mixcolumns_0 // mixcolumns (round 8)
bl ark_sbox // ark + sbox (round 9)
bl double_shiftrows // to resynchronize with the classical rep
bl mixcolumns_3 // mixcolumns (round 9)
bl ark_sbox // ark + sbox (round 10)
bl mixcolumns_0 // mixcolumns (round 10)
bl ark_sbox // ark + sbox (round 11)
bl double_shiftrows // to resynchronize with the classical rep
bl mixcolumns_3 // mixcolumns (round 11)
bl ark_sbox // ark + sbox (round 12)
bl mixcolumns_0 // mixcolumns (round 12)
bl ark_sbox // ark + sbox (round 13)
bl double_shiftrows // to resynchronize with the classical rep
ldr r14, [sp, #48] // ---------------------------------------
ldmia r14!, {r4,r5,r10,r12} //
eor r4, r1 //
eor r5, r3 //
eor r6, r10 //
eor r7, r12 // Last add_round_key
ldmia r14!, {r1,r3,r10,r12} //
eor r8, r1 //
eor r9, r0, r3 //
eor r10, r2 //
eor r11, r12 // ---------------------------------------
bl unpacking // unpack the internal state
ldrd r0, r1, [sp, #56] // restore the addr to store the ciphertext
add.w sp, #64 // 56 tmp bytes + 8 (saved r0/r1 just reloaded)
str.w r4, [r0] // store the ciphertext
str r5, [r0, #4]
str r6, [r0, #8]
str r7, [r0, #12]
str.w r8, [r1] // store the ciphertext
str r9, [r1, #4]
str r10,[r1, #8]
str r11,[r1, #12]
pop {r2-r12, r14} // restore context
bx lr
/******************************************************************************
 * Second source file begins below.
 * repo: aadomn/aes (56,744 bytes)
 * path: armcortexm/fixslicing/aes_keyschedule.s
 ******************************************************************************/
/******************************************************************************
* ARM assembly implementations of the AES-128 and AES-256 key schedule to
* match fixslicing.
* Note that those implementations are fully bitsliced and do not rely on any
* Look-Up Table (LUT).
*
* See the paper at https://eprint.iacr.org/2020/1123.pdf for more details.
*
* @author Alexandre Adomnicai, Nanyang Technological University, Singapore
* alexandre.adomnicai@ntu.edu.sg
*
* @date October 2020
******************************************************************************/
.syntax unified
.thumb
/******************************************************************************
* Macro to compute the SWAPMOVE technique: swap the bits in 'in1' masked by 'm'
* by the bits in 'in0' masked by 'm << n' and put the results in 'out0', 'out1'
******************************************************************************/
.macro swpmv out0, out1, in0, in1, m, n, tmp
// SWAPMOVE: exchange the bits of in1 selected by m with the bits of in0
// selected by (m << n). tmp is clobbered; out0/out1 may alias in0/in1.
eor \tmp, \in1, \in0, lsr \n // tmp <- in1 ^ (in0 >> n)
and \tmp, \m // tmp <- tmp & m (bit positions that differ)
eor \out1, \in1, \tmp // out1 <- in1 with selected bits swapped in
eor \out0, \in0, \tmp, lsl \n // out0 <- in0 with selected bits swapped in
.endm
/******************************************************************************
* Packing routine. Note that it is the same as the one used in the encryption
* function so some code size could be saved by merging the two files.
******************************************************************************/
.align 2
packing:
// Packs two 128-bit blocks into the bitsliced representation via a fixed
// sequence of SWAPMOVEs with masks 0x55555555, 0x33333333, 0x0f0f0f0f.
// In:  r4-r7 = 1st 128-bit block, r8-r11 = 2nd 128-bit block
// Out: packed state in r4-r11
// Clobbers: r0-r3, r12 (swpmv temporary)
movw r3, #0x0f0f
movt r3, #0x0f0f // r3 <- 0x0f0f0f0f (mask for SWAPMOVE)
eor r2, r3, r3, lsl #2 // r2 <- 0x33333333 (mask for SWAPMOVE)
eor r1, r2, r2, lsl #1 // r1 <- 0x55555555 (mask for SWAPMOVE)
swpmv r8, r4, r8, r4, r1, #1, r12
swpmv r9, r5, r9, r5, r1, #1, r12
swpmv r10, r6, r10, r6, r1, #1, r12
swpmv r11, r7, r11, r7, r1, #1, r12
swpmv r0, r4, r5, r4, r2, #2, r12
swpmv r9, r5, r9, r8, r2, #2, r12
swpmv r7, r8, r7, r6, r2, #2, r12
swpmv r11, r2, r11, r10, r2, #2, r12
swpmv r8, r4, r8, r4, r3, #4, r12
swpmv r10, r6, r7, r0, r3, #4, r12
swpmv r11, r7, r11, r9, r3, #4, r12
swpmv r9, r5, r2, r5, r3, #4, r12
bx lr
/******************************************************************************
* Subroutine that computes S-box. Note that the same code is used in the
* encryption function, so some code size could be saved by merging the 2 files.
* Credits to https://github.com/Ko-/aes-armcortexm.
******************************************************************************/
.align 2
sbox:
// Bitsliced AES S-box over the packed state.
// In:  input bits U0-U7 in r4-r11 (U0=r4 ... U7=r11, per the 'Exec' comments)
// Out: S0-S7 in r1,r3,r6,r7,r8,r0,r2,r11 (mapping listed after 'bx lr')
// Stack: sp+0..sp+48 hold spilled intermediates; lr is spilled to sp+52.
// The NOT gates marked '^ 1' in the comments are omitted here and are
// compensated in the xorcolumns routines (see 'NOT omitted in sbox' there).
str r14, [sp, #52]
eor r1, r7, r9 //Exec y14 = U3 ^ U5; into r1
eor r3, r4, r10 //Exec y13 = U0 ^ U6; into r3
eor r2, r3, r1 //Exec y12 = y13 ^ y14; into r2
eor r0, r8, r2 //Exec t1 = U4 ^ y12; into r0
eor r14, r0, r9 //Exec y15 = t1 ^ U5; into r14
and r12, r2, r14 //Exec t2 = y12 & y15; into r12
eor r8, r14, r11 //Exec y6 = y15 ^ U7; into r8
eor r0, r0, r5 //Exec y20 = t1 ^ U1; into r0
str.w r2, [sp, #44] //Store r2/y12 on stack
eor r2, r4, r7 //Exec y9 = U0 ^ U3; into r2
str r0, [sp, #40] //Store r0/y20 on stack
eor r0, r0, r2 //Exec y11 = y20 ^ y9; into r0
str r2, [sp, #36] //Store r2/y9 on stack
and r2, r2, r0 //Exec t12 = y9 & y11; into r2
str r8, [sp, #32] //Store r8/y6 on stack
eor r8, r11, r0 //Exec y7 = U7 ^ y11; into r8
eor r9, r4, r9 //Exec y8 = U0 ^ U5; into r9
eor r6, r5, r6 //Exec t0 = U1 ^ U2; into r6
eor r5, r14, r6 //Exec y10 = y15 ^ t0; into r5
str r14, [sp, #28] //Store r14/y15 on stack
eor r14, r5, r0 //Exec y17 = y10 ^ y11; into r14
str.w r1, [sp, #24] //Store r1/y14 on stack
and r1, r1, r14 //Exec t13 = y14 & y17; into r1
eor r1, r1, r2 //Exec t14 = t13 ^ t12; into r1
str r14, [sp, #20] //Store r14/y17 on stack
eor r14, r5, r9 //Exec y19 = y10 ^ y8; into r14
str.w r5, [sp, #16] //Store r5/y10 on stack
and r5, r9, r5 //Exec t15 = y8 & y10; into r5
eor r2, r5, r2 //Exec t16 = t15 ^ t12; into r2
eor r5, r6, r0 //Exec y16 = t0 ^ y11; into r5
str.w r0, [sp, #12] //Store r0/y11 on stack
eor r0, r3, r5 //Exec y21 = y13 ^ y16; into r0
str r3, [sp, #8] //Store r3/y13 on stack
and r3, r3, r5 //Exec t7 = y13 & y16; into r3
str r5, [sp, #4] //Store r5/y16 on stack
str r11, [sp, #0] //Store r11/U7 on stack
eor r5, r4, r5 //Exec y18 = U0 ^ y16; into r5
eor r6, r6, r11 //Exec y1 = t0 ^ U7; into r6
eor r7, r6, r7 //Exec y4 = y1 ^ U3; into r7
and r11, r7, r11 //Exec t5 = y4 & U7; into r11
eor r11, r11, r12 //Exec t6 = t5 ^ t2; into r11
eor r11, r11, r2 //Exec t18 = t6 ^ t16; into r11
eor r14, r11, r14 //Exec t22 = t18 ^ y19; into r14
eor r4, r6, r4 //Exec y2 = y1 ^ U0; into r4
and r11, r4, r8 //Exec t10 = y2 & y7; into r11
eor r11, r11, r3 //Exec t11 = t10 ^ t7; into r11
eor r2, r11, r2 //Exec t20 = t11 ^ t16; into r2
eor r2, r2, r5 //Exec t24 = t20 ^ y18; into r2
eor r10, r6, r10 //Exec y5 = y1 ^ U6; into r10
and r11, r10, r6 //Exec t8 = y5 & y1; into r11
eor r3, r11, r3 //Exec t9 = t8 ^ t7; into r3
eor r3, r3, r1 //Exec t19 = t9 ^ t14; into r3
eor r3, r3, r0 //Exec t23 = t19 ^ y21; into r3
eor r0, r10, r9 //Exec y3 = y5 ^ y8; into r0
ldr r11, [sp, #32] //Load y6 into r11
and r5, r0, r11 //Exec t3 = y3 & y6; into r5
eor r12, r5, r12 //Exec t4 = t3 ^ t2; into r12
ldr r5, [sp, #40] //Load y20 into r5
str r7, [sp, #32] //Store r7/y4 on stack
eor r12, r12, r5 //Exec t17 = t4 ^ y20; into r12
eor r1, r12, r1 //Exec t21 = t17 ^ t14; into r1
and r12, r1, r3 //Exec t26 = t21 & t23; into r12
eor r5, r2, r12 //Exec t27 = t24 ^ t26; into r5
eor r12, r14, r12 //Exec t31 = t22 ^ t26; into r12
eor r1, r1, r14 //Exec t25 = t21 ^ t22; into r1
and r7, r1, r5 //Exec t28 = t25 & t27; into r7
eor r14, r7, r14 //Exec t29 = t28 ^ t22; into r14
and r4, r14, r4 //Exec z14 = t29 & y2; into r4
and r8, r14, r8 //Exec z5 = t29 & y7; into r8
eor r7, r3, r2 //Exec t30 = t23 ^ t24; into r7
and r12, r12, r7 //Exec t32 = t31 & t30; into r12
eor r12, r12, r2 //Exec t33 = t32 ^ t24; into r12
eor r7, r5, r12 //Exec t35 = t27 ^ t33; into r7
and r2, r2, r7 //Exec t36 = t24 & t35; into r2
eor r5, r5, r2 //Exec t38 = t27 ^ t36; into r5
and r5, r14, r5 //Exec t39 = t29 & t38; into r5
eor r1, r1, r5 //Exec t40 = t25 ^ t39; into r1
eor r5, r14, r1 //Exec t43 = t29 ^ t40; into r5
ldr.w r7, [sp, #4] //Load y16 into r7
and r7, r5, r7 //Exec z3 = t43 & y16; into r7
eor r8, r7, r8 //Exec tc12 = z3 ^ z5; into r8
str r8, [sp, #40] //Store r8/tc12 on stack
ldr r8, [sp, #8] //Load y13 into r8
and r8, r5, r8 //Exec z12 = t43 & y13; into r8
and r10, r1, r10 //Exec z13 = t40 & y5; into r10
and r6, r1, r6 //Exec z4 = t40 & y1; into r6
eor r6, r7, r6 //Exec tc6 = z3 ^ z4; into r6
eor r3, r3, r12 //Exec t34 = t23 ^ t33; into r3
eor r3, r2, r3 //Exec t37 = t36 ^ t34; into r3
eor r1, r1, r3 //Exec t41 = t40 ^ t37; into r1
ldr.w r5, [sp, #16] //Load y10 into r5
and r2, r1, r5 //Exec z8 = t41 & y10; into r2
and r9, r1, r9 //Exec z17 = t41 & y8; into r9
str r9, [sp, #16] //Store r9/z17 on stack
eor r5, r12, r3 //Exec t44 = t33 ^ t37; into r5
ldr r9, [sp, #28] //Load y15 into r9
ldr.w r7, [sp, #44] //Load y12 into r7
and r9, r5, r9 //Exec z0 = t44 & y15; into r9
and r7, r5, r7 //Exec z9 = t44 & y12; into r7
and r0, r3, r0 //Exec z10 = t37 & y3; into r0
and r3, r3, r11 //Exec z1 = t37 & y6; into r3
eor r3, r3, r9 //Exec tc5 = z1 ^ z0; into r3
eor r3, r6, r3 //Exec tc11 = tc6 ^ tc5; into r3
ldr r11, [sp, #32] //Load y4 into r11
ldr.w r5, [sp, #20] //Load y17 into r5
and r11, r12, r11 //Exec z11 = t33 & y4; into r11
eor r14, r14, r12 //Exec t42 = t29 ^ t33; into r14
eor r1, r14, r1 //Exec t45 = t42 ^ t41; into r1
and r5, r1, r5 //Exec z7 = t45 & y17; into r5
eor r6, r5, r6 //Exec tc8 = z7 ^ tc6; into r6
ldr r5, [sp, #24] //Load y14 into r5
str r4, [sp, #32] //Store r4/z14 on stack
and r1, r1, r5 //Exec z16 = t45 & y14; into r1
ldr r5, [sp, #12] //Load y11 into r5
ldr r4, [sp, #36] //Load y9 into r4
and r5, r14, r5 //Exec z6 = t42 & y11; into r5
eor r5, r5, r6 //Exec tc16 = z6 ^ tc8; into r5
and r4, r14, r4 //Exec z15 = t42 & y9; into r4
eor r14, r4, r5 //Exec tc20 = z15 ^ tc16; into r14
eor r4, r4, r1 //Exec tc1 = z15 ^ z16; into r4
eor r1, r0, r4 //Exec tc2 = z10 ^ tc1; into r1
eor r0, r1, r11 //Exec tc21 = tc2 ^ z11; into r0
eor r7, r7, r1 //Exec tc3 = z9 ^ tc2; into r7
eor r1, r7, r5 //Exec S0 = tc3 ^ tc16; into r1
eor r7, r7, r3 //Exec S3 = tc3 ^ tc11; into r7
eor r3, r7, r5 //Exec S1 = S3 ^ tc16 ^ 1; into r3
eor r11, r10, r4 //Exec tc13 = z13 ^ tc1; into r11
ldr.w r4, [sp, #0] //Load U7 into r4
and r12, r12, r4 //Exec z2 = t33 & U7; into r12
eor r9, r9, r12 //Exec tc4 = z0 ^ z2; into r9
eor r12, r8, r9 //Exec tc7 = z12 ^ tc4; into r12
eor r2, r2, r12 //Exec tc9 = z8 ^ tc7; into r2
eor r2, r6, r2 //Exec tc10 = tc8 ^ tc9; into r2
ldr.w r4, [sp, #32] //Load z14 into r4
eor r12, r4, r2 //Exec tc17 = z14 ^ tc10; into r12
eor r0, r0, r12 //Exec S5 = tc21 ^ tc17; into r0
eor r6, r12, r14 //Exec tc26 = tc17 ^ tc20; into r6
ldr.w r4, [sp, #16] //Load z17 into r4
ldr r12, [sp, #40] //Load tc12 into r12
eor r6, r6, r4 //Exec S2 = tc26 ^ z17 ^ 1; into r6
eor r12, r9, r12 //Exec tc14 = tc4 ^ tc12; into r12
eor r14, r11, r12 //Exec tc18 = tc13 ^ tc14; into r14
eor r2, r2, r14 //Exec S6 = tc10 ^ tc18 ^ 1; into r2
eor r11, r8, r14 //Exec S7 = z12 ^ tc18 ^ 1; into r11
ldr r14, [sp, #52] // restore link register
eor r8, r12, r7 //Exec S4 = tc14 ^ S3; into r8
bx lr
// [('r0', 'S5'), ('r1', 'S0'), ('r2', 'S6'), ('r3', 'S1'),
// ('r6', 'S2'),('r7', 'S3'), ('r8', 'S4'), ('r11', 'S7')]
/******************************************************************************
* Subroutine that XORs the columns after the S-box during the AES-128 key
* schedule round function, for rounds i such that (i % 4) == 0.
* In:    bitsliced S-box output in r0-r3, r6-r8, r11 (register mapping as
*        listed after the sbox routine); current 'rkeys' address at [sp, #56].
* Out:   new round-key words written to 'rkeys'; the advanced 'rkeys' address
*        is stored back to [sp, #56].
* The mask cascade (0xc0c0c0c0 -> 0x30303030 -> 0x0c0c0c0c -> 0x03030303)
* propagates each column XOR through the four 2-bit lanes of every byte; the
* 'ror #2' on the first XOR of each column performs the RotWord operation in
* this packed representation.
* The NOT instructions compensate for inverters omitted in the sbox routine.
* Note that the code size could be reduced at the cost of some instructions
* since some redundant code is applied on different registers.
******************************************************************************/
.align 2
aes128_xorcolumns_rotword:
ldr r12, [sp, #56] // restore 'rkeys' address
ldr.w r5, [r12, #28] // load rkey word of rkey from prev round
movw r4, #0xc0c0
movt r4, #0xc0c0 // r4 <- 0xc0c0c0c0
eor r11, r5, r11, ror #2 // r11<- r5 ^ (r11 >>> 2)
bic r11, r4, r11 // r11<- ~r11 & 0xc0c0c0c0 (NOT omitted in sbox)
eor r9, r5, r11, ror #2 // r9 <- r5 ^ (r11 >>> 2)
and r9, r9, r4, ror #2 // r9 <- r9 & 0x30303030
orr r11, r11, r9 // r11<- r11 | r9
eor r9, r5, r11, ror #2 // r9 <- r5 ^ (r11 >>> 2)
and r9, r9, r4, ror #4 // r9 <- r9 & 0x0c0c0c0c
orr r11, r11, r9 // r11<- r11 | r9
eor r9, r5, r11, ror #2 // r9 <- r5 ^ (r11 >>> 2)
and r9, r9, r4, ror #6 // r9 <- r9 & 0x03030303
orr r11, r11, r9 // r11<- r11 | r9
mvn r9, r5 // NOT omitted in sbox
ldr.w r5, [r12, #24] // load rkey word of rkey from prev round
str r9, [r12, #28] // store new rkey word after NOT
str r11, [r12, #60] // store new rkey word in 'rkeys'
eor r10, r5, r2, ror #2 // r10<- r5 ^ (r2 >>> 2)
bic r10, r4, r10 // r10<- ~r10 & 0xc0c0c0c0 (NOT omitted in sbox)
eor r9, r5, r10, ror #2 // r9 <- r5 ^ (r10 >>> 2)
and r9, r9, r4, ror #2 // r9 <- r9 & 0x30303030
orr r10, r10, r9 // r10<- r10 | r9
eor r9, r5, r10, ror #2 // r9 <- r5 ^ (r10 >>> 2)
and r9, r9, r4, ror #4 // r9 <- r9 & 0x0c0c0c0c
orr r10, r10, r9 // r10<- r10 | r9
eor r9, r5, r10, ror #2 // r9 <- r5 ^ (r10 >>> 2)
and r9, r9, r4, ror #6 // r9 <- r9 & 0x03030303
orr r10, r10, r9 // r10<- r10 | r9
mvn r9, r5 // NOT omitted in sbox
ldr.w r2, [r12, #20] // load rkey word of rkey from prev round
str r9, [r12, #24] // store new rkey word after NOT
str r10, [r12, #56] // store new rkey word in 'rkeys'
eor r9, r2, r0, ror #2 // r9 <- r2 ^ (r0 >>> 2)
and r9, r4, r9 // r9 <- r9 & 0xc0c0c0c0
eor r0, r2, r9, ror #2 // r0 <- r2 ^ (r9 >>> 2)
and r0, r0, r4, ror #2 // r0 <- r0 & 0x30303030
orr r9, r9, r0 // r9 <- r9 | r0
eor r0, r2, r9, ror #2 // r0 <- r2 ^ (r9 >>> 2)
and r0, r0, r4, ror #4 // r0 <- r0 & 0x0c0c0c0c
orr r9, r9, r0 // r9 <- r9 | r0
eor r0, r2, r9, ror #2 // r0 <- r2 ^ (r9 >>> 2)
and r0, r0, r4, ror #6 // r0 <- r0 & 0x03030303
orr r9, r9, r0 // r9 <- r9 | r0
ldr.w r2, [r12, #16] // load rkey word of rkey from prev round
str.w r9, [r12, #52] // store new rkey word in 'rkeys'
eor r8, r2, r8, ror #2 // r8 <- r2 ^ (r8 >>> 2)
and r8, r4, r8 // r8 <- r8 & 0xc0c0c0c0
eor r0, r2, r8, ror #2 // r0 <- r2 ^ (r8 >>> 2)
and r0, r0, r4, ror #2 // r0 <- r0 & 0x30303030
orr r8, r8, r0 // r8 <- r8 | r0
eor r0, r2, r8, ror #2 // r0 <- r2 ^ (r8 >>> 2)
and r0, r0, r4, ror #4 // r0 <- r0 & 0x0c0c0c0c
orr r8, r8, r0 // r8 <- r8 | r0
eor r0, r2, r8, ror #2 // r0 <- r2 ^ (r8 >>> 2)
and r0, r0, r4, ror #6 // r0 <- r0 & 0x03030303
orr r8, r8, r0 // r8 <- r8 | r0
ldr.w r2, [r12, #12] // load rkey word of rkey from prev round
str.w r8, [r12, #48] // store new rkey word in 'rkeys'
eor r7, r2, r7, ror #2 // r7 <- r2 ^ (r7 >>> 2)
and r7, r4, r7 // r7 <- r7 & 0xc0c0c0c0
eor r0, r2, r7, ror #2 // r0 <- r2 ^ (r7 >>> 2)
and r0, r0, r4, ror #2 // r0 <- r0 & 0x30303030
orr r7, r7, r0 // r7 <- r7 | r0
eor r0, r2, r7, ror #2 // r0 <- r2 ^ (r7 >>> 2)
and r0, r0, r4, ror #4 // r0 <- r0 & 0x0c0c0c0c
orr r7, r7, r0 // r7 <- r7 | r0
eor r0, r2, r7, ror #2 // r0 <- r2 ^ (r7 >>> 2)
and r0, r0, r4, ror #6 // r0 <- r0 & 0x03030303
orr r7, r7, r0 // r7 <- r7 | r0
ldr.w r2, [r12, #8] // load rkey word of rkey from prev round
str.w r7, [r12, #44] // store new rkey word in 'rkeys'
eor r6, r2, r6, ror #2 // r6 <- r2 ^ (r6 >>> 2)
bic r6, r4, r6 // r6 <- ~r6 & 0xc0c0c0c0 (NOT omitted in sbox)
eor r0, r2, r6, ror #2 // r0 <- r2 ^ (r6 >>> 2)
and r0, r0, r4, ror #2 // r0 <- r0 & 0x30303030
orr r6, r6, r0 // r6 <- r6 | r0
eor r0, r2, r6, ror #2 // r0 <- r2 ^ (r6 >>> 2)
and r0, r0, r4, ror #4 // r0 <- r0 & 0x0c0c0c0c
orr r6, r6, r0 // r6 <- r6 | r0
eor r0, r2, r6, ror #2 // r0 <- r2 ^ (r6 >>> 2)
and r0, r0, r4, ror #6 // r0 <- r0 & 0x03030303
orr r6, r6, r0 // r6 <- r6 | r0
mvn r0, r2 // NOT omitted in sbox
ldr.w r2, [r12, #4] // load rkey word of rkey from prev round
str.w r0, [r12, #8] // store new rkey word after NOT
str.w r6, [r12, #40] // store new rkey word in 'rkeys'
eor r5, r2, r3, ror #2 // r5 <- r2 ^ (r3 >>> 2)
bic r5, r4, r5 // r5 <- ~r5 & 0xc0c0c0c0 (NOT omitted in sbox)
eor r0, r2, r5, ror #2 // r0 <- r2 ^ (r5 >>> 2)
and r0, r0, r4, ror #2 // r0 <- r0 & 0x30303030
orr r5, r5, r0 // r5 <- r5 | r0
eor r0, r2, r5, ror #2 // r0 <- r2 ^ (r5 >>> 2)
and r0, r0, r4, ror #4 // r0 <- r0 & 0x0c0c0c0c
orr r5, r5, r0 // r5 <- r5 | r0
eor r0, r2, r5, ror #2 // r0 <- r2 ^ (r5 >>> 2)
and r0, r0, r4, ror #6 // r0 <- r0 & 0x03030303
orr r5, r5, r0 // r5 <- r5 | r0
mvn r0, r2 // NOT omitted in sbox
ldr.w r2, [r12], #32 // load rkey word; advance r12 to the new rkey
str.w r0, [r12, #-28] // store new rkey word after NOT
str.w r5, [r12, #4] // store new rkey word in 'rkeys'
eor r3, r2, r1, ror #2 // r3 <- r2 ^ (r1 >>> 2)
and r3, r4, r3 // r3 <- r3 & 0xc0c0c0c0
eor r0, r2, r3, ror #2 // r0 <- r2 ^ (r3 >>> 2)
and r0, r0, r4, ror #2 // r0 <- r0 & 0x30303030
orr r3, r3, r0 // r3 <- r3 | r0
eor r0, r2, r3, ror #2 // r0 <- r2 ^ (r3 >>> 2)
and r0, r0, r4, ror #4 // r0 <- r0 & 0x0c0c0c0c
orr r3, r3, r0 // r3 <- r3 | r0
eor r0, r2, r3, ror #2 // r0 <- r2 ^ (r3 >>> 2)
and r0, r0, r4, ror #6 // r0 <- r0 & 0x03030303
orr r4, r3, r0 // r4 <- r3 | r0
str.w r4, [r12]
str.w r12, [sp, #56] // store the new rkeys address on the stack
bx lr
/******************************************************************************
* Subroutine that XORs the columns after the S-box during the AES-256 key
* schedule round function, for rounds i such that (i % 4) == 0.
* Differs from 'aes128_xorcolumns_rotword' by the rkeys' indexes to be involved
* in XORs (AES-256 keeps two 256-bit packed key halves, hence the +64-byte
* store offsets).
* In:    bitsliced S-box output in r0-r3, r6-r8, r11; current 'rkeys' address
*        at [sp, #56]. The advanced address is written back to [sp, #56].
* The NOT instructions compensate for inverters omitted in the sbox routine.
******************************************************************************/
.align 2
aes256_xorcolumns_rotword:
ldr r12, [sp, #56] // restore 'rkeys' address
ldr.w r5, [r12, #28] // load rkey word of rkey from prev round
movw r4, #0xc0c0
movt r4, #0xc0c0 // r4 <- 0xc0c0c0c0
eor r11, r5, r11, ror #2 // r11<- r5 ^ (r11 >>> 2)
bic r11, r4, r11 // r11<- ~r11 & 0xc0c0c0c0 (NOT omitted in sbox)
eor r9, r5, r11, ror #2 // r9 <- r5 ^ (r11 >>> 2)
and r9, r9, r4, ror #2 // r9 <- r9 & 0x30303030
orr r11, r11, r9 // r11<- r11 | r9
eor r9, r5, r11, ror #2 // r9 <- r5 ^ (r11 >>> 2)
and r9, r9, r4, ror #4 // r9 <- r9 & 0x0c0c0c0c
orr r11, r11, r9 // r11<- r11 | r9
eor r9, r5, r11, ror #2 // r9 <- r5 ^ (r11 >>> 2)
and r9, r9, r4, ror #6 // r9 <- r9 & 0x03030303
orr r11, r11, r9 // r11<- r11 | r9
mvn r9, r5 // NOT omitted in sbox
ldr.w r5, [r12, #24] // load rkey word of rkey from prev round
str r9, [r12, #28] // store new rkey word after NOT
str r11, [r12, #92] // store new rkey word in 'rkeys'
eor r10, r5, r2, ror #2 // r10<- r5 ^ (r2 >>> 2)
bic r10, r4, r10 // r10<- ~r10 & 0xc0c0c0c0 (NOT omitted in sbox)
eor r9, r5, r10, ror #2 // r9 <- r5 ^ (r10 >>> 2)
and r9, r9, r4, ror #2 // r9 <- r9 & 0x30303030
orr r10, r10, r9 // r10<- r10 | r9
eor r9, r5, r10, ror #2 // r9 <- r5 ^ (r10 >>> 2)
and r9, r9, r4, ror #4 // r9 <- r9 & 0x0c0c0c0c
orr r10, r10, r9 // r10<- r10 | r9
eor r9, r5, r10, ror #2 // r9 <- r5 ^ (r10 >>> 2)
and r9, r9, r4, ror #6 // r9 <- r9 & 0x03030303
orr r10, r10, r9 // r10<- r10 | r9
mvn r9, r5 // NOT omitted in sbox
ldr.w r2, [r12, #20] // load rkey word of rkey from prev round
str r9, [r12, #24] // store new rkey word after NOT
str r10, [r12, #88] // store new rkey word in 'rkeys'
eor r9, r2, r0, ror #2 // r9 <- r2 ^ (r0 >>> 2)
and r9, r4, r9 // r9 <- r9 & 0xc0c0c0c0
eor r0, r2, r9, ror #2 // r0 <- r2 ^ (r9 >>> 2)
and r0, r0, r4, ror #2 // r0 <- r0 & 0x30303030
orr r9, r9, r0 // r9 <- r9 | r0
eor r0, r2, r9, ror #2 // r0 <- r2 ^ (r9 >>> 2)
and r0, r0, r4, ror #4 // r0 <- r0 & 0x0c0c0c0c
orr r9, r9, r0 // r9 <- r9 | r0
eor r0, r2, r9, ror #2 // r0 <- r2 ^ (r9 >>> 2)
and r0, r0, r4, ror #6 // r0 <- r0 & 0x03030303
orr r9, r9, r0 // r9 <- r9 | r0
ldr.w r2, [r12, #16] // load rkey word of rkey from prev round
str.w r9, [r12, #84] // store new rkey word in 'rkeys'
eor r8, r2, r8, ror #2 // r8 <- r2 ^ (r8 >>> 2)
and r8, r4, r8 // r8 <- r8 & 0xc0c0c0c0
eor r0, r2, r8, ror #2 // r0 <- r2 ^ (r8 >>> 2)
and r0, r0, r4, ror #2 // r0 <- r0 & 0x30303030
orr r8, r8, r0 // r8 <- r8 | r0
eor r0, r2, r8, ror #2 // r0 <- r2 ^ (r8 >>> 2)
and r0, r0, r4, ror #4 // r0 <- r0 & 0x0c0c0c0c
orr r8, r8, r0 // r8 <- r8 | r0
eor r0, r2, r8, ror #2 // r0 <- r2 ^ (r8 >>> 2)
and r0, r0, r4, ror #6 // r0 <- r0 & 0x03030303
orr r8, r8, r0 // r8 <- r8 | r0
ldr.w r2, [r12, #12] // load rkey word of rkey from prev round
str.w r8, [r12, #80] // store new rkey word in 'rkeys'
eor r7, r2, r7, ror #2 // r7 <- r2 ^ (r7 >>> 2)
and r7, r4, r7 // r7 <- r7 & 0xc0c0c0c0
eor r0, r2, r7, ror #2 // r0 <- r2 ^ (r7 >>> 2)
and r0, r0, r4, ror #2 // r0 <- r0 & 0x30303030
orr r7, r7, r0 // r7 <- r7 | r0
eor r0, r2, r7, ror #2 // r0 <- r2 ^ (r7 >>> 2)
and r0, r0, r4, ror #4 // r0 <- r0 & 0x0c0c0c0c
orr r7, r7, r0 // r7 <- r7 | r0
eor r0, r2, r7, ror #2 // r0 <- r2 ^ (r7 >>> 2)
and r0, r0, r4, ror #6 // r0 <- r0 & 0x03030303
orr r7, r7, r0 // r7 <- r7 | r0
ldr.w r2, [r12, #8] // load rkey word of rkey from prev round
str.w r7, [r12, #76] // store new rkey word in 'rkeys'
eor r6, r2, r6, ror #2 // r6 <- r2 ^ (r6 >>> 2)
bic r6, r4, r6 // r6 <- ~r6 & 0xc0c0c0c0 (NOT omitted in sbox)
eor r0, r2, r6, ror #2 // r0 <- r2 ^ (r6 >>> 2)
and r0, r0, r4, ror #2 // r0 <- r0 & 0x30303030
orr r6, r6, r0 // r6 <- r6 | r0
eor r0, r2, r6, ror #2 // r0 <- r2 ^ (r6 >>> 2)
and r0, r0, r4, ror #4 // r0 <- r0 & 0x0c0c0c0c
orr r6, r6, r0 // r6 <- r6 | r0
eor r0, r2, r6, ror #2 // r0 <- r2 ^ (r6 >>> 2)
and r0, r0, r4, ror #6 // r0 <- r0 & 0x03030303
orr r6, r6, r0 // r6 <- r6 | r0
mvn r0, r2 // NOT omitted in sbox
ldr.w r2, [r12, #4] // load rkey word of rkey from prev round
str.w r0, [r12, #8] // store new rkey word after NOT
str.w r6, [r12, #72] // store new rkey word in 'rkeys'
eor r5, r2, r3, ror #2 // r5 <- r2 ^ (r3 >>> 2)
bic r5, r4, r5 // r5 <- ~r5 & 0xc0c0c0c0 (NOT omitted in sbox)
eor r0, r2, r5, ror #2 // r0 <- r2 ^ (r5 >>> 2)
and r0, r0, r4, ror #2 // r0 <- r0 & 0x30303030
orr r5, r5, r0 // r5 <- r5 | r0
eor r0, r2, r5, ror #2 // r0 <- r2 ^ (r5 >>> 2)
and r0, r0, r4, ror #4 // r0 <- r0 & 0x0c0c0c0c
orr r5, r5, r0 // r5 <- r5 | r0
eor r0, r2, r5, ror #2 // r0 <- r2 ^ (r5 >>> 2)
and r0, r0, r4, ror #6 // r0 <- r0 & 0x03030303
orr r5, r5, r0 // r5 <- r5 | r0
mvn r0, r2 // NOT omitted in sbox
ldr.w r2, [r12], #32 // load rkey word; advance r12 by one rkey block
str.w r0, [r12, #-28] // store new rkey word after NOT
str.w r5, [r12, #36] // store new rkey word in 'rkeys'
eor r3, r2, r1, ror #2 // r3 <- r2 ^ (r1 >>> 2)
and r3, r4, r3 // r3 <- r3 & 0xc0c0c0c0
eor r0, r2, r3, ror #2 // r0 <- r2 ^ (r3 >>> 2)
and r0, r0, r4, ror #2 // r0 <- r0 & 0x30303030
orr r3, r3, r0 // r3 <- r3 | r0
eor r0, r2, r3, ror #2 // r0 <- r2 ^ (r3 >>> 2)
and r0, r0, r4, ror #4 // r0 <- r0 & 0x0c0c0c0c
orr r3, r3, r0 // r3 <- r3 | r0
eor r0, r2, r3, ror #2 // r0 <- r2 ^ (r3 >>> 2)
and r0, r0, r4, ror #6 // r0 <- r0 & 0x03030303
orr r4, r3, r0 // r4 <- r3 | r0
str.w r4, [r12, #32]
str.w r12, [sp, #56] // store the new rkeys address on the stack
bx lr
/******************************************************************************
* Subroutine that XORs the columns after the S-box during the AES-256 key
* schedule round function, for the rounds in which the RotWord operation is
* NOT applied (the previous "(i %% 4) == 0" wording was a copy-paste of the
* rotword variant's header).
* It differs from 'aes256_xorcolumns_rotword' by the omission of the rotword
* operation (i.e. 'ror #26' instead of 'ror #2' in the first XOR of each
* column).
* In:    bitsliced S-box output in r0-r3, r6-r8, r11; current 'rkeys' address
*        at [sp, #56]. The advanced address is written back to [sp, #56].
******************************************************************************/
.align 2
aes256_xorcolumns:
ldr r12, [sp, #56] // restore 'rkeys' address
ldr.w r5, [r12, #28] // load rkey word of rkey from prev round
movw r4, #0xc0c0
movt r4, #0xc0c0 // r4 <- 0xc0c0c0c0
eor r11, r5, r11, ror #26 // r11<- r5 ^ (r11 >>> 26)
bic r11, r4, r11 // r11<- ~r11 & 0xc0c0c0c0 (NOT omitted in sbox)
eor r9, r5, r11, ror #2 // r9 <- r5 ^ (r11 >>> 2)
and r9, r9, r4, ror #2 // r9 <- r9 & 0x30303030
orr r11, r11, r9 // r11<- r11 | r9
eor r9, r5, r11, ror #2 // r9 <- r5 ^ (r11 >>> 2)
and r9, r9, r4, ror #4 // r9 <- r9 & 0x0c0c0c0c
orr r11, r11, r9 // r11<- r11 | r9
eor r9, r5, r11, ror #2 // r9 <- r5 ^ (r11 >>> 2)
and r9, r9, r4, ror #6 // r9 <- r9 & 0x03030303
orr r11, r11, r9 // r11<- r11 | r9
mvn r9, r5 // NOT omitted in sbox
ldr.w r5, [r12, #24] // load rkey word of rkey from prev round
str r9, [r12, #28] // store new rkey word after NOT
str r11, [r12, #92] // store new rkey word in 'rkeys'
eor r10, r5, r2, ror #26 // r10<- r5 ^ (r2 >>> 26)
bic r10, r4, r10 // r10<- ~r10 & 0xc0c0c0c0 (NOT omitted in sbox)
eor r9, r5, r10, ror #2 // r9 <- r5 ^ (r10 >>> 2)
and r9, r9, r4, ror #2 // r9 <- r9 & 0x30303030
orr r10, r10, r9 // r10<- r10 | r9
eor r9, r5, r10, ror #2 // r9 <- r5 ^ (r10 >>> 2)
and r9, r9, r4, ror #4 // r9 <- r9 & 0x0c0c0c0c
orr r10, r10, r9 // r10<- r10 | r9
eor r9, r5, r10, ror #2 // r9 <- r5 ^ (r10 >>> 2)
and r9, r9, r4, ror #6 // r9 <- r9 & 0x03030303
orr r10, r10, r9 // r10<- r10 | r9
mvn r9, r5 // NOT omitted in sbox
ldr.w r2, [r12, #20] // load rkey word of rkey from prev round
str r9, [r12, #24] // store new rkey word after NOT
str r10, [r12, #88] // store new rkey word in 'rkeys'
eor r9, r2, r0, ror #26 // r9 <- r2 ^ (r0 >>> 26)
and r9, r4, r9 // r9 <- r9 & 0xc0c0c0c0
eor r0, r2, r9, ror #2 // r0 <- r2 ^ (r9 >>> 2)
and r0, r0, r4, ror #2 // r0 <- r0 & 0x30303030
orr r9, r9, r0 // r9 <- r9 | r0
eor r0, r2, r9, ror #2 // r0 <- r2 ^ (r9 >>> 2)
and r0, r0, r4, ror #4 // r0 <- r0 & 0x0c0c0c0c
orr r9, r9, r0 // r9 <- r9 | r0
eor r0, r2, r9, ror #2 // r0 <- r2 ^ (r9 >>> 2)
and r0, r0, r4, ror #6 // r0 <- r0 & 0x03030303
orr r9, r9, r0 // r9 <- r9 | r0
ldr.w r2, [r12, #16] // load rkey word of rkey from prev round
str.w r9, [r12, #84] // store new rkey word in 'rkeys'
eor r8, r2, r8, ror #26 // r8 <- r2 ^ (r8 >>> 26)
and r8, r4, r8 // r8 <- r8 & 0xc0c0c0c0
eor r0, r2, r8, ror #2 // r0 <- r2 ^ (r8 >>> 2)
and r0, r0, r4, ror #2 // r0 <- r0 & 0x30303030
orr r8, r8, r0 // r8 <- r8 | r0
eor r0, r2, r8, ror #2 // r0 <- r2 ^ (r8 >>> 2)
and r0, r0, r4, ror #4 // r0 <- r0 & 0x0c0c0c0c
orr r8, r8, r0 // r8 <- r8 | r0
eor r0, r2, r8, ror #2 // r0 <- r2 ^ (r8 >>> 2)
and r0, r0, r4, ror #6 // r0 <- r0 & 0x03030303
orr r8, r8, r0 // r8 <- r8 | r0
ldr.w r2, [r12, #12] // load rkey word of rkey from prev round
str.w r8, [r12, #80] // store new rkey word in 'rkeys'
eor r7, r2, r7, ror #26 // r7 <- r2 ^ (r7 >>> 26)
and r7, r4, r7 // r7 <- r7 & 0xc0c0c0c0
eor r0, r2, r7, ror #2 // r0 <- r2 ^ (r7 >>> 2)
and r0, r0, r4, ror #2 // r0 <- r0 & 0x30303030
orr r7, r7, r0 // r7 <- r7 | r0
eor r0, r2, r7, ror #2 // r0 <- r2 ^ (r7 >>> 2)
and r0, r0, r4, ror #4 // r0 <- r0 & 0x0c0c0c0c
orr r7, r7, r0 // r7 <- r7 | r0
eor r0, r2, r7, ror #2 // r0 <- r2 ^ (r7 >>> 2)
and r0, r0, r4, ror #6 // r0 <- r0 & 0x03030303
orr r7, r7, r0 // r7 <- r7 | r0
ldr.w r2, [r12, #8] // load rkey word of rkey from prev round
str.w r7, [r12, #76] // store new rkey word in 'rkeys'
eor r6, r2, r6, ror #26 // r6 <- r2 ^ (r6 >>> 26)
bic r6, r4, r6 // r6 <- ~r6 & 0xc0c0c0c0 (NOT omitted in sbox)
eor r0, r2, r6, ror #2 // r0 <- r2 ^ (r6 >>> 2)
and r0, r0, r4, ror #2 // r0 <- r0 & 0x30303030
orr r6, r6, r0 // r6 <- r6 | r0
eor r0, r2, r6, ror #2 // r0 <- r2 ^ (r6 >>> 2)
and r0, r0, r4, ror #4 // r0 <- r0 & 0x0c0c0c0c
orr r6, r6, r0 // r6 <- r6 | r0
eor r0, r2, r6, ror #2 // r0 <- r2 ^ (r6 >>> 2)
and r0, r0, r4, ror #6 // r0 <- r0 & 0x03030303
orr r6, r6, r0 // r6 <- r6 | r0
mvn r0, r2 // NOT omitted in sbox
ldr.w r2, [r12, #4] // load rkey word of rkey from prev round
str.w r0, [r12, #8] // store new rkey word after NOT
str.w r6, [r12, #72] // store new rkey word in 'rkeys'
eor r5, r2, r3, ror #26 // r5 <- r2 ^ (r3 >>> 26)
bic r5, r4, r5 // r5 <- ~r5 & 0xc0c0c0c0 (NOT omitted in sbox)
eor r0, r2, r5, ror #2 // r0 <- r2 ^ (r5 >>> 2)
and r0, r0, r4, ror #2 // r0 <- r0 & 0x30303030
orr r5, r5, r0 // r5 <- r5 | r0
eor r0, r2, r5, ror #2 // r0 <- r2 ^ (r5 >>> 2)
and r0, r0, r4, ror #4 // r0 <- r0 & 0x0c0c0c0c
orr r5, r5, r0 // r5 <- r5 | r0
eor r0, r2, r5, ror #2 // r0 <- r2 ^ (r5 >>> 2)
and r0, r0, r4, ror #6 // r0 <- r0 & 0x03030303
orr r5, r5, r0 // r5 <- r5 | r0
mvn r0, r2 // NOT omitted in sbox
ldr.w r2, [r12], #32 // load rkey word; advance r12 by one rkey block
str.w r0, [r12, #-28] // store new rkey word after NOT
str.w r5, [r12, #36] // store new rkey word in 'rkeys'
eor r3, r2, r1, ror #26 // r3 <- r2 ^ (r1 >>> 26)
and r3, r4, r3 // r3 <- r3 & 0xc0c0c0c0
eor r0, r2, r3, ror #2 // r0 <- r2 ^ (r3 >>> 2)
and r0, r0, r4, ror #2 // r0 <- r0 & 0x30303030
orr r3, r3, r0 // r3 <- r3 | r0
eor r0, r2, r3, ror #2 // r0 <- r2 ^ (r3 >>> 2)
and r0, r0, r4, ror #4 // r0 <- r0 & 0x0c0c0c0c
orr r3, r3, r0 // r3 <- r3 | r0
eor r0, r2, r3, ror #2 // r0 <- r2 ^ (r3 >>> 2)
and r0, r0, r4, ror #6 // r0 <- r0 & 0x03030303
orr r4, r3, r0 // r4 <- r3 | r0
str.w r4, [r12, #32]
str.w r12, [sp, #56] // store the new rkeys address on the stack
bx lr
/******************************************************************************
* Applies ShiftRows^(-1) on a round key to match the fixsliced representation.
* In:      r12 points 32 bytes past the 8-word round key to transform (it is
*          pre-decremented by 32 before the loop).
* Note:    after the 8 iterations r12 is back at its entry value (eight
*          post-increments of 4 undo the initial -32); the last iteration
*          reads one word beyond the transformed key.
* Clobbers r0-r3; r14 is saved/restored via [sp, #52].
* NOTE(review): uses the 'swpmv' SWAPMOVE-style macro defined earlier in this
* file — confirm its operand order against that definition.
******************************************************************************/
.align 2
inv_shiftrows_1:
ldr.w r2, [r12, #-32]! // rewind to the rkey; load its 1st word
str r14, [sp, #52] // store link register
movw r1, #8 // loop counter: 8 rkey words
movw r14, #0x0300
movt r14, #0x0c0f // r14<- 0x0c0f0300 for ShiftRows^[-1]
loop_inv_sr_1:
movw r3, #0x3300
movt r3, #0x3300 // r3 <- 0x33003300 for ShiftRows^[-1]
swpmv r2, r2, r2, r2, r14, 4, r0
eor r0, r2, r2, lsr #2
and r0, r3
eor r2, r2, r0
eor r3, r2, r0, lsl #2 // r3 <- transformed rkey word
ldr.w r2, [r12, #4]! // advance; load next rkey word
str.w r3, [r12, #-4] // store the transformed word in place
subs r1, #1
bne loop_inv_sr_1
ldr r14, [sp, #52] // restore link register
bx lr
/******************************************************************************
* Applies ShiftRows^(-2) on a round key to match the fixsliced representation.
* Only needed for the fully-fixsliced (ffs) representation.
* In:      r12 points 32 bytes past the 8-word round key to transform (it is
*          pre-decremented by 32 before the loop).
* Note:    after the 8 iterations r12 is back at its entry value; the last
*          iteration reads one word beyond the transformed key.
* Clobbers r0-r3; r14 is saved/restored via [sp, #52].
******************************************************************************/
.align 2
inv_shiftrows_2:
ldr.w r2, [r12, #-32]! // rewind to the rkey; load its 1st word
str r14, [sp, #52] // store link register
movw r1, #8 // loop counter: 8 rkey words
movw r14, #0x0f00
movt r14, #0x0f00 // r14<- 0x0f000f00 for ShiftRows^[-2]
loop_inv_sr_2:
eor r0, r2, r2, lsr #4
and r0, r14
eor r2, r2, r0
eor r3, r2, r0, lsl #4 // r3 <- transformed rkey word
ldr.w r2, [r12, #4]! // advance; load next rkey word
str.w r3, [r12, #-4] // store the transformed word in place
subs r1, #1
bne loop_inv_sr_2
ldr r14, [sp, #52] // restore link register
bx lr
/******************************************************************************
* Applies ShiftRows^(-3) on a round key to match the fixsliced representation.
* Only needed for the fully-fixsliced (ffs) representation.
* In:      r12 points 32 bytes past the 8-word round key to transform (it is
*          pre-decremented by 32 before the loop).
* Note:    after the 8 iterations r12 is back at its entry value; the last
*          iteration reads one word beyond the transformed key.
* Clobbers r0-r3; r14 is saved/restored via [sp, #52].
* NOTE(review): uses the 'swpmv' SWAPMOVE-style macro defined earlier in this
* file — confirm its operand order against that definition.
******************************************************************************/
.align 2
inv_shiftrows_3:
ldr.w r2, [r12, #-32]! // rewind to the rkey; load its 1st word
str r14, [sp, #52] // store link register
movw r1, #8 // loop counter: 8 rkey words
movw r14, #0x0c00
movt r14, #0x030f // r14<- 0x030f0c00 for ShiftRows^[-3]
loop_inv_sr_3:
movw r3, #0x3300
movt r3, #0x3300 // r3 <- 0x33003300 for ShiftRows^[-3]
swpmv r2, r2, r2, r2, r14, 4, r0
eor r0, r2, r2, lsr #2
and r0, r3
eor r2, r2, r0
eor r3, r2, r0, lsl #2 // r3 <- transformed rkey word
ldr.w r2, [r12, #4]! // advance; load next rkey word
str.w r3, [r12, #-4] // store the transformed word in place
subs r1, #1
bne loop_inv_sr_3
ldr r14, [sp, #52] // restore link register
bx lr
/******************************************************************************
* Fully bitsliced AES-128 key schedule to match the fully-fixsliced (ffs)
* representation. Note that it is possible to pass two different keys as input
* parameters if one wants to encrypt 2 blocks with two different keys.
* In:  r0 = rkeys (output round-key array), r1 = key (16-byte master key).
* The same 128-bit key is loaded into both register halves (r4-r7 and r8-r11)
* so the packed state covers the two bitsliced block slots.
* Rconst 0x00000300 sets the packed bit-slice pair corresponding to the
* round-constant bit for both blocks.
******************************************************************************/
@ void aes128_keyschedule_ffs(u32* rkeys, const u8* key);
.global aes128_keyschedule_ffs
.type aes128_keyschedule_ffs,%function
.align 2
aes128_keyschedule_ffs:
push {r0-r12,r14} // save context; 14 words, so 'rkeys' (r0) lands at [sp+56] below
sub.w sp, #56 // allow space on the stack for tmp var
ldr.w r4, [r1] // load the 128-bit key in r4-r7
ldr r5, [r1, #4]
ldr r6, [r1, #8]
ldr r7, [r1, #12]
ldr.w r8, [r1] // load the 128-bit key in r8-r11
ldr r9, [r1, #4]
ldr r10,[r1, #8]
ldr r11,[r1, #12]
bl packing // pack the master key
ldr.w r0, [sp, #56] // restore 'rkeys' address
stm r0, {r4-r11} // store the packed master key in 'rkeys'
bl sbox // apply the sbox to the master key
eor r11, r11, #0x00000300 // add the 1st rconst
bl aes128_xorcolumns_rotword
bl sbox // apply the sbox to the current rkey
eor r2, r2, #0x00000300 // add the 2nd rconst
bl aes128_xorcolumns_rotword
bl inv_shiftrows_1
bl sbox // apply the sbox to the current rkey
eor r0, r0, #0x00000300 // add the 3rd rconst
bl aes128_xorcolumns_rotword
bl inv_shiftrows_2
bl sbox // apply the sbox to the current rkey
eor r8, r8, #0x00000300 // add the 4th rconst
bl aes128_xorcolumns_rotword
bl inv_shiftrows_3
bl sbox // apply the sbox to the current rkey
eor r7, r7, #0x00000300 // add the 5th rconst
bl aes128_xorcolumns_rotword
bl sbox // apply the sbox to the current rkey
eor r6, r6, #0x00000300 // add the 6th rconst
bl aes128_xorcolumns_rotword
bl inv_shiftrows_1
bl sbox // apply the sbox to the current rkey
eor r3, r3, #0x00000300 // add the 7th rconst
bl aes128_xorcolumns_rotword
bl inv_shiftrows_2
bl sbox // apply the sbox to the current rkey
eor r1, r1, #0x00000300 // add the 8th rconst
bl aes128_xorcolumns_rotword
bl inv_shiftrows_3
bl sbox // apply the sbox to the current rkey
eor r11, r11, #0x00000300 // add the 9th rconst
eor r2, r2, #0x00000300 // add the 9th rconst
eor r8, r8, #0x00000300 // add the 9th rconst
eor r7, r7, #0x00000300 // add the 9th rconst
bl aes128_xorcolumns_rotword
bl sbox // apply the sbox to the current rkey
eor r2, r2, #0x00000300 // add the 10th rconst
eor r0, r0, #0x00000300 // add the 10th rconst
eor r7, r7, #0x00000300 // add the 10th rconst
eor r6, r6, #0x00000300 // add the 10th rconst
bl aes128_xorcolumns_rotword
bl inv_shiftrows_1
mvn r5, r5 // add the NOT for the last rkey
mvn r6, r6 // add the NOT for the last rkey
mvn r10, r10 // add the NOT for the last rkey
mvn r11, r11 // add the NOT for the last rkey
strd r5, r6, [r12, #4] // store NOTed words of the last rkey
strd r10, r11, [r12, #24] // store NOTed words of the last rkey
ldrd r0, r1, [r12, #-316] // load 1st-rkey words (key whitening)
ldrd r2, r3, [r12, #-296] // load 1st-rkey words (key whitening)
mvn r0, r0 // remove the NOT for the key whitening
mvn r1, r1 // remove the NOT for the key whitening
mvn r2, r2 // remove the NOT for the key whitening
mvn r3, r3 // remove the NOT for the key whitening
strd r0, r1, [r12, #-316]
strd r2, r3, [r12, #-296]
add.w sp, #56 // restore stack
pop {r0-r12, r14} // restore context
bx lr
/******************************************************************************
* Fully bitsliced AES-256 key schedule to match the fully-fixsliced (ffs)
* representation. Note that it is possible to pass 2 different keys as input
* parameters if one wants to encrypt 2 blocks with 2 different keys.
* In:  r0 = rkeys (output round-key array), r1 = key (32-byte master key).
* The 256-bit key is packed as two 128-bit halves; rotword and plain
* xorcolumns rounds alternate, with rconsts added only on rotword rounds.
******************************************************************************/
@ void aes256_keyschedule_ffs(u32* rkeys, const u8* key);
.global aes256_keyschedule_ffs
.type aes256_keyschedule_ffs,%function
.align 2
aes256_keyschedule_ffs:
push {r0-r12,r14} // save context; 'rkeys'/'key' (r0/r1) land at [sp+56] below
sub.w sp, #56 // allow space on the stack for tmp var
ldr.w r4, [r1] // load the 128 first key bits in r4-r7
ldr r5, [r1, #4]
ldr r6, [r1, #8]
ldr r7, [r1, #12]
ldr.w r8, [r1] // load the 128 first key bits in r8-r11
ldr r9, [r1, #4]
ldr r10,[r1, #8]
ldr r11,[r1, #12]
bl packing // pack the master key
ldrd r0,r1, [sp, #56] // restore 'rkeys' and 'key' addresses
stm r0, {r4-r11} // store the packed master key in 'rkeys'
add.w r1, #16 // points to the 128 last bits of the key
ldr.w r4, [r1] // load the 128 last key bits in r4-r7
ldr r5, [r1, #4]
ldr r6, [r1, #8]
ldr r7, [r1, #12]
ldr.w r8, [r1] // load the 128 last key bits in r8-r11
ldr r9, [r1, #4]
ldr r10,[r1, #8]
ldr r11,[r1, #12]
bl packing // pack the master key
ldr.w r0, [sp, #56] // restore 'rkeys' address
add.w r0, #32 // advance to the slot for the 2nd packed key half
stm r0, {r4-r11} // store the packed master key in 'rkeys'
bl sbox // apply the sbox to the master key
eor r11, r11, #0x00000300 // add the 1st rconst
bl aes256_xorcolumns_rotword
bl sbox // apply the sbox to the current rkey
bl aes256_xorcolumns
bl inv_shiftrows_1
bl sbox // apply the sbox to the current rkey
eor r2, r2, #0x00000300 // add the 2nd rconst
bl aes256_xorcolumns_rotword
bl inv_shiftrows_2
bl sbox // apply the sbox to the current rkey
bl aes256_xorcolumns
bl inv_shiftrows_3
bl sbox // apply the sbox to the current rkey
eor r0, r0, #0x00000300 // add the 3rd rconst
bl aes256_xorcolumns_rotword
bl sbox // apply the sbox to the current rkey
bl aes256_xorcolumns
bl inv_shiftrows_1
bl sbox // apply the sbox to the current rkey
eor r8, r8, #0x00000300 // add the 4th rconst
bl aes256_xorcolumns_rotword
bl inv_shiftrows_2
bl sbox // apply the sbox to the current rkey
bl aes256_xorcolumns
bl inv_shiftrows_3
bl sbox // apply the sbox to the current rkey
eor r7, r7, #0x00000300 // add the 5th rconst
bl aes256_xorcolumns_rotword
bl sbox // apply the sbox to the current rkey
bl aes256_xorcolumns
bl inv_shiftrows_1
bl sbox // apply the sbox to the current rkey
eor r6, r6, #0x00000300 // add the 6th rconst
bl aes256_xorcolumns_rotword
bl inv_shiftrows_2
bl sbox // apply the sbox to the current rkey
bl aes256_xorcolumns
bl inv_shiftrows_3
bl sbox // apply the sbox to the current rkey
eor r3, r3, #0x00000300 // add the 7th rconst
bl aes256_xorcolumns_rotword
add r12, #32 // skip ahead so inv_shiftrows_1 (pre-decrement -32) hits the last rkey
bl inv_shiftrows_1
mvn r5, r5 // add the NOT for the last rkey
mvn r6, r6 // add the NOT for the last rkey
mvn r10, r10 // add the NOT for the last rkey
mvn r11, r11 // add the NOT for the last rkey
ldrd r0, r1, [r12, #-28] // load penultimate-rkey words
ldrd r2, r3, [r12, #-8] // load penultimate-rkey words
strd r5, r6, [r12, #4] // store NOTed words of the last rkey
strd r10, r11, [r12, #24] // store NOTed words of the last rkey
mvn r0, r0 // add the NOT for the penultimate rkey
mvn r1, r1 // add the NOT for the penultimate rkey
mvn r2, r2 // add the NOT for the penultimate rkey
mvn r3, r3 // add the NOT for the penultimate rkey
ldrd r5, r6, [r12, #-444] // load 1st-rkey words (key whitening)
ldrd r10, r11, [r12, #-424] // load 1st-rkey words (key whitening)
strd r0, r1, [r12, #-28]
strd r2, r3, [r12, #-8]
mvn r5, r5 // remove the NOT for the key whitening
mvn r6, r6 // remove the NOT for the key whitening
mvn r10, r10 // remove the NOT for the key whitening
mvn r11, r11 // remove the NOT for the key whitening
strd r5, r6, [r12, #-444]
strd r10, r11, [r12, #-424]
add.w sp, #56 // restore stack
pop {r0-r12, r14} // restore context
bx lr
/******************************************************************************
* Fully bitsliced AES-128 key schedule to match the semi-fixsliced (sfs)
* representation. Note that it is possible to pass 2 different keys as input
* parameters if one wants to encrypt 2 blocks with 2 different keys.
* In:  r0 = rkeys (output round-key array), r1 = key (16-byte master key).
* Differs from 'aes128_keyschedule_ffs' in that only ShiftRows^(-1) is ever
* applied (every other round), as required by the sfs representation.
******************************************************************************/
@ void aes128_keyschedule_sfs(u32* rkeys, const u8* key);
.global aes128_keyschedule_sfs
.type aes128_keyschedule_sfs,%function
.align 2
aes128_keyschedule_sfs:
push {r0-r12,r14} // save context; 'rkeys' (r0) lands at [sp+56] below
sub.w sp, #56 // allow space on the stack for tmp var
ldr.w r4, [r1] // load the 128-bit key in r4-r7
ldr r5, [r1, #4]
ldr r6, [r1, #8]
ldr r7, [r1, #12]
ldr.w r8, [r1] // load the 128-bit key in r8-r11
ldr r9, [r1, #4]
ldr r10,[r1, #8]
ldr r11,[r1, #12]
bl packing // pack the master key
ldr.w r0, [sp, #56] // restore 'rkeys' address
stm r0, {r4-r11} // store the packed master key in 'rkeys'
bl sbox // apply the sbox to the master key
eor r11, r11, #0x00000300 // add the 1st rconst
bl aes128_xorcolumns_rotword
bl sbox // apply the sbox to the current rkey
eor r2, r2, #0x00000300 // add the 2nd rconst
bl aes128_xorcolumns_rotword
bl inv_shiftrows_1
bl sbox // apply the sbox to the current rkey
eor r0, r0, #0x00000300 // add the 3rd rconst
bl aes128_xorcolumns_rotword
bl sbox // apply the sbox to the current rkey
eor r8, r8, #0x00000300 // add the 4th rconst
bl aes128_xorcolumns_rotword
bl inv_shiftrows_1
bl sbox // apply the sbox to the current rkey
eor r7, r7, #0x00000300 // add the 5th rconst
bl aes128_xorcolumns_rotword
bl sbox // apply the sbox to the current rkey
eor r6, r6, #0x00000300 // add the 6th rconst
bl aes128_xorcolumns_rotword
bl inv_shiftrows_1
bl sbox // apply the sbox to the current rkey
eor r3, r3, #0x00000300 // add the 7th rconst
bl aes128_xorcolumns_rotword
bl sbox // apply the sbox to the current rkey
eor r1, r1, #0x00000300 // add the 8th rconst
bl aes128_xorcolumns_rotword
bl inv_shiftrows_1
bl sbox // apply the sbox to the current rkey
eor r11, r11, #0x00000300 // add the 9th rconst
eor r2, r2, #0x00000300 // add the 9th rconst
eor r8, r8, #0x00000300 // add the 9th rconst
eor r7, r7, #0x00000300 // add the 9th rconst
bl aes128_xorcolumns_rotword
bl sbox // apply the sbox to the current rkey
eor r2, r2, #0x00000300 // add the 10th rconst
eor r0, r0, #0x00000300 // add the 10th rconst
eor r7, r7, #0x00000300 // add the 10th rconst
eor r6, r6, #0x00000300 // add the 10th rconst
bl aes128_xorcolumns_rotword
bl inv_shiftrows_1
mvn r5, r5 // add the NOT for the last rkey
mvn r6, r6 // add the NOT for the last rkey
mvn r10, r10 // add the NOT for the last rkey
mvn r11, r11 // add the NOT for the last rkey
strd r5, r6, [r12, #4] // store NOTed words of the last rkey
strd r10, r11, [r12, #24] // store NOTed words of the last rkey
ldrd r0, r1, [r12, #-316] // load 1st-rkey words (key whitening)
ldrd r2, r3, [r12, #-296] // load 1st-rkey words (key whitening)
mvn r0, r0 // remove the NOT for the key whitening
mvn r1, r1 // remove the NOT for the key whitening
mvn r2, r2 // remove the NOT for the key whitening
mvn r3, r3 // remove the NOT for the key whitening
strd r0, r1, [r12, #-316]
strd r2, r3, [r12, #-296]
add.w sp, #56 // restore stack
pop {r0-r12, r14} // restore context
bx lr
/******************************************************************************
* Fully bitsliced AES-256 key schedule to match the semi-fixsliced (sfs)
* representation. Note that it is possible to pass 2 different keys as input
* parameters if one wants to encrypt 2 blocks in with 2 different keys.
******************************************************************************/
@ void aes256_keyschedule_sfs(u32* rkeys, const u8* key);
@ In: r0 = rkeys (output round-key array), r1 = key (32-byte master key)
@ Relies on the 'packing', 'sbox', 'aes256_xorcolumns(_rotword)' and
@ 'inv_shiftrows_1' subroutines defined earlier in this file.
.global aes256_keyschedule_sfs
.type aes256_keyschedule_sfs,%function
.align 2
aes256_keyschedule_sfs:
push {r0-r12,r14}
sub.w sp, #56 // allow space on the stack for tmp var
ldr.w r4, [r1] // load the 128 first key bits in r4-r7
ldr r5, [r1, #4]
ldr r6, [r1, #8]
ldr r7, [r1, #12]
ldr.w r8, [r1] // duplicate the same 128 bits in r8-r11 (same key for both blocks)
ldr r9, [r1, #4]
ldr r10,[r1, #8]
ldr r11,[r1, #12]
bl packing // pack the master key
ldrd r0,r1, [sp, #56] // restore 'rkeys' and 'key' addresses
stm r0, {r4-r11} // store the packed master key in 'rkeys'
add.w r1, #16 // points to the 128 last bits of the key
ldr.w r4, [r1] // load the 128 last key bits in r4-r7
ldr r5, [r1, #4]
ldr r6, [r1, #8]
ldr r7, [r1, #12]
ldr.w r8, [r1] // duplicate the same 128 bits in r8-r11 (same key for both blocks)
ldr r9, [r1, #4]
ldr r10,[r1, #8]
ldr r11,[r1, #12]
bl packing // pack the last half of the master key
ldr.w r0, [sp, #56] // restore 'rkeys' address
add.w r0, #32 // points to the slot for the 2nd packed key half
stm r0, {r4-r11} // store the packed master key in 'rkeys'
// Expansion: each step applies the bitsliced sbox then xors the columns. The
// round constant is a single bit per slice, hence the #0x00000300 masks; the
// destination register changes per round with the fixsliced word ordering.
bl sbox // apply the sbox to the master key
eor r11, r11, #0x00000300 // add the 1st rconst
bl aes256_xorcolumns_rotword
bl sbox // apply the sbox to the master key
bl aes256_xorcolumns
bl inv_shiftrows_1
bl sbox // apply the sbox to the master key
eor r2, r2, #0x00000300 // add the 2nd rconst
bl aes256_xorcolumns_rotword
bl sbox // apply the sbox to the master key
bl aes256_xorcolumns
bl inv_shiftrows_1
bl sbox // apply the sbox to the master key
eor r0, r0, #0x00000300 // add the 3rd rconst
bl aes256_xorcolumns_rotword
bl sbox // apply the sbox to the master key
bl aes256_xorcolumns
bl inv_shiftrows_1
bl sbox // apply the sbox to the master key
eor r8, r8, #0x00000300 // add the 4th rconst
bl aes256_xorcolumns_rotword
bl sbox // apply the sbox to the master key
bl aes256_xorcolumns
bl inv_shiftrows_1
bl sbox // apply the sbox to the master key
eor r7, r7, #0x00000300 // add the 5th rconst
bl aes256_xorcolumns_rotword
bl sbox // apply the sbox to the master key
bl aes256_xorcolumns
bl inv_shiftrows_1
bl sbox // apply the sbox to the master key
eor r6, r6, #0x00000300 // add the 6th rconst
bl aes256_xorcolumns_rotword
bl sbox // apply the sbox to the master key
bl aes256_xorcolumns
bl inv_shiftrows_1
bl sbox // apply the sbox to the master key
eor r3, r3, #0x00000300 // add the 7th rconst
bl aes256_xorcolumns_rotword
add r12, #32 // NOTE(review): r12 appears to be the rkeys write pointer maintained by the helpers — confirm
bl inv_shiftrows_1
mvn r5, r5 // add the NOT for the last rkey
mvn r6, r6 // add the NOT for the last rkey
mvn r10, r10 // add the NOT for the last rkey
mvn r11, r11 // add the NOT for the last rkey
ldrd r0, r1, [r12, #-28] // fetch penultimate rkey words to add the NOTs
ldrd r2, r3, [r12, #-8]
strd r5, r6, [r12, #4]
strd r10, r11, [r12, #24]
mvn r0, r0 // add the NOT for the penultimate rkey
mvn r1, r1 // add the NOT for the penultimate rkey
mvn r2, r2 // add the NOT for the penultimate rkey
mvn r3, r3 // add the NOT for the penultimate rkey
ldrd r5, r6, [r12, #-444] // reach back to the very first rkey (key whitening)
ldrd r10, r11, [r12, #-424]
strd r0, r1, [r12, #-28]
strd r2, r3, [r12, #-8]
mvn r5, r5 // remove the NOT for the key whitening
mvn r6, r6 // remove the NOT for the key whitening
mvn r10, r10 // remove the NOT for the key whitening
mvn r11, r11 // remove the NOT for the key whitening
strd r5, r6, [r12, #-444]
strd r10, r11, [r12, #-424]
add.w sp, #56 // restore stack
pop {r0-r12, r14} // restore context
bx lr
|
aadomn/aes
| 29,867
|
armcortexm/fixslicing/aes_keyschedule_lut.s
|
/******************************************************************************
* ARM assembly implementations of the AES-128 and AES-256 key schedules to
* match fixslicing.
* Note that those implementations rely on Look-Up Tables (LUT).
*
* See the paper at https://eprint.iacr.org/2020/1123.pdf for more details.
*
* @author Alexandre Adomnicai, Nanyang Technological University, Singapore
* alexandre.adomnicai@ntu.edu.sg
*
* @date August 2020
******************************************************************************/
.syntax unified
.thumb
/******************************************************************************
* LUT of the AES S-box.
* Packed as 64 little-endian words: S-box entry i lives in byte (i & 3) of
* word (i >> 2). Callers therefore index it with the word-aligned offset
* (i & 0xfc) and shift the loaded word right by (i & 3)*8 to extract the byte.
******************************************************************************/
.align 2
.type AES_Sbox_compact,%object
AES_Sbox_compact:
.word 0x7b777c63, 0xc56f6bf2, 0x2b670130, 0x76abd7fe
.word 0x7dc982ca, 0xf04759fa, 0xafa2d4ad, 0xc072a49c
.word 0x2693fdb7, 0xccf73f36, 0xf1e5a534, 0x1531d871
.word 0xc323c704, 0x9a059618, 0xe2801207, 0x75b227eb
.word 0x1a2c8309, 0xa05a6e1b, 0xb3d63b52, 0x842fe329
.word 0xed00d153, 0x5bb1fc20, 0x39becb6a, 0xcf584c4a
.word 0xfbaaefd0, 0x85334d43, 0x7f02f945, 0xa89f3c50
.word 0x8f40a351, 0xf5389d92, 0x21dab6bc, 0xd2f3ff10
.word 0xec130ccd, 0x1744975f, 0x3d7ea7c4, 0x73195d64
.word 0xdc4f8160, 0x88902a22, 0x14b8ee46, 0xdb0b5ede
.word 0x0a3a32e0, 0x5c240649, 0x62acd3c2, 0x79e49591
.word 0x6d37c8e7, 0xa94ed58d, 0xeaf4566c, 0x08ae7a65
.word 0x2e2578ba, 0xc6b4a61c, 0x1f74dde8, 0x8a8bbd4b
.word 0x66b53e70, 0x0ef60348, 0xb9573561, 0x9e1dc186
.word 0x1198f8e1, 0x948ed969, 0xe9871e9b, 0xdf2855ce
.word 0x0d89a18c, 0x6842e6bf, 0x0f2d9941, 0x16bb54b0
/******************************************************************************
* Round function of the AES-128 key expansion.
* Note that it expects r2 to contain the corresponding round constant and r3 to
* contain the S-box address.
* In:       r4-r7 = current round key words, r2 = rconst, r3 = &AES_Sbox_compact
* Out:      r4-r7 = next round key, also pushed on the stack (packed later)
* Clobbers: r1, r8-r12, flags
******************************************************************************/
.align 2
aes128_keyschedule_rfunc:
movw r1, #0xfc // mask turning a byte of r7 into a word-aligned LUT offset
and r8, r1, r7, lsr #8 // LUT offset for byte 1 of r7
and r9, r1, r7, lsr #16 // LUT offset for byte 2 of r7
and r10, r1, r7, lsr #24 // LUT offset for byte 3 of r7
and r11, r1, r7 // LUT offset for byte 0 of r7
ldr r8, [r3, r8] // computes the sbox using the LUT
ldr r9, [r3, r9] // computes the sbox using the LUT
ldr r10, [r3, r10] // computes the sbox using the LUT
ldr r11, [r3, r11] // computes the sbox using the LUT
movw r1, #0x18 // mask for (byte & 3)*8, the byte's shift within the word
and r12, r1, r7, lsr #5
lsr r8, r8, r12
and r8, #0xff // keep the sbox output byte only
and r12, r1, r7, lsr #13
lsr r9, r9, r12
and r9, #0xff // keep the sbox output byte only
and r12, r1, r7, lsr #21
lsr r10, r10, r12
and r10, #0xff // keep the sbox output byte only
and r12, r1, r7, lsl #3
lsr r11, r11, r12
and r11, #0xff // keep the sbox output byte only
eor r4, r2 // adds the round constant
eor r4, r8 // xor the columns (1st sbox byte)
eor r4, r4, r9, ror #24 // xor the columns (2nd sbox byte)
eor r4, r4, r10, ror #16 // xor the columns (3rd sbox byte)
eor r4, r4, r11, ror #8 // xor the columns (4th sbox byte); rotations fold in RotWord
eor r5, r4 // xor the columns
eor r6, r5 // xor the columns
eor r7, r6 // xor the columns
push.w {r4-r7} // save the new rkey on the stack; packed after expansion
bx lr
/******************************************************************************
* Double round function of the AES-256 key expansion.
* Note that it expects r2 to contain the corresponding round constant and r3 to
* contain the S-box address.
* Operates slightly differently than 'aes128_keyschedule_rfunc' as 8 words have
* to be maintained in registers (instead of 4).
* In:       r4-r11 = two current round keys, r2 = rconst, r3 = sbox address
* Out:      r4-r7 = next round key, also pushed on the stack (packed later)
* Clobbers: r0, r1, r2 (reloaded with 0x18), r12, flags
* The 'ror' recombination of the sbox bytes of r11 folds in RotWord.
******************************************************************************/
.align 2
aes256_keyschedule_rfunc_0:
eor r4, r2 // adds the round constant
movw r1, #0xfc // mask for word-aligned LUT offsets
movw r2, #0x18 // mask for (byte & 3)*8 shifts
and r12, r1, r11, lsr #8 // byte 1 of r11
ldr r12, [r3, r12] // computes the sbox using the LUT
and r0, r2, r11, lsr #5
lsr r12, r12, r0
and r12, #0xff // keep the sbox output byte only
eor r4, r12 // xor the columns (sbox output byte)
and r12, r1, r11, lsr #16 // byte 2 of r11
ldr r12, [r3, r12] // computes the sbox using the LUT
and r0, r2, r11, lsr #13
lsr r12, r12, r0
and r12, #0xff // keep the sbox output byte only
eor r4, r4, r12, ror #24 // xor the columns (sbox output byte)
and r12, r1, r11, lsr #24 // byte 3 of r11
ldr r12, [r3, r12] // computes the sbox using the LUT
and r0, r2, r11, lsr #21
lsr r12, r12, r0
and r12, #0xff // keep the sbox output byte only
eor r4, r4, r12, ror #16 // xor the columns (sbox output byte)
and r12, r1, r11 // byte 0 of r11
ldr r12, [r3, r12] // computes the sbox using the LUT
and r0, r2, r11, lsl #3
lsr r12, r12, r0
and r12, #0xff // keep the sbox output byte only
eor r4, r4, r12, ror #8 // xor the columns (sbox output byte)
eor r5, r4 // xor the columns
eor r6, r5 // xor the columns
eor r7, r6 // xor the columns
push.w {r4-r7} // store on stack, to be packed later
bx lr
/******************************************************************************
* Double round function of the AES-256 key expansion.
* Note that it expects r2 to contain the corresponding round constant and r3 to
* contain the S-box address.
* Unlike 'aes256_keyschedule_rfunc_0' it doesnt compute the RotWord operation:
* the sbox bytes of r7 are recombined in place with 'lsl' shifts.
* In:       r4-r11 = two current round keys, r1 = 0xfc, r2 = 0x18 (masks left
*           by 'aes256_keyschedule_rfunc_0'), r3 = sbox address
* Out:      r8-r11 = next round key, also pushed on the stack (packed later)
* Clobbers: r0, r12, flags
******************************************************************************/
aes256_keyschedule_rfunc_1:
and r12, r1, r7, lsr #8 // byte 1 of r7
ldr r12, [r3, r12] // computes the sbox using the LUT
and r0, r2, r7, lsr #5
lsr r12, r12, r0
and r12, #0xff // keep the sbox output byte only
eor r8, r8, r12, lsl #8 // xor the columns (sbox output byte)
and r12, r1, r7, lsr #16 // byte 2 of r7
ldr r12, [r3, r12] // computes the sbox using the LUT
and r0, r2, r7, lsr #13
lsr r12, r12, r0
and r12, #0xff // keep the sbox output byte only
eor r8, r8, r12, lsl #16 // xor the columns (sbox output byte)
and r12, r1, r7, lsr #24 // byte 3 of r7
ldr r12, [r3, r12] // computes the sbox using the LUT
and r0, r2, r7, lsr #21
lsr r12, r12, r0
and r12, #0xff // keep the sbox output byte only
eor r8, r8, r12, lsl #24 // xor the columns (sbox output byte)
and r12, r1, r7 // byte 0 of r7
ldr r12, [r3, r12] // computes the sbox using the LUT
and r0, r2, r7, lsl #3
lsr r12, r12, r0
and r12, #0xff // keep the sbox output byte only
eor r8, r8, r12 // xor the columns (sbox output byte)
eor r9, r8 // xor the columns
eor r10, r9 // xor the columns
eor r11, r10 // xor the columns
push {r8-r11} // store on stack, to be packed later
bx lr
/******************************************************************************
* Packing routine. Note that it is the same as the one used in the encryption
* function so some code size could be saved by merging the two files.
* Bitslices the round key held in r4-r11 (callers duplicate r4-r7 into r8-r11
* when the same rkey serves both blocks) using the SWAPMOVE technique.
* In:       r4-r11 = unpacked rkey, r1 = 0x55555555, r2 = 0x33333333,
*           r3 = 0x0f0f0f0f (SWAPMOVE masks), r0 = write pointer
* Out:      packed rkey stored at [r0-32 .. r0-4]; r0 decremented by 32
*           (writeback), so successive calls fill the rkeys array backwards.
*           NOTs omitted in the sbox are applied to r5, r7, r8 and r11.
* Clobbers: r4-r12, flags
******************************************************************************/
.align 2
packing_rkey:
eor r12, r8, r8, lsr #1 // SWAPMOVE(r8, r4, 0x55555555, 1) ....
and r12, r1
eor r4, r8, r12
eor r8, r8, r12, lsl #1 // .... SWAPMOVE(r8, r4, 0x55555555, 1)
eor r12, r9, r9, lsr #1 // SWAPMOVE(r9, r5, 0x55555555, 1) ....
and r12, r1
eor r5, r9, r12
eor r9, r9, r12, lsl #1 // .... SWAPMOVE(r9, r5, 0x55555555, 1)
eor r12, r10, r10, lsr #1 // SWAPMOVE(r10, r6, 0x55555555, 1) ....
and r12, r1
eor r6, r10, r12
eor r10, r10, r12, lsl #1 // .... SWAPMOVE(r10, r6, 0x55555555, 1)
eor r12, r11, r11, lsr #1 // SWAPMOVE(r11, r7, 0x55555555, 1) ....
and r12, r1
eor r7, r11, r12
eor r11, r11, r12, lsl #1 // .... SWAPMOVE(r11, r7, 0x55555555, 1)
eor r12, r4, r5, lsr #2 // SWAPMOVE(r5, r4, 0x33333333, 2) ....
and r12, r2
eor r4, r12
eor r5, r5, r12, lsl #2 // .... SWAPMOVE(r5, r4, 0x33333333, 2)
eor r12, r8, r9, lsr #2 // SWAPMOVE(r9, r8, 0x33333333, 2) ....
and r12, r2
eor r8, r8, r12
eor r9, r9, r12, lsl #2 // .... SWAPMOVE(r9, r8, 0x33333333, 2)
eor r12, r6, r7, lsr #2 // SWAPMOVE(r7, r6, 0x33333333, 2) ....
and r12, r2
eor r6, r6, r12
eor r7, r7, r12, lsl #2 // .... SWAPMOVE(r7, r6, 0x33333333, 2)
eor r12, r10, r11, lsr #2 // SWAPMOVE(r11, r10, 0x33333333, 2) ....
and r12, r2
eor r10, r10, r12
eor r11, r11, r12, lsl #2 // .... SWAPMOVE(r11, r10, 0x33333333, 2)
eor r12, r4, r6, lsr #4 // SWAPMOVE(r6, r4, 0x0f0f0f0f, 4) ....
and r12, r3
eor r4, r12
eor r6, r6, r12, lsl #4 // .... SWAPMOVE(r6, r4, 0x0f0f0f0f,4)
eor r12, r5, r7, lsr #4 // SWAPMOVE(r7, r5, 0x0f0f0f0f, 4) ....
and r12, r3
eor r5, r5, r12
eor r7, r7, r12, lsl #4 // .... SWAPMOVE(r7, r5, 0x0f0f0f0f, 4)
eor r12, r8, r10, lsr #4 // SWAPMOVE(r10, r8, 0x0f0f0f0f, 4) ....
and r12, r3
eor r8, r8, r12
eor r10, r10, r12, lsl #4 // .... SWAPMOVE(r10,r8, 0x0f0f0f0f, 4)
eor r12, r9, r11, lsr #4 // SWAPMOVE(r11, r9, 0x0f0f0f0f, 4) ....
and r12, r3
eor r9, r12
eor r11, r11, r12, lsl #4 // .... SWAPMOVE(r11, r9, 0x0f0f0f0f, 4)
mvn r5, r5 // NOT omitted in the sbox
mvn r8, r8 // NOT omitted in the sbox
mvn r7, r7 // NOT omitted in the sbox
mvn r11, r11 // NOT omitted in the sbox
strd r7, r11, [r0, #-8]
strd r6, r10, [r0, #-16]
strd r5, r9, [r0, #-24]
strd r4, r8, [r0, #-32]! // store the 8 slices; writeback moves r0 back 32 bytes
bx lr
/******************************************************************************
* Applies ShiftRows^(-1) on a round key to match fully/semi-fixslicing.
* In:       r4-r7 = round key words (byte n of each word = row n)
* Out:      r8-r11 = remapped words; r4-r7 are left untouched
* Clobbers: r12, flags
******************************************************************************/
.align 2
inv_shiftrows_1:
and r8, r4, #0xff // r8 = r4[7:0] | r7[15:8] | r6[23:16] | r5[31:24]
and r12, r7, #0xff00
orr r8, r8, r12
and r12, r6, #0xff0000
orr r8, r8, r12
and r12, r5, #0xff000000
orr r8, r8, r12
and r9, r5, #0xff // r9 = r5[7:0] | r4[15:8] | r7[23:16] | r6[31:24]
and r12, r4, #0xff00
orr r9, r9, r12
and r12, r7, #0xff0000
orr r9, r9, r12
and r12, r6, #0xff000000
orr r9, r9, r12
and r10, r6, #0xff // r10 = r6[7:0] | r5[15:8] | r4[23:16] | r7[31:24]
and r12, r5, #0xff00
orr r10, r10, r12
and r12, r4, #0xff0000
orr r10, r10, r12
and r12, r7, #0xff000000
orr r10, r10, r12
and r11, r7, #0xff // r11 = r7[7:0] | r6[15:8] | r5[23:16] | r4[31:24]
and r12, r6, #0xff00
orr r11, r11, r12
and r12, r5, #0xff0000
orr r11, r11, r12
and r12, r4, #0xff000000
orr r11, r11, r12
bx lr
/******************************************************************************
* Applies ShiftRows^(-2) on a round key to match full-fixslicing.
* ShiftRows^(-2) swaps bytes two positions apart, which reduces to exchanging
* the bytes selected by mask 0xff00ff00 between the word pairs (r4, r6) and
* (r5, r7), done here with the 3-eor/1-and swap trick.
* In:       r4-r7 = round key words
* Out:      r8-r11 = remapped words; r4-r7 are left untouched
* Clobbers: r12, flags
******************************************************************************/
.align 2
inv_shiftrows_2:
movw r12, #0xff00
movt r12, #0xff00 // r12 = 0xff00ff00 (bytes to exchange)
eor r8, r4, r6 // swap masked bytes of r4/r6 into r8/r10 ....
and r8, r8, r12
eor r10, r8, r6
eor r8, r8, r4 // .... r8/r10 = r4/r6 with bytes 1 and 3 exchanged
eor r9, r5, r7 // swap masked bytes of r5/r7 into r9/r11 ....
and r9, r9, r12
eor r11, r9, r7
eor r9, r9, r5 // .... r9/r11 = r5/r7 with bytes 1 and 3 exchanged
bx lr
/******************************************************************************
* Applies ShiftRows^(-3) on a round key to match fully-fixslicing.
* In:       r4-r7 = round key words (byte n of each word = row n)
* Out:      r8-r11 = remapped words; r4-r7 are left untouched
* Clobbers: r12, flags
******************************************************************************/
.align 2
inv_shiftrows_3:
and r8, r4, #0xff // r8 = r4[7:0] | r5[15:8] | r6[23:16] | r7[31:24]
and r12, r5, #0xff00
orr r8, r8, r12
and r12, r6, #0xff0000
orr r8, r8, r12
and r12, r7, #0xff000000
orr r8, r8, r12
and r9, r5, #0xff // r9 = r5[7:0] | r6[15:8] | r7[23:16] | r4[31:24]
and r12, r6, #0xff00
orr r9, r9, r12
and r12, r7, #0xff0000
orr r9, r9, r12
and r12, r4, #0xff000000
orr r9, r9, r12
and r10, r6, #0xff // r10 = r6[7:0] | r7[15:8] | r4[23:16] | r5[31:24]
and r12, r7, #0xff00
orr r10, r10, r12
and r12, r4, #0xff0000
orr r10, r10, r12
and r12, r5, #0xff000000
orr r10, r10, r12
and r11, r7, #0xff // r11 = r7[7:0] | r4[15:8] | r5[23:16] | r6[31:24]
and r12, r4, #0xff00
orr r11, r11, r12
and r12, r5, #0xff0000
orr r11, r11, r12
and r12, r6, #0xff000000
orr r11, r11, r12
bx lr
/******************************************************************************
* Pre-computes all the round keys for a given encryption key, according to the
* fully-fixsliced (ffs) representation.
* Note that the round keys also include the NOTs omitted in the S-box.
* Strategy: expand all 10 rkeys with the LUT-based round function (each rkey
* pushed on the stack), then pop them back (round 10 first) and bitslice each
* through 'packing_rkey', remapping every rkey through the inverse ShiftRows
* power required by its fixsliced round position.
******************************************************************************/
@ void aes128_keyschedule_ffs_lut(u32* rkeys, const u8* key);
@ In: r0 = rkeys (11 x 32 bytes), r1 = key (16-byte master key)
.global aes128_keyschedule_ffs_lut
.type aes128_keyschedule_ffs_lut,%function
.align 2
aes128_keyschedule_ffs_lut:
push {r1-r12,r14}
ldr.w r4, [r1] // load the encryption key
ldr r5, [r1, #4]
ldr r6, [r1, #8]
ldr r7, [r1, #12]
adr r3, AES_Sbox_compact // load the sbox LUT address in r3
movw r2, #0x01 // 1st rconst
bl aes128_keyschedule_rfunc // 1st round
movw r2, #0x02 // 2nd rconst
bl aes128_keyschedule_rfunc // 2nd round
movw r2, #0x04 // 3rd rconst
bl aes128_keyschedule_rfunc // 3rd round
movw r2, #0x08 // 4th rconst
bl aes128_keyschedule_rfunc // 4th round
movw r2, #0x10 // 5th rconst
bl aes128_keyschedule_rfunc // 5th round
movw r2, #0x20 // 6th rconst
bl aes128_keyschedule_rfunc // 6th round
movw r2, #0x40 // 7th rconst
bl aes128_keyschedule_rfunc // 7th round
movw r2, #0x80 // 8th rconst
bl aes128_keyschedule_rfunc // 8th round
movw r2, #0x1b // 9th rconst
bl aes128_keyschedule_rfunc // 9th round
movw r2, #0x36 // 10th rconst
bl aes128_keyschedule_rfunc // 10th round
//done expanding, now start bitslicing
//set r0 to end of rk, to be filled backwards
add r0, #352 // 11 rkeys * 32 bytes; 'packing_rkey' decrements r0
movw r3, #0x0f0f
movt r3, #0x0f0f // r3 <- 0x0f0f0f0f (mask for SWAPMOVE)
eor r2, r3, r3, lsl #2 // r2 <- 0x33333333 (mask for SWAPMOVE)
eor r1, r2, r2, lsl #1 // r1 <- 0x55555555 (mask for SWAPMOVE)
pop.w {r4-r7} // rkey of round 10 (last pushed comes first)
mov r8, r4 // duplicate: same rkey for both bitsliced blocks
mov r9, r5
mov r10, r6
mov r11, r7
bl packing_rkey
pop.w {r4-r7} // round 9 rkey
bl inv_shiftrows_1
bl packing_rkey
pop.w {r4-r7} // round 8 rkey (no ShiftRows remap needed)
mov r8, r4
mov r9, r5
mov r10, r6
mov r11, r7
bl packing_rkey
pop.w {r4-r7} // round 7 rkey
bl inv_shiftrows_3
bl packing_rkey
pop.w {r4-r7} // round 6 rkey
bl inv_shiftrows_2
bl packing_rkey
pop.w {r4-r7} // round 5 rkey
bl inv_shiftrows_1
bl packing_rkey
pop.w {r4-r7} // round 4 rkey (no ShiftRows remap needed)
mov r8, r4
mov r9, r5
mov r10, r6
mov r11, r7
bl packing_rkey
pop.w {r4-r7} // round 3 rkey
bl inv_shiftrows_3
bl packing_rkey
pop.w {r4-r7} // round 2 rkey
bl inv_shiftrows_2
bl packing_rkey
pop.w {r4-r7} // round 1 rkey
bl inv_shiftrows_1
bl packing_rkey
ldr r12, [sp] // all rkeys popped: [sp] = saved r1 = 'key' pointer
ldr.w r4, [r12] // load the encryption key
ldr r5, [r12, #4]
ldr r6, [r12, #8]
ldr r7, [r12, #12]
mov r8, r4
mov r9, r5
mov r10, r6
mov r11, r7
bl packing_rkey // pack the whitening key (r0 now points to rkey 0)
mvn r5, r5 // cancels the NOT applied in 'packing_rkey'
mvn r8, r8 // cancels the NOT applied in 'packing_rkey'
mvn r7, r7 // cancels the NOT applied in 'packing_rkey'
mvn r11, r11 // cancels the NOT applied in 'packing_rkey'
strd r7, r11, [r0, #24] // re-store whitening key without the NOTs
strd r6, r10, [r0, #16] // re-store whitening key without the NOTs
strd r5, r9, [r0, #8] // re-store whitening key without the NOTs
strd r4, r8, [r0] // re-store whitening key without the NOTs
pop {r1-r12, r14} // restore context
bx lr
/******************************************************************************
* Pre-computes all the round keys for a given encryption key, according to the
* fully-fixsliced (ffs) representation.
* Note that the round keys also include the NOTs omitted in the S-box.
* Strategy: expand 13 rkeys with the LUT-based double round functions (each
* pushed on the stack), then pop them back (round 14 first) and bitslice each
* through 'packing_rkey' with the inverse ShiftRows power its fixsliced round
* position requires; the two master-key halves are packed last.
******************************************************************************/
@ void aes256_keyschedule_ffs_lut(u32* rkeys, const u8* key);
@ In: r0 = rkeys (15 x 32 bytes), r1 = key (32-byte master key)
.global aes256_keyschedule_ffs_lut
.type aes256_keyschedule_ffs_lut,%function
.align 2
aes256_keyschedule_ffs_lut:
push {r0-r12,r14} // 14 regs saved (56 bytes)
ldr.w r4, [r1] // load the encryption key
ldr r5, [r1, #4]
ldr r6, [r1, #8]
ldr r7, [r1, #12]
adr r3, AES_Sbox_compact // load the sbox LUT address in r3
movw r2, #0x01 // 1st rconst
bl aes256_keyschedule_rfunc_0 // 1st round
bl aes256_keyschedule_rfunc_1 // 2nd round
movw r2, #0x02 // 2nd rconst
bl aes256_keyschedule_rfunc_0 // 3rd round
bl aes256_keyschedule_rfunc_1 // 4th round
movw r2, #0x04 // 3rd rconst
bl aes256_keyschedule_rfunc_0 // 5th round
bl aes256_keyschedule_rfunc_1 // 6th round
movw r2, #0x08 // 4th rconst
bl aes256_keyschedule_rfunc_0 // 7th round
bl aes256_keyschedule_rfunc_1 // 8th round
movw r2, #0x10 // 5th rconst
bl aes256_keyschedule_rfunc_0 // 9th round
bl aes256_keyschedule_rfunc_1 // 10th round
movw r2, #0x20 // 6th rconst
bl aes256_keyschedule_rfunc_0 // 11th round
bl aes256_keyschedule_rfunc_1 // 12th round
movw r2, #0x40 // 7th rconst
bl aes256_keyschedule_rfunc_0 // 13th round
//done expanding, now start bitslicing
//set r0 to end of rk, to be filled backwards
ldr.w r0, [sp, #208] // restore rkeys address (13 rkeys * 16 bytes above saved r0)
pop.w {r4-r7} // load the last rkey stored on the stack
add.w r0, #480 // points past the last rkey (15 rkeys * 32 bytes)
movw r3, #0x0f0f
movt r3, #0x0f0f // r3 <- 0x0f0f0f0f (mask for SWAPMOVE)
eor r2, r3, r3, lsl #2 // r2 <- 0x33333333 (mask for SWAPMOVE)
eor r1, r2, r2, lsl #1 // r1 <- 0x55555555 (mask for SWAPMOVE)
mov r8, r4 // duplicate: same rkey for both bitsliced blocks
mov r9, r5
mov r10, r6
mov r11, r7
bl packing_rkey
pop.w {r4-r7}
bl inv_shiftrows_1
bl packing_rkey
pop.w {r4-r7} // no ShiftRows remap needed for this rkey
mov r8, r4
mov r9, r5
mov r10, r6
mov r11, r7
bl packing_rkey
pop.w {r4-r7}
bl inv_shiftrows_3
bl packing_rkey
pop.w {r4-r7}
bl inv_shiftrows_2
bl packing_rkey
pop.w {r4-r7}
bl inv_shiftrows_1
bl packing_rkey
pop.w {r4-r7} // no ShiftRows remap needed for this rkey
mov r8, r4
mov r9, r5
mov r10, r6
mov r11, r7
bl packing_rkey
pop.w {r4-r7}
bl inv_shiftrows_3
bl packing_rkey
pop.w {r4-r7}
bl inv_shiftrows_2
bl packing_rkey
pop.w {r4-r7}
bl inv_shiftrows_1
bl packing_rkey
pop.w {r4-r7} // no ShiftRows remap needed for this rkey
mov r8, r4
mov r9, r5
mov r10, r6
mov r11, r7
bl packing_rkey
pop.w {r4-r7}
bl inv_shiftrows_3
bl packing_rkey
pop.w {r4-r7}
bl inv_shiftrows_2
bl packing_rkey
ldr r12, [sp, #4]! // discard saved r0 slot (writeback) and load saved r1 = 'key'
add.w r12, #16 // points to the last 16 key bytes
ldr.w r4, [r12]
ldr r5, [r12, #4]
ldr r6, [r12, #8]
ldr r7, [r12, #12]
bl inv_shiftrows_1
bl packing_rkey
ldr r12, [sp] // saved r1 again = 'key' (sp moved by the writeback above)
ldr.w r4, [r12] // load the encryption key
ldr r5, [r12, #4]
ldr r6, [r12, #8]
ldr r7, [r12, #12]
mov r8, r4
mov r9, r5
mov r10, r6
mov r11, r7
bl packing_rkey // pack the whitening key (r0 now points to rkey 0)
mvn r5, r5 // cancels the NOT applied in 'packing_rkey'
mvn r8, r8 // cancels the NOT applied in 'packing_rkey'
mvn r7, r7 // cancels the NOT applied in 'packing_rkey'
mvn r11, r11 // cancels the NOT applied in 'packing_rkey'
strd r7, r11, [r0, #24] // re-store whitening key without the NOTs
strd r6, r10, [r0, #16] // re-store whitening key without the NOTs
strd r5, r9, [r0, #8] // re-store whitening key without the NOTs
strd r4, r8, [r0] // re-store whitening key without the NOTs
pop {r1-r12, r14} // restore context (r0 slot was consumed above)
bx lr
/******************************************************************************
* Pre-computes all the round keys for a given encryption key, according to the
* semi-fixsliced (sfs) representation.
* Note that the round keys also include the NOTs omitted in the S-box.
* Same structure as 'aes128_keyschedule_ffs_lut' but semi-fixslicing only
* alternates identity and ShiftRows^(-1) when remapping the round keys.
******************************************************************************/
@ void aes128_keyschedule_sfs_lut(u32* rkeys, const u8* key);
@ In: r0 = rkeys (11 x 32 bytes), r1 = key (16-byte master key)
.global aes128_keyschedule_sfs_lut
.type aes128_keyschedule_sfs_lut,%function
.align 2
aes128_keyschedule_sfs_lut:
push {r1-r12,r14}
ldr.w r4, [r1] // load the encryption key
ldr r5, [r1, #4]
ldr r6, [r1, #8]
ldr r7, [r1, #12]
adr r3, AES_Sbox_compact // load the sbox LUT address in r3
movw r2, #0x01 // 1st rconst
bl aes128_keyschedule_rfunc // 1st round
movw r2, #0x02 // 2nd rconst
bl aes128_keyschedule_rfunc // 2nd round
movw r2, #0x04 // 3rd rconst
bl aes128_keyschedule_rfunc // 3rd round
movw r2, #0x08 // 4th rconst
bl aes128_keyschedule_rfunc // 4th round
movw r2, #0x10 // 5th rconst
bl aes128_keyschedule_rfunc // 5th round
movw r2, #0x20 // 6th rconst
bl aes128_keyschedule_rfunc // 6th round
movw r2, #0x40 // 7th rconst
bl aes128_keyschedule_rfunc // 7th round
movw r2, #0x80 // 8th rconst
bl aes128_keyschedule_rfunc // 8th round
movw r2, #0x1b // 9th rconst
bl aes128_keyschedule_rfunc // 9th round
movw r2, #0x36 // 10th rconst
bl aes128_keyschedule_rfunc // 10th round
//done expanding, now start bitslicing
//set r0 to end of rk, to be filled backwards
add r0, #352 // 11 rkeys * 32 bytes; 'packing_rkey' decrements r0
movw r3, #0x0f0f
movt r3, #0x0f0f // r3 <- 0x0f0f0f0f (mask for SWAPMOVE)
eor r2, r3, r3, lsl #2 // r2 <- 0x33333333 (mask for SWAPMOVE)
eor r1, r2, r2, lsl #1 // r1 <- 0x55555555 (mask for SWAPMOVE)
pop.w {r4-r7} // round 10 rkey (last pushed comes first)
mov r8, r4 // duplicate: same rkey for both bitsliced blocks
mov r9, r5
mov r10, r6
mov r11, r7
bl packing_rkey
pop.w {r4-r7} // round 9 rkey
bl inv_shiftrows_1
bl packing_rkey
pop.w {r4-r7} // round 8 rkey (no remap)
mov r8, r4
mov r9, r5
mov r10, r6
mov r11, r7
bl packing_rkey
pop.w {r4-r7} // round 7 rkey
bl inv_shiftrows_1
bl packing_rkey
pop.w {r4-r7} // round 6 rkey (no remap)
mov r8, r4
mov r9, r5
mov r10, r6
mov r11, r7
bl packing_rkey
pop.w {r4-r7} // round 5 rkey
bl inv_shiftrows_1
bl packing_rkey
pop.w {r4-r7} // round 4 rkey (no remap)
mov r8, r4
mov r9, r5
mov r10, r6
mov r11, r7
bl packing_rkey
pop.w {r4-r7} // round 3 rkey
bl inv_shiftrows_1
bl packing_rkey
pop.w {r4-r7} // round 2 rkey (no remap)
mov r8, r4
mov r9, r5
mov r10, r6
mov r11, r7
bl packing_rkey
pop.w {r4-r7} // round 1 rkey
bl inv_shiftrows_1
bl packing_rkey
ldr r12, [sp] // all rkeys popped: [sp] = saved r1 = 'key' pointer
ldr.w r4, [r12] // load the encryption key
ldr r5, [r12, #4]
ldr r6, [r12, #8]
ldr r7, [r12, #12]
mov r8, r4
mov r9, r5
mov r10, r6
mov r11, r7
bl packing_rkey // pack the whitening key (r0 now points to rkey 0)
mvn r5, r5 // cancels the NOT applied in 'packing_rkey'
mvn r8, r8 // cancels the NOT applied in 'packing_rkey'
mvn r7, r7 // cancels the NOT applied in 'packing_rkey'
mvn r11, r11 // cancels the NOT applied in 'packing_rkey'
strd r7, r11, [r0, #24] // re-store whitening key without the NOTs
strd r6, r10, [r0, #16] // re-store whitening key without the NOTs
strd r5, r9, [r0, #8] // re-store whitening key without the NOTs
strd r4, r8, [r0] // re-store whitening key without the NOTs
pop {r1-r12, r14} // restore context
bx lr
/******************************************************************************
* Pre-computes all the round keys for a given encryption key, according to the
* semi-fixsliced (sfs) representation.
* Note that the round keys also include the NOTs omitted in the S-box.
* Same structure as 'aes256_keyschedule_ffs_lut' but semi-fixslicing only
* alternates identity and ShiftRows^(-1) when remapping the round keys.
******************************************************************************/
@ void aes256_keyschedule_sfs_lut(u32* rkeys, const u8* key);
@ In: r0 = rkeys (15 x 32 bytes), r1 = key (32-byte master key)
.global aes256_keyschedule_sfs_lut
.type aes256_keyschedule_sfs_lut,%function
.align 2
aes256_keyschedule_sfs_lut:
push {r0-r12,r14} // 14 regs saved (56 bytes)
ldr.w r4, [r1] // load the encryption key
ldr r5, [r1, #4]
ldr r6, [r1, #8]
ldr r7, [r1, #12]
adr r3, AES_Sbox_compact // load the sbox LUT address in r3
movw r2, #0x01 // 1st rconst
bl aes256_keyschedule_rfunc_0 // 1st round
bl aes256_keyschedule_rfunc_1 // 2nd round
movw r2, #0x02 // 2nd rconst
bl aes256_keyschedule_rfunc_0 // 3rd round
bl aes256_keyschedule_rfunc_1 // 4th round
movw r2, #0x04 // 3rd rconst
bl aes256_keyschedule_rfunc_0 // 5th round
bl aes256_keyschedule_rfunc_1 // 6th round
movw r2, #0x08 // 4th rconst
bl aes256_keyschedule_rfunc_0 // 7th round
bl aes256_keyschedule_rfunc_1 // 8th round
movw r2, #0x10 // 5th rconst
bl aes256_keyschedule_rfunc_0 // 9th round
bl aes256_keyschedule_rfunc_1 // 10th round
movw r2, #0x20 // 6th rconst
bl aes256_keyschedule_rfunc_0 // 11th round
bl aes256_keyschedule_rfunc_1 // 12th round
movw r2, #0x40 // 7th rconst
bl aes256_keyschedule_rfunc_0 // 13th round
//done expanding, now start bitslicing
//set r0 to end of rk, to be filled backwards
ldr.w r0, [sp, #208] // restore rkeys address (13 rkeys * 16 bytes above saved r0)
pop.w {r4-r7} // load the last rkey stored on the stack
add.w r0, #480 // points past the last rkey (15 rkeys * 32 bytes)
movw r3, #0x0f0f
movt r3, #0x0f0f // r3 <- 0x0f0f0f0f (mask for SWAPMOVE)
eor r2, r3, r3, lsl #2 // r2 <- 0x33333333 (mask for SWAPMOVE)
eor r1, r2, r2, lsl #1 // r1 <- 0x55555555 (mask for SWAPMOVE)
mov r8, r4 // duplicate: same rkey for both bitsliced blocks
mov r9, r5
mov r10, r6
mov r11, r7
bl packing_rkey
pop.w {r4-r7}
bl inv_shiftrows_1
bl packing_rkey
pop.w {r4-r7} // no ShiftRows remap needed for this rkey
mov r8, r4
mov r9, r5
mov r10, r6
mov r11, r7
bl packing_rkey
pop.w {r4-r7}
bl inv_shiftrows_1
bl packing_rkey
pop.w {r4-r7} // no ShiftRows remap needed for this rkey
mov r8, r4
mov r9, r5
mov r10, r6
mov r11, r7
bl packing_rkey
pop.w {r4-r7}
bl inv_shiftrows_1
bl packing_rkey
pop.w {r4-r7} // no ShiftRows remap needed for this rkey
mov r8, r4
mov r9, r5
mov r10, r6
mov r11, r7
bl packing_rkey
pop.w {r4-r7}
bl inv_shiftrows_1
bl packing_rkey
pop.w {r4-r7} // no ShiftRows remap needed for this rkey
mov r8, r4
mov r9, r5
mov r10, r6
mov r11, r7
bl packing_rkey
pop.w {r4-r7}
bl inv_shiftrows_1
bl packing_rkey
pop.w {r4-r7} // no ShiftRows remap needed for this rkey
mov r8, r4
mov r9, r5
mov r10, r6
mov r11, r7
bl packing_rkey
ldr r12, [sp, #4]! // discard saved r0 slot (writeback) and load saved r1 = 'key'
add.w r12, #16 // points to the last 16 key bytes
ldr.w r4, [r12]
ldr r5, [r12, #4]
ldr r6, [r12, #8]
ldr r7, [r12, #12]
bl inv_shiftrows_1
bl packing_rkey
ldr r12, [sp] // saved r1 again = 'key' (sp moved by the writeback above)
ldr.w r4, [r12] // load the encryption key
ldr r5, [r12, #4]
ldr r6, [r12, #8]
ldr r7, [r12, #12]
mov r8, r4
mov r9, r5
mov r10, r6
mov r11, r7
bl packing_rkey // pack the whitening key (r0 now points to rkey 0)
mvn r5, r5 // cancels the NOT applied in 'packing_rkey'
mvn r8, r8 // cancels the NOT applied in 'packing_rkey'
mvn r7, r7 // cancels the NOT applied in 'packing_rkey'
mvn r11, r11 // cancels the NOT applied in 'packing_rkey'
strd r7, r11, [r0, #24] // re-store whitening key without the NOTs
strd r6, r10, [r0, #16] // re-store whitening key without the NOTs
strd r5, r9, [r0, #8] // re-store whitening key without the NOTs
strd r4, r8, [r0] // re-store whitening key without the NOTs
pop {r1-r12, r14} // restore context (r0 slot was consumed above)
bx lr
|
aadomn/aes
| 75,213
|
armcortexm/1storder_masking/aes_encrypt.s
|
/******************************************************************************
* ARM assembly 1st-order masked fixsliced implementation of the AES-128.
*
* The masking scheme is the one described in "Masking AES with 2 random bits"
* available at https://eprint.iacr.org/2018/1007.
* See supplementary material at https://github.com/LaurenDM/TwoRandomBits
*
* All bytes within the internal state are masked in the following way:
* m1 || m0^m1 || m0^m1 || m0 || m0 || m1 || m0 || m1 where m0, m1 are random
* bits. Note that because each round key is assumed to be masked using the same
* masking scheme with different random bits, the mask is updated at every
* AddRoundKey operation.
*
* @author Alexandre Adomnicai, Nanyang Technological University, Singapore
* alexandre.adomnicai@ntu.edu.sg
*
* @date October 2020
******************************************************************************/
.syntax unified
.thumb
/******************************************************************************
* SWAPMOVE technique: exchange the bits of 'i1' selected by 'msk' with the
* bits of 'i0' selected by 'msk << sft'; write the results to 'o0'/'o1'.
* 'scr' is a scratch register; in-place use (o0==i0, o1==i1) is fine.
******************************************************************************/
.macro swpmv o0, o1, i0, i1, msk, sft, scr
eor \scr, \i1, \i0, lsr \sft // scr <- i1 ^ (i0 >> sft)
and \scr, \scr, \msk // keep only the bit positions selected by msk
eor \o1, \i1, \scr // o1 <- i1 with the selected bits swapped in
eor \o0, \i0, \scr, lsl \sft // o0 <- i0 with the selected bits swapped in
.endm
/******************************************************************************
* Packs two 128-bit input blocs stored in r4-r7 and r8-r11, respectively, into
* the 256-bit internal state where the bits are packed as follows:
* r4 = b_24 b_56 b_88 b_120 || ... || b_0 b_32 b_64 b_96
* r5 = b_25 b_57 b_89 b_121 || ... || b_1 b_33 b_65 b_97
* r6 = b_26 b_58 b_90 b_122 || ... || b_2 b_34 b_66 b_98
* r7 = b_27 b_59 b_91 b_123 || ... || b_3 b_35 b_67 b_99
* r8 = b_28 b_60 b_92 b_124 || ... || b_4 b_36 b_68 b_100
* r9 = b_29 b_61 b_93 b_125 || ... || b_5 b_37 b_69 b_101
* r10 = b_30 b_62 b_94 b_126 || ... || b_6 b_38 b_70 b_102
* r11 = b_31 b_63 b_95 b_127 || ... || b_7 b_39 b_71 b_103
* Three SWAPMOVE layers (shift 1, 2, 4); clobbers r0-r3 and r12.
******************************************************************************/
.align 2
packing:
movw r3, #0x0f0f
movt r3, #0x0f0f // r3 <- 0x0f0f0f0f (mask for SWAPMOVE)
eor r2, r3, r3, lsl #2 // r2 <- 0x33333333 (mask for SWAPMOVE)
eor r1, r2, r2, lsl #1 // r1 <- 0x55555555 (mask for SWAPMOVE)
swpmv r8, r4, r8, r4, r1, #1, r12 // layer 1: mask 0x55555555, shift 1
swpmv r9, r5, r9, r5, r1, #1, r12
swpmv r10, r6, r10, r6, r1, #1, r12
swpmv r11, r7, r11, r7, r1, #1, r12
swpmv r0, r4, r5, r4, r2, #2, r12 // layer 2: mask 0x33333333, shift 2
swpmv r9, r5, r9, r8, r2, #2, r12
swpmv r7, r8, r7, r6, r2, #2, r12
swpmv r11, r2, r11, r10, r2, #2, r12
swpmv r8, r4, r8, r4, r3, #4, r12 // layer 3: mask 0x0f0f0f0f, shift 4
swpmv r10, r6, r7, r0, r3, #4, r12
swpmv r11, r7, r11, r9, r3, #4, r12
swpmv r9, r5, r2, r5, r3, #4, r12
bx lr
/******************************************************************************
* Unpacks the 256-bit internal state in two 128-bit blocs.
* Inverse of 'packing': the same SWAPMOVE layers applied in reverse order
* (shift 4, then 2, then 1). Clobbers r1-r3 and r12.
******************************************************************************/
.align 2
unpacking:
movw r3, #0x0f0f
movt r3, #0x0f0f // r3 <- 0x0f0f0f0f (mask for SWAPMOVE)
swpmv r2, r5, r9, r5, r3, #4, r12 // layer 3 inverse: mask 0x0f0f0f0f, shift 4
swpmv r11, r9, r11, r7, r3, #4, r12
swpmv r7, r1, r10, r6, r3, #4, r12
swpmv r8, r4, r8, r4, r3, #4, r12
eor r3, r3, r3, lsl #2 // r3 <- 0x33333333 (mask for SWAPMOVE)
swpmv r11, r10,r11, r2, r3, #2, r12 // layer 2 inverse: shift 2
swpmv r7, r6, r7, r8, r3, #2, r12
swpmv r9, r8, r9, r5, r3, #2, r12
swpmv r5, r4, r1, r4, r3, #2, r12
eor r1, r3, r3, lsl #1 // r1 <- 0x55555555 (mask for SWAPMOVE)
swpmv r8, r4, r8, r4, r1, #1, r12 // layer 1 inverse: shift 1
swpmv r9, r5,r9, r5, r1, #1, r12
swpmv r10, r6, r10, r6, r1, #1, r12
swpmv r11, r7, r11, r7, r1, #1, r12
bx lr
/******************************************************************************
* Subroutine that computes the AddRoundKey.
* Note that the masks are updated since all round keys are assumed to be masked
* with different shares by following the same masking scheme that the one used
* in the encryption function.
* In: r4-r11 = masked state; r0 = m0^m1, r2 = m1; masks mirrored at
* sp[120,124,128]; sp[116] = current rkey pointer (advanced on return).
* Clobbers r1, r3, r12; r14 is spilled to sp[0].
******************************************************************************/
.align 2
add_round_key:
str r14, [sp] // save link register
ldr.w r3, [sp, #116] // load 'rkey' argument from the stack
ldr r12, [sp, #128] // load m0 in r12
ldrd r1, r14, [r3], #8 // load km0, km1 (round key's own mask shares)
eor r12, r12, r1 // m0 ^ km0
eor r2, r2, r14 // m1 ^ km1
ldr.w r1, [r3], #4 // load km0 ^ km1
eor r0, r0, r1 // m0 ^ m1 ^ km0 ^ km1
ldr.w r1, [r3], #4 // stream in the 8 masked rkey words ...
ldr r14, [r3], #4
str.w r12, [sp, #128] // store new m0
eor r4, r4, r1 // ... XORing them into the state as they arrive
eor r5, r5, r14
ldr.w r1, [r3], #4
ldr r14, [r3], #4
str.w r2, [sp, #124] // store new m1
eor r6, r6, r1
eor r7, r7, r14
ldr.w r1, [r3], #4
ldr r14, [r3], #4
str.w r0, [sp, #120] // store new m0 ^ m1
eor r8, r8, r1
eor r9, r9, r14
ldr r14, [r3, #4]
ldr.w r1, [r3], #8
str.w r3, [sp, #116] // save the advanced rkey pointer for the next round
eor r10, r10, r1
eor r11, r11, r14
ldr.w r14, [sp] // restore link register
bx lr
/******************************************************************************
* 1st-order masked implementation of the S-box in a bitsliced manner.
* Credits to https://github.com/LaurenDM/TwoRandomBits.
* The bitsliced key state should be contained in r4-r11 while the masks
* m2=m0^m1, m1, m0 are supposed to be stored in sp[120,124,128].
* Each AND gate is split into masked partial products (tN_0..tN_5) that are
* recombined so no unmasked intermediate ever appears in a register.
* Uses sp[0..112] as scratch; r14 is spilled to sp[132]. Clobbers r0-r12.
* Outputs are the masked S-box bits spread over r0-r12 (see the register-map
* comment following this routine).
******************************************************************************/
.align 2
sbox:
str r14, [sp, #132] // save link register
mov.w r14, r2
orr r0, r12, r14 //Exec (m0 | m1) = m0 | m1 into r0
eor r2, r7, r9 //Exec y14 = i4 ^ i2 into r2
str.w r0, [sp, #112] //Store r0/(m0 | m1) on stack
eor r0, r4, r10 //Exec y13 = i7 ^ i1 into r0
eor r1, r0, r14 //Exec hy13 = y13 ^ m1 into r1
eor r3, r4, r7 //Exec y9 = i7 ^ i4 into r3
str.w r3, [sp, #108] //Store r3/y9 on stack
eor r3, r3, r14 //Exec hy9 = y9 ^ m1 into r3
str.w r1, [sp, #104] //Store r1/hy13 on stack
eor r1, r4, r9 //Exec y8 = i7 ^ i2 into r1
eor r6, r5, r6 //Exec t0 = i6 ^ i5 into r6
str.w r3, [sp, #100] //Store r3/hy9 on stack
eor r3, r6, r11 //Exec y1 = t0 ^ i0 into r3
str.w r6, [sp, #96] //Store r6/t0 on stack
eor r6, r3, r14 //Exec hy1 = y1 ^ m1 into r6
eor r7, r6, r7 //Exec y4 = hy1 ^ i4 into r7
str.w r7, [sp, #92] //Store r7/y4 on stack
eor r7, r7, r12 //Exec hy4 = y4 ^ m0 into r7
str.w r0, [sp, #88] //Store r0/y13 on stack
eor r0, r0, r2 //Exec y12 = y13 ^ y14 into r0
str.w r6, [sp, #84] //Store r6/hy1 on stack
eor r6, r3, r4 //Exec y2 = y1 ^ i7 into r6
eor r10, r3, r10 //Exec y5 = y1 ^ i1 into r10
str.w r2, [sp, #80] //Store r2/y14 on stack
eor r2, r10, r1 //Exec y3 = y5 ^ y8 into r2
str r10, [sp, #60] //Store r10/y5 on stack
eor r2, r2, r14 //Exec hy3 = y3 ^ m1 into r2
eor r8, r8, r0 //Exec t1 = i3 ^ y12 into r8
eor r9, r8, r9 //Exec y15 = t1 ^ i2 into r9
str.w r6, [sp, #76] //Store r6/y2 on stack
eor r6, r9, r14 //Exec hy15 = y15 ^ m1 into r6
eor r5, r8, r5 //Exec y20 = t1 ^ i6 into r5
eor r8, r9, r11 //Exec y6 = y15 ^ i0 into r8
str.w r6, [sp, #72] //Store r6/hy15 on stack
eor r6, r8, r12 //Exec hy6 = y6 ^ m0 into r6
str.w r6, [sp, #68] //Store r6/hy6 on stack
ldr.w r6, [sp, #96] //Load t0 into r6
str.w r3, [sp, #64] //Store r3/y1 on stack
eor r3, r9, r6 //Exec y10 = y15 ^ t0 into r3
eor r10, r3, r12 //Exec hy10 = y10 ^ m0 into r10
str r10, [sp, #56] //Store r10/hy10 on stack
ldr r10, [sp, #100] //Load hy9 into r10
str.w r5, [sp, #100] //Store r5/y20 on stack
eor r10, r5, r10 //Exec y11 = y20 ^ hy9 into r10
eor r5, r10, r12 //Exec hy11 = y11 ^ m0 into r5
eor r14, r11, r5 //Exec y7 = i0 ^ hy11 into r14
eor r5, r3, r5 //Exec y17 = y10 ^ hy11 into r5
str.w r1, [sp, #52] //Store r1/y8 on stack
eor r1, r3, r1 //Exec y19 = y10 ^ y8 into r1
str.w r1, [sp, #96] //Store r1/y19 on stack
eor r6, r6, r10 //Exec y16 = t0 ^ y11 into r6
ldr.w r1, [sp, #104] //Load hy13 into r1
str.w r3, [sp, #48] //Store r3/y10 on stack
eor r3, r1, r6 //Exec y21 = hy13 ^ y16 into r3
str.w r3, [sp, #32] //Store r3/y21 on stack
eor r4, r4, r6 //Exec y18 = i7 ^ y16 into r4
str.w r4, [sp, #44] //Store r4/y18 on stack
and r4, r0, r9 //Exec t2_0 = y12 & y15 into r4
str.w r0, [sp, #40] //Store r0/y12 on stack
and r0, r0, r12 //Exec t2_1 = y12 & m0 into r0
str.w r0, [sp, #36] //Store r0/t2_1 on stack
eor r0, r0, r12 //Exec t2_2 = t2_1 ^ m0 into r0
eor r0, r4, r0 //Exec t2_3 = t2_0 ^ t2_2 into r0
ldr.w r4, [sp, #120] //Load m2 into r4
ldr.w r3, [sp, #112] //Load (m0 | m1) into r3
and r9, r4, r9 //Exec t2_4 = m2 & y15 into r9
eor r9, r9, r3 //Exec t2_5 = t2_4 ^ (m0 | m1) into r9
eor r0, r0, r9 //Exec t2 = t2_3 ^ t2_5 into r0
and r9, r2, r8 //Exec t3_0 = hy3 & y6 into r9
str.w r2, [sp, #28] //Store r2/hy3 on stack
and r2, r2, r4 //Exec t3_1 = hy3 & m2 into r2
str.w r2, [sp, #24] //Store r2/t3_1 on stack
eor r2, r2, r4 //Exec t3_2 = t3_1 ^ m2 into r2
eor r2, r9, r2 //Exec t3_3 = t3_0 ^ t3_2 into r2
and r8, r12, r8 //Exec t3_4 = m0 & y6 into r8
eor r8, r8, r3 //Exec t3_5 = t3_4 ^ (m0 | m1) into r8
eor r2, r2, r8 //Exec t3 = t3_3 ^ t3_5 into r2
eor r2, r2, r0 //Exec t4 = t3 ^ t2 into r2
and r8, r11, r7 //Exec t5_0 = i0 & hy4 into r8
and r9, r11, r4 //Exec t5_1 = i0 & m2 into r9
str r9, [sp, #20] //Store r9/t5_1 on stack
eor r9, r9, r4 //Exec t5_2 = t5_1 ^ m2 into r9
eor r8, r8, r9 //Exec t5_3 = t5_0 ^ t5_2 into r8
ldr r9, [sp, #124] //Load m1 into r9
and r7, r9, r7 //Exec t5_4 = m1 & hy4 into r7
eor r7, r7, r3 //Exec t5_5 = t5_4 ^ (m0 | m1) into r7
eor r7, r8, r7 //Exec t5 = t5_3 ^ t5_5 into r7
eor r0, r7, r0 //Exec t6 = t5 ^ t2 into r0
and r7, r1, r6 //Exec t7_0 = hy13 & y16 into r7
and r1, r1, r12 //Exec t7_1 = hy13 & m0 into r1
eor r1, r1, r12 //Exec t7_2 = t7_1 ^ m0 into r1
eor r1, r7, r1 //Exec t7_3 = t7_0 ^ t7_2 into r1
and r7, r6, r4 //Exec t7_4 = y16 & m2 into r7
eor r8, r7, r3 //Exec t7_5 = t7_4 ^ (m0 | m1) into r8
str.w r7, [sp, #104] //Store r7/t7_4 on stack
eor r1, r1, r8 //Exec t7 = t7_3 ^ t7_5 into r1
ldr r8, [sp, #64] //Load y1 into r8
ldr.w r7, [sp, #60] //Load y5 into r7
str.w r6, [sp, #16] //Store r6/y16 on stack
and r6, r8, r7 //Exec t8_0 = y1 & y5 into r6
and r8, r8, r9 //Exec t8_1 = y1 & m1 into r8
eor r8, r8, r9 //Exec t8_2 = t8_1 ^ m1 into r8
eor r6, r6, r8 //Exec t8_3 = t8_0 ^ t8_2 into r6
and r8, r7, r12 //Exec t8_4 = y5 & m0 into r8
str r8, [sp, #64] //Store r8/t8_4 on stack
eor r8, r8, r3 //Exec t8_5 = t8_4 ^ (m0 | m1) into r8
eor r6, r6, r8 //Exec t8 = t8_3 ^ t8_5 into r6
ldr r8, [sp, #76] //Load y2 into r8
str r14, [sp, #12] //Store r14/y7 on stack
eor r6, r6, r1 //Exec t9 = t8 ^ t7 into r6
and r7, r14, r8 //Exec t10_0 = y7 & y2 into r7
and r14, r14, r4 //Exec t10_1 = y7 & m2 into r14
eor r14, r14, r4 //Exec t10_2 = t10_1 ^ m2 into r14
eor r7, r7, r14 //Exec t10_3 = t10_0 ^ t10_2 into r7
and r14, r12, r8 //Exec t10_4 = m0 & y2 into r14
eor r14, r14, r3 //Exec t10_5 = t10_4 ^ (m0 | m1) into r14
eor r7, r7, r14 //Exec t10 = t10_3 ^ t10_5 into r7
eor r1, r7, r1 //Exec t11 = t10 ^ t7 into r1
ldr.w r7, [sp, #108] //Load y9 into r7
and r14, r10, r7 //Exec t12_0 = y11 & y9 into r14
and r8, r10, r4 //Exec t12_1 = y11 & m2 into r8
eor r8, r8, r4 //Exec t12_2 = t12_1 ^ m2 into r8
eor r8, r14, r8 //Exec t12_3 = t12_0 ^ t12_2 into r8
and r14, r9, r7 //Exec t12_4 = m1 & y9 into r14
eor r14, r14, r3 //Exec t12_5 = t12_4 ^ (m0 | m1) into r14
eor r8, r8, r14 //Exec t12 = t12_3 ^ t12_5 into r8
ldr r14, [sp, #80] //Load y14 into r14
str.w r5, [sp, #8 ] //Store r5/y17 on stack
and r7, r5, r14 //Exec t13_0 = y17 & y14 into r7
and r5, r5, r9 //Exec t13_1 = y17 & m1 into r5
eor r5, r5, r9 //Exec t13_2 = t13_1 ^ m1 into r5
eor r5, r7, r5 //Exec t13_3 = t13_0 ^ t13_2 into r5
and r7, r12, r14 //Exec t13_4 = m0 & y14 into r7
eor r7, r7, r3 //Exec t13_5 = t13_4 ^ (m0 | m1) into r7
eor r5, r5, r7 //Exec t13 = t13_3 ^ t13_5 into r5
eor r5, r5, r8 //Exec t14 = t13 ^ t12 into r5
ldr.w r7, [sp, #52] //Load y8 into r7
ldr r14, [sp, #48] //Load y10 into r14
str r10, [sp, #4 ] //Store r10/y11 on stack
and r10, r7, r14 //Exec t15_0 = y8 & y10 into r10
and r7, r7, r9 //Exec t15_1 = y8 & m1 into r7
str.w r7, [sp, #0 ] //Store r7/t15_1 on stack
eor r7, r7, r9 //Exec t15_2 = t15_1 ^ m1 into r7
eor r7, r10, r7 //Exec t15_3 = t15_0 ^ t15_2 into r7
and r10, r12, r14 //Exec t15_4 = m0 & y10 into r10
eor r10, r10, r3 //Exec t15_5 = t15_4 ^ (m0 | m1) into r10
eor r7, r7, r10 //Exec t15 = t15_3 ^ t15_5 into r7
eor r7, r7, r8 //Exec t16 = t15 ^ t12 into r7
ldr r8, [sp, #100] //Load y20 into r8
eor r2, r2, r8 //Exec t17 = t4 ^ y20 into r2
eor r0, r0, r7 //Exec t18 = t6 ^ t16 into r0
eor r6, r6, r5 //Exec t19 = t9 ^ t14 into r6
eor r1, r1, r7 //Exec t20 = t11 ^ t16 into r1
eor r2, r2, r5 //Exec t21 = t17 ^ t14 into r2
ldr.w r5, [sp, #96] //Load y19 into r5
eor r0, r0, r5 //Exec t22 = t18 ^ y19 into r0
ldr.w r5, [sp, #32] //Load y21 into r5
ldr.w r7, [sp, #44] //Load y18 into r7
str r11, [sp, #100] //Store r11/i0 on stack
eor r5, r6, r5 //Exec t23 = t19 ^ y21 into r5
eor r6, r5, r12 //Exec ht23 = t23 ^ m0 into r6
eor r1, r1, r7 //Exec t24 = t20 ^ y18 into r1
eor r7, r1, r12 //Exec ht24 = t24 ^ m0 into r7
eor r8, r2, r0 //Exec t25 = t21 ^ t22 into r8
and r10, r5, r2 //Exec t26_0 = t23 & t21 into r10
and r14, r5, r9 //Exec t26_1 = t23 & m1 into r14
eor r14, r14, r9 //Exec t26_2 = t26_1 ^ m1 into r14
eor r10, r10, r14 //Exec t26_3 = t26_0 ^ t26_2 into r10
and r2, r4, r2 //Exec t26_4 = m2 & t21 into r2
eor r2, r2, r3 //Exec t26_5 = t26_4 ^ (m0 | m1) into r2
eor r2, r10, r2 //Exec t26 = t26_3 ^ t26_5 into r2
eor r10, r1, r2 //Exec t27 = t24 ^ t26 into r10
and r14, r8, r10 //Exec t28_0 = t25 & t27 into r14
and r11, r8, r12 //Exec t28_1 = t25 & m0 into r11
eor r11, r11, r12 //Exec t28_2 = t28_1 ^ m0 into r11
eor r11, r14, r11 //Exec t28_3 = t28_0 ^ t28_2 into r11
and r14, r4, r10 //Exec t28_4 = m2 & t27 into r14
eor r14, r14, r3 //Exec t28_5 = t28_4 ^ (m0 | m1) into r14
eor r11, r11, r14 //Exec t28 = t28_3 ^ t28_5 into r11
eor r11, r11, r0 //Exec t29 = t28 ^ t22 into r11
eor r5, r5, r1 //Exec t30 = t23 ^ t24 into r5
eor r0, r0, r2 //Exec t31 = t22 ^ t26 into r0
and r2, r5, r0 //Exec t32_0 = t30 & t31 into r2
and r5, r5, r9 //Exec t32_1 = t30 & m1 into r5
eor r5, r5, r9 //Exec t32_2 = t32_1 ^ m1 into r5
eor r2, r2, r5 //Exec t32_3 = t32_0 ^ t32_2 into r2
and r0, r12, r0 //Exec t32_4 = m0 & t31 into r0
eor r0, r0, r3 //Exec t32_5 = t32_4 ^ (m0 | m1) into r0
eor r0, r2, r0 //Exec t32 = t32_3 ^ t32_5 into r0
eor r0, r0, r1 //Exec t33 = t32 ^ t24 into r0
eor r1, r0, r12 //Exec ht33 = t33 ^ m0 into r1
eor r2, r6, r0 //Exec t34 = ht23 ^ t33 into r2
eor r5, r10, r0 //Exec t35 = t27 ^ t33 into r5
and r6, r5, r7 //Exec t36_0 = t35 & ht24 into r6
and r5, r5, r4 //Exec t36_1 = t35 & m2 into r5
eor r5, r5, r4 //Exec t36_2 = t36_1 ^ m2 into r5
eor r5, r6, r5 //Exec t36_3 = t36_0 ^ t36_2 into r5
and r6, r9, r7 //Exec t36_4 = m1 & ht24 into r6
eor r6, r6, r3 //Exec t36_5 = t36_4 ^ (m0 | m1) into r6
eor r5, r5, r6 //Exec t36 = t36_3 ^ t36_5 into r5
eor r2, r5, r2 //Exec t37 = t36 ^ t34 into r2
eor r5, r10, r5 //Exec t38 = t27 ^ t36 into r5
and r6, r11, r5 //Exec t39_0 = t29 & t38 into r6
and r7, r11, r4 //Exec t39_1 = t29 & m2 into r7
eor r7, r7, r4 //Exec t39_2 = t39_1 ^ m2 into r7
eor r6, r6, r7 //Exec t39_3 = t39_0 ^ t39_2 into r6
str.w r7, [sp, #96] //Store r7/t39_2 on stack
and r5, r9, r5 //Exec t39_4 = m1 & t38 into r5
eor r5, r5, r3 //Exec t39_5 = t39_4 ^ (m0 | m1) into r5
eor r5, r6, r5 //Exec t39 = t39_3 ^ t39_5 into r5
eor r5, r8, r5 //Exec t40 = t25 ^ t39 into r5
eor r6, r5, r2 //Exec t41 = t40 ^ t37 into r6
eor r8, r11, r0 //Exec t42 = t29 ^ t33 into r8
eor r10, r11, r5 //Exec t43 = t29 ^ t40 into r10
eor r1, r1, r2 //Exec t44 = ht33 ^ t37 into r1
eor r14, r8, r6 //Exec t45 = t42 ^ t41 into r14
ldr.w r7, [sp, #72] //Load hy15 into r7
str.w r6, [sp, #48] //Store r6/t41 on stack
and r6, r1, r7 //Exec z0_0 = t44 & hy15 into r6
str.w r1, [sp, #44] //Store r1/t44 on stack
and r1, r1, r4 //Exec z0_1 = t44 & m2 into r1
eor r1, r1, r4 //Exec z0_2 = z0_1 ^ m2 into r1
eor r6, r6, r1 //Exec z0_3 = z0_0 ^ z0_2 into r6
and r7, r12, r7 //Exec z0_4 = m0 & hy15 into r7
eor r7, r7, r3 //Exec z0_5 = z0_4 ^ (m0 | m1) into r7
eor r6, r6, r7 //Exec z0 = z0_3 ^ z0_5 into r6
ldr.w r7, [sp, #68] //Load hy6 into r7
str.w r6, [sp, #72] //Store r6/z0 on stack
and r6, r7, r2 //Exec z1_0 = hy6 & t37 into r6
and r7, r7, r4 //Exec z1_1 = hy6 & m2 into r7
eor r7, r7, r4 //Exec z1_2 = z1_1 ^ m2 into r7
eor r6, r6, r7 //Exec z1_3 = z1_0 ^ z1_2 into r6
and r7, r9, r2 //Exec z1_4 = m1 & t37 into r7
eor r7, r7, r3 //Exec z1_5 = z1_4 ^ (m0 | m1) into r7
eor r6, r6, r7 //Exec z1 = z1_3 ^ z1_5 into r6
ldr.w r7, [sp, #100] //Load i0 into r7
str.w r6, [sp, #100] //Store r6/z1 on stack
and r7, r0, r7 //Exec z2_0 = t33 & i0 into r7
and r6, r0, r9 //Exec z2_1 = t33 & m1 into r6
eor r6, r6, r9 //Exec z2_2 = z2_1 ^ m1 into r6
str.w r6, [sp, #68] //Store r6/z2_2 on stack
eor r7, r7, r6 //Exec z2_3 = z2_0 ^ z2_2 into r7
ldr.w r6, [sp, #20] //Load t5_1 into r6
eor r6, r6, r3 //Exec z2_5 = t5_1 ^ (m0 | m1) into r6
eor r6, r7, r6 //Exec z2 = z2_3 ^ z2_5 into r6
str.w r6, [sp, #32] //Store r6/z2 on stack
ldr.w r7, [sp, #16] //Load y16 into r7
ldr.w r6, [sp, #104] //Load t7_4 into r6
and r7, r7, r10 //Exec z3_0 = y16 & t43 into r7
eor r6, r6, r4 //Exec z3_2 = t7_4 ^ m2 into r6
eor r6, r7, r6 //Exec z3_3 = z3_0 ^ z3_2 into r6
and r7, r12, r10 //Exec z3_4 = m0 & t43 into r7
str.w r7, [sp, #104] //Store r7/z3_4 on stack
eor r7, r7, r3 //Exec z3_5 = z3_4 ^ (m0 | m1) into r7
eor r6, r6, r7 //Exec z3 = z3_3 ^ z3_5 into r6
ldr.w r7, [sp, #84] //Load hy1 into r7
str.w r6, [sp, #20] //Store r6/z3 on stack
and r6, r7, r5 //Exec z4_0 = hy1 & t40 into r6
and r7, r7, r12 //Exec z4_1 = hy1 & m0 into r7
eor r7, r7, r12 //Exec z4_2 = z4_1 ^ m0 into r7
eor r6, r6, r7 //Exec z4_3 = z4_0 ^ z4_2 into r6
and r7, r4, r5 //Exec z4_4 = m2 & t40 into r7
eor r7, r7, r3 //Exec z4_5 = z4_4 ^ (m0 | m1) into r7
eor r6, r6, r7 //Exec z4 = z4_3 ^ z4_5 into r6
ldr.w r7, [sp, #12] //Load y7 into r7
str.w r6, [sp, #84] //Store r6/z4 on stack
and r6, r11, r7 //Exec z5_0 = t29 & y7 into r6
str r11, [sp, #16] //Store r11/t29 on stack
and r11, r11, r12 //Exec z5_1 = t29 & m0 into r11
eor r11, r11, r12 //Exec z5_2 = z5_1 ^ m0 into r11
eor r6, r6, r11 //Exec z5_3 = z5_0 ^ z5_2 into r6
and r7, r9, r7 //Exec z5_4 = m1 & y7 into r7
eor r7, r7, r3 //Exec z5_5 = z5_4 ^ (m0 | m1) into r7
eor r6, r6, r7 //Exec z5 = z5_3 ^ z5_5 into r6
ldr.w r7, [sp, #4 ] //Load y11 into r7
and r11, r7, r8 //Exec z6_0 = y11 & t42 into r11
and r7, r7, r12 //Exec z6_1 = y11 & m0 into r7
eor r7, r7, r12 //Exec z6_2 = z6_1 ^ m0 into r7
eor r7, r11, r7 //Exec z6_3 = z6_0 ^ z6_2 into r7
and r11, r9, r8 //Exec z6_4 = m1 & t42 into r11
eor r11, r11, r3 //Exec z6_5 = z6_4 ^ (m0 | m1) into r11
eor r7, r7, r11 //Exec z6 = z6_3 ^ z6_5 into r7
ldr r11, [sp, #8 ] //Load y17 into r11
str.w r7, [sp, #12] //Store r7/z6 on stack
and r7, r11, r14 //Exec z7_0 = y17 & t45 into r7
and r11, r11, r4 //Exec z7_1 = y17 & m2 into r11
eor r11, r11, r4 //Exec z7_2 = z7_1 ^ m2 into r11
eor r7, r7, r11 //Exec z7_3 = z7_0 ^ z7_2 into r7
and r11, r12, r14 //Exec z7_4 = m0 & t45 into r11
eor r11, r11, r3 //Exec z7_5 = z7_4 ^ (m0 | m1) into r11
eor r7, r7, r11 //Exec z7 = z7_3 ^ z7_5 into r7
ldr r11, [sp, #56] //Load hy10 into r11
str.w r6, [sp, #8 ] //Store r6/z5 on stack
eor r6, r5, r2 //Recompute t41 = t40 ^ t37 into r6
str.w r7, [sp, #4 ] //Store r7/z7 on stack
and r7, r11, r6 //Exec z8_0 = hy10 & t41 into r7
and r11, r11, r9 //Exec z8_1 = hy10 & m1 into r11
eor r11, r11, r9 //Exec z8_2 = z8_1 ^ m1 into r11
eor r7, r7, r11 //Exec z8_3 = z8_0 ^ z8_2 into r7
and r11, r4, r6 //Exec z8_4 = m2 & t41 into r11
eor r11, r11, r3 //Exec z8_5 = z8_4 ^ (m0 | m1) into r11
eor r7, r7, r11 //Exec z8 = z8_3 ^ z8_5 into r7
str.w r7, [sp, #56] //Store r7/z8 on stack
ldr r11, [sp, #44] //Load t44 into r11
ldr.w r7, [sp, #40] //Load y12 into r7
and r7, r11, r7 //Exec z9_0 = t44 & y12 into r7
eor r1, r7, r1 //Exec z9_3 = z9_0 ^ z0_2 into r1
ldr.w r7, [sp, #36] //Load t2_1 into r7
eor r7, r7, r3 //Exec z9_5 = t2_1 ^ (m0 | m1) into r7
eor r1, r1, r7 //Exec z9 = z9_3 ^ z9_5 into r1
ldr.w r7, [sp, #28] //Load hy3 into r7
and r7, r2, r7 //Exec z10_0 = t37 & hy3 into r7
and r2, r2, r12 //Exec z10_1 = t37 & m0 into r2
eor r2, r2, r12 //Exec z10_2 = z10_1 ^ m0 into r2
eor r2, r7, r2 //Exec z10_3 = z10_0 ^ z10_2 into r2
ldr.w r7, [sp, #24] //Load t3_1 into r7
eor r7, r7, r3 //Exec z10_5 = t3_1 ^ (m0 | m1) into r7
eor r2, r2, r7 //Exec z10 = z10_3 ^ z10_5 into r2
ldr.w r7, [sp, #92] //Load y4 into r7
ldr r11, [sp, #68] //Load z2_2 into r11
and r0, r0, r7 //Exec z11_0 = t33 & y4 into r0
eor r0, r0, r11 //Exec z11_3 = z11_0 ^ z2_2 into r0
and r7, r4, r7 //Exec z11_4 = m2 & y4 into r7
eor r7, r7, r3 //Exec z11_5 = z11_4 ^ (m0 | m1) into r7
eor r0, r0, r7 //Exec z11 = z11_3 ^ z11_5 into r0
ldr.w r7, [sp, #88] //Load y13 into r7
ldr r11, [sp, #104] //Load z3_4 into r11
and r10, r10, r7 //Exec z12_0 = t43 & y13 into r10
eor r11, r11, r12 //Exec z12_2 = z3_4 ^ m0 into r11
eor r10, r10, r11 //Exec z12_3 = z12_0 ^ z12_2 into r10
and r7, r4, r7 //Exec z12_4 = m2 & y13 into r7
eor r7, r7, r3 //Exec z12_5 = z12_4 ^ (m0 | m1) into r7
eor r7, r10, r7 //Exec z12 = z12_3 ^ z12_5 into r7
ldr r10, [sp, #60] //Load y5 into r10
ldr r11, [sp, #64] //Load t8_4 into r11
str.w r0, [sp, #104] //Store r0/z11 on stack
and r10, r10, r5 //Exec z13_0 = y5 & t40 into r10
eor r11, r11, r12 //Exec z13_2 = t8_4 ^ m0 into r11
eor r10, r10, r11 //Exec z13_3 = z13_0 ^ z13_2 into r10
and r5, r9, r5 //Exec z13_4 = m1 & t40 into r5
eor r5, r5, r3 //Exec z13_5 = z13_4 ^ (m0 | m1) into r5
eor r5, r10, r5 //Exec z13 = z13_3 ^ z13_5 into r5
ldr r10, [sp, #16] //Load t29 into r10
ldr r11, [sp, #76] //Load y2 into r11
ldr.w r0, [sp, #96] //Load t39_2 into r0
and r10, r10, r11 //Exec z14_0 = t29 & y2 into r10
eor r0, r10, r0 //Exec z14_3 = z14_0 ^ t39_2 into r0
and r10, r9, r11 //Exec z14_4 = m1 & y2 into r10
eor r10, r10, r3 //Exec z14_5 = z14_4 ^ (m0 | m1) into r10
eor r0, r0, r10 //Exec z14 = z14_3 ^ z14_5 into r0
ldr r10, [sp, #108] //Load y9 into r10
and r11, r10, r8 //Exec z15_0 = y9 & t42 into r11
and r10, r10, r12 //Exec z15_1 = y9 & m0 into r10
eor r10, r10, r12 //Exec z15_2 = z15_1 ^ m0 into r10
eor r10, r11, r10 //Exec z15_3 = z15_0 ^ z15_2 into r10
and r8, r4, r8 //Exec z15_4 = m2 & t42 into r8
eor r8, r8, r3 //Exec z15_5 = z15_4 ^ (m0 | m1) into r8
eor r8, r10, r8 //Exec z15 = z15_3 ^ z15_5 into r8
ldr r10, [sp, #80] //Load y14 into r10
and r11, r10, r14 //Exec z16_0 = y14 & t45 into r11
and r10, r10, r4 //Exec z16_1 = y14 & m2 into r10
eor r10, r10, r4 //Exec z16_2 = z16_1 ^ m2 into r10
eor r10, r11, r10 //Exec z16_3 = z16_0 ^ z16_2 into r10
and r11, r9, r14 //Exec z16_4 = m1 & t45 into r11
eor r11, r11, r3 //Exec z16_5 = z16_4 ^ (m0 | m1) into r11
eor r10, r10, r11 //Exec z16 = z16_3 ^ z16_5 into r10
ldr r11, [sp, #52] //Load y8 into r11
and r11, r6, r11 //Exec z17_0 = t41 & y8 into r11
and r6, r6, r12 //Exec z17_1 = t41 & m0 into r6
eor r6, r6, r12 //Exec z17_2 = z17_1 ^ m0 into r6
eor r6, r11, r6 //Exec z17_3 = z17_0 ^ z17_2 into r6
ldr r11, [sp, #0 ] //Load t15_1 into r11
eor r3, r11, r3 //Exec z17_5 = t15_1 ^ (m0 | m1) into r3
eor r3, r6, r3 //Exec z17 = z17_3 ^ z17_5 into r3
eor r6, r8, r10 //Exec tc1 = z15 ^ z16 into r6
eor r2, r2, r6 //Exec tc2 = z10 ^ tc1 into r2
eor r1, r1, r2 //Exec tc3 = z9 ^ tc2 into r1
ldr r10, [sp, #72] //Load z0 into r10
ldr r11, [sp, #32] //Load z2 into r11
ldr r14, [sp, #100] //Load z1 into r14
str.w r3, [sp, #112] //Store r3/z17 on stack
eor r11, r10, r11 //Exec tc4 = z0 ^ z2 into r11
eor r10, r14, r10 //Exec tc5 = z1 ^ z0 into r10
ldr r14, [sp, #20] //Load z3 into r14
ldr.w r4, [sp, #84] //Load z4 into r4
ldr r9, [sp, #4 ] //Load z7 into r9
ldr.w r3, [sp, #56] //Load z8 into r3
eor r4, r14, r4 //Exec tc6 = z3 ^ z4 into r4
eor r12, r7, r11 //Exec tc7 = z12 ^ tc4 into r12
eor r9, r9, r4 //Exec tc8 = z7 ^ tc6 into r9
eor r3, r3, r12 //Exec tc9 = z8 ^ tc7 into r3
eor r3, r9, r3 //Exec tc10 = tc8 ^ tc9 into r3
eor r4, r4, r10 //Exec tc11 = tc6 ^ tc5 into r4
ldr r10, [sp, #8 ] //Load z5 into r10
eor r10, r14, r10 //Exec tc12 = z3 ^ z5 into r10
eor r5, r5, r6 //Exec tc13 = z13 ^ tc1 into r5
eor r6, r11, r10 //Exec tc14 = tc4 ^ tc12 into r6
eor r4, r1, r4 //Exec S3 = tc3 ^ tc11 into r4
ldr r10, [sp, #12] //Load z6 into r10
eor r9, r10, r9 //Exec tc16 = z6 ^ tc8 into r9
eor r0, r0, r3 //Exec tc17 = z14 ^ tc10 into r0
eor r5, r5, r6 //Exec tc18 = tc13 ^ tc14 into r5
eor r7, r7, r5 //Exec S7 = z12 ^ tc18 ^ 1 into r7
eor r8, r8, r9 //Exec tc20 = z15 ^ tc16 into r8
ldr r10, [sp, #104] //Load z11 into r10
eor r2, r2, r10 //Exec tc21 = tc2 ^ z11 into r2
eor r1, r1, r9 //Exec o7 = tc3 ^ tc16 into r1
eor r3, r3, r5 //Exec o1 = tc10 ^ tc18 ^ 1 into r3
eor r5, r6, r4 //Exec S4 = tc14 ^ S3 into r5
eor r6, r4, r9 //Exec S1 = S3 ^ tc16 ^ 1 into r6
ldr r9, [sp, #112] //Load z17 into r9
eor r8, r0, r8 //Exec tc26 = tc17 ^ tc20 into r8
eor r8, r8, r9 //Exec S2 = tc26 ^ z17 ^ 1 into r8
eor r0, r2, r0 //Exec S5 = tc21 ^ tc17 into r0
ldr.w r2, [sp, #124] //Load m1 into r2
ldr r9, [sp, #128] //Load m0 into r9
ldr r10, [sp, #120] //Load m2 into r10
ldr r14, [sp, #132] // restore link register
eor r12, r8, r9 //Exec o5 = S2 ^ m0 into r12
eor r8, r5, r2 //Exec o3 = S4 ^ m1 into r8
eor r5, r6, r2 //Exec o6 = S1 ^ m1 into r5
eor r4, r4, r10 //Exec o4 = S3 ^ m2 into r4
eor r0, r0, r9 //Exec o2 = S5 ^ m0 into r0
eor r6, r7, r10 //Exec o0 = S7 ^ m2 into r6
bx lr
//[('r0', 'S5'), ('r1', 'S0'), ('r2', 'm1'), ('r3', 'S6'), ('r4', 'S3'),('r5', 'S1'),
//('r6', 'S7'), ('r7', -), ('r8', 'S4'), ('r9', 'm0'), ('r10', 'm0^m1'), ('r11', -),
//('r12', 'S2'), ('r14', -)]
/******************************************************************************
* Computation of the MixColumns transformation in the fixsliced representation.
* For fully-fixsliced implementations, it is used for rounds i s.t. (i%4) == 0.
* For semi-fixsliced implementations, it is used for rounds i s.t. (i%2) == 0.
* Note that 1st-order masking forces to do some remasking to ensure that masks
* are not cancelled through XOR operations.
* In: masked bitsliced state as produced by 'sbox' (see the register map
* above); r2 = m1, r9 = m0, r10 = m0^m1; sp[120] also holds m0^m1.
* Spills r14 to sp[132] and S0^m1 to sp[0]; both r14 and sp are restored.
******************************************************************************/
.align 2
mixcolumns_0:
str r14, [sp, #132]
eor r14, r1, r9 // r14<- S0 ^ m1 ^ m0 remask S0
movw r9, #0x0303
movt r9, #0x0303 // r9<- 0x03030303 (mask for BYTE_ROR_6)
and r11, r9, r14, lsr #6 // r11<- (S0 ^ m0 ^ m1 >> 6) & 0x03030303
bic r14, r14, r9, ror #2 // r14<- S0 ^ m0 ^ m1 & 0x3f3f3f3f
orr r7, r11, r14, lsl #2 // r7 <- BYTE_ROR_6(S0 ^ m0 ^ m1)
eor r14, r1, r7, ror #8 // r14<- S0 ^ BYTE_ROR_6(S0 >>> 8) ^ m0
and r11, r9, r6, lsr #6 // r11<- (S7 ^ m1 >> 6) & 0x03030303
bic r7, r6, r9, ror #2 // r7 <- S7^m1 & 0x3f3f3f3f
orr r7, r11, r7, lsl #2 // r7 <- BYTE_ROR_6(S7 ^ m1)
eor r10, r7, r10 // r10 <- BYTE_ROR_6(S7 ^ m1) ^ (m0 ^ m1) remask r7
eor r10, r6, r10, ror #8 // r10 <- S7 ^ (BYTE_ROR_6(S7) >>> 8) ^ m0 ^ m1
bic r11, r6, r9 // r11<- S7 ^ m1 & 0xfcfcfcfc
and r6, r6, r9 // r6 <- S7 ^m1 & 0x03030303
orr r6, r6, r11, ror #8 // r6 <- r6 ^r11 >>> 8
eor r11, r14, r6, ror #18 // r11<- S0 ^ BYTE_ROR_6(S0 >>> 8) ^ BYTE_ROR_2(S7 >>> 24) ^ m0
eor r7, r10, r2 // r7 <- S7 ^ (BYTE_ROR_6(S7) >>> 8) ^ m0 remask r10
and r6, r9, r7, lsr #6 // r6 <- (r7 >> 6) & 0x03030303
bic r7, r7, r9, ror #2 // r7 <- r7 & 0x3f3f3f3f
orr r7, r6, r7, lsl #2 // r7 <- BYTE_ROR_6(r7) ^ m0
eor r11, r11, r7, ror #8 // r11<- S'7 ^ m1
eor r10, r10, r14 // r10<- S7^S0 ^ BYTE_ROR_6(S7^S0 >>> 8) ^ m1
and r7, r9, r3, lsr #6 // r7 <- (S6 ^m0^m1 >> 6) & 0x03030303
bic r6, r3, r9, ror #2 // r6 <- S6^m0^m1 & 0x3f3f3f3f
orr r6, r7, r6, lsl #2 // r6 <- BYTE_ROR_6(S6 ^m0^m1)
eor r6, r6, r2 // r6 <- BYTE_ROR_6(S6 ^m0) remask r6
eor r6, r3, r6, ror #8 // r6 <- S6 ^ BYTE_ROR_6(S6 >>> 8)^m1
bic r7, r3, r9 // r7 <- S6 ^m0^m1 & 0xfcfcfcfc
and r3, r3, r9 // r3 <- S6 ^m0^m1 & 0x03030303
orr r3, r3, r7, ror #8 // r3 <- r3 ^r7 >>> 8
eor r10, r10, r3, ror #18 // r10<- ^m0
and r3, r9, r6, lsr #6 // r3 <- (r6 >> 6) & 0x03030303
bic r7, r6, r9, ror #2 // r7 <- r6 & 0x3f3f3f3f
orr r7, r3, r7, lsl #2 // r7 <- BYTE_ROR_6(r6) ^ m0
eor r10, r10, r7, ror #8 // r10<- S'6 (accumulated)
mov.w r3, r9 // move the mask for BYTEROR to r3
and r7, r3, r0, lsr #6 // r7 <- (S5 ^m0^m1 >> 6) & 0x03030303
bic r9, r0, r3, ror #2 // r9 <- S5^m0^m1 & 0x3f3f3f3f
orr r9, r7, r9, lsl #2 // r9 <- BYTE_ROR_6(S5 ^m0^m1)
eor r9, r9, r2 // r9 <- BYTE_ROR_6(S5 ^m0) remask r6
eor r7, r0, r9, ror #8 // r7 <- S5 ^ BYTE_ROR_6(S5) ^ m1
bic r9, r0, r3 // r9 <- S5 ^m0^m1 & 0xfcfcfcfc
and r0, r0, r3 // r0 <- S5 ^m0^m1 & 0x03030303
orr r0, r0, r9, ror #8 // r0 <- r0 ^r9 >>> 8
eor r9, r6, r0, ror #18 // r9 <- S6 ^ BYTE_ROR_6(S6 >>> 8) ^ BYTE_ROR_2(S5 >>> 24) ^m0
and r0, r3, r7, lsr #6 // r0 <- (r7 >> 6) & 0x03030303
bic r6, r7, r3, ror #2 // r6 <- r7 & 0x3f3f3f3f
orr r6, r0, r6, lsl #2 // r6 <- BYTE_ROR_6(r7) ^ m1
eor r9, r9, r6, ror #8 // r9 <- S'5 ^ m0 ^ m1
and r6, r3, r8, lsr #6 // r6 <- (S4 ^m0 >> 6) & 0x03030303
bic r0, r8, r3, ror #2 // r0 <- S4^m0 & 0x3f3f3f3f
orr r6, r6, r0, lsl #2 // r6 <- BYTE_ROR_6(S4 ^m0)
ldr.w r0, [sp, #120] // load m0 ^ m1
str.w r1, [sp] // store S0 ^ m1 to free r1
eor r6, r6, r0 // r6 <- BYTE_ROR_6(S4) ^ m1 remask r6
eor r6, r8, r6, ror #8 // r6 <- S4 ^ BYTE_ROR_6(S4 >>> 8) ^ m0 ^ m1
bic r1, r8, r3 // r1 <- S4 ^m0 & 0xfcfcfcfc
and r8, r8, r3 // r8 <- S4 ^m0 & 0x03030303
orr r8, r8, r1, ror #8 // r8 <- r8^r1 >>> 8
eor r8, r7, r8, ror #18 // r8 <- S5 ^ BYTE_ROR_6(S5 >>> 8) ^ BYTE_ROR_2(S4 >>> 24) ^m0^m1
eor r8, r8, r14 // r8 <- S5^S0 ^ BYTE_ROR_6(S5^S0 >>> 8) ^ BYTE_ROR_2(S4 >>> 24) ^m1
and r1, r3, r6, lsr #6 // r1 <- (r6 >> 6) & 0x03030303
bic r7, r6, r3, ror #2 // r7 <- r6 & 0x3f3f3f3f
orr r7, r1, r7, lsl #2 // r7 <- BYTE_ROR_6(r6) ^ m0 ^ m1
eor r8, r8, r7, ror #8 // r8 <- S'4 ^ m0
and r7, r3, r4, lsr #6 // r7 <- (S3 ^m0 >> 6) & 0x03030303
bic r1, r4, r3, ror #2 // r1 <- S3^m0 & 0x3f3f3f3f
orr r7, r7, r1, lsl #2 // r7 <- BYTE_ROR_6(S3 ^m0)
eor r7, r7, r0 // r7 <- BYTE_ROR_6(S3) ^ m1 remask r7
eor r1, r4, r7, ror #8 // r1 <- S3 ^ BYTE_ROR_6(S3) ^ m0 ^ m1
bic r7, r4, r3 // r7 <- S3 ^m0 & 0xfcfcfcfc
and r4, r4, r3 // r4 <- S3 ^m0 & 0x03030303
orr r4, r4, r7, ror #8 // r4 <- r4^r7 >>> 8
eor r7, r6, r4, ror #18 // r7 <- S4 ^ BYTE_ROR_6(S4 >>> 8) ^ BYTE_ROR_2(S3 >>> 24) ^m1
and r6, r3, r1, lsr #6 // r6 <- (r1 >> 6) & 0x03030303
bic r4, r1, r3, ror #2 // r4 <- r1 & 0x3f3f3f3f
orr r6, r6, r4, lsl #2 // r6 <- BYTE_ROR_6(r1) ^ m0 ^ m1
eor r7, r7, r6, ror #8 // r7 <- ^ m0
eor r7, r7, r0 // r7 <- ^ m1 remask r7
eor r7, r7, r14 // r7 <- S'3 ^ m0 ^ m1
eor r7, r7, r2 // r7 <- S'3 ^ m0 remask r7
and r4, r3, r12, lsr #6 // r4 <- (S2 ^m1 >> 6) & 0x03030303
bic r6, r12, r3, ror #2 // r6 <- S2^m1 & 0x3f3f3f3f
orr r4, r4, r6, lsl #2 // r4 <- BYTE_ROR_6(S2 ^m1)
eor r4, r4, r0 // r4 <- BYTE_ROR_6(S2) ^m0 remask r4
eor r4, r12, r4, ror #8 // r4 <- S2 ^ BYTE_ROR_6(s2) ^ m0^m1
bic r6, r12, r3 // r6 <- S2 ^m1 & 0xfcfcfcfc
and r12, r12, r3 // r12<- S2 ^m1 & 0x03030303
orr r12, r12, r6, ror #8 // r12<- r12^r6 >>> 8
eor r6, r1, r12, ror #18 // r6 <- S3 ^ BYTE_ROR_6(S3 >>> 8) ^ BYTE_ROR_2(S2 >>> 24) ^m0
and r12, r3, r4, lsr #6 // r12<- (r4 >> 6) & 0x03030303
bic r1, r4, r3, ror #2 // r1 <- r4 & 0x3f3f3f3f
orr r12, r12, r1, lsl #2 // r12<- BYTE_ROR_6(r4) ^ m0 ^ m1
eor r6, r6, r12, ror #8 // r6 <- S'2 ^ m1
and r1, r3, r5, lsr #6 // r1 <- (S1 ^m0 >> 6) & 0x03030303
bic r12, r5, r3, ror #2 // r12<- S1^m0 & 0x3f3f3f3f
orr r1, r1, r12, lsl #2 // r1 <- BYTE_ROR_6(S1 ^m0)
eor r1, r1, r0 // r1 <- BYTE_ROR_6(S1) ^m1
eor r1, r5, r1, ror #8 // r1 <- S1 ^ BYTE_ROR_6(S1) ^ m0 ^m1
bic r12, r5, r3 // r12<- S1 ^m0 & 0xfcfcfcfc
and r5, r5, r3 // r5 <- S1 ^m0 & 0x03030303
orr r5, r5, r12, ror #8 // r5 <- r5^r12 >>> 8
eor r5, r4, r5, ror #18 // r5 <- S2 ^ BYTE_ROR_6(S2 >>> 8) ^ BYTE_ROR_2(S1 >>> 24) ^m1
and r12, r3, r1, lsr #6 // r12<- (r1 >> 6) & 0x03030303
bic r4, r1, r3, ror #2 // r4 <- r1 & 0x3f3f3f3f
orr r12, r12, r4, lsl #2 // r12<- BYTE_ROR_6(r1) ^ m0 ^ m1
eor r5, r5, r12, ror #8 // r5 <- S'1 ^ m0
eor r1, r1, r2 // r1 <- S1^ BYTE_ROR_6(S1) ^ m0 remask r1
ldr.w r4, [sp] // load S0
and r12, r3, r14, lsr #6 // r12<- (r14 >> 6) & 0x03030303
bic r14, r14, r3, ror #2 // r14<- r14 & 0x3f3f3f3f
orr r12, r12, r14, lsl #2 // r12<- BYTE_ROR_6(r14)
bic r14, r4, r3 // r14<- S0 ^m1 & 0xfcfcfcfc
and r4, r4, r3 // r4 <- S0 ^m1 & 0x03030303
orr r4, r4, r14, ror #8 // r4 <- r4^r14 >>> 8
ldr.w r14, [sp, #132] // restore link register
eor r4, r1, r4, ror #18 // r4 <- S1^ BYTE_ROR_6(S1) ^ BYTE_ROR_2(S0) ^ m0 ^ m1
eor r4, r4, r12, ror #8 // r4 <- S'0 ^ m1
bx lr
//(r0, m0^m1), (r1, S0), (r2, m1), (r3, S6), (r4, S0),(r5, S1), (r6, S2),
//(r7, S3), (r8, S4), (r9, S5), (r10, S6), (r11, S7), (r12, -), (r14, -)]
/******************************************************************************
* Computation of the MixColumns transformation in the fixsliced representation.
* For fully-fixsliced implementations only, for round i s.t. (i%4) == 1.
* Note that 1st-order masking forces to do some remasking to ensure that masks
* are not cancelled through XOR operations.
* Out: updated masked state S'0..S'7 in r4..r11; m1 reloaded into r2 (r2 is
* clobbered during the computation). m0^m1 is reloaded from [sp, #120].
* Called via BL; r14 is spilled to [sp, #132] and restored before returning.
* NOTE(review): the register-mapping comment above appears partially stale
* (e.g. r9 is consumed as a mask value on entry here) - confirm against the
* sbox output convention.
******************************************************************************/
.align 2
mixcolumns_1:
str r14, [sp, #132] // save link register
eor r14, r1, r9 // r14<- S0 ^ m1 ^ m0 remask S0
movw r9, #0x0f0f
movt r9, #0x0f0f // r9<- 0x0f0f0f0f (mask for BYTE_ROR_4)
and r11, r9, r6, lsr #4 // r11<- (S7 ^ m1 >> 4) & 0x0f0f0f0f
and r7, r9, r6 // r7 <- (S7 ^ m1) & 0x0f0f0f0f
orr r11, r11, r7, lsl #4 // r11<- BYTE_ROR_4(S7 ^ m1)
eor r7, r11, r10 // r7 <- BYTE_ROR_4(S7 ^ m1) ^ (m0 ^ m1) remask r7
eor r7, r6, r7, ror #8 // r7 <- S7 ^ (BYTE_ROR_4(S7) >>> 8) ^ m0 ^ m1
eor r10, r2, r7, ror #16 // r10<- (S7 ^ (BYTE_ROR_4(S7) >>> 8)) >>> 16 ^ m0 remask r7
eor r11, r10, r11, ror #8 // r11<- ^ m0 ^ m1
and r10, r9, r14, lsr #4 // r10<- (S0 ^ m0 ^ m1 >> 4) & 0x0f0f0f0f
and r14, r9, r14 // r14<- (S0 ^ m0 ^ m1) & 0x0f0f0f0f
orr r6, r10, r14, lsl #4 // r6 <- BYTE_ROR_4(S0 ^ m0 ^ m1)
eor r14, r1, r6, ror #8 // r14<- S0 ^ (BYTE_ROR_4(S0) >>> 8) ^ m0
mov.w r1, r6 // r1 <- BYTE_ROR_4(S0) ^ m0 ^ m1
eor r11, r11, r14 // r11<- S'7 ^ m1
eor r10, r7, r14 // r10 <- S7 ^ (BYTE_ROR_4(S7) >>> 8) ^ S0 ^ (BYTE_ROR_4(S0) >>> 8) ^ m1
and r6, r9, r3, lsr #4 // r6 <- (S6 ^ m0 ^ m1 >> 4) & 0x0f0f0f0f
and r7, r9, r3 // r7 <- (S6 ^ m0 ^ m1) & 0x0f0f0f0f
orr r7, r6, r7, lsl #4 // r7 <- BYTE_ROR_4(S6 ^ m0 ^ m1)
eor r10, r10, r7, ror #8 // r10 <- ^ m0
eor r6, r3, r2 // r6 <- S6 ^ m0 remask S6
eor r7, r6, r7, ror #8 // r7 <- S6 ^ BYTE_ROR_4(S6) >>> 8 ^ m1
eor r10, r10, r7, ror #16 // r10 <- S'6 ^ m0 ^ m1
mov.w r3, r9 // move the mask for BYTEROR to r3
and r6, r3, r0, lsr #4 // r6 <- (S5 ^ m0 ^ m1 >> 4) & 0x0f0f0f0f
and r9, r3, r0 // r9 <- (S5 ^ m0 ^ m1) & 0x0f0f0f0f
orr r6, r6, r9, lsl #4 // r6 <- BYTE_ROR_4(S5 ^ m0 ^ m1)
eor r7, r7, r6, ror #8 // r7 <- S6 ^ BYTE_ROR_4(S6) >>> 8 ^ BYTE_ROR_4(S5) ^ m0
eor r9, r0, r2 // r9 <- S5 ^ m0 remask S5
eor r6, r9, r6, ror #8 // r6 <- S5 ^ BYTE_ROR4(S5) >>> 8 ^ m1
eor r9, r7, r6, ror #16 // r9 <- S'5 ^ m0 ^ m1
and r7, r3, r8, lsr #4 // r7 <- (S4 ^ m0 >> 4) & 0x0f0f0f0f
and r0, r3, r8 // r0 <- (S4 ^ m0) & 0x0f0f0f0f
orr r7, r7, r0, lsl #4 // r7 <- BYTE_ROR_4(S4 ^ m0)
eor r6, r6, r7, ror #8 // r6 <- S5 ^ BYTE_ROR4(S5) >>> 8 ^ BYTE_ROR_4(S4) >>>8 ^ m1 ^ m0
ldr.w r0, [sp, #120] // load m0 ^ m1 in r0
eor r7, r7, r0 // r7 <- BYTE_ROR_4(S4) ^ m1 remask r7
eor r7, r8, r7, ror #8 // r7 <- S4 ^ BYTE_ROR_4(S4)>>>8 ^ m0 ^ m1
eor r8, r6, r14 // r8 <- ^ m1
eor r8, r8, r7, ror #16 // r8 <- S'4 ^ m0
eor r7, r7, r2 // r7 <- S4 ^ BYTE_ROR_4(S4)>>>8 ^ m0 remask r7
and r6, r3, r4, lsr #4 // r6 <- (S3 ^ m0 >> 4) & 0x0f0f0f0f
and r2, r3, r4 // r2 <- (S3 ^ m0) & 0x0f0f0f0f
orr r6, r6, r2, lsl #4 // r6 <- BYTE_ROR_4(S3 ^ m0)
eor r2, r6, r0 // r2 <- BYTE_ROR_4(S3) ^ m1 remask S3
eor r2, r4, r2, ror #8 // r2 <- S3 ^ BYTE_ROR_4(S3) >>> 8 ^ m0 ^ m1
eor r7, r7, r2, ror #16 // r7 <- ^ m1
eor r7, r7, r6, ror #8 // r7 <- ^ m0 ^ m1
eor r7, r7, r14 // r7 <- S'3 ^ m1
eor r7, r7, r0 // r7 <- S'3 ^ m0 remask S'3
and r4, r3, r12, lsr #4 // r4 <- (S2 ^ m1 >> 4) & 0x0f0f0f0f
and r6, r3, r12 // r6 <- (S2 ^ m1) & 0x0f0f0f0f
orr r4, r4, r6, lsl #4 // r4 <- BYTE_ROR_4(S2 ^ m1)
eor r6, r2, r4, ror #8 // r6 <- S3 ^ BYTE_ROR_4(S3) >>> 8 ^ BYTE_ROR_4(S2)>>>8 ^ m0
eor r4, r4, r0 // r4 <- BYTE_ROR_4(S2) ^ m0 remask r4
eor r4, r12, r4, ror #8 // r4 <- S2 ^ BYTE_ROR_4(S2)>>>8 ^ m1 ^ m0
eor r6, r6, r4, ror #16 // r6 <- S'2 ^ m1
and r12, r3, r5, lsr #4 // r12<- (S1 ^ m0 >> 4) & 0x0f0f0f0f
and r3, r3, r5 // r3 <- (S1 ^ m0) & 0x0f0f0f0f
orr r12, r12, r3, lsl #4 // r12<- BYTE_ROR_4(S1 ^ m0)
eor r4, r4, r12, ror #8 // r4 <- S2 ^ BYTE_ROR_4(S2)>>>8 ^ BYTE_ROR_4(S1)>>>8 ^ m1
eor r12, r12, r0 // r12<- BYTE_ROR_4(S1) ^ m1 remask r12
eor r12, r5, r12, ror #8 // r12<- S1 ^ BYTE_ROR_4(S1)>>>8 ^ m0 ^ m1
eor r5, r4, r12, ror #16 // r5 <- S'1 ^ m0
eor r4, r12, r14, ror #16 // r4 <- S1^BYTE_ROR_4(S1)>>>8 ^ (S0^BYTE_ROR_4(S0) >>> 8)>>>16 ^ m1
eor r4, r4, r1, ror #8 // r4 <- ^ m0
eor r4, r4, r0 // r4 <- S'0 ^ m1 remask
ldr r14, [sp, #132] // restore link register
ldr.w r2, [sp, #124] // load m1 in r2
bx lr
//(r0, m0^m1), (r1, S0), (r2, m1), (r3, S6), (r4, S0),(r5, S1), (r6, S2),
//(r7, S3), (r8, S4), (r9, S5), (r10, S6), (r11, S7), (r12, -), (r14, -)]
/******************************************************************************
* Computation of the MixColumns transformation in the fixsliced representation.
* For fully-fixsliced implementations only, for rounds i s.t. (i%4) == 2.
* Note that 1st-order masking forces to do some remasking to ensure that masks
* are not cancelled through XOR operations.
* Uses BYTE_ROR_2 / BYTE_ROR_6 built from the 0x3f3f3f3f mask; one scratch
* word is spilled at [sp] and m0^m1 is reloaded from [sp, #120].
* Out: updated masked state S'0..S'7 in r4..r11.
* Called via BL; r14 is spilled to [sp, #132] and restored before returning.
* NOTE(review): the register-mapping comment above appears partially stale
* (e.g. r9 is consumed as a mask value on entry here) - confirm against the
* sbox output convention.
******************************************************************************/
.align 2
mixcolumns_2:
str r14, [sp, #132] // save link register
eor r14, r1, r9 // r14<- S0 ^ m1 ^ m0 remask S0
movw r9, #0x3f3f
movt r9, #0x3f3f // r9 <- 0x3f3f3f3f (mask for BYTE_ROR_2)
and r11, r9, r14, lsr #2 // r11<- (S0 ^ m0 ^ m1 >> 2) & 0x3f3f3f3f
bic r14, r14, r9, ror #6 // r14<- S0 ^ m0 ^ m1 & 0x03030303
orr r7, r11, r14, lsl #6 // r7 <- BYTE_ROR_2(S0 ^ m0 ^ m1)
eor r14, r1, r7, ror #8 // r14<- S0 ^ BYTE_ROR_2(S0 >>> 8) ^ m0
and r11, r9, r6, lsr #2 // r11<- (S7 ^ m1 >> 2) & 0x3f3f3f3f
bic r7, r6, r9, ror #6 // r7 <- S7^m1 & 0x03030303
orr r7, r11, r7, lsl #6 // r7 <- BYTE_ROR_2(S7 ^ m1)
eor r10, r7, r10 // r10<- BYTE_ROR_2(S7 ^ m1) ^ (m0 ^ m1) remask r7
eor r10, r6, r10, ror #8 // r10<- S7 ^ (BYTE_ROR_2(S7) >>> 8) ^ m0 ^ m1
bic r11, r6, r9 // r11<- S7 ^ m1 & 0xfcfcfcfc
and r6, r6, r9 // r6 <- S7 ^m1 & 0x3f3f3f3f
orr r6, r6, r11, ror #8 // r6 <- r6 ^r11 >>> 8
eor r11, r14, r6, ror #22 // r11<- S0 ^ BYTE_ROR_2(S0 >>> 8) ^ BYTE_ROR_6(S7 >>> 24) ^ m0
eor r7, r10, r2 // r7 <- S7 ^ (BYTE_ROR_2(S7) >>> 8) ^ m0 remask r10
and r6, r9, r7, lsr #2 // r6 <- (r7 >> 2) & 0x3f3f3f3f
bic r7, r7, r9, ror #6 // r7 <- r7 & 0x03030303
orr r7, r6, r7, lsl #6 // r7 <- BYTE_ROR_2(r7) ^ m0
eor r11, r11, r7, ror #8 // r11<- S'7 ^ m1
eor r10, r10, r14 // r10<- S7^S0 ^ BYTE_ROR_2(S7^S0 >>> 8) ^ m1
and r7, r9, r3, lsr #2 // r7 <- (S6 ^m0^m1 >> 2) & 0x3f3f3f3f
bic r6, r3, r9, ror #6 // r6 <- S6^m0^m1 & 0x03030303
orr r6, r7, r6, lsl #6 // r6 <- BYTE_ROR_2(S6 ^m0^m1)
eor r6, r6, r2 // r6 <- BYTE_ROR_2(S6 ^m0) remask r6
eor r6, r3, r6, ror #8 // r6 <- S6 ^ BYTE_ROR_2(S6 >>> 8)^m1
bic r7, r3, r9 // r7 <- S6 ^m0^m1 & 0xfcfcfcfc
and r3, r3, r9 // r3 <- S6 ^m0^m1 & 0x3f3f3f3f
orr r3, r3, r7, ror #8 // r3 <- r3 ^r7 >>> 8
eor r10, r10, r3, ror #22 // r10<- ^m0
and r3, r9, r6, lsr #2 // r3 <- (r6 >> 2) & 0x3f3f3f3f
bic r7, r6, r9, ror #6 // r7 <- r6 & 0x03030303
orr r7, r3, r7, lsl #6 // r7 <- BYTE_ROR_2(r6) ^ m0
eor r10, r10, r7, ror #8 // r10<- S'6
mov.w r3, r9 // move the mask for BYTEROR to r3
and r7, r3, r0, lsr #2 // r7 <- (S5 ^m0^m1 >> 2) & 0x3f3f3f3f
bic r9, r0, r3, ror #6 // r9 <- S5^m0^m1 & 0x03030303
orr r9, r7, r9, lsl #6 // r9 <- BYTE_ROR_2(S5 ^m0^m1)
eor r9, r9, r2 // r9 <- BYTE_ROR_2(S5 ^m0) remask r9
eor r7, r0, r9, ror #8 // r7 <- S5 ^ BYTE_ROR_2(S5) ^ m1
bic r9, r0, r3 // r9 <- S5 ^m0^m1 & 0xfcfcfcfc
and r0, r0, r3 // r0 <- S5 ^m0^m1 & 0x3f3f3f3f
orr r0, r0, r9, ror #8 // r0 <- r0 ^r9 >>> 8
eor r9, r6, r0, ror #22 // r9 <- S6 ^ BYTE_ROR_2(S6 >>> 8) ^ BYTE_ROR_6(S5 >>> 24) ^m0
and r0, r3, r7, lsr #2 // r0 <- (r7 >> 2) & 0x3f3f3f3f
bic r6, r7, r3, ror #6 // r6 <- r7 & 0x03030303
orr r6, r0, r6, lsl #6 // r6 <- BYTE_ROR_2(r7) ^ m1
eor r9, r9, r6, ror #8 // r9 <- S'5 ^ m0 ^ m1
and r6, r3, r8, lsr #2 // r6 <- (S4 ^m0 >> 2) & 0x3f3f3f3f
bic r0, r8, r3, ror #6 // r0 <- S4^m0 & 0x03030303
orr r6, r6, r0, lsl #6 // r6 <- BYTE_ROR_2(S4 ^m0)
ldr.w r0, [sp, #120] // load m0 ^ m1
str.w r1, [sp] // store S0 ^ m1 to free r1
eor r6, r6, r0 // r6 <- BYTE_ROR_2(S4) ^ m1 remask r6
eor r6, r8, r6, ror #8 // r6 <- S4 ^ BYTE_ROR_2(S4 >>> 8) ^ m0 ^ m1
bic r1, r8, r3 // r1 <- S4 ^m0 & 0xfcfcfcfc
and r8, r8, r3 // r8 <- S4 ^m0 & 0x3f3f3f3f
orr r8, r8, r1, ror #8 // r8 <- r8^r1 >>> 8
eor r8, r7, r8, ror #22 // r8 <- S5 ^ BYTE_ROR_2(S5 >>> 8) ^ BYTE_ROR_6(S4 >>> 24) ^m0^m1
eor r8, r8, r14 // r8 <- S5^S0 ^ BYTE_ROR_2(S5^S0 >>> 8) ^ BYTE_ROR_6(S4 >>> 24) ^m1
and r1, r3, r6, lsr #2 // r1 <- (r6 >> 2) & 0x3f3f3f3f
bic r7, r6, r3, ror #6 // r7 <- r6 & 0x03030303
orr r7, r1, r7, lsl #6 // r7 <- BYTE_ROR_2(r6) ^ m0 ^ m1
eor r8, r8, r7, ror #8 // r8 <- S'4 ^ m0
and r7, r3, r4, lsr #2 // r7 <- (S3 ^m0 >> 2) & 0x3f3f3f3f
bic r1, r4, r3, ror #6 // r1 <- S3^m0 & 0x03030303
orr r7, r7, r1, lsl #6 // r7 <- BYTE_ROR_2(S3 ^m0)
eor r7, r7, r0 // r7 <- BYTE_ROR_2(S3) ^ m1 remask r7
eor r1, r4, r7, ror #8 // r1 <- S3 ^ BYTE_ROR_2(S3) ^ m0 ^ m1
bic r7, r4, r3 // r7 <- S3 ^m0 & 0xfcfcfcfc
and r4, r4, r3 // r4 <- S3 ^m0 & 0x3f3f3f3f
orr r4, r4, r7, ror #8 // r4 <- r4^r7 >>> 8
eor r7, r6, r4, ror #22 // r7 <- S4 ^ BYTE_ROR_2(S4 >>> 8) ^ BYTE_ROR_6(S3 >>> 24) ^m1
and r6, r3, r1, lsr #2 // r6 <- (r1 >> 2) & 0x3f3f3f3f
bic r4, r1, r3, ror #6 // r4 <- r1 & 0x03030303
orr r6, r6, r4, lsl #6 // r6 <- BYTE_ROR_2(r1) ^ m0 ^ m1
eor r7, r7, r6, ror #8 // r7 <- ^ m0
eor r7, r7, r0 // r7 <- ^ m1 remask r7
eor r7, r7, r14 // r7 <- S'3 ^ m0 ^ m1
eor r7, r7, r2 // r7 <- S'3 ^ m0 remask r7
and r4, r3, r12, lsr #2 // r4 <- (S2 ^m1 >> 2) & 0x3f3f3f3f
bic r6, r12, r3, ror #6 // r6 <- S2^m1 & 0x03030303
orr r4, r4, r6, lsl #6 // r4 <- BYTE_ROR_2(S2 ^m1)
eor r4, r4, r0 // r4 <- BYTE_ROR_2(S2) ^m0 remask r4
eor r4, r12, r4, ror #8 // r4 <- S2 ^ BYTE_ROR_2(s2) ^ m0^m1
bic r6, r12, r3 // r6 <- S2 ^m1 & 0xfcfcfcfc
and r12, r12, r3 // r12<- S2 ^m1 & 0x3f3f3f3f
orr r12, r12, r6, ror #8 // r12<- r12^r6 >>> 8
eor r6, r1, r12, ror #22 // r6 <- S3 ^ BYTE_ROR_2(S3 >>> 8) ^ BYTE_ROR_6(S2 >>> 24) ^m0
and r12, r3, r4, lsr #2 // r12<- (r4 >> 2) & 0x3f3f3f3f
bic r1, r4, r3, ror #6 // r1 <- r4 & 0x03030303
orr r12, r12, r1, lsl #6 // r12<- BYTE_ROR_2(r4) ^ m0 ^ m1
eor r6, r6, r12, ror #8 // r6 <- S'2 ^ m1
and r1, r3, r5, lsr #2 // r1 <- (S1 ^m0 >> 2) & 0x3f3f3f3f
bic r12, r5, r3, ror #6 // r12<- S1^m0 & 0x03030303
orr r1, r1, r12, lsl #6 // r1 <- BYTE_ROR_2(S1 ^m0)
eor r1, r1, r0 // r1 <- BYTE_ROR_2(S1) ^m1
eor r1, r5, r1, ror #8 // r1 <- S1 ^ BYTE_ROR_2(S1) ^ m0 ^m1
bic r12, r5, r3 // r12<- S1 ^m0 & 0xfcfcfcfc
and r5, r5, r3 // r5 <- S1 ^m0 & 0x3f3f3f3f
orr r5, r5, r12, ror #8 // r5 <- r5^r12 >>> 8
eor r5, r4, r5, ror #22 // r5 <- S2 ^ BYTE_ROR_2(S2 >>> 8) ^ BYTE_ROR_6(S1 >>> 24) ^m1
and r12, r3, r1, lsr #2 // r12<- (r1 >> 2) & 0x3f3f3f3f
bic r4, r1, r3, ror #6 // r4 <- r1 & 0x03030303
orr r12, r12, r4, lsl #6 // r12<- BYTE_ROR_2(r1) ^ m0 ^ m1
eor r5, r5, r12, ror #8 // r5 <- S'1 ^ m0
eor r1, r1, r2 // r1 <- S1^ BYTE_ROR_2(S1) ^ m0 remask r1
ldr.w r4, [sp] // load S0
and r12, r3, r14, lsr #2 // r12<- (r14 >> 2) & 0x3f3f3f3f
bic r14, r14, r3, ror #6 // r14<- r14 & 0x03030303
orr r12, r12, r14, lsl #6 // r12<- BYTE_ROR_2(r14)
bic r14, r4, r3 // r14<- S0 ^m1 & 0xfcfcfcfc
and r4, r4, r3 // r4 <- S0 ^m1 & 0x3f3f3f3f
orr r4, r4, r14, ror #8 // r4 <- r4^r14 >>> 8
ldr r14, [sp, #132] // restore link register
eor r4, r1, r4, ror #22 // r4 <- S1^ BYTE_ROR_2(S1) ^ BYTE_ROR_6(S0) ^ m0 ^ m1
eor r4, r4, r12, ror #8 // r4 <- S'0 ^ m1
bx lr
//(r0, m0^m1), (r1, S0), (r2, m1), (r3, S6), (r4, S0),(r5, S1), (r6, S2),
//(r7, S3), (r8, S4), (r9, S5), (r10, S6), (r11, S7), (r12, -), (r14, -)]
/******************************************************************************
* Computation of the MixColumns transformation in the fixsliced representation.
* For fully-fixsliced implementations, it is used for rounds i s.t. (i%4) == 3.
* For semi-fixsliced implementations, it is used for rounds i s.t. (i%2) == 1.
* Based on Käsper-Schwabe, similar to https://github.com/Ko-/aes-armcortexm.
* Note that 1st-order masking forces to do some remasking to ensure that masks
* are not cancelled through XOR operations.
* Out: updated masked state S'0..S'7 in r4..r11; m0^m1 reloaded from
* [sp, #120] into r0 mid-routine.
* Called via BL; r14 is spilled to [sp, #132] and restored before returning.
* NOTE(review): the register-mapping comment above appears partially stale
* (e.g. r9/r10 are consumed as mask values on entry here) - confirm against
* the sbox output convention.
******************************************************************************/
.align 2
mixcolumns_3:
str r14, [sp, #132] // save link register
eor r14, r1, r9 // r14<- S0 ^ m1 ^ m0 remask S0
eor r14, r1, r14, ror #8 // r14<- (S0 ^ S0 >>> 8) ^ m0
eor r7, r6, r10 // r7 <- S7 ^ m1 ^ (m0 ^ m1) remask S7
eor r7, r6, r7, ror #8 // r7 <- (S7 ^ S7 >>> 8) ^ m0 ^ m1
eor r11, r7, r2 // r11<- (S7 ^ S7 >>> 8) ^ m0 remask r7
eor r11, r6, r11, ror #8 // r11<- (S7 ^ S7 >>> 8 ^ S7 >>> 16) ^ m0 ^ m1
eor r11, r14, r11, ror #8 // r11<- S'7 ^ m1
eor r9, r3, r2 // r9 <- S6 ^ m0 ^ m1 ^ m1 remask S6
eor r9, r3, r9, ror #8 // r9 <- (S6 ^ S6 >>> 8) ^ m1
eor r10, r7, r9, ror #16 // r10<- (S7 ^ S7 >>> 8) ^(S6 >>> 16 ^ S6 >>> 24) ^ m0
eor r10, r10, r3, ror #8 // r10<- r10 ^ S6 >>> 8 ^ m0 ^ m1
eor r10, r10, r14 // r10<- S'6 ^ m0 ^ m1
eor r6, r0, r2 // r6 <- S5 ^ m0 ^ m1 ^ m1 remask S5
eor r6, r0, r6, ror #8 // r6 <- (S5 ^ S5 >>> 8) ^ m1
eor r9, r9, r0, ror #8 // r9 <- (S6 ^ S6 >>> 8) ^ S5 >>> 8 ^ m0
ldr.w r0, [sp, #120] // load m0 ^ m1
eor r9, r9, r6, ror #16 // r9 <- S'5 ^ m0 ^ m1
eor r7, r8, r0 // r7 <- S4 ^ m0 ^ (m0 ^ m1) remask S4
eor r7, r8, r7, ror #8 // r7 <- (S4 ^ S4 >>> 8) ^ m0 ^ m1
eor r3, r14, r7, ror #16 // r3 <- r7 >>> 16 ^ (S0 ^ S0 >>> 8) ^ m0
eor r3, r3, r8, ror #8 // r3 <- r3 ^ S4 >>> 8 ^ m0
eor r8, r3, r6 // r8 <- S'4 ^ m0
eor r6, r4, r0 // r6 <- S3 ^ m0 ^ (m0 ^ m1) remask S3
eor r6, r4, r6, ror #8 // r6 <- (S3 ^ S3 >>> 8) ^ m0 ^ m1
eor r7, r7, r2 // r7 <- (S4 ^ S4 >>> 8) ^ m0 remask r7
eor r7, r7, r6, ror #16 // r7 <- ^ m1
eor r7, r7, r4, ror #8 // r7 <- ^ m1 ^ m0
eor r7, r7, r14 // r7 <- S'3 ^ m1
eor r7, r7, r0 // r7 <- S'3 ^ m0 remask S'3
eor r3, r12, r0 // r3 <- S2 ^ m1 ^ (m0 ^ m1) remask S2
eor r3, r12, r3, ror #8 // r3 <- (S2 ^ S2 >>> 8) ^ m0 ^ m1
eor r6, r6, r12, ror #8 // r6 <- ^ m0
eor r6, r6, r3, ror #16 // r6 <- S'2 ^ m1
eor r4, r5, r0 // r4 <- S1 ^ m0 ^ m0 ^ m1 remask S1
eor r4, r5, r4, ror #8 // r4 <- (S1 ^ S1 >>> 8) ^ m0 ^ m1
eor r5, r3, r5, ror #8 // r5 <- ^ m1
eor r5, r5, r4, ror #16 // r5 <- S'1 ^ m0
eor r4, r4, r2 // r4 <- (S1 ^ S1 >>> 8) ^ m0 remask r4
eor r4, r4, r1, ror #8 // r4 <- S1 ^ S1 >>> 8 ^ S0 >>> 8 ^ m0 ^ m1
eor r4, r4, r14, ror #16 // r4 <- S'0 ^ m1
ldr r14, [sp, #132] // restore link register
bx lr
//(r0, m0^m1), (r1, S0), (r2, m1), (r3, S6), (r4, S0),(r5, S1), (r6, S2),
//(r7, S3), (r8, S4), (r9, S5), (r10, S6), (r11, S7), (r12, -), (r14, -)]
/******************************************************************************
* Applies the ShiftRows transformation twice (i.e. SR^2) on the internal state.
* Each state word is transformed in place with SWAPMOVE (mask 0x0f000f00,
* shift 4), using r11 as scratch.
* Out: transformed state; m1 reloaded from [sp, #124] into r2.
* Called via BL; r14 is spilled to [sp, #132] (it is used as the mask
* register) and restored before returning.
******************************************************************************/
.align 2
double_shiftrows:
str r14, [sp, #132] // save link register (r14 is reused as mask)
movw r14, #0x0f00
movt r14, #0x0f00 // r14<- 0x0f000f00 (SWAPMOVE mask)
swpmv r6,r6,r6,r6, r14, #4, r11
swpmv r3,r3,r3,r3, r14, #4, r11
swpmv r0,r0,r0,r0, r14, #4, r11
swpmv r8,r8,r8,r8, r14, #4, r11
swpmv r4,r4,r4,r4, r14, #4, r11
swpmv r12,r12,r12,r12, r14, #4, r11
swpmv r5,r5,r5,r5, r14, #4, r11
swpmv r1,r1,r1,r1, r14, #4, r11
ldr r14, [sp, #132] // restore link register
ldr.w r2, [sp, #124] // loads m1 in r2
bx lr
/******************************************************************************
* Fully fixsliced implementation of AES-128 with 1st-order masking.
*
* Two blocks are encrypted in parallel.
*
* Note that additional 4 bytes are allocated on the stack as the function takes
* 5 arguments as input.
*
* The masking step is specific to the STM32F407VG due to some specific values
* and addresses related to the random number generator (RNG).
*
* Stack layout (after the 56-byte register push and the 136-byte allocation):
* [sp, #116] rkey pointer, [sp, #120] m0^m1, [sp, #124] m1, [sp, #128] m0,
* [sp, #132] spill slot for r14 in the subroutines, [sp, #192] 5th argument.
******************************************************************************/
@ void aes128_encrypt_ffs(u8* ctext, u8* ctext_bis, const u8* ptext,
@ const u8* ptext_bis, const u32* rkey);
.global aes128_encrypt_ffs
.type aes128_encrypt_ffs,%function
.align 2
aes128_encrypt_ffs:
push {r0-r12,r14} // save context (14 words = 56 bytes)
sub.w sp, #136 // allocate 136 bytes of local storage
ldr.w r4, [r2] // load the 1st 128-bit blocks in r4-r7
ldr r5, [r2, #4]
ldr r6, [r2, #8]
ldr r7, [r2, #12]
ldr.w r8, [r3] // load the 2nd 128-bit blocks in r8-r11
ldr r9, [r3, #4]
ldr r10,[r3, #8]
ldr r11,[r3, #12]
bl packing
// ------------------ MASKING ------------------
// generation of 1 random word
movw r0, 0x0804
movt r0, 0x5006 // r0 <- RNG_SR = 0x50060804
add.w r1, r0, #4 // r1 <- RNG_DR = 0x50060808
aes128_ffs_get_random_mask:
ldr.w r2, [r0]
cmp r2, #1 // check if RNG_SR == RNG_SR_DRDY
bne aes128_ffs_get_random_mask // loop while RNG status is not ready
ldr.w r2, [r1] // load the random number in r2
ubfx r12, r2, #0, #2 // r12<- ab
orr r12, r12, r12, lsl #2 // r12<- abab
orr r12, r12, r12, lsl #4 // r12<- abababab
orr r12, r12, r12, lsl #8 // r12<- abababababababab
orr r12, r12, r12, lsl #16 // r12<- abababababababababababababababab (m0)
ubfx.w r2, r2, #2, #2 // r2 <- cd
orr r2, r2, r2, lsl #2 // r2 <- cdcd
orr r2, r2, r2, lsl #4 // r2 <- cdcdcdcd
orr r2, r2, r2, lsl #8 // r2 <- cdcdcdcdcdcdcdcd
orr r2, r2, r2, lsl #16 // r2 <- cdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcd (m1)
eor r0, r12, r2 // r0 <- m0 ^ m1
eor r4, r4, r2 // r4 <- state[0] ^ m1
eor r5, r5, r12 // r5 <- state[1] ^ m0
eor r6, r6, r2 // r6 <- state[2] ^ m1
eor r7, r7, r12 // r7 <- state[3] ^ m0
eor r8, r8, r12 // r8 <- state[4] ^ m0
eor r9, r9, r0 // r9 <- state[5] ^ m0 ^ m1
eor r10, r10, r0 // r10<- state[6] ^ m0 ^ m1
eor r11, r11, r2 // r11<- state[7] ^ m1
// ------------------ MASKING ------------------
// ------------------ CORE FUNCTION ------------------
ldr.w r3, [sp, #192] // r3 <- rkey (5th arg, on the stack); to match add_round_key routine
str.w r3, [sp, #116] // to match add_round_key routine
str r12, [sp, #128] // store m0 at [sp, #128]
strd r0, r2, [sp, #120] // store m0^m1 at [sp, #120] and m1 at [sp, #124]
bl add_round_key
bl sbox
bl mixcolumns_0
bl add_round_key
bl sbox
bl mixcolumns_1
bl add_round_key
bl sbox
bl mixcolumns_2
bl add_round_key
bl sbox
bl mixcolumns_3
bl add_round_key
bl sbox
bl mixcolumns_0
bl add_round_key
bl sbox
bl mixcolumns_1
bl add_round_key
bl sbox
bl mixcolumns_2
bl add_round_key
bl sbox
bl mixcolumns_3
bl add_round_key
bl sbox
bl mixcolumns_0
bl add_round_key
bl sbox
bl double_shiftrows // last round omits MixColumns
mov r11, r6 // to match add_round_key routine
mov r10, r3 // to match add_round_key routine
mov r9, r0 // to match add_round_key routine
mov.w r7, r4 // to match add_round_key routine
mov.w r6, r12 // to match add_round_key routine
mov.w r4, r1 // to match add_round_key routine
ldr.w r0, [sp, #120] // loads m0^m1 in r0
bl add_round_key
// ------------------ CORE FUNCTION ------------------
// ------------------ UNMASKING ------------------
eor r4, r4, r2 // r4 <- state[0] ^ m1
eor r5, r5, r12 // r5 <- state[1] ^ m0
eor r6, r6, r2 // r6 <- state[2] ^ m1
eor r7, r7, r12 // r7 <- state[3] ^ m0
eor r8, r8, r12 // r8 <- state[4] ^ m0
eor r9, r9, r0 // r9 <- state[5] ^ m0 ^ m1
eor r10, r10, r0 // r10<- state[6] ^ m0 ^ m1
eor r11, r11, r2 // r11<- state[7] ^ m1
// ------------------ UNMASKING ------------------
bl unpacking
ldrd r0, r1, [sp, #136] // restore ctext and ctext_bis (saved r0, r1)
add.w sp, #144 // free locals (136) + skip saved r0, r1 (8)
stm r0, {r4-r7} // store the 1st ciphertext block
stm r1, {r8-r11} // store the 2nd ciphertext block
pop {r2-r12, r14} // restore the remaining saved registers
bx lr
/******************************************************************************
* Semi fixsliced implementation of AES-128 with 1st-order masking.
*
* Two blocks are encrypted in parallel.
*
* Note that additional 4 bytes are allocated on the stack as the function takes
* 5 arguments as input.
*
* The masking step is specific to the STM32F407VG due to some specific values
* and addresses related to the random number generator (RNG).
*
* Stack layout (after the 56-byte register push and the 136-byte allocation):
* [sp, #116] rkey pointer, [sp, #120] m0^m1, [sp, #124] m1, [sp, #128] m0,
* [sp, #132] spill slot for r14 in the subroutines, [sp, #192] 5th argument.
******************************************************************************/
@ void aes128_encrypt_sfs(u8* ctext, u8* ctext_bis, const u8* ptext,
@ const u8* ptext_bis, const u32* rkey);
.global aes128_encrypt_sfs
.type aes128_encrypt_sfs,%function
.align 2
aes128_encrypt_sfs:
push {r0-r12,r14} // save context (14 words = 56 bytes)
sub.w sp, #136 // allocate 136 bytes of local storage
ldr.w r4, [r2] // load the 1st 128-bit blocks in r4-r7
ldr r5, [r2, #4]
ldr r6, [r2, #8]
ldr r7, [r2, #12]
ldr.w r8, [r3] // load the 2nd 128-bit blocks in r8-r11
ldr r9, [r3, #4]
ldr r10,[r3, #8]
ldr r11,[r3, #12]
bl packing
// ------------------ MASKING ------------------
// generation of 1 random word
movw r0, 0x0804
movt r0, 0x5006 // r0 <- RNG_SR = 0x50060804
add.w r1, r0, #4 // r1 <- RNG_DR = 0x50060808
aes128_sfs_get_random_mask:
ldr.w r2, [r0]
cmp r2, #1 // check if RNG_SR == RNG_SR_DRDY
bne aes128_sfs_get_random_mask // loop while RNG status is not ready
ldr.w r2, [r1] // load the random number in r2
ubfx r12, r2, #0, #2 // r12<- ab
orr r12, r12, r12, lsl #2 // r12<- abab
orr r12, r12, r12, lsl #4 // r12<- abababab
orr r12, r12, r12, lsl #8 // r12<- abababababababab
orr r12, r12, r12, lsl #16 // r12<- abababababababababababababababab (m0)
ubfx.w r2, r2, #2, #2 // r2 <- cd
orr r2, r2, r2, lsl #2 // r2 <- cdcd
orr r2, r2, r2, lsl #4 // r2 <- cdcdcdcd
orr r2, r2, r2, lsl #8 // r2 <- cdcdcdcdcdcdcdcd
orr r2, r2, r2, lsl #16 // r2 <- cdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcd (m1)
eor r0, r12, r2 // r0 <- m0 ^ m1
eor r4, r4, r2 // r4 <- state[0] ^ m1
eor r5, r5, r12 // r5 <- state[1] ^ m0
eor r6, r6, r2 // r6 <- state[2] ^ m1
eor r7, r7, r12 // r7 <- state[3] ^ m0
eor r8, r8, r12 // r8 <- state[4] ^ m0
eor r9, r9, r0 // r9 <- state[5] ^ m0 ^ m1
eor r10, r10, r0 // r10<- state[6] ^ m0 ^ m1
eor r11, r11, r2 // r11<- state[7] ^ m1
// ------------------ MASKING ------------------
// ------------------ CORE FUNCTION ------------------
ldr.w r3, [sp, #192] // r3 <- rkey (5th arg, on the stack); to be compliant with add_round_key routine
str.w r3, [sp, #116] // to be compliant with add_round_key routine
str r12, [sp, #128] // store m0 at [sp, #128]
strd r0, r2, [sp, #120] // store m0^m1 at [sp, #120] and m1 at [sp, #124]
bl add_round_key
bl sbox
bl mixcolumns_0
bl add_round_key
bl sbox
bl double_shiftrows // odd rounds: SR^2 then the Käsper-Schwabe MixColumns
bl mixcolumns_3
bl add_round_key
bl sbox
bl mixcolumns_0
bl add_round_key
bl sbox
bl double_shiftrows
bl mixcolumns_3
bl add_round_key
bl sbox
bl mixcolumns_0
bl add_round_key
bl sbox
bl double_shiftrows
bl mixcolumns_3
bl add_round_key
bl sbox
bl mixcolumns_0
bl add_round_key
bl sbox
bl double_shiftrows
bl mixcolumns_3
bl add_round_key
bl sbox
bl mixcolumns_0
bl add_round_key
bl sbox
bl double_shiftrows // last round omits MixColumns
mov r11, r6 // to match add_round_key routine
mov r10, r3 // to match add_round_key routine
mov r9, r0 // to match add_round_key routine
mov.w r7, r4 // to match add_round_key routine
mov.w r6, r12 // to match add_round_key routine
mov.w r4, r1 // to match add_round_key routine
ldr.w r0, [sp, #120] // loads m0^m1 in r0
bl add_round_key
// ------------------ CORE FUNCTION ------------------
// ------------------ UNMASKING ------------------
eor r4, r4, r2 // r4 <- state[0] ^ m1
eor r5, r5, r12 // r5 <- state[1] ^ m0
eor r6, r6, r2 // r6 <- state[2] ^ m1
eor r7, r7, r12 // r7 <- state[3] ^ m0
eor r8, r8, r12 // r8 <- state[4] ^ m0
eor r9, r9, r0 // r9 <- state[5] ^ m0 ^ m1
eor r10, r10, r0 // r10<- state[6] ^ m0 ^ m1
eor r11, r11, r2 // r11<- state[7] ^ m1
// ------------------ UNMASKING ------------------
bl unpacking
ldrd r0, r1, [sp, #136] // restore ctext and ctext_bis (saved r0, r1)
add.w sp, #144 // free locals (136) + skip saved r0, r1 (8)
stm r0, {r4-r7} // store the 1st ciphertext block
stm r1, {r8-r11} // store the 2nd ciphertext block
pop {r2-r12, r14} // restore the remaining saved registers
bx lr
/* ------------------------------------------------------------------------- */
/* File boundary (concatenation artifact): the code below comes from         */
/* armcortexm/1storder_masking/aes_keyschedule.s (repo aadomn/aes).          */
/* ------------------------------------------------------------------------- */
/******************************************************************************
* First-order masked bitsliced implementation of the AES-128 key schedule in
* ARM assembly.
*
* The masking scheme is the one described in "Masking AES with 2 random bits"
* available at https://eprint.iacr.org/2018/1007.
* All bytes within a round key are masked in the following way:
* m1 || m0^m1 || m0^m1 || m0 || m0 || m1 || m0 || m1 where m0, m1 are random
* bits. For each round key, m0 and m1 are picked randomly.
* Note that because the function prototype allows passing 2 different keys as
* input parameters, 4 random bits are used instead of 2 to ensure that
* different round keys are masked with different masks.
*
* @author Alexandre Adomnicai, Nanyang Technological University, Singapore
* alexandre.adomnicai@ntu.edu.sg
*
* @date October 2020
******************************************************************************/
.syntax unified
.thumb
/******************************************************************************
* Macro to compute the SWAPMOVE technique: swap the bits in 'in1' masked by 'm'
* by the bits in 'in0' masked by 'm << n' and put the results in 'out0', 'out1'
******************************************************************************/
.macro swpmv out0, out1, in0, in1, m, n, tmp
eor \tmp, \in1, \in0, lsr \n
and \tmp, \m
eor \out1, \in1, \tmp
eor \out0, \in0, \tmp, lsl \n
.endm
/******************************************************************************
* Packing routine. Note that it is the same as the one used in the encryption
* function so some code size could be saved by merging the two files.
******************************************************************************/
.align 2
packing:
movw r3, #0x0f0f
movt r3, #0x0f0f // r3 <- 0x0f0f0f0f (mask for SWAPMOVE)
eor r2, r3, r3, lsl #2 // r2 <- 0x33333333 (mask for SWAPMOVE)
eor r1, r2, r2, lsl #1 // r1 <- 0x55555555 (mask for SWAPMOVE)
swpmv r8, r4, r8, r4, r1, #1, r12
swpmv r9, r5, r9, r5, r1, #1, r12
swpmv r10, r6, r10, r6, r1, #1, r12
swpmv r11, r7, r11, r7, r1, #1, r12
swpmv r0, r4, r5, r4, r2, #2, r12
swpmv r9, r5, r9, r8, r2, #2, r12
swpmv r7, r8, r7, r6, r2, #2, r12
swpmv r11, r2, r11, r10, r2, #2, r12
swpmv r8, r4, r8, r4, r3, #4, r12
swpmv r10, r6, r7, r0, r3, #4, r12
swpmv r11, r7, r11, r9, r3, #4, r12
swpmv r9, r5, r2, r5, r3, #4, r12
bx lr
/******************************************************************************
* 1st-order masked implementation of the S-box in a bitsliced manner.
* Credits to https://github.com/LaurenDM/TwoRandomBits.
* The bitsliced key state should be contained in r4-r11 while the masks
* m2=m0^m1, m1, m0 are supposed to be stored in sp[120,124,128].
* Note that it is the same subroutine as the one used in the encryption
* function so some code size could be saved by merging the two files.
******************************************************************************/
.align 2
sbox:
str r14, [sp, #132] // save link register
mov.w r14, r2
orr r0, r12, r14 //Exec (m0 | m1) = m0 | m1 into r0
eor r2, r7, r9 //Exec y14 = i4 ^ i2 into r2
str.w r0, [sp, #112] //Store r0/(m0 | m1) on stack
eor r0, r4, r10 //Exec y13 = i7 ^ i1 into r0
eor r1, r0, r14 //Exec hy13 = y13 ^ m1 into r1
eor r3, r4, r7 //Exec y9 = i7 ^ i4 into r3
str.w r3, [sp, #108] //Store r3/y9 on stack
eor r3, r3, r14 //Exec hy9 = y9 ^ m1 into r3
str.w r1, [sp, #104] //Store r1/hy13 on stack
eor r1, r4, r9 //Exec y8 = i7 ^ i2 into r1
eor r6, r5, r6 //Exec t0 = i6 ^ i5 into r6
str.w r3, [sp, #100] //Store r3/hy9 on stack
eor r3, r6, r11 //Exec y1 = t0 ^ i0 into r3
str.w r6, [sp, #96] //Store r6/t0 on stack
eor r6, r3, r14 //Exec hy1 = y1 ^ m1 into r6
eor r7, r6, r7 //Exec y4 = hy1 ^ i4 into r7
str.w r7, [sp, #92] //Store r7/y4 on stack
eor r7, r7, r12 //Exec hy4 = y4 ^ m0 into r7
str.w r0, [sp, #88] //Store r0/y13 on stack
eor r0, r0, r2 //Exec y12 = y13 ^ y14 into r0
str.w r6, [sp, #84] //Store r6/hy1 on stack
eor r6, r3, r4 //Exec y2 = y1 ^ i7 into r6
eor r10, r3, r10 //Exec y5 = y1 ^ i1 into r10
str.w r2, [sp, #80] //Store r2/y14 on stack
eor r2, r10, r1 //Exec y3 = y5 ^ y8 into r2
str r10, [sp, #60] //Store r10/y5 on stack
eor r2, r2, r14 //Exec hy3 = y3 ^ m1 into r2
eor r8, r8, r0 //Exec t1 = i3 ^ y12 into r8
eor r9, r8, r9 //Exec y15 = t1 ^ i2 into r9
str.w r6, [sp, #76] //Store r6/y2 on stack
eor r6, r9, r14 //Exec hy15 = y15 ^ m1 into r6
eor r5, r8, r5 //Exec y20 = t1 ^ i6 into r5
eor r8, r9, r11 //Exec y6 = y15 ^ i0 into r8
str.w r6, [sp, #72] //Store r6/hy15 on stack
eor r6, r8, r12 //Exec hy6 = y6 ^ m0 into r6
str.w r6, [sp, #68] //Store r6/hy6 on stack
ldr.w r6, [sp, #96] //Load t0 into r6
str.w r3, [sp, #64] //Store r3/y1 on stack
eor r3, r9, r6 //Exec y10 = y15 ^ t0 into r3
eor r10, r3, r12 //Exec hy10 = y10 ^ m0 into r10
str r10, [sp, #56] //Store r10/hy10 on stack
ldr r10, [sp, #100] //Load hy9 into r10
str.w r5, [sp, #100] //Store r5/y20 on stack
eor r10, r5, r10 //Exec y11 = y20 ^ hy9 into r10
eor r5, r10, r12 //Exec hy11 = y11 ^ m0 into r5
eor r14, r11, r5 //Exec y7 = i0 ^ hy11 into r14
eor r5, r3, r5 //Exec y17 = y10 ^ hy11 into r5
str.w r1, [sp, #52] //Store r1/y8 on stack
eor r1, r3, r1 //Exec y19 = y10 ^ y8 into r1
str.w r1, [sp, #96] //Store r1/y19 on stack
eor r6, r6, r10 //Exec y16 = t0 ^ y11 into r6
ldr.w r1, [sp, #104] //Load hy13 into r1
str.w r3, [sp, #48] //Store r3/y10 on stack
eor r3, r1, r6 //Exec y21 = hy13 ^ y16 into r3
str.w r3, [sp, #32] //Store r3/y21 on stack
eor r4, r4, r6 //Exec y18 = i7 ^ y16 into r4
str.w r4, [sp, #44] //Store r4/y18 on stack
and r4, r0, r9 //Exec t2_0 = y12 & y15 into r4
str.w r0, [sp, #40] //Store r0/y12 on stack
and r0, r0, r12 //Exec t2_1 = y12 & m0 into r0
str.w r0, [sp, #36] //Store r0/t2_1 on stack
eor r0, r0, r12 //Exec t2_2 = t2_1 ^ m0 into r0
eor r0, r4, r0 //Exec t2_3 = t2_0 ^ t2_2 into r0
ldr.w r4, [sp, #120] //Load m2 into r4
ldr.w r3, [sp, #112] //Load (m0 | m1) into r3
and r9, r4, r9 //Exec t2_4 = m2 & y15 into r9
eor r9, r9, r3 //Exec t2_5 = t2_4 ^ (m0 | m1) into r9
eor r0, r0, r9 //Exec t2 = t2_3 ^ t2_5 into r0
and r9, r2, r8 //Exec t3_0 = hy3 & y6 into r9
str.w r2, [sp, #28] //Store r2/hy3 on stack
and r2, r2, r4 //Exec t3_1 = hy3 & m2 into r2
str.w r2, [sp, #24] //Store r2/t3_1 on stack
eor r2, r2, r4 //Exec t3_2 = t3_1 ^ m2 into r2
eor r2, r9, r2 //Exec t3_3 = t3_0 ^ t3_2 into r2
and r8, r12, r8 //Exec t3_4 = m0 & y6 into r8
eor r8, r8, r3 //Exec t3_5 = t3_4 ^ (m0 | m1) into r8
eor r2, r2, r8 //Exec t3 = t3_3 ^ t3_5 into r2
eor r2, r2, r0 //Exec t4 = t3 ^ t2 into r2
and r8, r11, r7 //Exec t5_0 = i0 & hy4 into r8
and r9, r11, r4 //Exec t5_1 = i0 & m2 into r9
str r9, [sp, #20] //Store r9/t5_1 on stack
eor r9, r9, r4 //Exec t5_2 = t5_1 ^ m2 into r9
eor r8, r8, r9 //Exec t5_3 = t5_0 ^ t5_2 into r8
ldr r9, [sp, #124] //Load m1 into r9
and r7, r9, r7 //Exec t5_4 = m1 & hy4 into r7
eor r7, r7, r3 //Exec t5_5 = t5_4 ^ (m0 | m1) into r7
eor r7, r8, r7 //Exec t5 = t5_3 ^ t5_5 into r7
eor r0, r7, r0 //Exec t6 = t5 ^ t2 into r0
and r7, r1, r6 //Exec t7_0 = hy13 & y16 into r7
and r1, r1, r12 //Exec t7_1 = hy13 & m0 into r1
eor r1, r1, r12 //Exec t7_2 = t7_1 ^ m0 into r1
eor r1, r7, r1 //Exec t7_3 = t7_0 ^ t7_2 into r1
and r7, r6, r4 //Exec t7_4 = y16 & m2 into r7
eor r8, r7, r3 //Exec t7_5 = t7_4 ^ (m0 | m1) into r8
str.w r7, [sp, #104] //Store r7/t7_4 on stack
eor r1, r1, r8 //Exec t7 = t7_3 ^ t7_5 into r1
ldr r8, [sp, #64] //Load y1 into r8
ldr.w r7, [sp, #60] //Load y5 into r7
str.w r6, [sp, #16] //Store r6/y16 on stack
and r6, r8, r7 //Exec t8_0 = y1 & y5 into r6
and r8, r8, r9 //Exec t8_1 = y1 & m1 into r8
eor r8, r8, r9 //Exec t8_2 = t8_1 ^ m1 into r8
eor r6, r6, r8 //Exec t8_3 = t8_0 ^ t8_2 into r6
and r8, r7, r12 //Exec t8_4 = y5 & m0 into r8
str r8, [sp, #64] //Store r8/t8_4 on stack
eor r8, r8, r3 //Exec t8_5 = t8_4 ^ (m0 | m1) into r8
eor r6, r6, r8 //Exec t8 = t8_3 ^ t8_5 into r6
ldr r8, [sp, #76] //Load y2 into r8
str r14, [sp, #12] //Store r14/y7 on stack
eor r6, r6, r1 //Exec t9 = t8 ^ t7 into r6
and r7, r14, r8 //Exec t10_0 = y7 & y2 into r7
and r14, r14, r4 //Exec t10_1 = y7 & m2 into r14
eor r14, r14, r4 //Exec t10_2 = t10_1 ^ m2 into r14
eor r7, r7, r14 //Exec t10_3 = t10_0 ^ t10_2 into r7
and r14, r12, r8 //Exec t10_4 = m0 & y2 into r14
eor r14, r14, r3 //Exec t10_5 = t10_4 ^ (m0 | m1) into r14
eor r7, r7, r14 //Exec t10 = t10_3 ^ t10_5 into r7
eor r1, r7, r1 //Exec t11 = t10 ^ t7 into r1
ldr.w r7, [sp, #108] //Load y9 into r7
and r14, r10, r7 //Exec t12_0 = y11 & y9 into r14
and r8, r10, r4 //Exec t12_1 = y11 & m2 into r8
eor r8, r8, r4 //Exec t12_2 = t12_1 ^ m2 into r8
eor r8, r14, r8 //Exec t12_3 = t12_0 ^ t12_2 into r8
and r14, r9, r7 //Exec t12_4 = m1 & y9 into r14
eor r14, r14, r3 //Exec t12_5 = t12_4 ^ (m0 | m1) into r14
eor r8, r8, r14 //Exec t12 = t12_3 ^ t12_5 into r8
ldr r14, [sp, #80] //Load y14 into r14
str.w r5, [sp, #8 ] //Store r5/y17 on stack
and r7, r5, r14 //Exec t13_0 = y17 & y14 into r7
and r5, r5, r9 //Exec t13_1 = y17 & m1 into r5
eor r5, r5, r9 //Exec t13_2 = t13_1 ^ m1 into r5
eor r5, r7, r5 //Exec t13_3 = t13_0 ^ t13_2 into r5
and r7, r12, r14 //Exec t13_4 = m0 & y14 into r7
eor r7, r7, r3 //Exec t13_5 = t13_4 ^ (m0 | m1) into r7
eor r5, r5, r7 //Exec t13 = t13_3 ^ t13_5 into r5
eor r5, r5, r8 //Exec t14 = t13 ^ t12 into r5
ldr.w r7, [sp, #52] //Load y8 into r7
ldr r14, [sp, #48] //Load y10 into r14
str r10, [sp, #4 ] //Store r10/y11 on stack
and r10, r7, r14 //Exec t15_0 = y8 & y10 into r10
and r7, r7, r9 //Exec t15_1 = y8 & m1 into r7
str.w r7, [sp, #0 ] //Store r7/t15_1 on stack
eor r7, r7, r9 //Exec t15_2 = t15_1 ^ m1 into r7
eor r7, r10, r7 //Exec t15_3 = t15_0 ^ t15_2 into r7
and r10, r12, r14 //Exec t15_4 = m0 & y10 into r10
eor r10, r10, r3 //Exec t15_5 = t15_4 ^ (m0 | m1) into r10
eor r7, r7, r10 //Exec t15 = t15_3 ^ t15_5 into r7
eor r7, r7, r8 //Exec t16 = t15 ^ t12 into r7
ldr r8, [sp, #100] //Load y20 into r8
eor r2, r2, r8 //Exec t17 = t4 ^ y20 into r2
eor r0, r0, r7 //Exec t18 = t6 ^ t16 into r0
eor r6, r6, r5 //Exec t19 = t9 ^ t14 into r6
eor r1, r1, r7 //Exec t20 = t11 ^ t16 into r1
eor r2, r2, r5 //Exec t21 = t17 ^ t14 into r2
ldr.w r5, [sp, #96] //Load y19 into r5
eor r0, r0, r5 //Exec t22 = t18 ^ y19 into r0
ldr.w r5, [sp, #32] //Load y21 into r5
ldr.w r7, [sp, #44] //Load y18 into r7
str r11, [sp, #100] //Store r11/i0 on stack
eor r5, r6, r5 //Exec t23 = t19 ^ y21 into r5
eor r6, r5, r12 //Exec ht23 = t23 ^ m0 into r6
eor r1, r1, r7 //Exec t24 = t20 ^ y18 into r1
eor r7, r1, r12 //Exec ht24 = t24 ^ m0 into r7
eor r8, r2, r0 //Exec t25 = t21 ^ t22 into r8
and r10, r5, r2 //Exec t26_0 = t23 & t21 into r10
and r14, r5, r9 //Exec t26_1 = t23 & m1 into r14
eor r14, r14, r9 //Exec t26_2 = t26_1 ^ m1 into r14
eor r10, r10, r14 //Exec t26_3 = t26_0 ^ t26_2 into r10
and r2, r4, r2 //Exec t26_4 = m2 & t21 into r2
eor r2, r2, r3 //Exec t26_5 = t26_4 ^ (m0 | m1) into r2
eor r2, r10, r2 //Exec t26 = t26_3 ^ t26_5 into r2
eor r10, r1, r2 //Exec t27 = t24 ^ t26 into r10
and r14, r8, r10 //Exec t28_0 = t25 & t27 into r14
and r11, r8, r12 //Exec t28_1 = t25 & m0 into r11
eor r11, r11, r12 //Exec t28_2 = t28_1 ^ m0 into r11
eor r11, r14, r11 //Exec t28_3 = t28_0 ^ t28_2 into r11
and r14, r4, r10 //Exec t28_4 = m2 & t27 into r14
eor r14, r14, r3 //Exec t28_5 = t28_4 ^ (m0 | m1) into r14
eor r11, r11, r14 //Exec t28 = t28_3 ^ t28_5 into r11
eor r11, r11, r0 //Exec t29 = t28 ^ t22 into r11
eor r5, r5, r1 //Exec t30 = t23 ^ t24 into r5
eor r0, r0, r2 //Exec t31 = t22 ^ t26 into r0
and r2, r5, r0 //Exec t32_0 = t30 & t31 into r2
and r5, r5, r9 //Exec t32_1 = t30 & m1 into r5
eor r5, r5, r9 //Exec t32_2 = t32_1 ^ m1 into r5
eor r2, r2, r5 //Exec t32_3 = t32_0 ^ t32_2 into r2
and r0, r12, r0 //Exec t32_4 = m0 & t31 into r0
eor r0, r0, r3 //Exec t32_5 = t32_4 ^ (m0 | m1) into r0
eor r0, r2, r0 //Exec t32 = t32_3 ^ t32_5 into r0
eor r0, r0, r1 //Exec t33 = t32 ^ t24 into r0
eor r1, r0, r12 //Exec ht33 = t33 ^ m0 into r1
eor r2, r6, r0 //Exec t34 = ht23 ^ t33 into r2
eor r5, r10, r0 //Exec t35 = t27 ^ t33 into r5
and r6, r5, r7 //Exec t36_0 = t35 & ht24 into r6
and r5, r5, r4 //Exec t36_1 = t35 & m2 into r5
eor r5, r5, r4 //Exec t36_2 = t36_1 ^ m2 into r5
eor r5, r6, r5 //Exec t36_3 = t36_0 ^ t36_2 into r5
and r6, r9, r7 //Exec t36_4 = m1 & ht24 into r6
eor r6, r6, r3 //Exec t36_5 = t36_4 ^ (m0 | m1) into r6
eor r5, r5, r6 //Exec t36 = t36_3 ^ t36_5 into r5
eor r2, r5, r2 //Exec t37 = t36 ^ t34 into r2
eor r5, r10, r5 //Exec t38 = t27 ^ t36 into r5
and r6, r11, r5 //Exec t39_0 = t29 & t38 into r6
and r7, r11, r4 //Exec t39_1 = t29 & m2 into r7
eor r7, r7, r4 //Exec t39_2 = t39_1 ^ m2 into r7
eor r6, r6, r7 //Exec t39_3 = t39_0 ^ t39_2 into r6
str.w r7, [sp, #96] //Store r7/t39_2 on stack
and r5, r9, r5 //Exec t39_4 = m1 & t38 into r5
eor r5, r5, r3 //Exec t39_5 = t39_4 ^ (m0 | m1) into r5
eor r5, r6, r5 //Exec t39 = t39_3 ^ t39_5 into r5
eor r5, r8, r5 //Exec t40 = t25 ^ t39 into r5
eor r6, r5, r2 //Exec t41 = t40 ^ t37 into r6
eor r8, r11, r0 //Exec t42 = t29 ^ t33 into r8
eor r10, r11, r5 //Exec t43 = t29 ^ t40 into r10
eor r1, r1, r2 //Exec t44 = ht33 ^ t37 into r1
eor r14, r8, r6 //Exec t45 = t42 ^ t41 into r14
ldr.w r7, [sp, #72] //Load hy15 into r7
str.w r6, [sp, #48] //Store r6/t41 on stack
and r6, r1, r7 //Exec z0_0 = t44 & hy15 into r6
str.w r1, [sp, #44] //Store r1/t44 on stack
and r1, r1, r4 //Exec z0_1 = t44 & m2 into r1
eor r1, r1, r4 //Exec z0_2 = z0_1 ^ m2 into r1
eor r6, r6, r1 //Exec z0_3 = z0_0 ^ z0_2 into r6
and r7, r12, r7 //Exec z0_4 = m0 & hy15 into r7
eor r7, r7, r3 //Exec z0_5 = z0_4 ^ (m0 | m1) into r7
eor r6, r6, r7 //Exec z0 = z0_3 ^ z0_5 into r6
ldr.w r7, [sp, #68] //Load hy6 into r7
str.w r6, [sp, #72] //Store r6/z0 on stack
and r6, r7, r2 //Exec z1_0 = hy6 & t37 into r6
and r7, r7, r4 //Exec z1_1 = hy6 & m2 into r7
eor r7, r7, r4 //Exec z1_2 = z1_1 ^ m2 into r7
eor r6, r6, r7 //Exec z1_3 = z1_0 ^ z1_2 into r6
and r7, r9, r2 //Exec z1_4 = m1 & t37 into r7
eor r7, r7, r3 //Exec z1_5 = z1_4 ^ (m0 | m1) into r7
eor r6, r6, r7 //Exec z1 = z1_3 ^ z1_5 into r6
ldr.w r7, [sp, #100] //Load i0 into r7
str.w r6, [sp, #100] //Store r6/z1 on stack
and r7, r0, r7 //Exec z2_0 = t33 & i0 into r7
and r6, r0, r9 //Exec z2_1 = t33 & m1 into r6
eor r6, r6, r9 //Exec z2_2 = z2_1 ^ m1 into r6
str.w r6, [sp, #68] //Store r6/z2_2 on stack
eor r7, r7, r6 //Exec z2_3 = z2_0 ^ z2_2 into r7
ldr.w r6, [sp, #20] //Load t5_1 into r6
eor r6, r6, r3 //Exec z2_5 = t5_1 ^ (m0 | m1) into r6
eor r6, r7, r6 //Exec z2 = z2_3 ^ z2_5 into r6
str.w r6, [sp, #32] //Store r6/z2 on stack
ldr.w r7, [sp, #16] //Load y16 into r7
ldr.w r6, [sp, #104] //Load t7_4 into r6
and r7, r7, r10 //Exec z3_0 = y16 & t43 into r7
eor r6, r6, r4 //Exec z3_2 = t7_4 ^ m2 into r6
eor r6, r7, r6 //Exec z3_3 = z3_0 ^ z3_2 into r6
and r7, r12, r10 //Exec z3_4 = m0 & t43 into r7
str.w r7, [sp, #104] //Store r7/z3_4 on stack
eor r7, r7, r3 //Exec z3_5 = z3_4 ^ (m0 | m1) into r7
eor r6, r6, r7 //Exec z3 = z3_3 ^ z3_5 into r6
ldr.w r7, [sp, #84] //Load hy1 into r7
str.w r6, [sp, #20] //Store r6/z3 on stack
and r6, r7, r5 //Exec z4_0 = hy1 & t40 into r6
and r7, r7, r12 //Exec z4_1 = hy1 & m0 into r7
eor r7, r7, r12 //Exec z4_2 = z4_1 ^ m0 into r7
eor r6, r6, r7 //Exec z4_3 = z4_0 ^ z4_2 into r6
and r7, r4, r5 //Exec z4_4 = m2 & t40 into r7
eor r7, r7, r3 //Exec z4_5 = z4_4 ^ (m0 | m1) into r7
eor r6, r6, r7 //Exec z4 = z4_3 ^ z4_5 into r6
ldr.w r7, [sp, #12] //Load y7 into r7
str.w r6, [sp, #84] //Store r6/z4 on stack
and r6, r11, r7 //Exec z5_0 = t29 & y7 into r6
str r11, [sp, #16] //Store r11/t29 on stack
and r11, r11, r12 //Exec z5_1 = t29 & m0 into r11
eor r11, r11, r12 //Exec z5_2 = z5_1 ^ m0 into r11
eor r6, r6, r11 //Exec z5_3 = z5_0 ^ z5_2 into r6
and r7, r9, r7 //Exec z5_4 = m1 & y7 into r7
eor r7, r7, r3 //Exec z5_5 = z5_4 ^ (m0 | m1) into r7
eor r6, r6, r7 //Exec z5 = z5_3 ^ z5_5 into r6
ldr.w r7, [sp, #4 ] //Load y11 into r7
and r11, r7, r8 //Exec z6_0 = y11 & t42 into r11
and r7, r7, r12 //Exec z6_1 = y11 & m0 into r7
eor r7, r7, r12 //Exec z6_2 = z6_1 ^ m0 into r7
eor r7, r11, r7 //Exec z6_3 = z6_0 ^ z6_2 into r7
and r11, r9, r8 //Exec z6_4 = m1 & t42 into r11
eor r11, r11, r3 //Exec z6_5 = z6_4 ^ (m0 | m1) into r11
eor r7, r7, r11 //Exec z6 = z6_3 ^ z6_5 into r7
ldr r11, [sp, #8 ] //Load y17 into r11
str.w r7, [sp, #12] //Store r7/z6 on stack
and r7, r11, r14 //Exec z7_0 = y17 & t45 into r7
and r11, r11, r4 //Exec z7_1 = y17 & m2 into r11
eor r11, r11, r4 //Exec z7_2 = z7_1 ^ m2 into r11
eor r7, r7, r11 //Exec z7_3 = z7_0 ^ z7_2 into r7
and r11, r12, r14 //Exec z7_4 = m0 & t45 into r11
eor r11, r11, r3 //Exec z7_5 = z7_4 ^ (m0 | m1) into r11
eor r7, r7, r11 //Exec z7 = z7_3 ^ z7_5 into r7
ldr r11, [sp, #56] //Load hy10 into r11
str.w r6, [sp, #8 ] //Store r6/z5 on stack
eor r6, r5, r2 //Recompute t41 = t40 ^ t37 into r6
str.w r7, [sp, #4 ] //Store r7/z7 on stack
and r7, r11, r6 //Exec z8_0 = hy10 & t41 into r7
and r11, r11, r9 //Exec z8_1 = hy10 & m1 into r11
eor r11, r11, r9 //Exec z8_2 = z8_1 ^ m1 into r11
eor r7, r7, r11 //Exec z8_3 = z8_0 ^ z8_2 into r7
and r11, r4, r6 //Exec z8_4 = m2 & t41 into r11
eor r11, r11, r3 //Exec z8_5 = z8_4 ^ (m0 | m1) into r11
eor r7, r7, r11 //Exec z8 = z8_3 ^ z8_5 into r7
str.w r7, [sp, #56] //Store r7/z8 on stack
ldr r11, [sp, #44] //Load t44 into r11
ldr.w r7, [sp, #40] //Load y12 into r7
and r7, r11, r7 //Exec z9_0 = t44 & y12 into r7
eor r1, r7, r1 //Exec z9_3 = z9_0 ^ z0_2 into r1
ldr.w r7, [sp, #36] //Load t2_1 into r7
eor r7, r7, r3 //Exec z9_5 = t2_1 ^ (m0 | m1) into r7
eor r1, r1, r7 //Exec z9 = z9_3 ^ z9_5 into r1
ldr.w r7, [sp, #28] //Load hy3 into r7
and r7, r2, r7 //Exec z10_0 = t37 & hy3 into r7
and r2, r2, r12 //Exec z10_1 = t37 & m0 into r2
eor r2, r2, r12 //Exec z10_2 = z10_1 ^ m0 into r2
eor r2, r7, r2 //Exec z10_3 = z10_0 ^ z10_2 into r2
ldr.w r7, [sp, #24] //Load t3_1 into r7
eor r7, r7, r3 //Exec z10_5 = t3_1 ^ (m0 | m1) into r7
eor r2, r2, r7 //Exec z10 = z10_3 ^ z10_5 into r2
ldr.w r7, [sp, #92] //Load y4 into r7
ldr r11, [sp, #68] //Load z2_2 into r11
and r0, r0, r7 //Exec z11_0 = t33 & y4 into r0
eor r0, r0, r11 //Exec z11_3 = z11_0 ^ z2_2 into r0
and r7, r4, r7 //Exec z11_4 = m2 & y4 into r7
eor r7, r7, r3 //Exec z11_5 = z11_4 ^ (m0 | m1) into r7
eor r0, r0, r7 //Exec z11 = z11_3 ^ z11_5 into r0
ldr.w r7, [sp, #88] //Load y13 into r7
ldr r11, [sp, #104] //Load z3_4 into r11
and r10, r10, r7 //Exec z12_0 = t43 & y13 into r10
eor r11, r11, r12 //Exec z12_2 = z3_4 ^ m0 into r11
eor r10, r10, r11 //Exec z12_3 = z12_0 ^ z12_2 into r10
and r7, r4, r7 //Exec z12_4 = m2 & y13 into r7
eor r7, r7, r3 //Exec z12_5 = z12_4 ^ (m0 | m1) into r7
eor r7, r10, r7 //Exec z12 = z12_3 ^ z12_5 into r7
ldr r10, [sp, #60] //Load y5 into r10
ldr r11, [sp, #64] //Load t8_4 into r11
str.w r0, [sp, #104] //Store r0/z11 on stack
and r10, r10, r5 //Exec z13_0 = y5 & t40 into r10
eor r11, r11, r12 //Exec z13_2 = t8_4 ^ m0 into r11
eor r10, r10, r11 //Exec z13_3 = z13_0 ^ z13_2 into r10
and r5, r9, r5 //Exec z13_4 = m1 & t40 into r5
eor r5, r5, r3 //Exec z13_5 = z13_4 ^ (m0 | m1) into r5
eor r5, r10, r5 //Exec z13 = z13_3 ^ z13_5 into r5
ldr r10, [sp, #16] //Load t29 into r10
ldr r11, [sp, #76] //Load y2 into r11
ldr.w r0, [sp, #96] //Load t39_2 into r0
and r10, r10, r11 //Exec z14_0 = t29 & y2 into r10
eor r0, r10, r0 //Exec z14_3 = z14_0 ^ t39_2 into r0
and r10, r9, r11 //Exec z14_4 = m1 & y2 into r10
eor r10, r10, r3 //Exec z14_5 = z14_4 ^ (m0 | m1) into r10
eor r0, r0, r10 //Exec z14 = z14_3 ^ z14_5 into r0
ldr r10, [sp, #108] //Load y9 into r10
and r11, r10, r8 //Exec z15_0 = y9 & t42 into r11
and r10, r10, r12 //Exec z15_1 = y9 & m0 into r10
eor r10, r10, r12 //Exec z15_2 = z15_1 ^ m0 into r10
eor r10, r11, r10 //Exec z15_3 = z15_0 ^ z15_2 into r10
and r8, r4, r8 //Exec z15_4 = m2 & t42 into r8
eor r8, r8, r3 //Exec z15_5 = z15_4 ^ (m0 | m1) into r8
eor r8, r10, r8 //Exec z15 = z15_3 ^ z15_5 into r8
ldr r10, [sp, #80] //Load y14 into r10
and r11, r10, r14 //Exec z16_0 = y14 & t45 into r11
and r10, r10, r4 //Exec z16_1 = y14 & m2 into r10
eor r10, r10, r4 //Exec z16_2 = z16_1 ^ m2 into r10
eor r10, r11, r10 //Exec z16_3 = z16_0 ^ z16_2 into r10
and r11, r9, r14 //Exec z16_4 = m1 & t45 into r11
eor r11, r11, r3 //Exec z16_5 = z16_4 ^ (m0 | m1) into r11
eor r10, r10, r11 //Exec z16 = z16_3 ^ z16_5 into r10
ldr r11, [sp, #52] //Load y8 into r11
and r11, r6, r11 //Exec z17_0 = t41 & y8 into r11
and r6, r6, r12 //Exec z17_1 = t41 & m0 into r6
eor r6, r6, r12 //Exec z17_2 = z17_1 ^ m0 into r6
eor r6, r11, r6 //Exec z17_3 = z17_0 ^ z17_2 into r6
ldr r11, [sp, #0 ] //Load t15_1 into r11
eor r3, r11, r3 //Exec z17_5 = t15_1 ^ (m0 | m1) into r3
eor r3, r6, r3 //Exec z17 = z17_3 ^ z17_5 into r3
eor r6, r8, r10 //Exec tc1 = z15 ^ z16 into r6
eor r2, r2, r6 //Exec tc2 = z10 ^ tc1 into r2
eor r1, r1, r2 //Exec tc3 = z9 ^ tc2 into r1
ldr r10, [sp, #72] //Load z0 into r10
ldr r11, [sp, #32] //Load z2 into r11
ldr r14, [sp, #100] //Load z1 into r14
str.w r3, [sp, #112] //Store r3/z17 on stack
eor r11, r10, r11 //Exec tc4 = z0 ^ z2 into r11
eor r10, r14, r10 //Exec tc5 = z1 ^ z0 into r10
ldr r14, [sp, #20] //Load z3 into r14
ldr.w r4, [sp, #84] //Load z4 into r4
ldr r9, [sp, #4 ] //Load z7 into r9
ldr.w r3, [sp, #56] //Load z8 into r3
eor r4, r14, r4 //Exec tc6 = z3 ^ z4 into r4
eor r12, r7, r11 //Exec tc7 = z12 ^ tc4 into r12
eor r9, r9, r4 //Exec tc8 = z7 ^ tc6 into r9
eor r3, r3, r12 //Exec tc9 = z8 ^ tc7 into r3
eor r3, r9, r3 //Exec tc10 = tc8 ^ tc9 into r3
eor r4, r4, r10 //Exec tc11 = tc6 ^ tc5 into r4
ldr r10, [sp, #8 ] //Load z5 into r10
eor r10, r14, r10 //Exec tc12 = z3 ^ z5 into r10
eor r5, r5, r6 //Exec tc13 = z13 ^ tc1 into r5
eor r6, r11, r10 //Exec tc14 = tc4 ^ tc12 into r6
eor r4, r1, r4 //Exec S3 = tc3 ^ tc11 into r4
ldr r10, [sp, #12] //Load z6 into r10
eor r9, r10, r9 //Exec tc16 = z6 ^ tc8 into r9
eor r0, r0, r3 //Exec tc17 = z14 ^ tc10 into r0
eor r5, r5, r6 //Exec tc18 = tc13 ^ tc14 into r5
eor r7, r7, r5 //Exec S7 = z12 ^ tc18 ^ 1 into r7
eor r8, r8, r9 //Exec tc20 = z15 ^ tc16 into r8
ldr r10, [sp, #104] //Load z11 into r10
eor r2, r2, r10 //Exec tc21 = tc2 ^ z11 into r2
eor r1, r1, r9 //Exec o7 = tc3 ^ tc16 into r1
eor r3, r3, r5 //Exec o1 = tc10 ^ tc18 ^ 1 into r3
eor r5, r6, r4 //Exec S4 = tc14 ^ S3 into r5
eor r6, r4, r9 //Exec S1 = S3 ^ tc16 ^ 1 into r6
ldr r9, [sp, #112] //Load z17 into r9
eor r8, r0, r8 //Exec tc26 = tc17 ^ tc20 into r8
eor r8, r8, r9 //Exec S2 = tc26 ^ z17 ^ 1 into r8
eor r0, r2, r0 //Exec S5 = tc21 ^ tc17 into r0
ldr.w r2, [sp, #124] //Load m1 into r2
ldr r9, [sp, #128] //Load m0 into r9
ldr r10, [sp, #120] //Load m2 into r10
ldr r14, [sp, #132] // restore link register
eor r12, r8, r9 //Exec o5 = S2 ^ m0 into r12
eor r8, r5, r2 //Exec o3 = S4 ^ m1 into r8
eor r5, r6, r2 //Exec o6 = S1 ^ m1 into r5
eor r4, r4, r10 //Exec o4 = S3 ^ m2 into r4
eor r0, r0, r9 //Exec o2 = S5 ^ m0 into r0
eor r6, r7, r10 //Exec o0 = S7 ^ m2 into r6
bx lr
/******************************************************************************
* Subroutine that applies a new mask on the round key right after the sbox.
* Before this subroutine the rkey is masked with m0, m1, m0^m1 while after the
* masks are now m0^m'0, m1^m'1, m0^m1^m'0^m'1 where m' refers to new masks.
******************************************************************************/
.align 2
remask_rkey:
ldr.w r2, [sp, #136] // load NEW mask
lsr r2, r2, #4 // r2 <- r2 >> 4 (discard the 4 bits used in the prev round)
ubfx r9, r2, #0, #2 // r9 <- ab
orr r9, r9, r9, lsl #2 // r9 <- abab
orr r9, r9, r9, lsl #4 // r9 <- abababab
orr r9, r9, r9, lsl #8 // r9 <- abababababababab
orr r9, r9, r9, lsl #16 // r9 <- abababababababababababababababab
str.w r2, [sp, #136] // store the shifted mask for the next round
ubfx.w r2, r2, #2, #2 // r2 <- cd
orr r2, r2, r2, lsl #2 // r2 <- cdcd
orr r2, r2, r2, lsl #4 // r2 <- cdcdcdcd
orr r2, r2, r2, lsl #8 // r2 <- cdcdcdcdcdcdcdcd
orr r2, r2, r2, lsl #16 // r2 <- cdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcd
eor r10, r9, r2 // r10<- new_m0 ^ new_m1
eor r11, r6, r9 // r11<- key[7] ^ m1 ^ new_m1
eor r6, r12, r9 // r6 <- key[2] ^ m1 ^ new_m1
eor r8, r8, r2 // r8 <- key[4] ^ m0 ^ new_m0
eor r7, r4, r2 // r7 <- key[3] ^ m0 ^ new_m0
eor r1, r1, r9 // r1 <- key[0] ^ m1 ^ new_m1
eor r0, r0, r10 // r0 <- key[5] ^ m0 ^ m1 ^ new_m0 ^ new_m1
eor r4, r3, r10 // r4 <- key[6] ^ m0 ^ m1 ^ new_m0 ^ new_m1
eor r3, r5, r2 // r3 <- key[1] ^ m0 ^ new_m0
ldr r12, [sp, #116] // restore 'rkeys' address
ldr.w r5, [r12, #40] // load rkey word of rkey from prev round
str r10, [r12, #52] // store new km0 ^ km1 in output array
str r10, [sp, #120] // store new km0 ^ km1 on the stack
strd r2, r9, [r12, #44] // store new km0, km1 in output array
strd r9, r2, [sp, #124] // store new km0, km1 on the stack
mov.w r2, r4 // mov r4 to r2 to be compliant with 'xor_columns' routines
bx lr
/******************************************************************************
* Subroutine that XORs the columns after the S-box during the key schedule
* round function, for rounds i such that (i % 4) == 0.
* Note that the code size could be reduced at the cost of some instructions
* since some redundant code is applied on different registers.
******************************************************************************/
.align 2
xor_columns_0:
str r14, [sp, #132] // store link register
ldr r14, [r12, #4] // load old mask
movw r4, #0xc0c0
movt r4, #0xc0c0 // r4 <- 0xc0c0c0c0
eor r11, r5, r11, ror #2 // r11<- r5 ^ (r11 >>> 2)
bic r11, r4, r11 // r11<- ~r11 & 0xc0c0c0c0 (NOT omitted in sbox)
eor r9, r5, r11, ror #2 // r9 <- r5 ^ (r11 >>> 2)
and r9, r9, r4, ror #2 // r9 <- r9 & 0x30303030
orr r11, r11, r9 // r11<- r11 | r9
eor r9, r5, r11, ror #2 // r9 <- r5 ^ (r11 >>> 2)
and r9, r9, r4, ror #4 // r9 <- r9 & 0x0c0c0c0c
orr r11, r11, r9 // r11<- r11 | r9
eor r9, r5, r11, ror #2 // r9 <- r5 ^ (r11 >>> 2)
and r9, r9, r4, ror #6 // r9 <- r9 & 0x03030303
orr r11, r11, r9 // r11<- r11 | r9
mvn r9, r5 // NOT omitted in sbox
eor r5, r4, r4, lsr #4 // r5 <- 0xcccccccc
and r14, r14, r5, lsr #2 // r14<- r14 & 0x33333333
eor r11, r11, r14 // remask half of register
ldr.w r5, [r12, #36] // load rkey word of rkey from prev round
ldr r14, [r12, #8] // load old mask
str r9, [r12, #40] // store prev rkey word after NOT
str r11, [r12, #84] // store new rkey word in 'rkeys'
eor r10, r5, r2, ror #2 // r10<- r5 ^ (r2 >>> 2)
bic r10, r4, r10 // r10<- ~r10 & 0xc0c0c0c0 (NOT omitted in sbox)
eor r9, r5, r10, ror #2 // r9 <- r5 ^ (r10 >>> 2)
and r9, r9, r4, ror #2 // r9 <- r9 & 0x30303030
orr r10, r10, r9 // r10<- r10 | r9
eor r9, r5, r10, ror #2 // r9 <- r5 ^ (r10 >>> 2)
and r9, r9, r4, ror #4 // r9 <- r9 & 0x0c0c0c0c
orr r10, r10, r9 // r10<- r10 | r9
eor r9, r5, r10, ror #2 // r9 <- r5 ^ (r10 >>> 2)
and r9, r9, r4, ror #6 // r9 <- r9 & 0x03030303
orr r10, r10, r9 // r10<- r10 | r9
mvn r9, r5 // NOT omitted in sbox
eor r5, r4, r4, lsr #4 // r5 <- 0xcccccccc
and r14, r14, r5, lsr #2 // r14<- r14 & 0x33333333
eor r10, r10, r14 // remask half of register
ldr.w r2, [r12, #32] // load rkey word of rkey from prev round
str r9, [r12, #36] // store new rkey word after NOT
str r10, [r12, #80] // store new rkey word in 'rkeys'
eor r9, r2, r0, ror #2 // r9 <- r2 ^ (r9 >>> 2)
and r9, r4, r9 // r9 <- r9 & 0xc0c0c0c0
eor r0, r2, r9, ror #2 // r0 <- r2 ^ (r9 >>> 2)
and r0, r0, r4, ror #2 // r0 <- r0 & 0x30303030
orr r9, r9, r0 // r9 <- r9 | r0
eor r0, r2, r9, ror #2 // r0 <- r2 ^ (r9 >>> 2)
and r0, r0, r4, ror #4 // r0 <- r0 & 0x0c0c0c0c
orr r9, r9, r0 // r9 <- r9 | r0
eor r0, r2, r9, ror #2 // r0 <- r2 ^ (r9 >>> 2)
and r0, r0, r4, ror #6 // r0 <- r0 & 0x03030303
orr r9, r9, r0 // r9 <- r9 | r0
eor r9, r9, r14 // remask half of register
ldr.w r2, [r12, #28] // load rkey word of rkey from prev round
ldr r14, [r12] // load old mask
str.w r9, [r12, #76] // store new rkey word in 'rkeys'
eor r8, r2, r8, ror #2 // r8 <- r2 ^ (r8 >>> 2)
and r8, r4, r8 // r8 <- r8 & 0xc0c0c0c0
eor r0, r2, r8, ror #2 // r0 <- r2 ^ (r8 >>> 2)
and r0, r0, r4, ror #2 // r0 <- r0 & 0x30303030
orr r8, r8, r0 // r8 <- r8 | r0
eor r0, r2, r8, ror #2 // r0 <- r2 ^ (r8 >>> 2)
and r0, r0, r4, ror #4 // r0 <- r0 & 0x0c0c0c0c
orr r8, r8, r0 // r8 <- r8 | r0
eor r0, r2, r8, ror #2 // r0 <- r2 ^ (r8 >>> 2)
and r0, r0, r4, ror #6 // r0 <- r0 & 0x03030303
orr r8, r8, r0 // r8 <- r8 | r0
eor r2, r4, r4, lsr #4 // r2 <- 0xcccccccc
and r14, r14, r2, lsr #2 // r14<- r14 & 0x33333333
eor r8, r8, r14 // remask half of register
ldr.w r2, [r12, #24] // load rkey word of rkey from prev round
str.w r8, [r12, #72] // store new rkey word in 'rkeys'
eor r7, r2, r7, ror #2 // r7 <- r2 ^ (r7 >>> 2)
and r7, r4, r7 // r7 <- r7 & 0xc0c0c0c0
eor r0, r2, r7, ror #2 // r0 <- r2 ^ (r7 >>> 2)
and r0, r0, r4, ror #2 // r0 <- r0 & 0x30303030
orr r7, r7, r0 // r7 <- r7 | r0
eor r0, r2, r7, ror #2 // r0 <- r2 ^ (r7 >>> 2)
and r0, r0, r4, ror #4 // r0 <- r0 & 0x0c0c0c0c
orr r7, r7, r0 // r7 <- r7 | r0
eor r0, r2, r7, ror #2 // r0 <- r2 ^ (r7 >>> 2)
and r0, r0, r4, ror #6 // r0 <- r0 & 0x03030303
orr r7, r7, r0 // r7 <- r7 | r0
eor r7, r7, r14 // remask half of register
ldr.w r2, [r12, #20] // load rkey word of rkey from prev round
ldr r14, [r12, #4] // load old mask
str.w r7, [r12, #68] // store new rkey word in 'rkeys'
eor r6, r2, r6, ror #2 // r6 <- r2 ^ (r6 >>> 2)
bic r6, r4, r6 // r6 <- ~r6 & 0xc0c0c0c0 (NOT omitted in sbox)
eor r0, r2, r6, ror #2 // r0 <- r2 ^ (r6 >>> 2)
and r0, r0, r4, ror #2 // r0 <- r0 & 0x30303030
orr r6, r6, r0 // r6 <- r6 | r0
eor r0, r2, r6, ror #2 // r0 <- r2 ^ (r6 >>> 2)
and r0, r0, r4, ror #4 // r0 <- r0 & 0x0c0c0c0c
orr r6, r6, r0 // r6 <- r6 | r0
eor r0, r2, r6, ror #2 // r0 <- r2 ^ (r6 >>> 2)
and r0, r0, r4, ror #6 // r0 <- r0 & 0x03030303
orr r6, r6, r0 // r6 <- r6 | r0
mvn r0, r2 // NOT omitted in sbox
eor r2, r4, r4, lsr #4 // r2 <- 0xcccccccc
and r14, r14, r2, lsr #2 // r14<- r14 & 0x33333333
eor r6, r6, r14 // remask of half register
ldr.w r2, [r12, #16] // load rkey word of rkey from prev round
ldr r14, [r12] // load old mask
str.w r0, [r12, #20] // store new rkey word after NOT
str.w r6, [r12, #64] // store new rkey word in 'rkeys'
eor r5, r2, r3, ror #2 // r5 <- r2 ^ (r3 >>> 2)
bic r5, r4, r5 // r5 <- ~r5 & 0xc0c0c0c0 (NOT omitted in sbox)
eor r0, r2, r5, ror #2 // r0 <- r2 ^ (r5 >>> 2)
and r0, r0, r4, ror #2 // r0 <- r0 & 0x30303030
orr r5, r5, r0 // r5 <- r5 | r0
eor r0, r2, r5, ror #2 // r0 <- r2 ^ (r5 >>> 2)
and r0, r0, r4, ror #4 // r0 <- r0 & 0x0c0c0c0c
orr r5, r5, r0 // r5 <- r5 | r0
eor r0, r2, r5, ror #2 // r0 <- r2 ^ (r5 >>> 2)
and r0, r0, r4, ror #6 // r0 <- r0 & 0x03030303
orr r5, r5, r0 // r5 <- r5 | r0
mvn r0, r2 // NOT omitted in sbox
eor r2, r4, r4, lsr #4 // r2 <- 0xcccccccc
and r14, r14, r2, lsr #2 // r14<- r14 & 0x33333333
eor r5, r5, r14 // remask half of register
ldr.w r2, [r12, #12] // load rkey word of rkey from prev round
ldr r14, [r12, #4] // load old mask
str.w r0, [r12, #16] // store new rkey word after NOT
str.w r5, [r12, #60] // store new rkey word in 'rkeys'
eor r3, r2, r1, ror #2 // r3 <- r2 ^ (r1 >>> 2)
and r3, r4, r3 // r3 <- r3 & 0xc0c0c0c0
eor r0, r2, r3, ror #2 // r0 <- r2 ^ (r3 >>> 2)
and r0, r0, r4, ror #2 // r0 <- r0 & 0x30303030
orr r3, r3, r0 // r3 <- r3 | r0
eor r0, r2, r3, ror #2 // r0 <- r2 ^ (r3 >>> 2)
and r0, r0, r4, ror #4 // r0 <- r0 & 0x0c0c0c0c
orr r3, r3, r0 // r3 <- r3 | r0
eor r0, r2, r3, ror #2 // r0 <- r2 ^ (r3 >>> 2)
and r0, r0, r4, ror #6 // r0 <- r0 & 0x03030303
eor r2, r4, r4, lsr #4 // r2 <- 0xcccccccc
and r14, r14, r2, lsr #2 // r14<- r14 & 0x33333333
orr r4, r3, r0 // r4 <- r3 | r0
eor r4, r4, r14 // remask half of register
add r12, #44 // point to the next rkey address
str.w r4, [r12, #12] // store new rkey[0]
ldr.w r14, [sp, #132] // restore link register
str.w r12, [sp, #116] // store the new rkeys address on the stack
ldrd r12, r2, [r12] // load masks MASK1 and MASK2 in r12, r2 (for sbox routine)
bx lr
/******************************************************************************
* Subroutine that XORs the columns after the S-box during the key schedule
* round function, for rounds i such that (i % 4) == 1.
* In:   r1, r2, r3, r5, r6-r11 hold the masked S-box output slices;
*       r12 points at the current masks/rkeys area (old masks at [r12],
*       [r12,#4], [r12,#8]; previous round-key words from [r12,#12] up).
*       (register roles inferred from the code below -- confirm vs. caller)
* Out:  eight new remasked rkey words stored at [r12,#60]..[r12,#84];
*       the previous round's words rewritten after ShiftRows^[-1];
*       r12 advanced by 44 and saved at [sp,#116]; MASK1/MASK2 reloaded
*       into r12, r2 for the next sbox invocation.
* Clobbers: r0-r11, r14 (r14 is saved/restored via [sp,#132]).
* Note that the code size could be reduced at the cost of some instructions
* since some redundant code is applied on different registers.
******************************************************************************/
.align 2
xor_columns_1:
    str     r14, [sp, #132]         // save link register on the stack
    ldr     r14, [r12, #4]          // load old mask
    movw    r4, #0xc0c0
    movt    r4, #0xc0c0             // r4 <- 0xc0c0c0c0
    eor     r11, r5, r11, ror #2    // r11<- r5 ^ (r11 >>> 2)
    bic     r11, r4, r11            // r11<- ~r11 & 0xc0c0c0c0 (NOT omitted in sbox)
    eor     r9, r5, r11, ror #2     // r9 <- r5 ^ (r11 >>> 2)
    and     r9, r9, r4, ror #2      // r9 <- r9 & 0x30303030
    orr     r11, r11, r9            // r11<- r11 | r9
    eor     r9, r5, r11, ror #2     // r9 <- r5 ^ (r11 >>> 2)
    and     r9, r9, r4, ror #4      // r9 <- r9 & 0x0c0c0c0c
    orr     r11, r11, r9            // r11<- r11 | r9
    eor     r9, r5, r11, ror #2     // r9 <- r5 ^ (r11 >>> 2)
    and     r9, r9, r4, ror #6      // r9 <- r9 & 0x03030303
    orr     r11, r11, r9            // r11<- r11 | r9
    // applies ShiftRows^[-1]
    and     r9, r5, #0xfc00         // r9 <- r5 & 0x0000fc00
    and     r10, r5, #0x0300        // r10<- r5 & 0x00000300
    orr     r9, r9, r10, lsl #8     // r9 <- r9 | r10 << 8
    and     r10, r5, #0xf00000      // r10<- r5 & 0x00f00000
    orr     r9, r9, r10, lsr #2     // r9 <- r9 | r10 >> 2
    and     r10, r5, #0xf0000       // r10<- r5 & 0x000f0000
    orr     r9, r9, r10, lsl #6     // r9 <- r9 | r10 << 6
    and     r10, r5, #0xc0000000    // r10<- r5 & 0xc0000000
    orr     r9, r9, r10, lsr #4     // r9 <- r9 | r10 >> 4
    and     r10, r5, #0x3f000000    // r10<- r5 & 0x3f000000
    orr     r9, r9, r10, ror #28    // r9 <- r9 | (r10 >>> 28)
    and     r10, r5, #0xff          // r10<- r5 & 0xff
    orr     r9, r10, r9, ror #2     // r9 <- ShiftRows^[-1](r5)
    mvn     r9, r9                  // NOT that is omitted in sbox
    eor     r5, r4, r4, lsr #4      // r5 <- 0xcccccccc
    and     r14, r14, r5, lsr #2    // r14<- r14 & 0x33333333
    eor     r11, r11, r14           // remask half of register
    ldr.w   r5, [r12, #36]          // load rkey word of rkey from prev round
    ldr     r14, [r12, #8]          // load old mask
    str     r9, [r12, #40]          // store the prev rkey word after ShiftRows^[-1]
    str     r11, [r12, #84]         // store new rkey word in 'rkeys'
    eor     r10, r5, r2, ror #2     // r10<- r5 ^ (r2 >>> 2)
    bic     r10, r4, r10            // r10<- ~r10 & 0xc0c0c0c0 (NOT omitted in sbox)
    eor     r9, r5, r10, ror #2     // r9 <- r5 ^ (r10 >>> 2)
    and     r9, r9, r4, ror #2      // r9 <- r9 & 0x30303030
    orr     r10, r10, r9            // r10<- r10 | r9
    eor     r9, r5, r10, ror #2     // r9 <- r5 ^ (r10 >>> 2)
    and     r9, r9, r4, ror #4      // r9 <- r9 & 0x0c0c0c0c
    orr     r10, r10, r9            // r10<- r10 | r9
    eor     r9, r5, r10, ror #2     // r9 <- r5 ^ (r10 >>> 2)
    and     r9, r9, r4, ror #6      // r9 <- r9 & 0x03030303
    orr     r10, r10, r9            // r10<- r10 | r9
    // applies ShiftRows^[-1]
    and     r9, r5, #0xfc00         // r9 <- r5 & 0x0000fc00
    and     r2, r5, #0x0300         // r2 <- r5 & 0x00000300
    orr     r9, r9, r2, lsl #8      // r9 <- r9 | r2 << 8
    and     r2, r5, #0xf00000       // r2 <- r5 & 0x00f00000
    orr     r9, r9, r2, lsr #2      // r9 <- r9 | r2 >> 2
    and     r2, r5, #0xf0000        // r2 <- r5 & 0x000f0000
    orr     r9, r9, r2, lsl #6      // r9 <- r9 | r2 << 6
    and     r2, r5, #0xc0000000     // r2 <- r5 & 0xc0000000
    orr     r9, r9, r2, lsr #4      // r9 <- r9 | r2 >> 4
    and     r2, r5, #0x3f000000     // r2 <- r5 & 0x3f000000
    orr     r9, r9, r2, ror #28     // r9 <- r9 | (r2 >>> 28)
    and     r2, r5, #0xff           // r2 <- r5 & 0xff
    orr     r5, r2, r9, ror #2      // r5 <- ShiftRows^[-1](r5)
    mvn     r5, r5                  // NOT that is omitted in sbox
    eor     r2, r4, r4, lsr #4      // r2 <- 0xcccccccc
    and     r14, r14, r2, lsr #2    // r14<- r14 & 0x33333333
    eor     r10, r10, r14           // remask half of register
    ldr.w   r2, [r12, #32]          // load rkey word of rkey from prev round
    str.w   r5, [r12, #36]          // store the rkey word after ShiftRows^[-1]
    str     r10, [r12, #80]         // store new rkey word in 'rkeys'
    eor     r9, r2, r0, ror #2      // r9 <- r2 ^ (r0 >>> 2)
    and     r9, r4, r9              // r9 <- r9 & 0xc0c0c0c0
    eor     r0, r2, r9, ror #2      // r0 <- r2 ^ (r9 >>> 2)
    and     r0, r0, r4, ror #2      // r0 <- r0 & 0x30303030
    orr     r9, r9, r0              // r9 <- r9 | r0
    eor     r0, r2, r9, ror #2      // r0 <- r2 ^ (r9 >>> 2)
    and     r0, r0, r4, ror #4      // r0 <- r0 & 0x0c0c0c0c
    orr     r9, r9, r0              // r9 <- r9 | r0
    eor     r0, r2, r9, ror #2      // r0 <- r2 ^ (r9 >>> 2)
    and     r0, r0, r4, ror #6      // r0 <- r0 & 0x03030303
    orr     r9, r9, r0              // r9 <- r9 | r0
    // applies ShiftRows^[-1]
    and     r5, r2, #0xfc00         // r5 <- r2 & 0x0000fc00
    and     r0, r2, #0x0300         // r0 <- r2 & 0x00000300
    orr     r5, r5, r0, lsl #8      // r5 <- r5 | r0 << 8
    and     r0, r2, #0xf00000       // r0 <- r2 & 0x00f00000
    orr     r5, r5, r0, lsr #2      // r5 <- r5 | r0 >> 2
    and     r0, r2, #0xf0000        // r0 <- r2 & 0x000f0000
    orr     r5, r5, r0, lsl #6      // r5 <- r5 | r0 << 6
    and     r0, r2, #0xc0000000     // r0 <- r2 & 0xc0000000
    orr     r5, r5, r0, lsr #4      // r5 <- r5 | r0 >> 4
    and     r0, r2, #0x3f000000     // r0 <- r2 & 0x3f000000
    orr     r5, r5, r0, ror #28     // r5 <- r5 | (r0 >>> 28)
    and     r0, r2, #0xff           // r0 <- r2 & 0xff
    orr     r5, r0, r5, ror #2      // r5 <- ShiftRows^[-1](r2)
    eor     r9, r9, r14             // remask half of register
    ldr.w   r2, [r12, #28]          // load rkey word of rkey from prev round
    ldr     r14, [r12]              // load old mask
    str.w   r5, [r12, #32]          // store the rkey word after ShiftRows^[-1]
    str.w   r9, [r12, #76]          // store new rkey word in 'rkeys'
    eor     r8, r2, r8, ror #2      // r8 <- r2 ^ (r8 >>> 2)
    and     r8, r4, r8              // r8 <- r8 & 0xc0c0c0c0
    eor     r0, r2, r8, ror #2      // r0 <- r2 ^ (r8 >>> 2)
    and     r0, r0, r4, ror #2      // r0 <- r0 & 0x30303030
    orr     r8, r8, r0              // r8 <- r8 | r0
    eor     r0, r2, r8, ror #2      // r0 <- r2 ^ (r8 >>> 2)
    and     r0, r0, r4, ror #4      // r0 <- r0 & 0x0c0c0c0c
    orr     r8, r8, r0              // r8 <- r8 | r0
    eor     r0, r2, r8, ror #2      // r0 <- r2 ^ (r8 >>> 2)
    and     r0, r0, r4, ror #6      // r0 <- r0 & 0x03030303
    orr     r8, r8, r0              // r8 <- r8 | r0
    // applies ShiftRows^[-1]
    and     r5, r2, #0xfc00         // r5 <- r2 & 0x0000fc00
    and     r0, r2, #0x0300         // r0 <- r2 & 0x00000300
    orr     r5, r5, r0, lsl #8      // r5 <- r5 | r0 << 8
    and     r0, r2, #0xf00000       // r0 <- r2 & 0x00f00000
    orr     r5, r5, r0, lsr #2      // r5 <- r5 | r0 >> 2
    and     r0, r2, #0xf0000        // r0 <- r2 & 0x000f0000
    orr     r5, r5, r0, lsl #6      // r5 <- r5 | r0 << 6
    and     r0, r2, #0xc0000000     // r0 <- r2 & 0xc0000000
    orr     r5, r5, r0, lsr #4      // r5 <- r5 | r0 >> 4
    and     r0, r2, #0x3f000000     // r0 <- r2 & 0x3f000000
    orr     r5, r5, r0, ror #28     // r5 <- r5 | (r0 >>> 28)
    and     r0, r2, #0xff           // r0 <- r2 & 0xff
    orr     r5, r0, r5, ror #2      // r5 <- ShiftRows^[-1](r2)
    eor     r2, r4, r4, lsr #4      // r2 <- 0xcccccccc
    and     r14, r14, r2, lsr #2    // r14<- r14 & 0x33333333
    eor     r8, r8, r14             // remask half of register
    ldr.w   r2, [r12, #24]          // load rkey word of rkey from prev round
    str.w   r5, [r12, #28]          // store the rkey word after ShiftRows^[-1]
    str.w   r8, [r12, #72]          // store new rkey word in 'rkeys'
    eor     r7, r2, r7, ror #2      // r7 <- r2 ^ (r7 >>> 2)
    and     r7, r4, r7              // r7 <- r7 & 0xc0c0c0c0
    eor     r0, r2, r7, ror #2      // r0 <- r2 ^ (r7 >>> 2)
    and     r0, r0, r4, ror #2      // r0 <- r0 & 0x30303030
    orr     r7, r7, r0              // r7 <- r7 | r0
    eor     r0, r2, r7, ror #2      // r0 <- r2 ^ (r7 >>> 2)
    and     r0, r0, r4, ror #4      // r0 <- r0 & 0x0c0c0c0c
    orr     r7, r7, r0              // r7 <- r7 | r0
    eor     r0, r2, r7, ror #2      // r0 <- r2 ^ (r7 >>> 2)
    and     r0, r0, r4, ror #6      // r0 <- r0 & 0x03030303
    orr     r7, r7, r0              // r7 <- r7 | r0
    // applies ShiftRows^[-1]
    and     r5, r2, #0xfc00         // r5 <- r2 & 0x0000fc00
    and     r0, r2, #0x0300         // r0 <- r2 & 0x00000300
    orr     r5, r5, r0, lsl #8      // r5 <- r5 | r0 << 8
    and     r0, r2, #0xf00000       // r0 <- r2 & 0x00f00000
    orr     r5, r5, r0, lsr #2      // r5 <- r5 | r0 >> 2
    and     r0, r2, #0xf0000        // r0 <- r2 & 0x000f0000
    orr     r5, r5, r0, lsl #6      // r5 <- r5 | r0 << 6
    and     r0, r2, #0xc0000000     // r0 <- r2 & 0xc0000000
    orr     r5, r5, r0, lsr #4      // r5 <- r5 | r0 >> 4
    and     r0, r2, #0x3f000000     // r0 <- r2 & 0x3f000000
    orr     r5, r5, r0, ror #28     // r5 <- r5 | (r0 >>> 28)
    and     r0, r2, #0xff           // r0 <- r2 & 0xff
    orr     r5, r0, r5, ror #2      // r5 <- ShiftRows^[-1](r2)
    eor     r7, r7, r14             // remask half of register
    ldr.w   r2, [r12, #20]          // load rkey word of rkey from prev round
    ldr     r14, [r12, #4]          // load old mask
    str.w   r5, [r12, #24]          // store the rkey word after ShiftRows^[-1]
    str.w   r7, [r12, #68]          // store new rkey word in 'rkeys'
    eor     r6, r2, r6, ror #2      // r6 <- r2 ^ (r6 >>> 2)
    bic     r6, r4, r6              // r6 <- ~r6 & 0xc0c0c0c0 (NOT omitted in sbox)
    eor     r0, r2, r6, ror #2      // r0 <- r2 ^ (r6 >>> 2)
    and     r0, r0, r4, ror #2      // r0 <- r0 & 0x30303030
    orr     r6, r6, r0              // r6 <- r6 | r0
    eor     r0, r2, r6, ror #2      // r0 <- r2 ^ (r6 >>> 2)
    and     r0, r0, r4, ror #4      // r0 <- r0 & 0x0c0c0c0c
    orr     r6, r6, r0              // r6 <- r6 | r0
    eor     r0, r2, r6, ror #2      // r0 <- r2 ^ (r6 >>> 2)
    and     r0, r0, r4, ror #6      // r0 <- r0 & 0x03030303
    orr     r6, r6, r0              // r6 <- r6 | r0
    // applies ShiftRows^[-1]
    and     r5, r2, #0xfc00         // r5 <- r2 & 0x0000fc00
    and     r0, r2, #0x0300         // r0 <- r2 & 0x00000300
    orr     r5, r5, r0, lsl #8      // r5 <- r5 | r0 << 8
    and     r0, r2, #0xf00000       // r0 <- r2 & 0x00f00000
    orr     r5, r5, r0, lsr #2      // r5 <- r5 | r0 >> 2
    and     r0, r2, #0xf0000        // r0 <- r2 & 0x000f0000
    orr     r5, r5, r0, lsl #6      // r5 <- r5 | r0 << 6
    and     r0, r2, #0xc0000000     // r0 <- r2 & 0xc0000000
    orr     r5, r5, r0, lsr #4      // r5 <- r5 | r0 >> 4
    and     r0, r2, #0x3f000000     // r0 <- r2 & 0x3f000000
    orr     r5, r5, r0, ror #28     // r5 <- r5 | (r0 >>> 28)
    and     r0, r2, #0xff           // r0 <- r2 & 0xff
    orr     r5, r0, r5, ror #2      // r5 <- ShiftRows^[-1](r2)
    mvn     r5, r5                  // NOT that is omitted in sbox
    eor     r2, r4, r4, lsr #4      // r2 <- 0xcccccccc
    and     r14, r14, r2, lsr #2    // r14<- r14 & 0x33333333
    eor     r6, r6, r14             // remask half of register
    ldr.w   r2, [r12, #16]          // load rkey word of rkey from prev round
    ldr     r14, [r12]              // load old mask
    str.w   r5, [r12, #20]          // store the rkey word after ShiftRows^[-1]
    str.w   r6, [r12, #64]          // store new rkey word in 'rkeys'
    eor     r5, r2, r3, ror #2      // r5 <- r2 ^ (r3 >>> 2)
    bic     r5, r4, r5              // r5 <- ~r5 & 0xc0c0c0c0 (NOT omitted in sbox)
    eor     r0, r2, r5, ror #2      // r0 <- r2 ^ (r5 >>> 2)
    and     r0, r0, r4, ror #2      // r0 <- r0 & 0x30303030
    orr     r5, r5, r0              // r5 <- r5 | r0
    eor     r0, r2, r5, ror #2      // r0 <- r2 ^ (r5 >>> 2)
    and     r0, r0, r4, ror #4      // r0 <- r0 & 0x0c0c0c0c
    orr     r5, r5, r0              // r5 <- r5 | r0
    eor     r0, r2, r5, ror #2      // r0 <- r2 ^ (r5 >>> 2)
    and     r0, r0, r4, ror #6      // r0 <- r0 & 0x03030303
    orr     r5, r5, r0              // r5 <- r5 | r0
    // applies ShiftRows^[-1]
    and     r3, r2, #0xfc00         // r3 <- r2 & 0x0000fc00
    and     r0, r2, #0x0300         // r0 <- r2 & 0x00000300
    orr     r3, r3, r0, lsl #8      // r3 <- r3 | r0 << 8
    and     r0, r2, #0xf00000       // r0 <- r2 & 0x00f00000
    orr     r3, r3, r0, lsr #2      // r3 <- r3 | r0 >> 2
    and     r0, r2, #0xf0000        // r0 <- r2 & 0x000f0000
    orr     r3, r3, r0, lsl #6      // r3 <- r3 | r0 << 6
    and     r0, r2, #0xc0000000     // r0 <- r2 & 0xc0000000
    orr     r3, r3, r0, lsr #4      // r3 <- r3 | r0 >> 4
    and     r0, r2, #0x3f000000     // r0 <- r2 & 0x3f000000
    orr     r3, r3, r0, ror #28     // r3 <- r3 | (r0 >>> 28)
    and     r0, r2, #0xff           // r0 <- r2 & 0xff
    orr     r3, r0, r3, ror #2      // r3 <- ShiftRows^[-1](r2)
    mvn     r3, r3                  // NOT that is omitted in sbox
    eor     r2, r4, r4, lsr #4      // r2 <- 0xcccccccc
    and     r14, r14, r2, lsr #2    // r14<- r14 & 0x33333333
    eor     r5, r5, r14             // remask half of register
    ldr.w   r2, [r12, #12]          // load rkey word of rkey from prev round
    ldr     r14, [r12, #4]          // load old mask
    str.w   r3, [r12, #16]          // store the rkey word after ShiftRows^[-1]
    str.w   r5, [r12, #60]          // store new rkey word in 'rkeys'
    eor     r3, r2, r1, ror #2      // r3 <- r2 ^ (r1 >>> 2)
    and     r3, r4, r3              // r3 <- r3 & 0xc0c0c0c0
    eor     r0, r2, r3, ror #2      // r0 <- r2 ^ (r3 >>> 2)
    and     r0, r0, r4, ror #2      // r0 <- r0 & 0x30303030
    orr     r3, r3, r0              // r3 <- r3 | r0
    eor     r0, r2, r3, ror #2      // r0 <- r2 ^ (r3 >>> 2)
    and     r0, r0, r4, ror #4      // r0 <- r0 & 0x0c0c0c0c
    orr     r3, r3, r0              // r3 <- r3 | r0
    eor     r0, r2, r3, ror #2      // r0 <- r2 ^ (r3 >>> 2)
    and     r0, r0, r4, ror #6      // r0 <- r0 & 0x03030303
    eor     r1, r4, r4, lsr #4      // r1 <- 0xcccccccc
    and     r14, r14, r1, lsr #2    // r14<- r14 & 0x33333333
    orr     r4, r3, r0              // r4 <- r3 | r0
    eor     r4, r4, r14             // remask half of register
    and     r3, r2, #0xfc00         // r3 <- r2 & 0x0000fc00 (ShiftRows^[-1] begins)
    and     r0, r2, #0x0300         // r0 <- r2 & 0x00000300
    orr     r3, r3, r0, lsl #8      // r3 <- r3 | r0 << 8
    and     r0, r2, #0xf00000       // r0 <- r2 & 0x00f00000
    orr     r3, r3, r0, lsr #2      // r3 <- r3 | r0 >> 2
    and     r0, r2, #0xf0000        // r0 <- r2 & 0x000f0000
    orr     r3, r3, r0, lsl #6      // r3 <- r3 | r0 << 6
    and     r0, r2, #0xc0000000     // r0 <- r2 & 0xc0000000
    orr     r3, r3, r0, lsr #4      // r3 <- r3 | r0 >> 4
    and     r0, r2, #0x3f000000     // r0 <- r2 & 0x3f000000
    orr     r3, r3, r0, ror #28     // r3 <- r3 | (r0 >>> 28)
    and     r0, r2, #0xff           // r0 <- r2 & 0xff
    orr     r3, r0, r3, ror #2      // r3 <- ShiftRows^[-1](r2)
    add     r12, #44                // point to the next rkey addr
    ldr.w   r14, [sp, #132]         // restore link register
    str.w   r4, [r12, #12]          // store tmp rkey[0]
    str.w   r3, [r12, #-32]         // store prev rkey after ShiftRows^[-1]
    str.w   r12, [sp, #116]         // store the new rkeys address on the stack
    ldrd    r12, r2, [r12]          // load masks MASK1 and MASK2 in r12, r2 (for sbox routine)
    bx      lr
/******************************************************************************
* Subroutine that XORs the columns after the S-box during the key schedule
* round function, for rounds i such that (i % 4) == 2.
* Same contract as xor_columns_1 (masked S-box slices in r1, r2, r3, r5,
* r6-r11; r12 -> masks/rkeys area), except the previous round's words are
* rewritten after ShiftRows^[-2] instead of ShiftRows^[-1].
* (register roles inferred from the code below -- confirm vs. caller)
* Clobbers: r0-r11, r14 (r14 is saved/restored via [sp,#112]).
* Note that the code size could be reduced at the cost of some instructions
* since some redundant code is applied on different registers.
******************************************************************************/
.align 2
xor_columns_2:
    str.w   r14, [sp, #112]         // store link register
    movw    r4, #0xc0c0
    movt    r4, #0xc0c0             // r4 <- 0xc0c0c0c0
    eor     r11, r5, r11, ror #2    // r11<- r5 ^ (r11 >>> 2)
    bic     r11, r4, r11            // r11<- ~r11 & 0xc0c0c0c0 (NOT omitted in sbox)
    eor     r9, r5, r11, ror #2     // r9 <- r5 ^ (r11 >>> 2)
    and     r9, r9, r4, ror #2      // r9 <- r9 & 0x30303030
    orr     r11, r11, r9            // r11<- r11 | r9
    eor     r9, r5, r11, ror #2     // r9 <- r5 ^ (r11 >>> 2)
    and     r9, r9, r4, ror #4      // r9 <- r9 & 0x0c0c0c0c
    orr     r11, r11, r9            // r11<- r11 | r9
    eor     r9, r5, r11, ror #2     // r9 <- r5 ^ (r11 >>> 2)
    and     r9, r9, r4, ror #6      // r9 <- r9 & 0x03030303
    orr     r11, r11, r9            // r11<- r11 | r9
    // applies ShiftRows^[-2]
    movw    r14, #0x0f00
    movt    r14, #0x0f00            // r14<- 0x0f000f00 for ShiftRows^[-2]
    and     r9, r14, r5, lsr #4     // r9 <- (r5 >> 4) & 0x0f000f00
    and     r10, r14, r5            // r10<- r5 & 0x0f000f00
    orr     r9, r9, r10, lsl #4     // r9 <- r9 | r10 << 4
    eor     r10, r14, r14, lsl #4   // r10<- 0xff00ff00
    ldr     r14, [r12, #4]          // load prev mask
    and     r10, r5, r10, ror #8    // r10<- r5 & 0x00ff00ff
    orr     r9, r9, r10             // r9 <- ShiftRows^[-2](r5)
    mvn     r9, r9                  // NOT that is omitted in sbox
    eor     r5, r4, r4, lsr #4      // r5 <- 0xcccccccc
    and     r14, r14, r5, lsr #2    // r14<- r14 & 0x33333333
    eor     r11, r11, r14           // remask half of register
    ldr.w   r5, [r12, #36]          // load rkey word of rkey from prev round
    str     r9, [r12, #40]          // store the rkey word after ShiftRows^[-2]
    str     r11, [r12, #84]         // store new rkey word in 'rkeys'
    eor     r10, r5, r2, ror #2     // r10<- r5 ^ (r2 >>> 2)
    bic     r10, r4, r10            // r10<- ~r10 & 0xc0c0c0c0 (NOT omitted in sbox)
    eor     r9, r5, r10, ror #2     // r9 <- r5 ^ (r10 >>> 2)
    and     r9, r9, r4, ror #2      // r9 <- r9 & 0x30303030
    orr     r10, r10, r9            // r10<- r10 | r9
    eor     r9, r5, r10, ror #2     // r9 <- r5 ^ (r10 >>> 2)
    and     r9, r9, r4, ror #4      // r9 <- r9 & 0x0c0c0c0c
    orr     r10, r10, r9            // r10<- r10 | r9
    eor     r9, r5, r10, ror #2     // r9 <- r5 ^ (r10 >>> 2)
    and     r9, r9, r4, ror #6      // r9 <- r9 & 0x03030303
    orr     r10, r10, r9            // r10<- r10 | r9
    // applies ShiftRows^[-2]
    movw    r14, #0x0f00
    movt    r14, #0x0f00            // r14<- 0x0f000f00 for ShiftRows^[-2]
    and     r9, r14, r5, lsr #4     // r9 <- (r5 >> 4) & 0x0f000f00
    and     r2, r14, r5             // r2 <- r5 & 0x0f000f00
    orr     r9, r9, r2, lsl #4      // r9 <- r9 | r2 << 4
    eor     r2, r14, r14, lsl #4    // r2 <- 0xff00ff00
    ldr     r14, [r12, #8]          // load old mask
    and     r2, r5, r2, ror #8      // r2 <- r5 & 0x00ff00ff
    orr     r5, r9, r2              // r5 <- ShiftRows^[-2](r5)
    mvn     r5, r5                  // NOT that is omitted in sbox
    eor     r2, r4, r4, lsr #4      // r2 <- 0xcccccccc
    and     r14, r14, r2, lsr #2    // r14<- r14 & 0x33333333
    eor     r10, r10, r14           // remask half of register
    ldr.w   r2, [r12, #32]          // load rkey word of rkey from prev round
    str.w   r5, [r12, #36]          // store the rkey word after ShiftRows^[-2]
    str     r10, [r12, #80]         // store new rkey word in 'rkeys'
    eor     r9, r2, r0, ror #2      // r9 <- r2 ^ (r0 >>> 2)
    and     r9, r4, r9              // r9 <- r9 & 0xc0c0c0c0
    eor     r0, r2, r9, ror #2      // r0 <- r2 ^ (r9 >>> 2)
    and     r0, r0, r4, ror #2      // r0 <- r0 & 0x30303030
    orr     r9, r9, r0              // r9 <- r9 | r0
    eor     r0, r2, r9, ror #2      // r0 <- r2 ^ (r9 >>> 2)
    and     r0, r0, r4, ror #4      // r0 <- r0 & 0x0c0c0c0c
    orr     r9, r9, r0              // r9 <- r9 | r0
    eor     r0, r2, r9, ror #2      // r0 <- r2 ^ (r9 >>> 2)
    and     r0, r0, r4, ror #6      // r0 <- r0 & 0x03030303
    orr     r9, r9, r0              // r9 <- r9 | r0
    // applies ShiftRows^[-2]
    movw    r14, #0x0f00
    movt    r14, #0x0f00            // r14<- 0x0f000f00 for ShiftRows^[-2]
    and     r5, r14, r2, lsr #4     // r5 <- (r2 >> 4) & 0x0f000f00
    and     r0, r14, r2             // r0 <- r2 & 0x0f000f00
    orr     r5, r5, r0, lsl #4      // r5 <- r5 | r0 << 4
    eor     r0, r14, r14, lsl #4    // r0 <- 0xff00ff00
    ldr     r14, [r12, #8]          // load old mask
    and     r0, r2, r0, ror #8      // r0 <- r2 & 0x00ff00ff
    orr     r5, r5, r0              // r5 <- ShiftRows^[-2](r2)
    eor     r2, r4, r4, lsr #4      // r2 <- 0xcccccccc
    and     r14, r14, r2, lsr #2    // r14<- r14 & 0x33333333
    eor     r9, r9, r14             // remask half of register
    ldr.w   r2, [r12, #28]          // load rkey word of rkey from prev round
    str.w   r5, [r12, #32]          // store the rkey word after ShiftRows^[-2]
    str.w   r9, [r12, #76]          // store new rkey word in 'rkeys'
    eor     r8, r2, r8, ror #2      // r8 <- r2 ^ (r8 >>> 2)
    and     r8, r4, r8              // r8 <- r8 & 0xc0c0c0c0
    eor     r0, r2, r8, ror #2      // r0 <- r2 ^ (r8 >>> 2)
    and     r0, r0, r4, ror #2      // r0 <- r0 & 0x30303030
    orr     r8, r8, r0              // r8 <- r8 | r0
    eor     r0, r2, r8, ror #2      // r0 <- r2 ^ (r8 >>> 2)
    and     r0, r0, r4, ror #4      // r0 <- r0 & 0x0c0c0c0c
    orr     r8, r8, r0              // r8 <- r8 | r0
    eor     r0, r2, r8, ror #2      // r0 <- r2 ^ (r8 >>> 2)
    and     r0, r0, r4, ror #6      // r0 <- r0 & 0x03030303
    orr     r8, r8, r0              // r8 <- r8 | r0
    // applies ShiftRows^[-2]
    movw    r14, #0x0f00
    movt    r14, #0x0f00            // r14<- 0x0f000f00 for ShiftRows^[-2]
    and     r5, r14, r2, lsr #4     // r5 <- (r2 >> 4) & 0x0f000f00
    and     r0, r14, r2             // r0 <- r2 & 0x0f000f00
    orr     r5, r5, r0, lsl #4      // r5 <- r5 | r0 << 4
    eor     r0, r14, r14, lsl #4    // r0 <- 0xff00ff00
    ldr     r14, [r12]              // load old mask
    and     r0, r2, r0, ror #8      // r0 <- r2 & 0x00ff00ff
    orr     r5, r5, r0              // r5 <- ShiftRows^[-2](r2)
    eor     r2, r4, r4, lsr #4      // r2 <- 0xcccccccc
    and     r14, r14, r2, lsr #2    // r14<- r14 & 0x33333333
    eor     r8, r8, r14             // remask half of register
    ldr.w   r2, [r12, #24]          // load rkey word of rkey from prev round
    str.w   r5, [r12, #28]          // store the rkey word after ShiftRows^[-2]
    str.w   r8, [r12, #72]          // store new rkey word in 'rkeys'
    eor     r7, r2, r7, ror #2      // r7 <- r2 ^ (r7 >>> 2)
    and     r7, r4, r7              // r7 <- r7 & 0xc0c0c0c0
    eor     r0, r2, r7, ror #2      // r0 <- r2 ^ (r7 >>> 2)
    and     r0, r0, r4, ror #2      // r0 <- r0 & 0x30303030
    orr     r7, r7, r0              // r7 <- r7 | r0
    eor     r0, r2, r7, ror #2      // r0 <- r2 ^ (r7 >>> 2)
    and     r0, r0, r4, ror #4      // r0 <- r0 & 0x0c0c0c0c
    orr     r7, r7, r0              // r7 <- r7 | r0
    eor     r0, r2, r7, ror #2      // r0 <- r2 ^ (r7 >>> 2)
    and     r0, r0, r4, ror #6      // r0 <- r0 & 0x03030303
    orr     r7, r7, r0              // r7 <- r7 | r0
    // applies ShiftRows^[-2]
    movw    r14, #0x0f00
    movt    r14, #0x0f00            // r14<- 0x0f000f00 for ShiftRows^[-2]
    and     r5, r14, r2, lsr #4     // r5 <- (r2 >> 4) & 0x0f000f00
    and     r0, r14, r2             // r0 <- r2 & 0x0f000f00
    orr     r5, r5, r0, lsl #4      // r5 <- r5 | r0 << 4
    eor     r0, r14, r14, lsl #4    // r0 <- 0xff00ff00
    ldr     r14, [r12]              // load old mask
    and     r0, r2, r0, ror #8      // r0 <- r2 & 0x00ff00ff
    orr     r5, r5, r0              // r5 <- ShiftRows^[-2](r2)
    eor     r2, r4, r4, lsr #4      // r2 <- 0xcccccccc
    and     r14, r14, r2, lsr #2    // r14<- r14 & 0x33333333
    eor     r7, r7, r14             // remask half of register
    ldr.w   r2, [r12, #20]          // load rkey word of rkey from prev round
    str.w   r5, [r12, #24]          // store the rkey word after ShiftRows^[-2]
    str.w   r7, [r12, #68]          // store new rkey word in 'rkeys'
    eor     r6, r2, r6, ror #2      // r6 <- r2 ^ (r6 >>> 2)
    bic     r6, r4, r6              // r6 <- ~r6 & 0xc0c0c0c0 (NOT omitted in sbox)
    eor     r0, r2, r6, ror #2      // r0 <- r2 ^ (r6 >>> 2)
    and     r0, r0, r4, ror #2      // r0 <- r0 & 0x30303030
    orr     r6, r6, r0              // r6 <- r6 | r0
    eor     r0, r2, r6, ror #2      // r0 <- r2 ^ (r6 >>> 2)
    and     r0, r0, r4, ror #4      // r0 <- r0 & 0x0c0c0c0c
    orr     r6, r6, r0              // r6 <- r6 | r0
    eor     r0, r2, r6, ror #2      // r0 <- r2 ^ (r6 >>> 2)
    and     r0, r0, r4, ror #6      // r0 <- r0 & 0x03030303
    orr     r6, r6, r0              // r6 <- r6 | r0
    // applies ShiftRows^[-2]
    movw    r14, #0x0f00
    movt    r14, #0x0f00            // r14<- 0x0f000f00 for ShiftRows^[-2]
    and     r5, r14, r2, lsr #4     // r5 <- (r2 >> 4) & 0x0f000f00
    and     r0, r14, r2             // r0 <- r2 & 0x0f000f00
    orr     r5, r5, r0, lsl #4      // r5 <- r5 | r0 << 4
    eor     r0, r14, r14, lsl #4    // r0 <- 0xff00ff00
    ldr     r14, [r12, #4]          // load old mask
    and     r0, r2, r0, ror #8      // r0 <- r2 & 0x00ff00ff
    orr     r5, r5, r0              // r5 <- ShiftRows^[-2](r2)
    mvn     r5, r5                  // NOT that is omitted in sbox
    eor     r2, r4, r4, lsr #4      // r2 <- 0xcccccccc
    and     r14, r14, r2, lsr #2    // r14<- r14 & 0x33333333
    eor     r6, r6, r14             // remask half of register
    ldr.w   r2, [r12, #16]          // load rkey word of rkey from prev round
    str.w   r5, [r12, #20]          // store the rkey word after ShiftRows^[-2]
    str.w   r6, [r12, #64]          // store new rkey word in 'rkeys'
    eor     r5, r2, r3, ror #2      // r5 <- r2 ^ (r3 >>> 2)
    bic     r5, r4, r5              // r5 <- ~r5 & 0xc0c0c0c0 (NOT omitted in sbox)
    eor     r0, r2, r5, ror #2      // r0 <- r2 ^ (r5 >>> 2)
    and     r0, r0, r4, ror #2      // r0 <- r0 & 0x30303030
    orr     r5, r5, r0              // r5 <- r5 | r0
    eor     r0, r2, r5, ror #2      // r0 <- r2 ^ (r5 >>> 2)
    and     r0, r0, r4, ror #4      // r0 <- r0 & 0x0c0c0c0c
    orr     r5, r5, r0              // r5 <- r5 | r0
    eor     r0, r2, r5, ror #2      // r0 <- r2 ^ (r5 >>> 2)
    and     r0, r0, r4, ror #6      // r0 <- r0 & 0x03030303
    orr     r5, r5, r0              // r5 <- r5 | r0
    // applies ShiftRows^[-2]
    movw    r14, #0x0f00
    movt    r14, #0x0f00            // r14<- 0x0f000f00 for ShiftRows^[-2]
    and     r3, r14, r2, lsr #4     // r3 <- (r2 >> 4) & 0x0f000f00
    and     r0, r14, r2             // r0 <- r2 & 0x0f000f00
    orr     r3, r3, r0, lsl #4      // r3 <- r3 | r0 << 4
    eor     r0, r14, r14, lsl #4    // r0 <- 0xff00ff00
    ldr     r14, [r12]              // load old mask
    and     r0, r2, r0, ror #8      // r0 <- r2 & 0x00ff00ff
    orr     r3, r3, r0              // r3 <- ShiftRows^[-2](r2)
    mvn     r3, r3                  // NOT that is omitted in sbox
    eor     r2, r4, r4, lsr #4      // r2 <- 0xcccccccc
    and     r14, r14, r2, lsr #2    // r14<- r14 & 0x33333333
    eor     r5, r5, r14             // remask half of register
    ldr.w   r2, [r12, #12]          // load rkey word of rkey from prev round
    ldr     r14, [r12, #4]          // load old mask
    str.w   r3, [r12, #16]          // store the rkey word after ShiftRows^[-2]
    str.w   r5, [r12, #60]          // store new rkey word in 'rkeys'
    eor     r3, r2, r1, ror #2      // r3 <- r2 ^ (r1 >>> 2)
    and     r3, r4, r3              // r3 <- r3 & 0xc0c0c0c0
    eor     r0, r2, r3, ror #2      // r0 <- r2 ^ (r3 >>> 2)
    and     r0, r0, r4, ror #2      // r0 <- r0 & 0x30303030
    orr     r3, r3, r0              // r3 <- r3 | r0
    eor     r0, r2, r3, ror #2      // r0 <- r2 ^ (r3 >>> 2)
    and     r0, r0, r4, ror #4      // r0 <- r0 & 0x0c0c0c0c
    orr     r3, r3, r0              // r3 <- r3 | r0
    eor     r0, r2, r3, ror #2      // r0 <- r2 ^ (r3 >>> 2)
    and     r0, r0, r4, ror #6      // r0 <- r0 & 0x03030303
    eor     r1, r4, r4, lsr #4      // r1 <- 0xcccccccc
    orr     r4, r3, r0              // r4 <- r3 | r0
    and     r14, r14, r1, lsr #2    // r14<- r14 & 0x33333333
    eor     r4, r4, r14             // remask half of register
    // applies ShiftRows^[-2]
    movw    r14, #0x0f00
    movt    r14, #0x0f00            // r14<- 0x0f000f00 for ShiftRows^[-2]
    and     r3, r14, r2, lsr #4     // r3 <- (r2 >> 4) & 0x0f000f00
    and     r0, r14, r2             // r0 <- r2 & 0x0f000f00
    orr     r3, r3, r0, lsl #4      // r3 <- r3 | r0 << 4
    eor     r0, r14, r14, lsl #4    // r0 <- 0xff00ff00
    and     r0, r2, r0, ror #8      // r0 <- r2 & 0x00ff00ff
    orr     r3, r3, r0              // r3 <- ShiftRows^[-2](r2)
    add     r12, #44                // point to the next rkey address
    ldr     r14, [sp, #112]         // restore link register
    str.w   r3, [r12, #-32]         // store prev rkey after ShiftRows^[-2]
    str.w   r4, [r12, #12]          // store tmp rkey[0]
    str     r12, [sp, #116]         // store the new rkeys address on the stack
    ldrd    r12, r2, [r12]          // load masks MASK1 and MASK2 in r12, r2 (for sbox routine)
    bx      lr
/******************************************************************************
* Subroutine that XORs the columns after the S-box during the key schedule
* round function, for rounds i such that (i % 4) == 3.
* Note that the code size could be reduced at the cost of some instructions
* since some redundant code is applied on different registers.
******************************************************************************/
.align 2
xor_columns_3:
str r14, [sp, #112] // store link register on the stack
ldr r14, [r12, #4] // load old mask
movw r4, #0xc0c0
movt r4, #0xc0c0 // r4 <- 0xc0c0c0c0
eor r11, r5, r11, ror #2 // r11<- r5 ^ (r11 >>> 2)
bic r11, r4, r11 // r11<- ~r11 & 0xc0c0c0c0 (NOT omitted in sbox)
eor r9, r5, r11, ror #2 // r9 <- r5 ^ (r11 >>> 2)
and r9, r9, r4, ror #2 // r9 <- r9 & 0x30303030
orr r11, r11, r9 // r11<- r11 | r9
eor r9, r5, r11, ror #2 // r9 <- r5 ^ (r11 >>> 2)
and r9, r9, r4, ror #4 // r9 <- r9 & 0x0c0c0c0c
orr r11, r11, r9 // r11<- r11 | r9
eor r9, r5, r11, ror #2 // r9 <- r5 ^ (r11 >>> 2)
and r9, r9, r4, ror #6 // r9 <- r9 & 0x03030303
orr r11, r11, r9 // r11<- r11 | r9
// applies ShiftRows^[-3]
and r9, r5, #0xc000 // r9 <- r5 & 0x0000c000
and r10, r5, #0x3f00 // r10<- r5 & 0x00003f00
orr r9, r9, r10, lsl #8 // r9 <- r9 | r10 << 8
and r10, r5, #0xf00000 // r10<- r5 & 0x00f00000
orr r9, r9, r10, lsl #2 // r9 <- r9 | r10 << 2
and r10, r5, #0xf0000 // r10<- r5 & 0x000f0000
orr r9, r9, r10, lsl #10 // r9 <- r9 | r10 << 10
and r10, r5, #0xfc000000 // r10<- r5 & 0xfc000000
orr r9, r9, r10, ror #28 // r9 <- r9 | r10 >>> 8
and r10, r5, #0x03000000 // r10<- r5 & 0x03000000
orr r9, r9, r10, ror #20 // r9 <- r9 | (r10 >>> 20)
and r10, r5, #0xff // r10<- r5 & 0xff
orr r9, r10, r9, ror #6 // r9 <- ShiftRows^[-3](r5)
mvn r9, r9 // NOT that is omitted in sbox
eor r5, r4, r4, lsr #4 // r5 <- 0xcccccccc
and r14, r14, r5, lsr #2 // r14<- r14 & 0x33333333
eor r11, r11, r14 // remask half of register
ldr.w r5, [r12, #36] // load rkey word of rkey from prev round
ldr r14, [r12, #8] // load old mask
str r9, [r12, #40] // store the rkey word after ShiftRows^[-1]
str r11, [r12, #84] // store new rkey word in 'rkeys'
eor r10, r5, r2, ror #2 // r10<- r5 ^ (r2 >>> 2)
bic r10, r4, r10 // r10<- ~r10 & 0xc0c0c0c0 (NOT omitted in sbox)
eor r9, r5, r10, ror #2 // r9 <- r5 ^ (r10 >>> 2)
and r9, r9, r4, ror #2 // r9 <- r9 & 0x30303030
orr r10, r10, r9 // r10<- r10 | r9
eor r9, r5, r10, ror #2 // r9 <- r5 ^ (r10 >>> 2)
and r9, r9, r4, ror #4 // r9 <- r9 & 0x0c0c0c0c
orr r10, r10, r9 // r10<- r10 | r9
eor r9, r5, r10, ror #2 // r9 <- r5 ^ (r10 >>> 2)
and r9, r9, r4, ror #6 // r9 <- r9 & 0x03030303
orr r10, r10, r9 // r10<- r10 | r9
// applies ShiftRows^[-3]
and r9, r5, #0xc000 // r9 <- r5 & 0x0000c000
and r2, r5, #0x3f00 // r2 <- r5 & 0x00003f00
orr r9, r9, r2, lsl #8 // r9 <- r9 | r2 << 8
and r2, r5, #0xf00000 // r2 <- r5 & 0x00f00000
orr r9, r9, r2, lsl #2 // r9 <- r9 | r2 << 2
and r2, r5, #0xf0000 // r2 <- r5 & 0x000f0000
orr r9, r9, r2, lsl #10 // r9 <- r9 | r2 << 10
and r2, r5, #0xfc000000 // r2 <- r5 & 0xfc000000
orr r9, r9, r2, ror #28 // r9 <- r9 | r2 >>> 8
and r2, r5, #0x03000000 // r2 <- r5 & 0x03000000
orr r9, r9, r2, ror #20 // r9 <- r9 | (r2 >>> 20)
and r2, r5, #0xff // r2 <- r5 & 0xff
orr r5, r2, r9, ror #6 // r5 <- ShiftRows^[-3](r5)
mvn r5, r5 // NOT that is omitted in sbox
eor r2, r4, r4, lsr #4 // r2 <- 0xcccccccc
and r14, r14, r2, lsr #2 // r14<- r14 & 0x33333333
eor r10, r10, r14 // remask half of register
ldr.w r2, [r12, #32] // load rkey word of rkey from prev round
str.w r5, [r12, #36] // store the rkey word after ShiftRows^[-1]
str r10, [r12, #80] // store new rkey word in 'rkeys'
eor r9, r2, r0, ror #2 // r9 <- r2 ^ (r9 >>> 2)
and r9, r4, r9 // r9 <- r9 & 0xc0c0c0c0
eor r0, r2, r9, ror #2 // r0 <- r2 ^ (r9 >>> 2)
and r0, r0, r4, ror #2 // r0 <- r0 & 0x30303030
orr r9, r9, r0 // r9 <- r9 | r0
eor r0, r2, r9, ror #2 // r0 <- r2 ^ (r9 >>> 2)
and r0, r0, r4, ror #4 // r0 <- r0 & 0x0c0c0c0c
orr r9, r9, r0 // r9 <- r9 | r0
eor r0, r2, r9, ror #2 // r0 <- r2 ^ (r9 >>> 2)
and r0, r0, r4, ror #6 // r0 <- r0 & 0x03030303
orr r9, r9, r0 // r9 <- r9 | r0
// applies ShiftRows^[-3]
and r5, r2, #0xc000 // r5 <- r2 & 0x0000c000
and r0, r2, #0x3f00 // r0 <- r2 & 0x00003f00
orr r5, r5, r0, lsl #8 // r5 <- r5 | r0 << 8
and r0, r2, #0xf00000 // r0 <- r2 & 0x00f00000
orr r5, r5, r0, lsl #2 // r5 <- r5 | r0 << 2
and r0, r2, #0xf0000 // r0 <- r2 & 0x000f0000
orr r5, r5, r0, lsl #10 // r5 <- r5 | r0 << 10
and r0, r2, #0xfc000000 // r0 <- r2 & 0xfc000000
orr r5, r5, r0, ror #28 // r5 <- r5 | r0 >>> 8
and r0, r2, #0x03000000 // r0 <- r2 & 0x03000000
orr r5, r5, r0, ror #20 // r5 <- r5 | (r0 >>> 20)
and r0, r2, #0xff // r0 <- r2 & 0xff
orr r5, r0, r5, ror #6 // r5 <- ShiftRows^[-3](r2)
eor r9, r9, r14 // remask half of register
ldr.w r2, [r12, #28] // load rkey word of rkey from prev round
ldr r14, [r12]
str.w r5, [r12, #32] // store the rkey word after ShiftRows^[-1]
str.w r9, [r12, #76] // store new rkey word in 'rkeys'
eor r8, r2, r8, ror #2 // r8 <- r2 ^ (r8 >>> 2)
and r8, r4, r8 // r8 <- r8 & 0xc0c0c0c0
eor r0, r2, r8, ror #2 // r0 <- r2 ^ (r8 >>> 2)
and r0, r0, r4, ror #2 // r0 <- r0 & 0x30303030
orr r8, r8, r0 // r8 <- r8 | r0
eor r0, r2, r8, ror #2 // r0 <- r2 ^ (r8 >>> 2)
and r0, r0, r4, ror #4 // r0 <- r0 & 0x0c0c0c0c
orr r8, r8, r0 // r8 <- r8 | r0
eor r0, r2, r8, ror #2 // r0 <- r2 ^ (r8 >>> 2)
and r0, r0, r4, ror #6 // r0 <- r0 & 0x03030303
orr r8, r8, r0 // r8 <- r8 | r0
// applies ShiftRows^[-3]
and r5, r2, #0xc000 // r5 <- r2 & 0x0000c000
and r0, r2, #0x3f00 // r0 <- r2 & 0x00003f00
orr r5, r5, r0, lsl #8 // r5 <- r5 | r0 << 8
and r0, r2, #0xf00000 // r0 <- r2 & 0x00f00000
orr r5, r5, r0, lsl #2 // r5 <- r5 | r0 << 2
and r0, r2, #0xf0000 // r0 <- r2 & 0x000f0000
orr r5, r5, r0, lsl #10 // r5 <- r5 | r0 << 10
and r0, r2, #0xfc000000 // r0 <- r2 & 0xfc000000
orr r5, r5, r0, ror #28 // r5 <- r5 | r0 >>> 8
and r0, r2, #0x03000000 // r0 <- r2 & 0x03000000
orr r5, r5, r0, ror #20 // r5 <- r5 | (r0 >>> 20)
and r0, r2, #0xff // r0 <- r2 & 0xff
orr r5, r0, r5, ror #6 // r5 <- ShiftRows^[-3](r2)
eor r2, r4, r4, lsr #4 // r2 <- 0xcccccccc
and r14, r14, r2, lsr #2 // r14<- r14 & 0x33333333
eor r8, r8, r14 // remask half register
ldr.w r2, [r12, #24] // load rkey word of rkey from prev round
str.w r5, [r12, #28] // store the rkey word after ShiftRows^[-1]
str.w r8, [r12, #72] // store new rkey word in 'rkeys'
eor r7, r2, r7, ror #2 // r7 <- r2 ^ (r7 >>> 2)
and r7, r4, r7 // r7 <- r7 & 0xc0c0c0c0
eor r0, r2, r7, ror #2 // r0 <- r2 ^ (r7 >>> 2)
and r0, r0, r4, ror #2 // r0 <- r0 & 0x30303030
orr r7, r7, r0 // r7 <- r7 | r0
eor r0, r2, r7, ror #2 // r0 <- r2 ^ (r7 >>> 2)
and r0, r0, r4, ror #4 // r0 <- r0 & 0x0c0c0c0c
orr r7, r7, r0 // r7 <- r7 | r0
eor r0, r2, r7, ror #2 // r0 <- r2 ^ (r7 >>> 2)
and r0, r0, r4, ror #6 // r0 <- r0 & 0x03030303
orr r7, r7, r0 // r7 <- r7 | r0
// applies ShiftRows^[-3]
and r5, r2, #0xc000 // r5 <- r2 & 0x0000c000
and r0, r2, #0x3f00 // r0 <- r2 & 0x00003f00
orr r5, r5, r0, lsl #8 // r5 <- r5 | r0 << 8
and r0, r2, #0xf00000 // r0 <- r2 & 0x00f00000
orr r5, r5, r0, lsl #2 // r5 <- r5 | r0 << 2
and r0, r2, #0xf0000 // r0 <- r2 & 0x000f0000
orr r5, r5, r0, lsl #10 // r5 <- r5 | r0 << 10
and r0, r2, #0xfc000000 // r0 <- r2 & 0xfc000000
orr r5, r5, r0, ror #28 // r5 <- r5 | r0 >>> 8
and r0, r2, #0x03000000 // r0 <- r2 & 0x03000000
orr r5, r5, r0, ror #20 // r5 <- r5 | (r0 >>> 20)
and r0, r2, #0xff // r0 <- r2 & 0xff
orr r5, r0, r5, ror #6 // r5 <- ShiftRows^[-3](r2)
eor r7, r7, r14 // remask half register
ldr.w r2, [r12, #20] // load rkey word of rkey from prev round
ldr r14, [r12, #4]
str.w r5, [r12, #24] // store the rkey word after ShiftRows^[-1]
str.w r7, [r12, #68] // store new rkey word in 'rkeys'
eor r6, r2, r6, ror #2 // r6 <- r2 ^ (r6 >>> 2)
bic r6, r4, r6 // r6 <- ~r6 & 0xc0c0c0c0 (NOT omitted in sbox)
eor r0, r2, r6, ror #2 // r0 <- r2 ^ (r6 >>> 2)
and r0, r0, r4, ror #2 // r0 <- r0 & 0x30303030
orr r6, r6, r0 // r6 <- r6 | r0
eor r0, r2, r6, ror #2 // r0 <- r2 ^ (r6 >>> 2)
and r0, r0, r4, ror #4 // r0 <- r0 & 0x0c0c0c0c
orr r6, r6, r0 // r6 <- r6 | r0
eor r0, r2, r6, ror #2 // r0 <- r2 ^ (r6 >>> 2)
and r0, r0, r4, ror #6 // r0 <- r0 & 0x03030303
orr r6, r6, r0 // r6 <- r6 | r0
// applies ShiftRows^[-3]
and r5, r2, #0xc000 // r5 <- r2 & 0x0000c000
and r0, r2, #0x3f00 // r0 <- r2 & 0x00003f00
orr r5, r5, r0, lsl #8 // r5 <- r5 | r0 << 8
and r0, r2, #0xf00000 // r0 <- r2 & 0x00f00000
orr r5, r5, r0, lsl #2 // r5 <- r5 | r0 << 2
and r0, r2, #0xf0000 // r0 <- r2 & 0x000f0000
orr r5, r5, r0, lsl #10 // r5 <- r5 | r0 << 10
and r0, r2, #0xfc000000 // r0 <- r2 & 0xfc000000
orr r5, r5, r0, ror #28 // r5 <- r5 | r0 >>> 8
and r0, r2, #0x03000000 // r0 <- r2 & 0x03000000
orr r5, r5, r0, ror #20 // r5 <- r5 | (r0 >>> 20)
and r0, r2, #0xff // r0 <- r2 & 0xff
orr r5, r0, r5, ror #6 // r5 <- ShiftRows^[-3](r2)
mvn r5, r5 // NOT omitted in sbox
eor r2, r4, r4, lsr #4 // r2 <- 0xcccccccc
and r14, r14, r2, lsr #2 // r14<- r14 & 0x33333333
eor r6, r6, r14 // remask half register
ldr.w r2, [r12, #16] // load rkey word of rkey from prev round
ldr r14, [r12] // load prev mask
str.w r5, [r12, #20] // store the rkey word after ShiftRows^[-1]
str.w r6, [r12, #64] // store new rkey word in 'rkeys'
eor r5, r2, r3, ror #2 // r5 <- r2 ^ (r3 >>> 2)
bic r5, r4, r5 // r5 <- ~r5 & 0xc0c0c0c0 (NOT omitted in sbox)
eor r0, r2, r5, ror #2 // r0 <- r2 ^ (r5 >>> 2)
and r0, r0, r4, ror #2 // r0 <- r0 & 0x30303030
orr r5, r5, r0 // r5 <- r5 | r0
eor r0, r2, r5, ror #2 // r0 <- r2 ^ (r5 >>> 2)
and r0, r0, r4, ror #4 // r0 <- r0 & 0x0c0c0c0c
orr r5, r5, r0 // r5 <- r5 | r0
eor r0, r2, r5, ror #2 // r0 <- r2 ^ (r5 >>> 2)
and r0, r0, r4, ror #6 // r0 <- r0 & 0x03030303
orr r5, r5, r0 // r5 <- r5 | r0
// applies ShiftRows^[-3]
and r3, r2, #0xc000 // r3 <- r2 & 0x0000c000
and r0, r2, #0x3f00 // r0 <- r2 & 0x00003f00
orr r3, r3, r0, lsl #8 // r3 <- r3 | r0 << 8
and r0, r2, #0xf00000 // r0 <- r2 & 0x00f00000
orr r3, r3, r0, lsl #2 // r3 <- r3 | r0 << 2
and r0, r2, #0xf0000 // r0 <- r2 & 0x000f0000
orr r3, r3, r0, lsl #10 // r3 <- r3 | r0 << 10
and r0, r2, #0xfc000000 // r0 <- r2 & 0xfc000000
orr r3, r3, r0, ror #28 // r3 <- r3 | r0 >>> 8
and r0, r2, #0x03000000 // r0 <- r2 & 0x03000000
orr r3, r3, r0, ror #20 // r3 <- r3 | (r0 >>> 20)
and r0, r2, #0xff // r0 <- r2 & 0xff
orr r3, r0, r3, ror #6 // r3 <- ShiftRows^[-3](r2)
mvn r3, r3 // NOT omitted in sbox
eor r2, r4, r4, lsr #4 // r2 <- 0xcccccccc
and r14, r14, r2, lsr #2 // r14<- r14 & 0x33333333
eor r5, r5, r14 // remask half register
ldr.w r2, [r12, #12] // load rkey word of rkey from prev round
ldr r14, [r12, #4]
str.w r3, [r12, #16] // store new rkey word in 'rkeys'
str.w r5, [r12, #60]
eor r3, r2, r1, ror #2 // r3 <- r2 ^ (r1 >>> 2)
and r3, r4, r3 // r3 <- r3 & 0xc0c0c0c0
eor r0, r2, r3, ror #2 // r0 <- r2 ^ (r3 >>> 2)
and r0, r0, r4, ror #2 // r0 <- r0 & 0x30303030
orr r3, r3, r0 // r3 <- r3 | r0
eor r0, r2, r3, ror #2 // r0 <- r2 ^ (r3 >>> 2)
and r0, r0, r4, ror #4 // r0 <- r0 & 0x0c0c0c0c
orr r3, r3, r0 // r3 <- r3 | r0
eor r0, r2, r3, ror #2 // r0 <- r2 ^ (r3 >>> 2)
and r0, r0, r4, ror #6 // r0 <- r0 & 0x03030303
eor r1, r4, r4, lsr #4 // r2 <- 0xcccccccc
and r14, r14, r1, lsr #2 // r14<- r14 & 0x33333333
orr r4, r3, r0 // r4 <- r3 | r0
eor r4, r4, r14 // remask half register
// applies ShiftRows^[-3]
and r3, r2, #0xc000 // r3 <- r2 & 0x0000c000
and r0, r2, #0x3f00 // r0 <- r2 & 0x00003f00
orr r3, r3, r0, lsl #8 // r3 <- r3 | r0 << 8
and r0, r2, #0xf00000 // r0 <- r2 & 0x00f00000
orr r3, r3, r0, lsl #2 // r3 <- r3 | r0 << 2
and r0, r2, #0xf0000 // r0 <- r2 & 0x000f0000
orr r3, r3, r0, lsl #10 // r3 <- r3 | r0 << 10
and r0, r2, #0xfc000000 // r0 <- r2 & 0xfc000000
orr r3, r3, r0, ror #28 // r3 <- r3 | r0 >>> 8
and r0, r2, #0x03000000 // r0 <- r2 & 0x03000000
orr r3, r3, r0, ror #20 // r3 <- r3 | (r0 >>> 20)
and r0, r2, #0xff // r0 <- r2 & 0xff
orr r3, r0, r3, ror #6 // r3 <- ShiftRows^[-3](r2)
add r12, #44
ldr r14, [sp, #112]
str.w r4, [r12, #12]
str.w r3, [r12, #-32]
str.w r12, [sp, #116] // store the new rkeys address on the stack
ldrd r12, r2, [r12] // load masks MASK1 and MASK2 in r12, r2 (for sbox routine)
bx lr
/******************************************************************************
* First-order masked AES-128 key schedule to match the fully-fixsliced (ffs)
* representation.
*
* In:  r0 = 'rkeys' output array; 16 bytes are loaded from r1 AND 16 bytes
*      from r2 (two 128-bit inputs packed side by side by 'packing').
* NOTE(review): the prototype comment below lists a single 'key' argument,
* yet the code also reads 16 bytes from r2 — confirm the C declaration.
* The two masking words are drawn from the hardware RNG at
* 0x50060804/0x50060808 (RNG_SR/RNG_DR, STM32-style peripheral), so this
* routine is platform-specific and busy-waits on the RNG ready flag.
* All of r0-r12,r14 are saved/restored; returns with the stack balanced.
******************************************************************************/
@ void aes128_keyschedule_ffs(u32* rkeys, const u8* key);
.global aes128_keyschedule_ffs
.type aes128_keyschedule_ffs,%function
.align 2
aes128_keyschedule_ffs:
    push {r0-r12,r14}
    ldr.w r4, [r1]              // load the 1st 128-bit key in r4-r7
    ldr r5, [r1, #4]
    ldr r6, [r1, #8]
    ldr r7, [r1, #12]
    ldr.w r8, [r2]              // load the 2nd 128-bit key in r8-r11
    ldr r9, [r2, #4]
    ldr r10,[r2, #8]
    ldr r11,[r2, #12]
    bl packing                  // pack the master key
    // ------------------ MASKING ------------------
    // generation of 2 random words
    movw r0, 0x0804
    movt r0, 0x5006             // r0 <- RNG_SR = 0x50060804
    add r12, r0, #4             // r12<- RNG_DR = 0x50060808 (comment fixed: dest is r12, not r1)
    mov r14, #2                 // 2 random words to be generated
ffs_get_random_mask:
    ldr.w r2, [r0]
    cmp r2, #1                  // check if RNG_SR == RNG_SR_DRDY
    bne ffs_get_random_mask     // loop while RNG status is not ready
    ldr.w r1, [r12]             // load the random number in r1
    push {r1}                   // push the random word on the stack
    subs r14, #1                // r14<- r14 -1
    bne ffs_get_random_mask     // loop till r14 > 0
    // expand 2+2 random bits into the byte-replicated masks m0 (r2), m1 (r1)
    ldr.w r1, [sp]              // load back the last rnd generated
    ubfx r2, r1, #0, #2         // r2 <- ab
    orr r2, r2, r2, lsl #2      // r2 <- abab
    orr r2, r2, r2, lsl #4      // r2 <- abababab
    orr r2, r2, r2, lsl #8      // r2 <- abababababababab
    orr r2, r2, r2, lsl #16     // r2 <- abababababababababababababababab
    ubfx.w r1, r1, #2, #2       // r1 <- cd
    orr r1, r1, r1, lsl #2      // r1 <- cdcd
    orr r1, r1, r1, lsl #4      // r1 <- cdcdcdcd
    orr r1, r1, r1, lsl #8      // r1 <- cdcdcdcdcdcdcdcd
    orr r1, r1, r1, lsl #16     // r1 <- cdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcd
    eor r0, r1, r2              // r0 <- m0 ^ m1
    eor r4, r4, r1              // r4 <- key[0] ^ m1
    eor r5, r5, r2              // r5 <- key[1] ^ m0
    eor r6, r6, r1              // r6 <- key[2] ^ m1
    eor r7, r7, r2              // r7 <- key[3] ^ m0
    eor r8, r8, r2              // r8 <- key[4] ^ m0
    eor r9, r9, r0              // r9 <- key[5] ^ m2
    eor r10, r10, r0            // r10<- key[6] ^ m2
    eor r11, r11, r1            // r11<- key[7] ^ m1
    // ------------------ MASKING ------------------
    sub.w sp, #136              // allow space on the stack for tmp var
    ldr r14, [sp, #144]         // restore 'rkeys' address
    add r14, #12
    stm r14, {r4-r11}           // store masked master key
    strd r2, r1, [r14, #-12]!   // store the corresponding masks
    str.w r0, [r14, #8]         // store the corresponding masks
    str r14, [sp ,#116]         // store the rkey address on the stack
    str r0, [sp, #120]          // store the current masks on the stack for sbox computations
    strd r1, r2, [sp, #124]     // store the current masks on the stack for sbox computations
    mov r12, r2                 // for compliance to sbox routine
    mov r2, r1                  // for compliance to sbox routine
    // 10 rounds of masked key expansion; the round constant lands in a
    // different register each time because of the fixsliced bit ordering
    bl sbox                     // apply the sbox to the master key
    bl remask_rkey
    eor r11, r11, #0x00000300   // add the 1st rconst
    bl xor_columns_0
    bl sbox                     // apply the sbox to the master key
    bl remask_rkey
    eor r2, r2, #0x00000300     // add the 2nd rconst
    bl xor_columns_1
    bl sbox                     // apply the sbox to the master key
    bl remask_rkey
    eor r0, r0, #0x00000300     // add the 3rd rconst
    bl xor_columns_2
    bl sbox                     // apply the sbox to the master key
    bl remask_rkey
    eor r8, r8, #0x00000300     // add the 4th rconst
    bl xor_columns_3
    bl sbox                     // apply the sbox to the master key
    bl remask_rkey
    eor r7, r7, #0x00000300     // add the 5th rconst
    bl xor_columns_0
    bl sbox                     // apply the sbox to the master key
    bl remask_rkey
    eor r6, r6, #0x00000300     // add the 6th rconst
    bl xor_columns_1
    bl sbox                     // apply the sbox to the master key
    bl remask_rkey
    eor r3, r3, #0x00000300     // add the 7th rconst
    bl xor_columns_2
    bl sbox                     // apply the sbox to the master key
    ldr.w r2, [sp, #140]        // all bits within previous 32-bit random have been used
    str.w r2, [sp, #136]        // put the other random word at the right place for 'remask_rkey'
    bl remask_rkey
    eor r1, r1, #0x00000300     // add the 8th rconst
    bl xor_columns_3
    bl sbox                     // apply the sbox to the master key
    bl remask_rkey
    eor r11, r11, #0x00000300   // add the 9th rconst (0x1b spans 4 bit slices)
    eor r2, r2, #0x00000300     // add the 9th rconst
    eor r8, r8, #0x00000300     // add the 9th rconst
    eor r7, r7, #0x00000300     // add the 9th rconst
    bl xor_columns_0
    bl sbox                     // apply the sbox to the master key
    bl remask_rkey
    eor r2, r2, #0x00000300     // add the 10th rconst (0x36 spans 4 bit slices)
    eor r0, r0, #0x00000300     // add the 10th rconst
    eor r7, r7, #0x00000300     // add the 10th rconst
    eor r6, r6, #0x00000300     // add the 10th rconst
    bl xor_columns_1
    mvn r5, r5                  // add the NOT omitted on sbox for the last rkey
    mvn r6, r6                  // add the NOT omitted on sbox for the last rkey
    mvn r10, r10                // add the NOT omitted on sbox for the last rkey
    mvn r11, r11                // add the NOT omitted on sbox for the last rkey
    ldr.w r12, [sp, #116]       // restore rkeys addr
    strd r5, r6, [r12, #16]     // store the last rkeys after adding the NOT
    strd r10, r11, [r12, #36]   // store the last rkeys after adding the NOT
    ldrd r0, r1, [r12, #-424]   // r12 points past the last rkey; -424 reaches the master rkey
    ldrd r2, r3, [r12, #-404]
    mvn r0, r0                  // remove the NOT omitted on sbox for the master rkey
    mvn r1, r1                  // remove the NOT omitted on sbox for the master rkey
    mvn r2, r2                  // remove the NOT omitted on sbox for the master rkey
    mvn r3, r3                  // remove the NOT omitted on sbox for the master rkey
    strd r0, r1, [r12, #-424]
    strd r2, r3, [r12, #-404]
    add.w sp, #144              // restore stack (136 tmp + 2 pushed random words)
    pop {r0-r12, r14}           // restore context
    bx lr
/******************************************************************************
* First-order masked AES-128 key schedule to match the semi-fixsliced (sfs)
* representation. Mirrors 'aes128_keyschedule_ffs' but, because semi-fixslicing
* only uses two ShiftRows variants, the rounds simply alternate between
* xor_columns_0 and xor_columns_1 instead of cycling through four variants.
*
* In:  r0 = 'rkeys' output array; 16 bytes are loaded from r1 AND 16 bytes
*      from r2 (two 128-bit inputs packed side by side by 'packing').
* NOTE(review): the prototype comment below lists a single 'key' argument,
* yet the code also reads 16 bytes from r2 — confirm the C declaration.
* Masks come from the hardware RNG at 0x50060804/0x50060808 (RNG_SR/RNG_DR,
* STM32-style peripheral); the routine busy-waits on the RNG ready flag.
******************************************************************************/
@ void aes128_keyschedule_sfs(u32* rkeys, const u8* key);
.global aes128_keyschedule_sfs
.type aes128_keyschedule_sfs,%function
.align 2
aes128_keyschedule_sfs:
    push {r0-r12,r14}
    ldr.w r4, [r1]              // load the 1st 128-bit key in r4-r7
    ldr r5, [r1, #4]
    ldr r6, [r1, #8]
    ldr r7, [r1, #12]
    ldr.w r8, [r2]              // load the 2nd 128-bit key in r8-r11
    ldr r9, [r2, #4]
    ldr r10,[r2, #8]
    ldr r11,[r2, #12]
    bl packing                  // pack the master key
    // ------------------ MASKING ------------------
    // generation of 2 random words
    movw r0, 0x0804
    movt r0, 0x5006             // r0 <- RNG_SR = 0x50060804
    add r12, r0, #4             // r12<- RNG_DR = 0x50060808 (comment fixed: dest is r12, not r1)
    mov r14, #2                 // 2 random words to be generated
sfs_get_random_mask:
    ldr.w r2, [r0]
    cmp r2, #1                  // check if RNG_SR == RNG_SR_DRDY
    bne sfs_get_random_mask     // loop while RNG status is not ready
    ldr.w r1, [r12]             // load the random number in r1
    push {r1}                   // push the random word on the stack
    subs r14, #1                // r14<- r14 - 1
    bne sfs_get_random_mask     // loop till r14 > 0
    // expand 2+2 random bits into the byte-replicated masks m0 (r2), m1 (r1)
    ldr.w r1, [sp]              // load back the last rnd generated
    ubfx r2, r1, #0, #2         // r2 <- ab
    orr r2, r2, r2, lsl #2      // r2 <- abab
    orr r2, r2, r2, lsl #4      // r2 <- abababab
    orr r2, r2, r2, lsl #8      // r2 <- abababababababab
    orr r2, r2, r2, lsl #16     // r2 <- abababababababababababababababab
    ubfx.w r1, r1, #2, #2       // r1 <- cd
    orr r1, r1, r1, lsl #2      // r1 <- cdcd
    orr r1, r1, r1, lsl #4      // r1 <- cdcdcdcd
    orr r1, r1, r1, lsl #8      // r1 <- cdcdcdcdcdcdcdcd
    orr r1, r1, r1, lsl #16     // r1 <- cdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcd
    eor r0, r1, r2              // r0 <- m0 ^ m1
    eor r4, r4, r1              // r4 <- key[0] ^ m1
    eor r5, r5, r2              // r5 <- key[1] ^ m0
    eor r6, r6, r1              // r6 <- key[2] ^ m1
    eor r7, r7, r2              // r7 <- key[3] ^ m0
    eor r8, r8, r2              // r8 <- key[4] ^ m0
    eor r9, r9, r0              // r9 <- key[5] ^ m2
    eor r10, r10, r0            // r10<- key[6] ^ m2
    eor r11, r11, r1            // r11<- key[7] ^ m1
    // ------------------ MASKING ------------------
    sub.w sp, #136              // allow space on the stack for tmp var
    ldr r14, [sp, #144]         // restore 'rkeys' address
    add r14, #12
    stm r14, {r4-r11}           // store masked master key
    strd r2, r1, [r14, #-12]!   // store the corresponding masks
    str.w r0, [r14, #8]         // store the corresponding masks
    str r14, [sp ,#116]         // store the rkey address on the stack
    str r0, [sp, #120]          // store the current masks on the stack for sbox computations
    strd r1, r2, [sp, #124]     // store the current masks on the stack for sbox computations
    mov r12, r2                 // for compliance to sbox routine
    mov r2, r1                  // for compliance to sbox routine
    // 10 rounds of masked key expansion, alternating the two sfs variants
    bl sbox                     // apply the sbox to the master key
    bl remask_rkey
    eor r11, r11, #0x00000300   // add the 1st rconst
    bl xor_columns_0
    bl sbox                     // apply the sbox to the master key
    bl remask_rkey
    eor r2, r2, #0x00000300     // add the 2nd rconst
    bl xor_columns_1
    bl sbox                     // apply the sbox to the master key
    bl remask_rkey
    eor r0, r0, #0x00000300     // add the 3rd rconst
    bl xor_columns_0
    bl sbox                     // apply the sbox to the master key
    bl remask_rkey
    eor r8, r8, #0x00000300     // add the 4th rconst
    bl xor_columns_1
    bl sbox                     // apply the sbox to the master key
    bl remask_rkey
    eor r7, r7, #0x00000300     // add the 5th rconst
    bl xor_columns_0
    bl sbox                     // apply the sbox to the master key
    bl remask_rkey
    eor r6, r6, #0x00000300     // add the 6th rconst
    bl xor_columns_1
    bl sbox                     // apply the sbox to the master key
    bl remask_rkey
    eor r3, r3, #0x00000300     // add the 7th rconst
    bl xor_columns_0
    bl sbox                     // apply the sbox to the master key
    ldr.w r2, [sp, #140]        // all bits within previous 32-bit random have been used
    str.w r2, [sp, #136]        // put the other random word at the right place for 'remask_rkey'
    bl remask_rkey
    eor r1, r1, #0x00000300     // add the 8th rconst
    bl xor_columns_1
    bl sbox                     // apply the sbox to the master key
    bl remask_rkey
    eor r11, r11, #0x00000300   // add the 9th rconst (0x1b spans 4 bit slices)
    eor r2, r2, #0x00000300     // add the 9th rconst
    eor r8, r8, #0x00000300     // add the 9th rconst
    eor r7, r7, #0x00000300     // add the 9th rconst
    bl xor_columns_0
    bl sbox                     // apply the sbox to the master key
    bl remask_rkey
    eor r2, r2, #0x00000300     // add the 10th rconst (0x36 spans 4 bit slices)
    eor r0, r0, #0x00000300     // add the 10th rconst
    eor r7, r7, #0x00000300     // add the 10th rconst
    eor r6, r6, #0x00000300     // add the 10th rconst
    bl xor_columns_1
    mvn r5, r5                  // add the NOT omitted on sbox for the last rkey
    mvn r6, r6                  // add the NOT omitted on sbox for the last rkey
    mvn r10, r10                // add the NOT omitted on sbox for the last rkey
    mvn r11, r11                // add the NOT omitted on sbox for the last rkey
    ldr.w r12, [sp, #116]       // restore rkeys addr
    strd r5, r6, [r12, #16]     // store the last rkeys after adding the NOT
    strd r10, r11, [r12, #36]   // store the last rkeys after adding the NOT
    ldrd r0, r1, [r12, #-424]   // r12 points past the last rkey; -424 reaches the master rkey
    ldrd r2, r3, [r12, #-404]
    mvn r0, r0                  // remove the NOT omitted on sbox for the master rkey
    mvn r1, r1                  // remove the NOT omitted on sbox for the master rkey
    mvn r2, r2                  // remove the NOT omitted on sbox for the master rkey
    mvn r3, r3                  // remove the NOT omitted on sbox for the master rkey
    strd r0, r1, [r12, #-424]
    strd r2, r3, [r12, #-404]
    add.w sp, #144              // restore stack (136 tmp + 2 pushed random words)
    pop {r0-r12, r14}           // restore context
    bx lr
/******************************************************************************
 * File boundary marker (dataset-join artifact repaired):
 * source: aadomn/aes — riscv/barrel_shiftrows/aes_keyschedule_lut.S
 * (29,671 bytes). The code below targets RV32I, not ARM.
 ******************************************************************************/
/******************************************************************************
* RV32I assembly implementations of the AES-128 and AES-256 key schedules
* according to the barrel-shiftrows representation.
*
* See the paper at https://eprint.iacr.org/2020/1123.pdf for more details.
*
* @author Alexandre Adomnicai, Nanyang Technological University, Singapore
* alexandre.adomnicai@ntu.edu.sg
*
* @date August 2020
******************************************************************************/
.data
/******************************************************************************
* The AES Sbox represented as a look-up-table. Used during the key schedule.
* Four consecutive sbox bytes are packed little-endian into each 32-bit word
* (sbox[0..3] = 63,7c,77,7b -> 0x7b777c63), so a byte is fetched with an
* aligned word load followed by a shift/mask in the key-schedule routines.
******************************************************************************/
.align 2
sbox_lut:
    .word 0x7b777c63, 0xc56f6bf2, 0x2b670130, 0x76abd7fe
    .word 0x7dc982ca, 0xf04759fa, 0xafa2d4ad, 0xc072a49c
    .word 0x2693fdb7, 0xccf73f36, 0xf1e5a534, 0x1531d871
    .word 0xc323c704, 0x9a059618, 0xe2801207, 0x75b227eb
    .word 0x1a2c8309, 0xa05a6e1b, 0xb3d63b52, 0x842fe329
    .word 0xed00d153, 0x5bb1fc20, 0x39becb6a, 0xcf584c4a
    .word 0xfbaaefd0, 0x85334d43, 0x7f02f945, 0xa89f3c50
    .word 0x8f40a351, 0xf5389d92, 0x21dab6bc, 0xd2f3ff10
    .word 0xec130ccd, 0x1744975f, 0x3d7ea7c4, 0x73195d64
    .word 0xdc4f8160, 0x88902a22, 0x14b8ee46, 0xdb0b5ede
    .word 0x0a3a32e0, 0x5c240649, 0x62acd3c2, 0x79e49591
    .word 0x6d37c8e7, 0xa94ed58d, 0xeaf4566c, 0x08ae7a65
    .word 0x2e2578ba, 0xc6b4a61c, 0x1f74dde8, 0x8a8bbd4b
    .word 0x66b53e70, 0x0ef60348, 0xb9573561, 0x9e1dc186
    .word 0x1198f8e1, 0x948ed969, 0xe9871e9b, 0xdf2855ce
    .word 0x0d89a18c, 0x6842e6bf, 0x0f2d9941, 0x16bb54b0
/******************************************************************************
* The AES round constants represented as a look-up-table. Used during the key
* schedule. All 10 constants are consumed by AES-128; AES-256 only consumes
* the first 7 (one per 'aes256_rfunc_ks_0' call).
******************************************************************************/
.align 2
rconst_lut:
    .word 0x00000001, 0x00000002, 0x00000004, 0x00000008
    .word 0x00000010, 0x00000020, 0x00000040, 0x00000080
    .word 0x0000001b, 0x00000036
.text
/******************************************************************************
* Implementation of the SWAPMOVE technique for the packing/unpacking routines:
* the bits of in1 selected by 'mask' are swapped with the bits of in0 located
* 'imm' positions to their left.
*
* Parameters:
* - out0-out1 are output registers.
* - in0-in1 are input registers.           (comment fixed: these are inputs)
* - mask is the mask.
* - imm is the shift index (must be an immediate value)
* - r0 is used as a temporary register
******************************************************************************/
.macro swapmove out0,out1, in0,in1, mask, imm, r0
    srli \r0, \in0, \imm
    xor \r0, \r0, \in1
    and \r0, \r0, \mask
    xor \out1, \in1, \r0
    slli \r0, \r0, \imm
    xor \out0, \in0, \r0
.endm
/******************************************************************************
* Routine to spread the rkey bits in the entire 32-bit word to match the barrel
* shiftrows representation.
*
* In:  t3-t6 hold the packed round-key words; 'mask' selects one bit per byte
*      (0x80808080 shifted right by 0..7), and each selected bit is replicated
*      to fill its byte via the three shift/or steps below.
* Out: s1 (from t3), s2 (from t4), s3 (from t5), s11 (from t6).
* Clobbers: a1 (temporary for the t6 lane).
*
* Parameters:
* - ins0-ins2 are srli or slli instruction
* - mask is a mask to extract the right bits
******************************************************************************/
.macro spread_bits ins0, ins1, ins2, mask
    and s1, t3, \mask
    \ins0 s2, s1, 1
    or s1, s1, s2
    \ins1 s2, s1, 2
    or s1, s1, s2
    \ins2 s2, s1, 4
    or s1, s1, s2
    and s2, t4, \mask
    \ins0 s3, s2, 1
    or s2, s2, s3
    \ins1 s3, s2, 2
    or s2, s2, s3
    \ins2 s3, s2, 4
    or s2, s2, s3
    and s3, t5, \mask
    \ins0 s11, s3, 1
    or s3, s3, s11
    \ins1 s11, s3, 2
    or s3, s3, s11
    \ins2 s11, s3, 4
    or s3, s3, s11
    and s11, t6, \mask
    \ins0 a1, s11, 1
    or s11, s11, a1
    \ins1 a1, s11, 2
    or s11, s11, a1
    \ins2 a1, s11, 4
    or s11, s11, a1
.endm
/******************************************************************************
* Inverts all bits of the four round-key words, so that the NOT gates omitted
* inside the bitsliced Sbox are pre-applied at key-schedule time.
*
* Parameters:
* - rk0-rk3 are the round key words
******************************************************************************/
.macro not_rkeys rk0,rk1,rk2,rk3
    xori \rk0, \rk0, -1         // ~rk0 ('not' pseudo-instruction expanded)
    xori \rk1, \rk1, -1         // ~rk1
    xori \rk2, \rk2, -1         // ~rk2
    xori \rk3, \rk3, -1         // ~rk3
.endm
/******************************************************************************
* Store the round keys in the corresponding array.
* The four words produced by one 'spread_bits' call belong to the same bit
* position of four different rows, and are stored 32 bytes apart — hence the
* +0/+32/+64/+96 offsets added to 'addr'.
*
* Parameters:
* - rk0-rk3 are the round key words
* - addr is the address of the round keys array
******************************************************************************/
.macro store_rkeys rk0,rk1,rk2,rk3, addr
    sw \rk0, \addr
    sw \rk1, 32+\addr
    sw \rk2, 64+\addr
    sw \rk3, 96+\addr
.endm
/******************************************************************************
* Shared tail of the round-key packing sequence, factored out to save code
* size (hence the name 'redundant_code').
* On entry s1,s2,s3,s11 already hold a computed spread (stored at offset 8);
* the routine then computes and stores the spreads for masks s6, s7, s8 and
* finally leaves the s9 spread in s1,s2,s3,s11 for the caller to
* (optionally NOT and) store at offset 24.
******************************************************************************/
redundant_code:
    store_rkeys s1, s2, s3, s11, 8(a0)  // store round key words
    spread_bits slli, slli, srli, s6    // extract and spread bits of t3-t6
    store_rkeys s1, s2, s3, s11, 12(a0) // store round key words
    spread_bits srli, srli, slli, s7    // extract and spread bits of t3-t6
    store_rkeys s1, s2, s3, s11, 16(a0) // store round key words
    spread_bits slli, srli, slli, s8    // extract and spread bits of t3-t6
    store_rkeys s1, s2, s3, s11, 20(a0) // store round key words
    spread_bits srli, slli, slli, s9    // extract and spread bits of t3-t6
    ret
/******************************************************************************
* Subroutine to pack one 128-bit round key according to the barrel-shiftrows
* representation.
* In:  a2-a5 = round key words; t0 = 0x00ff00ff, t1 = 0x0000ffff (SWAPMOVE
*      masks, set up by the caller).
* Out: t3-t6 = bit-transposed round key words. Clobbers: s1.
******************************************************************************/
swapmove_rkey:
    swapmove t3, t4, a2, a3, t0, 8, s1  // SWAPMOVE(a2,a3, 0x00ff00ff, 8)
    swapmove t5, t6, a4, a5, t0, 8, s1  // SWAPMOVE(a4,a5, 0x00ff00ff, 8)
    swapmove t3, t5, t3, t5, t1, 16, s1 // SWAPMOVE(t3,t5, 0x0000ffff, 16)
    swapmove t4, t6, t4, t6, t1, 16, s1 // SWAPMOVE(t4,t6, 0x0000ffff, 16)
    ret
/******************************************************************************
* Round function of the AES-256 key schedule in the classical representation
* for rounds i s.t. i % 2 == 0. The key words are contained in registers a2-a5.
*
* In:  a2-a5 = even-half key words; t3 = last word of the odd half (SubWord +
*      RotWord are applied to it byte-wise below); a6 = sbox_lut address;
*      a7 = current rconst pointer (advanced by 4).
* Out: a2-a5 updated; the 4 new words are pushed on the stack (sp -= 16) to
*      be packed later.
* Clobbers: s8, t6.
* Each sbox lookup uses an aligned word load: the address is rounded down to
* a multiple of 4 and the byte is extracted with a shift computed from the
* low 2 address bits.
******************************************************************************/
aes256_rfunc_ks_0:
    andi s8, t3, 0xff           // s8 <- t3 & 0xff
    andi t6, s8, 0xfc           // ensure a 4-byte aligned address
    add t6, t6, a6              // t6 points to the right sbox address
    lw t6, 0(t6)                // t6 <- word containing sbox[t3 & 0xff]
    andi s8, s8, 0x03           // mask to extract the shift value
    slli s8, s8, 3              // shift to compute the shift value
    srl t6, t6, s8              // shift the 32-bit word
    andi t6, t6, 0xff           // extract the right byte
    slli t6, t6, 24             // t6 <- sbox[t3 & 0xff] << 24
    xor a2, a2, t6              // a2 <- a2 ^ (sbox[t3 & 0xff] << 24)
    srli s8, t3, 8              // s8 <- t3 >> 8
    andi s8, s8, 0xff           // s8 <- (t3 >> 8) & 0xff
    andi t6, s8, 0xfc
    add t6, t6, a6              // t6 points to the right sbox address
    lw t6, 0(t6)                // t6 <- word containing sbox[(t3 >> 8) & 0xff]
    andi s8, s8, 0x03
    slli s8, s8, 3
    srl t6, t6, s8
    andi t6, t6, 0xff
    xor a2, a2, t6              // a2 <- a2 ^ sbox[(t3 >> 8) & 0xff]
    srli s8, t3, 24             // s8 <- t3 >> 24
    andi s8, s8, 0xff           // s8 <- (t3 >> 24) & 0xff
    andi t6, s8, 0xfc
    add t6, t6, a6              // t6 points to the right sbox address
    lw t6, 0(t6)                // t6 <- word containing sbox[(t3 >> 24) & 0xff]
    andi s8, s8, 0x03
    slli s8, s8, 3
    srl t6, t6, s8
    andi t6, t6, 0xff
    slli t6, t6, 16             // t6 <- sbox[(t3 >> 24) & 0xff] << 16
    xor a2, a2, t6              // a2 <- a2 ^ (sbox[(t3 >> 24) & 0xff] << 16)
    srl s8, t3, 16              // s8 <- t3 >> 16
    andi s8, s8, 0xff           // s8 <- (t3 >> 16) & 0xff
    andi t6, s8, 0xfc
    add t6, t6, a6              // t6 points to the right sbox address
    lw t6, 0(t6)                // t6 <- word containing sbox[(t3 >> 16) & 0xff]
    andi s8, s8, 0x03
    slli s8, s8, 3
    srl t6, t6, s8
    andi t6, t6, 0xff
    slli t6, t6, 8              // t6 <- sbox[(t3 >> 16) & 0xff] << 8
    xor a2, a2, t6              // a2 <- a2 ^ ( sbox[(t3 >> 16) & 0xff] << 8)
    lw t6, 0(a7)                // load rconst
    xor a2, a2, t6              // add rconst
    addi a7, a7, 4              // point to the next rconst
    xor a3, a3, a2              // update the rkey words
    xor a4, a4, a3              // update the rkey words
    xor a5, a5, a4              // update the rkey words
    addi sp, sp, -16            // store rkey words on stack, to be packed later
    sw a2, 0(sp)                // store 1st rkey word
    sw a3, 4(sp)                // store 2nd rkey word
    sw a4, 8(sp)                // store 3rd rkey word
    sw a5, 12(sp)               // store 4th rkey word
    ret
/******************************************************************************
* Round function of the AES-256 key schedule in the classical representation
* for rounds i s.t. i % 2 == 1. The key words are contained in registers t0-t3.
*
* In:  t0-t3 = odd-half key words; a5 = last word of the even half. SubWord
*      is applied to a5 byte-wise WITHOUT RotWord and WITHOUT a round
*      constant (note each sbox byte is XORed back at its original position),
*      as required for the AES-256 middle rounds.
* Out: t0-t3 updated; the 4 new words pushed on the stack (sp -= 16) to be
*      packed later.
* Clobbers: s8, t6.
******************************************************************************/
aes256_rfunc_ks_1:
    andi s8, a5, 0xff           // s8 <- a5 & 0xff
    andi t6, s8, 0xfc           // ensure a 4-byte aligned address
    add t6, t6, a6              // t6 points to the right sbox address
    lw t6, 0(t6)                // t6 <- word containing sbox[a5 & 0xff]
    andi s8, s8, 0x03           // mask to extract the shift value
    slli s8, s8, 3              // shift to compute the shift value
    srl t6, t6, s8              // shift the 32-bit word
    andi t6, t6, 0xff           // extract the right byte
    xor t0, t0, t6              // t0 <- t0 ^ (sbox[a5 & 0xff])
    srli s8, a5, 8              // s8 <- a5 >> 8
    andi s8, s8, 0xff           // s8 <- (a5 >> 8) & 0xff
    andi t6, s8, 0xfc
    add t6, t6, a6              // t6 points to the right sbox address
    lw t6, 0(t6)                // t6 <- word containing sbox[(a5 >> 8) & 0xff]
    andi s8, s8, 0x03
    slli s8, s8, 3
    srl t6, t6, s8
    andi t6, t6, 0xff
    slli t6, t6, 8
    xor t0, t0, t6              // t0 <- t0 ^ (sbox[(a5 >> 8) & 0xff] << 8)
    srli s8, a5, 24             // s8 <- a5 >> 24
    andi s8, s8, 0xff           // s8 <- (a5 >> 24) & 0xff
    andi t6, s8, 0xfc
    add t6, t6, a6              // t6 points to the right sbox address
    lw t6, 0(t6)                // t6 <- word containing sbox[(a5 >> 24) & 0xff]
    andi s8, s8, 0x03
    slli s8, s8, 3
    srl t6, t6, s8
    andi t6, t6, 0xff
    slli t6, t6, 24             // t6 <- sbox[(a5 >> 24) & 0xff] << 24
    xor t0, t0, t6              // t0 <- t0 ^ (sbox[(a5 >> 24) & 0xff] << 24)
    srl s8, a5, 16              // s8 <- a5 >> 16
    andi s8, s8, 0xff           // s8 <- (a5 >> 16) & 0xff
    andi t6, s8, 0xfc
    add t6, t6, a6              // t6 points to the right sbox address
    lw t6, 0(t6)                // t6 <- word containing sbox[(a5 >> 16) & 0xff]
    andi s8, s8, 0x03
    slli s8, s8, 3
    srl t6, t6, s8
    andi t6, t6, 0xff
    slli t6, t6, 16             // t6 <- sbox[(a5 >> 16) & 0xff] << 16
    xor t0, t0, t6              // t0 <- t0 ^ ( sbox[(a5 >> 16) & 0xff] << 16)
    xor t1, t1, t0              // update the rkey words
    xor t2, t2, t1              // update the rkey words
    xor t3, t3, t2              // update the rkey words
    addi sp, sp, -16            // store rkey words on stack, to be packed later
    sw t0, 0(sp)                // store 5th rkey word
    sw t1, 4(sp)                // store 6th rkey word
    sw t2, 8(sp)                // store 7th rkey word
    sw t3, 12(sp)               // store 8th rkey word
    ret
/******************************************************************************
* AES-128 key schedule according to the barrel-shiftrows representation.
*
* The function prototype is:
* - void aes128_keyschedule_lut(uint32_t rkeys[352], const uint8_t key[16]);
*
* Packs the master key as round key 0, then runs 10 iterations of the
* classical key expansion, packing each new round key into the
* barrel-shiftrows layout (128 bytes per round key, 11 round keys = 352
* words, matching the prototype).
* Round keys 1-10 have NOTs applied to selected slices (offsets 4, 8, 24, 28
* of each round key) to speed up the bitsliced sbox; round key 0, which is
* added before the first sbox, is stored without NOTs.
* Uses s0-s11 and ra, all saved/restored on a 64-byte stack frame.
******************************************************************************/
.globl aes128_keyschedule_lut
.type aes128_keyschedule_lut, %function
.align 2
aes128_keyschedule_lut:
    addi sp, sp, -64            // allocate space on the stack
    sw a0, 0(sp)                // save context
    sw a1, 4(sp)                // save context
    sw s0, 8(sp)                // save context
    sw s1, 12(sp)               // save context
    sw s2, 16(sp)               // save context
    sw s3, 20(sp)               // save context
    sw s4, 24(sp)               // save context
    sw s5, 28(sp)               // save context
    sw s6, 32(sp)               // save context
    sw s7, 36(sp)               // save context
    sw s8, 40(sp)               // save context
    sw s9, 44(sp)               // save context
    sw s10, 48(sp)              // save context
    sw s11, 52(sp)              // save context
    sw ra, 56(sp)               // save context
    lw a2, 0(a1)                // load 1st key word
    lw a3, 4(a1)                // load 2nd key word
    lw a4, 8(a1)                // load 3rd key word
    lw a5, 12(a1)               // load 4th key word
    addi s0, zero, 10           // set key_expansion loop counter
    la a6, sbox_lut             // load sbox address
    la a7, rconst_lut           // load rconst address
    li t0, 0x00ff00ff           // load mask for SWAPMOVE routines
    li t1, 0x0000ffff           // load mask for SWAPMOVE routines
    li t2, 0x80808080           // mask for packing_rkey_loop
    srli s4, t2, 1              // mask for packing_rkey_loop
    srli s5, t2, 2              // mask for packing_rkey_loop
    srli s6, t2, 3              // mask for packing_rkey_loop
    srli s7, t2, 4              // mask for packing_rkey_loop
    srli s8, t2, 5              // mask for packing_rkey_loop
    srli s9, t2, 6              // mask for packing_rkey_loop
    srli s10, t2, 7             // mask for packing_rkey_loop
    // pack round key 0 (the master key itself) — no NOTs here
    jal swapmove_rkey
    spread_bits srli, srli, srli, t2    // extract and spread bits of t3-t6
    store_rkeys s1, s2, s3, s11, 0(a0)  // store round key words
    spread_bits slli, srli, srli, s4    // extract and spread bits of t3-t6
    store_rkeys s1, s2, s3, s11, 4(a0)  // store round key words
    spread_bits srli, slli, srli, s5    // extract and spread bits of t3-t6
    jal redundant_code
    store_rkeys s1, s2, s3, s11, 24(a0) // store round key words
    spread_bits slli, slli, slli, s10   // extract and spread bits of t3-t6
    store_rkeys s1, s2, s3, s11, 28(a0) // store round key words
    addi a0, a0, 128            // points to the next rkey
key_expansion:                  // key expansion routine
    addi s0, s0, -1             // dec key_expansion loop counter
    // SubWord(RotWord(a5)) ^ rconst, folded into a2, one byte at a time
    andi t3, a5, 0xff           // t3 <- a5 & 0xff
    andi t4, t3, 0xfc           // ensure a 4-byte aligned address
    add t4, t4, a6              // t4 points to the right sbox address
    lw t4, 0(t4)                // t4 <- word containing sbox[a5 & 0xff]
    andi t3, t3, 0x03           // mask to extract the shift value
    slli t3, t3, 3              // shift to compute the shift value
    srl t4, t4, t3              // shift the 32-bit word
    andi t4, t4, 0xff           // extract the right byte
    slli t4, t4, 24             // t4 <- sbox[a5 & 0xff] << 24
    xor a2, a2, t4              // a2 <- a2 ^ (sbox[a5 & 0xff] << 24)
    srli t3, a5, 8              // t3 <- a5 >> 8
    andi t3, t3, 0xff           // t3 <- (a5 >> 8) & 0xff
    andi t4, t3, 0xfc           // ensure a 4-byte aligned address
    add t4, t4, a6              // t4 points to the right sbox address
    lw t4, 0(t4)                // t4 <- word containing sbox[(a5 >> 8) & 0xff]
    andi t3, t3, 0x03           // mask to extract the shift value
    slli t3, t3, 3              // shift to compute the shift value
    srl t4, t4, t3              // shift the 32-bit word
    andi t4, t4, 0xff           // extract the right byte
    xor a2, a2, t4              // a2 <- a2 ^ t4
    srli t3, a5, 24             // t3 <- a5 >> 24
    andi t3, t3, 0xff           // t3 <- (a5 >> 24) & 0xff
    andi t4, t3, 0xfc           // ensure a 4-byte aligned address
    add t4, t4, a6              // t4 points to the right sbox address
    lw t4, 0(t4)                // t4 <- word containing sbox[(a5 >> 24) & 0xff]
    andi t3, t3, 0x03           // mask to extract the shift value
    slli t3, t3, 3              // shift to compute the shift value
    srl t4, t4, t3              // shift the 32-bit word
    andi t4, t4, 0xff           // extract the right byte
    slli t4, t4, 16             // t4 <- sbox[(a5 >> 24) & 0xff] << 16
    xor a2, a2, t4              // a2 <- a2 ^ (sbox[(a5 >> 24) & 0xff] << 16)
    srli t3, a5, 16             // t3 <- a5 >> 16
    andi t3, t3, 0xff           // t3 <- (a5 >> 16) & 0xff
    andi t4, t3, 0xfc           // ensure a 4-byte aligned address
    add t4, t4, a6              // t4 points to the right sbox address
    lw t4, 0(t4)                // t4 <- word containing sbox[(a5 >> 16) & 0xff]
    andi t3, t3, 0x03           // mask to extract the shift value
    slli t3, t3, 3              // shift to compute the shift value
    srl t4, t4, t3              // shift the 32-bit word
    andi t4, t4, 0xff           // extract the right byte
    slli t4, t4, 8              // t4 <- sbox[(a5 >> 16) & 0xff] << 8
    xor a2, a2, t4              // a2 <- a2 ^ ( sbox[(a5 >> 16) & 0xff] << 8)
    lw t4, 0(a7)                // load rconst
    xor a2, a2, t4              // add rconst
    addi a7, a7, 4              // point to the next rconst
    xor a3, a3, a2              // update the rkey words
    xor a4, a4, a3              // update the rkey words
    xor a5, a5, a4              // update the rkey words
    // pack the new round key; NOTs on the slices the bitsliced sbox expects
    jal swapmove_rkey
    spread_bits srli, srli, srli, t2    // extract and spread bits of t3-t6
    store_rkeys s1, s2, s3, s11, 0(a0)  // store round key words
    spread_bits slli, srli, srli, s4    // extract and spread bits of t3-t6
    not_rkeys s1, s2, s3, s11           // apply NOTs to speedup the sbox
    store_rkeys s1, s2, s3, s11, 4(a0)  // store round key words
    spread_bits srli, slli, srli, s5    // extract and spread bits of t3-t6
    not_rkeys s1, s2, s3, s11           // apply NOTs to speedup the sbox
    jal redundant_code
    not_rkeys s1, s2, s3, s11           // apply NOTs to speedup the sbox
    store_rkeys s1, s2, s3, s11, 24(a0) // store round key words
    spread_bits slli, slli, slli, s10   // extract and spread bits of t3-t6
    not_rkeys s1, s2, s3, s11           // apply NOTs to speedup the sbox
    store_rkeys s1, s2, s3, s11, 28(a0) // store round key words
    addi a0, a0, 128            // points to the next rkey
    bne s0, zero, key_expansion // loop until necessary
    lw a0, 0(sp)                // restore context
    lw a1, 4(sp)                // restore context
    lw s0, 8(sp)                // restore context
    lw s1, 12(sp)               // restore context
    lw s2, 16(sp)               // restore context
    lw s3, 20(sp)               // restore context
    lw s4, 24(sp)               // restore context
    lw s5, 28(sp)               // restore context
    lw s6, 32(sp)               // restore context
    lw s7, 36(sp)               // restore context
    lw s8, 40(sp)               // restore context
    lw s9, 44(sp)               // restore context
    lw s10, 48(sp)              // restore context
    lw s11, 52(sp)              // restore context
    lw ra, 56(sp)               // restore context
    addi sp, sp, 64             // restore stack pointer
    ret                         // exit
.size aes128_keyschedule_lut,.-aes128_keyschedule_lut
/******************************************************************************
* AES-256 key schedule according to the barrel-shiftrows representation.
*
* The function prototype is:
* - void aes256_keyschedule_lut(uint32_t rkeys[480], const uint8_t key[32]);
*
* Registers: a0 = output rkeys array, a1 = 32-byte master key.
* The round keys are first derived with LUT-based round functions
* (aes256_rfunc_ks_0/1, defined elsewhere in this file), then repacked into
* the bitsliced barrel-shiftrows representation by aes256_packing_rkeys
* (1 + 14 = 15 round keys in total).
*
* NOTE(review): the stack offsets used after the aes256_rfunc_ks_* calls
* (212(sp), the +192/-16/+304 adjustments and the negative restore offsets)
* only balance if those routines return with sp lowered by the size of the
* key material they produced on the stack -- confirm against their
* definitions before modifying any offset below.
******************************************************************************/
.globl aes256_keyschedule_lut
.type aes256_keyschedule_lut, %function
.align 2
aes256_keyschedule_lut:
    addi sp, sp, -64 // allocate space on the stack
    sw a0, 0(sp) // save output (rkeys) pointer
    sw a1, 4(sp) // save input (key) pointer
    sw s0, 8(sp) // save callee-saved register
    sw s1, 12(sp) // save callee-saved register
    sw s2, 16(sp) // save callee-saved register
    sw s3, 20(sp) // save callee-saved register
    sw s4, 24(sp) // save callee-saved register
    sw s5, 28(sp) // save callee-saved register
    sw s6, 32(sp) // save callee-saved register
    sw s7, 36(sp) // save callee-saved register
    sw s8, 40(sp) // save callee-saved register
    sw s9, 44(sp) // save callee-saved register
    sw s10, 48(sp) // save callee-saved register
    sw s11, 52(sp) // save callee-saved register
    sw ra, 56(sp) // save return address
    lw a2, 0(a1) // load 1st key word
    lw a3, 4(a1) // load 2nd key word
    lw a4, 8(a1) // load 3rd key word
    lw a5, 12(a1) // load 4th key word
    lw t0, 16(a1) // load 5th key word
    lw t1, 20(a1) // load 6th key word
    lw t2, 24(a1) // load 7th key word
    lw t3, 28(a1) // load 8th key word
    la a6, sbox_lut // load sbox LUT address (defined elsewhere)
    la a7, rconst_lut // load round-constant LUT address (defined elsewhere)
    jal aes256_rfunc_ks_0 // AES-256 alternates two ks round functions
    jal aes256_rfunc_ks_1
    jal aes256_rfunc_ks_0
    jal aes256_rfunc_ks_1
    jal aes256_rfunc_ks_0
    jal aes256_rfunc_ks_1
    jal aes256_rfunc_ks_0
    jal aes256_rfunc_ks_1
    jal aes256_rfunc_ks_0
    jal aes256_rfunc_ks_1
    jal aes256_rfunc_ks_0
    jal aes256_rfunc_ks_1
    jal aes256_rfunc_ks_0 // 13 calls in total
    // Now pack all round keys to match the barrel-shiftrows representation
    li t0, 0x00ff00ff // load mask for SWAPMOVE routines
    li t1, 0x0000ffff // load mask for SWAPMOVE routines
    li t2, 0x80808080 // mask for packing_rkey_loop (bit 7 of each byte)
    srli s4, t2, 1 // mask 0x40404040 for packing_rkey_loop
    srli s5, t2, 2 // mask 0x20202020 for packing_rkey_loop
    srli s6, t2, 3 // mask 0x10101010 for packing_rkey_loop
    srli s7, t2, 4 // mask 0x08080808 for packing_rkey_loop
    srli s8, t2, 5 // mask 0x04040404 for packing_rkey_loop
    srli s9, t2, 6 // mask 0x02020202 for packing_rkey_loop
    srli s10, t2, 7 // mask 0x01010101 for packing_rkey_loop
    lw a2, 0(a1) // load 1st key word
    lw a3, 4(a1) // load 2nd key word
    lw a4, 8(a1) // load 3rd key word
    lw a5, 12(a1) // load 4th key word
    jal swapmove_rkey // byte/halfword reordering (defined elsewhere)
    spread_bits srli, srli, srli, t2 // extract and spread bits of t3-t6
    store_rkeys s1, s2, s3, s11, 0(a0) // store round key words
    spread_bits slli, srli, srli, s4 // extract and spread bits of t3-t6
    store_rkeys s1, s2, s3, s11, 4(a0) // store round key words
    spread_bits srli, slli, srli, s5 // extract and spread bits of t3-t6
    jal redundant_code // NOTE(review): defined elsewhere; presumably the
                       // shared middle spread_bits/store_rkeys steps for
                       // offsets 8..20(a0) -- confirm
    store_rkeys s1, s2, s3, s11, 24(a0) // store round key words
    spread_bits slli, slli, slli, s10 // extract and spread bits of t3-t6
    store_rkeys s1, s2, s3, s11, 28(a0) // store round key words
    addi a0, a0, 128 // points to the next rkey
    lw a1, 212(sp) // restore key address (4(sp) in the original frame; the
                   // ks routines left sp lowered -- see NOTE in header)
    addi s0, zero, 14 // 14 remaining round keys to pack (15 total)
    addi sp, sp, 192 // points to the 1st rkey to pack
aes256_packing_rkeys:
    addi s0, s0, -1 // dec loop counter
    jal swapmove_rkey // byte/halfword reordering (defined elsewhere)
    spread_bits srli, srli, srli, t2 // extract and spread bits of t3-t6
    store_rkeys s1, s2, s3, s11, 0(a0) // store round key words
    spread_bits slli, srli, srli, s4 // extract and spread bits of t3-t6
    not_rkeys s1, s2, s3, s11 // apply NOTs to speedup the sbox
    store_rkeys s1, s2, s3, s11, 4(a0) // store round key words
    spread_bits srli, slli, srli, s5 // extract and spread bits of t3-t6
    not_rkeys s1, s2, s3, s11 // apply NOTs to speedup the sbox
    jal redundant_code // see NOTE at the first call above
    not_rkeys s1, s2, s3, s11 // apply NOTs to speedup the sbox
    store_rkeys s1, s2, s3, s11, 24(a0) // store round key words
    spread_bits slli, slli, slli, s10 // extract and spread bits of t3-t6
    not_rkeys s1, s2, s3, s11 // apply NOTs to speedup the sbox
    store_rkeys s1, s2, s3, s11, 28(a0) // store round key words
    lw a2, 0(sp) // load next rkey words from the stack
    lw a3, 4(sp) // load next rkey words from the stack
    lw a4, 8(sp) // load next rkey words from the stack
    lw a5, 12(sp) // load next rkey words from the stack
    addi sp, sp, -16 // points to the next rkey
    addi a0, a0, 128 // points to the next rkey
    bne s0,zero,aes256_packing_rkeys// loop until all 14 rkeys are packed
    addi sp, sp, 304 // restore stack pointer (balances the 14 x -16 above)
    lw a0, -64(sp) // restore context (0(sp) in the original frame)
    lw a1, -60(sp) // restore context
    lw s0, -56(sp) // restore context
    lw s1, -52(sp) // restore context
    lw s2, -48(sp) // restore context
    lw s3, -44(sp) // restore context
    lw s4, -40(sp) // restore context
    lw s5, -36(sp) // restore context
    lw s6, -32(sp) // restore context
    lw s7, -28(sp) // restore context
    lw s8, -24(sp) // restore context
    lw s9, -20(sp) // restore context
    lw s10, -16(sp) // restore context
    lw s11, -12(sp) // restore context
    lw ra, -8(sp) // restore return address
    ret // exit
.size aes256_keyschedule_lut,.-aes256_keyschedule_lut
/******************************************************************************
 * (extraction artifact repaired) Begin second concatenated source file:
 * aadomn/aes -- riscv/barrel_shiftrows/aes_encrypt.S (54,401 bytes)
 ******************************************************************************/
/******************************************************************************
* Bitsliced AES-128 and AES-256 (encryption-only) implementations in RV32I
* assembly, using the base instruction set only (no RISC-V extension needed).
*
* See the paper at https://eprint.iacr.org/2020/1123.pdf for more details.
*
* @author Alexandre Adomnicai, Nanyang Technological University, Singapore
* alexandre.adomnicai@ntu.edu.sg
*
* @date August 2020
******************************************************************************/
.text
/******************************************************************************
* Implementation of the SWAPMOVE technique for the packing/unpacking routines:
*   tmp  = ((in0 >> imm) ^ in1) & mask;
*   out1 = in1 ^ tmp;
*   out0 = in0 ^ (tmp << imm);
* i.e. it swaps the bits of in1 selected by 'mask' with the bits of in0
* selected by 'mask << imm'.
*
* - out0-out1 are output registers (callers typically use it in place,
*   i.e. out0 == in0 and out1 == in1).
* - in0-in1 are input registers.
* - mask is a register holding the bit mask.
* - imm is the shift index (must be an immediate value).
* - r0 is used as a temporary register.
******************************************************************************/
.macro swapmove out0,out1, in0,in1, mask, imm, r0
    srli \r0, \in0, \imm
    xor \r0, \r0, \in1
    and \r0, \r0, \mask
    xor \out1, \in1, \r0
    slli \r0, \r0, \imm
    xor \out0, \in0, \r0
.endm
/******************************************************************************
* Addition (XOR) of the round key on a quarter of the internal state.
*
* - b0-b7 contain a quarter of the state (8 x 32-bit slices).
* - rk is an offset(base) operand pointing to the round key, e.g. 0(a0);
*   the '0+\rk' form below expands to '0+0(a0)', '8+0(a0)', etc., so the
*   eight key words are read at rk, rk+4, ..., rk+28.
* - r0-r1 are temporary registers.
******************************************************************************/
.macro addroundkey b0,b1,b2,b3,b4,b5,b6,b7, rk, r0,r1
    lw \r0, 0+\rk
    lw \r1, 4+\rk
    xor \b0, \b0, \r0
    xor \b1, \b1, \r1
    lw \r0, 8+\rk
    lw \r1, 12+\rk
    xor \b2, \b2, \r0
    xor \b3, \b3, \r1
    lw \r0, 16+\rk
    lw \r1, 20+\rk
    xor \b4, \b4, \r0
    xor \b5, \b5, \r1
    lw \r0, 24+\rk
    lw \r1, 28+\rk
    xor \b6, \b6, \r0
    xor \b7, \b7, \r1
.endm
/******************************************************************************
* Computes the AES S-box on a quarter of the internal state (8 bit slices,
* 32 S-box evaluations in parallel) as a Boolean circuit.
* Credits to Ko Stoffelen (see https://github.com/Ko-/riscvcrypto/).
*
* - b0-b7 contain a quarter of the state (inputs U0..U7, outputs S0..S7).
* - r0-r17 are temporary registers.
*
* The '^ 1' terms appearing in the S1/S2/S6/S7 comments are NOT gates of the
* original circuit that are deliberately omitted here: they are absorbed
* into the round keys at key-schedule time (see the 'apply NOTs to speedup
* the sbox' not_rkeys steps in the key-schedule code).
* Each line's comment gives the gate of the reference circuit it implements;
* do not reorder -- the dataflow reuses every temporary aggressively.
******************************************************************************/
.macro sbox b0,b1,b2,b3,b4,b5,b6,b7, r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15,r16,r17
    xor \r0, \b3, \b5 // Exec y14 = U3 ^ U5 into r0
    xor \r1, \b0, \b6 // Exec y13 = U0 ^ U6 into r1
    xor \r2, \r1, \r0 // Exec y12 = y13 ^ y14 into r2
    xor \r3, \b4, \r2 // Exec t1 = U4 ^ y12 into r3
    xor \r4, \r3, \b5 // Exec y15 = t1 ^ U5 into r4
    and \r5, \r2, \r4 // Exec t2 = y12 & y15 into r5
    xor \r6, \r4, \b7 // Exec y6 = y15 ^ U7 into r6
    xor \r7, \r3, \b1 // Exec y20 = t1 ^ U1 into r7
    xor \r8, \b0, \b3 // Exec y9 = U0 ^ U3 into r8
    xor \r9, \r7, \r8 // Exec y11 = y20 ^ y9 into r9
    and \r10, \r8, \r9 // Exec t12 = y9 & y11 into r10
    xor \r11, \b7, \r9 // Exec y7 = U7 ^ y11 into r11
    xor \r12, \b0, \b5 // Exec y8 = U0 ^ U5 into r12
    xor \r13, \b1, \b2 // Exec t0 = U1 ^ U2 into r13
    xor \r14, \r4, \r13 // Exec y10 = y15 ^ t0 into r14
    xor \r15, \r14, \r9 // Exec y17 = y10 ^ y11 into r15
    and \r16, \r0, \r15 // Exec t13 = y14 & y17 into r16
    xor \r17, \r16, \r10 // Exec t14 = t13 ^ t12 into r17
    xor \b1, \r14, \r12 // Exec y19 = y10 ^ y8 into b1
    and \b2, \r12, \r14 // Exec t15 = y8 & y10 into b2
    xor \b2, \b2, \r10 // Exec t16 = t15 ^ t12 into b2
    xor \b4, \r13, \r9 // Exec y16 = t0 ^ y11 into b4
    xor \b5, \r1, \b4 // Exec y21 = y13 ^ y16 into b5
    and \r3, \r1, \b4 // Exec t7 = y13 & y16 into r3
    xor \r10, \b0, \b4 // Exec y18 = U0 ^ y16 into r10
    xor \r13, \r13, \b7 // Exec y1 = t0 ^ U7 into r13
    xor \b3, \r13, \b3 // Exec y4 = y1 ^ U3 into b3
    and \r16, \b3, \b7 // Exec t5 = y4 & U7 into r16
    xor \r16, \r16, \r5 // Exec t6 = t5 ^ t2 into r16
    xor \r16, \r16, \b2 // Exec t18 = t6 ^ t16 into r16
    xor \b1, \r16, \b1 // Exec t22 = t18 ^ y19 into b1
    xor \b0, \r13, \b0 // Exec y2 = y1 ^ U0 into b0
    and \r16, \b0, \r11 // Exec t10 = y2 & y7 into r16
    xor \r16, \r16, \r3 // Exec t11 = t10 ^ t7 into r16
    xor \b2, \r16, \b2 // Exec t20 = t11 ^ t16 into b2
    xor \b2, \b2, \r10 // Exec t24 = t20 ^ y18 into b2
    xor \b6, \r13, \b6 // Exec y5 = y1 ^ U6 into b6
    and \r10, \b6, \r13 // Exec t8 = y5 & y1 into r10
    xor \r3, \r10, \r3 // Exec t9 = t8 ^ t7 into r3
    xor \r3, \r3, \r17 // Exec t19 = t9 ^ t14 into r3
    xor \b5, \r3, \b5 // Exec t23 = t19 ^ y21 into b5
    xor \r3, \b6, \r12 // Exec y3 = y5 ^ y8 into r3
    and \r10, \r3, \r6 // Exec t3 = y3 & y6 into r10
    xor \r5, \r10, \r5 // Exec t4 = t3 ^ t2 into r5
    xor \r5, \r5, \r7 // Exec t17 = t4 ^ y20 into r5
    xor \r5, \r5, \r17 // Exec t21 = t17 ^ t14 into r5
    and \r7, \r5, \b5 // Exec t26 = t21 & t23 into r7
    xor \r10, \b2, \r7 // Exec t27 = t24 ^ t26 into r10
    xor \r7, \b1, \r7 // Exec t31 = t22 ^ t26 into r7
    xor \r5, \r5, \b1 // Exec t25 = t21 ^ t22 into r5
    and \r16, \r5, \r10 // Exec t28 = t25 & t27 into r16
    xor \b1, \r16, \b1 // Exec t29 = t28 ^ t22 into b1
    and \r17, \b1, \b0 // Exec z14 = t29 & y2 into r17
    and \r11, \b1, \r11 // Exec z5 = t29 & y7 into r11
    xor \r16, \b5, \b2 // Exec t30 = t23 ^ t24 into r16
    and \r7, \r7, \r16 // Exec t32 = t31 & t30 into r7
    xor \r7, \r7, \b2 // Exec t33 = t32 ^ t24 into r7
    xor \r16, \r10, \r7 // Exec t35 = t27 ^ t33 into r16
    and \b2, \b2, \r16 // Exec t36 = t24 & t35 into b2
    xor \r10, \r10, \b2 // Exec t38 = t27 ^ t36 into r10
    and \r10, \b1, \r10 // Exec t39 = t29 & t38 into r10
    xor \r5, \r5, \r10 // Exec t40 = t25 ^ t39 into r5
    xor \r10, \b1, \r5 // Exec t43 = t29 ^ t40 into r10
    and \b4, \r10, \b4 // Exec z3 = t43 & y16 into b4
    xor \r11, \b4, \r11 // Exec tc12 = z3 ^ z5 into r11
    and \r1, \r10, \r1 // Exec z12 = t43 & y13 into r1
    and \b6, \r5, \b6 // Exec z13 = t40 & y5 into b6
    and \r10, \r5, \r13 // Exec z4 = t40 & y1 into r10
    xor \b4, \b4, \r10 // Exec tc6 = z3 ^ z4 into b4
    xor \b5, \b5, \r7 // Exec t34 = t23 ^ t33 into b5
    xor \b2, \b2, \b5 // Exec t37 = t36 ^ t34 into b2
    xor \b5, \r5, \b2 // Exec t41 = t40 ^ t37 into b5
    and \r5, \b5, \r14 // Exec z8 = t41 & y10 into r5
    and \r10, \b5, \r12 // Exec z17 = t41 & y8 into r10
    xor \r12, \r7, \b2 // Exec t44 = t33 ^ t37 into r12
    and \r4, \r12, \r4 // Exec z0 = t44 & y15 into r4
    and \r2, \r12, \r2 // Exec z9 = t44 & y12 into r2
    and \r3, \b2, \r3 // Exec z10 = t37 & y3 into r3
    and \b2, \b2, \r6 // Exec z1 = t37 & y6 into b2
    xor \b2, \b2, \r4 // Exec tc5 = z1 ^ z0 into b2
    xor \r13, \b4, \b2 // Exec tc11 = tc6 ^ tc5 into r13
    and \b3, \r7, \b3 // Exec z11 = t33 & y4 into b3
    xor \b1, \b1, \r7 // Exec t42 = t29 ^ t33 into b1
    xor \b5, \b1, \b5 // Exec t45 = t42 ^ t41 into b5
    and \r6, \b5, \r15 // Exec z7 = t45 & y17 into r6
    xor \b4, \r6, \b4 // Exec tc8 = z7 ^ tc6 into b4
    and \r0, \b5, \r0 // Exec z16 = t45 & y14 into r0
    and \b5, \b1, \r9 // Exec z6 = t42 & y11 into b5
    xor \b5, \b5, \b4 // Exec tc16 = z6 ^ tc8 into b5
    and \b1, \b1, \r8 // Exec z15 = t42 & y9 into b1
    xor \r6, \b1, \b5 // Exec tc20 = z15 ^ tc16 into r6
    xor \r0, \b1, \r0 // Exec tc1 = z15 ^ z16 into r0
    xor \b1, \r3, \r0 // Exec tc2 = z10 ^ tc1 into b1
    xor \r15, \b1, \b3 // Exec tc21 = tc2 ^ z11 into r15
    xor \r2, \r2, \b1 // Exec tc3 = z9 ^ tc2 into r2
    xor \b0, \r2, \b5 // Exec S0 = tc3 ^ tc16 into b0
    xor \b3, \r2, \r13 // Exec S3 = tc3 ^ tc11 into b3
    xor \b1, \b3, \b5 // Exec S1 = S3 ^ tc16 ^ 1 into b1 (NOT in rkey)
    xor \r0, \b6, \r0 // Exec tc13 = z13 ^ tc1 into r0
    and \b5, \r7, \b7 // Exec z2 = t33 & U7 into b5
    xor \r14, \r4, \b5 // Exec tc4 = z0 ^ z2 into r14
    xor \b6, \r1, \r14 // Exec tc7 = z12 ^ tc4 into b6
    xor \b6, \r5, \b6 // Exec tc9 = z8 ^ tc7 into b6
    xor \b6, \b4, \b6 // Exec tc10 = tc8 ^ tc9 into b6
    xor \b2, \r17, \b6 // Exec tc17 = z14 ^ tc10 into b2
    xor \b5, \r15, \b2 // Exec S5 = tc21 ^ tc17 into b5
    xor \b2, \b2, \r6 // Exec tc26 = tc17 ^ tc20 into b2
    xor \b2, \b2, \r10 // Exec S2 = tc26 ^ z17 ^ 1 into b2 (NOT in rkey)
    xor \r14, \r14, \r11 // Exec tc14 = tc4 ^ tc12 into r14
    xor \r0, \r0, \r14 // Exec tc18 = tc13 ^ tc14 into r0
    xor \b6, \b6, \r0 // Exec S6 = tc10 ^ tc18 ^ 1 into b6 (NOT in rkey)
    xor \b7, \r1, \r0 // Exec S7 = z12 ^ tc18 ^ 1 into b7 (NOT in rkey)
    xor \b4, \r14, \b3 // Exec S4 = tc14 ^ S3 into b4
.endm
/******************************************************************************
* Computes a 32-bit rotation to the right: out = (in >> imm) | (in << 32-imm).
*
* - out is the output register (may alias in: in is fully consumed before
*   out is written, provided r0/r1 do not alias in).
* - in is the input register.
* - imm is the shift index (must be an immediate value in 1..31; imm = 0
*   would produce an invalid 'slli ..., 32' operand).
* - r0, r1 are temporary registers; neither may alias in.
******************************************************************************/
.macro ror out, in, imm, r0, r1
    srli \r0, \in, \imm
    slli \r1, \in, 32-\imm
    or \out, \r0, \r1
.endm
/******************************************************************************
* Computes the ShiftRows operation on the entire state.
* Only 32-bit word rotations are required thanks to the barrel-shiftrows
* representation: row r (r = 1,2,3) is rotated right by 8*r bits, row 0 is
* left untouched.
*
* Requires:
* - s0-s7 to contain state[24]...state[31] (sp is then lowered by 128 so
*   that state[0] sits at 0(sp); state[0..23] must already be on the stack
*   at 0..92(sp) after the adjustment).
* At the output:
* - s0-s7 contain state[8]...state[15] (rotated by 8)
* - s8, s10 contain state[16], state[23] (rotated by 16)
* - s9, s11 contain state[24], state[31] (rotated by 24)
* - the rest of the state is stored on the stack (state[0..7] untouched at
*   0..28(sp); state[17..22] at 68..88(sp); state[25..30] at 100..120(sp))
* Clobbers t0, t1.
******************************************************************************/
.macro shiftrows
    addi sp, sp, -128 // rewind sp so state[0] is at 0(sp)
    ror s9, s0, 24, t0, t1 // state[24], kept in a register
    ror s1, s1, 24, t0, t1
    ror s2, s2, 24, t0, t1
    ror s3, s3, 24, t0, t1
    ror s4, s4, 24, t0, t1
    ror s5, s5, 24, t0, t1
    ror s6, s6, 24, t0, t1
    ror s11, s7, 24, t0, t1 // state[31], kept in a register
    sw s1, 100(sp) // state[25]
    sw s2, 104(sp) // state[26]
    sw s3, 108(sp) // state[27]
    sw s4, 112(sp) // state[28]
    sw s5, 116(sp) // state[29]
    sw s6, 120(sp) // state[30]
    lw s0, 64(sp) // load state[16]
    lw s1, 68(sp)
    lw s2, 72(sp)
    lw s3, 76(sp)
    lw s4, 80(sp)
    lw s5, 84(sp)
    lw s6, 88(sp)
    lw s7, 92(sp) // load state[23]
    ror s8, s0, 16, t0, t1 // state[16], kept in a register
    ror s1, s1, 16, t0, t1
    ror s2, s2, 16, t0, t1
    ror s3, s3, 16, t0, t1
    ror s4, s4, 16, t0, t1
    ror s5, s5, 16, t0, t1
    ror s6, s6, 16, t0, t1
    ror s10, s7, 16, t0, t1 // state[23], kept in a register
    sw s1, 68(sp) // state[17]
    sw s2, 72(sp) // state[18]
    sw s3, 76(sp) // state[19]
    sw s4, 80(sp) // state[20]
    sw s5, 84(sp) // state[21]
    sw s6, 88(sp) // state[22]
    lw s0, 32(sp) // load state[8]
    lw s1, 36(sp)
    lw s2, 40(sp)
    lw s3, 44(sp)
    lw s4, 48(sp)
    lw s5, 52(sp)
    lw s6, 56(sp)
    lw s7, 60(sp) // load state[15]
    ror s0, s0, 8, t0, t1 // state[8..15] stay in s0-s7
    ror s1, s1, 8, t0, t1
    ror s2, s2, 8, t0, t1
    ror s3, s3, 8, t0, t1
    ror s4, s4, 8, t0, t1
    ror s5, s5, 8, t0, t1
    ror s6, s6, 8, t0, t1
    ror s7, s7, 8, t0, t1
.endm
/******************************************************************************
* Computes the MixColumns operation on the entire state.
* Only XORs are required thanks to the barrel-shiftrows representation.
*
* Requires:
* - s0-s7 to contain state[8]...state[15]
* - s8, s10 to contain state[16], state[23]
* - s9, s11 to contain state[24], state[31]
* - the rest of the state stored on the stack (matches shiftrows' output)
* At the output:
* - s0-s7 contain state[0]...state[7]
* - the rest of the state is stored on the stack
* Clobbers a1-a7, t0-t6.
* In the comments, Si denotes bitsliced state word i; do not reorder --
* several registers are reused as accumulators across store/load pairs.
******************************************************************************/
.macro mixcolumns
    lw a1, 0(sp) // a1 <- S0
    lw a2, 4(sp) // a2 <- S1
    lw a3, 8(sp) // a3 <- S2
    lw a4, 12(sp) // a4 <- S3
    lw a5, 16(sp) // a5 <- S4
    lw a6, 20(sp) // a6 <- S5
    lw a7, 24(sp) // a7 <- S6
    lw t4, 28(sp) // t4 <- S7
    xor t0, a1, s0 // t0 <- S0 ^ S8
    xor t1, s0, s8 // t1 <- S8 ^ S16
    xor t2, s8, s9 // t2 <- S16 ^ S24
    xor t3, s9, a1 // t3 <- S24 ^ S0
    xor a2, s11, t4 // a2 <- S31 ^ S7 (overwrites S1)
    xor t4, t4, s7 // t4 <- S7 ^ S15
    xor t5, s7, s10 // t5 <- S15 ^ S23
    xor t6, s10, s11 // t6 <- S23 ^ S31
    xor a3, t0, t6 // a3 <- S0 ^ S8 ^ S23 ^ S31 (overwrites S2)
    xor s7, a3, s7 // s7 <- S0 ^ S8 ^ S23 ^ S31 ^ S15
    lw a3, 28(sp) // load S7
    xor a3, a3, t6 // a3 <- S23 ^ S31 ^ S7
    xor a3, a3, t1 // a3 <- S8 ^ S16 ^ S23 ^ S31 ^ S7
    sw a3, 60(sp) // store new S15
    xor a3, t2, t4 // a3 <- S16 ^ S24 ^ S7 ^ S15
    xor a3, a3, s11 // a3 <- S16 ^ S24 ^ S7 ^ S15 ^ S31
    sw a3, 92(sp) // store new S23
    lw s11, 88(sp) // load S22
    xor a3, t3, t4 // a3 <- S24 ^ S0 ^ S7 ^ S15
    xor a3, a3, s10 // a3 <- S24 ^ S0 ^ S7 ^ S15 ^ S23
    sw a3, 124(sp) // store new S31
    lw s10, 120(sp) // load S30
    xor a3, s10, a7 // a3 <- S30 ^ S6
    xor t6, t6, t2 // t6 <- S23 ^ S31 ^ S16 ^ S24
    xor t6, t6, a3 // t6 <- S23 ^ S31 ^ S16 ^ S24 ^ S30 ^ S6
    xor t6, t6, s6 // t6 <- S23 ^ S31 ^ S16 ^ S24 ^ S30 ^ S6 ^ S14
    sw t6, 88(sp) // store new S22
    xor t6, a7, s6 // t6 <- S6 ^ S14
    xor s10, s11, s10 // s10 <- S22 ^ S30
    xor a2, a2, t3 // a2 <- S31 ^ S7 ^ S24 ^ S0
    xor a2, a2, t6 // a2 <- S31 ^ S7 ^ S24 ^ S0 ^ S6 ^ S14
    xor a2, a2, s11 // a2 <- a2 ^ S22
    sw a2, 120(sp) // store new S30
    xor t5, t5, t1 // t5 <- S15 ^ S23 ^ S8 ^ S16
    xor t5, t5, s10 // t5 <- S15 ^ S23 ^ S8 ^ S16 ^ S22 ^ S30
    xor t5, t5, a7 // t5 <- S15 ^ S23 ^ S8 ^ S16 ^ S22 ^ S30 ^ S6
    sw t5, 56(sp) // store new S14
    xor t4, t4, t0 // t4 <- S7 ^ S15 ^ S0 ^ S8
    xor t4, t4, s10 // t4 <- S7 ^ S15 ^ S0 ^ S8 ^ S22 ^ S30
    xor t5, s6, s11 // t5 <- S14 ^ S22
    xor s6, t4, s6 // s6 <- S7 ^ S15 ^ S0 ^ S8 ^ S22 ^ S30 ^ S14
    lw s11, 84(sp) // load S21
    lw a7, 116(sp) // load S29
    xor a2, a7, s11 // a2 <- S29 ^ S21
    xor t4, a6, s5 // t4 <- S5 ^ S13
    xor a3, a3, t4 // a3 <- S30 ^ S6 ^ S5 ^ S13
    xor a3, a3, s11 // a3 <- a3 ^ S21
    sw a3, 116(sp) // store new S29
    xor a3, a7, a6 // a3 <- S29 ^ S5
    xor s10, s10, a3 // s10 <- S22 ^ S30 ^ S29 ^ S5
    xor s10, s10, s5 // s10 <- S22 ^ S30 ^ S29 ^ S5 ^ S13
    sw s10, 84(sp) // store new S21
    xor t5, t5, a2 // t5 <- S14 ^ S22 ^ S21 ^ S29
    xor t5, t5, a6 // t5 <- t5 ^ S5
    sw t5, 52(sp) // store new S13
    xor t5, s5, s11 // t5 <- S13 ^ S21
    xor t6, t6, t5 // t6 <- S6 ^ S14 ^ S13 ^ S21
    xor s5, t6, a7 // s5 <- S6 ^ S14 ^ S13 ^ S21 ^ S29
    lw s11, 80(sp) // load S20
    lw s10, 112(sp) // load S28
    xor t6, s10, s11 // t6 <- S28 ^ S20
    xor a7, s4, a5 // a7 <- S12 ^ S4
    xor a3, a3, t3 // a3 <- S29 ^ S5 ^ S24 ^ S0
    xor a3, a3, a7 // a3 <- S29 ^ S5 ^ S24 ^ S0 ^ S12 ^S4
    xor a3, a3, s11 // a3 <- S29 ^ S5 ^ S24 ^ S0 ^ S12 ^S4 ^ S20
    sw a3, 112(sp) // store new S28
    xor a3, s10, a5 // a3 <- S28 ^ S4
    xor a2, a2, t2 // a2 <- S21 ^ S29 ^ S16 ^ S24
    xor a2, a2, a3 // a2 <- S21 ^ S29 ^ S16 ^ S24 ^ S28 ^ S4
    xor a2, a2, s4 // a2 <- a2 ^ S12
    sw a2, 80(sp) // store new S20
    xor t5, t5, t1 // t5 <- S13 ^ S21 ^ S8 ^ S16
    xor t5, t5, t6 // t5 <- S13 ^ S21 ^ S8 ^ S16 ^ S20 ^ S28
    xor t5, t5, a5 // t5 <- S13 ^ S21 ^ S8 ^ S16 ^ S20 ^ S28 ^ S4
    sw t5, 48(sp) // store new S12
    xor a6, s4, s11 // a6 <- S12 ^ S20
    xor t4, t4, t0 // t4 <- S5 ^ S13 ^ S0 ^ S8
    xor t4, t4, s10 // t4 <- S5 ^ S13 ^ S0 ^ S8 ^ S28
    xor s4, t4, a6 // s4 <- S5 ^ S13 ^ S0 ^ S8 ^ S28 ^ S12 ^ S20
    lw s11, 76(sp) // load S19
    lw s10, 108(sp) // load S27
    xor t4, s3, a4 // t4 <- S11 ^ S3
    xor a4, s10, a4 // a4 <- S27 ^ S3
    xor a3, a3, t3 // a3 <- S28 ^ S4 ^ S24 ^S0
    xor a3, a3, t4 // a3 <- S28 ^ S4 ^ S24 ^S0 ^ S11 ^ S3
    xor a3, a3, s11 // a3 <- S28 ^ S4 ^ S24 ^S0 ^ S11 ^ S3 ^S19
    sw a3, 108(sp) // store new S27
    xor t6, t6, t2 // t6 <- S20 ^ S28 ^ S16 ^ S24
    xor t6, t6, t4 // t6 <- S20 ^ S28 ^ S16 ^ S24 ^ S11 ^ S3
    xor t6, t6, s10 // t6 <- S20 ^ S28 ^ S16 ^ S24 ^ S11 ^ S3 ^ S27
    sw t6, 76(sp) // store new S19
    xor a3, s10, s11 // a3 <- S27 ^ S19
    xor a6, a6, t1 // a6 <- S12 ^ S20 ^ S8 ^ S16
    xor a6, a6, a4 // a6 <- S12 ^ S20 ^ S8 ^ S16 ^ S27 ^S3
    xor a6, a6, s11 // a6 <- S12 ^ S20 ^ S8 ^ S16 ^ S27 ^S3 ^ S19
    sw a6, 44(sp) // store new S11
    xor a6, s11, s3 // a6 <- S19 ^ S11
    xor a7, a7, t0 // a7 <- S12 ^ S4 ^ S0 ^ S8
    xor a7, a7, a3 // a7 <- S12 ^ S4 ^ S0 ^ S8 ^ S19 ^ S27
    xor s3, a7, s3 // s3 <- S12 ^ S4 ^ S0 ^ S8 ^ S19 ^ S27 ^ S11
    lw s11, 72(sp) // load S18
    lw s10, 104(sp) // load S26
    lw a5, 8(sp) // load S2
    xor a7, s2, a5 // a7 <- S10 ^ S2
    xor a4, a4, a7 // a4 <- S27 ^ S3 ^ S10 ^ S2
    xor a4, a4, s11 // a4 <- S27 ^ S3 ^ S10 ^ S2 ^ S18
    sw a4, 104(sp) // store new S26
    xor a4, s10, a5 // a4 <- S26 ^ S2
    xor a3, a3, a4 // a3 <- S19 ^ S27 ^ S26 ^ S2
    xor a3, a3, s2 // a3 <- S19 ^ S27 ^ S26 ^ S2 ^ S10
    sw a3, 72(sp) // store new S18
    xor a3, s10, s11 // a3 <- S26 ^ S18
    xor a6, a6, a3 // a6 <- S11 ^ S19 ^ S26 ^ S18
    xor a6, a6, a5 // a6 <- S11 ^ S19 ^ S26 ^ S18 ^ S2
    sw a6, 40(sp) // store new S10
    xor t5, s2, s11 // t5 <- S10 ^ S18
    xor t4, t4, t5 // t4 <- S11 ^ S3 ^ S10 ^ S18
    xor s2, t4, s10 // s2 <- S11 ^ S3 ^ S10 ^ S18 ^ S26
    lw s11, 68(sp) // load S17
    lw s10, 100(sp) // load S25
    lw a5, 4(sp) // load S1
    xor a6, s1, a5 // a6 <- S9 ^ S1
    xor a4, a4, a6 // a4 <- S26 ^ S2 ^ S9 ^ S1
    xor a4, a4, s11 // a4 <- S26 ^ S2 ^ S9 ^ S1 ^ S17
    sw a4, 100(sp) // store new S25
    xor t4, s10, a5 // t4 <- S25 ^ S1
    xor a3, a3, t4 // a3 <- S26 ^ S18 ^ S25 ^ S1
    xor a3, a3, s1 // a3 <- S26 ^ S18 ^ S25 ^ S1 ^ S9
    sw a3, 68(sp) // store new S17
    xor t6, s10, s11 // t6 <- S25 ^ S17
    xor t5, t5, t6 // t5 <- S10 ^ S18 ^ S25 ^ S17
    xor t5, t5, a5 // t5 <- S10 ^ S18 ^ S25 ^ S17 ^ S1
    sw t5, 36(sp) // store new S9
    xor a2, s11, s1 // a2 <- S17 ^ S9
    xor a7, a7, a2 // a7 <- S10 ^ S2 ^ S17 ^ S9
    xor s1, a7, s10 // s1 <- S10 ^ S2 ^ S17 ^ S9 ^ S25
    xor t4, t4, t0 // t4 <- S25 ^ S1 ^ S8 ^ S0
    xor t4, t4, s8 // t4 <- S25 ^ S1 ^ S8 ^ S0 ^ S16
    sw t4, 96(sp) // store new S24
    xor t6, t6, t3 // t6 <- S25 ^ S17 ^ S24 ^ S0
    xor t6, t6, s0 // t6 <- S25 ^ S17 ^ S24 ^ S0 ^ S8
    sw t6, 64(sp) // store new S16
    xor a2, a2, t2 // a2 <- S17 ^ S9 ^ S24 ^ S16
    xor a2, a2, a1 // a2 <- S17 ^ S9 ^ S24 ^ S16 ^ S0
    sw a2, 32(sp) // store new S8
    xor a6, a6, t1 // a6 <- S9 ^ S1 ^ S8 ^ S16
    xor s0, a6, s9 // s0 <- S9 ^ S1 ^ S8 ^ S16 ^ S24
.endm
/******************************************************************************
* Encrypts 8 blocks at a time using AES-128, without any operation mode.
*
* The function prototype is:
* - void aes128_encrypt(uint8_t*, const uint8_t*, const uint32_t*)
*   a0 = output (128 bytes), a1 = input (8 x 16-byte blocks),
*   a2 = round keys in barrel-shiftrows representation.
*
* Frame layout: 192 bytes total = 64 bytes of saved registers (at the
* bottom) + 128 bytes (32 words) of bitsliced internal state. After the
* prologue, sp is raised by 64 so it doubles as the state index: sp is used
* as the loop pointer throughout, so all state offsets are relative to the
* current quarter being processed.
* Register conventions in the main loop: ra is reused as the round-quarter
* counter (40 = 10 rounds x 4 quarters; its real value is saved at 60 from
* the frame bottom), and a0 walks the round keys.
******************************************************************************/
.globl aes128_encrypt
.type aes128_encrypt, %function
.align 2
aes128_encrypt:
    addi sp, sp, -192 // allocate space on the stack
    sw ra, 60(sp) // save return address (ra is reused as loop counter)
    sw a1, 56(sp) // save input pointer
    sw a0, 52(sp) // save output pointer
    sw a2, 48(sp) // save rkeys pointer
    sw s0, 44(sp) // save callee-saved register
    sw s1, 40(sp) // save callee-saved register
    sw s2, 36(sp) // save callee-saved register
    sw s3, 32(sp) // save callee-saved register
    sw s4, 28(sp) // save callee-saved register
    sw s5, 24(sp) // save callee-saved register
    sw s6, 20(sp) // save callee-saved register
    sw s7, 16(sp) // save callee-saved register
    sw s8, 12(sp) // save callee-saved register
    sw s9, 8(sp) // save callee-saved register
    sw s10, 4(sp) // save callee-saved register
    sw s11, 0(sp) // save callee-saved register
    addi sp, sp, 64 // now points to the internal state
    addi a5, a1, 128 // set packing_loop_0 counter (8 x 16 input bytes)
    li t4, 0x00ff00ff // mask for SWAPMOVE
packing_loop_0: // for(i=0; i < 8; i++)
    lw s0, 0(a1) // load input word
    lw s1, 4(a1) // load input word
    lw s2, 8(a1) // load input word
    lw s3, 12(a1) // load input word
    addi a1, a1, 16 // now points to the next input word
    swapmove s0, s1, s0, s1, t4, 8, a6 // SWAPMOVE(s0, s1, 0x00ff00ff, 8)
    swapmove s2, s3, s2, s3, t4, 8, a6 // SWAPMOVE(s2, s3, 0x00ff00ff, 8)
    sw s0, 0(sp) // state[i] <- s0
    sw s1, 32(sp) // state[i+8] <- s1
    sw s2, 64(sp) // state[i+16] <- s2
    sw s3, 96(sp) // state[i+24] <- s3
    addi sp, sp, 4 // i <- i+1
    bne a1, a5, packing_loop_0 // loop until i < 8
    addi sp, sp, -32 // i <- 0
    addi a5, sp, 64 // set packing_loop_1 counter
    li t4, 0x0000ffff // mask for SWAPMOVE
packing_loop_1: // for(i = 0; i < 16; i++)
    lw s0, 0(sp) // load state[i]
    lw s1, 64(sp) // load state[i+16]
    swapmove s0, s1, s0, s1, t4, 16, a6 // SWAPMOVE(s0, s1, 0x0000ffff, 16)
    sw s0, 0(sp) // store state[i]
    sw s1, 64(sp) // store state[i+16]
    addi sp, sp, 4 // i <- i + 1
    bne sp, a5, packing_loop_1 // loop until i < 16
    li t4, 0x55555555 // mask for SWAPMOVE
    li t5, 0x33333333 // mask for SWAPMOVE
    li t6, 0x0f0f0f0f // mask for SWAPMOVE
    addi sp, sp, 32 // points to state[24]
    addi a5, sp, -128 // set packing_loop_2 counter
packing_loop_2: // for(i = 24; i >= 0; i -= 8)
    lw s0, 0(sp) // load state[i]
    lw s1, 4(sp) // load state[i+1]
    lw s2, 8(sp) // load state[i+2]
    lw s3, 12(sp) // load state[i+3]
    lw s4, 16(sp) // load state[i+4]
    lw s5, 20(sp) // load state[i+5]
    lw s6, 24(sp) // load state[i+6]
    lw s7, 28(sp) // load state[i+7]
    swapmove s1, s0, s1, s0, t4, 1, a6 // SWAPMOVE(s1, s0, 0x55555555, 1)
    swapmove s3, s2, s3, s2, t4, 1, a6 // SWAPMOVE(s3, s2, 0x55555555, 1)
    swapmove s5, s4, s5, s4, t4, 1, a6 // SWAPMOVE(s5, s4, 0x55555555, 1)
    swapmove s7, s6, s7, s6, t4, 1, a6 // SWAPMOVE(s7, s6, 0x55555555, 1)
    swapmove s2, s0, s2, s0, t5, 2, a6 // SWAPMOVE(s2, s0, 0x33333333, 2)
    swapmove s3, s1, s3, s1, t5, 2, a6 // SWAPMOVE(s3, s1, 0x33333333, 2)
    swapmove s6, s4, s6, s4, t5, 2, a6 // SWAPMOVE(s6, s4, 0x33333333, 2)
    swapmove s7, s5, s7, s5, t5, 2, a6 // SWAPMOVE(s7, s5, 0x33333333, 2)
    swapmove s4, s0, s4, s0, t6, 4, a6 // SWAPMOVE(s4, s0, 0x0f0f0f0f, 4)
    swapmove s5, s1, s5, s1, t6, 4, a6 // SWAPMOVE(s5, s1, 0x0f0f0f0f, 4)
    swapmove s6, s2, s6, s2, t6, 4, a6 // SWAPMOVE(s6, s2, 0x0f0f0f0f, 4)
    swapmove s7, s3, s7, s3, t6, 4, a6 // SWAPMOVE(s7, s3, 0x0f0f0f0f, 4)
    sw s0, 0(sp) // store state[i]
    sw s1, 4(sp) // store state[i+1]
    sw s2, 8(sp) // store state[i+2]
    sw s3, 12(sp) // store state[i+3]
    sw s4, 16(sp) // store state[i+4]
    sw s5, 20(sp) // store state[i+5]
    sw s6, 24(sp) // store state[i+6]
    sw s7, 28(sp) // store state[i+7]
    addi sp, sp, -32 // i <- i - 8
    bne sp, a5, packing_loop_2 // loop until i >= 0
    addi sp, sp, 32 // now points to state[0]
    addi ra, zero, 40 // set main loop counter: 10 rounds x 4 quarters
    xor a0, a2, zero // put the rkeys address in a0 (a0 = a2)
aes128_addroundkey_sbox: // for(j = 40; j > 0; j--)
    addi ra, ra, -1 // j <- j - 1
    addroundkey s0,s1,s2,s3,s4,s5,s6,s7,0(a0), t0,t1
    addi a0, a0, 32 // points to the next rkey
    sbox s0,s1,s2,s3,s4,s5,s6,s7, t0,t1,t2,t3,t4,t5,t6,a1,a2,a3,a4,a5,a6,a7,s8,s9,s10,s11
    addi sp, sp, 32 // i <- i + 8 (next quarter of the state)
    andi s11, ra, 3 // if(j % 4 == 0), i.e. all 4 quarters done
    beqz s11, aes128_linear_layer // then jump to the linear layer
    sw s0, -32(sp) // store state[i-8]
    sw s1, -28(sp) // store state[i-7]
    sw s2, -24(sp) // store state[i-6]
    sw s3, -20(sp) // store state[i-5]
    sw s4, -16(sp) // store state[i-4]
    sw s5, -12(sp) // store state[i-3]
    sw s6, -8(sp) // store state[i-2]
    sw s7, -4(sp) // store state[i-1]
    lw s0, 0(sp) // load state[i]
    lw s1, 4(sp) // load state[i+1]
    lw s2, 8(sp) // load state[i+2]
    lw s3, 12(sp) // load state[i+3]
    lw s4, 16(sp) // load state[i+4]
    lw s5, 20(sp) // load state[i+5]
    lw s6, 24(sp) // load state[i+6]
    lw s7, 28(sp) // load state[i+7]
    j aes128_addroundkey_sbox // ark and sbox on the rest of the state
aes128_linear_layer:
    shiftrows // shiftrows on the entire state (rewinds sp to state[0])
    beqz ra, unpacking // omit mixcolumns during last round
    mixcolumns // mixcolumns on the entire state
    j aes128_addroundkey_sbox // go to next round
unpacking:
    addroundkey s0,s1,s2,s3,s4,s5,s6,s7, 32(a0), t0,t1
    sw s0, 32(sp) // store state[8] after last addroundkey
    sw s1, 36(sp) // store state[9] after last addroundkey
    sw s2, 40(sp) // store state[10] after last addroundkey
    sw s3, 44(sp) // store state[11] after last addroundkey
    sw s4, 48(sp) // store state[12] after last addroundkey
    sw s5, 52(sp) // store state[13] after last addroundkey
    sw s6, 56(sp) // store state[14] after last addroundkey
    sw s7, 60(sp) // store state[15] after last addroundkey
    lw s1, 68(sp) // load state[17]
    lw s2, 72(sp) // load state[18]
    lw s3, 76(sp) // load state[19]
    lw s4, 80(sp) // load state[20]
    lw s5, 84(sp) // load state[21]
    lw s6, 88(sp) // load state[22]
    addroundkey s8,s1,s2,s3,s4,s5,s6,s10, 64(a0), t0,t1
    sw s8, 64(sp) // store state[16] after last addroundkey
    sw s1, 68(sp) // store state[17] after last addroundkey
    sw s2, 72(sp) // store state[18] after last addroundkey
    sw s3, 76(sp) // store state[19] after last addroundkey
    sw s4, 80(sp) // store state[20] after last addroundkey
    sw s5, 84(sp) // store state[21] after last addroundkey
    sw s6, 88(sp) // store state[22] after last addroundkey
    sw s10, 92(sp) // store state[23] after last addroundkey
    lw s1, 100(sp) // load state[25]
    lw s2, 104(sp) // load state[26]
    lw s3, 108(sp) // load state[27]
    lw s4, 112(sp) // load state[28]
    lw s5, 116(sp) // load state[29]
    lw s6, 120(sp) // load state[30]
    addroundkey s9,s1,s2,s3,s4,s5,s6,s11, 96(a0), t0,t1
    sw s9, 96(sp) // store state[24] after last addroundkey
    sw s1, 100(sp) // store state[25] after last addroundkey
    sw s2, 104(sp) // store state[26] after last addroundkey
    sw s3, 108(sp) // store state[27] after last addroundkey
    sw s4, 112(sp) // store state[28] after last addroundkey
    sw s5, 116(sp) // store state[29] after last addroundkey
    sw s6, 120(sp) // store state[30] after last addroundkey
    sw s11, 124(sp) // store state[31] after last addroundkey
    lw s0, 0(sp) // load state[0]
    lw s1, 4(sp) // load state[1]
    lw s2, 8(sp) // load state[2]
    lw s3, 12(sp) // load state[3]
    lw s4, 16(sp) // load state[4]
    lw s5, 20(sp) // load state[5]
    lw s6, 24(sp) // load state[6]
    lw s7, 28(sp) // load state[7]
    addroundkey s0,s1,s2,s3,s4,s5,s6,s7, 0(a0), t0,t1
    li t4, 0x55555555 // mask for SWAPMOVE
    li t5, 0x33333333 // mask for SWAPMOVE
    li t6, 0x0f0f0f0f // mask for SWAPMOVE
    addi a5, sp, 128 // set unpacking_loop_2_bis counter
    j unpacking_loop_2_bis // state[0...7] already in s0-s7, no need loads
unpacking_loop_2: // for(i = 0; i < 32; i += 8)
    lw s0, 0(sp) // load state[i]
    lw s1, 4(sp) // load state[i+1]
    lw s2, 8(sp) // load state[i+2]
    lw s3, 12(sp) // load state[i+3]
    lw s4, 16(sp) // load state[i+4]
    lw s5, 20(sp) // load state[i+5]
    lw s6, 24(sp) // load state[i+6]
    lw s7, 28(sp) // load state[i+7]
unpacking_loop_2_bis: // inverse of packing_loop_2 (reverse SWAPMOVE order)
    swapmove s4, s0, s4, s0, t6, 4, a6 // SWAPMOVE(s4, s0, 0x0f0f0f0f, 4)
    swapmove s5, s1, s5, s1, t6, 4, a6 // SWAPMOVE(s5, s1, 0x0f0f0f0f, 4)
    swapmove s6, s2, s6, s2, t6, 4, a6 // SWAPMOVE(s6, s2, 0x0f0f0f0f, 4)
    swapmove s7, s3, s7, s3, t6, 4, a6 // SWAPMOVE(s7, s3, 0x0f0f0f0f, 4)
    swapmove s2, s0, s2, s0, t5, 2, a6 // SWAPMOVE(s2, s0, 0x33333333, 2)
    swapmove s3, s1, s3, s1, t5, 2, a6 // SWAPMOVE(s3, s1, 0x33333333, 2)
    swapmove s6, s4, s6, s4, t5, 2, a6 // SWAPMOVE(s6, s4, 0x33333333, 2)
    swapmove s7, s5, s7, s5, t5, 2, a6 // SWAPMOVE(s7, s5, 0x33333333, 2)
    swapmove s1, s0, s1, s0, t4, 1, a6 // SWAPMOVE(s1, s0, 0x55555555, 1)
    swapmove s3, s2, s3, s2, t4, 1, a6 // SWAPMOVE(s3, s2, 0x55555555, 1)
    swapmove s5, s4, s5, s4, t4, 1, a6 // SWAPMOVE(s5, s4, 0x55555555, 1)
    swapmove s7, s6, s7, s6, t4, 1, a6 // SWAPMOVE(s7, s6, 0x55555555, 1)
    sw s0, 0(sp) // store state[i]
    sw s1, 4(sp) // store state[i+1]
    sw s2, 8(sp) // store state[i+2]
    sw s3, 12(sp) // store state[i+3]
    sw s4, 16(sp) // store state[i+4]
    sw s5, 20(sp) // store state[i+5]
    sw s6, 24(sp) // store state[i+6]
    sw s7, 28(sp) // store state[i+7]
    addi sp, sp, 32 // i <- i + 8
    bne sp, a5, unpacking_loop_2 // loop until i < 32
    addi sp, sp, -128 // points to state[0]
    addi a5, sp, 64 // set unpacking_loop_1 counter
    li t4, 0x0000ffff // mask for SWAPMOVE
unpacking_loop_1: // for(i = 0; i < 16; i++)
    lw s0, 0(sp) // load state[i]
    lw s1, 64(sp) // load state[i+16]
    swapmove s0, s1, s0, s1, t4, 16, a6 // SWAPMOVE(s0, s1, 0x0000ffff, 16)
    sw s0, 0(sp) // store state[i]
    sw s1, 64(sp) // store state[i+16]
    addi sp, sp, 4 // i <- i + 1
    bne sp, a5, unpacking_loop_1 // loop until i < 16
    addi sp, sp, -64 // points to state[0]
    lw a0, -12(sp) // restore output address (52 from the frame bottom)
    addi a5, sp, 32 // set unpacking_loop_0 counter
    li t4, 0x00ff00ff // mask for SWAPMOVE
unpacking_loop_0: // for (i = 0; i < 8; i++)
    lw s0, 0(sp) // load state[i]
    lw s1, 32(sp) // load state[i+8]
    lw s2, 64(sp) // load state[i+16]
    lw s3, 96(sp) // load state[i+24]
    addi sp, sp, 4 // i <- i + 1
    swapmove s0, s1, s0, s1, t4, 8, a6 // SWAPMOVE(s0, s1, 0x00ff00ff, 8)
    swapmove s2, s3, s2, s3, t4, 8, a6 // SWAPMOVE(s2, s3, 0x00ff00ff, 8)
    sw s0, 0(a0) // store state[i] in output array
    sw s1, 4(a0) // store state[i+8] in output array
    sw s2, 8(a0) // store state[i+16] in output array
    sw s3, 12(a0) // store state[i+24] in output array
    addi a0, a0, 16 // increments output array address
    bne sp, a5, unpacking_loop_0 // loop until i < 8
    addi sp, sp, -96 // now points at the bottom of the stack
    lw ra, 60(sp) // restore return address
    lw a0, 52(sp) // restore context
    lw a1, 56(sp) // restore context
    lw a2, 48(sp) // restore context
    lw s0, 44(sp) // restore context
    lw s1, 40(sp) // restore context
    lw s2, 36(sp) // restore context
    lw s3, 32(sp) // restore context
    lw s4, 28(sp) // restore context
    lw s5, 24(sp) // restore context
    lw s6, 20(sp) // restore context
    lw s7, 16(sp) // restore context
    lw s8, 12(sp) // restore context
    lw s9, 8(sp) // restore context
    lw s10, 4(sp) // restore context
    lw s11, 0(sp) // restore context
    addi sp, sp, 192 // restore stack pointer
    ret // exit function
.size aes128_encrypt,.-aes128_encrypt
/******************************************************************************
* Encrypts 8 blocks at a time using AES-256, without any operation mode.
*
* The function prototype is:
* - void aes256_encrypt(uint8_t*, const uint8_t*, const uint32_t*)
*   a0 = output array (8 x 16 bytes), a1 = input array (8 x 16 bytes),
*   a2 = round keys produced by the matching fixsliced key schedule.
*
* The 8 input blocks are packed into a 32-word bitsliced state on the stack,
* 14 rounds are applied (AddRoundKey+Sbox over 4 chunks of 8 state words,
* then ShiftRows and - except in the last round - MixColumns), and the state
* is unpacked into the output array.
*
* NOTE(review): after the context save, sp is repurposed as a running pointer
* into the 128-byte state area, and a few accesses use negative offsets from
* sp (e.g. 'lw a0, -12(sp)'). This is only safe if nothing (interrupts,
* signal frames) can use the stack asynchronously -- confirm on the target.
******************************************************************************/
.globl aes256_encrypt
.type aes256_encrypt, %function
.align 2
aes256_encrypt:
    addi sp, sp, -192 // allocate space on the stack
    sw ra, 60(sp) // save context
    sw a1, 56(sp) // save context
    sw a0, 52(sp) // save context
    sw a2, 48(sp) // save context
    sw s0, 44(sp) // save context
    sw s1, 40(sp) // save context
    sw s2, 36(sp) // save context
    sw s3, 32(sp) // save context
    sw s4, 28(sp) // save context
    sw s5, 24(sp) // save context
    sw s6, 20(sp) // save context
    sw s7, 16(sp) // save context
    sw s8, 12(sp) // save context
    sw s9, 8(sp) // save context
    sw s10, 4(sp) // save context
    sw s11, 0(sp) // save context
    // ---- Packing phase: bit-interleave the 8 input blocks into the 32-word
    // ---- bitsliced state stored at sp..sp+127.
    addi sp, sp, 64 // now points to the internal state
    addi a5, a1, 128 // set packing_loop_0 counter
    li t4, 0x00ff00ff // mask for SWAPMOVE
aes256_packing_loop0: // for(i = 0; i < 8; i++)
    lw s0, 0(a1) // load input word
    lw s1, 4(a1) // load input word
    lw s2, 8(a1) // load input word
    lw s3, 12(a1) // load input word
    addi a1, a1, 16 // now points to the next input word
    swapmove s0, s1, s0, s1, t4, 8, a6 // SWAPMOVE(s0, s1, 0x00ff00ff, 8)
    swapmove s2, s3, s2, s3, t4, 8, a6 // SWAPMOVE(s2, s3, 0x00ff00ff, 8)
    sw s0, 0(sp) // state[i] <- s0
    sw s1, 32(sp) // state[i+8] <- s1
    sw s2, 64(sp) // state[i+16] <- s2
    sw s3, 96(sp) // state[i+24] <- s3
    addi sp, sp, 4 // i <- i+1
    bne a1, a5, aes256_packing_loop0 // loop while i < 8
    addi sp, sp, -32 // i <- 0
    addi a5, sp, 64 // set packing_loop_1 counter
    li t4, 0x0000ffff // mask for SWAPMOVE
aes256_packing_loop1: // for(i = 0; i < 16; i++)
    lw s0, 0(sp) // load state[i]
    lw s1, 64(sp) // load state[i+16]
    swapmove s0, s1, s0, s1, t4, 16, a6 // SWAPMOVE(s0, s1, 0x0000ffff, 16)
    sw s0, 0(sp) // store state[i]
    sw s1, 64(sp) // store state[i+16]
    addi sp, sp, 4 // i <- i + 1
    bne sp, a5, aes256_packing_loop1 // loop while i < 16
    li t4, 0x55555555 // mask for SWAPMOVE
    li t5, 0x33333333 // mask for SWAPMOVE
    li t6, 0x0f0f0f0f // mask for SWAPMOVE
    addi sp, sp, 32 // points to state[24]
    addi a5, sp, -128 // set packing_loop_2 counter
aes256_packing_loop2: // for(i = 24; i >= 0; i -= 8)
    lw s0, 0(sp) // load state[i]
    lw s1, 4(sp) // load state[i+1]
    lw s2, 8(sp) // load state[i+2]
    lw s3, 12(sp) // load state[i+3]
    lw s4, 16(sp) // load state[i+4]
    lw s5, 20(sp) // load state[i+5]
    lw s6, 24(sp) // load state[i+6]
    lw s7, 28(sp) // load state[i+7]
    swapmove s1, s0, s1, s0, t4, 1, a6 // SWAPMOVE(s1, s0, 0x55555555, 1)
    swapmove s3, s2, s3, s2, t4, 1, a6 // SWAPMOVE(s3, s2, 0x55555555, 1)
    swapmove s5, s4, s5, s4, t4, 1, a6 // SWAPMOVE(s5, s4, 0x55555555, 1)
    swapmove s7, s6, s7, s6, t4, 1, a6 // SWAPMOVE(s7, s6, 0x55555555, 1)
    swapmove s2, s0, s2, s0, t5, 2, a6 // SWAPMOVE(s2, s0, 0x33333333, 2)
    swapmove s3, s1, s3, s1, t5, 2, a6 // SWAPMOVE(s3, s1, 0x33333333, 2)
    swapmove s6, s4, s6, s4, t5, 2, a6 // SWAPMOVE(s6, s4, 0x33333333, 2)
    swapmove s7, s5, s7, s5, t5, 2, a6 // SWAPMOVE(s7, s5, 0x33333333, 2)
    swapmove s4, s0, s4, s0, t6, 4, a6 // SWAPMOVE(s4, s0, 0x0f0f0f0f, 4)
    swapmove s5, s1, s5, s1, t6, 4, a6 // SWAPMOVE(s5, s1, 0x0f0f0f0f, 4)
    swapmove s6, s2, s6, s2, t6, 4, a6 // SWAPMOVE(s6, s2, 0x0f0f0f0f, 4)
    swapmove s7, s3, s7, s3, t6, 4, a6 // SWAPMOVE(s7, s3, 0x0f0f0f0f, 4)
    sw s0, 0(sp) // store state[i]
    sw s1, 4(sp) // store state[i+1]
    sw s2, 8(sp) // store state[i+2]
    sw s3, 12(sp) // store state[i+3]
    sw s4, 16(sp) // store state[i+4]
    sw s5, 20(sp) // store state[i+5]
    sw s6, 24(sp) // store state[i+6]
    sw s7, 28(sp) // store state[i+7]
    addi sp, sp, -32 // i <- i - 8
    bne sp, a5, aes256_packing_loop2 // loop while i >= 0
    addi sp, sp, 32 // now points to state[0]
    // ---- Round phase: 14 rounds x 4 chunks of 8 state words = 56
    // ---- AddRoundKey+Sbox iterations; every 4th iteration (ra % 4 == 0)
    // ---- runs the linear layer on the whole state.
    addi ra, zero, 56 // set main loop counter (14 rounds x 4 chunks)
    xor a0, a2, zero // put the rkeys address in a0
aes256_addroundkey_sbox: // for(j = 56; j > 0; j--)
    addi ra, ra, -1 // j <- j - 1
    addroundkey s0,s1,s2,s3,s4,s5,s6,s7,0(a0), t0,t1
    addi a0, a0, 32 // points to the next rkey
    sbox s0,s1,s2,s3,s4,s5,s6,s7, t0,t1,t2,t3,t4,t5,t6,a1,a2,a3,a4,a5,a6,a7,s8,s9,s10,s11
    addi sp, sp, 32 // i <- i + 8
    andi s11, ra, 3 // if(j % 4 == 0)
    beqz s11, aes256_linear_layer // then jump to the linear layer
    sw s0, -32(sp) // store state[i-8]
    sw s1, -28(sp) // store state[i-7]
    sw s2, -24(sp) // store state[i-6]
    sw s3, -20(sp) // store state[i-5]
    sw s4, -16(sp) // store state[i-4]
    sw s5, -12(sp) // store state[i-3]
    sw s6, -8(sp) // store state[i-2]
    sw s7, -4(sp) // store state[i-1]
    lw s0, 0(sp) // load state[i]
    lw s1, 4(sp) // load state[i+1]
    lw s2, 8(sp) // load state[i+2]
    lw s3, 12(sp) // load state[i+3]
    lw s4, 16(sp) // load state[i+4]
    lw s5, 20(sp) // load state[i+5]
    lw s6, 24(sp) // load state[i+6]
    lw s7, 28(sp) // load state[i+7]
    j aes256_addroundkey_sbox // ark and sbox on the rest of the state
aes256_linear_layer:
    shiftrows // shiftrows on the entire state
    beqz ra, aes256_unpacking // omit mixcolumns during last round
    mixcolumns // mixcolumns on the entire state
    j aes256_addroundkey_sbox // go to next round
aes256_unpacking:
    // ---- Final AddRoundKey on the 4 chunks, then unpack the bitsliced
    // ---- state back into the byte-oriented output blocks.
    addroundkey s0,s1,s2,s3,s4,s5,s6,s7, 32(a0), t0,t1
    sw s0, 32(sp) // store state[8] after last addroundkey
    sw s1, 36(sp) // store state[9] after last addroundkey
    sw s2, 40(sp) // store state[10] after last addroundkey
    sw s3, 44(sp) // store state[11] after last addroundkey
    sw s4, 48(sp) // store state[12] after last addroundkey
    sw s5, 52(sp) // store state[13] after last addroundkey
    sw s6, 56(sp) // store state[14] after last addroundkey
    sw s7, 60(sp) // store state[15] after last addroundkey
    lw s1, 68(sp) // load state[17]
    lw s2, 72(sp) // load state[18]
    lw s3, 76(sp) // load state[19]
    lw s4, 80(sp) // load state[20]
    lw s5, 84(sp) // load state[21]
    lw s6, 88(sp) // load state[22]
    addroundkey s8,s1,s2,s3,s4,s5,s6,s10, 64(a0), t0,t1
    sw s8, 64(sp) // store state[16] after last addroundkey
    sw s1, 68(sp) // store state[17] after last addroundkey
    sw s2, 72(sp) // store state[18] after last addroundkey
    sw s3, 76(sp) // store state[19] after last addroundkey
    sw s4, 80(sp) // store state[20] after last addroundkey
    sw s5, 84(sp) // store state[21] after last addroundkey
    sw s6, 88(sp) // store state[22] after last addroundkey
    sw s10, 92(sp) // store state[23] after last addroundkey
    lw s1, 100(sp) // load state[25]
    lw s2, 104(sp) // load state[26]
    lw s3, 108(sp) // load state[27]
    lw s4, 112(sp) // load state[28]
    lw s5, 116(sp) // load state[29]
    lw s6, 120(sp) // load state[30]
    addroundkey s9,s1,s2,s3,s4,s5,s6,s11, 96(a0), t0,t1
    sw s9, 96(sp) // store state[24] after last addroundkey
    sw s1, 100(sp) // store state[25] after last addroundkey
    sw s2, 104(sp) // store state[26] after last addroundkey
    sw s3, 108(sp) // store state[27] after last addroundkey
    sw s4, 112(sp) // store state[28] after last addroundkey
    sw s5, 116(sp) // store state[29] after last addroundkey
    sw s6, 120(sp) // store state[30] after last addroundkey
    sw s11, 124(sp) // store state[31] after last addroundkey
    lw s0, 0(sp) // load state[0]
    lw s1, 4(sp) // load state[1]
    lw s2, 8(sp) // load state[2]
    lw s3, 12(sp) // load state[3]
    lw s4, 16(sp) // load state[4]
    lw s5, 20(sp) // load state[5]
    lw s6, 24(sp) // load state[6]
    lw s7, 28(sp) // load state[7]
    addroundkey s0,s1,s2,s3,s4,s5,s6,s7, 0(a0), t0,t1
    li t4, 0x55555555 // mask for SWAPMOVE
    li t5, 0x33333333 // mask for SWAPMOVE
    li t6, 0x0f0f0f0f // mask for SWAPMOVE
    addi a5, sp, 128 // set unpacking_loop_2_bis counter
    j aes256_unpacking_loop2_bis // state[0...7] already in s0-s7, no need loads
aes256_unpacking_loop2: // for(i = 0; i < 32; i += 8)
    lw s0, 0(sp) // load state[i]
    lw s1, 4(sp) // load state[i+1]
    lw s2, 8(sp) // load state[i+2]
    lw s3, 12(sp) // load state[i+3]
    lw s4, 16(sp) // load state[i+4]
    lw s5, 20(sp) // load state[i+5]
    lw s6, 24(sp) // load state[i+6]
    lw s7, 28(sp) // load state[i+7]
aes256_unpacking_loop2_bis: // inverse of packing_loop2's SWAPMOVE sequence
    swapmove s4, s0, s4, s0, t6, 4, a6 // SWAPMOVE(s4, s0, 0x0f0f0f0f, 4)
    swapmove s5, s1, s5, s1, t6, 4, a6 // SWAPMOVE(s5, s1, 0x0f0f0f0f, 4)
    swapmove s6, s2, s6, s2, t6, 4, a6 // SWAPMOVE(s6, s2, 0x0f0f0f0f, 4)
    swapmove s7, s3, s7, s3, t6, 4, a6 // SWAPMOVE(s7, s3, 0x0f0f0f0f, 4)
    swapmove s2, s0, s2, s0, t5, 2, a6 // SWAPMOVE(s2, s0, 0x33333333, 2)
    swapmove s3, s1, s3, s1, t5, 2, a6 // SWAPMOVE(s3, s1, 0x33333333, 2)
    swapmove s6, s4, s6, s4, t5, 2, a6 // SWAPMOVE(s6, s4, 0x33333333, 2)
    swapmove s7, s5, s7, s5, t5, 2, a6 // SWAPMOVE(s7, s5, 0x33333333, 2)
    swapmove s1, s0, s1, s0, t4, 1, a6 // SWAPMOVE(s1, s0, 0x55555555, 1)
    swapmove s3, s2, s3, s2, t4, 1, a6 // SWAPMOVE(s3, s2, 0x55555555, 1)
    swapmove s5, s4, s5, s4, t4, 1, a6 // SWAPMOVE(s5, s4, 0x55555555, 1)
    swapmove s7, s6, s7, s6, t4, 1, a6 // SWAPMOVE(s7, s6, 0x55555555, 1)
    sw s0, 0(sp) // store state[i]
    sw s1, 4(sp) // store state[i+1]
    sw s2, 8(sp) // store state[i+2]
    sw s3, 12(sp) // store state[i+3]
    sw s4, 16(sp) // store state[i+4]
    sw s5, 20(sp) // store state[i+5]
    sw s6, 24(sp) // store state[i+6]
    sw s7, 28(sp) // store state[i+7]
    addi sp, sp, 32 // i <- i + 8
    bne sp,a5,aes256_unpacking_loop2// loop while i < 32
    addi sp, sp, -128 // points to state[0]
    addi a5, sp, 64 // set unpacking_loop_1 counter
    li t4, 0x0000ffff // mask for SWAPMOVE
aes256_unpacking_loop1: // for(i = 0; i < 16; i++)
    lw s0, 0(sp) // load state[i]
    lw s1, 64(sp) // load state[i+16]
    swapmove s0, s1, s0, s1, t4, 16, a6 // SWAPMOVE(s0, s1, 0x0000ffff, 16)
    sw s0, 0(sp) // store state[i]
    sw s1, 64(sp) // store state[i+16]
    addi sp, sp, 4 // i <- i + 1
    bne sp,a5,aes256_unpacking_loop1// loop while i < 16
    addi sp, sp, -64 // points to state[0]
    lw a0, -12(sp) // restore output address (saved at 52 from frame base)
    addi a5, sp, 32 // set unpacking_loop_0 counter
    li t4, 0x00ff00ff // mask for SWAPMOVE
aes256_unpacking_loop0: // for (i = 0; i < 8; i++)
    lw s0, 0(sp) // load state[i]
    lw s1, 32(sp) // load state[i+8]
    lw s2, 64(sp) // load state[i+16]
    lw s3, 96(sp) // load state[i+24]
    addi sp, sp, 4 // i <- i + 1
    swapmove s0, s1, s0, s1, t4, 8, a6 // SWAPMOVE(s0, s1, 0x00ff00ff, 8)
    swapmove s2, s3, s2, s3, t4, 8, a6 // SWAPMOVE(s2, s3, 0x00ff00ff, 8)
    sw s0, 0(a0) // store state[i] in output array
    sw s1, 4(a0) // store state[i+8] in output array
    sw s2, 8(a0) // store state[i+16] in output array
    sw s3, 12(a0) // store state[i+24] in output array
    addi a0, a0, 16 // increments output array address
    bne sp,a5,aes256_unpacking_loop0// loop while i < 8
    addi sp, sp, -96 // now points at the bottom of the stack
    lw ra, 60(sp) // restore context
    lw a0, 52(sp) // restore context
    lw a1, 56(sp) // restore context
    lw a2, 48(sp) // restore context
    lw s0, 44(sp) // restore context
    lw s1, 40(sp) // restore context
    lw s2, 36(sp) // restore context
    lw s3, 32(sp) // restore context
    lw s4, 28(sp) // restore context
    lw s5, 24(sp) // restore context
    lw s6, 20(sp) // restore context
    lw s7, 16(sp) // restore context
    lw s8, 12(sp) // restore context
    lw s9, 8(sp) // restore context
    lw s10, 4(sp) // restore context
    lw s11, 0(sp) // restore context
    addi sp, sp, 192 // restore stack pointer
    ret // exit function
.size aes256_encrypt,.-aes256_encrypt
/******************************************************************************
 * File boundary: the remainder of this listing comes from
 * riscv/fixslicing/aes_keyschedule_lut.S (repository aadomn/aes, 42,523 B).
 ******************************************************************************/
/******************************************************************************
* RV32I assembly implementations of the AES-128 and AES-256 key schedules
* according to fixslicing.
*
* Note that this implementation relies on Look-Up Tables (LUT) and therefore
* might not run in constant-time on some platforms. See the 'aes_keyschedule.S'
* file for fully bitsliced implementation of the key schedule.
*
* See the paper at https://eprint.iacr.org/2020/1123.pdf for more details.
*
* @author Alexandre Adomnicai, Nanyang Technological University, Singapore
* alexandre.adomnicai@ntu.edu.sg
*
* @date August 2020
******************************************************************************/
/******************************************************************************
 * Read-only look-up tables. They live in .rodata rather than .data since
 * they are never written (and so need not occupy RAM on embedded targets).
 ******************************************************************************/
.section .rodata
/******************************************************************************
 * The AES Sbox represented as a look-up-table, packed little-endian with
 * 4 entries per 32-bit word (sbox[0] = 0x63 is the low byte of word 0).
 ******************************************************************************/
.align 2
sbox_lut:
    .word 0x7b777c63, 0xc56f6bf2, 0x2b670130, 0x76abd7fe
    .word 0x7dc982ca, 0xf04759fa, 0xafa2d4ad, 0xc072a49c
    .word 0x2693fdb7, 0xccf73f36, 0xf1e5a534, 0x1531d871
    .word 0xc323c704, 0x9a059618, 0xe2801207, 0x75b227eb
    .word 0x1a2c8309, 0xa05a6e1b, 0xb3d63b52, 0x842fe329
    .word 0xed00d153, 0x5bb1fc20, 0x39becb6a, 0xcf584c4a
    .word 0xfbaaefd0, 0x85334d43, 0x7f02f945, 0xa89f3c50
    .word 0x8f40a351, 0xf5389d92, 0x21dab6bc, 0xd2f3ff10
    .word 0xec130ccd, 0x1744975f, 0x3d7ea7c4, 0x73195d64
    .word 0xdc4f8160, 0x88902a22, 0x14b8ee46, 0xdb0b5ede
    .word 0x0a3a32e0, 0x5c240649, 0x62acd3c2, 0x79e49591
    .word 0x6d37c8e7, 0xa94ed58d, 0xeaf4566c, 0x08ae7a65
    .word 0x2e2578ba, 0xc6b4a61c, 0x1f74dde8, 0x8a8bbd4b
    .word 0x66b53e70, 0x0ef60348, 0xb9573561, 0x9e1dc186
    .word 0x1198f8e1, 0x948ed969, 0xe9871e9b, 0xdf2855ce
    .word 0x0d89a18c, 0x6842e6bf, 0x0f2d9941, 0x16bb54b0
/******************************************************************************
 * The AES round constants represented as a look-up-table (Rcon[1..10] in the
 * low byte of each word).
 ******************************************************************************/
.align 2
rconst_lut:
    .word 0x00000001, 0x00000002, 0x00000004, 0x00000008
    .word 0x00000010, 0x00000020, 0x00000040, 0x00000080
    .word 0x0000001b, 0x00000036
.text
/******************************************************************************
 * Implementation of the SWAPMOVE technique for the packing/unpacking routines:
 *   tmp  = ((in0 >> imm) ^ in1) & mask
 *   out1 = in1 ^ tmp
 *   out0 = in0 ^ (tmp << imm)
 * i.e. the bits of in1 selected by 'mask' are exchanged with the bits of in0
 * selected by 'mask << imm'.
 *
 * Parameters:
 * - out0-out1 output registers (may alias in0-in1)
 * - in0-in1 input registers
 * - mask mask register
 * - imm shift value (must be an immediate value)
 * - tmp temporary register (clobbered)
 ******************************************************************************/
.macro swapmove out0,out1, in0,in1, mask, imm, tmp
    srli \tmp, \in0, \imm // tmp <- in0 >> imm
    xor \tmp, \tmp, \in1 // tmp <- tmp ^ in1
    and \tmp, \tmp, \mask // tmp <- bits to be exchanged
    xor \out1, \in1, \tmp // out1 <- in1 ^ tmp
    slli \tmp, \tmp, \imm // tmp <- tmp << imm
    xor \out0, \in0, \tmp // out0 <- in0 ^ tmp
.endm
/******************************************************************************
 * Applies NOT to round key words s1, s2, s6 and s7 (in place) to save some
 * cycles during Sbox calculations.
 * NOTE(review): presumably the bitsliced Sbox expects exactly these slices
 * complemented so it can drop the matching NOT gates -- confirm against the
 * sbox macro in the encryption file.
 ******************************************************************************/
.macro not_rkey
    not s1, s1
    not s2, s2
    not s6, s6
    not s7, s7
.endm
/******************************************************************************
 * Stores the current round key (registers s0-s7) in the round-key array
 * pointed to by a0, then advances a0 by 32 bytes to the next rkey slot.
 ******************************************************************************/
.macro store_rkey
    sw s0, 0(a0)
    sw s1, 4(a0)
    sw s2, 8(a0)
    sw s3, 12(a0)
    sw s4, 16(a0)
    sw s5, 20(a0)
    sw s6, 24(a0)
    sw s7, 28(a0)
    addi a0, a0, 32 // points to the next rkey
.endm
/******************************************************************************
 * Subroutine: complements (s1, s2, s6, s7) and stores the current round key
 * (s0-s7) in the round-key array, advancing a0 by 32 bytes.
 * Reuses the not_rkey/store_rkey macros above instead of duplicating their
 * bodies, so the three stay in sync.
 * In/Out: s0-s7 = round key (s1, s2, s6, s7 complemented in place),
 *         a0 = rkey array pointer (advanced by 32)
 ******************************************************************************/
store_not_rkey:
    not_rkey // NOT s1,s2,s6,s7 to speed up Sbox evaluation
    store_rkey // store s0-s7 at a0 and advance a0
    ret
/******************************************************************************
 * Compute ShiftRows^(-1) on the entire round key in order to match fixslicing.
 * In:    a2-a5 = the 4 round key words
 *        t3 = 0x0000ff00, t4 = 0x00ff0000, t5 = 0xff000000 (row masks,
 *        set up by the caller before the key-expansion loop)
 * Out:   s1, s3, s5, s7 = byte-rotated words (then fed to packing_rkey)
 * Clobb: t6
 * Output word k keeps byte 0 of input word k and takes bytes 1/2/3 from the
 * input words at offsets +3/+2/+1 (mod 4), i.e. each byte row is rotated
 * across the columns by its row index.
 ******************************************************************************/
inv_shiftrows_1:
    andi s1, a2, 0xff // s1 <- rkey[i] & 0x000000ff
    and t6, a5, t3 // t6 <- rkeys[i+3] & 0x0000ff00
    or s1, s1, t6 // s1 <- s1 | t6
    and t6, a4, t4 // t6 <- rkeys[i+2] & 0x00ff0000
    or s1, s1, t6 // s1 <- s1 | t6
    and t6, a3, t5 // t6 <- rkeys[i+1] & 0xff000000
    or s1, s1, t6 // s1 <- s1 | t6
    andi s3, a3, 0xff // s3 <- rkeys[i+1] & 0x000000ff
    and t6, a2, t3 // t6 <- rkeys[i] & 0x0000ff00
    or s3, s3, t6 // s3 <- s3 | t6
    and t6, a5, t4 // t6 <- rkeys[i+3] & 0x00ff0000
    or s3, s3, t6 // s3 <- s3 | t6
    and t6, a4, t5 // t6 <- rkeys[i+2] & 0xff000000
    or s3, s3, t6 // s3 <- s3 | t6
    andi s5, a4, 0xff // s5 <- rkeys[i+2] & 0x000000ff
    and t6, a3, t3 // t6 <- rkeys[i+1] & 0x0000ff00
    or s5, s5, t6 // s5 <- s5 | t6
    and t6, a2, t4 // t6 <- rkeys[i] & 0x00ff0000
    or s5, s5, t6 // s5 <- s5 | t6
    and t6, a5, t5 // t6 <- rkeys[i+3] & 0xff000000
    or s5, s5, t6 // s5 <- s5 | t6
    andi s7, a5, 0xff // s7 <- rkeys[i+3] & 0x000000ff
    and t6, a4, t3 // t6 <- rkeys[i+2] & 0x0000ff00
    or s7, s7, t6 // s7 <- s7 | t6
    and t6, a3, t4 // t6 <- rkeys[i+1] & 0x00ff0000
    or s7, s7, t6 // s7 <- s7 | t6
    and t6, a2, t5 // t6 <- rkeys[i] & 0xff000000
    or s7, s7, t6 // s7 <- s7 | t6
    ret
/******************************************************************************
 * Compute ShiftRows^(-2) on the entire round key in order to match fixslicing.
 * In:    a2-a5 = the 4 round key words
 *        s10 = 0x00ff00ff, s11 = 0xff00ff00 (row masks set up by the caller)
 * Out:   s1, s3, s5, s7 = byte-rotated words (then fed to packing_rkey)
 * Clobb: t6
 * ShiftRows^2 swaps rows by 2 columns, so each output word mixes its own
 * even bytes with the odd bytes of the word 2 columns away.
 ******************************************************************************/
inv_shiftrows_2:
    and s1, a2, s10 // s1 <- rkey[i] & 0x00ff00ff
    and t6, a4, s11 // t6 <- rkey[i+2] & 0xff00ff00
    or s1, s1, t6 // s1 <- s1 | t6
    and s3, a3, s10 // s3 <- rkey[i+1] & 0x00ff00ff
    and t6, a5, s11 // t6 <- rkey[i+3] & 0xff00ff00
    or s3, s3, t6 // s3 <- s3 | t6
    and s5, a4, s10 // s5 <- rkey[i+2] & 0x00ff00ff
    and t6, a2, s11 // t6 <- rkey[i] & 0xff00ff00
    or s5, s5, t6 // s5 <- s5 | t6
    and s7, a5, s10 // s7 <- rkey[i+3] & 0x00ff00ff
    and t6, a3, s11 // t6 <- rkey[i+1] & 0xff00ff00
    or s7, s7, t6 // s7 <- s7 | t6
    ret
/******************************************************************************
 * Compute ShiftRows^(-3) on the entire round key in order to match fixslicing.
 * In:    a2-a5 = the 4 round key words
 *        t3 = 0x0000ff00, t4 = 0x00ff0000, t5 = 0xff000000 (row masks)
 * Out:   s1, s3, s5, s7 = byte-rotated words (then fed to packing_rkey)
 * Clobb: t6
 * Same structure as inv_shiftrows_1 but rotating in the opposite direction
 * (bytes 1/2/3 are taken from the words at offsets +1/+2/+3 mod 4).
 ******************************************************************************/
inv_shiftrows_3:
    andi s1, a2, 0xff // s1 <- rkey[i] & 0x000000ff
    and t6, a3, t3 // t6 <- rkeys[i+1] & 0x0000ff00
    or s1, s1, t6 // s1 <- s1 | t6
    and t6, a4, t4 // t6 <- rkeys[i+2] & 0x00ff0000
    or s1, s1, t6 // s1 <- s1 | t6
    and t6, a5, t5 // t6 <- rkeys[i+3] & 0xff000000
    or s1, s1, t6 // s1 <- s1 | t6
    andi s3, a3, 0xff // s3 <- rkeys[i+1] & 0x000000ff
    and t6, a4, t3 // t6 <- rkeys[i+2] & 0x0000ff00
    or s3, s3, t6 // s3 <- s3 | t6
    and t6, a5, t4 // t6 <- rkeys[i+3] & 0x00ff0000
    or s3, s3, t6 // s3 <- s3 | t6
    and t6, a2, t5 // t6 <- rkeys[i] & 0xff000000
    or s3, s3, t6 // s3 <- s3 | t6
    andi s5, a4, 0xff // s5 <- rkeys[i+2] & 0x000000ff
    and t6, a5, t3 // t6 <- rkeys[i+3] & 0x0000ff00
    or s5, s5, t6 // s5 <- s5 | t6
    and t6, a2, t4 // t6 <- rkeys[i] & 0x00ff0000
    or s5, s5, t6 // s5 <- s5 | t6
    and t6, a3, t5 // t6 <- rkeys[i+1] & 0xff000000
    or s5, s5, t6 // s5 <- s5 | t6
    andi s7, a5, 0xff // s7 <- rkeys[i+3] & 0x000000ff
    and t6, a2, t3 // t6 <- rkeys[i] & 0x0000ff00
    or s7, s7, t6 // s7 <- s7 | t6
    and t6, a3, t4 // t6 <- rkeys[i+1] & 0x00ff0000
    or s7, s7, t6 // s7 <- s7 | t6
    and t6, a4, t5 // t6 <- rkeys[i+2] & 0xff000000
    or s7, s7, t6 // s7 <- s7 | t6
    ret
/******************************************************************************
 * Execute a round of the AES-128 key schedule in the classical
 * representation:
 *   a2 ^= RotWord(SubWord(a5)) ^ rconst;  a3 ^= a2;  a4 ^= a3;  a5 ^= a4;
 * In:    a2-a5 = current round key words, a6 = sbox_lut, a7 = rconst pointer
 * Out:   a2-a5 = next round key words, a7 advanced to the next round constant
 * Clobb: s8, t6
 * The sbox is word-packed (4 entries per word): for each byte of a5, the
 * aligned table word is loaded and then shifted right by 8*(index % 4) to
 * extract the entry.
 * Fix: the last byte extraction used 'srl' with an immediate (a binutils
 * alias); replaced with the standard 'srli' for consistency/portability.
 ******************************************************************************/
aes128_rfunc_ks:
    andi s8, a5, 0xff // s8 <- a5 & 0xff (byte 0 of a5)
    andi t6, s8, 0xfc // ensure a 4-byte aligned address
    add t6, t6, a6 // t6 points to the right sbox word
    lw t6, 0(t6) // t6 <- sbox word holding sbox[a5 & 0xff]
    andi s8, s8, 0x03 // byte index within the word
    slli s8, s8, 3 // byte index -> bit shift amount
    srl t6, t6, s8 // shift the 32-bit word
    andi t6, t6, 0xff // extract the right byte
    slli t6, t6, 24 // t6 <- sbox[a5 & 0xff] << 24
    xor a2, a2, t6 // a2 <- a2 ^ (sbox[a5 & 0xff] << 24)
    srli s8, a5, 8 // s8 <- a5 >> 8
    andi s8, s8, 0xff // s8 <- (a5 >> 8) & 0xff (byte 1 of a5)
    andi t6, s8, 0xfc // ensure a 4-byte aligned address
    add t6, t6, a6 // t6 points to the right sbox word
    lw t6, 0(t6) // t6 <- sbox word holding sbox[(a5 >> 8) & 0xff]
    andi s8, s8, 0x03 // byte index within the word
    slli s8, s8, 3 // byte index -> bit shift amount
    srl t6, t6, s8 // shift the 32-bit word
    andi t6, t6, 0xff // extract the right byte
    xor a2, a2, t6 // a2 <- a2 ^ sbox[(a5 >> 8) & 0xff]
    srli s8, a5, 24 // s8 <- a5 >> 24
    andi s8, s8, 0xff // s8 <- (a5 >> 24) & 0xff (byte 3 of a5)
    andi t6, s8, 0xfc // ensure a 4-byte aligned address
    add t6, t6, a6 // t6 points to the right sbox word
    lw t6, 0(t6) // t6 <- sbox word holding sbox[(a5 >> 24) & 0xff]
    andi s8, s8, 0x03 // byte index within the word
    slli s8, s8, 3 // byte index -> bit shift amount
    srl t6, t6, s8 // shift the 32-bit word
    andi t6, t6, 0xff // extract the right byte
    slli t6, t6, 16 // t6 <- sbox[(a5 >> 24) & 0xff] << 16
    xor a2, a2, t6 // a2 <- a2 ^ (sbox[(a5 >> 24) & 0xff] << 16)
    srli s8, a5, 16 // s8 <- a5 >> 16 (srli, not 'srl reg,imm' alias)
    andi s8, s8, 0xff // s8 <- (a5 >> 16) & 0xff (byte 2 of a5)
    andi t6, s8, 0xfc // ensure a 4-byte aligned address
    add t6, t6, a6 // t6 points to the right sbox word
    lw t6, 0(t6) // t6 <- sbox word holding sbox[(a5 >> 16) & 0xff]
    andi s8, s8, 0x03 // byte index within the word
    slli s8, s8, 3 // byte index -> bit shift amount
    srl t6, t6, s8 // shift the 32-bit word
    andi t6, t6, 0xff // extract the right byte
    slli t6, t6, 8 // t6 <- sbox[(a5 >> 16) & 0xff] << 8
    xor a2, a2, t6 // a2 <- a2 ^ (sbox[(a5 >> 16) & 0xff] << 8)
    lw t6, 0(a7) // load rconst
    xor a2, a2, t6 // add rconst
    addi a7, a7, 4 // point to the next rconst
    xor a3, a3, a2 // update the rkey words
    xor a4, a4, a3 // update the rkey words
    xor a5, a5, a4 // update the rkey words
    ret
/******************************************************************************
 * Round function of the AES-256 key schedule in the classical representation
 * for rounds i s.t. i % 2 == 0 (the RotWord+SubWord+Rcon case):
 *   a2 ^= RotWord(SubWord(t3)) ^ rconst;  a3 ^= a2;  a4 ^= a3;  a5 ^= a4;
 * In:    a2-a5 = first half of the key state, t3 = last word of the second
 *        half, a6 = sbox_lut, a7 = rconst pointer
 * Out:   a2-a5 updated, a7 advanced, the 4 new words pushed on the stack
 *        (16 bytes) to be packed later
 * Clobb: s8, t6, sp (moved down by 16)
 * Fix: the last byte extraction used 'srl' with an immediate (a binutils
 * alias); replaced with the standard 'srli' for consistency/portability.
 ******************************************************************************/
aes256_rfunc_ks_0:
    andi s8, t3, 0xff // s8 <- t3 & 0xff (byte 0 of t3)
    andi t6, s8, 0xfc // ensure a 4-byte aligned address
    add t6, t6, a6 // t6 points to the right sbox word
    lw t6, 0(t6) // t6 <- sbox word holding sbox[t3 & 0xff]
    andi s8, s8, 0x03 // byte index within the word
    slli s8, s8, 3 // byte index -> bit shift amount
    srl t6, t6, s8 // shift the 32-bit word
    andi t6, t6, 0xff // extract the right byte
    slli t6, t6, 24 // t6 <- sbox[t3 & 0xff] << 24
    xor a2, a2, t6 // a2 <- a2 ^ (sbox[t3 & 0xff] << 24)
    srli s8, t3, 8 // s8 <- t3 >> 8
    andi s8, s8, 0xff // s8 <- (t3 >> 8) & 0xff (byte 1 of t3)
    andi t6, s8, 0xfc // ensure a 4-byte aligned address
    add t6, t6, a6 // t6 points to the right sbox word
    lw t6, 0(t6) // t6 <- sbox word holding sbox[(t3 >> 8) & 0xff]
    andi s8, s8, 0x03 // byte index within the word
    slli s8, s8, 3 // byte index -> bit shift amount
    srl t6, t6, s8 // shift the 32-bit word
    andi t6, t6, 0xff // extract the right byte
    xor a2, a2, t6 // a2 <- a2 ^ sbox[(t3 >> 8) & 0xff]
    srli s8, t3, 24 // s8 <- t3 >> 24
    andi s8, s8, 0xff // s8 <- (t3 >> 24) & 0xff (byte 3 of t3)
    andi t6, s8, 0xfc // ensure a 4-byte aligned address
    add t6, t6, a6 // t6 points to the right sbox word
    lw t6, 0(t6) // t6 <- sbox word holding sbox[(t3 >> 24) & 0xff]
    andi s8, s8, 0x03 // byte index within the word
    slli s8, s8, 3 // byte index -> bit shift amount
    srl t6, t6, s8 // shift the 32-bit word
    andi t6, t6, 0xff // extract the right byte
    slli t6, t6, 16 // t6 <- sbox[(t3 >> 24) & 0xff] << 16
    xor a2, a2, t6 // a2 <- a2 ^ (sbox[(t3 >> 24) & 0xff] << 16)
    srli s8, t3, 16 // s8 <- t3 >> 16 (srli, not 'srl reg,imm' alias)
    andi s8, s8, 0xff // s8 <- (t3 >> 16) & 0xff (byte 2 of t3)
    andi t6, s8, 0xfc // ensure a 4-byte aligned address
    add t6, t6, a6 // t6 points to the right sbox word
    lw t6, 0(t6) // t6 <- sbox word holding sbox[(t3 >> 16) & 0xff]
    andi s8, s8, 0x03 // byte index within the word
    slli s8, s8, 3 // byte index -> bit shift amount
    srl t6, t6, s8 // shift the 32-bit word
    andi t6, t6, 0xff // extract the right byte
    slli t6, t6, 8 // t6 <- sbox[(t3 >> 16) & 0xff] << 8
    xor a2, a2, t6 // a2 <- a2 ^ (sbox[(t3 >> 16) & 0xff] << 8)
    lw t6, 0(a7) // load rconst
    xor a2, a2, t6 // add rconst
    addi a7, a7, 4 // point to the next rconst
    xor a3, a3, a2 // update the rkey words
    xor a4, a4, a3 // update the rkey words
    xor a5, a5, a4 // update the rkey words
    addi sp, sp, -16 // store rkey words on stack, to be packed later
    sw a2, 0(sp) // store 1st rkey word
    sw a3, 4(sp) // store 2nd rkey word
    sw a4, 8(sp) // store 3rd rkey word
    sw a5, 12(sp) // store 4th rkey word
    ret
/******************************************************************************
 * Round function of the AES-256 key schedule in the classical representation
 * for rounds i s.t. i % 2 == 1. The updated key words are t0-t3 and the
 * SubWord source is a5 (last word of the first half). It differs from
 * 'aes256_rfunc_ks_0' by omitting the RotWord operation and the round
 * constant:
 *   t0 ^= SubWord(a5);  t1 ^= t0;  t2 ^= t1;  t3 ^= t2;
 * In:    t0-t3 = second half of the key state, a5, a6 = sbox_lut
 * Out:   t0-t3 updated, the 4 new words pushed on the stack (16 bytes)
 * Clobb: s8, t6, sp (moved down by 16)
 * Fix: the last byte extraction used 'srl' with an immediate (a binutils
 * alias); replaced with the standard 'srli' for consistency/portability.
 ******************************************************************************/
aes256_rfunc_ks_1:
    andi s8, a5, 0xff // s8 <- a5 & 0xff (byte 0 of a5)
    andi t6, s8, 0xfc // ensure a 4-byte aligned address
    add t6, t6, a6 // t6 points to the right sbox word
    lw t6, 0(t6) // t6 <- sbox word holding sbox[a5 & 0xff]
    andi s8, s8, 0x03 // byte index within the word
    slli s8, s8, 3 // byte index -> bit shift amount
    srl t6, t6, s8 // shift the 32-bit word
    andi t6, t6, 0xff // extract the right byte
    xor t0, t0, t6 // t0 <- t0 ^ sbox[a5 & 0xff]
    srli s8, a5, 8 // s8 <- a5 >> 8
    andi s8, s8, 0xff // s8 <- (a5 >> 8) & 0xff (byte 1 of a5)
    andi t6, s8, 0xfc // ensure a 4-byte aligned address
    add t6, t6, a6 // t6 points to the right sbox word
    lw t6, 0(t6) // t6 <- sbox word holding sbox[(a5 >> 8) & 0xff]
    andi s8, s8, 0x03 // byte index within the word
    slli s8, s8, 3 // byte index -> bit shift amount
    srl t6, t6, s8 // shift the 32-bit word
    andi t6, t6, 0xff // extract the right byte
    slli t6, t6, 8 // t6 <- sbox[(a5 >> 8) & 0xff] << 8
    xor t0, t0, t6 // t0 <- t0 ^ t6
    srli s8, a5, 24 // s8 <- a5 >> 24
    andi s8, s8, 0xff // s8 <- (a5 >> 24) & 0xff (byte 3 of a5)
    andi t6, s8, 0xfc // ensure a 4-byte aligned address
    add t6, t6, a6 // t6 points to the right sbox word
    lw t6, 0(t6) // t6 <- sbox word holding sbox[(a5 >> 24) & 0xff]
    andi s8, s8, 0x03 // byte index within the word
    slli s8, s8, 3 // byte index -> bit shift amount
    srl t6, t6, s8 // shift the 32-bit word
    andi t6, t6, 0xff // extract the right byte
    slli t6, t6, 24 // t6 <- sbox[(a5 >> 24) & 0xff] << 24
    xor t0, t0, t6 // t0 <- t0 ^ (sbox[(a5 >> 24) & 0xff] << 24)
    srli s8, a5, 16 // s8 <- a5 >> 16 (srli, not 'srl reg,imm' alias)
    andi s8, s8, 0xff // s8 <- (a5 >> 16) & 0xff (byte 2 of a5)
    andi t6, s8, 0xfc // ensure a 4-byte aligned address
    add t6, t6, a6 // t6 points to the right sbox word
    lw t6, 0(t6) // t6 <- sbox word holding sbox[(a5 >> 16) & 0xff]
    andi s8, s8, 0x03 // byte index within the word
    slli s8, s8, 3 // byte index -> bit shift amount
    srl t6, t6, s8 // shift the 32-bit word
    andi t6, t6, 0xff // extract the right byte
    slli t6, t6, 16 // t6 <- sbox[(a5 >> 16) & 0xff] << 16
    xor t0, t0, t6 // t0 <- t0 ^ (sbox[(a5 >> 16) & 0xff] << 16)
    xor t1, t1, t0 // update the rkey words
    xor t2, t2, t1 // update the rkey words
    xor t3, t3, t2 // update the rkey words
    addi sp, sp, -16 // store rkey words on stack, to be packed later
    sw t0, 0(sp) // store 5th rkey word
    sw t1, 4(sp) // store 6th rkey word
    sw t2, 8(sp) // store 7th rkey word
    sw t3, 12(sp) // store 8th rkey word
    ret
/******************************************************************************
 * Subroutine: second half of the round-key packing (bit-interleaving) used
 * by the packing_rkey macro below.
 * In:    s0-s7 = partially packed round key, t1 = 0x33333333, t2 = 0x0f0f0f0f
 * Out:   s0-s7 = packed round key
 * Clobb: t6
 ******************************************************************************/
subpacking_rkey:
    swapmove s2,s0, s2, s0, t1, 2, t6 // SWAPMOVE(s2,s0, 0x33333333, 2)
    swapmove s3,s1, s3, s1, t1, 2, t6 // SWAPMOVE(s3,s1, 0x33333333, 2)
    swapmove s6,s4, s6, s4, t1, 2, t6 // SWAPMOVE(s6,s4, 0x33333333, 2)
    swapmove s7,s5, s7, s5, t1, 2, t6 // SWAPMOVE(s7,s5, 0x33333333, 2)
    swapmove s4,s0, s4, s0, t2, 4, t6 // SWAPMOVE(s4,s0, 0x0f0f0f0f, 4)
    swapmove s5,s1, s5, s1, t2, 4, t6 // SWAPMOVE(s5,s1, 0x0f0f0f0f, 4)
    swapmove s6,s2, s6, s2, t2, 4, t6 // SWAPMOVE(s6,s2, 0x0f0f0f0f, 4)
    swapmove s7,s3, s7, s3, t2, 4, t6 // SWAPMOVE(s7,s3, 0x0f0f0f0f, 4)
    ret
/******************************************************************************
 * Macro: packs the 4 round key words rk0-rk3 into the bitsliced
 * representation in s0-s7.
 * In:    rk0-rk3 = round key words, t0 = 0x55555555 (t1/t2 used by the
 *        subroutine)
 * Out:   s0-s7 = packed round key
 * Clobb: t6, ra (the macro calls subpacking_rkey via jal)
 ******************************************************************************/
.macro packing_rkey rk0, rk1, rk2, rk3
    swapmove s1,s0, \rk0,\rk0, t0, 1, t6 // SWAPMOVE(rk0,rk0, 0x55555555, 1)
    swapmove s3,s2, \rk1,\rk1, t0, 1, t6 // SWAPMOVE(rk1,rk1, 0x55555555, 1)
    swapmove s5,s4, \rk2,\rk2, t0, 1, t6 // SWAPMOVE(rk2,rk2, 0x55555555, 1)
    swapmove s7,s6, \rk3,\rk3, t0, 1, t6 // SWAPMOVE(rk3,rk3, 0x55555555, 1)
    jal subpacking_rkey // finish packing (clobbers ra)
.endm
/******************************************************************************
* AES-128 key schedule according to the fully-fixsliced representation.
* The key schedule is done in the classical representation. At the end of each
* round, the current round key is packed to match the fully-fixsliced (ffs)
* representation.
*
* The function prototype is:
* - void aes128_keyschedule_ffs_lut(uint32_t* rkeys, const uint8_t* key);
*   a0 = rkeys output array (11 x 32 bytes), a1 = 16-byte cipher key.
*
* Register roles for the whole routine:
*   a2-a5   current round key in the classical representation
*   a6/a7   sbox_lut / rconst_lut pointers (a7 advances each round)
*   t0-t2   SWAPMOVE masks; t3-t5, s10-s11 inv_shiftrows masks
*   s0-s7   packed round key; s8/t6 scratch; s9 loop counter
* Round keys 1..8 are produced in the loop (2 iterations x 4 rounds, with
* ShiftRows^(-1), ^(-2), ^(-3) and identity applied in turn), then rounds
* 9 and 10 are done after the loop; the final rkey needs no inv_shiftrows
* (the last encryption round applies ShiftRows explicitly).
******************************************************************************/
.globl aes128_keyschedule_ffs_lut
.type aes128_keyschedule_ffs_lut, %function
.align 2
aes128_keyschedule_ffs_lut:
    addi sp, sp, -64 // allocate space on the stack
    sw a0, 0(sp) // save context
    sw a1, 4(sp) // save context
    sw s0, 8(sp) // save context
    sw s1, 12(sp) // save context
    sw s2, 16(sp) // save context
    sw s3, 20(sp) // save context
    sw s4, 24(sp) // save context
    sw s5, 28(sp) // save context
    sw s6, 32(sp) // save context
    sw s7, 36(sp) // save context
    sw s8, 40(sp) // save context
    sw ra, 44(sp) // save context (ra is clobbered by jal below)
    sw s10, 48(sp) // save context
    sw s11, 52(sp) // save context
    sw s9, 56(sp) // save context
    addi s9, zero, 2 // init loop counter (2 x 4 rounds in the loop)
    lw a2, 0(a1) // load 1st key word
    lw a3, 4(a1) // load 2nd key word
    lw a4, 8(a1) // load 3rd key word
    lw a5, 12(a1) // load 4th key word
    la a6, sbox_lut // load sbox address
    la a7, rconst_lut // load rconst address
    li t0, 0x55555555 // mask for SWAPMOVE routines
    li t1, 0x33333333 // mask for SWAPMOVE routines
    li t2, 0x0f0f0f0f // mask for packing_rkey_loop
    li t3, 0x0000ff00 // mask for inv_shiftrows_1/3
    slli t4, t3, 8 // mask 0x00ff0000 for inv_shiftrows_1/3
    slli t5, t3, 16 // mask 0xff000000 for inv_shiftrows_1/3
    li s10, 0x00ff00ff // mask for inv_shiftrows_2
    slli s11, s10, 8 // mask 0xff00ff00 for inv_shiftrows_2
    packing_rkey a2, a3, a4, a5 // pack rkey 0 (cipher key itself)
    store_rkey // rkey 0 is stored uncomplemented
key_exp_loop_ffs: // 2 iterations x 4 rounds per iteration
    addi s9, s9, -1 // decrement loop counter
    jal aes128_rfunc_ks // classical key-schedule round
    jal inv_shiftrows_1 // inv_shiftrows to match semi-fixslicing
    packing_rkey s1, s3, s5, s7
    jal store_not_rkey
    jal aes128_rfunc_ks // classical key-schedule round
    jal inv_shiftrows_2 // inv_shiftrows to match semi-fixslicing
    packing_rkey s1, s3, s5, s7
    jal store_not_rkey
    jal aes128_rfunc_ks // classical key-schedule round
    jal inv_shiftrows_3 // inv_shiftrows to match semi-fixslicing
    packing_rkey s1, s3, s5, s7
    jal store_not_rkey
    jal aes128_rfunc_ks // classical key-schedule round (no inv_shiftrows)
    packing_rkey a2, a3, a4, a5
    jal store_not_rkey
    bne s9, zero, key_exp_loop_ffs
    jal aes128_rfunc_ks // round 9
    jal inv_shiftrows_1 // inv_shiftrows to match semi-fixslicing
    packing_rkey s1, s3, s5, s7
    jal store_not_rkey
    jal aes128_rfunc_ks // round 10 (last; no inv_shiftrows)
    packing_rkey a2, a3, a4, a5
    jal store_not_rkey
    lw a0, 0(sp) // restore context
    lw a1, 4(sp) // restore context
    lw s0, 8(sp) // restore context
    lw s1, 12(sp) // restore context
    lw s2, 16(sp) // restore context
    lw s3, 20(sp) // restore context
    lw s4, 24(sp) // restore context
    lw s5, 28(sp) // restore context
    lw s6, 32(sp) // restore context
    lw s7, 36(sp) // restore context
    lw s8, 40(sp) // restore context
    lw ra, 44(sp) // restore context
    lw s10, 48(sp) // restore context
    lw s11, 52(sp) // restore context
    lw s9, 56(sp) // restore context
    addi sp, sp, 64 // restore stack pointer
    ret // exit
.size aes128_keyschedule_ffs_lut,.-aes128_keyschedule_ffs_lut
/******************************************************************************
* AES-256 key schedule according to the fully-fixsliced representation.
* Contrary to the AES-128 key schedule defined above, because the key state is
* 256-bit long, we first run the entire key schedule in the classical
* representation and then pack all round keys at the end. Note that it requires
* 208 additional bytes on the stack.
*
* The function prototype is:
* - void aes256_keyschedule_ffs_lut(uint32_t* rkeys, const uint8_t* key);
******************************************************************************/
.globl aes256_keyschedule_ffs_lut
.type aes256_keyschedule_ffs_lut, %function
.align 2
aes256_keyschedule_ffs_lut:
    addi sp, sp, -64 // allocate space on the stack for the caller context
    sw a0, 0(sp) // save context (a0 = rkeys output pointer)
    sw a1, 4(sp) // save context (a1 = input key pointer)
    sw s0, 8(sp) // save context (callee-saved registers below)
    sw s1, 12(sp) // save context
    sw s2, 16(sp) // save context
    sw s3, 20(sp) // save context
    sw s4, 24(sp) // save context
    sw s5, 28(sp) // save context
    sw s6, 32(sp) // save context
    sw s7, 36(sp) // save context
    sw s8, 40(sp) // save context
    sw ra, 44(sp) // save context (return address, clobbered by jal below)
    sw s10, 48(sp) // save context
    sw s11, 52(sp) // save context
    sw s9, 56(sp) // save context
    lw a2, 0(a1) // load 1st key word
    lw a3, 4(a1) // load 2nd key word
    lw a4, 8(a1) // load 3rd key word
    lw a5, 12(a1) // load 4th key word
    lw t0, 16(a1) // load 5th key word
    lw t1, 20(a1) // load 6th key word
    lw t2, 24(a1) // load 7th key word
    lw t3, 28(a1) // load 8th key word
    la a6, sbox_lut // load sbox address
    la a7, rconst_lut // load rconst address
    // AES-256 key expansion in the classical representation
    // (alternating the two halves of the 256-bit key state)
    jal aes256_rfunc_ks_0
    jal aes256_rfunc_ks_1
    jal aes256_rfunc_ks_0
    jal aes256_rfunc_ks_1
    jal aes256_rfunc_ks_0
    jal aes256_rfunc_ks_1
    jal aes256_rfunc_ks_0
    jal aes256_rfunc_ks_1
    jal aes256_rfunc_ks_0
    jal aes256_rfunc_ks_1
    jal aes256_rfunc_ks_0
    jal aes256_rfunc_ks_1
    jal aes256_rfunc_ks_0
    // Now pack all round keys to match the fully-fixsliced representation
    li t0, 0x55555555 // mask for SWAPMOVE routines
    li t1, 0x33333333 // mask for SWAPMOVE routines
    li t2, 0x0f0f0f0f // mask for packing_rkey_loop
    li t3, 0x0000ff00 // mask for inv_shiftrows_1/3
    slli t4, t3, 8 // mask for inv_shiftrows_1/3
    slli t5, t3, 16 // mask for inv_shiftrows_1/3
    li s10, 0x00ff00ff // mask for inv_shiftrows_2
    slli s11, s10, 8 // mask for inv_shiftrows_2
    lw a2, 0(a1) // load 1st key word
    lw a3, 4(a1) // load 2nd key word
    lw a4, 8(a1) // load 3rd key word
    lw a5, 12(a1) // load 4th key word
    packing_rkey a2, a3, a4, a5
    store_rkey
    lw a2, 16(a1) // load the 2nd half of the master key
    lw a3, 20(a1)
    lw a4, 24(a1)
    lw a5, 28(a1)
    jal inv_shiftrows_1 // inv_shiftrows to match semi-fixslicing
    packing_rkey s1, s3, s5, s7
    jal store_not_rkey
    addi sp, sp, 144 // point sp at round keys spilled by aes256_rfunc_ks_* (layout set there; TODO confirm)
    addi s9, zero, 3 // init loop counter
aes256_packing_loop_ffs:
    addi s9, s9, -1 // decrement loop counter
    lw a2, 48(sp)
    lw a3, 52(sp)
    lw a4, 56(sp)
    lw a5, 60(sp)
    jal inv_shiftrows_2 // round i s.t. i % 4 == 2
    packing_rkey s1, s3, s5, s7
    jal store_not_rkey
    lw a2, 32(sp)
    lw a3, 36(sp)
    lw a4, 40(sp)
    lw a5, 44(sp)
    jal inv_shiftrows_3 // round i s.t. i % 4 == 3
    packing_rkey s1, s3, s5, s7
    jal store_not_rkey
    lw a2, 16(sp)
    lw a3, 20(sp)
    lw a4, 24(sp)
    lw a5, 28(sp)
    packing_rkey a2, a3, a4, a5 // round i s.t. i % 4 == 0: no inv_shiftrows needed
    jal store_not_rkey
    lw a2, 0(sp)
    lw a3, 4(sp)
    lw a4, 8(sp)
    lw a5, 12(sp)
    jal inv_shiftrows_1 // round i s.t. i % 4 == 1
    packing_rkey s1, s3, s5, s7
    jal store_not_rkey
    addi sp, sp, -64 // points to the next rkey
    bne s9, zero, aes256_packing_loop_ffs
    lw a2, 48(sp) // last round key
    lw a3, 52(sp)
    lw a4, 56(sp)
    lw a5, 60(sp)
    packing_rkey a2, a3, a4, a5
    jal store_not_rkey
    addi sp, sp, 256 // restore stack pointer (undo the key-schedule spill area)
    lw a0, 0(sp) // restore context
    lw a1, 4(sp) // restore context
    lw s0, 8(sp) // restore context
    lw s1, 12(sp) // restore context
    lw s2, 16(sp) // restore context
    lw s3, 20(sp) // restore context
    lw s4, 24(sp) // restore context
    lw s5, 28(sp) // restore context
    lw s6, 32(sp) // restore context
    lw s7, 36(sp) // restore context
    lw s8, 40(sp) // restore context
    lw ra, 44(sp) // restore context
    lw s10, 48(sp) // restore context
    lw s11, 52(sp) // restore context
    lw s9, 56(sp) // restore context
    addi sp, sp, 64 // restore stack pointer
    ret // exit
.size aes256_keyschedule_ffs_lut,.-aes256_keyschedule_ffs_lut
/******************************************************************************
 * AES-128 key schedule according to the semi-fixsliced representation.
* The key schedule is done in the classical representation. At the end of each
* round, the current round key is packed to match the semi-fixsliced (sfs)
* representation.
*
* The function prototype is:
* - void aes128_keyschedule_sfs_lut(uint32_t* rkeys, const uint8_t* key);
******************************************************************************/
.globl aes128_keyschedule_sfs_lut
.type aes128_keyschedule_sfs_lut, %function
.align 2
aes128_keyschedule_sfs_lut:
    addi sp, sp, -56 // allocate space on the stack for the caller context
    sw a0, 0(sp) // save context (a0 = rkeys output pointer)
    sw a1, 4(sp) // save context (a1 = input key pointer)
    sw s0, 8(sp) // save context (callee-saved registers below)
    sw s1, 12(sp) // save context
    sw s2, 16(sp) // save context
    sw s3, 20(sp) // save context
    sw s4, 24(sp) // save context
    sw s5, 28(sp) // save context
    sw s6, 32(sp) // save context
    sw s7, 36(sp) // save context
    sw s8, 40(sp) // save context
    sw s9, 44(sp) // save context
    sw ra, 48(sp) // save context (return address, clobbered by jal below)
    addi s9, zero, 5 // set aes128_rfunc_ks_loop counter (5 x 2 rounds = 10 rounds)
    lw a2, 0(a1) // load 1st key word
    lw a3, 4(a1) // load 2nd key word
    lw a4, 8(a1) // load 3rd key word
    lw a5, 12(a1) // load 4th key word
    la a6, sbox_lut // load sbox address
    la a7, rconst_lut // load rconst address
    li t0, 0x55555555 // mask for SWAPMOVE routines
    li t1, 0x33333333 // mask for SWAPMOVE routines
    li t2, 0x0f0f0f0f // mask for packing_rkey_loop
    li t3, 0x0000ff00 // mask for inv_shiftrows_1
    slli t4, t3, 8 // mask for inv_shiftrows_1
    slli t5, t3, 16 // mask for inv_shiftrows_1
    packing_rkey a2, a3, a4, a5 // pack the master key (= round key 0)
    store_rkey
key_exp_loop_sfs:
    addi s9, s9, -1 // decrement loop counter
    jal aes128_rfunc_ks // one classical key-schedule round (odd round)
    jal inv_shiftrows_1 // inv_shiftrows to match semi-fixslicing
    packing_rkey s1, s3, s5, s7
    jal store_not_rkey
    jal aes128_rfunc_ks // one classical key-schedule round (even round)
    packing_rkey a2, a3, a4, a5
    jal store_not_rkey
    bne s9, zero, key_exp_loop_sfs
    lw a0, 0(sp) // restore context
    lw a1, 4(sp) // restore context
    lw s0, 8(sp) // restore context
    lw s1, 12(sp) // restore context
    lw s2, 16(sp) // restore context
    lw s3, 20(sp) // restore context
    lw s4, 24(sp) // restore context
    lw s5, 28(sp) // restore context
    lw s6, 32(sp) // restore context
    lw s7, 36(sp) // restore context
    lw s8, 40(sp) // restore context
    lw s9, 44(sp) // restore context
    lw ra, 48(sp) // restore context
    addi sp, sp, 56 // restore stack pointer
    ret // exit
.size aes128_keyschedule_sfs_lut,.-aes128_keyschedule_sfs_lut
/******************************************************************************
* AES-256 key schedule according to the semi-fixsliced representation.
* Contrary to the AES-128 key schedule defined above, because the key state is
* 256-bit long, we first run the entire key schedule in the classical
* representation and then pack all round keys at the end. Note that it requires
* 208 additional bytes on the stack.
*
* The function prototype is:
* - void aes256_keyschedule_sfs_lut(uint32_t* rkeys, const uint8_t* key);
******************************************************************************/
.globl aes256_keyschedule_sfs_lut
.type aes256_keyschedule_sfs_lut, %function
.align 2
aes256_keyschedule_sfs_lut:
    addi sp, sp, -64 // allocate space on the stack for the caller context
    sw a0, 0(sp) // save context (a0 = rkeys output pointer)
    sw a1, 4(sp) // save context (a1 = input key pointer)
    sw s0, 8(sp) // save context (callee-saved registers below)
    sw s1, 12(sp) // save context
    sw s2, 16(sp) // save context
    sw s3, 20(sp) // save context
    sw s4, 24(sp) // save context
    sw s5, 28(sp) // save context
    sw s6, 32(sp) // save context
    sw s7, 36(sp) // save context
    sw s8, 40(sp) // save context
    sw ra, 44(sp) // save context (return address, clobbered by jal below)
    sw s10, 48(sp) // save context
    sw s11, 52(sp) // save context
    sw s9, 56(sp) // save context
    lw a2, 0(a1) // load 1st key word
    lw a3, 4(a1) // load 2nd key word
    lw a4, 8(a1) // load 3rd key word
    lw a5, 12(a1) // load 4th key word
    lw t0, 16(a1) // load 5th key word
    lw t1, 20(a1) // load 6th key word
    lw t2, 24(a1) // load 7th key word
    lw t3, 28(a1) // load 8th key word
    la a6, sbox_lut // load sbox address
    la a7, rconst_lut // load rconst address
    // AES-256 key expansion in the classical representation
    // (alternating the two halves of the 256-bit key state)
    jal aes256_rfunc_ks_0
    jal aes256_rfunc_ks_1
    jal aes256_rfunc_ks_0
    jal aes256_rfunc_ks_1
    jal aes256_rfunc_ks_0
    jal aes256_rfunc_ks_1
    jal aes256_rfunc_ks_0
    jal aes256_rfunc_ks_1
    jal aes256_rfunc_ks_0
    jal aes256_rfunc_ks_1
    jal aes256_rfunc_ks_0
    jal aes256_rfunc_ks_1
    jal aes256_rfunc_ks_0
    // Now pack all round keys to match the semi-fixsliced representation
    li t0, 0x55555555 // mask for SWAPMOVE routines
    li t1, 0x33333333 // mask for SWAPMOVE routines
    li t2, 0x0f0f0f0f // mask for packing_rkey_loop
    li t3, 0x0000ff00 // mask for inv_shiftrows_1/3
    slli t4, t3, 8 // mask for inv_shiftrows_1/3
    slli t5, t3, 16 // mask for inv_shiftrows_1/3
    li s10, 0x00ff00ff // mask for inv_shiftrows_2
    slli s11, s10, 8 // mask for inv_shiftrows_2
    lw a2, 0(a1) // load 1st key word
    lw a3, 4(a1) // load 2nd key word
    lw a4, 8(a1) // load 3rd key word
    lw a5, 12(a1) // load 4th key word
    packing_rkey a2, a3, a4, a5 // pack the 1st half of the master key (= round key 0)
    store_rkey
    lw a2, 16(a1) // load the 2nd half of the master key
    lw a3, 20(a1)
    lw a4, 24(a1)
    lw a5, 28(a1)
    jal inv_shiftrows_1 // inv_shiftrows to match semi-fixslicing
    packing_rkey s1, s3, s5, s7
    jal store_not_rkey
    addi sp, sp, 176 // point sp at round keys spilled by aes256_rfunc_ks_* (layout set there; TODO confirm)
    addi s9, zero, 6 // init loop counter
aes256_packing_loop_sfs:
    addi s9, s9, -1 // decrement loop counter
    lw a2, 16(sp)
    lw a3, 20(sp)
    lw a4, 24(sp)
    lw a5, 28(sp)
    packing_rkey a2, a3, a4, a5 // even round: no inv_shiftrows needed
    jal store_not_rkey
    lw a2, 0(sp)
    lw a3, 4(sp)
    lw a4, 8(sp)
    lw a5, 12(sp)
    jal inv_shiftrows_1 // odd round: inv_shiftrows to match semi-fixslicing
    packing_rkey s1, s3, s5, s7
    jal store_not_rkey
    addi sp, sp, -32 // points to the next rkey
    bne s9, zero, aes256_packing_loop_sfs
    lw a2, 16(sp) // last round key
    lw a3, 20(sp)
    lw a4, 24(sp)
    lw a5, 28(sp)
    packing_rkey a2, a3, a4, a5
    jal store_not_rkey
    addi sp, sp, 224 // restore stack pointer (undo the key-schedule spill area)
    lw a0, 0(sp) // restore context
    lw a1, 4(sp) // restore context
    lw s0, 8(sp) // restore context
    lw s1, 12(sp) // restore context
    lw s2, 16(sp) // restore context
    lw s3, 20(sp) // restore context
    lw s4, 24(sp) // restore context
    lw s5, 28(sp) // restore context
    lw s6, 32(sp) // restore context
    lw s7, 36(sp) // restore context
    lw s8, 40(sp) // restore context
    lw ra, 44(sp) // restore context
    lw s10, 48(sp) // restore context
    lw s11, 52(sp) // restore context
    lw s9, 56(sp) // restore context
    addi sp, sp, 64 // restore stack pointer
    ret // exit
.size aes256_keyschedule_sfs_lut,.-aes256_keyschedule_sfs_lut
|
aadomn/aes
| 52,117
|
riscv/fixslicing/aes_encrypt.S
|
/******************************************************************************
* Fixsliced AES-128 and AES-256 implementations (encryption-only) in RV32I
* assembly language, using the base instruction set only.
*
* See the paper at https://eprint.iacr.org/2020/1123.pdf for more details.
*
* @author Alexandre Adomnicai, Nanyang Technological University, Singapore
* alexandre.adomnicai@ntu.edu.sg
*
* @date August 2020
******************************************************************************/
.text
/******************************************************************************
* Implementation of the SWAPMOVE technique for the packing/unpacking routines.
*
* Parameters:
* - out0-out1 output registers
* - in0-in1 input registers
* - mask mask
 * - imm           shift value (must be an immediate value)
* - tmp temporary register
******************************************************************************/
.macro swapmove out0,out1, in0,in1, mask, imm, tmp
    srli \tmp, \in0, \imm           // tmp <- in0 >> imm
    xor \tmp, \tmp, \in1            // tmp <- (in0 >> imm) ^ in1
    and \tmp, \tmp, \mask           // tmp <- ((in0 >> imm) ^ in1) & mask
    xor \out1, \in1, \tmp           // out1 <- in1 with selected bits swapped in from in0
    slli \tmp, \tmp, \imm           // move the swapped bits back to their in0 position
    xor \out0, \in0, \tmp           // out0 <- in0 with selected bits swapped in from in1
.endm
/******************************************************************************
* Computes a 32-bit rotation to the right.
*
* Parameters:
* - out output register
* - in input register
* - imm rotation value (must be an immediate value)
* - tmp0-1 temporary registers
******************************************************************************/
.macro rori out, in, imm, tmp0, tmp1
    srli \tmp0, \in, \imm           // tmp0 <- in >> imm (low part)
    slli \tmp1, \in, 32-\imm        // tmp1 <- in << (32-imm) (wrapped-around part)
    or \out, \tmp0, \tmp1           // out <- in >>> imm
.endm
/******************************************************************************
* Computes byte-wise rotations on a 32-bit word
*
* Parameters:
* - out output register
* - in input register
* - imm rotation value (must be an immediate value)
* - mask0-1 masks
* - tmp0-1 temporary registers
******************************************************************************/
.macro byte_rori out, in, mask0, mask1, imm, tmp0, tmp1
    srli \tmp0, \in, \imm           // shift all bytes right by imm
    and \tmp0, \tmp0, \mask0        // keep only in-byte bits (discard cross-byte spill)
    and \tmp1, \in, \mask1          // select the low imm bits of each byte
    slli \tmp1, \tmp1, 8-\imm       // move them to the top of each byte
    or \out, \tmp0, \tmp1           // out <- each byte of in rotated right by imm
.endm
/******************************************************************************
* Computes byte-wise rotations on a 32-bit word
*
* Parameters:
* - out output register
* - in input register
* - rval rotation value (must be a register)
* - inv_rval 8 - rotation value (must be a register)
* - mask0-1 masks
* - tmp0-1 temporary registers
******************************************************************************/
.macro byte_ror out, in, mask0, mask1, rval, inv_rval, tmp0, tmp1
    srl \tmp0, \in, \rval           // shift all bytes right by rval
    and \tmp0, \tmp0, \mask0        // keep only in-byte bits (discard cross-byte spill)
    and \tmp1, \in, \mask1          // select the low rval bits of each byte
    sll \tmp1, \tmp1, \inv_rval     // move them to the top of each byte (inv_rval = 8 - rval)
    or \out, \tmp0, \tmp1           // out <- each byte of in rotated right by rval
.endm
/******************************************************************************
* Addition of the round key on a quarter of the internal state.
*
* Parameters:
* - b0-b7 a quarter of the state
* - rk pointer to the round key
* - r0-r1 temporary registers
******************************************************************************/
.macro addroundkey b0,b1,b2,b3,b4,b5,b6,b7, rk, r0,r1
    // rk is an offset(base) operand, e.g. 0(a0); 0+\rk expands to 0+0(a0)
    lw \r0, 0+\rk                   // load rkey word 0
    lw \r1, 4+\rk                   // load rkey word 1
    xor \b0, \b0, \r0
    xor \b1, \b1, \r1
    lw \r0, 8+\rk                   // load rkey word 2
    lw \r1, 12+\rk                  // load rkey word 3
    xor \b2, \b2, \r0
    xor \b3, \b3, \r1
    lw \r0, 16+\rk                  // load rkey word 4
    lw \r1, 20+\rk                  // load rkey word 5
    xor \b4, \b4, \r0
    xor \b5, \b5, \r1
    lw \r0, 24+\rk                  // load rkey word 6
    lw \r1, 28+\rk                  // load rkey word 7
    xor \b6, \b6, \r0
    xor \b7, \b7, \r1
.endm
/******************************************************************************
* Subroutine that computes the AddRoundKey and the S-box.
* Credits to https://github.com/Ko-/riscvcrypto for the S-box implementation
******************************************************************************/
// In:   s0-s7 = bitsliced state, a0 = pointer to current round key
// Out:  s0-s7 = S-box(state ^ rkey), a0 advanced by 32 bytes
// Clob: t0-t6, a1-a7, s8-s11
// NOTE(review): the complements on S1/S2/S6/S7 (the "^ 1" terms below) are not
// computed here; they appear to be absorbed into the round keys (store_not_rkey
// in the key schedule) — confirm against the key-schedule routines.
ark_sbox:
    addroundkey s0,s1,s2,s3,s4,s5,s6,s7, 0(a0), s8,s9
    addi a0, a0, 32                 // advance round-key pointer
    xor t0, s3, s5                  // Exec y14 = U3 ^ U5 into r0
    xor t1, s0, s6                  // Exec y13 = U0 ^ U6 into r1
    xor t2, t1, t0                  // Exec y12 = y13 ^ y14 into r2
    xor t3, s4, t2                  // Exec t1 = U4 ^ y12 into r3
    xor t4, t3, s5                  // Exec y15 = t1 ^ U5 into r4
    and t5, t2, t4                  // Exec t2 = y12 & y15 into r5
    xor t6, t4, s7                  // Exec y6 = y15 ^ U7 into r6
    xor a1, t3, s1                  // Exec y20 = t1 ^ U1 into r7
    xor a2, s0, s3                  // Exec y9 = U0 ^ U3 into r8
    xor a3, a1, a2                  // Exec y11 = y20 ^ y9 into r9
    and a4, a2, a3                  // Exec t12 = y9 & y11 into r10
    xor a5, s7, a3                  // Exec y7 = U7 ^ y11 into r11
    xor a6, s0, s5                  // Exec y8 = U0 ^ U5 into r12
    xor a7, s1, s2                  // Exec t0 = U1 ^ U2 into r13
    xor s8, t4, a7                  // Exec y10 = y15 ^ t0 into r14
    xor s9, s8, a3                  // Exec y17 = y10 ^ y11 into r15
    and s10, t0, s9                 // Exec t13 = y14 & y17 into r16
    xor s11, s10, a4                // Exec t14 = t13 ^ t12 into r17
    xor s1, s8, a6                  // Exec y19 = y10 ^ y8 into b1
    and s2, a6, s8                  // Exec t15 = y8 & y10 into b2
    xor s2, s2, a4                  // Exec t16 = t15 ^ t12 into b2
    xor s4, a7, a3                  // Exec y16 = t0 ^ y11 into b4
    xor s5, t1, s4                  // Exec y21 = y13 ^ y16 into b5
    and t3, t1, s4                  // Exec t7 = y13 & y16 into r3
    xor a4, s0, s4                  // Exec y18 = U0 ^ y16 into r10
    xor a7, a7, s7                  // Exec y1 = t0 ^ U7 into r13
    xor s3, a7, s3                  // Exec y4 = y1 ^ U3 into b3
    and s10, s3, s7                 // Exec t5 = y4 & U7 into r16
    xor s10, s10, t5                // Exec t6 = t5 ^ t2 into r16
    xor s10, s10, s2                // Exec t18 = t6 ^ t16 into r16
    xor s1, s10, s1                 // Exec t22 = t18 ^ y19 into b1
    xor s0, a7, s0                  // Exec y2 = y1 ^ U0 into b0
    and s10, s0, a5                 // Exec t10 = y2 & y7 into r16
    xor s10, s10, t3                // Exec t11 = t10 ^ t7 into r16
    xor s2, s10, s2                 // Exec t20 = t11 ^ t16 into b2
    xor s2, s2, a4                  // Exec t24 = t20 ^ y18 into b2
    xor s6, a7, s6                  // Exec y5 = y1 ^ U6 into b6
    and a4, s6, a7                  // Exec t8 = y5 & y1 into r10
    xor t3, a4, t3                  // Exec t9 = t8 ^ t7 into r3
    xor t3, t3, s11                 // Exec t19 = t9 ^ t14 into r3
    xor s5, t3, s5                  // Exec t23 = t19 ^ y21 into b5
    xor t3, s6, a6                  // Exec y3 = y5 ^ y8 into r3
    and a4, t3, t6                  // Exec t3 = y3 & y6 into r10
    xor t5, a4, t5                  // Exec t4 = t3 ^ t2 into r5
    xor t5, t5, a1                  // Exec t17 = t4 ^ y20 into r5
    xor t5, t5, s11                 // Exec t21 = t17 ^ t14 into r5
    and a1, t5, s5                  // Exec t26 = t21 & t23 into r7
    xor a4, s2, a1                  // Exec t27 = t24 ^ t26 into r10
    xor a1, s1, a1                  // Exec t31 = t22 ^ t26 into r7
    xor t5, t5, s1                  // Exec t25 = t21 ^ t22 into r5
    and s10, t5, a4                 // Exec t28 = t25 & t27 into r16
    xor s1, s10, s1                 // Exec t29 = t28 ^ t22 into b1
    and s11, s1, s0                 // Exec z14 = t29 & y2 into r17
    and a5, s1, a5                  // Exec z5 = t29 & y7 into r11
    xor s10, s5, s2                 // Exec t30 = t23 ^ t24 into r16
    and a1, a1, s10                 // Exec t32 = t31 & t30 into r7
    xor a1, a1, s2                  // Exec t33 = t32 ^ t24 into r7
    xor s10, a4, a1                 // Exec t35 = t27 ^ t33 into r16
    and s2, s2, s10                 // Exec t36 = t24 & t35 into b2
    xor a4, a4, s2                  // Exec t38 = t27 ^ t36 into r10
    and a4, s1, a4                  // Exec t39 = t29 & t38 into r10
    xor t5, t5, a4                  // Exec t40 = t25 ^ t39 into r5
    xor a4, s1, t5                  // Exec t43 = t29 ^ t40 into r10
    and s4, a4, s4                  // Exec z3 = t43 & y16 into b4
    xor a5, s4, a5                  // Exec tc12 = z3 ^ z5 into r11
    and t1, a4, t1                  // Exec z12 = t43 & y13 into r1
    and s6, t5, s6                  // Exec z13 = t40 & y5 into b6
    and a4, t5, a7                  // Exec z4 = t40 & y1 into r10
    xor s4, s4, a4                  // Exec tc6 = z3 ^ z4 into b4
    xor s5, s5, a1                  // Exec t34 = t23 ^ t33 into b5
    xor s2, s2, s5                  // Exec t37 = t36 ^ t34 into b2
    xor s5, t5, s2                  // Exec t41 = t40 ^ t37 into b5
    and t5, s5, s8                  // Exec z8 = t41 & y10 into r5
    and a4, s5, a6                  // Exec z17 = t41 & y8 into r10
    xor a6, a1, s2                  // Exec t44 = t33 ^ t37 into r12
    and t4, a6, t4                  // Exec z0 = t44 & y15 into r4
    and t2, a6, t2                  // Exec z9 = t44 & y12 into r2
    and t3, s2, t3                  // Exec z10 = t37 & y3 into r3
    and s2, s2, t6                  // Exec z1 = t37 & y6 into b2
    xor s2, s2, t4                  // Exec tc5 = z1 ^ z0 into b2
    xor a7, s4, s2                  // Exec tc11 = tc6 ^ tc5 into r13
    and s3, a1, s3                  // Exec z11 = t33 & y4 into b3
    xor s1, s1, a1                  // Exec t42 = t29 ^ t33 into b1
    xor s5, s1, s5                  // Exec t45 = t42 ^ t41 into b5
    and t6, s5, s9                  // Exec z7 = t45 & y17 into r6
    xor s4, t6, s4                  // Exec tc8 = z7 ^ tc6 into b4
    and t0, s5, t0                  // Exec z16 = t45 & y14 into r0
    and s5, s1, a3                  // Exec z6 = t42 & y11 into b5
    xor s5, s5, s4                  // Exec tc16 = z6 ^ tc8 into b5
    and s1, s1, a2                  // Exec z15 = t42 & y9 into b1
    xor t6, s1, s5                  // Exec tc20 = z15 ^ tc16 into r6
    xor t0, s1, t0                  // Exec tc1 = z15 ^ z16 into r0
    xor s1, t3, t0                  // Exec tc2 = z10 ^ tc1 into b1
    xor s9, s1, s3                  // Exec tc21 = tc2 ^ z11 into r15
    xor t2, t2, s1                  // Exec tc3 = z9 ^ tc2 into r2
    xor s0, t2, s5                  // Exec S0 = tc3 ^ tc16 into b0
    xor s3, t2, a7                  // Exec S3 = tc3 ^ tc11 into b3
    xor s1, s3, s5                  // Exec S1 = S3 ^ tc16 ^ 1 into b1
    xor t0, s6, t0                  // Exec tc13 = z13 ^ tc1 into r0
    and s5, a1, s7                  // Exec z2 = t33 & U7 into b5
    xor s8, t4, s5                  // Exec tc4 = z0 ^ z2 into r14
    xor s6, t1, s8                  // Exec tc7 = z12 ^ tc4 into b6
    xor s6, t5, s6                  // Exec tc9 = z8 ^ tc7 into b6
    xor s6, s4, s6                  // Exec tc10 = tc8 ^ tc9 into b6
    xor s2, s11, s6                 // Exec tc17 = z14 ^ tc10 into b2
    xor s5, s9, s2                  // Exec S5 = tc21 ^ tc17 into b5
    xor s2, s2, t6                  // Exec tc26 = tc17 ^ tc20 into b2
    xor s2, s2, a4                  // Exec S2 = tc26 ^ z17 ^ 1 into b2
    xor s8, s8, a5                  // Exec tc14 = tc4 ^ tc12 into r14
    xor t0, t0, s8                  // Exec tc18 = tc13 ^ tc14 into r0
    xor s6, s6, t0                  // Exec S6 = tc10 ^ tc18 ^ 1 into b6
    xor s7, t1, t0                  // Exec S7 = z12 ^ tc18 ^ 1 into b7
    xor s4, s8, s3                  // Exec S4 = tc14 ^ S3 into b4
    ret
/******************************************************************************
* Computation of the MixColumns transformation in the fixsliced representation.
* Note that it can be used for rounds i s.t. i % 4 == 0 or i % 4 == 2.
*
* Requirements:
* - for i % 4 == 0:
* - t4 to contain 0x03030303
* - t5 to contain 0x3f3f3f3f
* - a2 to contain 6
* - a3 to contain 2
* - for i % 4 == 2:
* - t4 to contain 0x3f3f3f3f
* - t5 to contain 0x03030303
* - a2 to contain 2
* - a3 to contain 6
******************************************************************************/
// In:   s0-s7 = bitsliced state; t4/t5/a2/a3 preset per the header above
// Out:  s0-s7 = MixColumns(state)
// Clob: t0-t3, t6, a1, a4, s8, s9
mixcolumns0:
    li t6, 0x0f0f0f0f                   // mask for BYTE_ROR_4 (byte_rori)
    byte_ror t3,s0,t4,t5,a2,a3,s8,s9    // t3 <- BYTE_ROR_6(state[0])
    rori t3, t3, 8, s8, s9              // t3 <- BYTE_ROR_6(state[0]) >>> 8
    xor t0, s0, t3                      // t0 <- state[0] ^ t3
    byte_ror t1,s7,t4,t5,a2,a3,s8,s9    // t1 <- BYTE_ROR_6(state[7])
    rori t1, t1, 8, s8, s9              // t1 <- BYTE_ROR_6(state[7]) >>> 8
    xor t2, s7, t1                      // t2 <- state[7] ^ t1
    byte_rori a1, t2, t6, t6, 4, s8, s9 // a1 <- BYTE_ROR_4(t2)
    rori a1, a1, 16, s8, s9             // a1 <- BYTE_ROR_4(t2) >>> 16
    xor s7, a1, t0                      // s7 <- a1 ^ t0
    xor s7, s7, t1                      // s7 <- a1 ^ t0 ^ t1
    byte_ror t1,s6,t4,t5,a2,a3,s8,s9    // t1 <- BYTE_ROR_6(state[6])
    rori t1, t1, 8, s8, s9              // t1 <- BYTE_ROR_6(state[6]) >>> 8
    xor a1, t1, s6                      // a1 <- t1 ^ s6
    byte_rori a4, a1, t6, t6, 4, s8, s9 // a4 <- BYTE_ROR_4(a1)
    rori a4, a4, 16, s8, s9             // a4 <- BYTE_ROR_4(a1) >>> 16
    xor s6, t2, t0                      // s6 <- t2 ^ t0
    xor s6, s6, t1                      // s6 <- t2 ^ t0 ^ t1
    xor s6, s6, a4                      // s6 <- t2 ^ t0 ^ t1 ^ a4
    byte_ror t1,s5,t4,t5,a2,a3,s8,s9    // t1 <- BYTE_ROR_6(state[5])
    rori t1, t1, 8, s8, s9              // t1 <- BYTE_ROR_6(state[5]) >>> 8
    xor t2, t1, s5                      // t2 <- t1 ^ state[5]
    byte_rori a4, t2, t6, t6, 4, s8, s9 // a4 <- BYTE_ROR_4(t2)
    rori a4, a4, 16, s8, s9             // a4 <- BYTE_ROR_4(t2) >>> 16
    xor s5, a1, t1                      // s5 <- a1 ^ t1
    xor s5, s5, a4                      // s5 <- a1 ^ t1 ^ a4
    byte_ror t1,s4,t4,t5,a2,a3,s8,s9    // t1 <- BYTE_ROR_6(state[4])
    rori t1, t1, 8, s8, s9              // t1 <- BYTE_ROR_6(state[4]) >>> 8
    xor a1, t1, s4                      // a1 <- t1 ^ state[4]
    byte_rori a4, a1, t6, t6, 4, s8, s9 // a4 <- BYTE_ROR_4(a1)
    rori a4, a4, 16, s8, s9             // a4 <- BYTE_ROR_4(a1) >>> 16
    xor s4, t2, t0                      // s4 <- t2 ^ t0
    xor s4, s4, t1                      // s4 <- t2 ^ t0 ^ t1
    xor s4, s4, a4                      // s4 <- t2 ^ t0 ^ t1 ^ a4
    byte_ror t1,s3,t4,t5,a2,a3,s8,s9    // t1 <- BYTE_ROR_6(state[3])
    rori t1, t1, 8, s8, s9              // t1 <- BYTE_ROR_6(state[3]) >>> 8
    xor t2, t1, s3                      // t2 <- t1 ^ state[3]
    byte_rori a4, t2, t6, t6, 4, s8, s9 // a4 <- BYTE_ROR_4(t2)
    rori a4, a4, 16, s8, s9             // a4 <- BYTE_ROR_4(t2) >>> 16
    xor s3, a1, t0                      // s3 <- a1 ^ t0
    xor s3, s3, t1                      // s3 <- a1 ^ t0 ^ t1
    xor s3, s3, a4                      // s3 <- a1 ^ t0 ^ t1 ^ a4
    byte_ror t1,s2,t4,t5,a2,a3,s8,s9    // t1 <- BYTE_ROR_6(state[2])
    rori t1, t1, 8, s8, s9              // t1 <- BYTE_ROR_6(state[2]) >>> 8
    xor a1, t1, s2                      // a1 <- t1 ^ state[2]
    byte_rori a4, a1, t6, t6, 4, s8, s9 // a4 <- BYTE_ROR_4(a1)
    rori a4, a4, 16, s8, s9             // a4 <- BYTE_ROR_4(a1) >>> 16
    xor s2, t2, t1                      // s2 <- t2 ^ t1
    xor s2, s2, a4                      // s2 <- t2 ^ t1 ^ a4
    byte_ror t1,s1,t4,t5,a2,a3,s8,s9    // t1 <- BYTE_ROR_6(state[1])
    rori t1, t1, 8, s8, s9              // t1 <- BYTE_ROR_6(state[1]) >>> 8
    xor t2, t1, s1                      // t2 <- t1 ^ state[1]
    byte_rori a4, t2, t6, t6, 4, s8, s9 // a4 <- BYTE_ROR_4(t2)
    rori a4, a4, 16, s8, s9             // a4 <- BYTE_ROR_4(t2) >>> 16
    xor s1, a1, t1                      // s1 <- a1 ^ t1
    xor s1, s1, a4                      // s1 <- a1 ^ t1 ^ a4
    byte_rori t0, t0, t6, t6, 4, s8, s9 // t0 <- BYTE_ROR_4(t0)
    rori t0, t0, 16, s8, s9             // t0 <- BYTE_ROR_4(t0) >>> 16
    xor s0, t2, t3                      // s0 <- t2 ^ t3
    xor s0, s0, t0                      // s0 <- t2 ^ t3 ^ t0
    ret
/******************************************************************************
* Computation of the MixColumns transformation in the fixsliced representation.
* For fully-fixsliced implementations only, for round i s.t. (i%4) == 1.
******************************************************************************/
// In:   s0-s7 = bitsliced state
// Out:  s0-s7 = MixColumns(state) for rounds i s.t. i % 4 == 1
// Clob: t0-t5, a2, s8, s9
mixcolumns1:
    li t5, 0x0f0f0f0f                   // mask for byte_rori
    byte_rori t0, s0, t5, t5, 4, s8, s9 // t0 <- BYTE_ROR_4(state[0])
    rori a2, t0, 8, s8, s9              // a2 <- BYTE_ROR_4(state[0]) >>> 8 (kept live until the end)
    xor t0, a2, s0                      // t0 <- state[0] ^ a2
    byte_rori t1, s7, t5, t5, 4, s8, s9 // t1 <- BYTE_ROR_4(state[7])
    rori t1, t1, 8, s8, s9              // t1 <- BYTE_ROR_4(state[7]) >>> 8
    xor t1, t1, s7                      // t1 <- state[7] ^ t1
    xor t3, t1, t0                      // t3 <- t1 ^ t0
    rori t4, t1, 16, s8, s9             // t4 <- t1 >>> 16
    xor s7, s7, t3                      // s7 <- s7 ^ t1 ^ t0
    xor s7, s7, t4                      // s7 <- s7 ^ t1 ^ t0 ^ (t1 >>> 16)
    byte_rori t1, s6, t5, t5, 4, s8, s9 // t1 <- BYTE_ROR_4(state[6])
    rori t1, t1, 8, s8, s9              // t1 <- BYTE_ROR_4(state[6]) >>> 8
    xor t2, s6, t1                      // t2 <- state[6] ^ t1
    xor s6, t3, t1                      // s6 <- t3 ^ t1
    rori t4, t2, 16, s8, s9             // t4 <- t2 >>> 16
    xor s6, s6, t4                      // s6 <- s6 ^ (t2 >>> 16)
    byte_rori t1, s5, t5, t5, 4, s8, s9 // t1 <- BYTE_ROR_4(state[5])
    rori t1, t1, 8, s8, s9              // t1 <- BYTE_ROR_4(state[5]) >>> 8
    xor t3, s5, t1                      // t3 <- state[5] ^ t1
    xor s5, t2, t1                      // s5 <- t2 ^ t1
    rori t4, t3, 16, s8, s9             // t4 <- t3 >>> 16
    xor s5, s5, t4                      // s5 <- s5 ^ (t3 >>> 16)
    byte_rori t1, s4, t5, t5, 4, s8, s9 // t1 <- BYTE_ROR_4(state[4])
    rori t1, t1, 8, s8, s9              // t1 <- BYTE_ROR_4(state[4]) >>> 8
    xor t2, s4, t1                      // t2 <- state[4] ^ t1
    xor s4, t3, t1                      // s4 <- t3 ^ t1
    xor s4, s4, t0                      // s4 <- t3 ^ t1 ^ t0
    rori t4, t2, 16, s8, s9             // t4 <- t2 >>> 16
    xor s4, s4, t4                      // s4 <- s4 ^ (t2 >>> 16)
    byte_rori t1, s3, t5, t5, 4, s8, s9 // t1 <- BYTE_ROR_4(state[3])
    rori t1, t1, 8, s8, s9              // t1 <- BYTE_ROR_4(state[3]) >>> 8
    xor t3, s3, t1                      // t3 <- state[3] ^ t1
    xor s3, t2, t1                      // s3 <- t2 ^ t1
    xor s3, s3, t0                      // s3 <- t2 ^ t1 ^ t0
    rori t4, t3, 16, s8, s9             // t4 <- t3 >>> 16
    xor s3, s3, t4                      // s3 <- s3 ^ (t3 >>> 16)
    byte_rori t1, s2, t5, t5, 4, s8, s9 // t1 <- BYTE_ROR_4(state[2])
    rori t1, t1, 8, s8, s9              // t1 <- BYTE_ROR_4(state[2]) >>> 8
    xor t2, s2, t1                      // t2 <- state[2] ^ t1
    xor s2, t3, t1                      // s2 <- t3 ^ t1
    rori t4, t2, 16, s8, s9             // t4 <- t2 >>> 16
    xor s2, s2, t4                      // s2 <- s2 ^ (t2 >>> 16)
    byte_rori t1, s1, t5, t5, 4, s8, s9 // t1 <- BYTE_ROR_4(state[1])
    rori t1, t1, 8, s8, s9              // t1 <- BYTE_ROR_4(state[1]) >>> 8
    xor t3, s1, t1                      // t3 <- state[1] ^ t1
    xor s1, t2, t1                      // s1 <- t2 ^ t1
    rori t4, t3, 16, s8, s9             // t4 <- t3 >>> 16
    xor s1, s1, t4                      // s1 <- s1 ^ (t3 >>> 16)
    xor t2, s0, a2                      // t2 <- state[0] ^ a2 (recompute t0 of the first step)
    xor s0, t3, a2                      // s0 <- t3 ^ a2
    rori t4, t2, 16, s8, s9             // t4 <- t2 >>> 16
    xor s0, s0, t4                      // s0 <- s0 ^ (t2 >>> 16)
    ret
/******************************************************************************
* Computation of the MixColumns transformation in the fixsliced representation.
* For fully-fixsliced implementations, it is used for rounds i s.t. (i%4) == 3.
* For semi-fixsliced implementations, it is used for rounds i s.t. (i%2) == 1.
* Based on Käsper-Schwabe, similar to https://github.com/Ko-/riscvcrypto.
******************************************************************************/
// In:   s0-s7 = bitsliced state
// Out:  s0-s7 = MixColumns(state) (Käsper-Schwabe style, word rotations only)
// Clob: t0-t5, s8, s9
mixcolumns3:
    rori t3, s7, 8, s8, s9              // t3 <- state[7] >>> 8
    xor t0, t3, s7                      // t0 <- state[7] ^ (state[7] >>> 8)
    rori t5, s0, 8, s8, s9              // t5 <- state[0] >>> 8 (kept live until the end)
    xor t2, t5, s0                      // t2 <- state[0] ^ (state[0] >>> 8)
    rori t4, t0, 16, s8, s9             // t4 <- t0 >>> 16
    xor s7, t3, t4                      // s7 <- t0 >>> 16 ^ state[7] >>> 8
    xor s7, s7, t2                      // s7 <- s7 ^ t2
    rori t3, s6, 8, s8, s9              // t3 <- state[6] >>> 8
    xor t1, t3, s6                      // t1 <- state[6] ^ (state[6] >>> 8)
    rori t4, t1, 16, s8, s9             // t4 <- t1 >>> 16
    xor s6, t3, t4                      // s6 <- t1 >>> 16 ^ state[6] >>> 8
    xor s6, s6, t0                      // s6 <- s6 ^ t0
    xor s6, s6, t2                      // s6 <- s6 ^ t2
    rori t3, s5, 8, s8, s9              // t3 <- state[5] >>> 8
    xor t0, t3, s5                      // t0 <- state[5] ^ (state[5] >>> 8)
    rori t4, t0, 16, s8, s9             // t4 <- t0 >>> 16
    xor s5, t3, t4                      // s5 <- t0 >>> 16 ^ state[5] >>> 8
    xor s5, s5, t1                      // s5 <- s5 ^ t1
    rori t3, s4, 8, s8, s9              // t3 <- state[4] >>> 8
    xor t1, t3, s4                      // t1 <- state[4] ^ (state[4] >>> 8)
    rori t4, t1, 16, s8, s9             // t4 <- t1 >>> 16
    xor s4, t3, t4                      // s4 <- t1 >>> 16 ^ state[4] >>> 8
    xor s4, s4, t0                      // s4 <- s4 ^ t0
    xor s4, s4, t2                      // s4 <- s4 ^ t2
    rori t3, s3, 8, s8, s9              // t3 <- state[3] >>> 8
    xor t0, t3, s3                      // t0 <- state[3] ^ (state[3] >>> 8)
    rori t4, t0, 16, s8, s9             // t4 <- t0 >>> 16
    xor s3, t3, t4                      // s3 <- t0 >>> 16 ^ state[3] >>> 8
    xor s3, s3, t1                      // s3 <- s3 ^ t1
    xor s3, s3, t2                      // s3 <- s3 ^ t2
    rori t3, s2, 8, s8, s9              // t3 <- state[2] >>> 8
    xor t1, t3, s2                      // t1 <- state[2] ^ (state[2] >>> 8)
    rori t4, t1, 16, s8, s9             // t4 <- t1 >>> 16
    xor s2, t3, t4                      // s2 <- t1 >>> 16 ^ state[2] >>> 8
    xor s2, s2, t0                      // s2 <- s2 ^ t0
    rori t3, s1, 8, s8, s9              // t3 <- state[1] >>> 8
    xor t0, t3, s1                      // t0 <- state[1] ^ (state[1] >>> 8)
    rori t4, t0, 16, s8, s9             // t4 <- t0 >>> 16
    xor s1, t3, t4                      // s1 <- t0 >>> 16 ^ state[1] >>> 8
    xor s1, s1, t1                      // s1 <- s1 ^ t1
    rori t4, t2, 16, s8, s9             // t4 <- t2 >>> 16
    xor s0, t5, t4                      // s0 <- t5 ^ t4
    xor s0, s0, t0                      // s0 <- s0 ^ t0
    ret
/******************************************************************************
* Applies the ShiftRows transformation twice (i.e. SR^2) on the internal state.
******************************************************************************/
// In:   s0-s7 = bitsliced state
// Out:  s0-s7 = SR^2(state); each word swaps nibbles selected by 0x0f000f00
// Clob: t0, t1
double_shiftrows:
    li t1, 0x0f000f00                   // SWAPMOVE mask selecting the nibbles moved by SR^2
    swapmove s0,s0,s0,s0, t1, 4, t0     // in-place swap within each state word
    swapmove s1,s1,s1,s1, t1, 4, t0
    swapmove s2,s2,s2,s2, t1, 4, t0
    swapmove s3,s3,s3,s3, t1, 4, t0
    swapmove s4,s4,s4,s4, t1, 4, t0
    swapmove s5,s5,s5,s5, t1, 4, t0
    swapmove s6,s6,s6,s6, t1, 4, t0
    swapmove s7,s7,s7,s7, t1, 4, t0
    ret
/******************************************************************************
* Subroutine to bitslice the two 128-bit input blocs as follows
* s0 = b_24 b_56 b_88 b_120 || ... || b_0 b_32 b_64 b_96
* s1 = b_25 b_57 b_89 b_121 || ... || b_1 b_33 b_65 b_97
* s2 = b_26 b_58 b_90 b_122 || ... || b_2 b_34 b_66 b_98
* s3 = b_27 b_59 b_91 b_123 || ... || b_3 b_35 b_67 b_99
* s4 = b_28 b_60 b_92 b_124 || ... || b_4 b_36 b_68 b_100
* s5 = b_29 b_61 b_93 b_125 || ... || b_5 b_37 b_69 b_101
* s6 = b_30 b_62 b_94 b_126 || ... || b_6 b_38 b_70 b_102
* s7 = b_31 b_63 b_95 b_127 || ... || b_7 b_39 b_71 b_103
* Note that it has been divided in 3 subroutine to avoid additional code size
* to unpack.
******************************************************************************/
// Bitslicing stage 0: SWAPMOVE with mask t0 (0x55555555), shift 1. Clobbers s8.
// Self-inverse: also used as the last unpacking stage.
packing_0:
    swapmove s1, s0, s1, s0, t0, 1, s8
    swapmove s3, s2, s3, s2, t0, 1, s8
    swapmove s5, s4, s5, s4, t0, 1, s8
    swapmove s7, s6, s7, s6, t0, 1, s8
    ret
// Bitslicing stage 1: SWAPMOVE with mask t1 (0x33333333), shift 2. Clobbers s8.
// Self-inverse: also used as the middle unpacking stage.
packing_1:
    swapmove s2, s0, s2, s0, t1, 2, s8
    swapmove s3, s1, s3, s1, t1, 2, s8
    swapmove s6, s4, s6, s4, t1, 2, s8
    swapmove s7, s5, s7, s5, t1, 2, s8
    ret
// Bitslicing stage 2: SWAPMOVE with mask t2 (0x0f0f0f0f), shift 4. Clobbers s8.
// Self-inverse: also used as the first unpacking stage.
packing_2:
    swapmove s4, s0, s4, s0, t2, 4, s8
    swapmove s5, s1, s5, s1, t2, 4, s8
    swapmove s6, s2, s6, s2, t2, 4, s8
    swapmove s7, s3, s7, s3, t2, 4, s8
    ret
/******************************************************************************
* Fully-fixsliced implementation of AES-128.
* Two blocks are encrypted in parallel, without any operating mode.
*
* The function prototype is:
* - void aes128_encrypt_ffs(uint8_t* out0, uint8_t* out1, const uint8_t* in0,
const uint8_t* in1, const uint32_t* rkeys)
******************************************************************************/
.globl aes128_encrypt_ffs
.type aes128_encrypt_ffs, %function
.align 2
aes128_encrypt_ffs:
    addi sp, sp, -72 // allocate space on the stack for the caller context
    sw ra, 68(sp) // save context (return address, clobbered by jal below)
    sw a0, 64(sp) // save context (a0 = out0)
    sw a1, 60(sp) // save context (a1 = out1)
    sw a2, 56(sp) // save context (a2 = in0)
    sw a3, 52(sp) // save context (a3 = in1)
    sw a4, 48(sp) // save context (a4 = rkeys)
    sw s0, 44(sp) // save context (callee-saved registers below)
    sw s1, 40(sp) // save context
    sw s2, 36(sp) // save context
    sw s3, 32(sp) // save context
    sw s4, 28(sp) // save context
    sw s5, 24(sp) // save context
    sw s6, 20(sp) // save context
    sw s7, 16(sp) // save context
    sw s8, 12(sp) // save context
    sw s9, 8(sp) // save context
    sw s10, 4(sp) // save context
    sw s11, 0(sp) // save context
    add a0, a4, zero // put rkeys address in a0
    lw s0, 0(a2) // load input word (blocks interleaved: in0/in1 word by word)
    lw s1, 0(a3) // load input word
    lw s2, 4(a2) // load input word
    lw s3, 4(a3) // load input word
    lw s4, 8(a2) // load input word
    lw s5, 8(a3) // load input word
    lw s6, 12(a2) // load input word
    lw s7, 12(a3) // load input word
    li t0, 0x55555555 // mask for SWAPMOVE
    li t1, 0x33333333 // mask for SWAPMOVE
    li t2, 0x0f0f0f0f // mask for SWAPMOVE
    jal packing_0 // bitslice the two input blocks
    jal packing_1
    jal packing_2
    // 10 rounds; MixColumns variant depends on the round index mod 4
    jal ark_sbox // round 0
    li t4, 0x03030303 // mask for byte_rori
    li t5, 0x3f3f3f3f // mask for byte_rori
    addi a2, zero, 6 // BYTE_ROR_6 shift amounts for mixcolumns0 (i%4 == 0)
    addi a3, a2, -4 // a3 <- 2
    jal mixcolumns0
    jal ark_sbox // round 1
    jal mixcolumns1
    jal ark_sbox // round 2
    li t5, 0x03030303 // mask for byte_rori
    li t4, 0x3f3f3f3f // mask for byte_rori
    addi a2, zero, 2 // BYTE_ROR_2 shift amounts for mixcolumns0 (i%4 == 2)
    addi a3, a2, 4 // a3 <- 6
    jal mixcolumns0
    jal ark_sbox // round 3
    jal mixcolumns3
    jal ark_sbox // round 4
    li t4, 0x03030303 // mask for byte_rori
    li t5, 0x3f3f3f3f // mask for byte_rori
    addi a2, zero, 6
    addi a3, a2, -4
    jal mixcolumns0
    jal ark_sbox // round 5
    jal mixcolumns1
    jal ark_sbox // round 6
    li t5, 0x03030303 // mask for byte_rori
    li t4, 0x3f3f3f3f // mask for byte_rori
    addi a2, zero, 2
    addi a3, a2, 4
    jal mixcolumns0
    jal ark_sbox // round 7
    jal mixcolumns3
    jal ark_sbox // round 8
    li t4, 0x03030303 // mask for byte_rori
    li t5, 0x3f3f3f3f // mask for byte_rori
    addi a2, zero, 6
    addi a3, a2, -4
    jal mixcolumns0
    jal ark_sbox // round 9 (final round: no MixColumns)
    jal double_shiftrows // double shiftrows for resynch
    addroundkey s0,s1,s2,s3,s4,s5,s6,s7, 0(a0), s8,s9 // final round-key addition
    li t0, 0x55555555
    li t1, 0x33333333
    li t2, 0x0f0f0f0f
    jal packing_2 // un-bitslice (packing stages are self-inverse, reverse order)
    jal packing_1
    jal packing_0
    lw a0, 64(sp) // restore pointer to output array
    lw a1, 60(sp) // restore pointer to output array
    sw s0, 0(a0) // store output
    sw s1, 0(a1) // store output
    sw s2, 4(a0) // store output
    sw s3, 4(a1) // store output
    sw s4, 8(a0) // store output
    sw s5, 8(a1) // store output
    sw s6, 12(a0) // store output
    sw s7, 12(a1) // store output
    lw ra, 68(sp) // restore context
    lw a2, 56(sp) // restore context
    lw a3, 52(sp) // restore context
    lw a4, 48(sp) // restore context
    lw s0, 44(sp) // restore context
    lw s1, 40(sp) // restore context
    lw s2, 36(sp) // restore context
    lw s3, 32(sp) // restore context
    lw s4, 28(sp) // restore context
    lw s5, 24(sp) // restore context
    lw s6, 20(sp) // restore context
    lw s7, 16(sp) // restore context
    lw s8, 12(sp) // restore context
    lw s9, 8(sp) // restore context
    lw s10, 4(sp) // restore context
    lw s11, 0(sp) // restore context
    addi sp, sp, 72 // restore stack pointer
    ret
.size aes128_encrypt_ffs,.-aes128_encrypt_ffs
/******************************************************************************
* Fully-fixsliced implementation of AES-256 (14 rounds).
* Two blocks are encrypted in parallel, without any operating mode.
*
* The function prototype is:
* - void aes256_encrypt_ffs(uint8_t* out0, uint8_t* out1, const uint8_t* in0,
 const uint8_t* in1, const uint32_t* rkeys)
*
* The bitsliced state lives in s0-s7; a0 holds the running round-key pointer.
* Relies on subroutines defined elsewhere in this file: packing_0/1/2,
* ark_sbox (presumably AddRoundKey + S-box layer -- confirm against their
* definitions), mixcolumns0/1/3 and double_shiftrows. t4/t5 and a2/a3 are
* loaded before each mixcolumns0 call with the masks and shift amounts that
* routine expects for its byte rotations.
******************************************************************************/
.globl aes256_encrypt_ffs
.type aes256_encrypt_ffs, %function
.align 2
aes256_encrypt_ffs:
addi sp, sp, -72 // allocate stack frame
sw ra, 68(sp) // save context
sw a0, 64(sp) // save context
sw a1, 60(sp) // save context
sw a2, 56(sp) // save context
sw a3, 52(sp) // save context
sw a4, 48(sp) // save context
sw s0, 44(sp) // save context
sw s1, 40(sp) // save context
sw s2, 36(sp) // save context
sw s3, 32(sp) // save context
sw s4, 28(sp) // save context
sw s5, 24(sp) // save context
sw s6, 20(sp) // save context
sw s7, 16(sp) // save context
sw s8, 12(sp) // save context
sw s9, 8(sp) // save context
sw s10, 4(sp) // save context
sw s11, 0(sp) // save context
add a0, a4, zero // put rkeys address in a0
lw s0, 0(a2) // load input word
lw s1, 0(a3) // load input word
lw s2, 4(a2) // load input word
lw s3, 4(a3) // load input word
lw s4, 8(a2) // load input word
lw s5, 8(a3) // load input word
lw s6, 12(a2) // load input word
lw s7, 12(a3) // load input word
li t0, 0x55555555 // mask for SWAPMOVE
li t1, 0x33333333 // mask for SWAPMOVE
li t2, 0x0f0f0f0f // mask for SWAPMOVE
jal packing_0 // bitslice the two input blocks
jal packing_1
jal packing_2
jal ark_sbox // round 1
li t4, 0x03030303 // mask for byte_rori
li t5, 0x3f3f3f3f // mask for byte_rori
addi a2, zero, 6 // shift amount for mixcolumns0
addi a3, a2, -4 // shift amount for mixcolumns0 (a3 = 2)
jal mixcolumns0
jal ark_sbox // round 2
jal mixcolumns1
jal ark_sbox // round 3
li t5, 0x03030303 // mask for byte_rori
li t4, 0x3f3f3f3f // mask for byte_rori
addi a2, zero, 2 // shift amount for mixcolumns0
addi a3, a2, 4 // shift amount for mixcolumns0 (a3 = 6)
jal mixcolumns0
jal ark_sbox // round 4
jal mixcolumns3
jal ark_sbox // round 5
li t4, 0x03030303 // mask for byte_rori
li t5, 0x3f3f3f3f // mask for byte_rori
addi a2, zero, 6 // shift amount for mixcolumns0
addi a3, a2, -4 // shift amount for mixcolumns0 (a3 = 2)
jal mixcolumns0
jal ark_sbox // round 6
jal mixcolumns1
jal ark_sbox // round 7
li t5, 0x03030303 // mask for byte_rori
li t4, 0x3f3f3f3f // mask for byte_rori
addi a2, zero, 2 // shift amount for mixcolumns0
addi a3, a2, 4 // shift amount for mixcolumns0 (a3 = 6)
jal mixcolumns0
jal ark_sbox // round 8
jal mixcolumns3
jal ark_sbox // round 9
li t4, 0x03030303 // mask for byte_rori
li t5, 0x3f3f3f3f // mask for byte_rori
addi a2, zero, 6 // shift amount for mixcolumns0
addi a3, a2, -4 // shift amount for mixcolumns0 (a3 = 2)
jal mixcolumns0
jal ark_sbox // round 10
jal mixcolumns1
jal ark_sbox // round 11
li t5, 0x03030303 // mask for byte_rori
li t4, 0x3f3f3f3f // mask for byte_rori
addi a2, zero, 2 // shift amount for mixcolumns0
addi a3, a2, 4 // shift amount for mixcolumns0 (a3 = 6)
jal mixcolumns0
jal ark_sbox // round 12
jal mixcolumns3
jal ark_sbox // round 13
li t4, 0x03030303 // mask for byte_rori
li t5, 0x3f3f3f3f // mask for byte_rori
addi a2, zero, 6 // shift amount for mixcolumns0
addi a3, a2, -4 // shift amount for mixcolumns0 (a3 = 2)
jal mixcolumns0
jal ark_sbox // round 14 (last round: no MixColumns)
jal double_shiftrows // double shiftrows for resynch
addroundkey s0,s1,s2,s3,s4,s5,s6,s7, 0(a0), s8,s9 // final AddRoundKey
li t0, 0x55555555 // mask for SWAPMOVE (unpacking)
li t1, 0x33333333 // mask for SWAPMOVE (unpacking)
li t2, 0x0f0f0f0f // mask for SWAPMOVE (unpacking)
jal packing_2 // unpack in reverse order of packing
jal packing_1
jal packing_0
lw a0, 64(sp) // restore pointer to output array
lw a1, 60(sp) // restore pointer to output array
sw s0, 0(a0) // store output
sw s1, 0(a1) // store output
sw s2, 4(a0) // store output
sw s3, 4(a1) // store output
sw s4, 8(a0) // store output
sw s5, 8(a1) // store output
sw s6, 12(a0) // store output
sw s7, 12(a1) // store output
lw ra, 68(sp) // restore context
lw a2, 56(sp) // restore context
lw a3, 52(sp) // restore context
lw a4, 48(sp) // restore context
lw s0, 44(sp) // restore context
lw s1, 40(sp) // restore context
lw s2, 36(sp) // restore context
lw s3, 32(sp) // restore context
lw s4, 28(sp) // restore context
lw s5, 24(sp) // restore context
lw s6, 20(sp) // restore context
lw s7, 16(sp) // restore context
lw s8, 12(sp) // restore context
lw s9, 8(sp) // restore context
lw s10, 4(sp) // restore context
lw s11, 0(sp) // restore context
addi sp, sp, 72 // release stack frame
ret
.size aes256_encrypt_ffs,.-aes256_encrypt_ffs
/******************************************************************************
* Semi-fixsliced implementation of AES-128 (10 rounds).
* Two blocks are encrypted in parallel, without any operating mode.
*
* The function prototype is:
* - void aes128_encrypt_sfs(uint8_t* out0, uint8_t* out1, const uint8_t* in0,
 const uint8_t* in1, const uint32_t* rkeys)
*
* Semi-fixslicing only alternates between the mixcolumns0 and mixcolumns3
* representations, so double_shiftrows is called every 2 rounds to
* resynchronize the bitsliced state (held in s0-s7).
******************************************************************************/
.globl aes128_encrypt_sfs
.type aes128_encrypt_sfs, %function
.align 2
aes128_encrypt_sfs:
addi sp, sp, -72 // allocate stack frame
sw ra, 68(sp) // save context
sw a0, 64(sp) // save context
sw a1, 60(sp) // save context
sw a2, 56(sp) // save context
sw a3, 52(sp) // save context
sw a4, 48(sp) // save context
sw s0, 44(sp) // save context
sw s1, 40(sp) // save context
sw s2, 36(sp) // save context
sw s3, 32(sp) // save context
sw s4, 28(sp) // save context
sw s5, 24(sp) // save context
sw s6, 20(sp) // save context
sw s7, 16(sp) // save context
sw s8, 12(sp) // save context
sw s9, 8(sp) // save context
sw s10, 4(sp) // save context
sw s11, 0(sp) // save context
add a0, a4, zero // put rkeys address in a0
lw s0, 0(a2) // load input word
lw s1, 0(a3) // load input word
lw s2, 4(a2) // load input word
lw s3, 4(a3) // load input word
lw s4, 8(a2) // load input word
lw s5, 8(a3) // load input word
lw s6, 12(a2) // load input word
lw s7, 12(a3) // load input word
li t0, 0x55555555 // mask for SWAPMOVE
li t1, 0x33333333 // mask for SWAPMOVE
li t2, 0x0f0f0f0f // mask for SWAPMOVE
jal packing_0 // bitslice the two input blocks
jal packing_1
jal packing_2
jal ark_sbox // round 1
li t4, 0x03030303 // mask for byte_rori
li t5, 0x3f3f3f3f // mask for byte_rori
addi a2, zero, 6 // shift amount for mixcolumns0
addi a3, a2, -4 // shift amount for mixcolumns0 (a3 = 2)
jal mixcolumns0
jal ark_sbox // round 2
jal double_shiftrows // shiftrows every 2 rounds
jal mixcolumns3
jal ark_sbox // round 3
li t4, 0x03030303 // mask for byte_rori
li t5, 0x3f3f3f3f // mask for byte_rori
addi a2, zero, 6 // shift amount for mixcolumns0
addi a3, a2, -4 // shift amount for mixcolumns0 (a3 = 2)
jal mixcolumns0
jal ark_sbox // round 4
jal double_shiftrows // shiftrows every 2 rounds
jal mixcolumns3
jal ark_sbox // round 5
li t4, 0x03030303 // mask for byte_rori
li t5, 0x3f3f3f3f // mask for byte_rori
addi a2, zero, 6 // shift amount for mixcolumns0
addi a3, a2, -4 // shift amount for mixcolumns0 (a3 = 2)
jal mixcolumns0
jal ark_sbox // round 6
jal double_shiftrows // shiftrows every 2 rounds
jal mixcolumns3
jal ark_sbox // round 7
li t4, 0x03030303 // mask for byte_rori
li t5, 0x3f3f3f3f // mask for byte_rori
addi a2, zero, 6 // shift amount for mixcolumns0
addi a3, a2, -4 // shift amount for mixcolumns0 (a3 = 2)
jal mixcolumns0
jal ark_sbox // round 8
jal double_shiftrows // shiftrows every 2 rounds
jal mixcolumns3
jal ark_sbox // round 9
li t4, 0x03030303 // mask for byte_rori
li t5, 0x3f3f3f3f // mask for byte_rori
addi a2, zero, 6 // shift amount for mixcolumns0
addi a3, a2, -4 // shift amount for mixcolumns0 (a3 = 2)
jal mixcolumns0
jal ark_sbox // round 10 (last round: no MixColumns)
jal double_shiftrows // shiftrows every 2 rounds
addroundkey s0,s1,s2,s3,s4,s5,s6,s7, 0(a0), s8,s9 // final AddRoundKey
li t0, 0x55555555 // mask for SWAPMOVE (unpacking)
li t1, 0x33333333 // mask for SWAPMOVE (unpacking)
li t2, 0x0f0f0f0f // mask for SWAPMOVE (unpacking)
jal packing_2 // unpack in reverse order of packing
jal packing_1
jal packing_0
lw a0, 64(sp) // restore pointer to output array
lw a1, 60(sp) // restore pointer to output array
sw s0, 0(a0) // store output
sw s1, 0(a1) // store output
sw s2, 4(a0) // store output
sw s3, 4(a1) // store output
sw s4, 8(a0) // store output
sw s5, 8(a1) // store output
sw s6, 12(a0) // store output
sw s7, 12(a1) // store output
lw ra, 68(sp) // restore context
lw a2, 56(sp) // restore context
lw a3, 52(sp) // restore context
lw a4, 48(sp) // restore context
lw s0, 44(sp) // restore context
lw s1, 40(sp) // restore context
lw s2, 36(sp) // restore context
lw s3, 32(sp) // restore context
lw s4, 28(sp) // restore context
lw s5, 24(sp) // restore context
lw s6, 20(sp) // restore context
lw s7, 16(sp) // restore context
lw s8, 12(sp) // restore context
lw s9, 8(sp) // restore context
lw s10, 4(sp) // restore context
lw s11, 0(sp) // restore context
addi sp, sp, 72 // release stack frame
ret
.size aes128_encrypt_sfs,.-aes128_encrypt_sfs
/******************************************************************************
* Semi-fixsliced implementation of AES-256 (14 rounds).
* Two blocks are encrypted in parallel, without any operating mode.
*
* The function prototype is:
* - void aes256_encrypt_sfs(uint8_t* out0, uint8_t* out1, const uint8_t* in0,
 const uint8_t* in1, const uint32_t* rkeys)
*
* Semi-fixslicing only alternates between the mixcolumns0 and mixcolumns3
* representations, so double_shiftrows is called every 2 rounds to
* resynchronize the bitsliced state (held in s0-s7).
******************************************************************************/
.globl aes256_encrypt_sfs
.type aes256_encrypt_sfs, %function
.align 2
aes256_encrypt_sfs:
addi sp, sp, -72 // allocate stack frame
sw ra, 68(sp) // save context
sw a0, 64(sp) // save context
sw a1, 60(sp) // save context
sw a2, 56(sp) // save context
sw a3, 52(sp) // save context
sw a4, 48(sp) // save context
sw s0, 44(sp) // save context
sw s1, 40(sp) // save context
sw s2, 36(sp) // save context
sw s3, 32(sp) // save context
sw s4, 28(sp) // save context
sw s5, 24(sp) // save context
sw s6, 20(sp) // save context
sw s7, 16(sp) // save context
sw s8, 12(sp) // save context
sw s9, 8(sp) // save context
sw s10, 4(sp) // save context
sw s11, 0(sp) // save context
add a0, a4, zero // put rkeys address in a0
lw s0, 0(a2) // load input word
lw s1, 0(a3) // load input word
lw s2, 4(a2) // load input word
lw s3, 4(a3) // load input word
lw s4, 8(a2) // load input word
lw s5, 8(a3) // load input word
lw s6, 12(a2) // load input word
lw s7, 12(a3) // load input word
li t0, 0x55555555 // mask for SWAPMOVE
li t1, 0x33333333 // mask for SWAPMOVE
li t2, 0x0f0f0f0f // mask for SWAPMOVE
jal packing_0 // bitslice the two input blocks
jal packing_1
jal packing_2
jal ark_sbox // round 1
li t4, 0x03030303 // mask for byte_rori
li t5, 0x3f3f3f3f // mask for byte_rori
addi a2, zero, 6 // shift amount for mixcolumns0
addi a3, a2, -4 // shift amount for mixcolumns0 (a3 = 2)
jal mixcolumns0
jal ark_sbox // round 2
jal double_shiftrows // shiftrows every 2 rounds
jal mixcolumns3
jal ark_sbox // round 3
li t4, 0x03030303 // mask for byte_rori
li t5, 0x3f3f3f3f // mask for byte_rori
addi a2, zero, 6 // shift amount for mixcolumns0
addi a3, a2, -4 // shift amount for mixcolumns0 (a3 = 2)
jal mixcolumns0
jal ark_sbox // round 4
jal double_shiftrows // shiftrows every 2 rounds
jal mixcolumns3
jal ark_sbox // round 5
li t4, 0x03030303 // mask for byte_rori
li t5, 0x3f3f3f3f // mask for byte_rori
addi a2, zero, 6 // shift amount for mixcolumns0
addi a3, a2, -4 // shift amount for mixcolumns0 (a3 = 2)
jal mixcolumns0
jal ark_sbox // round 6
jal double_shiftrows // shiftrows every 2 rounds
jal mixcolumns3
jal ark_sbox // round 7
li t4, 0x03030303 // mask for byte_rori
li t5, 0x3f3f3f3f // mask for byte_rori
addi a2, zero, 6 // shift amount for mixcolumns0
addi a3, a2, -4 // shift amount for mixcolumns0 (a3 = 2)
jal mixcolumns0
jal ark_sbox // round 8
jal double_shiftrows // shiftrows every 2 rounds
jal mixcolumns3
jal ark_sbox // round 9
li t4, 0x03030303 // mask for byte_rori
li t5, 0x3f3f3f3f // mask for byte_rori
addi a2, zero, 6 // shift amount for mixcolumns0
addi a3, a2, -4 // shift amount for mixcolumns0 (a3 = 2)
jal mixcolumns0
jal ark_sbox // round 10
jal double_shiftrows // shiftrows every 2 rounds
jal mixcolumns3
jal ark_sbox // round 11
li t4, 0x03030303 // mask for byte_rori
li t5, 0x3f3f3f3f // mask for byte_rori
addi a2, zero, 6 // shift amount for mixcolumns0
addi a3, a2, -4 // shift amount for mixcolumns0 (a3 = 2)
jal mixcolumns0
jal ark_sbox // round 12
jal double_shiftrows // shiftrows every 2 rounds
jal mixcolumns3
jal ark_sbox // round 13
li t4, 0x03030303 // mask for byte_rori
li t5, 0x3f3f3f3f // mask for byte_rori
addi a2, zero, 6 // shift amount for mixcolumns0
addi a3, a2, -4 // shift amount for mixcolumns0 (a3 = 2)
jal mixcolumns0
jal ark_sbox // round 14 (last round: no MixColumns)
jal double_shiftrows // shiftrows every 2 rounds
addroundkey s0,s1,s2,s3,s4,s5,s6,s7, 0(a0), s8,s9 // final AddRoundKey
li t0, 0x55555555 // mask for SWAPMOVE (unpacking)
li t1, 0x33333333 // mask for SWAPMOVE (unpacking)
li t2, 0x0f0f0f0f // mask for SWAPMOVE (unpacking)
jal packing_2 // unpack in reverse order of packing
jal packing_1
jal packing_0
lw a0, 64(sp) // restore pointer to output array
lw a1, 60(sp) // restore pointer to output array
sw s0, 0(a0) // store output
sw s1, 0(a1) // store output
sw s2, 4(a0) // store output
sw s3, 4(a1) // store output
sw s4, 8(a0) // store output
sw s5, 8(a1) // store output
sw s6, 12(a0) // store output
sw s7, 12(a1) // store output
lw ra, 68(sp) // restore context
lw a2, 56(sp) // restore context
lw a3, 52(sp) // restore context
lw a4, 48(sp) // restore context
lw s0, 44(sp) // restore context
lw s1, 40(sp) // restore context
lw s2, 36(sp) // restore context
lw s3, 32(sp) // restore context
lw s4, 28(sp) // restore context
lw s5, 24(sp) // restore context
lw s6, 20(sp) // restore context
lw s7, 16(sp) // restore context
lw s8, 12(sp) // restore context
lw s9, 8(sp) // restore context
lw s10, 4(sp) // restore context
lw s11, 0(sp) // restore context
addi sp, sp, 72 // release stack frame
ret
.size aes256_encrypt_sfs,.-aes256_encrypt_sfs
/******************************************************************************
* End of the encryption routines. The remainder of this file corresponds to
* riscv/fixslicing/aes_keyschedule.S from the aadomn/aes project (44,769 B).
******************************************************************************/
/******************************************************************************
* RV32I assembly implementations of the AES-128 and AES-256 key schedule
* according to fixslicing.
* Note that those implementations are fully bitsliced and do not rely on any
* Look-Up Table (LUT).
*
* See the paper at https://eprint.iacr.org/2020/1123.pdf for more details.
*
* @author Alexandre Adomnicai, Nanyang Technological University, Singapore
* alexandre.adomnicai@ntu.edu.sg
*
* @date August 2020
******************************************************************************/
.text
/******************************************************************************
* Implementation of the SWAPMOVE technique for the packing/unpacking routines:
* swaps the bits of in1 selected by mask with the bits of in0 selected by
* (mask << imm). The out/in pairs may alias the same registers.
*
* Parameters:
* - out0-out1 output registers
* - in0-in1 input registers
* - mask mask
* - imm shift value (must be an immediate value)
* - tmp temporary register
******************************************************************************/
.macro swapmove out0,out1, in0,in1, mask, imm, tmp
srli \tmp, \in0, \imm // tmp <- in0 >> imm
xor \tmp, \tmp, \in1 // tmp <- tmp ^ in1
and \tmp, \tmp, \mask // tmp <- tmp & mask (bits to exchange)
xor \out1, \in1, \tmp // out1 <- in1 ^ tmp
slli \tmp, \tmp, \imm // tmp <- tmp << imm
xor \out0, \in0, \tmp // out0 <- in0 ^ tmp
.endm
/******************************************************************************
* Computes a 32-bit rotation to the right (RV32I has no rotate instruction,
* so it is emulated with two shifts and an OR).
*
* Parameters:
* - out output register
* - in input register
* - imm rotation value (must be an immediate value, 1 <= imm <= 31)
* - tmp0-1 temporary registers
******************************************************************************/
.macro rori out, in, imm, tmp0, tmp1
srli \tmp0, \in, \imm // tmp0 <- in >> imm
slli \tmp1, \in, 32-\imm // tmp1 <- in << (32-imm)
or \out, \tmp0, \tmp1 // out <- ROR(in, imm)
.endm
/******************************************************************************
* Store the round keys in the corresponding array.
*
* Parameters:
* - r0-r7 the eight 32-bit round key words
* - addr displacement expression within the round keys array; the words
* are stored at addr, addr+4, ..., addr+28
******************************************************************************/
.macro store_rkey r0, r1, r2, r3, r4, r5, r6, r7, addr
sw \r0, 0+\addr
sw \r1, 4+\addr
sw \r2, 8+\addr
sw \r3, 12+\addr
sw \r4, 16+\addr
sw \r5, 20+\addr
sw \r6, 24+\addr
sw \r7, 28+\addr
.endm
/******************************************************************************
* Applies NOT to the round keys to save some cycles during Sbox calculations
* (the Sbox routine omits the complement of words 1, 2, 6 and 7), then stores
* them in the round keys array. Note: r1, r2, r6 and r7 are modified in place.
*
* Parameters:
* - r0-r7 the eight 32-bit round key words
* - addr displacement expression within the round keys array; the words
* are stored at addr, addr+4, ..., addr+28
******************************************************************************/
.macro store_not_rkey r0, r1, r2, r3, r4, r5, r6, r7, addr
not \r1, \r1 // NOT omitted in sbox
not \r2, \r2 // NOT omitted in sbox
not \r6, \r6 // NOT omitted in sbox
not \r7, \r7 // NOT omitted in sbox
sw \r0, 0+\addr
sw \r1, 4+\addr
sw \r2, 8+\addr
sw \r3, 12+\addr
sw \r4, 16+\addr
sw \r5, 20+\addr
sw \r6, 24+\addr
sw \r7, 28+\addr
.endm
/******************************************************************************
* Packing routine: bitslices the state held in s0-s7 with three SWAPMOVE
* layers (shifts 1, 2 and 4). Expects the masks 0x55555555, 0x33333333 and
* 0x0f0f0f0f in t0, t1 and t2 respectively; clobbers s8.
* Note that it is the same as the one used in the encryption
* function so some code size could be saved by merging the two files.
******************************************************************************/
.macro packing
swapmove s1, s0, s1, s0, t0, 1, s8
swapmove s3, s2, s3, s2, t0, 1, s8
swapmove s5, s4, s5, s4, t0, 1, s8
swapmove s7, s6, s7, s6, t0, 1, s8
swapmove s2, s0, s2, s0, t1, 2, s8
swapmove s3, s1, s3, s1, t1, 2, s8
swapmove s6, s4, s6, s4, t1, 2, s8
swapmove s7, s5, s7, s5, t1, 2, s8
swapmove s4, s0, s4, s0, t2, 4, s8
swapmove s5, s1, s5, s1, t2, 4, s8
swapmove s6, s2, s6, s2, t2, 4, s8
swapmove s7, s3, s7, s3, t2, 4, s8
.endm
/******************************************************************************
* Subroutine that computes the bitsliced AES S-box (128-gate circuit).
* In/out: s0-s7 hold the eight bitsliced state slices (U0-U7 in, S0-S7 out).
* Clobbers: t0-t6, a1-a7, s8-s11. The final NOTs on s1, s2, s6 and s7 can be
* hoisted out by the caller (see store_not_rkey / the round function).
* Note that the same code is used in the
* encryption function, so some code size could be saved by merging the 2 files.
* Credits to https://github.com/Ko-/riscvcrypto.
******************************************************************************/
sbox:
xor t0, s3, s5 // Exec y14 = U3 ^ U5 into r0
xor t1, s0, s6 // Exec y13 = U0 ^ U6 into r1
xor t2, t1, t0 // Exec y12 = y13 ^ y14 into r2
xor t3, s4, t2 // Exec t1 = U4 ^ y12 into r3
xor t4, t3, s5 // Exec y15 = t1 ^ U5 into r4
and t5, t2, t4 // Exec t2 = y12 & y15 into r5
xor t6, t4, s7 // Exec y6 = y15 ^ U7 into r6
xor a1, t3, s1 // Exec y20 = t1 ^ U1 into r7
xor a2, s0, s3 // Exec y9 = U0 ^ U3 into r8
xor a3, a1, a2 // Exec y11 = y20 ^ y9 into r9
and a4, a2, a3 // Exec t12 = y9 & y11 into r10
xor a5, s7, a3 // Exec y7 = U7 ^ y11 into r11
xor a6, s0, s5 // Exec y8 = U0 ^ U5 into r12
xor a7, s1, s2 // Exec t0 = U1 ^ U2 into r13
xor s8, t4, a7 // Exec y10 = y15 ^ t0 into r14
xor s9, s8, a3 // Exec y17 = y10 ^ y11 into r15
and s10, t0, s9 // Exec t13 = y14 & y17 into r16
xor s11, s10, a4 // Exec t14 = t13 ^ t12 into r17
xor s1, s8, a6 // Exec y19 = y10 ^ y8 into b1
and s2, a6, s8 // Exec t15 = y8 & y10 into b2
xor s2, s2, a4 // Exec t16 = t15 ^ t12 into b2
xor s4, a7, a3 // Exec y16 = t0 ^ y11 into b4
xor s5, t1, s4 // Exec y21 = y13 ^ y16 into b5
and t3, t1, s4 // Exec t7 = y13 & y16 into r3
xor a4, s0, s4 // Exec y18 = U0 ^ y16 into r10
xor a7, a7, s7 // Exec y1 = t0 ^ U7 into r13
xor s3, a7, s3 // Exec y4 = y1 ^ U3 into b3
and s10, s3, s7 // Exec t5 = y4 & U7 into r16
xor s10, s10, t5 // Exec t6 = t5 ^ t2 into r16
xor s10, s10, s2 // Exec t18 = t6 ^ t16 into r16
xor s1, s10, s1 // Exec t22 = t18 ^ y19 into b1
xor s0, a7, s0 // Exec y2 = y1 ^ U0 into b0
and s10, s0, a5 // Exec t10 = y2 & y7 into r16
xor s10, s10, t3 // Exec t11 = t10 ^ t7 into r16
xor s2, s10, s2 // Exec t20 = t11 ^ t16 into b2
xor s2, s2, a4 // Exec t24 = t20 ^ y18 into b2
xor s6, a7, s6 // Exec y5 = y1 ^ U6 into b6
and a4, s6, a7 // Exec t8 = y5 & y1 into r10
xor t3, a4, t3 // Exec t9 = t8 ^ t7 into r3
xor t3, t3, s11 // Exec t19 = t9 ^ t14 into r3
xor s5, t3, s5 // Exec t23 = t19 ^ y21 into b5
xor t3, s6, a6 // Exec y3 = y5 ^ y8 into r3
and a4, t3, t6 // Exec t3 = y3 & y6 into r10
xor t5, a4, t5 // Exec t4 = t3 ^ t2 into r5
xor t5, t5, a1 // Exec t17 = t4 ^ y20 into r5
xor t5, t5, s11 // Exec t21 = t17 ^ t14 into r5
and a1, t5, s5 // Exec t26 = t21 & t23 into r7
xor a4, s2, a1 // Exec t27 = t24 ^ t26 into r10
xor a1, s1, a1 // Exec t31 = t22 ^ t26 into r7
xor t5, t5, s1 // Exec t25 = t21 ^ t22 into r5
and s10, t5, a4 // Exec t28 = t25 & t27 into r16
xor s1, s10, s1 // Exec t29 = t28 ^ t22 into b1
and s11, s1, s0 // Exec z14 = t29 & y2 into r17
and a5, s1, a5 // Exec z5 = t29 & y7 into r11
xor s10, s5, s2 // Exec t30 = t23 ^ t24 into r16
and a1, a1, s10 // Exec t32 = t31 & t30 into r7
xor a1, a1, s2 // Exec t33 = t32 ^ t24 into r7
xor s10, a4, a1 // Exec t35 = t27 ^ t33 into r16
and s2, s2, s10 // Exec t36 = t24 & t35 into b2
xor a4, a4, s2 // Exec t38 = t27 ^ t36 into r10
and a4, s1, a4 // Exec t39 = t29 & t38 into r10
xor t5, t5, a4 // Exec t40 = t25 ^ t39 into r5
xor a4, s1, t5 // Exec t43 = t29 ^ t40 into r10
and s4, a4, s4 // Exec z3 = t43 & y16 into b4
xor a5, s4, a5 // Exec tc12 = z3 ^ z5 into r11
and t1, a4, t1 // Exec z12 = t43 & y13 into r1
and s6, t5, s6 // Exec z13 = t40 & y5 into b6
and a4, t5, a7 // Exec z4 = t40 & y1 into r10
xor s4, s4, a4 // Exec tc6 = z3 ^ z4 into b4
xor s5, s5, a1 // Exec t34 = t23 ^ t33 into b5
xor s2, s2, s5 // Exec t37 = t36 ^ t34 into b2
xor s5, t5, s2 // Exec t41 = t40 ^ t37 into b5
and t5, s5, s8 // Exec z8 = t41 & y10 into r5
and a4, s5, a6 // Exec z17 = t41 & y8 into r10
xor a6, a1, s2 // Exec t44 = t33 ^ t37 into r12
and t4, a6, t4 // Exec z0 = t44 & y15 into r4
and t2, a6, t2 // Exec z9 = t44 & y12 into r2
and t3, s2, t3 // Exec z10 = t37 & y3 into r3
and s2, s2, t6 // Exec z1 = t37 & y6 into b2
xor s2, s2, t4 // Exec tc5 = z1 ^ z0 into b2
xor a7, s4, s2 // Exec tc11 = tc6 ^ tc5 into r13
and s3, a1, s3 // Exec z11 = t33 & y4 into b3
xor s1, s1, a1 // Exec t42 = t29 ^ t33 into b1
xor s5, s1, s5 // Exec t45 = t42 ^ t41 into b5
and t6, s5, s9 // Exec z7 = t45 & y17 into r6
xor s4, t6, s4 // Exec tc8 = z7 ^ tc6 into b4
and t0, s5, t0 // Exec z16 = t45 & y14 into r0
and s5, s1, a3 // Exec z6 = t42 & y11 into b5
xor s5, s5, s4 // Exec tc16 = z6 ^ tc8 into b5
and s1, s1, a2 // Exec z15 = t42 & y9 into b1
xor t6, s1, s5 // Exec tc20 = z15 ^ tc16 into r6
xor t0, s1, t0 // Exec tc1 = z15 ^ z16 into r0
xor s1, t3, t0 // Exec tc2 = z10 ^ tc1 into b1
xor s9, s1, s3 // Exec tc21 = tc2 ^ z11 into r15
xor t2, t2, s1 // Exec tc3 = z9 ^ tc2 into r2
xor s0, t2, s5 // Exec S0 = tc3 ^ tc16 into b0
xor s3, t2, a7 // Exec S3 = tc3 ^ tc11 into b3
xor s1, s3, s5 // Exec S1 = S3 ^ tc16 ^ 1 into b1
xor t0, s6, t0 // Exec tc13 = z13 ^ tc1 into r0
and s5, a1, s7 // Exec z2 = t33 & U7 into b5
xor s8, t4, s5 // Exec tc4 = z0 ^ z2 into r14
xor s6, t1, s8 // Exec tc7 = z12 ^ tc4 into b6
xor s6, t5, s6 // Exec tc9 = z8 ^ tc7 into b6
xor s6, s4, s6 // Exec tc10 = tc8 ^ tc9 into b6
xor s2, s11, s6 // Exec tc17 = z14 ^ tc10 into b2
xor s5, s9, s2 // Exec S5 = tc21 ^ tc17 into b5
xor s2, s2, t6 // Exec tc26 = tc17 ^ tc20 into b2
xor s2, s2, a4 // Exec S2 = tc26 ^ z17 ^ 1 into b2
xor s8, s8, a5 // Exec tc14 = tc4 ^ tc12 into r14
xor t0, t0, s8 // Exec tc18 = tc13 ^ tc14 into r0
xor s6, s6, t0 // Exec S6 = tc10 ^ tc18 ^ 1 into b6
xor s7, t1, t0 // Exec S7 = z12 ^ tc18 ^ 1 into b7
xor s4, s8, s3 // Exec S4 = tc14 ^ S3 into b4
not s1, s1 // can be moved outside to match with the round func
not s2, s2 // can be moved outside to match with the round func
not s6, s6 // can be moved outside to match with the round func
not s7, s7 // can be moved outside to match with the round func
ret
/******************************************************************************
* Applies the ShiftRows transformation twice (i.e. SR^2) on the internal state.
* Operates in place on the eight words held in t0-t6, a7 via in-register
* SWAPMOVEs (mask 0x0f000f00, shift 4); clobbers a1 and a2.
* Note that the same subroutine is used in the encryption function so some code
* size could be saved by merging the two files.
******************************************************************************/
double_shiftrows:
li a1, 0x0f000f00 // SWAPMOVE mask
swapmove t0,t0,t0,t0, a1, 4, a2
swapmove t1,t1,t1,t1, a1, 4, a2
swapmove t2,t2,t2,t2, a1, 4, a2
swapmove t3,t3,t3,t3, a1, 4, a2
swapmove t4,t4,t4,t4, a1, 4, a2
swapmove t5,t5,t5,t5, a1, 4, a2
swapmove t6,t6,t6,t6, a1, 4, a2
swapmove a7,a7,a7,a7, a1, 4, a2
ret
/******************************************************************************
* Compute ShiftRows^(-1) on the entire round key in order to match fixslicing.
* Operates in place on the eight words held in t0-t6, a7 via two SWAPMOVE
* layers per word; clobbers a1, a2 and a3.
******************************************************************************/
inv_shiftrows_1:
li a1, 0x0c0f0300 // a1 <- 0x0c0f0300 (mask for 1st SWAPMOVE layer)
li a2, 0x33003300 // a2 <- 0x33003300 (mask for 2nd SWAPMOVE layer)
swapmove a7,a7,a7,a7, a1, 4, a3
swapmove a7,a7,a7,a7, a2, 2, a3
swapmove t6,t6,t6,t6, a1, 4, a3
swapmove t6,t6,t6,t6, a2, 2, a3
swapmove t5,t5,t5,t5, a1, 4, a3
swapmove t5,t5,t5,t5, a2, 2, a3
swapmove t4,t4,t4,t4, a1, 4, a3
swapmove t4,t4,t4,t4, a2, 2, a3
swapmove t3,t3,t3,t3, a1, 4, a3
swapmove t3,t3,t3,t3, a2, 2, a3
swapmove t2,t2,t2,t2, a1, 4, a3
swapmove t2,t2,t2,t2, a2, 2, a3
swapmove t1,t1,t1,t1, a1, 4, a3
swapmove t1,t1,t1,t1, a2, 2, a3
swapmove t0,t0,t0,t0, a1, 4, a3
swapmove t0,t0,t0,t0, a2, 2, a3
ret
/******************************************************************************
* Compute ShiftRows^(-3) on the entire round key in order to match fixslicing.
* Operates in place on the eight words held in t0-t6, a7 via two SWAPMOVE
* layers per word; clobbers a1, a2 and a3.
******************************************************************************/
inv_shiftrows_3:
li a1, 0x030f0c00 // a1 <- 0x030f0c00 (mask for 1st SWAPMOVE layer)
li a2, 0x33003300 // a2 <- 0x33003300 (mask for 2nd SWAPMOVE layer)
swapmove a7,a7,a7,a7, a1, 4, a3
swapmove a7,a7,a7,a7, a2, 2, a3
swapmove t6,t6,t6,t6, a1, 4, a3
swapmove t6,t6,t6,t6, a2, 2, a3
swapmove t5,t5,t5,t5, a1, 4, a3
swapmove t5,t5,t5,t5, a2, 2, a3
swapmove t4,t4,t4,t4, a1, 4, a3
swapmove t4,t4,t4,t4, a2, 2, a3
swapmove t3,t3,t3,t3, a1, 4, a3
swapmove t3,t3,t3,t3, a2, 2, a3
swapmove t2,t2,t2,t2, a1, 4, a3
swapmove t2,t2,t2,t2, a2, 2, a3
swapmove t1,t1,t1,t1, a1, 4, a3
swapmove t1,t1,t1,t1, a2, 2, a3
swapmove t0,t0,t0,t0, a1, 4, a3
swapmove t0,t0,t0,t0, a2, 2, a3
ret
/******************************************************************************
* XOR two columns during a round of the AES-128 key schedule: the new key
* word is rotated by imm, combined with the previous round's word, then the
* remaining three column slices are propagated two bit positions at a time.
*
* Parameters:
* - new new round key (read-modify-write)
* - old round key from the previous round
* - tmp0-1 temporary registers
* - imm immediate value for rotation
* Prerequisites:
* - a1 to contain 0xc0c0c0c0
* - a2 to contain 0x30303030
* - a3 to contain 0x0c0c0c0c
* - a4 to contain 0x03030303
******************************************************************************/
.macro xor_column new, old, tmp0, tmp1, imm
rori \new, \new,\imm,\tmp0,\tmp1 // new <- ROR(new,imm)
xor \new, \new, \old // new <- new ^ old
and \new, \new, a1 // new <- new & 0xc0c0c0c0
srli \tmp0, \new, 2 // tmp <- new >> 2
xor \tmp0, \tmp0, \old // tmp <- tmp ^ old
and \tmp0, \tmp0, a2 // tmp <- tmp & 0x30303030
or \new, \new, \tmp0 // new <- new | tmp
srli \tmp0, \new, 2 // tmp <- new >> 2
xor \tmp0, \tmp0, \old // tmp <- tmp ^ old
and \tmp0, \tmp0, a3 // tmp <- tmp & 0x0c0c0c0c
or \new, \new, \tmp0 // new <- new | tmp
srli \tmp0, \new, 2 // tmp <- new >> 2
xor \tmp0, \tmp0, \old // tmp <- tmp ^ old
and \tmp0, \tmp0, a4 // tmp <- tmp & 0x03030303
or \new, \new, \tmp0 // new <- new | tmp
.endm
/******************************************************************************
* XOR all the columns during a round of the AES-128 key schedule (RotWord
* variant, rotation by 2). Loads the previous round key from 0(a0) into
* t0-t6,a7 (left there for the caller's ShiftRows fix-ups), builds the new
* round key in s0-s7 (which must contain the sbox output on entry), stores it
* at 32(a0) and advances a0 to the new round key. Clobbers a1-a6.
******************************************************************************/
aes128_xorcolumns_rotword:
li a1, 0xc0c0c0c0 // a1 <- 0xc0c0c0c0
srli a2, a1, 2 // a2 <- 0x30303030
srli a3, a1, 4 // a3 <- 0x0c0c0c0c
srli a4, a1, 6 // a4 <- 0x03030303
lw t0, 0(a0) // load 1st prev rkey word
lw t1, 4(a0) // load 2nd prev rkey word
lw t2, 8(a0) // load 3rd prev rkey word
lw t3, 12(a0) // load 4th prev rkey word
lw t4, 16(a0) // load 5th prev rkey word
lw t5, 20(a0) // load 6th prev rkey word
lw t6, 24(a0) // load 7th prev rkey word
lw a7, 28(a0) // load 8th prev rkey word
xor_column s7, a7, a5, a6, 2
xor_column s6, t6, a5, a6, 2
xor_column s5, t5, a5, a6, 2
xor_column s4, t4, a5, a6, 2
xor_column s3, t3, a5, a6, 2
xor_column s2, t2, a5, a6, 2
xor_column s1, t1, a5, a6, 2
xor_column s0, t0, a5, a6, 2
store_rkey s0,s1,s2,s3,s4,s5,s6,s7,32(a0) // store the new round key
addi a0, a0, 32 // points to the next rkey
ret
/******************************************************************************
* XOR all the columns during a round of the AES-256 key schedule (RotWord
* variant, rotation by 2). Same structure as the AES-128 version, but the new
* round key is stored at 64(a0) since the two 128-bit key halves alternate in
* the round key array. Clobbers a1-a6; leaves the previous round key (loaded
* from 0(a0)) in t0-t6,a7 for the caller's ShiftRows fix-ups.
******************************************************************************/
aes256_xorcolumns_rotword:
li a1, 0xc0c0c0c0 // a1 <- 0xc0c0c0c0
srli a2, a1, 2 // a2 <- 0x30303030
srli a3, a1, 4 // a3 <- 0x0c0c0c0c
srli a4, a1, 6 // a4 <- 0x03030303
lw t0, 0(a0) // load 1st prev rkey word
lw t1, 4(a0) // load 2nd prev rkey word
lw t2, 8(a0) // load 3rd prev rkey word
lw t3, 12(a0) // load 4th prev rkey word
lw t4, 16(a0) // load 5th prev rkey word
lw t5, 20(a0) // load 6th prev rkey word
lw t6, 24(a0) // load 7th prev rkey word
lw a7, 28(a0) // load 8th prev rkey word
xor_column s7, a7, a5, a6, 2
xor_column s6, t6, a5, a6, 2
xor_column s5, t5, a5, a6, 2
xor_column s4, t4, a5, a6, 2
xor_column s3, t3, a5, a6, 2
xor_column s2, t2, a5, a6, 2
xor_column s1, t1, a5, a6, 2
xor_column s0, t0, a5, a6, 2
store_rkey s0,s1,s2,s3,s4,s5,s6,s7,64(a0) // store the new round key
addi a0, a0, 32 // points to the next rkey
ret
/******************************************************************************
* XOR all the columns during a round of the AES-256 key schedule (variant
* without RotWord: the rotation amount passed to xor_column is 26 instead
* of 2). The new round key is stored at 64(a0) since the two 128-bit key
* halves alternate in the round key array. Clobbers a1-a6; leaves the
* previous round key (loaded from 0(a0)) in t0-t6,a7 for the caller.
******************************************************************************/
aes256_xorcolumns:
li a1, 0xc0c0c0c0 // a1 <- 0xc0c0c0c0
srli a2, a1, 2 // a2 <- 0x30303030
srli a3, a1, 4 // a3 <- 0x0c0c0c0c
srli a4, a1, 6 // a4 <- 0x03030303
lw t0, 0(a0) // load 1st prev rkey word
lw t1, 4(a0) // load 2nd prev rkey word
lw t2, 8(a0) // load 3rd prev rkey word
lw t3, 12(a0) // load 4th prev rkey word
lw t4, 16(a0) // load 5th prev rkey word
lw t5, 20(a0) // load 6th prev rkey word
lw t6, 24(a0) // load 7th prev rkey word
lw a7, 28(a0) // load 8th prev rkey word
xor_column s7, a7, a5, a6, 26
xor_column s6, t6, a5, a6, 26
xor_column s5, t5, a5, a6, 26
xor_column s4, t4, a5, a6, 26
xor_column s3, t3, a5, a6, 26
xor_column s2, t2, a5, a6, 26
xor_column s1, t1, a5, a6, 26
xor_column s0, t0, a5, a6, 26
store_rkey s0,s1,s2,s3,s4,s5,s6,s7,64(a0) // store the new round key
addi a0, a0, 32 // points to the next rkey
ret
/******************************************************************************
* Fully bitsliced AES-128 key schedule according to the fully-fixsliced (ffs)
* representation.
*
* The function prototype is:
* - void aes128_keyschedule_ffs(uint32_t* rkeys, const uint8_t* key0,
* const uint8_t* key1);
*
* Two 128-bit keys (one per parallel block) are packed together. Each round:
* sbox on s0-s7, bitsliced rconst via xori 0x300, then
* aes128_xorcolumns_rotword (which also leaves the previous round key in
* t0-t6,a7). The previous round key is then rewritten in place (-32(a0))
* with the inverse ShiftRows fix-up matching its fixsliced round:
* SR^-1, SR^2, SR^-3, identity, repeating -- plus the NOTs omitted in sbox.
******************************************************************************/
.globl aes128_keyschedule_ffs
.type aes128_keyschedule_ffs, %function
.align 2
aes128_keyschedule_ffs:
addi sp, sp, -64 // allocate space on the stack
sw a0, 0(sp) // save context
sw a1, 4(sp) // save context
sw s0, 8(sp) // save context
sw s1, 12(sp) // save context
sw s2, 16(sp) // save context
sw s3, 20(sp) // save context
sw s4, 24(sp) // save context
sw s5, 28(sp) // save context
sw s6, 32(sp) // save context
sw s7, 36(sp) // save context
sw s8, 40(sp) // save context
sw ra, 44(sp) // save context
sw s10, 48(sp) // save context
sw s11, 52(sp) // save context
sw s9, 56(sp) // save context
lw s0, 0(a1) // load input word
lw s1, 0(a2) // load input word
lw s2, 4(a1) // load input word
lw s3, 4(a2) // load input word
lw s4, 8(a1) // load input word
lw s5, 8(a2) // load input word
lw s6, 12(a1) // load input word
lw s7, 12(a2) // load input word
li t0, 0x55555555 // mask for SWAPMOVE
li t1, 0x33333333 // mask for SWAPMOVE
li t2, 0x0f0f0f0f // mask for SWAPMOVE
packing // bitslice the two 128-bit keys
store_rkey s0,s1,s2,s3,s4,s5,s6,s7, 0(a0) // rkey 0 = packed master keys
jal sbox
xori s7, s7, 0x300 // add the 1st rconst
jal aes128_xorcolumns_rotword
jal sbox
xori s6, s6, 0x300 // add the 2nd rconst
jal aes128_xorcolumns_rotword
jal inv_shiftrows_1 // fix up prev rkey with SR^-1
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
jal sbox
xori s5, s5, 0x300 // add the 3rd rconst
jal aes128_xorcolumns_rotword
jal double_shiftrows // fix up prev rkey with SR^2
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
jal sbox
xori s4, s4, 0x300 // add the 4th rconst
jal aes128_xorcolumns_rotword
jal inv_shiftrows_3 // fix up prev rkey with SR^-3
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
jal sbox
xori s3, s3, 0x300 // add the 5th rconst
jal aes128_xorcolumns_rotword
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0) // no fix-up (SR^0)
jal sbox
xori s2, s2, 0x300 // add the 6th rconst
jal aes128_xorcolumns_rotword
jal inv_shiftrows_1 // fix up prev rkey with SR^-1
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
jal sbox
xori s1, s1, 0x300 // add the 7th rconst
jal aes128_xorcolumns_rotword
jal double_shiftrows // fix up prev rkey with SR^2
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
jal sbox
xori s0, s0, 0x300 // add the 8th rconst
jal aes128_xorcolumns_rotword
jal inv_shiftrows_3 // fix up prev rkey with SR^-3
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
jal sbox
xori s7, s7, 0x300 // add the 9th rconst
xori s6, s6, 0x300 // add the 9th rconst
xori s4, s4, 0x300 // add the 9th rconst
xori s3, s3, 0x300 // add the 9th rconst
jal aes128_xorcolumns_rotword
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0) // no fix-up (SR^0)
jal sbox
xori s6, s6, 0x300 // add the 10th rconst
xori s5, s5, 0x300 // add the 10th rconst
xori s3, s3, 0x300 // add the 10th rconst
xori s2, s2, 0x300 // add the 10th rconst
jal aes128_xorcolumns_rotword
jal inv_shiftrows_1 // fix up prev rkey with SR^-1
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
store_not_rkey s0,s1,s2,s3,s4,s5,s6,s7, 0(a0) // store the last rkey
lw a0, 0(sp) // restore context
lw a1, 4(sp) // restore context
lw s0, 8(sp) // restore context
lw s1, 12(sp) // restore context
lw s2, 16(sp) // restore context
lw s3, 20(sp) // restore context
lw s4, 24(sp) // restore context
lw s5, 28(sp) // restore context
lw s6, 32(sp) // restore context
lw s7, 36(sp) // restore context
lw s8, 40(sp) // restore context
lw ra, 44(sp) // restore context
lw s10, 48(sp) // restore context
lw s11, 52(sp) // restore context
lw s9, 56(sp) // restore context
addi sp, sp, 64 // restore stack pointer
ret // exit
.size aes128_keyschedule_ffs,.-aes128_keyschedule_ffs
/******************************************************************************
* Fully bitsliced AES-256 key schedule according to the fully-fixsliced (ffs)
* representation.
*
* The function prototype is:
* - void aes256_keyschedule_ffs(uint32_t* rkeys, const uint8_t* key0,
* const uint8_t* key1);
******************************************************************************/
.globl aes256_keyschedule_ffs
.type aes256_keyschedule_ffs, %function
.align 2
// void aes256_keyschedule_ffs(uint32_t* rkeys, const uint8_t* key0,
//                             const uint8_t* key1)
// a0 = rkeys output buffer, a1/a2 = the two 128-bit key halves of the
// 256-bit key (interleaved word-by-word below and packed into the
// bitsliced representation by the `packing` macro -- defined earlier in
// this file; confirm the exact layout against it).
// Uses helpers defined earlier in this file: sbox, packing,
// store_rkey/store_not_rkey, aes256_xorcolumns(_rotword),
// inv_shiftrows_1/_3, double_shiftrows.  `jal` clobbers ra, hence the
// spill below.  Later stores use -32(a0), so the store macros apparently
// advance a0 as they go -- TODO confirm against the macro definitions.
// NOTE(review): the spill-slot order (ra at 44, s10/s11 before s9) is
// irregular but matches the restore sequence at the end.
aes256_keyschedule_ffs:
addi sp, sp, -64 // allocate space on the stack
sw a0, 0(sp) // save context
sw a1, 4(sp) // save context
sw s0, 8(sp) // save context
sw s1, 12(sp) // save context
sw s2, 16(sp) // save context
sw s3, 20(sp) // save context
sw s4, 24(sp) // save context
sw s5, 28(sp) // save context
sw s6, 32(sp) // save context
sw s7, 36(sp) // save context
sw s8, 40(sp) // save context
sw ra, 44(sp) // save context
sw s10, 48(sp) // save context
sw s11, 52(sp) // save context
sw s9, 56(sp) // save context
// Pack the first 128 key bits (interleaving the a1 and a2 halves).
lw s0, 0(a1) // load first 128 key bits
lw s1, 0(a2) // load first 128 key bits
lw s2, 4(a1) // load first 128 key bits
lw s3, 4(a2) // load first 128 key bits
lw s4, 8(a1) // load first 128 key bits
lw s5, 8(a2) // load first 128 key bits
lw s6, 12(a1) // load first 128 key bits
lw s7, 12(a2) // load first 128 key bits
li t0, 0x55555555 // mask for SWAPMOVE
li t1, 0x33333333 // mask for SWAPMOVE
li t2, 0x0f0f0f0f // mask for SWAPMOVE
packing
store_rkey s0,s1,s2,s3,s4,s5,s6,s7, 0(a0)
// Pack the last 128 key bits the same way.
lw s0, 16(a1) // load last 128 key bits
lw s1, 16(a2) // load last 128 key bits
lw s2, 20(a1) // load last 128 key bits
lw s3, 20(a2) // load last 128 key bits
lw s4, 24(a1) // load last 128 key bits
lw s5, 24(a2) // load last 128 key bits
lw s6, 28(a1) // load last 128 key bits
lw s7, 28(a2) // load last 128 key bits
li t0, 0x55555555 // mask for SWAPMOVE
li t1, 0x33333333 // mask for SWAPMOVE
li t2, 0x0f0f0f0f // mask for SWAPMOVE
packing
store_rkey s0,s1,s2,s3,s4,s5,s6,s7, 32(a0)
// Derive the remaining round keys; the xori 0x300 adds the round
// constant in the bitsliced domain, and the inv_shiftrows_* /
// double_shiftrows calls apply the fully-fixsliced representation
// corrections (see the header comment above this function).
jal sbox
xori s7, s7, 0x300 // add the 1st rconst
jal aes256_xorcolumns_rotword
jal sbox
jal aes256_xorcolumns
jal inv_shiftrows_1
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
jal sbox
xori s6, s6, 0x300 // add the 2nd rconst
jal aes256_xorcolumns_rotword
jal double_shiftrows
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
jal sbox
jal aes256_xorcolumns
jal inv_shiftrows_3
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
jal sbox
xori s5, s5, 0x300 // add the 3rd rconst
jal aes256_xorcolumns_rotword
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
jal sbox
jal aes256_xorcolumns
jal inv_shiftrows_1
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
jal sbox
xori s4, s4, 0x300 // add the 4th rconst
jal aes256_xorcolumns_rotword
jal double_shiftrows
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
jal sbox
jal aes256_xorcolumns
jal inv_shiftrows_3
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
jal sbox
xori s3, s3, 0x300 // add the 5th rconst
jal aes256_xorcolumns_rotword
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
jal sbox
jal aes256_xorcolumns
jal inv_shiftrows_1
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
jal sbox
xori s2, s2, 0x300 // add the 6th rconst
jal aes256_xorcolumns_rotword
jal double_shiftrows
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
jal sbox
jal aes256_xorcolumns
jal inv_shiftrows_3
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
jal sbox
xori s1, s1, 0x300 // add the 7th rconst
jal aes256_xorcolumns_rotword
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
// Fix up the two final round keys in place.
lw t0, 0(a0) // load 1st prev rkey word
lw t1, 4(a0) // load 2nd prev rkey word
lw t2, 8(a0) // load 3rd prev rkey word
lw t3, 12(a0) // load 4th prev rkey word
lw t4, 16(a0) // load 5th prev rkey word
lw t5, 20(a0) // load 6th prev rkey word
lw t6, 24(a0) // load 7th prev rkey word
lw a7, 28(a0) // load 8th prev rkey word
jal inv_shiftrows_1
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, 0(a0)
store_not_rkey s0,s1,s2,s3,s4,s5,s6,s7, 32(a0)
lw a0, 0(sp) // restore context
lw a1, 4(sp) // restore context
lw s0, 8(sp) // restore context
lw s1, 12(sp) // restore context
lw s2, 16(sp) // restore context
lw s3, 20(sp) // restore context
lw s4, 24(sp) // restore context
lw s5, 28(sp) // restore context
lw s6, 32(sp) // restore context
lw s7, 36(sp) // restore context
lw s8, 40(sp) // restore context
lw ra, 44(sp) // restore context
lw s10, 48(sp) // restore context
lw s11, 52(sp) // restore context
lw s9, 56(sp) // restore context
addi sp, sp, 64 // restore stack pointer
ret // exit
.size aes256_keyschedule_ffs,.-aes256_keyschedule_ffs
/******************************************************************************
* Fully bitsliced AES-128 key schedule according to the semi-fixsliced (sfs)
* representation.
*
* The function prototype is:
* - void aes128_keyschedule_sfs(uint32_t* rkeys, const uint8_t* key0,
* const uint8_t* key1);
******************************************************************************/
.globl aes128_keyschedule_sfs
.type aes128_keyschedule_sfs, %function
.align 2
// void aes128_keyschedule_sfs(uint32_t* rkeys, const uint8_t* key0,
//                             const uint8_t* key1)
// a0 = rkeys output buffer; a1/a2 = two 128-bit keys whose words are
// interleaved below and packed into the bitsliced form by `packing`
// (macro defined earlier in this file).
// Semi-fixsliced variant: only inv_shiftrows_1 corrections are applied,
// on every other round (compare with the _ffs variant, which also uses
// inv_shiftrows_3/double_shiftrows).
// Uses sbox, aes128_xorcolumns_rotword, store_rkey/store_not_rkey from
// earlier in this file; `jal` clobbers ra, hence the spill below.
// Later stores use -32(a0), so the store macros apparently advance a0
// -- TODO confirm against the macro definitions.
aes128_keyschedule_sfs:
addi sp, sp, -64 // allocate space on the stack
sw a0, 0(sp) // save context
sw a1, 4(sp) // save context
sw s0, 8(sp) // save context
sw s1, 12(sp) // save context
sw s2, 16(sp) // save context
sw s3, 20(sp) // save context
sw s4, 24(sp) // save context
sw s5, 28(sp) // save context
sw s6, 32(sp) // save context
sw s7, 36(sp) // save context
sw s8, 40(sp) // save context
sw ra, 44(sp) // save context
sw s10, 48(sp) // save context
sw s11, 52(sp) // save context
sw s9, 56(sp) // save context
lw s0, 0(a1) // load input word
lw s1, 0(a2) // load input word
lw s2, 4(a1) // load input word
lw s3, 4(a2) // load input word
lw s4, 8(a1) // load input word
lw s5, 8(a2) // load input word
lw s6, 12(a1) // load input word
lw s7, 12(a2) // load input word
li t0, 0x55555555 // mask for SWAPMOVE
li t1, 0x33333333 // mask for SWAPMOVE
li t2, 0x0f0f0f0f // mask for SWAPMOVE
packing
store_rkey s0,s1,s2,s3,s4,s5,s6,s7, 0(a0)
// Ten rounds of key expansion; xori 0x300 injects the round constant
// in the bitsliced domain (rounds 9/10 hit several slices at once).
jal sbox
xori s7, s7, 0x300 // add the 1st rconst
jal aes128_xorcolumns_rotword
jal sbox
xori s6, s6, 0x300 // add the 2nd rconst
jal aes128_xorcolumns_rotword
jal inv_shiftrows_1
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
jal sbox
xori s5, s5, 0x300 // add the 3rd rconst
jal aes128_xorcolumns_rotword
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
jal sbox
xori s4, s4, 0x300 // add the 4th rconst
jal aes128_xorcolumns_rotword
jal inv_shiftrows_1
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
jal sbox
xori s3, s3, 0x300 // add the 5th rconst
jal aes128_xorcolumns_rotword
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
jal sbox
xori s2, s2, 0x300 // add the 6th rconst
jal aes128_xorcolumns_rotword
jal inv_shiftrows_1
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
jal sbox
xori s1, s1, 0x300 // add the 7th rconst
jal aes128_xorcolumns_rotword
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
jal sbox
xori s0, s0, 0x300 // add the 8th rconst
jal aes128_xorcolumns_rotword
jal inv_shiftrows_1
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
jal sbox
xori s7, s7, 0x300 // add the 9th rconst
xori s6, s6, 0x300 // add the 9th rconst
xori s4, s4, 0x300 // add the 9th rconst
xori s3, s3, 0x300 // add the 9th rconst
jal aes128_xorcolumns_rotword
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
jal sbox
xori s6, s6, 0x300 // add the 10th rconst
xori s5, s5, 0x300 // add the 10th rconst
xori s3, s3, 0x300 // add the 10th rconst
xori s2, s2, 0x300 // add the 10th rconst
jal aes128_xorcolumns_rotword
jal inv_shiftrows_1
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
store_not_rkey s0,s1,s2,s3,s4,s5,s6,s7, 0(a0)
lw a0, 0(sp) // restore context
lw a1, 4(sp) // restore context
lw s0, 8(sp) // restore context
lw s1, 12(sp) // restore context
lw s2, 16(sp) // restore context
lw s3, 20(sp) // restore context
lw s4, 24(sp) // restore context
lw s5, 28(sp) // restore context
lw s6, 32(sp) // restore context
lw s7, 36(sp) // restore context
lw s8, 40(sp) // restore context
lw ra, 44(sp) // restore context
lw s10, 48(sp) // restore context
lw s11, 52(sp) // restore context
lw s9, 56(sp) // restore context
addi sp, sp, 64 // restore stack pointer
ret // exit
.size aes128_keyschedule_sfs,.-aes128_keyschedule_sfs
/******************************************************************************
* Fully bitsliced AES-256 key schedule according to the semi-fixsliced (sfs)
* representation.
*
* The function prototype is:
* - void aes256_keyschedule_sfs(uint32_t* rkeys, const uint8_t* key0,
* const uint8_t* key1);
******************************************************************************/
.globl aes256_keyschedule_sfs
.type aes256_keyschedule_sfs, %function
.align 2
// void aes256_keyschedule_sfs(uint32_t* rkeys, const uint8_t* key0,
//                             const uint8_t* key1)
// a0 = rkeys output buffer; a1/a2 = the two 128-bit halves of the
// 256-bit key, interleaved below and packed into the bitsliced form by
// `packing` (macro defined earlier in this file).
// Semi-fixsliced variant: only inv_shiftrows_1 corrections are applied
// (compare with aes256_keyschedule_ffs above, which also needs
// inv_shiftrows_3/double_shiftrows).
// Uses sbox, aes256_xorcolumns(_rotword), store_rkey/store_not_rkey
// from earlier in this file; `jal` clobbers ra, hence the spill below.
// Later stores use -32(a0), so the store macros apparently advance a0
// -- TODO confirm against the macro definitions.
aes256_keyschedule_sfs:
addi sp, sp, -64 // allocate space on the stack
sw a0, 0(sp) // save context
sw a1, 4(sp) // save context
sw s0, 8(sp) // save context
sw s1, 12(sp) // save context
sw s2, 16(sp) // save context
sw s3, 20(sp) // save context
sw s4, 24(sp) // save context
sw s5, 28(sp) // save context
sw s6, 32(sp) // save context
sw s7, 36(sp) // save context
sw s8, 40(sp) // save context
sw ra, 44(sp) // save context
sw s10, 48(sp) // save context
sw s11, 52(sp) // save context
sw s9, 56(sp) // save context
lw s0, 0(a1) // load first 128 key bits
lw s1, 0(a2) // load first 128 key bits
lw s2, 4(a1) // load first 128 key bits
lw s3, 4(a2) // load first 128 key bits
lw s4, 8(a1) // load first 128 key bits
lw s5, 8(a2) // load first 128 key bits
lw s6, 12(a1) // load first 128 key bits
lw s7, 12(a2) // load first 128 key bits
li t0, 0x55555555 // mask for SWAPMOVE
li t1, 0x33333333 // mask for SWAPMOVE
li t2, 0x0f0f0f0f // mask for SWAPMOVE
packing
store_rkey s0,s1,s2,s3,s4,s5,s6,s7, 0(a0)
lw s0, 16(a1) // load last 128 key bits
lw s1, 16(a2) // load last 128 key bits
lw s2, 20(a1) // load last 128 key bits
lw s3, 20(a2) // load last 128 key bits
lw s4, 24(a1) // load last 128 key bits
lw s5, 24(a2) // load last 128 key bits
lw s6, 28(a1) // load last 128 key bits
lw s7, 28(a2) // load last 128 key bits
li t0, 0x55555555 // mask for SWAPMOVE
li t1, 0x33333333 // mask for SWAPMOVE
li t2, 0x0f0f0f0f // mask for SWAPMOVE
packing
store_rkey s0,s1,s2,s3,s4,s5,s6,s7, 32(a0)
// AES-256 expansion: rconst rounds (xorcolumns_rotword) alternate with
// rconst-free rounds (xorcolumns); xori 0x300 injects the round
// constant in the bitsliced domain.
jal sbox
xori s7, s7, 0x300 // add the 1st rconst
jal aes256_xorcolumns_rotword
jal sbox
jal aes256_xorcolumns
jal inv_shiftrows_1
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
jal sbox
xori s6, s6, 0x300 // add the 2nd rconst
jal aes256_xorcolumns_rotword
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
jal sbox
jal aes256_xorcolumns
jal inv_shiftrows_1
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
jal sbox
xori s5, s5, 0x300 // add the 3rd rconst
jal aes256_xorcolumns_rotword
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
jal sbox
jal aes256_xorcolumns
jal inv_shiftrows_1
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
jal sbox
xori s4, s4, 0x300 // add the 4th rconst
jal aes256_xorcolumns_rotword
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
jal sbox
jal aes256_xorcolumns
jal inv_shiftrows_1
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
jal sbox
xori s3, s3, 0x300 // add the 5th rconst
jal aes256_xorcolumns_rotword
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
jal sbox
jal aes256_xorcolumns
jal inv_shiftrows_1
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
jal sbox
xori s2, s2, 0x300 // add the 6th rconst
jal aes256_xorcolumns_rotword
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
jal sbox
jal aes256_xorcolumns
jal inv_shiftrows_1
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
jal sbox
xori s1, s1, 0x300 // add the 7th rconst
jal aes256_xorcolumns_rotword
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, -32(a0)
// Fix up the two final round keys in place.
lw t0, 0(a0) // load 1st prev rkey word
lw t1, 4(a0) // load 2nd prev rkey word
lw t2, 8(a0) // load 3rd prev rkey word
lw t3, 12(a0) // load 4th prev rkey word
lw t4, 16(a0) // load 5th prev rkey word
lw t5, 20(a0) // load 6th prev rkey word
lw t6, 24(a0) // load 7th prev rkey word
lw a7, 28(a0) // load 8th prev rkey word
jal inv_shiftrows_1
store_not_rkey t0,t1,t2,t3,t4,t5,t6,a7, 0(a0)
store_not_rkey s0,s1,s2,s3,s4,s5,s6,s7, 32(a0)
lw a0, 0(sp) // restore context
lw a1, 4(sp) // restore context
lw s0, 8(sp) // restore context
lw s1, 12(sp) // restore context
lw s2, 16(sp) // restore context
lw s3, 20(sp) // restore context
lw s4, 24(sp) // restore context
lw s5, 28(sp) // restore context
lw s6, 32(sp) // restore context
lw s7, 36(sp) // restore context
lw s8, 40(sp) // restore context
lw ra, 44(sp) // restore context
lw s10, 48(sp) // restore context
lw s11, 52(sp) // restore context
lw s9, 56(sp) // restore context
addi sp, sp, 64 // restore stack pointer
ret // exit
.size aes256_keyschedule_sfs,.-aes256_keyschedule_sfs
|
aadomn/cymric
| 28,705
|
artifact_tches2025-3/benchmark_avr/cymric_aes/cymric_aes/aes/rijndaelfast.s
|
; Copyright (C) 2003,2006 B. Poettering
;
; This program is free software; you can redistribute and/or modify
; it under the terms of the GNU General Public License as published by
; the Free Software Foundation; either version 2 of the License, or
; (at your option) any later version. Whenever you redistribute a copy
; of this document, make sure to include the copyright and license
; agreement without modification.
;
; This program is distributed in the hope that it will be useful,
; but WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
; GNU General Public License for more details.
;
; You should have received a copy of the GNU General Public License
; along with this program; if not, write to the Free Software
; Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
; The license text can be found here: http://www.gnu.org/licenses/gpl.txt
; http://point-at-infinity.org/avraes/
;
; This AES implementation was written in May 2003 by B. Poettering. It is
; published under the terms of the GNU General Public License. If you need
; AES code, but this license is unsuitable for your project, feel free to
; contact me: avraes AT point-at-infinity.org
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;
; RijndaelFast
;
; This is a microcontroller implementation of the Rijndael block cipher, better
; known as AES. The target device class is Atmel's AVR, a family of very fast
; and very powerful flash MCUs, operating at clock rates up to 16 MHz,
; executing one instruction per clock cycle (16 MIPS). The implementation
; given here is optimized for speed (versus codesize), and achieves an
; encryption rate of more than 100 kByte per second (on a 16MHz MCU).
; The decryption performs about 40% slower than encryption (typical for
; Rijndael).
;
; The implemented algorithm is restricted to block and key sizes of 128 bit.
; Larger key sizes can be obtained by altering the key scheduling code, which
; should be easy. As the cipher's state is completely kept in registers
; (which are limited in number), the block size is not that easy to enlarge.
;
; This implementation makes extensive use of the AVR's "lpm" instruction,
; which loads data bytes from program memory at given addresses (the s-boxes
; are realized that way). Some members of the AVR family don't offer that
; instruction at all (e.g. AT90S1200), others only in a restricted way
; (forcing the target register to be r0). The code below requires the least
; restricted lpm instruction (with free choice of the target register).
; The ATmega161 devices meet the above mentioned requirements.
;
; Statistics:
;
; 16 MHz MCU | clock cycles | blocks per second | bytes per second
; -----------+--------------+-------------------+------------------
; encryption | 2474 | 6467 | 103476
; decryption | 3411 | 4691 | 75051
;
; KEY SETUP TIME
; encryption: 756 clock cycles
; decryption: 756 + 4221 = 4977 clock cycles
;
; CODE SIZE
; instructions: 1306 byte ( 653 words)
; sboxes: 1792 byte ( 896 words) = 7 * 256 byte
; total: 3098 byte (1549 words)
;
; RAM REQUIREMENTS
; 16 * 11 = 176 byte for each expanded key
;
;
; This source code consists of four routines and an example application,
; which encrypts a certain plaintext and decrypts it afterwards with the
; same key. Comments in the code clarify the interaction between the key
; expansion and the encryption/decryption routines.
;
; I encourage you to read the following Rijndael-related papers/books/sites:
; [1] "The Design of Rijndael", Daemen & Rijmen, Springer, ISBN 3-540-42580-2
; [2] http://www.esat.kuleuven.ac.be/~rijmen/rijndael/
; [3] http://www.esat.kuleuven.ac.be/~rijmen/rijndael/rijndaeldocV2.zip
; [4] http://www.esat.kuleuven.ac.be/~rijmen/rijndael/atmal.zip
; [5] http://csrc.nist.gov/CryptoToolkit/aes/rijndael/
;
; [1] is *the* book about Rijndael, [2] is the official Rijndael homepage,
; [3] contains the complete Rijndael AES specification, [4] is another
; Rijndael-implementation for AVR MCUs (but much slower than this one,
; taking 3815 clock cycles per encryption), [5] is the official NIST AES
; site with further links.
;
; AVR and ATmega are registered trademarks by the ATMEL corporation.
; See http://www.atmel.com and http://www.atmel.com/products/avr/ for
; further details.
;;; ***************************************************************************
;;; The Rijndael cipher acts on a so-called (128 bit) "state matrix",
;;; represented here by the 4x4 state bytes ST11-ST44. To guarantee maximum
;;; performance on AVR MCUs, these bytes are kept in registers (defaulted to
;;; the 16 low order registers r0-r15, but this may be changed if required).
;;;
;;; The implementation makes use of six auxiliary registers (H1-H5 and I),
;;; some of which must reside in the upper registers (r16-r31). In addition
;;; ramp-registers YH:YL and ZH:ZL are used.
;;;
;;; If the context *really* requires more registers than the remaining ones,
;;; it seems promising to move the I-register to a (fixed) ram location.
;;; In the time critical routines the I-value is rarely used, thus the
;;; speed loss obtained by dropping it from the register file is acceptable.
#include <avr/io.h>
#define ST11 r0
#define ST21 r1
#define ST31 r2
#define ST41 r3
#define ST12 r4
#define ST22 r5
#define ST32 r6
#define ST42 r7
#define ST13 r8
#define ST23 r9
#define ST33 r10
#define ST43 r11
#define ST14 r12
#define ST24 r13
#define ST34 r14
#define ST44 r15
#define H1 r16
#define H2 r17
#define H3 r18
#define H4 r19
#define H5 r20
#define I r21
; Argument registers for function calls
#define ARG1 r24
#define ARG2 r22
#define ARG3 r20
/**
* push_registers macro:
*
* Pushes a given range of registers in ascending order
* To be called like: push_registers 0,15
*/
.macro push_registers from:req, to:req
; push the current register, then recurse with the next register number
; (the quoted "(\from+1)" keeps the arithmetic as a single macro argument)
push \from
.if \to-\from
push_registers "(\from+1)",\to
.endif
.endm
/**
* pop_registers macro:
*
* Pops a given range of registers in descending order
* To be called like: pop_registers 0,15
*/
.macro pop_registers from:req, to:req
; pop the highest register first, then recurse downwards -- the exact
; mirror of push_registers, so push/pop pairs restore in LIFO order
pop \to
.if \to-\from
pop_registers \from,"(\to-1)"
.endif
.endm
; saves registers, ensures calling convention is followed
.global expand_key
; void expand_key(uint8_t *rkeys, const uint8_t *key)
; (avr-gcc ABI: first arg in r24:25 = ARG1, second in r22:23 = ARG2).
; Expands a 128-bit key into 11 round keys (16*11 = 176 bytes) at rkeys.
; Saves/restores all callee-saved registers it touches and clears r1
; (avr-gcc's zero register) before returning.
expand_key:
; Save r2-r17,r28-r29
push_registers 2,17
push_registers 28,29
; X -> cipher key (ARG2), Y -> round-key output buffer (ARG1)
movw XL, ARG2
movw YL, ARG1
; Load the 16-byte cipher key into r0-r15 (the ST11..ST44 state regs)
.irp param,r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15
ld \param, X+
.endr
; Core function
; H1 = round constant (1,2,4,...,0x80,0x1b,0x36); H2 = 0x1b is the AES
; reduction polynomial used when the lsl of H1 carries out.
; S-box lookups use ld via Z: sbox lives in .data with .balign 256, so
; ZH = hi8(sbox) selects the table and ZL is the index byte.
ldi H1, 1
ldi H2, 0x1b
ldi ZH, hi8(sbox)
rjmp keyexp1
keyexp0:
; First column: word0 ^= SubWord(RotWord(word3)) ^ rcon
mov ZL, ST24
ld H3, Z
eor ST11, H3
eor ST11, H1
mov ZL, ST34
ld H3, Z
eor ST21, H3
mov ZL, ST44
ld H3, Z
eor ST31, H3
mov ZL, ST14
ld H3, Z
eor ST41, H3
; Remaining columns: word[i] ^= word[i-1]
eor ST12, ST11
eor ST22, ST21
eor ST32, ST31
eor ST42, ST41
eor ST13, ST12
eor ST23, ST22
eor ST33, ST32
eor ST43, ST42
eor ST14, ST13
eor ST24, ST23
eor ST34, ST33
eor ST44, ST43
; Advance the round constant: rcon = xtime(rcon) in GF(2^8)
lsl H1
brcc keyexp1
eor H1, H2
keyexp1:
; Store the current 16-byte round key
st Y+, ST11
st Y+, ST21
st Y+, ST31
st Y+, ST41
st Y+, ST12
st Y+, ST22
st Y+, ST32
st Y+, ST42
st Y+, ST13
st Y+, ST23
st Y+, ST33
st Y+, ST43
st Y+, ST14
st Y+, ST24
st Y+, ST34
st Y+, ST44
; After the 10th rconst (0x36) the lsl above yields 0x6c, which ends
; the loop once all 11 round keys have been written.
cpi H1, 0x6c
brne keyexp0
; Restore r2-r17,r28-r29
pop_registers 28,29
pop_registers 2,17
clr r1
ret
.size expand_key, .-expand_key
.global encrypt_data
; void encrypt_data(uint8_t *out, const uint8_t *in, const uint8_t *rkeys)
; (avr-gcc ABI: ARG1=r24:25, ARG2=r22:23, ARG3=r20:21 -- see defines above).
; Loads the 16-byte plaintext into r0-r15, runs "encrypt" with Y pointing
; at the expanded key, then stores the 16-byte ciphertext to ARG1.
; Fix: dropped the dead "in r26/r27" pair that read SP into X -- X was
; immediately overwritten by "movw XL, ARG2" before any use.
encrypt_data:
; Save registers r2-17,r28-29 (callee-saved in the avr-gcc ABI)
push_registers 2,17
push_registers 28,29
; X -> plaintext (ARG2), Y -> expanded key (ARG3)
movw XL, ARG2
movw YL, ARG3
; Load the plaintext given by argument to register 0-15
.irp param,r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15
ld \param, X+
.endr
rcall encrypt
; Save the final state from the registers to Y (ARG1)
movw YL, ARG1
.irp param,r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15
st Y+, \param
.endr
; Restore registers r2-17,r28-29
pop_registers 28,29
pop_registers 2,17
clr r1          ; avr-gcc requires r1 == 0 on return
ret
.size encrypt_data, .-encrypt_data
;;; ***************************************************************************
;;;
;;; ENCRYPT
;;; This routine encrypts a 128 bit plaintext block (supplied in ST11-ST44),
;;; using an expanded key given in YH:YL. The resulting 128 bit ciphertext
;;; block is stored in ST11-ST44.
;;;
;;; Parameters:
;;; YH:YL: pointer to expanded key
;;; ST11-ST44: 128 bit plaintext block
;;; Touched registers:
;;; ST11-ST41,H1-H5,I,ZH,ZL,YH,YL
;;; Clock cycles: 2474
; Entry: Y -> expanded key (from expand_key), state in ST11..ST44.
; Structure: initial AddRoundKey via encryp1, then 9 full rounds in the
; encryp0 loop (I counts 8 down; "sbrs I,7" exits once dec makes I
; negative), then a final round without MixColumns that falls through
; into encryp1 for the last AddRoundKey.
; Within a round, each output column combines the four SubBytes/ShiftRows
; inputs with the MixColumns coefficients {02,01,01,03}: 2*S[x] comes
; from sbox02, S[x] from sbox, and 3*S[x] is built as their XOR.
; The "; 1".."; 4" tags mark the four columns; ZH switches between the
; 256-byte-aligned tables, ZL carries the index byte.
encrypt:
rcall encryp1
ldi ZH, hi8(sbox)
ldi I, 8
encryp0:mov ZL, ST11 ; 1
ld H2, Z
mov H3, H2
mov H4, H2
ldi ZH, hi8(sbox02)
ld H1, Z
eor H4, H1
mov ZL, ST22
ld H5, Z
eor H1, H5
eor H2, H5
ldi ZH, hi8(sbox)
ld H5, Z
eor H1, H5
eor H3, H5
eor H4, H5
mov ZL, ST33
ld H5, Z
eor H1, H5
eor H2, H5
eor H4, H5
ldi ZH, hi8(sbox02)
ld H5, Z
eor H2, H5
eor H3, H5
mov ZL, ST44
ld H5, Z
eor H3, H5
eor H4, H5
ldi ZH, hi8(sbox)
ld H5, Z
eor H1, H5
eor H2, H5
eor H3, H5
ldd ST11, Y+0
eor ST11, H1
mov ZL, ST41 ; 2
ldd ST41, Y+3
eor ST41, H4
ld H1, Z
mov H4, H1
mov ST33, H1
ldi ZH, hi8(sbox02)
ld ST44, Z
eor ST33, ST44
mov ZL, ST12
ld H5, Z
eor H1, H5
eor ST44, H5
ldi ZH, hi8(sbox)
ld H5, Z
eor H4, H5
eor ST33, H5
eor ST44, H5
mov ZL, ST23
ld H5, Z
eor H1, H5
eor ST33, H5
eor ST44, H5
ldi ZH, hi8(sbox02)
ld H5, Z
eor H1, H5
eor H4, H5
mov ZL, ST34
ld H5, Z
eor H4, H5
eor ST33, H5
ldi ZH, hi8(sbox)
ld H5, Z
eor H1, H5
eor H4, H5
eor ST44, H5
ldd ST12, Y+4
eor ST12, H1
ldd ST22, Y+5
eor ST22, H4
mov ZL, ST31 ; 3
ldd ST31, Y+2
eor ST31, H3
ld ST34, Z
mov H3, ST34
mov H1, ST34
ldi ZH, hi8(sbox02)
ld H4, Z
eor H3, H4
mov ZL, ST42
ldd ST42, Y+7
eor ST42, ST44
ld H5, Z
eor H4, H5
eor H1, H5
ldi ZH, hi8(sbox)
ld H5, Z
eor ST34, H5
eor H3, H5
eor H4, H5
mov ZL, ST13
ld H5, Z
eor H3, H5
eor H4, H5
eor H1, H5
ldi ZH, hi8(sbox02)
ld H5, Z
eor ST34, H5
eor H1, H5
mov ZL, ST24
ld H5, Z
eor ST34, H5
eor H3, H5
ldi ZH, hi8(sbox)
ld H5, Z
eor ST34, H5
eor H4, H5
eor H1, H5
ldd ST13, Y+8
eor ST13, ST34
ldd ST23, Y+9
eor ST23, H3
mov ZL, ST32 ; 4
ldd ST32, Y+6
eor ST32, ST33
ldd ST33, Y+10
eor ST33, H4
ld ST24, Z
mov ST34, ST24
mov H4, ST24
ldi ZH, hi8(sbox02)
ld H3, Z
eor ST34, H3
mov ZL, ST43
ldd ST43, Y+11
eor ST43, H1
ld H5, Z
eor H3, H5
eor H4, H5
ldi ZH, hi8(sbox)
ld H5, Z
eor ST24, H5
eor ST34, H5
eor H3, H5
mov ZL, ST14
ld H5, Z
eor ST34, H5
eor H3, H5
eor H4, H5
ldi ZH, hi8(sbox02)
ld H5, Z
eor ST24, H5
eor H4, H5
mov ZL, ST21
ld H5, Z
eor ST24, H5
eor ST34, H5
ldi ZH, hi8(sbox)
ld H5, Z
eor ST24, H5
eor H3, H5
eor H4, H5
ldd ST21, Y+1
eor ST21, H2
ldd ST14, Y+12
eor ST14, ST24
ldd ST24, Y+13
eor ST24, ST34
ldd ST34, Y+14
eor ST34, H3
ldd ST44, Y+15
eor ST44, H4
adiw Y, 16
dec I
sbrs I,7
jmp encryp0
; Omit MixColumns for the last round
; (plain SubBytes plus ShiftRows done by rotating each row's registers)
mov ZL, ST11
ld ST11, Z
mov ZL, ST12
ld ST12, Z
mov ZL, ST13
ld ST13, Z
mov ZL, ST14
ld ST14, Z
mov H1, ST21
mov ZL, ST22
ld ST21, Z
mov ZL, ST23
ld ST22, Z
mov ZL, ST24
ld ST23, Z
mov ZL, H1
ld ST24, Z
mov H1, ST31
mov ZL, ST33
ld ST31, Z
mov ZL, H1
ld ST33, Z
mov H1, ST32
mov ZL, ST34
ld ST32, Z
mov ZL, H1
ld ST34, Z
mov H1, ST41
mov ZL, ST44
ld ST41, Z
mov ZL, ST43
ld ST44, Z
mov ZL, ST42
ld ST43, Z
mov ZL, H1
ld ST42, Z
encryp1:
; AddRoundKey (also called standalone for the initial round key;
; reached by fall-through for the final one)
ld H1, Y+
eor ST11, H1
ld H1, Y+
eor ST21, H1
ld H1, Y+
eor ST31, H1
ld H1, Y+
eor ST41, H1
ld H1, Y+
eor ST12, H1
ld H1, Y+
eor ST22, H1
ld H1, Y+
eor ST32, H1
ld H1, Y+
eor ST42, H1
ld H1, Y+
eor ST13, H1
ld H1, Y+
eor ST23, H1
ld H1, Y+
eor ST33, H1
ld H1, Y+
eor ST43, H1
ld H1, Y+
eor ST14, H1
ld H1, Y+
eor ST24, H1
ld H1, Y+
eor ST34, H1
ld H1, Y+
eor ST44, H1
ret
.size encrypt, .-encrypt
.global decrypt_data
; void decrypt_data(uint8_t *out, const uint8_t *in, const uint8_t *rkeys_end)
; (avr-gcc ABI: ARG1=r24:25, ARG2=r22:23, ARG3=r20:21 -- see defines above).
; Loads the 16-byte ciphertext into r0-r15 and runs "decrypt"; per the
; header comment above "decrypt", ARG3 must point BEHIND the last byte of
; the patched (MixColumns-applied) expanded key.
; Fixes: dropped the dead "in r26/r27" pair that read SP into X (X was
; immediately overwritten by "movw XL, ARG2" before any use) and added
; the missing .size directive for consistency with encrypt_data.
decrypt_data:
; Save registers r2-17,r28-29 (callee-saved in the avr-gcc ABI)
push_registers 2,17
push_registers 28,29
; X -> ciphertext (ARG2), Y -> end of patched expanded key (ARG3)
movw XL, ARG2
movw YL, ARG3
; Load the ciphertext given by argument to register 0-15
.irp param,r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15
ld \param, X+
.endr
rcall decrypt
; Save the final state from the registers to Y (ARG1)
movw YL, ARG1
.irp param,r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15
st Y+, \param
.endr
; Restore registers r2-17,r28-29
pop_registers 28,29
pop_registers 2,17
clr r1          ; avr-gcc requires r1 == 0 on return
ret
.size decrypt_data, .-decrypt_data
;;; ***************************************************************************
;;;
;;; DECRYPT
;;; This routine decrypts a 128 bit ciphertext block (given in ST11-ST44),
;;; using an expanded (and patched) key supplied in the 16*11 memory locations
;;; BEFORE YH:YL (YH:YL points behind the last byte of key material!). The
;;; resulting 128 bit plaintext block is stored in ST11-ST44. The "equivalent
;;; decryption algorithm" of Rijndael is implemented, so the MixColumns
;;; diffusion operator has to be applied to the expanded key (done
;;; with the routine patch_decryption_key) before calling the "decrypt"
;;; routine.
;;;
;;; Parameters:
;;; YH:YL: pointer behind patched key
;;; ST11-ST44: 128 bit ciphertext block
;;; Touched registers:
;;; ST11-ST41,H1-H5,I,ZH,ZL,YH,YL
;;; Clock cycles: 3411
; Entry: Y -> one past the patched expanded key; state in ST11..ST44
; (see the header comment above for the full contract).
; Equivalent inverse cipher: each output column combines the four
; InvSubBytes inputs with the InvMixColumns coefficients {0e,09,0d,0b},
; using the pre-scaled tables isbox0e/isbox09/isbox0d/isbox0b; ZH
; selects the 256-byte-aligned table, ZL carries the index byte.
; Round keys are consumed backwards: "sbiw Y, 16" steps the pointer
; down each iteration, and decryp1 reads with pre-decrement (-Y).
; After 9 inverse rounds (I counts 8 down; "sbrs I,7" exits once dec
; makes I negative), a plain InvSubBytes + InvShiftRows pass and a
; final AddRoundKey (decryp1, also used for the first round key at
; entry) finish the block.
; Fix: added the missing .size directive for consistency with encrypt.
decrypt:rcall decryp1
ldi I, 8
ldi ZH, hi8(isbox0e)
decryp0:sbiw Y, 16
mov ZL, ST11 ; 1
ld H1, Z
ldi ZH, hi8(isbox09)
ld H2, Z
ldi ZH, hi8(isbox0d)
ld H3, Z
ldi ZH, hi8(isbox0b)
ld H4, Z
mov ZL, ST24
ld H5, Z
eor H1, H5
ldi ZH, hi8(isbox0d)
ld H5, Z
eor H4, H5
ldi ZH, hi8(isbox09)
ld H5, Z
eor H3, H5
ldi ZH, hi8(isbox0e)
ld H5, Z
eor H2, H5
mov ZL, ST33
ld H5, Z
eor H3, H5
ldi ZH, hi8(isbox09)
ld H5, Z
eor H4, H5
ldi ZH, hi8(isbox0d)
ld H5, Z
eor H1, H5
ldi ZH, hi8(isbox0b)
ld H5, Z
eor H2, H5
mov ZL, ST42
ld H5, Z
eor H3, H5
ldi ZH, hi8(isbox0d)
ld H5, Z
eor H2, H5
ldi ZH, hi8(isbox09)
ld H5, Z
eor H1, H5
ldi ZH, hi8(isbox0e)
ld H5, Z
eor H4, H5
ldd ST11, Y+0
eor ST11, H1
mov ZL, ST21 ; 2
ldd ST21, Y+1
eor ST21, H2
ld H2, Z
ldi ZH, hi8(isbox09)
ld ST24, Z
ldi ZH, hi8(isbox0d)
ld ST33, Z
ldi ZH, hi8(isbox0b)
ld H1, Z
mov ZL, ST12
ld H5, Z
eor ST33, H5
ldi ZH, hi8(isbox0d)
ld H5, Z
eor ST24, H5
ldi ZH, hi8(isbox09)
ld H5, Z
eor H2, H5
ldi ZH, hi8(isbox0e)
ld H5, Z
eor H1, H5
mov ZL, ST34
ld H5, Z
eor ST24, H5
ldi ZH, hi8(isbox09)
ld H5, Z
eor ST33, H5
ldi ZH, hi8(isbox0d)
ld H5, Z
eor H1, H5
ldi ZH, hi8(isbox0b)
ld H5, Z
eor H2, H5
mov ZL, ST43
ld H5, Z
eor ST24, H5
ldi ZH, hi8(isbox0d)
ld H5, Z
eor H2, H5
ldi ZH, hi8(isbox09)
ld H5, Z
eor H1, H5
ldi ZH, hi8(isbox0e)
ld H5, Z
eor ST33, H5
ldd ST12, Y+4
eor ST12, H1
ldd ST42, Y+7
eor ST42, ST33
mov ZL, ST31 ; 3
ldd ST31, Y+2
eor ST31, H3
ld ST34, Z
ldi ZH, hi8(isbox09)
ld H3, Z
ldi ZH, hi8(isbox0d)
ld H1, Z
ldi ZH, hi8(isbox0b)
ld ST33, Z
mov ZL, ST13
ld H5, Z
eor H3, H5
ldi ZH, hi8(isbox0d)
ld H5, Z
eor ST34, H5
ldi ZH, hi8(isbox09)
ld H5, Z
eor ST33, H5
ldi ZH, hi8(isbox0e)
ld H5, Z
eor H1, H5
mov ZL, ST22
ld H5, Z
eor ST33, H5
ldi ZH, hi8(isbox09)
ld H5, Z
eor ST34, H5
ldi ZH, hi8(isbox0d)
ld H5, Z
eor H3, H5
ldi ZH, hi8(isbox0b)
ld H5, Z
eor H1, H5
mov ZL, ST44
ld H5, Z
eor ST34, H5
ldi ZH, hi8(isbox0d)
ld H5, Z
eor ST33, H5
ldi ZH, hi8(isbox09)
ld H5, Z
eor H1, H5
ldi ZH, hi8(isbox0e)
ld H5, Z
eor H3, H5
ldd ST13, Y+8
eor ST13, H1
ldd ST43, Y+11
eor ST43, H3
ldd ST22, Y+5
eor ST22, H2
mov ZL, ST41 ; 4
ldd ST41, Y+3
eor ST41, H4
ld H4, Z
ldi ZH, hi8(isbox09)
ld H1, Z
ldi ZH, hi8(isbox0d)
ld H2, Z
ldi ZH, hi8(isbox0b)
ld H3, Z
mov ZL, ST14
ld H5, Z
eor H4, H5
ldi ZH, hi8(isbox0d)
ld H5, Z
eor H3, H5
ldi ZH, hi8(isbox09)
ld H5, Z
eor H2, H5
ldi ZH, hi8(isbox0e)
ld H5, Z
eor H1, H5
mov ZL, ST23
ld H5, Z
eor H2, H5
ldi ZH, hi8(isbox09)
ld H5, Z
eor H3, H5
ldi ZH, hi8(isbox0d)
ld H5, Z
eor H4, H5
ldi ZH, hi8(isbox0b)
ld H5, Z
eor H1, H5
mov ZL, ST32
ld H5, Z
eor H2, H5
ldi ZH, hi8(isbox0d)
ld H5, Z
eor H1, H5
ldi ZH, hi8(isbox09)
ld H5, Z
eor H4, H5
ldi ZH, hi8(isbox0e)
ld H5, Z
eor H3, H5
ldd ST14, Y+12
eor ST14, H1
ldd ST23, Y+9
eor ST23, ST33
ldd ST32, Y+6
eor ST32, ST24
ldd ST33, Y+10
eor ST33, ST34
ldd ST34, Y+14
eor ST34, H3
ldd ST44, Y+15
eor ST44, H4
ldd ST24, Y+13
eor ST24, H2
dec I
sbrs I,7
jmp decryp0
; Final pass: plain InvSubBytes plus InvShiftRows (row registers rotated
; in the opposite direction to the encrypt path), no InvMixColumns.
ldi ZH, hi8(isbox)
mov ZL, ST11
ld ST11, Z
mov ZL, ST12
ld ST12, Z
mov ZL, ST13
ld ST13, Z
mov ZL, ST14
ld ST14, Z
mov H1, ST24
mov ZL, ST23
ld ST24, Z
mov ZL, ST22
ld ST23, Z
mov ZL, ST21
ld ST22, Z
mov ZL, H1
ld ST21, Z
mov H1, ST31
mov ZL, ST33
ld ST31, Z
mov ZL, H1
ld ST33, Z
mov H1, ST32
mov ZL, ST34
ld ST32, Z
mov ZL, H1
ld ST34, Z
mov H1, ST41
mov ZL, ST42
ld ST41, Z
mov ZL, ST43
ld ST42, Z
mov ZL, ST44
ld ST43, Z
mov ZL, H1
ld ST44, Z
; AddRoundKey, reading the 16 key bytes backwards with pre-decrement
decryp1:ld H1, -Y
eor ST44, H1
ld H1, -Y
eor ST34, H1
ld H1, -Y
eor ST24, H1
ld H1, -Y
eor ST14, H1
ld H1, -Y
eor ST43, H1
ld H1, -Y
eor ST33, H1
ld H1, -Y
eor ST23, H1
ld H1, -Y
eor ST13, H1
ld H1, -Y
eor ST42, H1
ld H1, -Y
eor ST32, H1
ld H1, -Y
eor ST22, H1
ld H1, -Y
eor ST12, H1
ld H1, -Y
eor ST41, H1
ld H1, -Y
eor ST31, H1
ld H1, -Y
eor ST21, H1
ld H1, -Y
eor ST11, H1
ret
.size decrypt, .-decrypt
;;; ***************************************************************************
;;;
;;; S-BOX
;;; Rijndael consists of a non-linear step in its rounds (called "sbox step"),
;;; generally implemented with hard-coded lookup tables. The implementation
;;; given above makes use of seven lookup tables in total: the sbox itself,
;;; its inverse, and scaled versions of both (e.g. sbox02[] = 2*sbox[]).
;;;
;;; This generous employment of expensive space of flash memory has two
;;; important advantages: excellent performance and protection against
;;; timing and power measurement attacks.
;;;
;;; The seven tables have to be aligned to a flash position with its lower
;;; address byte equal to 0x00. In assembler syntax: lo8(sbox<<1) == 0.
;;; To ensure the proper alignment of the sboxes, the assembler directive
;;; .ORG is used (below the sboxes are defined to begin at 0x800). Note, that
;;; any other address can be used as well, as long as the lower byte is equal
;;; to 0x00.
;;;
;;; The order of the sboxes is totally arbitrary. They even do not have to be
;;; allocated in adjacent memory areas.
.data
.balign 256    ; keeps lo8(table) == 0 so ZH alone selects a table, ZL indexes it
; Forward AES S-box S[x].
sbox:
.byte 0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5,0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76
.byte 0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0,0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0
.byte 0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc,0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15
.byte 0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a,0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75
.byte 0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0,0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84
.byte 0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b,0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf
.byte 0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85,0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8
.byte 0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5,0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2
.byte 0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17,0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73
.byte 0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88,0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb
.byte 0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c,0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79
.byte 0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9,0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08
.byte 0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6,0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a
.byte 0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e,0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e
.byte 0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94,0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf
.byte 0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68,0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16
.size sbox, .-sbox
; sbox02[x] = 2*S[x] in GF(2^8) (scaled S-box copy -- see comment above).
sbox02:
.byte 0xc6,0xf8,0xee,0xf6,0xff,0xd6,0xde,0x91,0x60,0x02,0xce,0x56,0xe7,0xb5,0x4d,0xec
.byte 0x8f,0x1f,0x89,0xfa,0xef,0xb2,0x8e,0xfb,0x41,0xb3,0x5f,0x45,0x23,0x53,0xe4,0x9b
.byte 0x75,0xe1,0x3d,0x4c,0x6c,0x7e,0xf5,0x83,0x68,0x51,0xd1,0xf9,0xe2,0xab,0x62,0x2a
.byte 0x08,0x95,0x46,0x9d,0x30,0x37,0x0a,0x2f,0x0e,0x24,0x1b,0xdf,0xcd,0x4e,0x7f,0xea
.byte 0x12,0x1d,0x58,0x34,0x36,0xdc,0xb4,0x5b,0xa4,0x76,0xb7,0x7d,0x52,0xdd,0x5e,0x13
.byte 0xa6,0xb9,0x00,0xc1,0x40,0xe3,0x79,0xb6,0xd4,0x8d,0x67,0x72,0x94,0x98,0xb0,0x85
.byte 0xbb,0xc5,0x4f,0xed,0x86,0x9a,0x66,0x11,0x8a,0xe9,0x04,0xfe,0xa0,0x78,0x25,0x4b
.byte 0xa2,0x5d,0x80,0x05,0x3f,0x21,0x70,0xf1,0x63,0x77,0xaf,0x42,0x20,0xe5,0xfd,0xbf
.byte 0x81,0x18,0x26,0xc3,0xbe,0x35,0x88,0x2e,0x93,0x55,0xfc,0x7a,0xc8,0xba,0x32,0xe6
.byte 0xc0,0x19,0x9e,0xa3,0x44,0x54,0x3b,0x0b,0x8c,0xc7,0x6b,0x28,0xa7,0xbc,0x16,0xad
.byte 0xdb,0x64,0x74,0x14,0x92,0x0c,0x48,0xb8,0x9f,0xbd,0x43,0xc4,0x39,0x31,0xd3,0xf2
.byte 0xd5,0x8b,0x6e,0xda,0x01,0xb1,0x9c,0x49,0xd8,0xac,0xf3,0xcf,0xca,0xf4,0x47,0x10
.byte 0x6f,0xf0,0x4a,0x5c,0x38,0x57,0x73,0x97,0xcb,0xa1,0xe8,0x3e,0x96,0x61,0x0d,0x0f
.byte 0xe0,0x7c,0x71,0xcc,0x90,0x06,0xf7,0x1c,0xc2,0x6a,0xae,0x69,0x17,0x99,0x3a,0x27
.byte 0xd9,0xeb,0x2b,0x22,0xd2,0xa9,0x07,0x33,0x2d,0x3c,0x15,0xc9,0x87,0xaa,0x50,0xa5
.byte 0x03,0x59,0x09,0x1a,0x65,0xd7,0x84,0xd0,0x82,0x29,0x5a,0x1e,0x7b,0xa8,0x6d,0x2c
.size sbox02, .-sbox02
/* isbox[x] = AES inverse S-box (FIPS-197 InvSbox), 256 entries;
 * first row 0x52,0x09,0x6a,0xd5,... matches the standard table.
 * Added .size for consistency with sbox/sbox02 above (emits no bytes). */
isbox:
.byte 0x52,0x09,0x6a,0xd5,0x30,0x36,0xa5,0x38,0xbf,0x40,0xa3,0x9e,0x81,0xf3,0xd7,0xfb
.byte 0x7c,0xe3,0x39,0x82,0x9b,0x2f,0xff,0x87,0x34,0x8e,0x43,0x44,0xc4,0xde,0xe9,0xcb
.byte 0x54,0x7b,0x94,0x32,0xa6,0xc2,0x23,0x3d,0xee,0x4c,0x95,0x0b,0x42,0xfa,0xc3,0x4e
.byte 0x08,0x2e,0xa1,0x66,0x28,0xd9,0x24,0xb2,0x76,0x5b,0xa2,0x49,0x6d,0x8b,0xd1,0x25
.byte 0x72,0xf8,0xf6,0x64,0x86,0x68,0x98,0x16,0xd4,0xa4,0x5c,0xcc,0x5d,0x65,0xb6,0x92
.byte 0x6c,0x70,0x48,0x50,0xfd,0xed,0xb9,0xda,0x5e,0x15,0x46,0x57,0xa7,0x8d,0x9d,0x84
.byte 0x90,0xd8,0xab,0x00,0x8c,0xbc,0xd3,0x0a,0xf7,0xe4,0x58,0x05,0xb8,0xb3,0x45,0x06
.byte 0xd0,0x2c,0x1e,0x8f,0xca,0x3f,0x0f,0x02,0xc1,0xaf,0xbd,0x03,0x01,0x13,0x8a,0x6b
.byte 0x3a,0x91,0x11,0x41,0x4f,0x67,0xdc,0xea,0x97,0xf2,0xcf,0xce,0xf0,0xb4,0xe6,0x73
.byte 0x96,0xac,0x74,0x22,0xe7,0xad,0x35,0x85,0xe2,0xf9,0x37,0xe8,0x1c,0x75,0xdf,0x6e
.byte 0x47,0xf1,0x1a,0x71,0x1d,0x29,0xc5,0x89,0x6f,0xb7,0x62,0x0e,0xaa,0x18,0xbe,0x1b
.byte 0xfc,0x56,0x3e,0x4b,0xc6,0xd2,0x79,0x20,0x9a,0xdb,0xc0,0xfe,0x78,0xcd,0x5a,0xf4
.byte 0x1f,0xdd,0xa8,0x33,0x88,0x07,0xc7,0x31,0xb1,0x12,0x10,0x59,0x27,0x80,0xec,0x5f
.byte 0x60,0x51,0x7f,0xa9,0x19,0xb5,0x4a,0x0d,0x2d,0xe5,0x7a,0x9f,0x93,0xc9,0x9c,0xef
.byte 0xa0,0xe0,0x3b,0x4d,0xae,0x2a,0xf5,0xb0,0xc8,0xeb,0xbb,0x3c,0x83,0x53,0x99,0x61
.byte 0x17,0x2b,0x04,0x7e,0xba,0x77,0xd6,0x26,0xe1,0x69,0x14,0x63,0x55,0x21,0x0c,0x7d
.size isbox, .-isbox
/* isbox0e[x] = 0x0e * InvSbox[x] in GF(2^8) (InvMixColumns coefficient
 * 0x0e applied to the inverse S-box; e.g. 0x0e*0x52 = 0x51).  Used to
 * form inverse T-table byte columns.  256 entries.
 * Added .size for consistency with sbox/sbox02 above (emits no bytes). */
isbox0e:
.byte 0x51,0x7e,0x1a,0x3a,0x3b,0x1f,0xac,0x4b,0x20,0xad,0x88,0xf5,0x4f,0xc5,0x26,0xb5
.byte 0xde,0x25,0x45,0x5d,0xc3,0x81,0x8d,0x6b,0x03,0x15,0xbf,0x95,0xd4,0x58,0x49,0x8e
.byte 0x75,0xf4,0x99,0x27,0xbe,0xf0,0xc9,0x7d,0x63,0xe5,0x97,0x62,0xb1,0xbb,0xfe,0xf9
.byte 0x70,0x8f,0x94,0x52,0xab,0x72,0xe3,0x66,0xb2,0x2f,0x86,0xd3,0x30,0x23,0x02,0xed
.byte 0x8a,0xa7,0xf3,0x4e,0x65,0x06,0xd1,0xc4,0x34,0xa2,0x05,0xa4,0x0b,0x40,0x5e,0xbd
.byte 0x3e,0x96,0xdd,0x4d,0x91,0x71,0x04,0x60,0x19,0xd6,0x89,0x67,0xb0,0x07,0xe7,0x79
.byte 0xa1,0x7c,0xf8,0x00,0x09,0x32,0x1e,0x6c,0xfd,0x0f,0x3d,0x36,0x0a,0x68,0x9b,0x24
.byte 0x0c,0x93,0xb4,0x1b,0x80,0x61,0x5a,0x1c,0xe2,0xc0,0x3c,0x12,0x0e,0xf2,0x2d,0x14
.byte 0x57,0xaf,0xee,0xa3,0xf7,0x5c,0x44,0x5b,0x8b,0xcb,0xb6,0xb8,0xd7,0x42,0x13,0x84
.byte 0x85,0xd2,0xae,0xc7,0x1d,0xdc,0x0d,0x77,0x2b,0xa9,0x11,0x47,0xa8,0xa0,0x56,0x22
.byte 0x87,0xd9,0x8c,0x98,0xa6,0xa5,0xda,0x3f,0x2c,0x50,0x6a,0x54,0xf6,0x90,0x2e,0x82
.byte 0x9f,0x69,0x6f,0xcf,0xc8,0x10,0xe8,0xdb,0xcd,0x6e,0xec,0x83,0xe6,0xaa,0x21,0xef
.byte 0xba,0x4a,0xea,0x29,0x31,0x2a,0xc6,0x35,0x74,0xfc,0xe0,0x33,0xf1,0x41,0x7f,0x17
.byte 0x76,0x43,0xcc,0xe4,0x9e,0x4c,0xc1,0x46,0x9d,0x01,0xfa,0xfb,0xb3,0x92,0xe9,0x6d
.byte 0x9a,0x37,0x59,0xeb,0xce,0xb7,0xe1,0x7a,0x9c,0x55,0x18,0x73,0x53,0x5f,0xdf,0x78
.byte 0xca,0xb9,0x38,0xc2,0x16,0xbc,0x28,0xff,0x39,0x08,0xd8,0x64,0x7b,0xd5,0x48,0xd0
.size isbox0e, .-isbox0e
/* isbox09[x] = 0x09 * InvSbox[x] in GF(2^8) (InvMixColumns coefficient
 * 0x09 applied to the inverse S-box; e.g. 0x09*0x52 = 0xf4).  256 entries.
 * Added .size for consistency with sbox/sbox02 above (emits no bytes). */
isbox09:
.byte 0xf4,0x41,0x17,0x27,0xab,0x9d,0xfa,0xe3,0x30,0x76,0xcc,0x02,0xe5,0x2a,0x35,0x62
.byte 0xb1,0xba,0xea,0xfe,0x2f,0x4c,0x46,0xd3,0x8f,0x92,0x6d,0x52,0xbe,0x74,0xe0,0xc9
.byte 0xc2,0x8e,0x58,0xb9,0xe1,0x88,0x20,0xce,0xdf,0x1a,0x51,0x53,0x64,0x6b,0x81,0x08
.byte 0x48,0x45,0xde,0x7b,0x73,0x4b,0x1f,0x55,0xeb,0xb5,0xc5,0x37,0x28,0xbf,0x03,0x16
.byte 0xcf,0x79,0x07,0x69,0xda,0x05,0x34,0xa6,0x2e,0xf3,0x8a,0xf6,0x83,0x60,0x71,0x6e
.byte 0x21,0xdd,0x3e,0xe6,0x54,0xc4,0x06,0x50,0x98,0xbd,0x40,0xd9,0xe8,0x89,0x19,0xc8
.byte 0x7c,0x42,0x84,0x00,0x80,0x2b,0x11,0x5a,0x0e,0x85,0xae,0x2d,0x0f,0x5c,0x5b,0x36
.byte 0x0a,0x57,0xee,0x9b,0xc0,0xdc,0x77,0x12,0x93,0xa0,0x22,0x1b,0x09,0x8b,0xb6,0x1e
.byte 0xf1,0x75,0x99,0x7f,0x01,0x72,0x66,0xfb,0x43,0x23,0xed,0xe4,0x31,0x63,0x97,0xc6
.byte 0x4a,0xbb,0xf9,0x29,0x9e,0xb2,0x86,0xc1,0xb3,0x70,0x94,0xe9,0xfc,0xf0,0x7d,0x33
.byte 0x49,0x38,0xca,0xd4,0xf5,0x7a,0xb7,0xad,0x3a,0x78,0x5f,0x7e,0x8d,0xd8,0x39,0xc3
.byte 0x5d,0xd0,0xd5,0x25,0xac,0x18,0x9c,0x3b,0x26,0x59,0x9a,0x4f,0x95,0xff,0xbc,0x15
.byte 0xe7,0x6f,0x9f,0xb0,0xa4,0x3f,0xa5,0xa2,0x4e,0x82,0x90,0xa7,0x04,0xec,0xcd,0x91
.byte 0x4d,0xef,0xaa,0x96,0xd1,0x6a,0x2c,0x65,0x5e,0x8c,0x87,0x0b,0x67,0xdb,0x10,0xd6
.byte 0xd7,0xa1,0xf8,0x13,0xa9,0x61,0x1c,0x47,0xd2,0xf2,0x14,0xc7,0xf7,0xfd,0x3d,0x44
.byte 0xaf,0x68,0x24,0xa3,0x1d,0xe2,0x3c,0x0d,0xa8,0x0c,0xb4,0x56,0xcb,0x32,0x6c,0xb8
.size isbox09, .-isbox09
/* isbox0d[x] = 0x0d * InvSbox[x] in GF(2^8) (InvMixColumns coefficient
 * 0x0d applied to the inverse S-box; e.g. 0x0d*0x52 = 0xa7).  256 entries.
 * Added .size for consistency with sbox/sbox02 above (emits no bytes). */
isbox0d:
.byte 0xa7,0x65,0xa4,0x5e,0x6b,0x45,0x58,0x03,0xfa,0x6d,0x76,0x4c,0xd7,0xcb,0x44,0xa3
.byte 0x5a,0x1b,0x0e,0xc0,0x75,0xf0,0x97,0xf9,0x5f,0x9c,0x7a,0x59,0x83,0x21,0x69,0xc8
.byte 0x89,0x79,0x3e,0x71,0x4f,0xad,0xac,0x3a,0x4a,0x31,0x33,0x7f,0x77,0xae,0xa0,0x2b
.byte 0x68,0xfd,0x6c,0xf8,0xd3,0x02,0x8f,0xab,0x28,0xc2,0x7b,0x08,0x87,0xa5,0x6a,0x82
.byte 0x1c,0xb4,0xf2,0xe2,0xf4,0xbe,0x62,0xfe,0x53,0x55,0xe1,0xeb,0xec,0xef,0x9f,0x10
.byte 0x8a,0x06,0x05,0xbd,0x8d,0x5d,0xd4,0x15,0xfb,0xe9,0x43,0x9e,0x42,0x8b,0x5b,0xee
.byte 0x0a,0x0f,0x1e,0x00,0x86,0xed,0x70,0x72,0xff,0x38,0xd5,0x39,0xd9,0xa6,0x54,0x2e
.byte 0x67,0xe7,0x96,0x91,0xc5,0x20,0x4b,0x1a,0xba,0x2a,0xe0,0x17,0x0d,0xc7,0xa8,0xa9
.byte 0x19,0x07,0xdd,0x60,0x26,0xf5,0x3b,0x7e,0x29,0xc6,0xfc,0xf1,0xdc,0x85,0x22,0x11
.byte 0x24,0x3d,0x32,0xa1,0x2f,0x30,0x52,0xe3,0x16,0xb9,0x48,0x64,0x8c,0x3f,0x2c,0x90
.byte 0x4e,0xd1,0xa2,0x0b,0x81,0xde,0x8e,0xbf,0x9d,0x92,0xcc,0x46,0x13,0xb8,0xf7,0xaf
.byte 0x80,0x93,0x2d,0x12,0x99,0x7d,0x63,0xbb,0x78,0x18,0xb7,0x9a,0x6e,0xe6,0xcf,0xe8
.byte 0x9b,0x36,0x09,0x7c,0xb2,0x23,0x94,0x66,0xbc,0xca,0xd0,0xd8,0x98,0xda,0x50,0xf6
.byte 0xd6,0xb0,0x4d,0x04,0xb5,0x88,0x1f,0x51,0xea,0x35,0x74,0x41,0x1d,0xd2,0x56,0x47
.byte 0x61,0x0c,0x14,0x3c,0x27,0xc9,0xe5,0xb1,0xdf,0x73,0xce,0x37,0xcd,0xaa,0x6f,0xdb
.byte 0xf3,0xc4,0x34,0x40,0xc3,0x25,0x49,0x95,0x01,0xb3,0xe4,0xc1,0x84,0xb6,0x5c,0x57
.size isbox0d, .-isbox0d
/* isbox0b[x] = 0x0b * InvSbox[x] in GF(2^8) (InvMixColumns coefficient
 * 0x0b applied to the inverse S-box; e.g. 0x0b*0x52 = 0x50).  256 entries.
 * Added .size for consistency with sbox/sbox02 above (emits no bytes). */
isbox0b:
.byte 0x50,0x53,0xc3,0x96,0xcb,0xf1,0xab,0x93,0x55,0xf6,0x91,0x25,0xfc,0xd7,0x80,0x8f
.byte 0x49,0x67,0x98,0xe1,0x02,0x12,0xa3,0xc6,0xe7,0x95,0xeb,0xda,0x2d,0xd3,0x29,0x44
.byte 0x6a,0x78,0x6b,0xdd,0xb6,0x17,0x66,0xb4,0x18,0x82,0x60,0x45,0xe0,0x84,0x1c,0x94
.byte 0x58,0x19,0x87,0xb7,0x23,0xe2,0x57,0x2a,0x07,0x03,0x9a,0xa5,0xf2,0xb2,0xba,0x5c
.byte 0x2b,0x92,0xf0,0xa1,0xcd,0xd5,0x1f,0x8a,0x9d,0xa0,0x32,0x75,0x39,0xaa,0x06,0x51
.byte 0xf9,0x3d,0xae,0x46,0xb5,0x05,0x6f,0xff,0x24,0x97,0xcc,0x77,0xbd,0x88,0x38,0xdb
.byte 0x47,0xe9,0xc9,0x00,0x83,0x48,0xac,0x4e,0xfb,0x56,0x1e,0x27,0x64,0x21,0xd1,0x3a
.byte 0xb1,0x0f,0xd2,0x9e,0x4f,0xa2,0x69,0x16,0x0a,0xe5,0x43,0x1d,0x0b,0xad,0xb9,0xc8
.byte 0x85,0x4c,0xbb,0xfd,0x9f,0xbc,0xc5,0x34,0x76,0xdc,0x68,0x63,0xca,0x10,0x40,0x20
.byte 0x7d,0xf8,0x11,0x6d,0x4b,0xf3,0xec,0xd0,0x6c,0x99,0xfa,0x22,0xc4,0x1a,0xd8,0xef
.byte 0xc7,0xc1,0xfe,0x36,0xcf,0x28,0x26,0xa4,0xe4,0x0d,0x9b,0x62,0xc2,0xe8,0x5e,0xf5
.byte 0xbe,0x7c,0xa9,0xb3,0x3b,0xa7,0x6e,0x7b,0x09,0xf4,0x01,0xa8,0x65,0x7e,0x08,0xe6
.byte 0xd9,0xce,0xd4,0xd6,0xaf,0x31,0x30,0xc0,0x37,0xa6,0xb0,0x15,0x4a,0xf7,0x0e,0x2f
.byte 0x8d,0x4d,0x54,0xdf,0xe3,0x1b,0xb8,0x7f,0x04,0x5d,0x73,0x2e,0x5a,0x52,0x33,0x13
.byte 0x8c,0x7a,0x8e,0x89,0xee,0x35,0xed,0x3c,0x59,0x3f,0x79,0xbf,0xea,0x5b,0x14,0x86
.byte 0x81,0x3e,0x2c,0x5f,0x72,0x0c,0x8b,0x41,0x71,0xde,0x9c,0x90,0x61,0x70,0x74,0x42
.size isbox0b, .-isbox0b
|
aadomn/cymric
| 31,727
|
artifact_tches2025-3/benchmark_avr/cymric_lwc/cymric_lwc/photonbeetle/internal-photon256-avr.S
|
#if defined(__AVR__)
/*
* Copyright (C) 2021 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <avr/io.h>
/* Automatically generated - do not edit */
.text
.global photon256_permute
.type photon256_permute, @function
photon256_permute:
push r28
push r29
push r2
push r3
push r4
push r5
push r6
push r7
push r8
push r9
push r10
push r11
push r12
push r13
push r14
push r15
push r16
movw r30,r24
in r28,0x3d
in r29,0x3e
sbiw r28,32
in r0,0x3f
cli
out 0x3e,r29
out 0x3f,r0
out 0x3d,r28
.L__stack_usage = 49
ldd r18,Z+16
ldd r19,Z+17
ldd r20,Z+18
ldd r21,Z+19
bst r18,0
bld r2,0
bst r18,1
bld r6,0
bst r18,2
bld r10,0
bst r18,3
bld r14,0
bst r18,4
bld r2,1
bst r18,5
bld r6,1
bst r18,6
bld r10,1
bst r18,7
bld r14,1
bst r19,0
bld r2,2
bst r19,1
bld r6,2
bst r19,2
bld r10,2
bst r19,3
bld r14,2
bst r19,4
bld r2,3
bst r19,5
bld r6,3
bst r19,6
bld r10,3
bst r19,7
bld r14,3
bst r20,0
bld r2,4
bst r20,1
bld r6,4
bst r20,2
bld r10,4
bst r20,3
bld r14,4
bst r20,4
bld r2,5
bst r20,5
bld r6,5
bst r20,6
bld r10,5
bst r20,7
bld r14,5
bst r21,0
bld r2,6
bst r21,1
bld r6,6
bst r21,2
bld r10,6
bst r21,3
bld r14,6
bst r21,4
bld r2,7
bst r21,5
bld r6,7
bst r21,6
bld r10,7
bst r21,7
bld r14,7
ldd r18,Z+20
ldd r19,Z+21
ldd r20,Z+22
ldd r21,Z+23
bst r18,0
bld r3,0
bst r18,1
bld r7,0
bst r18,2
bld r11,0
bst r18,3
bld r15,0
bst r18,4
bld r3,1
bst r18,5
bld r7,1
bst r18,6
bld r11,1
bst r18,7
bld r15,1
bst r19,0
bld r3,2
bst r19,1
bld r7,2
bst r19,2
bld r11,2
bst r19,3
bld r15,2
bst r19,4
bld r3,3
bst r19,5
bld r7,3
bst r19,6
bld r11,3
bst r19,7
bld r15,3
bst r20,0
bld r3,4
bst r20,1
bld r7,4
bst r20,2
bld r11,4
bst r20,3
bld r15,4
bst r20,4
bld r3,5
bst r20,5
bld r7,5
bst r20,6
bld r11,5
bst r20,7
bld r15,5
bst r21,0
bld r3,6
bst r21,1
bld r7,6
bst r21,2
bld r11,6
bst r21,3
bld r15,6
bst r21,4
bld r3,7
bst r21,5
bld r7,7
bst r21,6
bld r11,7
bst r21,7
bld r15,7
ldd r18,Z+24
ldd r19,Z+25
ldd r20,Z+26
ldd r21,Z+27
bst r18,0
bld r4,0
bst r18,1
bld r8,0
bst r18,2
bld r12,0
bst r18,3
bld r24,0
bst r18,4
bld r4,1
bst r18,5
bld r8,1
bst r18,6
bld r12,1
bst r18,7
bld r24,1
bst r19,0
bld r4,2
bst r19,1
bld r8,2
bst r19,2
bld r12,2
bst r19,3
bld r24,2
bst r19,4
bld r4,3
bst r19,5
bld r8,3
bst r19,6
bld r12,3
bst r19,7
bld r24,3
bst r20,0
bld r4,4
bst r20,1
bld r8,4
bst r20,2
bld r12,4
bst r20,3
bld r24,4
bst r20,4
bld r4,5
bst r20,5
bld r8,5
bst r20,6
bld r12,5
bst r20,7
bld r24,5
bst r21,0
bld r4,6
bst r21,1
bld r8,6
bst r21,2
bld r12,6
bst r21,3
bld r24,6
bst r21,4
bld r4,7
bst r21,5
bld r8,7
bst r21,6
bld r12,7
bst r21,7
bld r24,7
ldd r18,Z+28
ldd r19,Z+29
ldd r20,Z+30
ldd r21,Z+31
bst r18,0
bld r5,0
bst r18,1
bld r9,0
bst r18,2
bld r13,0
bst r18,3
bld r25,0
bst r18,4
bld r5,1
bst r18,5
bld r9,1
bst r18,6
bld r13,1
bst r18,7
bld r25,1
bst r19,0
bld r5,2
bst r19,1
bld r9,2
bst r19,2
bld r13,2
bst r19,3
bld r25,2
bst r19,4
bld r5,3
bst r19,5
bld r9,3
bst r19,6
bld r13,3
bst r19,7
bld r25,3
bst r20,0
bld r5,4
bst r20,1
bld r9,4
bst r20,2
bld r13,4
bst r20,3
bld r25,4
bst r20,4
bld r5,5
bst r20,5
bld r9,5
bst r20,6
bld r13,5
bst r20,7
bld r25,5
bst r21,0
bld r5,6
bst r21,1
bld r9,6
bst r21,2
bld r13,6
bst r21,3
bld r25,6
bst r21,4
bld r5,7
bst r21,5
bld r9,7
bst r21,6
bld r13,7
bst r21,7
bld r25,7
std Z+16,r2
std Z+17,r3
std Z+18,r4
std Z+19,r5
std Z+20,r6
std Z+21,r7
std Z+22,r8
std Z+23,r9
std Z+24,r10
std Z+25,r11
std Z+26,r12
std Z+27,r13
std Z+28,r14
std Z+29,r15
std Z+30,r24
std Z+31,r25
ld r18,Z
ldd r19,Z+1
ldd r20,Z+2
ldd r21,Z+3
bst r18,0
bld r2,0
bst r18,1
bld r6,0
bst r18,2
bld r10,0
bst r18,3
bld r14,0
bst r18,4
bld r2,1
bst r18,5
bld r6,1
bst r18,6
bld r10,1
bst r18,7
bld r14,1
bst r19,0
bld r2,2
bst r19,1
bld r6,2
bst r19,2
bld r10,2
bst r19,3
bld r14,2
bst r19,4
bld r2,3
bst r19,5
bld r6,3
bst r19,6
bld r10,3
bst r19,7
bld r14,3
bst r20,0
bld r2,4
bst r20,1
bld r6,4
bst r20,2
bld r10,4
bst r20,3
bld r14,4
bst r20,4
bld r2,5
bst r20,5
bld r6,5
bst r20,6
bld r10,5
bst r20,7
bld r14,5
bst r21,0
bld r2,6
bst r21,1
bld r6,6
bst r21,2
bld r10,6
bst r21,3
bld r14,6
bst r21,4
bld r2,7
bst r21,5
bld r6,7
bst r21,6
bld r10,7
bst r21,7
bld r14,7
ldd r18,Z+4
ldd r19,Z+5
ldd r20,Z+6
ldd r21,Z+7
bst r18,0
bld r3,0
bst r18,1
bld r7,0
bst r18,2
bld r11,0
bst r18,3
bld r15,0
bst r18,4
bld r3,1
bst r18,5
bld r7,1
bst r18,6
bld r11,1
bst r18,7
bld r15,1
bst r19,0
bld r3,2
bst r19,1
bld r7,2
bst r19,2
bld r11,2
bst r19,3
bld r15,2
bst r19,4
bld r3,3
bst r19,5
bld r7,3
bst r19,6
bld r11,3
bst r19,7
bld r15,3
bst r20,0
bld r3,4
bst r20,1
bld r7,4
bst r20,2
bld r11,4
bst r20,3
bld r15,4
bst r20,4
bld r3,5
bst r20,5
bld r7,5
bst r20,6
bld r11,5
bst r20,7
bld r15,5
bst r21,0
bld r3,6
bst r21,1
bld r7,6
bst r21,2
bld r11,6
bst r21,3
bld r15,6
bst r21,4
bld r3,7
bst r21,5
bld r7,7
bst r21,6
bld r11,7
bst r21,7
bld r15,7
ldd r18,Z+8
ldd r19,Z+9
ldd r20,Z+10
ldd r21,Z+11
bst r18,0
bld r4,0
bst r18,1
bld r8,0
bst r18,2
bld r12,0
bst r18,3
bld r24,0
bst r18,4
bld r4,1
bst r18,5
bld r8,1
bst r18,6
bld r12,1
bst r18,7
bld r24,1
bst r19,0
bld r4,2
bst r19,1
bld r8,2
bst r19,2
bld r12,2
bst r19,3
bld r24,2
bst r19,4
bld r4,3
bst r19,5
bld r8,3
bst r19,6
bld r12,3
bst r19,7
bld r24,3
bst r20,0
bld r4,4
bst r20,1
bld r8,4
bst r20,2
bld r12,4
bst r20,3
bld r24,4
bst r20,4
bld r4,5
bst r20,5
bld r8,5
bst r20,6
bld r12,5
bst r20,7
bld r24,5
bst r21,0
bld r4,6
bst r21,1
bld r8,6
bst r21,2
bld r12,6
bst r21,3
bld r24,6
bst r21,4
bld r4,7
bst r21,5
bld r8,7
bst r21,6
bld r12,7
bst r21,7
bld r24,7
ldd r18,Z+12
ldd r19,Z+13
ldd r20,Z+14
ldd r21,Z+15
bst r18,0
bld r5,0
bst r18,1
bld r9,0
bst r18,2
bld r13,0
bst r18,3
bld r25,0
bst r18,4
bld r5,1
bst r18,5
bld r9,1
bst r18,6
bld r13,1
bst r18,7
bld r25,1
bst r19,0
bld r5,2
bst r19,1
bld r9,2
bst r19,2
bld r13,2
bst r19,3
bld r25,2
bst r19,4
bld r5,3
bst r19,5
bld r9,3
bst r19,6
bld r13,3
bst r19,7
bld r25,3
bst r20,0
bld r5,4
bst r20,1
bld r9,4
bst r20,2
bld r13,4
bst r20,3
bld r25,4
bst r20,4
bld r5,5
bst r20,5
bld r9,5
bst r20,6
bld r13,5
bst r20,7
bld r25,5
bst r21,0
bld r5,6
bst r21,1
bld r9,6
bst r21,2
bld r13,6
bst r21,3
bld r25,6
bst r21,4
bld r5,7
bst r21,5
bld r9,7
bst r21,6
bld r13,7
bst r21,7
bld r25,7
ldi r22,225
ldi r23,240
ldi r26,210
ldi r27,150
rcall 621f
ldi r22,195
ldi r23,210
ldi r26,240
ldi r27,180
rcall 621f
ldi r22,135
ldi r23,150
ldi r26,180
ldi r27,240
rcall 621f
ldi r22,30
ldi r23,15
ldi r26,45
ldi r27,105
rcall 621f
ldi r22,45
ldi r23,60
ldi r26,30
ldi r27,90
rcall 621f
ldi r22,75
ldi r23,90
ldi r26,120
ldi r27,60
rcall 621f
ldi r22,150
ldi r23,135
ldi r26,165
ldi r27,225
rcall 621f
ldi r22,60
ldi r23,45
ldi r26,15
ldi r27,75
rcall 621f
ldi r22,105
ldi r23,120
ldi r26,90
ldi r27,30
rcall 621f
ldi r22,210
ldi r23,195
ldi r26,225
ldi r27,165
rcall 621f
ldi r22,165
ldi r23,180
ldi r26,150
ldi r27,210
rcall 621f
ldi r22,90
ldi r23,75
ldi r26,105
ldi r27,45
rcall 621f
rjmp 1960f
621:
movw r18,r22
movw r20,r26
andi r18,1
andi r19,1
andi r20,1
andi r21,1
eor r2,r18
eor r3,r19
eor r4,r20
eor r5,r21
lsr r22
lsr r23
lsr r26
lsr r27
movw r18,r22
movw r20,r26
andi r18,1
andi r19,1
andi r20,1
andi r21,1
eor r6,r18
eor r7,r19
eor r8,r20
eor r9,r21
lsr r22
lsr r23
lsr r26
lsr r27
movw r18,r22
movw r20,r26
andi r18,1
andi r19,1
andi r20,1
andi r21,1
eor r10,r18
eor r11,r19
eor r12,r20
eor r13,r21
lsr r22
lsr r23
lsr r26
lsr r27
movw r18,r22
movw r20,r26
andi r18,1
andi r19,1
andi r20,1
andi r21,1
eor r14,r18
eor r15,r19
eor r24,r20
eor r25,r21
lsr r22
lsr r23
lsr r26
lsr r27
eor r6,r10
mov r0,r10
and r0,r6
eor r14,r0
mov r18,r14
and r14,r6
eor r14,r10
mov r16,r14
eor r14,r2
com r14
mov r10,r14
or r16,r2
eor r2,r18
eor r6,r2
or r10,r6
eor r10,r18
eor r6,r16
eor r14,r6
eor r7,r11
mov r0,r11
and r0,r7
eor r15,r0
mov r19,r15
and r15,r7
eor r15,r11
mov r16,r15
eor r15,r3
com r15
mov r11,r15
or r16,r3
eor r3,r19
eor r7,r3
or r11,r7
eor r11,r19
eor r7,r16
eor r15,r7
eor r8,r12
mov r0,r12
and r0,r8
eor r24,r0
mov r20,r24
and r24,r8
eor r24,r12
mov r16,r24
eor r24,r4
com r24
mov r12,r24
or r16,r4
eor r4,r20
eor r8,r4
or r12,r8
eor r12,r20
eor r8,r16
eor r24,r8
eor r9,r13
mov r0,r13
and r0,r9
eor r25,r0
mov r21,r25
and r25,r9
eor r25,r13
mov r16,r25
eor r25,r5
com r25
mov r13,r25
or r16,r5
eor r5,r21
eor r9,r5
or r13,r9
eor r13,r21
eor r9,r16
eor r25,r9
bst r3,0
lsr r3
bld r3,7
bst r7,0
lsr r7
bld r7,7
bst r11,0
lsr r11
bld r11,7
bst r15,0
lsr r15
bld r15,7
mov r0,r1
lsr r4
ror r0
lsr r4
ror r0
or r4,r0
mov r0,r1
lsr r8
ror r0
lsr r8
ror r0
or r8,r0
mov r0,r1
lsr r12
ror r0
lsr r12
ror r0
or r12,r0
mov r0,r1
lsr r24
ror r0
lsr r24
ror r0
or r24,r0
mov r0,r1
lsr r5
ror r0
lsr r5
ror r0
lsr r5
ror r0
or r5,r0
mov r0,r1
lsr r9
ror r0
lsr r9
ror r0
lsr r9
ror r0
or r9,r0
mov r0,r1
lsr r13
ror r0
lsr r13
ror r0
lsr r13
ror r0
or r13,r0
mov r0,r1
lsr r25
ror r0
lsr r25
ror r0
lsr r25
ror r0
or r25,r0
std Y+1,r2
std Y+2,r3
std Y+3,r4
std Y+4,r5
std Y+5,r6
std Y+6,r7
std Y+7,r8
std Y+8,r9
std Y+9,r10
std Y+10,r11
std Y+11,r12
std Y+12,r13
std Y+13,r14
std Y+14,r15
std Y+15,r24
std Y+16,r25
ldd r2,Z+16
ldd r3,Z+17
ldd r4,Z+18
ldd r5,Z+19
ldd r6,Z+20
ldd r7,Z+21
ldd r8,Z+22
ldd r9,Z+23
ldd r10,Z+24
ldd r11,Z+25
ldd r12,Z+26
ldd r13,Z+27
ldd r14,Z+28
ldd r15,Z+29
ldd r24,Z+30
ldd r25,Z+31
movw r18,r22
movw r20,r26
andi r18,1
andi r19,1
andi r20,1
andi r21,1
eor r2,r18
eor r3,r19
eor r4,r20
eor r5,r21
lsr r22
lsr r23
lsr r26
lsr r27
movw r18,r22
movw r20,r26
andi r18,1
andi r19,1
andi r20,1
andi r21,1
eor r6,r18
eor r7,r19
eor r8,r20
eor r9,r21
lsr r22
lsr r23
lsr r26
lsr r27
movw r18,r22
movw r20,r26
andi r18,1
andi r19,1
andi r20,1
andi r21,1
eor r10,r18
eor r11,r19
eor r12,r20
eor r13,r21
lsr r22
lsr r23
lsr r26
lsr r27
eor r14,r22
eor r15,r23
eor r24,r26
eor r25,r27
eor r6,r10
mov r0,r10
and r0,r6
eor r14,r0
mov r18,r14
and r14,r6
eor r14,r10
mov r22,r14
eor r14,r2
com r14
mov r10,r14
or r22,r2
eor r2,r18
eor r6,r2
or r10,r6
eor r10,r18
eor r6,r22
eor r14,r6
eor r7,r11
mov r0,r11
and r0,r7
eor r15,r0
mov r19,r15
and r15,r7
eor r15,r11
mov r22,r15
eor r15,r3
com r15
mov r11,r15
or r22,r3
eor r3,r19
eor r7,r3
or r11,r7
eor r11,r19
eor r7,r22
eor r15,r7
eor r8,r12
mov r0,r12
and r0,r8
eor r24,r0
mov r20,r24
and r24,r8
eor r24,r12
mov r22,r24
eor r24,r4
com r24
mov r12,r24
or r22,r4
eor r4,r20
eor r8,r4
or r12,r8
eor r12,r20
eor r8,r22
eor r24,r8
eor r9,r13
mov r0,r13
and r0,r9
eor r25,r0
mov r21,r25
and r25,r9
eor r25,r13
mov r22,r25
eor r25,r5
com r25
mov r13,r25
or r22,r5
eor r5,r21
eor r9,r5
or r13,r9
eor r13,r21
eor r9,r22
eor r25,r9
swap r2
swap r6
swap r10
swap r14
lsl r3
adc r3,r1
lsl r3
adc r3,r1
lsl r3
adc r3,r1
lsl r7
adc r7,r1
lsl r7
adc r7,r1
lsl r7
adc r7,r1
lsl r11
adc r11,r1
lsl r11
adc r11,r1
lsl r11
adc r11,r1
lsl r15
adc r15,r1
lsl r15
adc r15,r1
lsl r15
adc r15,r1
lsl r4
adc r4,r1
lsl r4
adc r4,r1
lsl r8
adc r8,r1
lsl r8
adc r8,r1
lsl r12
adc r12,r1
lsl r12
adc r12,r1
lsl r24
adc r24,r1
lsl r24
adc r24,r1
lsl r5
adc r5,r1
lsl r9
adc r9,r1
lsl r13
adc r13,r1
lsl r25
adc r25,r1
std Y+17,r2
std Y+18,r3
std Y+19,r4
std Y+20,r5
std Y+21,r6
std Y+22,r7
std Y+23,r8
std Y+24,r9
std Y+25,r10
std Y+26,r11
std Y+27,r12
std Y+28,r13
std Y+29,r14
std Y+30,r15
std Y+31,r24
std Y+32,r25
ldd r2,Y+1
ldd r6,Y+2
ldd r10,Y+3
ldd r14,Y+4
ldd r3,Y+5
ldd r7,Y+6
ldd r11,Y+7
ldd r15,Y+8
ldd r4,Y+9
ldd r8,Y+10
ldd r12,Y+11
ldd r24,Y+12
ldd r5,Y+13
ldd r9,Y+14
ldd r13,Y+15
ldd r25,Y+16
movw r22,r2
movw r26,r4
eor r22,r27
mov r18,r27
mov r19,r22
mov r20,r23
mov r21,r26
movw r22,r6
movw r26,r8
eor r22,r27
eor r27,r26
eor r18,r26
eor r19,r27
eor r20,r22
eor r21,r23
movw r22,r10
movw r26,r12
eor r22,r27
eor r18,r27
eor r19,r22
eor r20,r23
eor r21,r26
eor r18,r14
eor r19,r15
eor r20,r24
eor r21,r25
movw r22,r14
movw r26,r24
eor r22,r27
eor r18,r27
eor r19,r22
eor r20,r23
eor r21,r26
eor r27,r26
eor r26,r23
eor r18,r23
eor r19,r26
eor r20,r27
eor r21,r22
st Z,r18
std Z+4,r19
std Z+8,r20
std Z+12,r21
movw r22,r2
movw r26,r4
eor r22,r27
eor r27,r26
movw r18,r26
movw r20,r22
eor r26,r23
eor r18,r23
eor r19,r26
eor r20,r27
eor r21,r22
eor r18,r6
eor r19,r7
eor r20,r8
eor r21,r9
movw r22,r6
movw r26,r8
eor r22,r27
eor r27,r26
eor r26,r23
eor r18,r23
eor r19,r26
eor r20,r27
eor r21,r22
movw r22,r10
movw r26,r12
eor r22,r27
eor r27,r26
eor r26,r23
eor r18,r23
eor r19,r26
eor r20,r27
eor r21,r22
eor r18,r14
eor r19,r15
eor r20,r24
eor r21,r25
movw r22,r14
movw r26,r24
eor r22,r27
eor r27,r26
eor r18,r26
eor r19,r27
eor r20,r22
eor r21,r23
eor r26,r23
eor r18,r23
eor r19,r26
eor r20,r27
eor r21,r22
std Z+1,r18
std Z+5,r19
std Z+9,r20
std Z+13,r21
movw r22,r2
movw r26,r4
eor r22,r27
eor r27,r26
movw r18,r26
movw r20,r22
movw r22,r6
movw r26,r8
eor r22,r27
eor r27,r26
eor r18,r26
eor r19,r27
eor r20,r22
eor r21,r23
eor r18,r10
eor r19,r11
eor r20,r12
eor r21,r13
movw r22,r10
movw r26,r12
eor r22,r27
eor r27,r26
eor r18,r26
eor r19,r27
eor r20,r22
eor r21,r23
eor r26,r23
eor r18,r23
eor r19,r26
eor r20,r27
eor r21,r22
eor r18,r14
eor r19,r15
eor r20,r24
eor r21,r25
movw r22,r14
movw r26,r24
eor r22,r27
eor r27,r26
eor r18,r26
eor r19,r27
eor r20,r22
eor r21,r23
eor r26,r23
eor r18,r23
eor r19,r26
eor r20,r27
eor r21,r22
std Z+2,r18
std Z+6,r19
std Z+10,r20
std Z+14,r21
movw r18,r2
movw r20,r4
movw r22,r6
movw r26,r8
eor r22,r27
eor r18,r27
eor r19,r22
eor r20,r23
eor r21,r26
eor r27,r26
eor r18,r26
eor r19,r27
eor r20,r22
eor r21,r23
eor r18,r10
eor r19,r11
eor r20,r12
eor r21,r13
movw r22,r10
movw r26,r12
eor r22,r27
eor r27,r26
eor r18,r26
eor r19,r27
eor r20,r22
eor r21,r23
eor r18,r14
eor r19,r15
eor r20,r24
eor r21,r25
std Z+3,r18
std Z+7,r19
std Z+11,r20
std Z+15,r21
movw r18,r2
movw r20,r4
movw r22,r2
movw r26,r4
eor r22,r27
eor r18,r27
eor r19,r22
eor r20,r23
eor r21,r26
eor r27,r26
eor r18,r26
eor r19,r27
eor r20,r22
eor r21,r23
eor r26,r23
eor r18,r23
eor r19,r26
eor r20,r27
eor r21,r22
movw r22,r6
movw r26,r8
eor r22,r27
eor r27,r26
eor r18,r26
eor r19,r27
eor r20,r22
eor r21,r23
eor r26,r23
eor r18,r23
eor r19,r26
eor r20,r27
eor r21,r22
eor r18,r10
eor r19,r11
eor r20,r12
eor r21,r13
movw r22,r10
movw r26,r12
eor r22,r27
eor r27,r26
eor r26,r23
eor r18,r23
eor r19,r26
eor r20,r27
eor r21,r22
eor r18,r14
eor r19,r15
eor r20,r24
eor r21,r25
movw r22,r14
movw r26,r24
eor r22,r27
eor r27,r26
eor r18,r26
eor r19,r27
eor r20,r22
eor r21,r23
eor r26,r23
eor r18,r23
eor r19,r26
eor r20,r27
eor r21,r22
std Z+16,r18
std Z+20,r19
std Z+24,r20
std Z+28,r21
movw r18,r2
movw r20,r4
movw r22,r2
movw r26,r4
eor r22,r27
eor r27,r26
eor r26,r23
eor r18,r23
eor r19,r26
eor r20,r27
eor r21,r22
movw r22,r6
movw r26,r8
eor r22,r27
eor r18,r27
eor r19,r22
eor r20,r23
eor r21,r26
eor r27,r26
eor r18,r26
eor r19,r27
eor r20,r22
eor r21,r23
eor r26,r23
eor r18,r23
eor r19,r26
eor r20,r27
eor r21,r22
eor r18,r10
eor r19,r11
eor r20,r12
eor r21,r13
movw r22,r10
movw r26,r12
eor r22,r27
eor r27,r26
eor r18,r26
eor r19,r27
eor r20,r22
eor r21,r23
eor r18,r14
eor r19,r15
eor r20,r24
eor r21,r25
movw r22,r14
movw r26,r24
eor r22,r27
eor r18,r27
eor r19,r22
eor r20,r23
eor r21,r26
eor r27,r26
eor r18,r26
eor r19,r27
eor r20,r22
eor r21,r23
eor r26,r23
eor r18,r23
eor r19,r26
eor r20,r27
eor r21,r22
std Z+17,r18
std Z+21,r19
std Z+25,r20
std Z+29,r21
movw r22,r2
movw r26,r4
eor r22,r27
eor r27,r26
movw r18,r26
movw r20,r22
eor r26,r23
eor r18,r23
eor r19,r26
eor r20,r27
eor r21,r22
movw r22,r6
movw r26,r8
eor r22,r27
eor r18,r27
eor r19,r22
eor r20,r23
eor r21,r26
movw r22,r10
movw r26,r12
eor r22,r27
eor r18,r27
eor r19,r22
eor r20,r23
eor r21,r26
movw r22,r14
movw r26,r24
eor r22,r27
eor r18,r27
eor r19,r22
eor r20,r23
eor r21,r26
eor r27,r26
eor r26,r23
eor r18,r23
eor r19,r26
eor r20,r27
eor r21,r22
std Z+18,r18
std Z+22,r19
std Z+26,r20
std Z+30,r21
movw r18,r2
movw r20,r4
movw r22,r2
movw r26,r4
eor r22,r27
eor r18,r27
eor r19,r22
eor r20,r23
eor r21,r26
eor r27,r26
eor r18,r26
eor r19,r27
eor r20,r22
eor r21,r23
eor r26,r23
eor r18,r23
eor r19,r26
eor r20,r27
eor r21,r22
eor r18,r6
eor r19,r7
eor r20,r8
eor r21,r9
eor r18,r10
eor r19,r11
eor r20,r12
eor r21,r13
movw r22,r10
movw r26,r12
eor r22,r27
eor r27,r26
eor r18,r26
eor r19,r27
eor r20,r22
eor r21,r23
eor r26,r23
eor r18,r23
eor r19,r26
eor r20,r27
eor r21,r22
movw r22,r14
movw r26,r24
eor r22,r27
eor r18,r27
eor r19,r22
eor r20,r23
eor r21,r26
eor r27,r26
eor r26,r23
eor r18,r23
eor r19,r26
eor r20,r27
eor r21,r22
std Z+19,r18
std Z+23,r19
std Z+27,r20
std Z+31,r21
ldd r2,Y+17
ldd r6,Y+18
ldd r10,Y+19
ldd r14,Y+20
ldd r3,Y+21
ldd r7,Y+22
ldd r11,Y+23
ldd r15,Y+24
ldd r4,Y+25
ldd r8,Y+26
ldd r12,Y+27
ldd r24,Y+28
ldd r5,Y+29
ldd r9,Y+30
ldd r13,Y+31
ldd r25,Y+32
ld r18,Z
ldd r19,Z+4
ldd r20,Z+8
ldd r21,Z+12
movw r22,r2
movw r26,r4
eor r22,r27
eor r18,r27
eor r19,r22
eor r20,r23
eor r21,r26
movw r22,r6
movw r26,r8
eor r22,r27
eor r27,r26
eor r26,r23
eor r18,r23
eor r19,r26
eor r20,r27
eor r21,r22
eor r18,r10
eor r19,r11
eor r20,r12
eor r21,r13
movw r22,r10
movw r26,r12
eor r22,r27
eor r27,r26
eor r18,r26
eor r19,r27
eor r20,r22
eor r21,r23
movw r22,r14
movw r26,r24
eor r22,r27
eor r18,r27
eor r19,r22
eor r20,r23
eor r21,r26
eor r27,r26
eor r18,r26
eor r19,r27
eor r20,r22
eor r21,r23
st Z,r18
std Z+4,r19
std Z+8,r20
std Z+12,r21
ldd r18,Z+1
ldd r19,Z+5
ldd r20,Z+9
ldd r21,Z+13
eor r18,r2
eor r19,r3
eor r20,r4
eor r21,r5
movw r22,r2
movw r26,r4
eor r22,r27
eor r18,r27
eor r19,r22
eor r20,r23
eor r21,r26
eor r27,r26
eor r18,r26
eor r19,r27
eor r20,r22
eor r21,r23
eor r18,r6
eor r19,r7
eor r20,r8
eor r21,r9
movw r22,r6
movw r26,r8
eor r22,r27
eor r18,r27
eor r19,r22
eor r20,r23
eor r21,r26
eor r27,r26
eor r18,r26
eor r19,r27
eor r20,r22
eor r21,r23
eor r18,r10
eor r19,r11
eor r20,r12
eor r21,r13
movw r22,r10
movw r26,r12
eor r22,r27
eor r27,r26
eor r18,r26
eor r19,r27
eor r20,r22
eor r21,r23
movw r22,r14
movw r26,r24
eor r22,r27
eor r18,r27
eor r19,r22
eor r20,r23
eor r21,r26
std Z+1,r18
std Z+5,r19
std Z+9,r20
std Z+13,r21
ldd r18,Z+2
ldd r19,Z+6
ldd r20,Z+10
ldd r21,Z+14
eor r18,r2
eor r19,r3
eor r20,r4
eor r21,r5
movw r22,r2
movw r26,r4
eor r22,r27
eor r27,r26
eor r26,r23
eor r18,r23
eor r19,r26
eor r20,r27
eor r21,r22
movw r22,r6
movw r26,r8
eor r22,r27
eor r27,r26
eor r18,r26
eor r19,r27
eor r20,r22
eor r21,r23
eor r18,r10
eor r19,r11
eor r20,r12
eor r21,r13
movw r22,r10
movw r26,r12
eor r22,r27
eor r27,r26
eor r18,r26
eor r19,r27
eor r20,r22
eor r21,r23
eor r26,r23
eor r18,r23
eor r19,r26
eor r20,r27
eor r21,r22
eor r18,r14
eor r19,r15
eor r20,r24
eor r21,r25
movw r22,r14
movw r26,r24
eor r22,r27
eor r27,r26
eor r26,r23
eor r18,r23
eor r19,r26
eor r20,r27
eor r21,r22
std Z+2,r18
std Z+6,r19
std Z+10,r20
std Z+14,r21
ldd r18,Z+3
ldd r19,Z+7
ldd r20,Z+11
ldd r21,Z+15
movw r22,r2
movw r26,r4
eor r22,r27
eor r27,r26
eor r18,r26
eor r19,r27
eor r20,r22
eor r21,r23
eor r26,r23
eor r18,r23
eor r19,r26
eor r20,r27
eor r21,r22
eor r18,r6
eor r19,r7
eor r20,r8
eor r21,r9
movw r22,r6
movw r26,r8
eor r22,r27
eor r27,r26
eor r18,r26
eor r19,r27
eor r20,r22
eor r21,r23
eor r26,r23
eor r18,r23
eor r19,r26
eor r20,r27
eor r21,r22
eor r18,r10
eor r19,r11
eor r20,r12
eor r21,r13
movw r22,r10
movw r26,r12
eor r22,r27
eor r18,r27
eor r19,r22
eor r20,r23
eor r21,r26
eor r27,r26
eor r18,r26
eor r19,r27
eor r20,r22
eor r21,r23
eor r26,r23
eor r18,r23
eor r19,r26
eor r20,r27
eor r21,r22
movw r22,r14
movw r26,r24
eor r22,r27
eor r18,r27
eor r19,r22
eor r20,r23
eor r21,r26
eor r27,r26
eor r18,r26
eor r19,r27
eor r20,r22
eor r21,r23
eor r26,r23
eor r18,r23
eor r19,r26
eor r20,r27
eor r21,r22
std Z+3,r18
std Z+7,r19
std Z+11,r20
std Z+15,r21
ldd r18,Z+16
ldd r19,Z+20
ldd r20,Z+24
ldd r21,Z+28
movw r22,r2
movw r26,r4
eor r22,r27
eor r18,r27
eor r19,r22
eor r20,r23
eor r21,r26
eor r27,r26
eor r18,r26
eor r19,r27
eor r20,r22
eor r21,r23
eor r26,r23
eor r18,r23
eor r19,r26
eor r20,r27
eor r21,r22
eor r18,r6
eor r19,r7
eor r20,r8
eor r21,r9
movw r22,r6
movw r26,r8
eor r22,r27
eor r27,r26
eor r18,r26
eor r19,r27
eor r20,r22
eor r21,r23
movw r22,r10
movw r26,r12
eor r22,r27
eor r18,r27
eor r19,r22
eor r20,r23
eor r21,r26
eor r27,r26
eor r18,r26
eor r19,r27
eor r20,r22
eor r21,r23
eor r26,r23
eor r18,r23
eor r19,r26
eor r20,r27
eor r21,r22
eor r18,r14
eor r19,r15
eor r20,r24
eor r21,r25
movw r22,r14
movw r26,r24
eor r22,r27
eor r27,r26
eor r18,r26
eor r19,r27
eor r20,r22
eor r21,r23
eor r26,r23
eor r18,r23
eor r19,r26
eor r20,r27
eor r21,r22
std Z+16,r18
std Z+20,r19
std Z+24,r20
std Z+28,r21
ldd r18,Z+17
ldd r19,Z+21
ldd r20,Z+25
ldd r21,Z+29
movw r22,r2
movw r26,r4
eor r22,r27
eor r27,r26
eor r18,r26
eor r19,r27
eor r20,r22
eor r21,r23
movw r22,r6
movw r26,r8
eor r22,r27
eor r27,r26
eor r18,r26
eor r19,r27
eor r20,r22
eor r21,r23
eor r26,r23
eor r18,r23
eor r19,r26
eor r20,r27
eor r21,r22
eor r18,r10
eor r19,r11
eor r20,r12
eor r21,r13
movw r22,r10
movw r26,r12
eor r22,r27
eor r27,r26
eor r26,r23
eor r18,r23
eor r19,r26
eor r20,r27
eor r21,r22
movw r22,r14
movw r26,r24
eor r22,r27
eor r18,r27
eor r19,r22
eor r20,r23
eor r21,r26
eor r27,r26
eor r18,r26
eor r19,r27
eor r20,r22
eor r21,r23
std Z+17,r18
std Z+21,r19
std Z+25,r20
std Z+29,r21
ldd r18,Z+18
ldd r19,Z+22
ldd r20,Z+26
ldd r21,Z+30
eor r18,r2
eor r19,r3
eor r20,r4
eor r21,r5
movw r22,r2
movw r26,r4
eor r22,r27
eor r18,r27
eor r19,r22
eor r20,r23
eor r21,r26
eor r18,r6
eor r19,r7
eor r20,r8
eor r21,r9
eor r18,r10
eor r19,r11
eor r20,r12
eor r21,r13
movw r22,r14
movw r26,r24
eor r22,r27
eor r18,r27
eor r19,r22
eor r20,r23
eor r21,r26
eor r27,r26
eor r18,r26
eor r19,r27
eor r20,r22
eor r21,r23
eor r26,r23
eor r18,r23
eor r19,r26
eor r20,r27
eor r21,r22
std Z+18,r18
std Z+22,r19
std Z+26,r20
std Z+30,r21
ldd r18,Z+19
ldd r19,Z+23
ldd r20,Z+27
ldd r21,Z+31
eor r18,r2
eor r19,r3
eor r20,r4
eor r21,r5
movw r22,r2
movw r26,r4
eor r22,r27
eor r27,r26
eor r18,r26
eor r19,r27
eor r20,r22
eor r21,r23
movw r22,r6
movw r26,r8
eor r22,r27
eor r18,r27
eor r19,r22
eor r20,r23
eor r21,r26
eor r27,r26
eor r26,r23
eor r18,r23
eor r19,r26
eor r20,r27
eor r21,r22
movw r22,r10
movw r26,r12
eor r22,r27
eor r18,r27
eor r19,r22
eor r20,r23
eor r21,r26
eor r18,r14
eor r19,r15
eor r20,r24
eor r21,r25
movw r22,r14
movw r26,r24
eor r22,r27
eor r18,r27
eor r19,r22
eor r20,r23
eor r21,r26
std Z+19,r18
std Z+23,r19
std Z+27,r20
std Z+31,r21
ld r2,Z
ldd r3,Z+1
ldd r4,Z+2
ldd r5,Z+3
ldd r6,Z+4
ldd r7,Z+5
ldd r8,Z+6
ldd r9,Z+7
ldd r10,Z+8
ldd r11,Z+9
ldd r12,Z+10
ldd r13,Z+11
ldd r14,Z+12
ldd r15,Z+13
ldd r24,Z+14
ldd r25,Z+15
ret
1960:
bst r2,0
bld r18,0
bst r6,0
bld r18,1
bst r10,0
bld r18,2
bst r14,0
bld r18,3
bst r2,1
bld r18,4
bst r6,1
bld r18,5
bst r10,1
bld r18,6
bst r14,1
bld r18,7
bst r2,2
bld r19,0
bst r6,2
bld r19,1
bst r10,2
bld r19,2
bst r14,2
bld r19,3
bst r2,3
bld r19,4
bst r6,3
bld r19,5
bst r10,3
bld r19,6
bst r14,3
bld r19,7
bst r2,4
bld r20,0
bst r6,4
bld r20,1
bst r10,4
bld r20,2
bst r14,4
bld r20,3
bst r2,5
bld r20,4
bst r6,5
bld r20,5
bst r10,5
bld r20,6
bst r14,5
bld r20,7
bst r2,6
bld r21,0
bst r6,6
bld r21,1
bst r10,6
bld r21,2
bst r14,6
bld r21,3
bst r2,7
bld r21,4
bst r6,7
bld r21,5
bst r10,7
bld r21,6
bst r14,7
bld r21,7
st Z,r18
std Z+1,r19
std Z+2,r20
std Z+3,r21
bst r3,0
bld r18,0
bst r7,0
bld r18,1
bst r11,0
bld r18,2
bst r15,0
bld r18,3
bst r3,1
bld r18,4
bst r7,1
bld r18,5
bst r11,1
bld r18,6
bst r15,1
bld r18,7
bst r3,2
bld r19,0
bst r7,2
bld r19,1
bst r11,2
bld r19,2
bst r15,2
bld r19,3
bst r3,3
bld r19,4
bst r7,3
bld r19,5
bst r11,3
bld r19,6
bst r15,3
bld r19,7
bst r3,4
bld r20,0
bst r7,4
bld r20,1
bst r11,4
bld r20,2
bst r15,4
bld r20,3
bst r3,5
bld r20,4
bst r7,5
bld r20,5
bst r11,5
bld r20,6
bst r15,5
bld r20,7
bst r3,6
bld r21,0
bst r7,6
bld r21,1
bst r11,6
bld r21,2
bst r15,6
bld r21,3
bst r3,7
bld r21,4
bst r7,7
bld r21,5
bst r11,7
bld r21,6
bst r15,7
bld r21,7
std Z+4,r18
std Z+5,r19
std Z+6,r20
std Z+7,r21
bst r4,0
bld r18,0
bst r8,0
bld r18,1
bst r12,0
bld r18,2
bst r24,0
bld r18,3
bst r4,1
bld r18,4
bst r8,1
bld r18,5
bst r12,1
bld r18,6
bst r24,1
bld r18,7
bst r4,2
bld r19,0
bst r8,2
bld r19,1
bst r12,2
bld r19,2
bst r24,2
bld r19,3
bst r4,3
bld r19,4
bst r8,3
bld r19,5
bst r12,3
bld r19,6
bst r24,3
bld r19,7
bst r4,4
bld r20,0
bst r8,4
bld r20,1
bst r12,4
bld r20,2
bst r24,4
bld r20,3
bst r4,5
bld r20,4
bst r8,5
bld r20,5
bst r12,5
bld r20,6
bst r24,5
bld r20,7
bst r4,6
bld r21,0
bst r8,6
bld r21,1
bst r12,6
bld r21,2
bst r24,6
bld r21,3
bst r4,7
bld r21,4
bst r8,7
bld r21,5
bst r12,7
bld r21,6
bst r24,7
bld r21,7
std Z+8,r18
std Z+9,r19
std Z+10,r20
std Z+11,r21
bst r5,0
bld r18,0
bst r9,0
bld r18,1
bst r13,0
bld r18,2
bst r25,0
bld r18,3
bst r5,1
bld r18,4
bst r9,1
bld r18,5
bst r13,1
bld r18,6
bst r25,1
bld r18,7
bst r5,2
bld r19,0
bst r9,2
bld r19,1
bst r13,2
bld r19,2
bst r25,2
bld r19,3
bst r5,3
bld r19,4
bst r9,3
bld r19,5
bst r13,3
bld r19,6
bst r25,3
bld r19,7
bst r5,4
bld r20,0
bst r9,4
bld r20,1
bst r13,4
bld r20,2
bst r25,4
bld r20,3
bst r5,5
bld r20,4
bst r9,5
bld r20,5
bst r13,5
bld r20,6
bst r25,5
bld r20,7
bst r5,6
bld r21,0
bst r9,6
bld r21,1
bst r13,6
bld r21,2
bst r25,6
bld r21,3
bst r5,7
bld r21,4
bst r9,7
bld r21,5
bst r13,7
bld r21,6
bst r25,7
bld r21,7
std Z+12,r18
std Z+13,r19
std Z+14,r20
std Z+15,r21
ldd r2,Z+16
ldd r3,Z+17
ldd r4,Z+18
ldd r5,Z+19
ldd r6,Z+20
ldd r7,Z+21
ldd r8,Z+22
ldd r9,Z+23
ldd r10,Z+24
ldd r11,Z+25
ldd r12,Z+26
ldd r13,Z+27
ldd r14,Z+28
ldd r15,Z+29
ldd r24,Z+30
ldd r25,Z+31
bst r2,0
bld r18,0
bst r6,0
bld r18,1
bst r10,0
bld r18,2
bst r14,0
bld r18,3
bst r2,1
bld r18,4
bst r6,1
bld r18,5
bst r10,1
bld r18,6
bst r14,1
bld r18,7
bst r2,2
bld r19,0
bst r6,2
bld r19,1
bst r10,2
bld r19,2
bst r14,2
bld r19,3
bst r2,3
bld r19,4
bst r6,3
bld r19,5
bst r10,3
bld r19,6
bst r14,3
bld r19,7
bst r2,4
bld r20,0
bst r6,4
bld r20,1
bst r10,4
bld r20,2
bst r14,4
bld r20,3
bst r2,5
bld r20,4
bst r6,5
bld r20,5
bst r10,5
bld r20,6
bst r14,5
bld r20,7
bst r2,6
bld r21,0
bst r6,6
bld r21,1
bst r10,6
bld r21,2
bst r14,6
bld r21,3
bst r2,7
bld r21,4
bst r6,7
bld r21,5
bst r10,7
bld r21,6
bst r14,7
bld r21,7
std Z+16,r18
std Z+17,r19
std Z+18,r20
std Z+19,r21
bst r3,0
bld r18,0
bst r7,0
bld r18,1
bst r11,0
bld r18,2
bst r15,0
bld r18,3
bst r3,1
bld r18,4
bst r7,1
bld r18,5
bst r11,1
bld r18,6
bst r15,1
bld r18,7
bst r3,2
bld r19,0
bst r7,2
bld r19,1
bst r11,2
bld r19,2
bst r15,2
bld r19,3
bst r3,3
bld r19,4
bst r7,3
bld r19,5
bst r11,3
bld r19,6
bst r15,3
bld r19,7
bst r3,4
bld r20,0
bst r7,4
bld r20,1
bst r11,4
bld r20,2
bst r15,4
bld r20,3
bst r3,5
bld r20,4
bst r7,5
bld r20,5
bst r11,5
bld r20,6
bst r15,5
bld r20,7
bst r3,6
bld r21,0
bst r7,6
bld r21,1
bst r11,6
bld r21,2
bst r15,6
bld r21,3
bst r3,7
bld r21,4
bst r7,7
bld r21,5
bst r11,7
bld r21,6
bst r15,7
bld r21,7
std Z+20,r18
std Z+21,r19
std Z+22,r20
std Z+23,r21
bst r4,0
bld r18,0
bst r8,0
bld r18,1
bst r12,0
bld r18,2
bst r24,0
bld r18,3
bst r4,1
bld r18,4
bst r8,1
bld r18,5
bst r12,1
bld r18,6
bst r24,1
bld r18,7
bst r4,2
bld r19,0
bst r8,2
bld r19,1
bst r12,2
bld r19,2
bst r24,2
bld r19,3
bst r4,3
bld r19,4
bst r8,3
bld r19,5
bst r12,3
bld r19,6
bst r24,3
bld r19,7
bst r4,4
bld r20,0
bst r8,4
bld r20,1
bst r12,4
bld r20,2
bst r24,4
bld r20,3
bst r4,5
bld r20,4
bst r8,5
bld r20,5
bst r12,5
bld r20,6
bst r24,5
bld r20,7
bst r4,6
bld r21,0
bst r8,6
bld r21,1
bst r12,6
bld r21,2
bst r24,6
bld r21,3
bst r4,7
bld r21,4
bst r8,7
bld r21,5
bst r12,7
bld r21,6
bst r24,7
bld r21,7
std Z+24,r18
std Z+25,r19
std Z+26,r20
std Z+27,r21
bst r5,0
bld r18,0
bst r9,0
bld r18,1
bst r13,0
bld r18,2
bst r25,0
bld r18,3
bst r5,1
bld r18,4
bst r9,1
bld r18,5
bst r13,1
bld r18,6
bst r25,1
bld r18,7
bst r5,2
bld r19,0
bst r9,2
bld r19,1
bst r13,2
bld r19,2
bst r25,2
bld r19,3
bst r5,3
bld r19,4
bst r9,3
bld r19,5
bst r13,3
bld r19,6
bst r25,3
bld r19,7
bst r5,4
bld r20,0
bst r9,4
bld r20,1
bst r13,4
bld r20,2
bst r25,4
bld r20,3
bst r5,5
bld r20,4
bst r9,5
bld r20,5
bst r13,5
bld r20,6
bst r25,5
bld r20,7
bst r5,6
bld r21,0
bst r9,6
bld r21,1
bst r13,6
bld r21,2
bst r25,6
bld r21,3
bst r5,7
bld r21,4
bst r9,7
bld r21,5
bst r13,7
bld r21,6
bst r25,7
bld r21,7
std Z+28,r18
std Z+29,r19
std Z+30,r20
std Z+31,r21
adiw r28,32
in r0,0x3f
cli
out 0x3e,r29
out 0x3f,r0
out 0x3d,r28
pop r16
pop r15
pop r14
pop r13
pop r12
pop r11
pop r10
pop r9
pop r8
pop r7
pop r6
pop r5
pop r4
pop r3
pop r2
pop r29
pop r28
ret
.size photon256_permute, .-photon256_permute
#endif
|
aadomn/cymric
| 15,328
|
artifact_tches2025-3/benchmark_avr/cymric_lwc/cymric_lwc/giftcofb/gift128.S
|
; Argument registers for function calls
; (avr-gcc ABI: the first three pointer/word arguments arrive in the
; register pairs r25:r24, r23:r22 and r21:r20; each name below aliases
; the low byte of a pair and is used as the source of a movw into X/Y/Z)
#define ARG1 r24
#define ARG2 r22
#define ARG3 r20
/**
 * push_registers macro:
 *
 * Pushes a given range of registers in ascending order.
 * Expands recursively at assembly time into one `push` per register.
 * To be called like: push_registers 0,15
 */
.macro push_registers from:req, to:req
push \from
.if \to-\from
push_registers "(\from+1)",\to          ; recurse on the next register
.endif
.endm
/**
 * pop_registers macro:
 *
 * Pops a given range of registers in descending order
 * (the mirror image of push_registers, so push/pop pairs balance).
 * To be called like: pop_registers 0,15
 */
.macro pop_registers from:req, to:req
pop \to
.if \to-\from
pop_registers \from,"(\to-1)"           ; recurse on the previous register
.endif
.endm
/**
 * sbox macro:
 *
 * Computes the S-box layer in a bitsliced manner on a quarter of the state.
 * x0..x3 are the four bit-slices of 8 S-box inputs.
 * Clobbers: r16.
 */
.macro sbox x0, x1, x2, x3
mov r16, \x0
and r16, \x2            ; x1 ^= x0 & x2
eor \x1, r16
mov r16, \x1
and r16, \x3            ; x0 ^= x1 & x3
eor \x0, r16
mov r16, \x0
or r16, \x1             ; x2 ^= x0 | x1
eor \x2, r16
eor \x3, \x2            ; x3 ^= x2
eor \x1, \x3            ; x1 ^= x3
com \x3                 ; x3 = ~x3
mov r16, \x0
and r16, \x1            ; x2 ^= x0 & x1
eor \x2, r16
.endm
/**
 * llayer1 macro:
 *
 * Computes the linear layer on a quarter of the state for the 1st round
 * within the quintuple round routine (nibble-wise rotations).
 * Assumes: r17 = 0x33 and r18 = 0x11 (preloaded by the caller).
 * Clobbers: r16.
 */
.macro llayer1 x1, x2, x3
// NIBBLE_ROR2: rotate each 4-bit nibble of x1 right by 2
mov r16, \x1
lsr r16
lsr r16
and r16, r17            ; (x1 >> 2) & 0x33
and \x1, r17
lsl \x1
lsl \x1                 ; (x1 & 0x33) << 2
or \x1, r16
// NIBBLE_ROR1: rotate each nibble of x3 right by 1
mov r16, \x3
lsr r16
cbr r16, 136            ; (x3 >> 1) & 0x77 (0x88 = bits shifted across nibbles)
and \x3, r18
lsl \x3
lsl \x3
lsl \x3                 ; (x3 & 0x11) << 3
or \x3, r16
//NIBBLE_ROR3: rotate each nibble of x2 right by 3
mov r16, \x2
lsr \x2
lsr \x2
lsr \x2
and \x2, r18            ; (x2 >> 3) & 0x11
cbr r16, 136
lsl r16                 ; (x2 & 0x77) << 1
or \x2, r16
.endm
/**
 * half_ror_4 macro:
 *
 * Rotates a 16-bit word hi:lo by 4 bits to the right.
 * Assumes r18 contains 0x0f.
 * Requires hi to be an even register with lo = hi+1 (movw constraint).
 * Clobbers: r16, r17.
 */
.macro half_ror_4 hi, lo
swap \hi                ; swap nibbles of each byte
swap \lo
movw r16, \hi           ; r16:r17 = swapped hi:lo
cbr r16, 15
and \hi, r18
cbr r17, 15
and \lo, r18
or \hi, r17             ; recombine nibbles across the two bytes
or \lo, r16
.endm
/**
 * half_ror_12 macro:
 *
 * Rotates a 16-bit word hi:lo by 12 bits to the right.
 * Assumes r18 contains 0xf0 (note: the complement mask of half_ror_4).
 * Requires hi to be an even register with lo = hi+1 (movw constraint).
 * Clobbers: r16, r17.
 */
.macro half_ror_12 hi, lo
swap \hi                ; swap nibbles of each byte
swap \lo
movw r16, \hi           ; r16:r17 = swapped hi:lo
cbr r16, 240
and \hi, r18
cbr r17, 240
and \lo, r18
or \hi, r17             ; recombine nibbles across the two bytes
or \lo, r16
.endm
/**
 * byte_ror_2 macro:
 *
 * Rotates a byte by 2 bits to the right,
 * using the T flag to carry bit 0 around to bit 7.
 * Clobbers: T flag.
 */
.macro byte_ror_2 x
bst \x, 0               ; T = bit 0
lsr \x
bld \x, 7               ; bit 7 = old bit 0
bst \x, 0
lsr \x
bld \x, 7
.endm
/**
 * byte_rol_2 macro:
 *
 * Rotates a byte by 2 bits to the left.
 * \zero must be a register holding 0; adc folds the shifted-out
 * carry bit back into bit 0.
 */
.macro byte_rol_2 x, zero
lsl \x
adc \x, \zero           ; bit 0 = old bit 7
lsl \x
adc \x, \zero
.endm
/**
 * add_round_key macro:
 *
 * Adds (XORs) a round key to half of the state.
 * Reads 8 round-key bytes from X, post-incrementing it.
 * Clobbers: r16, r17.
 */
.macro add_round_key x0, x1, x2, x3, x4, x5, x6, x7
ld r16, X+
ld r17, X+
eor \x0, r16
eor \x1, r17
ld r16, X+
ld r17, X+
eor \x2, r16
eor \x3, r17
ld r16, X+
ld r17, X+
eor \x4, r16
eor \x5, r17
ld r16, X+
ld r17, X+
eor \x6, r16
eor \x7, r17
.endm
/**
 * add_rconst macro:
 *
 * Adds (XORs) round constants to a quarter of the state.
 * Reads 4 constant bytes from Z, post-incrementing it.
 * Clobbers: r16, r17.
 */
.macro add_rconst x0, x1, x2, x3
ld r16, Z+
ld r17, Z+
eor \x0, r16
eor \x1, r17
ld r16, Z+
ld r17, Z+
eor \x2, r16
eor \x3, r17
.endm
/**
 * add_rconst0 macro:
 *
 * Same as add_rconst but w/ a specificity for rounds r s.t.
 * r = 0 mod 5: the last rconst byte is always 0x10 so we hardcode it.
 * Reads only 3 bytes from Z. Clobbers: r16, r17.
 */
.macro add_rconst0 x0, x1, x2, x3
ld r16, Z+
ld r17, Z+
eor \x0, r16
eor \x1, r17
ld r16, Z+
ldi r17, 16             ; hardcoded 4th constant byte 0x10
eor \x2, r16
eor \x3, r17
.endm
/**
 * add_rconst1 macro:
 *
 * Same as add_rconst but w/ a specificity for rounds r s.t.
 * r = 1 mod 5: the 1st and 3rd rconst bytes are always 0x00 and 0x01
 * respectively so we hardcode them (the 0x00 byte is skipped entirely,
 * hence only x1..x3 appear). Reads only 2 bytes from Z.
 * Clobbers: r16, r17.
 */
.macro add_rconst1 x1, x2, x3
ld r16, Z+
ldi r17, 1              ; hardcoded constant byte 0x01
eor \x1, r16
eor \x2, r17
ld r16, Z+
eor \x3, r16
.endm
/**
 * add_rconst2 macro:
 *
 * Same as add_rconst but w/ a specificity for rounds r s.t.
 * r = 2 mod 5: the first two bytes are always 0x02 and 0x00
 * respectively so we hardcode them (the 0x00 byte is skipped entirely,
 * hence x1 does not appear). Reads only 2 bytes from Z.
 * Clobbers: r16, r17.
 */
.macro add_rconst2 x0, x2, x3
ldi r16, 2              ; hardcoded constant byte 0x02
ld r17, Z+
eor \x0, r16
eor \x2, r17
ld r16, Z+
eor \x3, r16
.endm
/**
 * llayer3 macro:
 *
 * Computes the linear layer on a quarter of the state for the 3rd round
 * within the quintuple round routine. Per byte this is
 * SWAPMOVE(x, x, 0x55, 1): tmp = (x ^ (x >> 1)) & 0x55; x ^= tmp ^ (tmp << 1),
 * i.e. it exchanges each even/odd bit pair.
 * Requires x1 to be an even register with x2 = x1+1 (movw constraint).
 * Clobbers: r16, r17, r28, r29.
 */
.macro llayer3 x1, x2
movw r16, \x1
movw r28, \x1
lsr r28
lsr r29
eor r16, r28            ; x ^ (x >> 1), byte-wise
eor r17, r29
andi r16, 85            ; mask 0x55
andi r17, 85
eor \x1, r16
eor \x2, r17
lsl r16
lsl r17
eor \x1, r16            ; fold tmp << 1 back in
eor \x2, r17
.endm
/**
 * kexp_round macro:
 *
 * One GIFT-128 key-schedule update on a 32-bit key word:
 * the 16-bit half k0||k1 is rotated right by 2 and the half
 * k2||k3 is rotated left by 4, then k2 and k3 are xor-swapped.
 * Assumes r20 = 16 (used by mul to shift left by 4).
 * Clobbers: r0, r1 (by mul), r30, T flag.
 * NOTE(review): callers must restore r1 = 0 before returning to C code.
 */
.macro kexp_round k0, k1, k2, k3
; k0||k1 >>> 2 (twice: save bit 0 in T, ror both bytes, patch bit 7 --
; the garbage carry rotated into k0 bit 7 is overwritten by bld)
bst \k1, 0
ror \k0
ror \k1
bld \k0, 7
bst \k1, 0
ror \k0
ror \k1
bld \k0, 7
; k2||k3 <<< 4 via multiplication by 16 (r1:r0 = product)
mov r30, \k3
mul \k2, r20
mov \k2, r1
mov \k3, r0
mul r30, r20
or \k2, r0
or \k3, r1
eor \k2, \k3            ; xor-swap k2 <-> k3
eor \k3, \k2
eor \k2, \k3
.endm
/**
 * rearrange_rkey0 macro:
 *
 * Bit-rearranges the 32-bit round-key word held in bytes a..d into the
 * fixsliced representation (type-0 ordering) via a sequence of SWAPMOVE
 * operations, and stores the 4 result bytes to X (post-incremented).
 * Assumes: r18 = 0xf0, r19 = 0x0f.
 * Clobbers: r22-r25, r28-r31. Inputs a..d are not modified.
 */
.macro rearrange_rkey0 a, b, c, d
mov r22, \a
mov r23, \b
mov r24, \c
mov r25, \d
// SWAPMOVE(x, x, 0x00550055, 9) : x & 550055 <-> x & aa00aa00
mov r28, \a
mov r30, \c
lsr r28
lsr r30
eor r28, \b
eor r30, \d
andi r28, 85
andi r30, 85
eor r23, r28
eor r25, r30
lsl r28
lsl r30
eor r22, r28
eor r24, r30
// SWAPMOVE(x, x, 0x000f000f, 12) : x & 000f000f <-> x & f000f000
movw r28, r22
mov r30, r24
mov r31, r25
swap r28
swap r29
swap r30
swap r31
andi r28, 15
andi r29, 240
andi r30, 15
andi r31, 240
and r22, r19
and r23, r18
and r24, r19
and r25, r18
or r22, r29
or r23, r28
or r24, r31
or r25, r30
// SWAPMOVE(x, x, 0x00003333, 18) : x & 00003333 <-> x & cccc0000
movw r30, r22
lsr r30
lsr r30
lsr r31
lsr r31
eor r30, r24
eor r31, r25
andi r30, 51
andi r31, 51
eor r24, r30
eor r25, r31
lsl r30
lsl r30
lsl r31
lsl r31
eor r22, r30
eor r23, r31
// SWAPMOVE(x, x, 0x000000ff, 24) -- realized for free by the store order
st X+, r22
st X+, r24
st X+, r23
st X+, r25
.endm
/**
 * swap_4bits macro:
 *
 * Exchanges bit src0 with bit dst0, and bit src1 with bit dst1,
 * within a single register, using the T flag and a scratch copy.
 * \tmp is clobbered; the T flag is clobbered.
 */
.macro swap_4bits reg, tmp, src0, dst0, src1, dst1
mov \tmp, \reg          ; keep the original bits for the reverse moves
bst \reg, \src0
bld \reg, \dst0
bst \reg, \src1
bld \reg, \dst1
bst \tmp, \dst0
bld \reg, \src0
bst \tmp, \dst1
bld \reg, \src1
.endm
/**
 * rearrange_rkey1 macro:
 *
 * Bit-rearranges the 32-bit round-key word held in bytes a..d into the
 * fixsliced representation (type-1 ordering) and stores the 4 result
 * bytes to X (post-incremented).
 * Assumes: r18 = 0xf0, r19 = 0x0f.
 * Clobbers: r22-r25, r28-r31, T flag. Inputs a..d are not modified.
 */
.macro rearrange_rkey1 a, b, c, d
mov r22, \a
mov r23, \b
mov r24, \c
mov r25, \d
// SWAPMOVE(x, x, 0x11111111, 3) : x & 11111111 <-> x & 88888888
swap_4bits r22, r28, 0, 3, 4, 7
swap_4bits r23, r28, 0, 3, 4, 7
swap_4bits r24, r28, 0, 3, 4, 7
swap_4bits r25, r28, 0, 3, 4, 7
// SWAPMOVE(x, x, 0x03030303, 6) : x & 03030303 <-> x & c0c0c0c0
swap_4bits r22, r28, 0, 6, 1, 7
swap_4bits r23, r28, 0, 6, 1, 7
swap_4bits r24, r28, 0, 6, 1, 7
swap_4bits r25, r28, 0, 6, 1, 7
// SWAPMOVE(x, x, 0x000f000f, 12) : x & 000f000f <-> x & f000f000
movw r28, r22
mov r30, r24
mov r31, r25
swap r28
swap r29
swap r30
swap r31
andi r28, 15
andi r29, 240
andi r30, 15
andi r31, 240
and r22, r19
and r23, r18
and r24, r19
and r25, r18
or r22, r29
or r23, r28
or r24, r31
or r25, r30
// SWAPMOVE(x, x, 0x000000ff, 24) -- realized for free by the store order
st X+, r22
st X+, r24
st X+, r23
st X+, r25
.endm
/**
 * rearrange_rkey2 macro:
 *
 * Bit-rearranges the 32-bit round-key word held in bytes a..d into the
 * fixsliced representation (type-2 ordering) and stores the 4 result
 * bytes to X (post-incremented).
 * Assumes: r18 = 0xf0, r19 = 0x0f; a must be an even register with
 * b = a+1 (movw constraint).
 * Clobbers: r22-r25, r28-r31. Inputs a..d are not modified.
 */
.macro rearrange_rkey2 a, b, c, d
mov r22, \a
mov r23, \b
mov r24, \c
mov r25, \d
// SWAPMOVE step: swaps a & 0x55 with c & 0xaa (and b & 0x55 with d & 0xaa)
// across the byte lanes with a shift of 1
movw r28, \a
lsl r29
lsl r28
eor r28, \c
eor r29, \d
andi r28, 170
andi r29, 170
eor r24, r28
eor r25, r29
lsr r28
lsr r29
eor r22, r28
eor r23, r29
// SWAPMOVE(x, x, 0x00003333, 18) : x & 00003333 <-> x & cccc0000
movw r30, r22
lsr r30
lsr r30
lsr r31
lsr r31
eor r30, r24
eor r31, r25
andi r30, 51
andi r31, 51
eor r24, r30
eor r25, r31
lsl r30
lsl r30
lsl r31
lsl r31
eor r22, r30
eor r23, r31
// SWAPMOVE step: exchanges the low nibbles of c,d with the high nibbles
// of a,b (nibble cross-swap via swap + mask)
movw r28, r22
movw r30, r24
swap r28
swap r29
swap r30
swap r31
andi r28, 240
andi r29, 240
andi r30, 15
andi r31, 15
and r22, r18
and r23, r18
and r24, r19
and r25, r19
or r22, r30
or r23, r31
or r24, r28
or r25, r29
// SWAPMOVE(x, x, 0x000000ff, 24) -- realized for free by the store order
st X+, r22
st X+, r24
st X+, r23
st X+, r25
.endm
/**
 * rearrange_rkey3 macro:
 *
 * Bit-rearranges the 32-bit round-key word held in bytes a..d into the
 * fixsliced representation (type-3 ordering) and stores the 4 result
 * bytes to X (post-incremented).
 * Assumes: r18 = 0xf0, r19 = 0x0f.
 * Clobbers: r22-r25, r28-r31, T flag. Inputs a..d are not modified.
 */
.macro rearrange_rkey3 a, b, c, d
mov r22, \a
mov r23, \b
mov r24, \c
mov r25, \d
// SWAPMOVE(x, x, 0x0a0a0a0a, 3) : x & 0a0a0a0a <-> x & 50505050
swap_4bits r22, r28, 1, 4, 3, 6
swap_4bits r23, r28, 1, 4, 3, 6
swap_4bits r24, r28, 1, 4, 3, 6
swap_4bits r25, r28, 1, 4, 3, 6
// SWAPMOVE step: swaps b & 0xcc with a & 0x33 (and d & 0xcc with c & 0x33)
// across adjacent byte lanes with a shift of 2
mov r28, r22
mov r30, r24
lsl r28
lsl r28
lsl r30
lsl r30
eor r28, r23
eor r30, r25
andi r28, 204
andi r30, 204
eor r23, r28
eor r25, r30
lsr r28
lsr r28
lsr r30
lsr r30
eor r22, r28
eor r24, r30
// SWAPMOVE step: exchanges the low nibbles of c,d with the high nibbles
// of a,b (nibble cross-swap via swap + mask)
movw r28, r22
movw r30, r24
swap r28
swap r29
swap r30
swap r31
andi r28, 240
andi r29, 240
andi r30, 15
andi r31, 15
and r22, r18
and r23, r18
and r24, r19
and r25, r19
or r22, r30
or r23, r31
or r24, r28
or r25, r29
// SWAPMOVE(x, x, 0x000000ff, 24) -- realized for free by the store order
st X+, r22
st X+, r24
st X+, r23
st X+, r25
.endm
/**
 * swap_bytes macro:
 *
 * Exchanges the contents of two registers using the xor-swap trick
 * (no scratch register needed).
 */
.macro swap_bytes x, y
eor \x, \y
eor \y, \x
eor \x, \y
.endm
/**
 * mygift128_kexp(rkeys, key):
 *   ARG1 = pointer to the output round-key array (written via X)
 *   ARG2 = pointer to the 16-byte master key (read via Y)
 *
 * GIFT-128 fixsliced key schedule: loads the key, then interleaves
 * kexp_round updates with rearrange_rkey* stores for 8 iterations.
 *
 * Fix: `mul` (inside kexp_round) leaves its product high byte in r1,
 * the avr-gcc zero register; r1 is now cleared before returning so
 * C callers see the required r1 == 0 invariant.
 */
.global mygift128_kexp
mygift128_kexp:
; Save r2-r17, r28-r31, r24-r25 (callee-saved registers used below)
push_registers 2,17
push_registers 28,31
push_registers 24,25
; Save the argument pointers: X = round keys (out), Y = key (in)
movw XL, ARG1
movw YL, ARG2
; Load the key given by argument to register 2-17 instead of 0-15 because
; the mul instruction unconditionally overwrites registers r1:r0.
.irp param,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15,r16,r17
ld \param, Y+
.endr
; Constants for efficient bitshifts
ldi r18, 240            ; 0xf0 nibble mask (rearrange_rkey*)
ldi r19, 15             ; 0x0f nibble mask (rearrange_rkey*)
ldi r20, 16             ; multiplier for the <<< 4 rotation in kexp_round
; First batch of round keys (prologue before the main loop)
rearrange_rkey0 r14, r15, r16, r17
rearrange_rkey0 r6, r7, r8, r9
rearrange_rkey1 r10, r11, r12, r13
rearrange_rkey1 r2, r3, r4, r5
rearrange_rkey2 r6, r7, r8, r9
kexp_round r14, r15, r16, r17
rearrange_rkey2 r14, r15, r16, r17
rearrange_rkey3 r2, r3, r4, r5
kexp_round r10, r11, r12, r13
rearrange_rkey3 r10, r11, r12, r13
st X+, r17
st X+, r16
st X+, r15
st X+, r14
kexp_round r6, r7, r8, r9
st X+, r9
st X+, r8
st X+, r7
st X+, r6
; Loop counter: 7 more iterations
ldi r21, 7
kexp_loop:
cpi r21, 4
brne skip_swap_start    ; extra key-half swap only on iteration r21 == 4
swap_bytes r10, r2
swap_bytes r11, r3
swap_bytes r12, r4
swap_bytes r13, r5
skip_swap_start:
rearrange_rkey0 r10, r11, r12, r13
kexp_round r2, r3, r4, r5
rearrange_rkey0 r2, r3, r4, r5
rearrange_rkey1 r6, r7, r8, r9
kexp_round r14, r15, r16, r17
rearrange_rkey1 r14, r15, r16, r17
rearrange_rkey2 r2, r3, r4, r5
kexp_round r10, r11, r12, r13
rearrange_rkey2 r10, r11, r12, r13
rearrange_rkey3 r14, r15, r16, r17
kexp_round r6, r7, r8, r9
rearrange_rkey3 r6, r7, r8, r9
st X+, r13
st X+, r12
st X+, r11
st X+, r10
kexp_round r2, r3, r4, r5
st X+, r5
st X+, r4
st X+, r3
st X+, r2
; Rotate the two key halves for the next iteration
swap_bytes r10, r14
swap_bytes r11, r15
swap_bytes r12, r16
swap_bytes r13, r17
swap_bytes r2, r6
swap_bytes r3, r7
swap_bytes r4, r8
swap_bytes r5, r9
cpi r21, 4
brne skip_swap_end      ; undo the extra swap on iteration r21 == 4
swap_bytes r10, r2
swap_bytes r11, r3
swap_bytes r12, r4
swap_bytes r13, r5
skip_swap_end:
; decrement loop counter (subi sets Z, so no separate cpi is needed)
subi r21, 1
breq kexp_exit
rjmp kexp_loop
kexp_exit:
; Restore the zero register clobbered by mul (required by the avr-gcc ABI)
clr r1
; Restore r24-r25, r28-r31, r2-r17
pop_registers 24,25
pop_registers 28,31
pop_registers 2,17
ret
/**
 * mygift128_enc(ct, pt, rkeys):
 *   ARG1 = pointer to the 16-byte output block
 *   ARG2 = pointer to the 16-byte input block
 *   ARG3 = pointer to the expanded round keys (read via X)
 *
 * GIFT-128 fixsliced encryption: 8 quintuple rounds (40 rounds total),
 * state held in r0-r15, round constants read from `rconst` via Z.
 *
 * Fix: the state occupies r1:r0, so r1 (the avr-gcc zero register) is
 * dirty at the end; it is now cleared before returning as the ABI requires.
 */
.global mygift128_enc
mygift128_enc:
; Save r2-r17, r28-r29 (callee-saved registers used below)
push_registers 2,17
push_registers 28,29
; X <- plaintext pointer
movw XL, ARG2
; Load the 16-byte plaintext block into r0-r15. Unlike the key schedule,
; mul is not used here, so r1:r0 are free to hold state (r1 is restored
; to 0 before returning).
.irp param,r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15
ld \param, X+
.endr
ldi ZL, lo8(rconst)     ; Z <- round-constant table
ldi ZH, hi8(rconst)
movw XL, ARG3           ; X <- round keys
// zero register for byte_rol_2
ldi r20, 0
// loop counter: 8 quintuple rounds
ldi r19, 8
quintuple_round:
// 1st round
sbox r0, r4, r8, r12
sbox r1, r5, r9, r13
sbox r2, r6, r10, r14
sbox r3, r7, r11, r15
ldi r17, 51             ; 0x33 mask for llayer1
ldi r18, 17             ; 0x11 mask for llayer1
llayer1 r4, r8, r12
llayer1 r5, r9, r13
llayer1 r6, r10, r14
llayer1 r7, r11, r15
add_round_key r4, r5, r6, r7, r8, r9, r10, r11
add_rconst0 r0, r1, r2, r3
// 2nd round
sbox r12, r4, r8, r0
sbox r13, r5, r9, r1
sbox r14, r6, r10, r2
sbox r15, r7, r11, r3
subi r18, 2             ; r18: 0x11 -> 0x0f for half_ror_4
half_ror_4 r0, r1
half_ror_4 r2, r3
ldi r18, 240            ; r18 = 0xf0 for half_ror_12
half_ror_12 r8, r9
half_ror_12 r10, r11
add_round_key r5, r4, r7, r6, r8, r9, r10, r11
add_rconst1 r13, r14, r15
// 3rd round
sbox r0, r5, r8, r12
sbox r1, r4, r9, r13
sbox r2, r7, r10, r14
sbox r3, r6, r11, r15
llayer3 r4, r5
llayer3 r6, r7
llayer3 r10, r11
llayer3 r12, r13
add_round_key r5, r4, r7, r6, r10, r11, r8, r9
add_rconst2 r0, r2, r3
// 4th round
sbox r14, r5, r10, r0
sbox r15, r4, r11, r1
sbox r12, r7, r8, r2
sbox r13, r6, r9, r3
// byte_ror_6 == byte_rol_2
byte_rol_2 r0, r20
byte_rol_2 r1, r20
byte_rol_2 r2, r20
byte_rol_2 r3, r20
// byte_ror_4 == nibble swap
swap r4
swap r5
swap r6
swap r7
// byte_ror_2
byte_ror_2 r8
byte_ror_2 r9
byte_ror_2 r10
byte_ror_2 r11
add_round_key r5, r4, r7, r6, r10, r11, r8, r9
add_rconst r14, r15, r12, r13
// 5th round
sbox r0, r5, r10, r14
sbox r1, r4, r11, r15
sbox r2, r7, r8, r12
sbox r3, r6, r9, r13
// swap state[0] w/ ROR(state[3], 24)
movw r16, r0
mov r0, r13
mov r1, r14
mov r13, r17
mov r14, r2
mov r17, r3
mov r2, r15
mov r3, r12
mov r15, r17
mov r12, r16
// state[1] = ROR(state[1], 16)
movw r16, r4
mov r4, r7
mov r7, r16
mov r5, r6
mov r6, r17
// state[2] = ROR(state[2], 8)
movw r16, r10
mov r10, r9
mov r9, r8
mov r8, r17
mov r11, r16
add_round_key r4, r5, r6, r7, r8, r9, r10, r11
// last rconst is always formed as 800000xx: xx from the table, 0x80 hardcoded
ld r16, Z+
eor r12, r16
ldi r16, 128
eor r15, r16
; decrement loop counter (subi sets Z, so no separate cpi is needed)
subi r19, 1
breq exit
rjmp quintuple_round
exit:
; Store the 16-byte ciphertext
movw YL, ARG1
st Y+, r0
st Y+, r1
st Y+, r2
st Y+, r3
st Y+, r4
st Y+, r5
st Y+, r6
st Y+, r7
st Y+, r8
st Y+, r9
st Y+, r10
st Y+, r11
st Y+, r12
st Y+, r13
st Y+, r14
st Y+, r15
; Restore the zero register used as state (required by the avr-gcc ABI)
clr r1
; Restore r28-r29, r2-r17
pop_registers 28,29
pop_registers 2,17
ret
.data
; Precomputed GIFT-128 round constants for the quintuple-round routine.
; One 12-byte row per quintuple round, 8 rows for 40 rounds in total.
; Byte budget per row matches the consumers in mygift128_enc:
; 3 bytes (add_rconst0) + 2 (add_rconst1) + 2 (add_rconst2)
; + 4 (add_rconst) + 1 (final hardcoded-0x80 round) = 12.
; NOTE(review): living in .data this table occupies SRAM at run time;
; it could presumably be moved to program memory with lpm-based loads --
; confirm against the target's memory budget before changing.
rconst:
.byte 0x08, 0x00, 0x00, 0x80, 0x80, 0x00, 0x54, 0x81, 0x01, 0x01, 0x01, 0x1f
.byte 0x80, 0x88, 0x88, 0xe0, 0x60, 0x50, 0x51, 0x80, 0x01, 0x03, 0x03, 0x2f
.byte 0x80, 0x88, 0x08, 0x60, 0x60, 0x50, 0x41, 0x80, 0x00, 0x03, 0x03, 0x27
.byte 0x80, 0x88, 0x00, 0xe0, 0x40, 0x50, 0x11, 0x80, 0x01, 0x02, 0x03, 0x2b
.byte 0x80, 0x08, 0x08, 0x40, 0x60, 0x40, 0x01, 0x80, 0x00, 0x02, 0x02, 0x21
.byte 0x80, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x51, 0x80, 0x01, 0x01, 0x03, 0x2e
.byte 0x00, 0x88, 0x08, 0x20, 0x60, 0x50, 0x40, 0x80, 0x00, 0x03, 0x01, 0x06
.byte 0x08, 0x88, 0x00, 0xa0, 0xc0, 0x50, 0x14, 0x81, 0x01, 0x02, 0x01, 0x1a
; Unexpanded 20-byte-per-row variant kept for reference (unused):
/*
.byte 0x08, 0x00, 0x00, 0x10, 0x00, 0x80, 0x01, 0x80, 0x02, 0x00, 0x00, 0x54, 0x81, 0x01, 0x01, 0x01, 0x1f, 0x00, 0x00, 0x80
.byte 0x80, 0x88, 0x88, 0x10, 0x00, 0xe0, 0x01, 0x60, 0x02, 0x00, 0x50, 0x51, 0x80, 0x01, 0x03, 0x03, 0x2f, 0x00, 0x00, 0x80
.byte 0x80, 0x88, 0x08, 0x10, 0x00, 0x60, 0x01, 0x60, 0x02, 0x00, 0x50, 0x41, 0x80, 0x00, 0x03, 0x03, 0x27, 0x00, 0x00, 0x80
.byte 0x80, 0x88, 0x00, 0x10, 0x00, 0xe0, 0x01, 0x40, 0x02, 0x00, 0x50, 0x11, 0x80, 0x01, 0x02, 0x03, 0x2b, 0x00, 0x00, 0x80
.byte 0x80, 0x08, 0x08, 0x10, 0x00, 0x40, 0x01, 0x60, 0x02, 0x00, 0x40, 0x01, 0x80, 0x00, 0x02, 0x02, 0x21, 0x00, 0x00, 0x80
.byte 0x80, 0x00, 0x00, 0x10, 0x00, 0xc0, 0x01, 0x00, 0x02, 0x00, 0x00, 0x51, 0x80, 0x01, 0x01, 0x03, 0x2e, 0x00, 0x00, 0x80
.byte 0x00, 0x88, 0x08, 0x10, 0x00, 0x20, 0x01, 0x60, 0x02, 0x00, 0x50, 0x40, 0x80, 0x00, 0x03, 0x01, 0x06, 0x00, 0x00, 0x80
.byte 0x08, 0x88, 0x00, 0x10, 0x00, 0xa0, 0x01, 0xc0, 0x02, 0x00, 0x50, 0x14, 0x81, 0x01, 0x02, 0x01, 0x1a, 0x00, 0x00, 0x80
*/
|
aadomn/cymric
| 85,418
|
artifact_tches2025-3/benchmark_avr/cymric_lwc/cymric_lwc/romulusn/internal-skinny-tiny-avr.S
|
#if defined(__AVR__)
/*
* Copyright (C) 2021 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <avr/io.h>
/* Automatically generated - do not edit */
.section .progmem.data,"a",@progbits
.p2align 8
.type table_0, @object
.size table_0, 256
table_0:
.byte 101
.byte 76
.byte 106
.byte 66
.byte 75
.byte 99
.byte 67
.byte 107
.byte 85
.byte 117
.byte 90
.byte 122
.byte 83
.byte 115
.byte 91
.byte 123
.byte 53
.byte 140
.byte 58
.byte 129
.byte 137
.byte 51
.byte 128
.byte 59
.byte 149
.byte 37
.byte 152
.byte 42
.byte 144
.byte 35
.byte 153
.byte 43
.byte 229
.byte 204
.byte 232
.byte 193
.byte 201
.byte 224
.byte 192
.byte 233
.byte 213
.byte 245
.byte 216
.byte 248
.byte 208
.byte 240
.byte 217
.byte 249
.byte 165
.byte 28
.byte 168
.byte 18
.byte 27
.byte 160
.byte 19
.byte 169
.byte 5
.byte 181
.byte 10
.byte 184
.byte 3
.byte 176
.byte 11
.byte 185
.byte 50
.byte 136
.byte 60
.byte 133
.byte 141
.byte 52
.byte 132
.byte 61
.byte 145
.byte 34
.byte 156
.byte 44
.byte 148
.byte 36
.byte 157
.byte 45
.byte 98
.byte 74
.byte 108
.byte 69
.byte 77
.byte 100
.byte 68
.byte 109
.byte 82
.byte 114
.byte 92
.byte 124
.byte 84
.byte 116
.byte 93
.byte 125
.byte 161
.byte 26
.byte 172
.byte 21
.byte 29
.byte 164
.byte 20
.byte 173
.byte 2
.byte 177
.byte 12
.byte 188
.byte 4
.byte 180
.byte 13
.byte 189
.byte 225
.byte 200
.byte 236
.byte 197
.byte 205
.byte 228
.byte 196
.byte 237
.byte 209
.byte 241
.byte 220
.byte 252
.byte 212
.byte 244
.byte 221
.byte 253
.byte 54
.byte 142
.byte 56
.byte 130
.byte 139
.byte 48
.byte 131
.byte 57
.byte 150
.byte 38
.byte 154
.byte 40
.byte 147
.byte 32
.byte 155
.byte 41
.byte 102
.byte 78
.byte 104
.byte 65
.byte 73
.byte 96
.byte 64
.byte 105
.byte 86
.byte 118
.byte 88
.byte 120
.byte 80
.byte 112
.byte 89
.byte 121
.byte 166
.byte 30
.byte 170
.byte 17
.byte 25
.byte 163
.byte 16
.byte 171
.byte 6
.byte 182
.byte 8
.byte 186
.byte 0
.byte 179
.byte 9
.byte 187
.byte 230
.byte 206
.byte 234
.byte 194
.byte 203
.byte 227
.byte 195
.byte 235
.byte 214
.byte 246
.byte 218
.byte 250
.byte 211
.byte 243
.byte 219
.byte 251
.byte 49
.byte 138
.byte 62
.byte 134
.byte 143
.byte 55
.byte 135
.byte 63
.byte 146
.byte 33
.byte 158
.byte 46
.byte 151
.byte 39
.byte 159
.byte 47
.byte 97
.byte 72
.byte 110
.byte 70
.byte 79
.byte 103
.byte 71
.byte 111
.byte 81
.byte 113
.byte 94
.byte 126
.byte 87
.byte 119
.byte 95
.byte 127
.byte 162
.byte 24
.byte 174
.byte 22
.byte 31
.byte 167
.byte 23
.byte 175
.byte 1
.byte 178
.byte 14
.byte 190
.byte 7
.byte 183
.byte 15
.byte 191
.byte 226
.byte 202
.byte 238
.byte 198
.byte 207
.byte 231
.byte 199
.byte 239
.byte 210
.byte 242
.byte 222
.byte 254
.byte 215
.byte 247
.byte 223
.byte 255
.section .progmem.data,"a",@progbits
.p2align 8
.type table_1, @object
.size table_1, 256
table_1:
.byte 172
.byte 232
.byte 104
.byte 60
.byte 108
.byte 56
.byte 168
.byte 236
.byte 170
.byte 174
.byte 58
.byte 62
.byte 106
.byte 110
.byte 234
.byte 238
.byte 166
.byte 163
.byte 51
.byte 54
.byte 102
.byte 99
.byte 227
.byte 230
.byte 225
.byte 164
.byte 97
.byte 52
.byte 49
.byte 100
.byte 161
.byte 228
.byte 141
.byte 201
.byte 73
.byte 29
.byte 77
.byte 25
.byte 137
.byte 205
.byte 139
.byte 143
.byte 27
.byte 31
.byte 75
.byte 79
.byte 203
.byte 207
.byte 133
.byte 192
.byte 64
.byte 21
.byte 69
.byte 16
.byte 128
.byte 197
.byte 130
.byte 135
.byte 18
.byte 23
.byte 66
.byte 71
.byte 194
.byte 199
.byte 150
.byte 147
.byte 3
.byte 6
.byte 86
.byte 83
.byte 211
.byte 214
.byte 209
.byte 148
.byte 81
.byte 4
.byte 1
.byte 84
.byte 145
.byte 212
.byte 156
.byte 216
.byte 88
.byte 12
.byte 92
.byte 8
.byte 152
.byte 220
.byte 154
.byte 158
.byte 10
.byte 14
.byte 90
.byte 94
.byte 218
.byte 222
.byte 149
.byte 208
.byte 80
.byte 5
.byte 85
.byte 0
.byte 144
.byte 213
.byte 146
.byte 151
.byte 2
.byte 7
.byte 82
.byte 87
.byte 210
.byte 215
.byte 157
.byte 217
.byte 89
.byte 13
.byte 93
.byte 9
.byte 153
.byte 221
.byte 155
.byte 159
.byte 11
.byte 15
.byte 91
.byte 95
.byte 219
.byte 223
.byte 22
.byte 19
.byte 131
.byte 134
.byte 70
.byte 67
.byte 195
.byte 198
.byte 65
.byte 20
.byte 193
.byte 132
.byte 17
.byte 68
.byte 129
.byte 196
.byte 28
.byte 72
.byte 200
.byte 140
.byte 76
.byte 24
.byte 136
.byte 204
.byte 26
.byte 30
.byte 138
.byte 142
.byte 74
.byte 78
.byte 202
.byte 206
.byte 53
.byte 96
.byte 224
.byte 165
.byte 101
.byte 48
.byte 160
.byte 229
.byte 50
.byte 55
.byte 162
.byte 167
.byte 98
.byte 103
.byte 226
.byte 231
.byte 61
.byte 105
.byte 233
.byte 173
.byte 109
.byte 57
.byte 169
.byte 237
.byte 59
.byte 63
.byte 171
.byte 175
.byte 107
.byte 111
.byte 235
.byte 239
.byte 38
.byte 35
.byte 179
.byte 182
.byte 118
.byte 115
.byte 243
.byte 246
.byte 113
.byte 36
.byte 241
.byte 180
.byte 33
.byte 116
.byte 177
.byte 244
.byte 44
.byte 120
.byte 248
.byte 188
.byte 124
.byte 40
.byte 184
.byte 252
.byte 42
.byte 46
.byte 186
.byte 190
.byte 122
.byte 126
.byte 250
.byte 254
.byte 37
.byte 112
.byte 240
.byte 181
.byte 117
.byte 32
.byte 176
.byte 245
.byte 34
.byte 39
.byte 178
.byte 183
.byte 114
.byte 119
.byte 242
.byte 247
.byte 45
.byte 121
.byte 249
.byte 189
.byte 125
.byte 41
.byte 185
.byte 253
.byte 43
.byte 47
.byte 187
.byte 191
.byte 123
.byte 127
.byte 251
.byte 255
.section .progmem.data,"a",@progbits
.p2align 8
.type table_2, @object
.size table_2, 256
table_2:
.byte 0
.byte 2
.byte 4
.byte 6
.byte 8
.byte 10
.byte 12
.byte 14
.byte 16
.byte 18
.byte 20
.byte 22
.byte 24
.byte 26
.byte 28
.byte 30
.byte 32
.byte 34
.byte 36
.byte 38
.byte 40
.byte 42
.byte 44
.byte 46
.byte 48
.byte 50
.byte 52
.byte 54
.byte 56
.byte 58
.byte 60
.byte 62
.byte 65
.byte 67
.byte 69
.byte 71
.byte 73
.byte 75
.byte 77
.byte 79
.byte 81
.byte 83
.byte 85
.byte 87
.byte 89
.byte 91
.byte 93
.byte 95
.byte 97
.byte 99
.byte 101
.byte 103
.byte 105
.byte 107
.byte 109
.byte 111
.byte 113
.byte 115
.byte 117
.byte 119
.byte 121
.byte 123
.byte 125
.byte 127
.byte 128
.byte 130
.byte 132
.byte 134
.byte 136
.byte 138
.byte 140
.byte 142
.byte 144
.byte 146
.byte 148
.byte 150
.byte 152
.byte 154
.byte 156
.byte 158
.byte 160
.byte 162
.byte 164
.byte 166
.byte 168
.byte 170
.byte 172
.byte 174
.byte 176
.byte 178
.byte 180
.byte 182
.byte 184
.byte 186
.byte 188
.byte 190
.byte 193
.byte 195
.byte 197
.byte 199
.byte 201
.byte 203
.byte 205
.byte 207
.byte 209
.byte 211
.byte 213
.byte 215
.byte 217
.byte 219
.byte 221
.byte 223
.byte 225
.byte 227
.byte 229
.byte 231
.byte 233
.byte 235
.byte 237
.byte 239
.byte 241
.byte 243
.byte 245
.byte 247
.byte 249
.byte 251
.byte 253
.byte 255
.byte 1
.byte 3
.byte 5
.byte 7
.byte 9
.byte 11
.byte 13
.byte 15
.byte 17
.byte 19
.byte 21
.byte 23
.byte 25
.byte 27
.byte 29
.byte 31
.byte 33
.byte 35
.byte 37
.byte 39
.byte 41
.byte 43
.byte 45
.byte 47
.byte 49
.byte 51
.byte 53
.byte 55
.byte 57
.byte 59
.byte 61
.byte 63
.byte 64
.byte 66
.byte 68
.byte 70
.byte 72
.byte 74
.byte 76
.byte 78
.byte 80
.byte 82
.byte 84
.byte 86
.byte 88
.byte 90
.byte 92
.byte 94
.byte 96
.byte 98
.byte 100
.byte 102
.byte 104
.byte 106
.byte 108
.byte 110
.byte 112
.byte 114
.byte 116
.byte 118
.byte 120
.byte 122
.byte 124
.byte 126
.byte 129
.byte 131
.byte 133
.byte 135
.byte 137
.byte 139
.byte 141
.byte 143
.byte 145
.byte 147
.byte 149
.byte 151
.byte 153
.byte 155
.byte 157
.byte 159
.byte 161
.byte 163
.byte 165
.byte 167
.byte 169
.byte 171
.byte 173
.byte 175
.byte 177
.byte 179
.byte 181
.byte 183
.byte 185
.byte 187
.byte 189
.byte 191
.byte 192
.byte 194
.byte 196
.byte 198
.byte 200
.byte 202
.byte 204
.byte 206
.byte 208
.byte 210
.byte 212
.byte 214
.byte 216
.byte 218
.byte 220
.byte 222
.byte 224
.byte 226
.byte 228
.byte 230
.byte 232
.byte 234
.byte 236
.byte 238
.byte 240
.byte 242
.byte 244
.byte 246
.byte 248
.byte 250
.byte 252
.byte 254
.section .progmem.data,"a",@progbits
.p2align 8
.type table_3, @object
.size table_3, 256
table_3:
.byte 0
.byte 128
.byte 1
.byte 129
.byte 2
.byte 130
.byte 3
.byte 131
.byte 4
.byte 132
.byte 5
.byte 133
.byte 6
.byte 134
.byte 7
.byte 135
.byte 8
.byte 136
.byte 9
.byte 137
.byte 10
.byte 138
.byte 11
.byte 139
.byte 12
.byte 140
.byte 13
.byte 141
.byte 14
.byte 142
.byte 15
.byte 143
.byte 16
.byte 144
.byte 17
.byte 145
.byte 18
.byte 146
.byte 19
.byte 147
.byte 20
.byte 148
.byte 21
.byte 149
.byte 22
.byte 150
.byte 23
.byte 151
.byte 24
.byte 152
.byte 25
.byte 153
.byte 26
.byte 154
.byte 27
.byte 155
.byte 28
.byte 156
.byte 29
.byte 157
.byte 30
.byte 158
.byte 31
.byte 159
.byte 160
.byte 32
.byte 161
.byte 33
.byte 162
.byte 34
.byte 163
.byte 35
.byte 164
.byte 36
.byte 165
.byte 37
.byte 166
.byte 38
.byte 167
.byte 39
.byte 168
.byte 40
.byte 169
.byte 41
.byte 170
.byte 42
.byte 171
.byte 43
.byte 172
.byte 44
.byte 173
.byte 45
.byte 174
.byte 46
.byte 175
.byte 47
.byte 176
.byte 48
.byte 177
.byte 49
.byte 178
.byte 50
.byte 179
.byte 51
.byte 180
.byte 52
.byte 181
.byte 53
.byte 182
.byte 54
.byte 183
.byte 55
.byte 184
.byte 56
.byte 185
.byte 57
.byte 186
.byte 58
.byte 187
.byte 59
.byte 188
.byte 60
.byte 189
.byte 61
.byte 190
.byte 62
.byte 191
.byte 63
.byte 64
.byte 192
.byte 65
.byte 193
.byte 66
.byte 194
.byte 67
.byte 195
.byte 68
.byte 196
.byte 69
.byte 197
.byte 70
.byte 198
.byte 71
.byte 199
.byte 72
.byte 200
.byte 73
.byte 201
.byte 74
.byte 202
.byte 75
.byte 203
.byte 76
.byte 204
.byte 77
.byte 205
.byte 78
.byte 206
.byte 79
.byte 207
.byte 80
.byte 208
.byte 81
.byte 209
.byte 82
.byte 210
.byte 83
.byte 211
.byte 84
.byte 212
.byte 85
.byte 213
.byte 86
.byte 214
.byte 87
.byte 215
.byte 88
.byte 216
.byte 89
.byte 217
.byte 90
.byte 218
.byte 91
.byte 219
.byte 92
.byte 220
.byte 93
.byte 221
.byte 94
.byte 222
.byte 95
.byte 223
.byte 224
.byte 96
.byte 225
.byte 97
.byte 226
.byte 98
.byte 227
.byte 99
.byte 228
.byte 100
.byte 229
.byte 101
.byte 230
.byte 102
.byte 231
.byte 103
.byte 232
.byte 104
.byte 233
.byte 105
.byte 234
.byte 106
.byte 235
.byte 107
.byte 236
.byte 108
.byte 237
.byte 109
.byte 238
.byte 110
.byte 239
.byte 111
.byte 240
.byte 112
.byte 241
.byte 113
.byte 242
.byte 114
.byte 243
.byte 115
.byte 244
.byte 116
.byte 245
.byte 117
.byte 246
.byte 118
.byte 247
.byte 119
.byte 248
.byte 120
.byte 249
.byte 121
.byte 250
.byte 122
.byte 251
.byte 123
.byte 252
.byte 124
.byte 253
.byte 125
.byte 254
.byte 126
.byte 255
.byte 127
.section .progmem.data,"a",@progbits
.p2align 8
.type table_4, @object
.size table_4, 112
table_4:
.byte 1
.byte 0
.byte 3
.byte 0
.byte 7
.byte 0
.byte 15
.byte 0
.byte 15
.byte 1
.byte 14
.byte 3
.byte 13
.byte 3
.byte 11
.byte 3
.byte 7
.byte 3
.byte 15
.byte 2
.byte 14
.byte 1
.byte 12
.byte 3
.byte 9
.byte 3
.byte 3
.byte 3
.byte 7
.byte 2
.byte 14
.byte 0
.byte 13
.byte 1
.byte 10
.byte 3
.byte 5
.byte 3
.byte 11
.byte 2
.byte 6
.byte 1
.byte 12
.byte 2
.byte 8
.byte 1
.byte 0
.byte 3
.byte 1
.byte 2
.byte 2
.byte 0
.byte 5
.byte 0
.byte 11
.byte 0
.byte 7
.byte 1
.byte 14
.byte 2
.byte 12
.byte 1
.byte 8
.byte 3
.byte 1
.byte 3
.byte 3
.byte 2
.byte 6
.byte 0
.byte 13
.byte 0
.byte 11
.byte 1
.byte 6
.byte 3
.byte 13
.byte 2
.byte 10
.byte 1
.byte 4
.byte 3
.byte 9
.byte 2
.byte 2
.byte 1
.byte 4
.byte 2
.byte 8
.byte 0
.byte 1
.byte 1
.byte 2
.byte 2
.byte 4
.byte 0
.byte 9
.byte 0
.byte 3
.byte 1
.byte 6
.byte 2
.byte 12
.byte 0
.byte 9
.byte 1
.byte 2
.byte 3
.byte 5
.byte 2
.byte 10
.byte 0
.text
; skinny_plus_init_without_tk1(ks, tk2, tk3):
;   arg1 (r25:r24) -> key schedule buffer (accessed via Z)
;   arg2 (r23:r22) -> 16-byte TK2 block    (accessed via X)
;   arg3 (r21:r20) -> 16-byte TK3 block    (accessed via Y)
; Copies TK2 to ks+16 and TK3 to ks+32, leaving the first 16 bytes
; (the TK1 slot) of the schedule untouched.
; NOTE(review): this file is auto-generated ("do not edit"); these
; comments will be lost on regeneration.
.global skinny_plus_init_without_tk1
	.type skinny_plus_init_without_tk1, @function
skinny_plus_init_without_tk1:
	push r28
	push r29
	movw r30,r24            ; Z = key schedule pointer
	movw r26,r22            ; X = tk2 pointer
	.L__stack_usage = 2
	movw r28,r20            ; Y = tk3 pointer
	adiw r30,16             ; skip the 16-byte TK1 slot
	; copy 16 bytes of TK2 into ks[16..31], 4 bytes at a time
	ld r18,X+
	ld r19,X+
	ld r22,X+
	ld r23,X+
	st Z+,r18
	st Z+,r19
	st Z+,r22
	st Z+,r23
	ld r18,X+
	ld r19,X+
	ld r22,X+
	ld r23,X+
	st Z+,r18
	st Z+,r19
	st Z+,r22
	st Z+,r23
	ld r18,X+
	ld r19,X+
	ld r22,X+
	ld r23,X+
	st Z+,r18
	st Z+,r19
	st Z+,r22
	st Z+,r23
	ld r18,X+
	ld r19,X+
	ld r22,X+
	ld r23,X+
	st Z+,r18
	st Z+,r19
	st Z+,r22
	st Z+,r23
	; copy 16 bytes of TK3 into ks[32..47], 4 bytes at a time
	ld r18,Y+
	ld r19,Y+
	ld r22,Y+
	ld r23,Y+
	st Z+,r18
	st Z+,r19
	st Z+,r22
	st Z+,r23
	ld r18,Y+
	ld r19,Y+
	ld r22,Y+
	ld r23,Y+
	st Z+,r18
	st Z+,r19
	st Z+,r22
	st Z+,r23
	ld r18,Y+
	ld r19,Y+
	ld r22,Y+
	ld r23,Y+
	st Z+,r18
	st Z+,r19
	st Z+,r22
	st Z+,r23
	ld r18,Y+
	ld r19,Y+
	ld r22,Y+
	ld r23,Y+
	st Z+,r18
	st Z+,r19
	st Z+,r22
	st Z+,r23
	pop r29
	pop r28
	ret
	.size skinny_plus_init_without_tk1, .-skinny_plus_init_without_tk1
; ---------------------------------------------------------------------------
; skinny_plus_encrypt -- encrypt one 16-byte block (AVR, auto-generated,
; fully unrolled code; do not hand-edit instruction order).
;
; avr-gcc calling convention (as used below):
;   r25:r24  first arg  -> Z: pointer to a 48-byte key schedule; the 48
;            bytes are copied into the stack frame at Y+1..Y+48
;            (Y+1..16, Y+17..32, Y+33..48 = the three 16-byte key blocks).
;   r23:r22  second arg -> output pointer; pushed in the prologue and
;            re-read from Y+49/Y+50 at the end, where 16 bytes are written.
;   r21:r20  third arg  -> X: input pointer; 16 bytes are read into the
;            state registers r18..r21, r22..r23, r2..r11.
;
; Flash lookup tables (contents defined elsewhere in this file; presumably
; the SKINNY S-box, tweakey LFSRs and round constants -- confirm against
; the table definitions):
;   table_0  applied to every state byte once per round (S-box layer)
;   table_2  applied to the second key block (Y+17..32) when it is permuted
;   table_3  applied to the third key block (Y+33..48) when it is permuted
;   table_4  indexed by r26; two bytes consumed per round and XORed into
;            the state together with the fixed constant 0x02
;
; The main loop at "114:" contains four unrolled rounds; r26 advances by 8
; per iteration and the loop exits when r26 == 80, i.e. after 40 rounds.
; On devices with RAMPZ, the caller's RAMPZ is saved on the stack before
; the first table access and restored at "790:".
; ---------------------------------------------------------------------------
.text
.global skinny_plus_encrypt
.type skinny_plus_encrypt, @function
skinny_plus_encrypt:
; prologue: save call-saved registers and the output pointer (r23:r22),
; then reserve 48 bytes of frame (Y) for the key-schedule copy
push r28
push r29
push r2
push r3
push r4
push r5
push r6
push r7
push r8
push r9
push r10
push r11
push r12
push r13
push r14
push r15
push r16
push r17
push r23
push r22
movw r30,r24
movw r26,r20
in r28,0x3d
in r29,0x3e
sbiw r28,48
in r0,0x3f
cli
out 0x3e,r29
out 0x3f,r0
out 0x3d,r28
.L__stack_usage = 68
; copy the 48-byte key schedule from Z into the frame at Y+1..Y+48
ld r18,Z
ldd r19,Z+1
ldd r20,Z+2
ldd r21,Z+3
std Y+1,r18
std Y+2,r19
std Y+3,r20
std Y+4,r21
ldd r18,Z+4
ldd r19,Z+5
ldd r20,Z+6
ldd r21,Z+7
std Y+5,r18
std Y+6,r19
std Y+7,r20
std Y+8,r21
ldd r18,Z+8
ldd r19,Z+9
ldd r20,Z+10
ldd r21,Z+11
std Y+9,r18
std Y+10,r19
std Y+11,r20
std Y+12,r21
ldd r18,Z+12
ldd r19,Z+13
ldd r20,Z+14
ldd r21,Z+15
std Y+13,r18
std Y+14,r19
std Y+15,r20
std Y+16,r21
ldd r18,Z+16
ldd r19,Z+17
ldd r20,Z+18
ldd r21,Z+19
std Y+17,r18
std Y+18,r19
std Y+19,r20
std Y+20,r21
ldd r18,Z+20
ldd r19,Z+21
ldd r20,Z+22
ldd r21,Z+23
std Y+21,r18
std Y+22,r19
std Y+23,r20
std Y+24,r21
ldd r18,Z+24
ldd r19,Z+25
ldd r20,Z+26
ldd r21,Z+27
std Y+25,r18
std Y+26,r19
std Y+27,r20
std Y+28,r21
ldd r18,Z+28
ldd r19,Z+29
ldd r20,Z+30
ldd r21,Z+31
std Y+29,r18
std Y+30,r19
std Y+31,r20
std Y+32,r21
ldd r18,Z+32
ldd r19,Z+33
ldd r20,Z+34
ldd r21,Z+35
std Y+33,r18
std Y+34,r19
std Y+35,r20
std Y+36,r21
ldd r18,Z+36
ldd r19,Z+37
ldd r20,Z+38
ldd r21,Z+39
std Y+37,r18
std Y+38,r19
std Y+39,r20
std Y+40,r21
ldd r18,Z+40
ldd r19,Z+41
ldd r20,Z+42
ldd r21,Z+43
std Y+41,r18
std Y+42,r19
std Y+43,r20
std Y+44,r21
ldd r18,Z+44
ldd r19,Z+45
ldd r20,Z+46
ldd r21,Z+47
std Y+45,r18
std Y+46,r19
std Y+47,r20
std Y+48,r21
; load the 16-byte input block into the state registers via X
ld r18,X+
ld r19,X+
ld r20,X+
ld r21,X+
ld r22,X+
ld r23,X+
ld r2,X+
ld r3,X+
ld r4,X+
ld r5,X+
ld r6,X+
ld r7,X+
ld r8,X+
ld r9,X+
ld r10,X+
ld r11,X+
; point Z at table_0; on RAMPZ devices save the caller's RAMPZ and load
; the table's high byte so elpm reaches it
ldi r30,lo8(table_0)
ldi r31,hi8(table_0)
#if defined(RAMPZ)
ldi r26,hh8(table_0)
in r0,_SFR_IO_ADDR(RAMPZ)
push r0
out _SFR_IO_ADDR(RAMPZ),r26
#endif
; r26 = running index into table_4 (r1 is the fixed zero register)
mov r26,r1
; main loop: four unrolled rounds per iteration
114:
; round A: table_0 lookup on each of the 16 state bytes
mov r30,r18
#if defined(RAMPZ)
elpm r18,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r18,Z
#elif defined(__AVR_TINY__)
ld r18,Z
#else
lpm
mov r18,r0
#endif
mov r30,r19
#if defined(RAMPZ)
elpm r19,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r19,Z
#elif defined(__AVR_TINY__)
ld r19,Z
#else
lpm
mov r19,r0
#endif
mov r30,r20
#if defined(RAMPZ)
elpm r20,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r20,Z
#elif defined(__AVR_TINY__)
ld r20,Z
#else
lpm
mov r20,r0
#endif
mov r30,r21
#if defined(RAMPZ)
elpm r21,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r21,Z
#elif defined(__AVR_TINY__)
ld r21,Z
#else
lpm
mov r21,r0
#endif
mov r30,r22
#if defined(RAMPZ)
elpm r22,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r22,Z
#elif defined(__AVR_TINY__)
ld r22,Z
#else
lpm
mov r22,r0
#endif
mov r30,r23
#if defined(RAMPZ)
elpm r23,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r23,Z
#elif defined(__AVR_TINY__)
ld r23,Z
#else
lpm
mov r23,r0
#endif
mov r30,r2
#if defined(RAMPZ)
elpm r2,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r2,Z
#elif defined(__AVR_TINY__)
ld r2,Z
#else
lpm
mov r2,r0
#endif
mov r30,r3
#if defined(RAMPZ)
elpm r3,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r3,Z
#elif defined(__AVR_TINY__)
ld r3,Z
#else
lpm
mov r3,r0
#endif
mov r30,r4
#if defined(RAMPZ)
elpm r4,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r4,Z
#elif defined(__AVR_TINY__)
ld r4,Z
#else
lpm
mov r4,r0
#endif
mov r30,r5
#if defined(RAMPZ)
elpm r5,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r5,Z
#elif defined(__AVR_TINY__)
ld r5,Z
#else
lpm
mov r5,r0
#endif
mov r30,r6
#if defined(RAMPZ)
elpm r6,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r6,Z
#elif defined(__AVR_TINY__)
ld r6,Z
#else
lpm
mov r6,r0
#endif
mov r30,r7
#if defined(RAMPZ)
elpm r7,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r7,Z
#elif defined(__AVR_TINY__)
ld r7,Z
#else
lpm
mov r7,r0
#endif
mov r30,r8
#if defined(RAMPZ)
elpm r8,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r8,Z
#elif defined(__AVR_TINY__)
ld r8,Z
#else
lpm
mov r8,r0
#endif
mov r30,r9
#if defined(RAMPZ)
elpm r9,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r9,Z
#elif defined(__AVR_TINY__)
ld r9,Z
#else
lpm
mov r9,r0
#endif
mov r30,r10
#if defined(RAMPZ)
elpm r10,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r10,Z
#elif defined(__AVR_TINY__)
ld r10,Z
#else
lpm
mov r10,r0
#endif
mov r30,r11
#if defined(RAMPZ)
elpm r11,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r11,Z
#elif defined(__AVR_TINY__)
ld r11,Z
#else
lpm
mov r11,r0
#endif
; fetch two round-constant bytes from table_4[r26] and XOR them plus the
; fixed constant 0x02 into the state
ldi r30,lo8(table_4)
ldi r31,hi8(table_4)
#if defined(RAMPZ)
ldi r24,hh8(table_4)
out _SFR_IO_ADDR(RAMPZ),r24
#endif
mov r30,r26
#if defined(RAMPZ)
elpm r27,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r27,Z
#elif defined(__AVR_TINY__)
ld r27,Z
#else
lpm
mov r27,r0
#endif
eor r18,r27
inc r26
mov r30,r26
#if defined(RAMPZ)
elpm r27,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r27,Z
#elif defined(__AVR_TINY__)
ld r27,Z
#else
lpm
mov r27,r0
#endif
eor r22,r27
inc r26
ldi r27,2
eor r4,r27
; XOR the round tweakey: first two state rows with all three key blocks
ldd r0,Y+1
eor r18,r0
ldd r0,Y+2
eor r19,r0
ldd r0,Y+3
eor r20,r0
ldd r0,Y+4
eor r21,r0
ldd r0,Y+17
eor r18,r0
ldd r0,Y+18
eor r19,r0
ldd r0,Y+19
eor r20,r0
ldd r0,Y+20
eor r21,r0
ldd r0,Y+33
eor r18,r0
ldd r0,Y+34
eor r19,r0
ldd r0,Y+35
eor r20,r0
ldd r0,Y+36
eor r21,r0
ldd r0,Y+5
eor r22,r0
ldd r0,Y+6
eor r23,r0
ldd r0,Y+7
eor r2,r0
ldd r0,Y+8
eor r3,r0
ldd r0,Y+21
eor r22,r0
ldd r0,Y+22
eor r23,r0
ldd r0,Y+23
eor r2,r0
ldd r0,Y+24
eor r3,r0
ldd r0,Y+37
eor r22,r0
ldd r0,Y+38
eor r23,r0
ldd r0,Y+39
eor r2,r0
ldd r0,Y+40
eor r3,r0
; rotate the three lower state rows (shift-rows layer)
mov r0,r3
mov r3,r2
mov r2,r23
mov r23,r22
mov r22,r0
mov r0,r6
mov r6,r4
mov r4,r0
mov r0,r7
mov r7,r5
mov r5,r0
mov r0,r8
mov r8,r9
mov r9,r10
mov r10,r11
mov r11,r0
; mix the columns by XORing rows into one another
eor r22,r4
eor r23,r5
eor r2,r6
eor r3,r7
eor r4,r18
eor r5,r19
eor r6,r20
eor r7,r21
eor r8,r4
eor r9,r5
eor r10,r6
eor r11,r7
; permute the second half of key block 1 (Y+9..16): byte shuffle only
ldd r12,Y+9
ldd r13,Y+10
ldd r14,Y+11
ldd r15,Y+12
ldd r24,Y+13
ldd r25,Y+14
ldd r16,Y+15
ldd r17,Y+16
std Y+9,r13
std Y+10,r17
std Y+11,r12
std Y+12,r25
std Y+13,r14
std Y+14,r16
std Y+15,r24
std Y+16,r15
; key block 2 (Y+25..32): map every byte through table_2, then shuffle
ldi r30,lo8(table_2)
ldi r31,hi8(table_2)
#if defined(RAMPZ)
ldi r27,hh8(table_2)
out _SFR_IO_ADDR(RAMPZ),r27
#endif
ldd r12,Y+25
ldd r13,Y+26
ldd r14,Y+27
ldd r15,Y+28
ldd r24,Y+29
ldd r25,Y+30
ldd r16,Y+31
ldd r17,Y+32
mov r30,r12
#if defined(RAMPZ)
elpm r12,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r12,Z
#elif defined(__AVR_TINY__)
ld r12,Z
#else
lpm
mov r12,r0
#endif
mov r30,r13
#if defined(RAMPZ)
elpm r13,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r13,Z
#elif defined(__AVR_TINY__)
ld r13,Z
#else
lpm
mov r13,r0
#endif
mov r30,r14
#if defined(RAMPZ)
elpm r14,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r14,Z
#elif defined(__AVR_TINY__)
ld r14,Z
#else
lpm
mov r14,r0
#endif
mov r30,r15
#if defined(RAMPZ)
elpm r15,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r15,Z
#elif defined(__AVR_TINY__)
ld r15,Z
#else
lpm
mov r15,r0
#endif
mov r30,r24
#if defined(RAMPZ)
elpm r24,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r24,Z
#elif defined(__AVR_TINY__)
ld r24,Z
#else
lpm
mov r24,r0
#endif
mov r30,r25
#if defined(RAMPZ)
elpm r25,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r25,Z
#elif defined(__AVR_TINY__)
ld r25,Z
#else
lpm
mov r25,r0
#endif
mov r30,r16
#if defined(RAMPZ)
elpm r16,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r16,Z
#elif defined(__AVR_TINY__)
ld r16,Z
#else
lpm
mov r16,r0
#endif
mov r30,r17
#if defined(RAMPZ)
elpm r17,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r17,Z
#elif defined(__AVR_TINY__)
ld r17,Z
#else
lpm
mov r17,r0
#endif
std Y+25,r13
std Y+26,r17
std Y+27,r12
std Y+28,r25
std Y+29,r14
std Y+30,r16
std Y+31,r24
std Y+32,r15
; key block 3 (Y+41..48): map every byte through table_3, then shuffle
ldi r30,lo8(table_3)
ldi r31,hi8(table_3)
#if defined(RAMPZ)
ldi r27,hh8(table_3)
out _SFR_IO_ADDR(RAMPZ),r27
#endif
ldd r12,Y+41
ldd r13,Y+42
ldd r14,Y+43
ldd r15,Y+44
ldd r24,Y+45
ldd r25,Y+46
ldd r16,Y+47
ldd r17,Y+48
mov r30,r12
#if defined(RAMPZ)
elpm r12,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r12,Z
#elif defined(__AVR_TINY__)
ld r12,Z
#else
lpm
mov r12,r0
#endif
mov r30,r13
#if defined(RAMPZ)
elpm r13,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r13,Z
#elif defined(__AVR_TINY__)
ld r13,Z
#else
lpm
mov r13,r0
#endif
mov r30,r14
#if defined(RAMPZ)
elpm r14,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r14,Z
#elif defined(__AVR_TINY__)
ld r14,Z
#else
lpm
mov r14,r0
#endif
mov r30,r15
#if defined(RAMPZ)
elpm r15,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r15,Z
#elif defined(__AVR_TINY__)
ld r15,Z
#else
lpm
mov r15,r0
#endif
mov r30,r24
#if defined(RAMPZ)
elpm r24,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r24,Z
#elif defined(__AVR_TINY__)
ld r24,Z
#else
lpm
mov r24,r0
#endif
mov r30,r25
#if defined(RAMPZ)
elpm r25,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r25,Z
#elif defined(__AVR_TINY__)
ld r25,Z
#else
lpm
mov r25,r0
#endif
mov r30,r16
#if defined(RAMPZ)
elpm r16,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r16,Z
#elif defined(__AVR_TINY__)
ld r16,Z
#else
lpm
mov r16,r0
#endif
mov r30,r17
#if defined(RAMPZ)
elpm r17,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r17,Z
#elif defined(__AVR_TINY__)
ld r17,Z
#else
lpm
mov r17,r0
#endif
std Y+41,r13
std Y+42,r17
std Y+43,r12
std Y+44,r25
std Y+45,r14
std Y+46,r16
std Y+47,r24
std Y+48,r15
; back to table_0 for the next round's byte substitution
ldi r30,lo8(table_0)
ldi r31,hi8(table_0)
#if defined(RAMPZ)
ldi r27,hh8(table_0)
out _SFR_IO_ADDR(RAMPZ),r27
#endif
; round B: state rows have rotated, so lookups start at r8
mov r30,r8
#if defined(RAMPZ)
elpm r8,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r8,Z
#elif defined(__AVR_TINY__)
ld r8,Z
#else
lpm
mov r8,r0
#endif
mov r30,r9
#if defined(RAMPZ)
elpm r9,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r9,Z
#elif defined(__AVR_TINY__)
ld r9,Z
#else
lpm
mov r9,r0
#endif
mov r30,r10
#if defined(RAMPZ)
elpm r10,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r10,Z
#elif defined(__AVR_TINY__)
ld r10,Z
#else
lpm
mov r10,r0
#endif
mov r30,r11
#if defined(RAMPZ)
elpm r11,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r11,Z
#elif defined(__AVR_TINY__)
ld r11,Z
#else
lpm
mov r11,r0
#endif
mov r30,r18
#if defined(RAMPZ)
elpm r18,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r18,Z
#elif defined(__AVR_TINY__)
ld r18,Z
#else
lpm
mov r18,r0
#endif
mov r30,r19
#if defined(RAMPZ)
elpm r19,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r19,Z
#elif defined(__AVR_TINY__)
ld r19,Z
#else
lpm
mov r19,r0
#endif
mov r30,r20
#if defined(RAMPZ)
elpm r20,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r20,Z
#elif defined(__AVR_TINY__)
ld r20,Z
#else
lpm
mov r20,r0
#endif
mov r30,r21
#if defined(RAMPZ)
elpm r21,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r21,Z
#elif defined(__AVR_TINY__)
ld r21,Z
#else
lpm
mov r21,r0
#endif
mov r30,r22
#if defined(RAMPZ)
elpm r22,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r22,Z
#elif defined(__AVR_TINY__)
ld r22,Z
#else
lpm
mov r22,r0
#endif
mov r30,r23
#if defined(RAMPZ)
elpm r23,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r23,Z
#elif defined(__AVR_TINY__)
ld r23,Z
#else
lpm
mov r23,r0
#endif
mov r30,r2
#if defined(RAMPZ)
elpm r2,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r2,Z
#elif defined(__AVR_TINY__)
ld r2,Z
#else
lpm
mov r2,r0
#endif
mov r30,r3
#if defined(RAMPZ)
elpm r3,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r3,Z
#elif defined(__AVR_TINY__)
ld r3,Z
#else
lpm
mov r3,r0
#endif
mov r30,r4
#if defined(RAMPZ)
elpm r4,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r4,Z
#elif defined(__AVR_TINY__)
ld r4,Z
#else
lpm
mov r4,r0
#endif
mov r30,r5
#if defined(RAMPZ)
elpm r5,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r5,Z
#elif defined(__AVR_TINY__)
ld r5,Z
#else
lpm
mov r5,r0
#endif
mov r30,r6
#if defined(RAMPZ)
elpm r6,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r6,Z
#elif defined(__AVR_TINY__)
ld r6,Z
#else
lpm
mov r6,r0
#endif
mov r30,r7
#if defined(RAMPZ)
elpm r7,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r7,Z
#elif defined(__AVR_TINY__)
ld r7,Z
#else
lpm
mov r7,r0
#endif
; round B constants
ldi r30,lo8(table_4)
ldi r31,hi8(table_4)
#if defined(RAMPZ)
ldi r24,hh8(table_4)
out _SFR_IO_ADDR(RAMPZ),r24
#endif
mov r30,r26
#if defined(RAMPZ)
elpm r27,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r27,Z
#elif defined(__AVR_TINY__)
ld r27,Z
#else
lpm
mov r27,r0
#endif
eor r8,r27
inc r26
mov r30,r26
#if defined(RAMPZ)
elpm r27,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r27,Z
#elif defined(__AVR_TINY__)
ld r27,Z
#else
lpm
mov r27,r0
#endif
eor r18,r27
inc r26
ldi r27,2
eor r22,r27
; round B tweakey: this round uses the second halves (Y+9..16 etc.)
ldd r0,Y+9
eor r8,r0
ldd r0,Y+10
eor r9,r0
ldd r0,Y+11
eor r10,r0
ldd r0,Y+12
eor r11,r0
ldd r0,Y+25
eor r8,r0
ldd r0,Y+26
eor r9,r0
ldd r0,Y+27
eor r10,r0
ldd r0,Y+28
eor r11,r0
ldd r0,Y+41
eor r8,r0
ldd r0,Y+42
eor r9,r0
ldd r0,Y+43
eor r10,r0
ldd r0,Y+44
eor r11,r0
ldd r0,Y+13
eor r18,r0
ldd r0,Y+14
eor r19,r0
ldd r0,Y+15
eor r20,r0
ldd r0,Y+16
eor r21,r0
ldd r0,Y+29
eor r18,r0
ldd r0,Y+30
eor r19,r0
ldd r0,Y+31
eor r20,r0
ldd r0,Y+32
eor r21,r0
ldd r0,Y+45
eor r18,r0
ldd r0,Y+46
eor r19,r0
ldd r0,Y+47
eor r20,r0
ldd r0,Y+48
eor r21,r0
; round B shift rows
mov r0,r21
mov r21,r20
mov r20,r19
mov r19,r18
mov r18,r0
mov r0,r2
mov r2,r22
mov r22,r0
mov r0,r3
mov r3,r23
mov r23,r0
mov r0,r4
mov r4,r5
mov r5,r6
mov r6,r7
mov r7,r0
; round B mix columns
eor r18,r22
eor r19,r23
eor r20,r2
eor r21,r3
eor r22,r8
eor r23,r9
eor r2,r10
eor r3,r11
eor r4,r22
eor r5,r23
eor r6,r2
eor r7,r3
; permute the first half of key block 1 (Y+1..8)
ldd r12,Y+1
ldd r13,Y+2
ldd r14,Y+3
ldd r15,Y+4
ldd r24,Y+5
ldd r25,Y+6
ldd r16,Y+7
ldd r17,Y+8
std Y+1,r13
std Y+2,r17
std Y+3,r12
std Y+4,r25
std Y+5,r14
std Y+6,r16
std Y+7,r24
std Y+8,r15
; key block 2 first half (Y+17..24) through table_2
ldi r30,lo8(table_2)
ldi r31,hi8(table_2)
#if defined(RAMPZ)
ldi r27,hh8(table_2)
out _SFR_IO_ADDR(RAMPZ),r27
#endif
ldd r12,Y+17
ldd r13,Y+18
ldd r14,Y+19
ldd r15,Y+20
ldd r24,Y+21
ldd r25,Y+22
ldd r16,Y+23
ldd r17,Y+24
mov r30,r12
#if defined(RAMPZ)
elpm r12,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r12,Z
#elif defined(__AVR_TINY__)
ld r12,Z
#else
lpm
mov r12,r0
#endif
mov r30,r13
#if defined(RAMPZ)
elpm r13,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r13,Z
#elif defined(__AVR_TINY__)
ld r13,Z
#else
lpm
mov r13,r0
#endif
mov r30,r14
#if defined(RAMPZ)
elpm r14,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r14,Z
#elif defined(__AVR_TINY__)
ld r14,Z
#else
lpm
mov r14,r0
#endif
mov r30,r15
#if defined(RAMPZ)
elpm r15,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r15,Z
#elif defined(__AVR_TINY__)
ld r15,Z
#else
lpm
mov r15,r0
#endif
mov r30,r24
#if defined(RAMPZ)
elpm r24,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r24,Z
#elif defined(__AVR_TINY__)
ld r24,Z
#else
lpm
mov r24,r0
#endif
mov r30,r25
#if defined(RAMPZ)
elpm r25,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r25,Z
#elif defined(__AVR_TINY__)
ld r25,Z
#else
lpm
mov r25,r0
#endif
mov r30,r16
#if defined(RAMPZ)
elpm r16,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r16,Z
#elif defined(__AVR_TINY__)
ld r16,Z
#else
lpm
mov r16,r0
#endif
mov r30,r17
#if defined(RAMPZ)
elpm r17,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r17,Z
#elif defined(__AVR_TINY__)
ld r17,Z
#else
lpm
mov r17,r0
#endif
std Y+17,r13
std Y+18,r17
std Y+19,r12
std Y+20,r25
std Y+21,r14
std Y+22,r16
std Y+23,r24
std Y+24,r15
; key block 3 first half (Y+33..40) through table_3
ldi r30,lo8(table_3)
ldi r31,hi8(table_3)
#if defined(RAMPZ)
ldi r27,hh8(table_3)
out _SFR_IO_ADDR(RAMPZ),r27
#endif
ldd r12,Y+33
ldd r13,Y+34
ldd r14,Y+35
ldd r15,Y+36
ldd r24,Y+37
ldd r25,Y+38
ldd r16,Y+39
ldd r17,Y+40
mov r30,r12
#if defined(RAMPZ)
elpm r12,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r12,Z
#elif defined(__AVR_TINY__)
ld r12,Z
#else
lpm
mov r12,r0
#endif
mov r30,r13
#if defined(RAMPZ)
elpm r13,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r13,Z
#elif defined(__AVR_TINY__)
ld r13,Z
#else
lpm
mov r13,r0
#endif
mov r30,r14
#if defined(RAMPZ)
elpm r14,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r14,Z
#elif defined(__AVR_TINY__)
ld r14,Z
#else
lpm
mov r14,r0
#endif
mov r30,r15
#if defined(RAMPZ)
elpm r15,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r15,Z
#elif defined(__AVR_TINY__)
ld r15,Z
#else
lpm
mov r15,r0
#endif
mov r30,r24
#if defined(RAMPZ)
elpm r24,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r24,Z
#elif defined(__AVR_TINY__)
ld r24,Z
#else
lpm
mov r24,r0
#endif
mov r30,r25
#if defined(RAMPZ)
elpm r25,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r25,Z
#elif defined(__AVR_TINY__)
ld r25,Z
#else
lpm
mov r25,r0
#endif
mov r30,r16
#if defined(RAMPZ)
elpm r16,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r16,Z
#elif defined(__AVR_TINY__)
ld r16,Z
#else
lpm
mov r16,r0
#endif
mov r30,r17
#if defined(RAMPZ)
elpm r17,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r17,Z
#elif defined(__AVR_TINY__)
ld r17,Z
#else
lpm
mov r17,r0
#endif
std Y+33,r13
std Y+34,r17
std Y+35,r12
std Y+36,r25
std Y+37,r14
std Y+38,r16
std Y+39,r24
std Y+40,r15
; back to table_0
ldi r30,lo8(table_0)
ldi r31,hi8(table_0)
#if defined(RAMPZ)
ldi r27,hh8(table_0)
out _SFR_IO_ADDR(RAMPZ),r27
#endif
; round C: lookups start at r4 after the previous rotation
mov r30,r4
#if defined(RAMPZ)
elpm r4,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r4,Z
#elif defined(__AVR_TINY__)
ld r4,Z
#else
lpm
mov r4,r0
#endif
mov r30,r5
#if defined(RAMPZ)
elpm r5,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r5,Z
#elif defined(__AVR_TINY__)
ld r5,Z
#else
lpm
mov r5,r0
#endif
mov r30,r6
#if defined(RAMPZ)
elpm r6,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r6,Z
#elif defined(__AVR_TINY__)
ld r6,Z
#else
lpm
mov r6,r0
#endif
mov r30,r7
#if defined(RAMPZ)
elpm r7,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r7,Z
#elif defined(__AVR_TINY__)
ld r7,Z
#else
lpm
mov r7,r0
#endif
mov r30,r8
#if defined(RAMPZ)
elpm r8,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r8,Z
#elif defined(__AVR_TINY__)
ld r8,Z
#else
lpm
mov r8,r0
#endif
mov r30,r9
#if defined(RAMPZ)
elpm r9,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r9,Z
#elif defined(__AVR_TINY__)
ld r9,Z
#else
lpm
mov r9,r0
#endif
mov r30,r10
#if defined(RAMPZ)
elpm r10,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r10,Z
#elif defined(__AVR_TINY__)
ld r10,Z
#else
lpm
mov r10,r0
#endif
mov r30,r11
#if defined(RAMPZ)
elpm r11,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r11,Z
#elif defined(__AVR_TINY__)
ld r11,Z
#else
lpm
mov r11,r0
#endif
mov r30,r18
#if defined(RAMPZ)
elpm r18,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r18,Z
#elif defined(__AVR_TINY__)
ld r18,Z
#else
lpm
mov r18,r0
#endif
mov r30,r19
#if defined(RAMPZ)
elpm r19,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r19,Z
#elif defined(__AVR_TINY__)
ld r19,Z
#else
lpm
mov r19,r0
#endif
mov r30,r20
#if defined(RAMPZ)
elpm r20,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r20,Z
#elif defined(__AVR_TINY__)
ld r20,Z
#else
lpm
mov r20,r0
#endif
mov r30,r21
#if defined(RAMPZ)
elpm r21,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r21,Z
#elif defined(__AVR_TINY__)
ld r21,Z
#else
lpm
mov r21,r0
#endif
mov r30,r22
#if defined(RAMPZ)
elpm r22,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r22,Z
#elif defined(__AVR_TINY__)
ld r22,Z
#else
lpm
mov r22,r0
#endif
mov r30,r23
#if defined(RAMPZ)
elpm r23,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r23,Z
#elif defined(__AVR_TINY__)
ld r23,Z
#else
lpm
mov r23,r0
#endif
mov r30,r2
#if defined(RAMPZ)
elpm r2,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r2,Z
#elif defined(__AVR_TINY__)
ld r2,Z
#else
lpm
mov r2,r0
#endif
mov r30,r3
#if defined(RAMPZ)
elpm r3,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r3,Z
#elif defined(__AVR_TINY__)
ld r3,Z
#else
lpm
mov r3,r0
#endif
; round C constants
ldi r30,lo8(table_4)
ldi r31,hi8(table_4)
#if defined(RAMPZ)
ldi r24,hh8(table_4)
out _SFR_IO_ADDR(RAMPZ),r24
#endif
mov r30,r26
#if defined(RAMPZ)
elpm r27,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r27,Z
#elif defined(__AVR_TINY__)
ld r27,Z
#else
lpm
mov r27,r0
#endif
eor r4,r27
inc r26
mov r30,r26
#if defined(RAMPZ)
elpm r27,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r27,Z
#elif defined(__AVR_TINY__)
ld r27,Z
#else
lpm
mov r27,r0
#endif
eor r8,r27
inc r26
ldi r27,2
eor r18,r27
; round C tweakey (first halves again, state rotated to r4..)
ldd r0,Y+1
eor r4,r0
ldd r0,Y+2
eor r5,r0
ldd r0,Y+3
eor r6,r0
ldd r0,Y+4
eor r7,r0
ldd r0,Y+17
eor r4,r0
ldd r0,Y+18
eor r5,r0
ldd r0,Y+19
eor r6,r0
ldd r0,Y+20
eor r7,r0
ldd r0,Y+33
eor r4,r0
ldd r0,Y+34
eor r5,r0
ldd r0,Y+35
eor r6,r0
ldd r0,Y+36
eor r7,r0
ldd r0,Y+5
eor r8,r0
ldd r0,Y+6
eor r9,r0
ldd r0,Y+7
eor r10,r0
ldd r0,Y+8
eor r11,r0
ldd r0,Y+21
eor r8,r0
ldd r0,Y+22
eor r9,r0
ldd r0,Y+23
eor r10,r0
ldd r0,Y+24
eor r11,r0
ldd r0,Y+37
eor r8,r0
ldd r0,Y+38
eor r9,r0
ldd r0,Y+39
eor r10,r0
ldd r0,Y+40
eor r11,r0
; round C shift rows
mov r0,r11
mov r11,r10
mov r10,r9
mov r9,r8
mov r8,r0
mov r0,r20
mov r20,r18
mov r18,r0
mov r0,r21
mov r21,r19
mov r19,r0
mov r0,r22
mov r22,r23
mov r23,r2
mov r2,r3
mov r3,r0
; round C mix columns
eor r8,r18
eor r9,r19
eor r10,r20
eor r11,r21
eor r18,r4
eor r19,r5
eor r20,r6
eor r21,r7
eor r22,r18
eor r23,r19
eor r2,r20
eor r3,r21
; permute the second half of key block 1 (Y+9..16)
ldd r12,Y+9
ldd r13,Y+10
ldd r14,Y+11
ldd r15,Y+12
ldd r24,Y+13
ldd r25,Y+14
ldd r16,Y+15
ldd r17,Y+16
std Y+9,r13
std Y+10,r17
std Y+11,r12
std Y+12,r25
std Y+13,r14
std Y+14,r16
std Y+15,r24
std Y+16,r15
; key block 2 second half (Y+25..32) through table_2
ldi r30,lo8(table_2)
ldi r31,hi8(table_2)
#if defined(RAMPZ)
ldi r27,hh8(table_2)
out _SFR_IO_ADDR(RAMPZ),r27
#endif
ldd r12,Y+25
ldd r13,Y+26
ldd r14,Y+27
ldd r15,Y+28
ldd r24,Y+29
ldd r25,Y+30
ldd r16,Y+31
ldd r17,Y+32
mov r30,r12
#if defined(RAMPZ)
elpm r12,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r12,Z
#elif defined(__AVR_TINY__)
ld r12,Z
#else
lpm
mov r12,r0
#endif
mov r30,r13
#if defined(RAMPZ)
elpm r13,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r13,Z
#elif defined(__AVR_TINY__)
ld r13,Z
#else
lpm
mov r13,r0
#endif
mov r30,r14
#if defined(RAMPZ)
elpm r14,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r14,Z
#elif defined(__AVR_TINY__)
ld r14,Z
#else
lpm
mov r14,r0
#endif
mov r30,r15
#if defined(RAMPZ)
elpm r15,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r15,Z
#elif defined(__AVR_TINY__)
ld r15,Z
#else
lpm
mov r15,r0
#endif
mov r30,r24
#if defined(RAMPZ)
elpm r24,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r24,Z
#elif defined(__AVR_TINY__)
ld r24,Z
#else
lpm
mov r24,r0
#endif
mov r30,r25
#if defined(RAMPZ)
elpm r25,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r25,Z
#elif defined(__AVR_TINY__)
ld r25,Z
#else
lpm
mov r25,r0
#endif
mov r30,r16
#if defined(RAMPZ)
elpm r16,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r16,Z
#elif defined(__AVR_TINY__)
ld r16,Z
#else
lpm
mov r16,r0
#endif
mov r30,r17
#if defined(RAMPZ)
elpm r17,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r17,Z
#elif defined(__AVR_TINY__)
ld r17,Z
#else
lpm
mov r17,r0
#endif
std Y+25,r13
std Y+26,r17
std Y+27,r12
std Y+28,r25
std Y+29,r14
std Y+30,r16
std Y+31,r24
std Y+32,r15
; key block 3 second half (Y+41..48) through table_3
ldi r30,lo8(table_3)
ldi r31,hi8(table_3)
#if defined(RAMPZ)
ldi r27,hh8(table_3)
out _SFR_IO_ADDR(RAMPZ),r27
#endif
ldd r12,Y+41
ldd r13,Y+42
ldd r14,Y+43
ldd r15,Y+44
ldd r24,Y+45
ldd r25,Y+46
ldd r16,Y+47
ldd r17,Y+48
mov r30,r12
#if defined(RAMPZ)
elpm r12,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r12,Z
#elif defined(__AVR_TINY__)
ld r12,Z
#else
lpm
mov r12,r0
#endif
mov r30,r13
#if defined(RAMPZ)
elpm r13,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r13,Z
#elif defined(__AVR_TINY__)
ld r13,Z
#else
lpm
mov r13,r0
#endif
mov r30,r14
#if defined(RAMPZ)
elpm r14,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r14,Z
#elif defined(__AVR_TINY__)
ld r14,Z
#else
lpm
mov r14,r0
#endif
mov r30,r15
#if defined(RAMPZ)
elpm r15,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r15,Z
#elif defined(__AVR_TINY__)
ld r15,Z
#else
lpm
mov r15,r0
#endif
mov r30,r24
#if defined(RAMPZ)
elpm r24,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r24,Z
#elif defined(__AVR_TINY__)
ld r24,Z
#else
lpm
mov r24,r0
#endif
mov r30,r25
#if defined(RAMPZ)
elpm r25,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r25,Z
#elif defined(__AVR_TINY__)
ld r25,Z
#else
lpm
mov r25,r0
#endif
mov r30,r16
#if defined(RAMPZ)
elpm r16,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r16,Z
#elif defined(__AVR_TINY__)
ld r16,Z
#else
lpm
mov r16,r0
#endif
mov r30,r17
#if defined(RAMPZ)
elpm r17,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r17,Z
#elif defined(__AVR_TINY__)
ld r17,Z
#else
lpm
mov r17,r0
#endif
std Y+41,r13
std Y+42,r17
std Y+43,r12
std Y+44,r25
std Y+45,r14
std Y+46,r16
std Y+47,r24
std Y+48,r15
; back to table_0
ldi r30,lo8(table_0)
ldi r31,hi8(table_0)
#if defined(RAMPZ)
ldi r27,hh8(table_0)
out _SFR_IO_ADDR(RAMPZ),r27
#endif
; round D: lookups start at r22 after the previous rotation
mov r30,r22
#if defined(RAMPZ)
elpm r22,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r22,Z
#elif defined(__AVR_TINY__)
ld r22,Z
#else
lpm
mov r22,r0
#endif
mov r30,r23
#if defined(RAMPZ)
elpm r23,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r23,Z
#elif defined(__AVR_TINY__)
ld r23,Z
#else
lpm
mov r23,r0
#endif
mov r30,r2
#if defined(RAMPZ)
elpm r2,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r2,Z
#elif defined(__AVR_TINY__)
ld r2,Z
#else
lpm
mov r2,r0
#endif
mov r30,r3
#if defined(RAMPZ)
elpm r3,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r3,Z
#elif defined(__AVR_TINY__)
ld r3,Z
#else
lpm
mov r3,r0
#endif
mov r30,r4
#if defined(RAMPZ)
elpm r4,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r4,Z
#elif defined(__AVR_TINY__)
ld r4,Z
#else
lpm
mov r4,r0
#endif
mov r30,r5
#if defined(RAMPZ)
elpm r5,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r5,Z
#elif defined(__AVR_TINY__)
ld r5,Z
#else
lpm
mov r5,r0
#endif
mov r30,r6
#if defined(RAMPZ)
elpm r6,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r6,Z
#elif defined(__AVR_TINY__)
ld r6,Z
#else
lpm
mov r6,r0
#endif
mov r30,r7
#if defined(RAMPZ)
elpm r7,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r7,Z
#elif defined(__AVR_TINY__)
ld r7,Z
#else
lpm
mov r7,r0
#endif
mov r30,r8
#if defined(RAMPZ)
elpm r8,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r8,Z
#elif defined(__AVR_TINY__)
ld r8,Z
#else
lpm
mov r8,r0
#endif
mov r30,r9
#if defined(RAMPZ)
elpm r9,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r9,Z
#elif defined(__AVR_TINY__)
ld r9,Z
#else
lpm
mov r9,r0
#endif
mov r30,r10
#if defined(RAMPZ)
elpm r10,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r10,Z
#elif defined(__AVR_TINY__)
ld r10,Z
#else
lpm
mov r10,r0
#endif
mov r30,r11
#if defined(RAMPZ)
elpm r11,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r11,Z
#elif defined(__AVR_TINY__)
ld r11,Z
#else
lpm
mov r11,r0
#endif
mov r30,r18
#if defined(RAMPZ)
elpm r18,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r18,Z
#elif defined(__AVR_TINY__)
ld r18,Z
#else
lpm
mov r18,r0
#endif
mov r30,r19
#if defined(RAMPZ)
elpm r19,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r19,Z
#elif defined(__AVR_TINY__)
ld r19,Z
#else
lpm
mov r19,r0
#endif
mov r30,r20
#if defined(RAMPZ)
elpm r20,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r20,Z
#elif defined(__AVR_TINY__)
ld r20,Z
#else
lpm
mov r20,r0
#endif
mov r30,r21
#if defined(RAMPZ)
elpm r21,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r21,Z
#elif defined(__AVR_TINY__)
ld r21,Z
#else
lpm
mov r21,r0
#endif
; round D constants
ldi r30,lo8(table_4)
ldi r31,hi8(table_4)
#if defined(RAMPZ)
ldi r24,hh8(table_4)
out _SFR_IO_ADDR(RAMPZ),r24
#endif
mov r30,r26
#if defined(RAMPZ)
elpm r27,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r27,Z
#elif defined(__AVR_TINY__)
ld r27,Z
#else
lpm
mov r27,r0
#endif
eor r22,r27
inc r26
mov r30,r26
#if defined(RAMPZ)
elpm r27,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r27,Z
#elif defined(__AVR_TINY__)
ld r27,Z
#else
lpm
mov r27,r0
#endif
eor r4,r27
inc r26
ldi r27,2
eor r8,r27
; round D tweakey (second halves, state rotated to r22..)
ldd r0,Y+9
eor r22,r0
ldd r0,Y+10
eor r23,r0
ldd r0,Y+11
eor r2,r0
ldd r0,Y+12
eor r3,r0
ldd r0,Y+25
eor r22,r0
ldd r0,Y+26
eor r23,r0
ldd r0,Y+27
eor r2,r0
ldd r0,Y+28
eor r3,r0
ldd r0,Y+41
eor r22,r0
ldd r0,Y+42
eor r23,r0
ldd r0,Y+43
eor r2,r0
ldd r0,Y+44
eor r3,r0
ldd r0,Y+13
eor r4,r0
ldd r0,Y+14
eor r5,r0
ldd r0,Y+15
eor r6,r0
ldd r0,Y+16
eor r7,r0
ldd r0,Y+29
eor r4,r0
ldd r0,Y+30
eor r5,r0
ldd r0,Y+31
eor r6,r0
ldd r0,Y+32
eor r7,r0
ldd r0,Y+45
eor r4,r0
ldd r0,Y+46
eor r5,r0
ldd r0,Y+47
eor r6,r0
ldd r0,Y+48
eor r7,r0
; round D shift rows
mov r0,r7
mov r7,r6
mov r6,r5
mov r5,r4
mov r4,r0
mov r0,r10
mov r10,r8
mov r8,r0
mov r0,r11
mov r11,r9
mov r9,r0
mov r0,r18
mov r18,r19
mov r19,r20
mov r20,r21
mov r21,r0
; round D mix columns
eor r4,r8
eor r5,r9
eor r6,r10
eor r7,r11
eor r8,r22
eor r9,r23
eor r10,r2
eor r11,r3
eor r18,r8
eor r19,r9
eor r20,r10
eor r21,r11
; 80 round-constant bytes consumed (2 per round) -> 40 rounds done?
cpi r26,80
brne 5721f
rjmp 790f
5721:
; not done: advance the key schedule for the next iteration --
; key block 1 first half is shuffled (no table) ...
ldd r12,Y+1
ldd r13,Y+2
ldd r14,Y+3
ldd r15,Y+4
ldd r24,Y+5
ldd r25,Y+6
ldd r16,Y+7
ldd r17,Y+8
std Y+1,r13
std Y+2,r17
std Y+3,r12
std Y+4,r25
std Y+5,r14
std Y+6,r16
std Y+7,r24
std Y+8,r15
; ... key block 2 first half goes through table_2 ...
ldi r30,lo8(table_2)
ldi r31,hi8(table_2)
#if defined(RAMPZ)
ldi r27,hh8(table_2)
out _SFR_IO_ADDR(RAMPZ),r27
#endif
ldd r12,Y+17
ldd r13,Y+18
ldd r14,Y+19
ldd r15,Y+20
ldd r24,Y+21
ldd r25,Y+22
ldd r16,Y+23
ldd r17,Y+24
mov r30,r12
#if defined(RAMPZ)
elpm r12,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r12,Z
#elif defined(__AVR_TINY__)
ld r12,Z
#else
lpm
mov r12,r0
#endif
mov r30,r13
#if defined(RAMPZ)
elpm r13,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r13,Z
#elif defined(__AVR_TINY__)
ld r13,Z
#else
lpm
mov r13,r0
#endif
mov r30,r14
#if defined(RAMPZ)
elpm r14,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r14,Z
#elif defined(__AVR_TINY__)
ld r14,Z
#else
lpm
mov r14,r0
#endif
mov r30,r15
#if defined(RAMPZ)
elpm r15,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r15,Z
#elif defined(__AVR_TINY__)
ld r15,Z
#else
lpm
mov r15,r0
#endif
mov r30,r24
#if defined(RAMPZ)
elpm r24,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r24,Z
#elif defined(__AVR_TINY__)
ld r24,Z
#else
lpm
mov r24,r0
#endif
mov r30,r25
#if defined(RAMPZ)
elpm r25,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r25,Z
#elif defined(__AVR_TINY__)
ld r25,Z
#else
lpm
mov r25,r0
#endif
mov r30,r16
#if defined(RAMPZ)
elpm r16,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r16,Z
#elif defined(__AVR_TINY__)
ld r16,Z
#else
lpm
mov r16,r0
#endif
mov r30,r17
#if defined(RAMPZ)
elpm r17,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r17,Z
#elif defined(__AVR_TINY__)
ld r17,Z
#else
lpm
mov r17,r0
#endif
std Y+17,r13
std Y+18,r17
std Y+19,r12
std Y+20,r25
std Y+21,r14
std Y+22,r16
std Y+23,r24
std Y+24,r15
; ... and key block 3 first half goes through table_3
ldi r30,lo8(table_3)
ldi r31,hi8(table_3)
#if defined(RAMPZ)
ldi r27,hh8(table_3)
out _SFR_IO_ADDR(RAMPZ),r27
#endif
ldd r12,Y+33
ldd r13,Y+34
ldd r14,Y+35
ldd r15,Y+36
ldd r24,Y+37
ldd r25,Y+38
ldd r16,Y+39
ldd r17,Y+40
mov r30,r12
#if defined(RAMPZ)
elpm r12,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r12,Z
#elif defined(__AVR_TINY__)
ld r12,Z
#else
lpm
mov r12,r0
#endif
mov r30,r13
#if defined(RAMPZ)
elpm r13,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r13,Z
#elif defined(__AVR_TINY__)
ld r13,Z
#else
lpm
mov r13,r0
#endif
mov r30,r14
#if defined(RAMPZ)
elpm r14,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r14,Z
#elif defined(__AVR_TINY__)
ld r14,Z
#else
lpm
mov r14,r0
#endif
mov r30,r15
#if defined(RAMPZ)
elpm r15,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r15,Z
#elif defined(__AVR_TINY__)
ld r15,Z
#else
lpm
mov r15,r0
#endif
mov r30,r24
#if defined(RAMPZ)
elpm r24,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r24,Z
#elif defined(__AVR_TINY__)
ld r24,Z
#else
lpm
mov r24,r0
#endif
mov r30,r25
#if defined(RAMPZ)
elpm r25,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r25,Z
#elif defined(__AVR_TINY__)
ld r25,Z
#else
lpm
mov r25,r0
#endif
mov r30,r16
#if defined(RAMPZ)
elpm r16,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r16,Z
#elif defined(__AVR_TINY__)
ld r16,Z
#else
lpm
mov r16,r0
#endif
mov r30,r17
#if defined(RAMPZ)
elpm r17,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r17,Z
#elif defined(__AVR_TINY__)
ld r17,Z
#else
lpm
mov r17,r0
#endif
std Y+33,r13
std Y+34,r17
std Y+35,r12
std Y+36,r25
std Y+37,r14
std Y+38,r16
std Y+39,r24
std Y+40,r15
; restore Z to table_0 and run the next four rounds
ldi r30,lo8(table_0)
ldi r31,hi8(table_0)
#if defined(RAMPZ)
ldi r27,hh8(table_0)
out _SFR_IO_ADDR(RAMPZ),r27
#endif
rjmp 114b
790:
; all rounds done: restore the caller's RAMPZ saved in the prologue
#if defined(RAMPZ)
pop r0
out _SFR_IO_ADDR(RAMPZ),r0
#endif
; recover the output pointer (pushed r23:r22 sits at Y+49/Y+50 above the
; 48-byte frame) and write the 16-byte ciphertext state
ldd r26,Y+49
ldd r27,Y+50
st X+,r18
st X+,r19
st X+,r20
st X+,r21
st X+,r22
st X+,r23
st X+,r2
st X+,r3
st X+,r4
st X+,r5
st X+,r6
st X+,r7
st X+,r8
st X+,r9
st X+,r10
st X+,r11
; epilogue: drop the frame plus the two pointer bytes, restore registers
adiw r28,50
in r0,0x3f
cli
out 0x3e,r29
out 0x3f,r0
out 0x3d,r28
pop r17
pop r16
pop r15
pop r14
pop r13
pop r12
pop r11
pop r10
pop r9
pop r8
pop r7
pop r6
pop r5
pop r4
pop r3
pop r2
pop r29
pop r28
ret
.size skinny_plus_encrypt, .-skinny_plus_encrypt
; the fully-expanded-tweakey entry point is an alias of the same code
.global skinny_plus_encrypt_tk_full
.set skinny_plus_encrypt_tk_full,skinny_plus_encrypt
.text
.global skinny_plus_decrypt
.type skinny_plus_decrypt, @function
skinny_plus_decrypt:
push r28
push r29
push r2
push r3
push r4
push r5
push r6
push r7
push r8
push r9
push r10
push r11
push r12
push r13
push r14
push r15
push r16
push r17
push r23
push r22
movw r30,r24
movw r26,r20
in r28,0x3d
in r29,0x3e
sbiw r28,48
in r0,0x3f
cli
out 0x3e,r29
out 0x3f,r0
out 0x3d,r28
.L__stack_usage = 68
ld r18,Z
ldd r19,Z+1
ldd r20,Z+2
ldd r21,Z+3
ldd r22,Z+4
ldd r23,Z+5
ldd r2,Z+6
ldd r3,Z+7
ldd r4,Z+8
ldd r5,Z+9
ldd r6,Z+10
ldd r7,Z+11
ldd r8,Z+12
ldd r9,Z+13
ldd r10,Z+14
ldd r11,Z+15
std Y+1,r23
std Y+2,r2
std Y+3,r21
std Y+4,r20
std Y+5,r3
std Y+6,r18
std Y+7,r19
std Y+8,r22
std Y+9,r9
std Y+10,r10
std Y+11,r7
std Y+12,r6
std Y+13,r11
std Y+14,r4
std Y+15,r5
std Y+16,r8
ldd r18,Z+16
ldd r19,Z+17
ldd r20,Z+18
ldd r21,Z+19
ldd r22,Z+20
ldd r23,Z+21
ldd r2,Z+22
ldd r3,Z+23
ldd r4,Z+24
ldd r5,Z+25
ldd r6,Z+26
ldd r7,Z+27
ldd r8,Z+28
ldd r9,Z+29
ldd r10,Z+30
ldd r11,Z+31
std Y+17,r23
std Y+18,r2
std Y+19,r21
std Y+20,r20
std Y+21,r3
std Y+22,r18
std Y+23,r19
std Y+24,r22
std Y+25,r9
std Y+26,r10
std Y+27,r7
std Y+28,r6
std Y+29,r11
std Y+30,r4
std Y+31,r5
std Y+32,r8
ldd r18,Z+32
ldd r19,Z+33
ldd r20,Z+34
ldd r21,Z+35
ldd r22,Z+36
ldd r23,Z+37
ldd r2,Z+38
ldd r3,Z+39
ldd r4,Z+40
ldd r5,Z+41
ldd r6,Z+42
ldd r7,Z+43
ldd r8,Z+44
ldd r9,Z+45
ldd r10,Z+46
ldd r11,Z+47
std Y+33,r23
std Y+34,r2
std Y+35,r21
std Y+36,r20
std Y+37,r3
std Y+38,r18
std Y+39,r19
std Y+40,r22
std Y+41,r9
std Y+42,r10
std Y+43,r7
std Y+44,r6
std Y+45,r11
std Y+46,r4
std Y+47,r5
std Y+48,r8
ld r18,X+
ld r19,X+
ld r20,X+
ld r21,X+
ld r22,X+
ld r23,X+
ld r2,X+
ld r3,X+
ld r4,X+
ld r5,X+
ld r6,X+
ld r7,X+
ld r8,X+
ld r9,X+
ld r10,X+
ld r11,X+
ldi r30,lo8(table_2)
ldi r31,hi8(table_2)
#if defined(RAMPZ)
ldi r26,hh8(table_2)
in r0,_SFR_IO_ADDR(RAMPZ)
push r0
out _SFR_IO_ADDR(RAMPZ),r26
#endif
ldi r26,20
ldd r12,Y+17
ldd r13,Y+18
ldd r14,Y+19
ldd r15,Y+20
ldd r24,Y+21
ldd r25,Y+22
ldd r16,Y+23
ldd r17,Y+24
122:
mov r30,r12
#if defined(RAMPZ)
elpm r12,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r12,Z
#elif defined(__AVR_TINY__)
ld r12,Z
#else
lpm
mov r12,r0
#endif
mov r30,r13
#if defined(RAMPZ)
elpm r13,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r13,Z
#elif defined(__AVR_TINY__)
ld r13,Z
#else
lpm
mov r13,r0
#endif
mov r30,r14
#if defined(RAMPZ)
elpm r14,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r14,Z
#elif defined(__AVR_TINY__)
ld r14,Z
#else
lpm
mov r14,r0
#endif
mov r30,r15
#if defined(RAMPZ)
elpm r15,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r15,Z
#elif defined(__AVR_TINY__)
ld r15,Z
#else
lpm
mov r15,r0
#endif
mov r30,r24
#if defined(RAMPZ)
elpm r24,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r24,Z
#elif defined(__AVR_TINY__)
ld r24,Z
#else
lpm
mov r24,r0
#endif
mov r30,r25
#if defined(RAMPZ)
elpm r25,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r25,Z
#elif defined(__AVR_TINY__)
ld r25,Z
#else
lpm
mov r25,r0
#endif
mov r30,r16
#if defined(RAMPZ)
elpm r16,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r16,Z
#elif defined(__AVR_TINY__)
ld r16,Z
#else
lpm
mov r16,r0
#endif
mov r30,r17
#if defined(RAMPZ)
elpm r17,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r17,Z
#elif defined(__AVR_TINY__)
ld r17,Z
#else
lpm
mov r17,r0
#endif
dec r26
brne 122b
std Y+17,r12
std Y+18,r13
std Y+19,r14
std Y+20,r15
std Y+21,r24
std Y+22,r25
std Y+23,r16
std Y+24,r17
ldi r26,20
ldd r12,Y+25
ldd r13,Y+26
ldd r14,Y+27
ldd r15,Y+28
ldd r24,Y+29
ldd r25,Y+30
ldd r16,Y+31
ldd r17,Y+32
150:
mov r30,r12
#if defined(RAMPZ)
elpm r12,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r12,Z
#elif defined(__AVR_TINY__)
ld r12,Z
#else
lpm
mov r12,r0
#endif
mov r30,r13
#if defined(RAMPZ)
elpm r13,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r13,Z
#elif defined(__AVR_TINY__)
ld r13,Z
#else
lpm
mov r13,r0
#endif
mov r30,r14
#if defined(RAMPZ)
elpm r14,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r14,Z
#elif defined(__AVR_TINY__)
ld r14,Z
#else
lpm
mov r14,r0
#endif
mov r30,r15
#if defined(RAMPZ)
elpm r15,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r15,Z
#elif defined(__AVR_TINY__)
ld r15,Z
#else
lpm
mov r15,r0
#endif
mov r30,r24
#if defined(RAMPZ)
elpm r24,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r24,Z
#elif defined(__AVR_TINY__)
ld r24,Z
#else
lpm
mov r24,r0
#endif
mov r30,r25
#if defined(RAMPZ)
elpm r25,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r25,Z
#elif defined(__AVR_TINY__)
ld r25,Z
#else
lpm
mov r25,r0
#endif
mov r30,r16
#if defined(RAMPZ)
elpm r16,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r16,Z
#elif defined(__AVR_TINY__)
ld r16,Z
#else
lpm
mov r16,r0
#endif
mov r30,r17
#if defined(RAMPZ)
elpm r17,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r17,Z
#elif defined(__AVR_TINY__)
ld r17,Z
#else
lpm
mov r17,r0
#endif
dec r26
brne 150b
std Y+25,r12
std Y+26,r13
std Y+27,r14
std Y+28,r15
std Y+29,r24
std Y+30,r25
std Y+31,r16
std Y+32,r17
ldi r30,lo8(table_3)
ldi r31,hi8(table_3)
#if defined(RAMPZ)
ldi r26,hh8(table_3)
out _SFR_IO_ADDR(RAMPZ),r26
#endif
ldi r26,20
ldd r12,Y+33
ldd r13,Y+34
ldd r14,Y+35
ldd r15,Y+36
ldd r24,Y+37
ldd r25,Y+38
ldd r16,Y+39
ldd r17,Y+40
179:
mov r30,r12
#if defined(RAMPZ)
elpm r12,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r12,Z
#elif defined(__AVR_TINY__)
ld r12,Z
#else
lpm
mov r12,r0
#endif
mov r30,r13
#if defined(RAMPZ)
elpm r13,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r13,Z
#elif defined(__AVR_TINY__)
ld r13,Z
#else
lpm
mov r13,r0
#endif
mov r30,r14
#if defined(RAMPZ)
elpm r14,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r14,Z
#elif defined(__AVR_TINY__)
ld r14,Z
#else
lpm
mov r14,r0
#endif
mov r30,r15
#if defined(RAMPZ)
elpm r15,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r15,Z
#elif defined(__AVR_TINY__)
ld r15,Z
#else
lpm
mov r15,r0
#endif
mov r30,r24
#if defined(RAMPZ)
elpm r24,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r24,Z
#elif defined(__AVR_TINY__)
ld r24,Z
#else
lpm
mov r24,r0
#endif
mov r30,r25
#if defined(RAMPZ)
elpm r25,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r25,Z
#elif defined(__AVR_TINY__)
ld r25,Z
#else
lpm
mov r25,r0
#endif
mov r30,r16
#if defined(RAMPZ)
elpm r16,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r16,Z
#elif defined(__AVR_TINY__)
ld r16,Z
#else
lpm
mov r16,r0
#endif
mov r30,r17
#if defined(RAMPZ)
elpm r17,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r17,Z
#elif defined(__AVR_TINY__)
ld r17,Z
#else
lpm
mov r17,r0
#endif
dec r26
brne 179b
std Y+33,r12
std Y+34,r13
std Y+35,r14
std Y+36,r15
std Y+37,r24
std Y+38,r25
std Y+39,r16
std Y+40,r17
ldi r26,20
ldd r12,Y+41
ldd r13,Y+42
ldd r14,Y+43
ldd r15,Y+44
ldd r24,Y+45
ldd r25,Y+46
ldd r16,Y+47
ldd r17,Y+48
207:
mov r30,r12
#if defined(RAMPZ)
elpm r12,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r12,Z
#elif defined(__AVR_TINY__)
ld r12,Z
#else
lpm
mov r12,r0
#endif
mov r30,r13
#if defined(RAMPZ)
elpm r13,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r13,Z
#elif defined(__AVR_TINY__)
ld r13,Z
#else
lpm
mov r13,r0
#endif
mov r30,r14
#if defined(RAMPZ)
elpm r14,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r14,Z
#elif defined(__AVR_TINY__)
ld r14,Z
#else
lpm
mov r14,r0
#endif
mov r30,r15
#if defined(RAMPZ)
elpm r15,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r15,Z
#elif defined(__AVR_TINY__)
ld r15,Z
#else
lpm
mov r15,r0
#endif
mov r30,r24
#if defined(RAMPZ)
elpm r24,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r24,Z
#elif defined(__AVR_TINY__)
ld r24,Z
#else
lpm
mov r24,r0
#endif
mov r30,r25
#if defined(RAMPZ)
elpm r25,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r25,Z
#elif defined(__AVR_TINY__)
ld r25,Z
#else
lpm
mov r25,r0
#endif
mov r30,r16
#if defined(RAMPZ)
elpm r16,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r16,Z
#elif defined(__AVR_TINY__)
ld r16,Z
#else
lpm
mov r16,r0
#endif
mov r30,r17
#if defined(RAMPZ)
elpm r17,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r17,Z
#elif defined(__AVR_TINY__)
ld r17,Z
#else
lpm
mov r17,r0
#endif
dec r26
brne 207b
std Y+41,r12
std Y+42,r13
std Y+43,r14
std Y+44,r15
std Y+45,r24
std Y+46,r25
std Y+47,r16
std Y+48,r17
ldi r26,80
227:
ldd r12,Y+1
ldd r13,Y+2
ldd r14,Y+3
ldd r15,Y+4
ldd r24,Y+5
ldd r25,Y+6
ldd r16,Y+7
ldd r17,Y+8
std Y+1,r14
std Y+2,r12
std Y+3,r24
std Y+4,r17
std Y+5,r16
std Y+6,r15
std Y+7,r25
std Y+8,r13
ldd r12,Y+17
ldd r13,Y+18
ldd r14,Y+19
ldd r15,Y+20
ldd r24,Y+21
ldd r25,Y+22
ldd r16,Y+23
ldd r17,Y+24
mov r30,r12
#if defined(RAMPZ)
elpm r12,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r12,Z
#elif defined(__AVR_TINY__)
ld r12,Z
#else
lpm
mov r12,r0
#endif
mov r30,r13
#if defined(RAMPZ)
elpm r13,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r13,Z
#elif defined(__AVR_TINY__)
ld r13,Z
#else
lpm
mov r13,r0
#endif
mov r30,r14
#if defined(RAMPZ)
elpm r14,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r14,Z
#elif defined(__AVR_TINY__)
ld r14,Z
#else
lpm
mov r14,r0
#endif
mov r30,r15
#if defined(RAMPZ)
elpm r15,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r15,Z
#elif defined(__AVR_TINY__)
ld r15,Z
#else
lpm
mov r15,r0
#endif
mov r30,r24
#if defined(RAMPZ)
elpm r24,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r24,Z
#elif defined(__AVR_TINY__)
ld r24,Z
#else
lpm
mov r24,r0
#endif
mov r30,r25
#if defined(RAMPZ)
elpm r25,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r25,Z
#elif defined(__AVR_TINY__)
ld r25,Z
#else
lpm
mov r25,r0
#endif
mov r30,r16
#if defined(RAMPZ)
elpm r16,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r16,Z
#elif defined(__AVR_TINY__)
ld r16,Z
#else
lpm
mov r16,r0
#endif
mov r30,r17
#if defined(RAMPZ)
elpm r17,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r17,Z
#elif defined(__AVR_TINY__)
ld r17,Z
#else
lpm
mov r17,r0
#endif
std Y+17,r14
std Y+18,r12
std Y+19,r24
std Y+20,r17
std Y+21,r16
std Y+22,r15
std Y+23,r25
std Y+24,r13
ldi r30,lo8(table_2)
ldi r31,hi8(table_2)
#if defined(RAMPZ)
ldi r27,hh8(table_2)
out _SFR_IO_ADDR(RAMPZ),r27
#endif
ldd r12,Y+33
ldd r13,Y+34
ldd r14,Y+35
ldd r15,Y+36
ldd r24,Y+37
ldd r25,Y+38
ldd r16,Y+39
ldd r17,Y+40
mov r30,r12
#if defined(RAMPZ)
elpm r12,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r12,Z
#elif defined(__AVR_TINY__)
ld r12,Z
#else
lpm
mov r12,r0
#endif
mov r30,r13
#if defined(RAMPZ)
elpm r13,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r13,Z
#elif defined(__AVR_TINY__)
ld r13,Z
#else
lpm
mov r13,r0
#endif
mov r30,r14
#if defined(RAMPZ)
elpm r14,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r14,Z
#elif defined(__AVR_TINY__)
ld r14,Z
#else
lpm
mov r14,r0
#endif
mov r30,r15
#if defined(RAMPZ)
elpm r15,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r15,Z
#elif defined(__AVR_TINY__)
ld r15,Z
#else
lpm
mov r15,r0
#endif
mov r30,r24
#if defined(RAMPZ)
elpm r24,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r24,Z
#elif defined(__AVR_TINY__)
ld r24,Z
#else
lpm
mov r24,r0
#endif
mov r30,r25
#if defined(RAMPZ)
elpm r25,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r25,Z
#elif defined(__AVR_TINY__)
ld r25,Z
#else
lpm
mov r25,r0
#endif
mov r30,r16
#if defined(RAMPZ)
elpm r16,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r16,Z
#elif defined(__AVR_TINY__)
ld r16,Z
#else
lpm
mov r16,r0
#endif
mov r30,r17
#if defined(RAMPZ)
elpm r17,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r17,Z
#elif defined(__AVR_TINY__)
ld r17,Z
#else
lpm
mov r17,r0
#endif
std Y+33,r14
std Y+34,r12
std Y+35,r24
std Y+36,r17
std Y+37,r16
std Y+38,r15
std Y+39,r25
std Y+40,r13
eor r18,r8
eor r19,r9
eor r20,r10
eor r21,r11
eor r8,r22
eor r9,r23
eor r10,r2
eor r11,r3
eor r4,r8
eor r5,r9
eor r6,r10
eor r7,r11
mov r0,r4
mov r4,r5
mov r5,r6
mov r6,r7
mov r7,r0
mov r0,r8
mov r8,r10
mov r10,r0
mov r0,r9
mov r9,r11
mov r11,r0
mov r0,r21
mov r21,r20
mov r20,r19
mov r19,r18
mov r18,r0
ldd r0,Y+9
eor r22,r0
ldd r0,Y+10
eor r23,r0
ldd r0,Y+11
eor r2,r0
ldd r0,Y+12
eor r3,r0
ldd r0,Y+25
eor r22,r0
ldd r0,Y+26
eor r23,r0
ldd r0,Y+27
eor r2,r0
ldd r0,Y+28
eor r3,r0
ldd r0,Y+41
eor r22,r0
ldd r0,Y+42
eor r23,r0
ldd r0,Y+43
eor r2,r0
ldd r0,Y+44
eor r3,r0
ldd r0,Y+13
eor r4,r0
ldd r0,Y+14
eor r5,r0
ldd r0,Y+15
eor r6,r0
ldd r0,Y+16
eor r7,r0
ldd r0,Y+29
eor r4,r0
ldd r0,Y+30
eor r5,r0
ldd r0,Y+31
eor r6,r0
ldd r0,Y+32
eor r7,r0
ldd r0,Y+45
eor r4,r0
ldd r0,Y+46
eor r5,r0
ldd r0,Y+47
eor r6,r0
ldd r0,Y+48
eor r7,r0
ldi r30,lo8(table_4)
ldi r31,hi8(table_4)
#if defined(RAMPZ)
ldi r24,hh8(table_4)
out _SFR_IO_ADDR(RAMPZ),r24
#endif
dec r26
mov r30,r26
#if defined(RAMPZ)
elpm r27,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r27,Z
#elif defined(__AVR_TINY__)
ld r27,Z
#else
lpm
mov r27,r0
#endif
eor r4,r27
dec r26
mov r30,r26
#if defined(RAMPZ)
elpm r27,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r27,Z
#elif defined(__AVR_TINY__)
ld r27,Z
#else
lpm
mov r27,r0
#endif
eor r22,r27
ldi r27,2
eor r8,r27
ldi r30,lo8(table_1)
ldi r31,hi8(table_1)
#if defined(RAMPZ)
ldi r27,hh8(table_1)
out _SFR_IO_ADDR(RAMPZ),r27
#endif
mov r30,r22
#if defined(RAMPZ)
elpm r22,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r22,Z
#elif defined(__AVR_TINY__)
ld r22,Z
#else
lpm
mov r22,r0
#endif
mov r30,r23
#if defined(RAMPZ)
elpm r23,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r23,Z
#elif defined(__AVR_TINY__)
ld r23,Z
#else
lpm
mov r23,r0
#endif
mov r30,r2
#if defined(RAMPZ)
elpm r2,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r2,Z
#elif defined(__AVR_TINY__)
ld r2,Z
#else
lpm
mov r2,r0
#endif
mov r30,r3
#if defined(RAMPZ)
elpm r3,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r3,Z
#elif defined(__AVR_TINY__)
ld r3,Z
#else
lpm
mov r3,r0
#endif
mov r30,r4
#if defined(RAMPZ)
elpm r4,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r4,Z
#elif defined(__AVR_TINY__)
ld r4,Z
#else
lpm
mov r4,r0
#endif
mov r30,r5
#if defined(RAMPZ)
elpm r5,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r5,Z
#elif defined(__AVR_TINY__)
ld r5,Z
#else
lpm
mov r5,r0
#endif
mov r30,r6
#if defined(RAMPZ)
elpm r6,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r6,Z
#elif defined(__AVR_TINY__)
ld r6,Z
#else
lpm
mov r6,r0
#endif
mov r30,r7
#if defined(RAMPZ)
elpm r7,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r7,Z
#elif defined(__AVR_TINY__)
ld r7,Z
#else
lpm
mov r7,r0
#endif
mov r30,r8
#if defined(RAMPZ)
elpm r8,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r8,Z
#elif defined(__AVR_TINY__)
ld r8,Z
#else
lpm
mov r8,r0
#endif
mov r30,r9
#if defined(RAMPZ)
elpm r9,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r9,Z
#elif defined(__AVR_TINY__)
ld r9,Z
#else
lpm
mov r9,r0
#endif
mov r30,r10
#if defined(RAMPZ)
elpm r10,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r10,Z
#elif defined(__AVR_TINY__)
ld r10,Z
#else
lpm
mov r10,r0
#endif
mov r30,r11
#if defined(RAMPZ)
elpm r11,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r11,Z
#elif defined(__AVR_TINY__)
ld r11,Z
#else
lpm
mov r11,r0
#endif
mov r30,r18
#if defined(RAMPZ)
elpm r18,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r18,Z
#elif defined(__AVR_TINY__)
ld r18,Z
#else
lpm
mov r18,r0
#endif
mov r30,r19
#if defined(RAMPZ)
elpm r19,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r19,Z
#elif defined(__AVR_TINY__)
ld r19,Z
#else
lpm
mov r19,r0
#endif
mov r30,r20
#if defined(RAMPZ)
elpm r20,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r20,Z
#elif defined(__AVR_TINY__)
ld r20,Z
#else
lpm
mov r20,r0
#endif
mov r30,r21
#if defined(RAMPZ)
elpm r21,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r21,Z
#elif defined(__AVR_TINY__)
ld r21,Z
#else
lpm
mov r21,r0
#endif
ldi r30,lo8(table_3)
ldi r31,hi8(table_3)
#if defined(RAMPZ)
ldi r27,hh8(table_3)
out _SFR_IO_ADDR(RAMPZ),r27
#endif
ldd r12,Y+9
ldd r13,Y+10
ldd r14,Y+11
ldd r15,Y+12
ldd r24,Y+13
ldd r25,Y+14
ldd r16,Y+15
ldd r17,Y+16
std Y+9,r14
std Y+10,r12
std Y+11,r24
std Y+12,r17
std Y+13,r16
std Y+14,r15
std Y+15,r25
std Y+16,r13
ldd r12,Y+25
ldd r13,Y+26
ldd r14,Y+27
ldd r15,Y+28
ldd r24,Y+29
ldd r25,Y+30
ldd r16,Y+31
ldd r17,Y+32
mov r30,r12
#if defined(RAMPZ)
elpm r12,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r12,Z
#elif defined(__AVR_TINY__)
ld r12,Z
#else
lpm
mov r12,r0
#endif
mov r30,r13
#if defined(RAMPZ)
elpm r13,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r13,Z
#elif defined(__AVR_TINY__)
ld r13,Z
#else
lpm
mov r13,r0
#endif
mov r30,r14
#if defined(RAMPZ)
elpm r14,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r14,Z
#elif defined(__AVR_TINY__)
ld r14,Z
#else
lpm
mov r14,r0
#endif
mov r30,r15
#if defined(RAMPZ)
elpm r15,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r15,Z
#elif defined(__AVR_TINY__)
ld r15,Z
#else
lpm
mov r15,r0
#endif
mov r30,r24
#if defined(RAMPZ)
elpm r24,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r24,Z
#elif defined(__AVR_TINY__)
ld r24,Z
#else
lpm
mov r24,r0
#endif
mov r30,r25
#if defined(RAMPZ)
elpm r25,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r25,Z
#elif defined(__AVR_TINY__)
ld r25,Z
#else
lpm
mov r25,r0
#endif
mov r30,r16
#if defined(RAMPZ)
elpm r16,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r16,Z
#elif defined(__AVR_TINY__)
ld r16,Z
#else
lpm
mov r16,r0
#endif
mov r30,r17
#if defined(RAMPZ)
elpm r17,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r17,Z
#elif defined(__AVR_TINY__)
ld r17,Z
#else
lpm
mov r17,r0
#endif
std Y+25,r14
std Y+26,r12
std Y+27,r24
std Y+28,r17
std Y+29,r16
std Y+30,r15
std Y+31,r25
std Y+32,r13
ldi r30,lo8(table_2)
ldi r31,hi8(table_2)
#if defined(RAMPZ)
ldi r27,hh8(table_2)
out _SFR_IO_ADDR(RAMPZ),r27
#endif
ldd r12,Y+41
ldd r13,Y+42
ldd r14,Y+43
ldd r15,Y+44
ldd r24,Y+45
ldd r25,Y+46
ldd r16,Y+47
ldd r17,Y+48
mov r30,r12
#if defined(RAMPZ)
elpm r12,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r12,Z
#elif defined(__AVR_TINY__)
ld r12,Z
#else
lpm
mov r12,r0
#endif
mov r30,r13
#if defined(RAMPZ)
elpm r13,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r13,Z
#elif defined(__AVR_TINY__)
ld r13,Z
#else
lpm
mov r13,r0
#endif
mov r30,r14
#if defined(RAMPZ)
elpm r14,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r14,Z
#elif defined(__AVR_TINY__)
ld r14,Z
#else
lpm
mov r14,r0
#endif
mov r30,r15
#if defined(RAMPZ)
elpm r15,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r15,Z
#elif defined(__AVR_TINY__)
ld r15,Z
#else
lpm
mov r15,r0
#endif
mov r30,r24
#if defined(RAMPZ)
elpm r24,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r24,Z
#elif defined(__AVR_TINY__)
ld r24,Z
#else
lpm
mov r24,r0
#endif
mov r30,r25
#if defined(RAMPZ)
elpm r25,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r25,Z
#elif defined(__AVR_TINY__)
ld r25,Z
#else
lpm
mov r25,r0
#endif
mov r30,r16
#if defined(RAMPZ)
elpm r16,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r16,Z
#elif defined(__AVR_TINY__)
ld r16,Z
#else
lpm
mov r16,r0
#endif
mov r30,r17
#if defined(RAMPZ)
elpm r17,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r17,Z
#elif defined(__AVR_TINY__)
ld r17,Z
#else
lpm
mov r17,r0
#endif
std Y+41,r14
std Y+42,r12
std Y+43,r24
std Y+44,r17
std Y+45,r16
std Y+46,r15
std Y+47,r25
std Y+48,r13
eor r22,r18
eor r23,r19
eor r2,r20
eor r3,r21
eor r18,r4
eor r19,r5
eor r20,r6
eor r21,r7
eor r8,r18
eor r9,r19
eor r10,r20
eor r11,r21
mov r0,r8
mov r8,r9
mov r9,r10
mov r10,r11
mov r11,r0
mov r0,r18
mov r18,r20
mov r20,r0
mov r0,r19
mov r19,r21
mov r21,r0
mov r0,r3
mov r3,r2
mov r2,r23
mov r23,r22
mov r22,r0
ldd r0,Y+1
eor r4,r0
ldd r0,Y+2
eor r5,r0
ldd r0,Y+3
eor r6,r0
ldd r0,Y+4
eor r7,r0
ldd r0,Y+17
eor r4,r0
ldd r0,Y+18
eor r5,r0
ldd r0,Y+19
eor r6,r0
ldd r0,Y+20
eor r7,r0
ldd r0,Y+33
eor r4,r0
ldd r0,Y+34
eor r5,r0
ldd r0,Y+35
eor r6,r0
ldd r0,Y+36
eor r7,r0
ldd r0,Y+5
eor r8,r0
ldd r0,Y+6
eor r9,r0
ldd r0,Y+7
eor r10,r0
ldd r0,Y+8
eor r11,r0
ldd r0,Y+21
eor r8,r0
ldd r0,Y+22
eor r9,r0
ldd r0,Y+23
eor r10,r0
ldd r0,Y+24
eor r11,r0
ldd r0,Y+37
eor r8,r0
ldd r0,Y+38
eor r9,r0
ldd r0,Y+39
eor r10,r0
ldd r0,Y+40
eor r11,r0
ldi r30,lo8(table_4)
ldi r31,hi8(table_4)
#if defined(RAMPZ)
ldi r24,hh8(table_4)
out _SFR_IO_ADDR(RAMPZ),r24
#endif
dec r26
mov r30,r26
#if defined(RAMPZ)
elpm r27,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r27,Z
#elif defined(__AVR_TINY__)
ld r27,Z
#else
lpm
mov r27,r0
#endif
eor r8,r27
dec r26
mov r30,r26
#if defined(RAMPZ)
elpm r27,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r27,Z
#elif defined(__AVR_TINY__)
ld r27,Z
#else
lpm
mov r27,r0
#endif
eor r4,r27
ldi r27,2
eor r18,r27
ldi r30,lo8(table_1)
ldi r31,hi8(table_1)
#if defined(RAMPZ)
ldi r27,hh8(table_1)
out _SFR_IO_ADDR(RAMPZ),r27
#endif
mov r30,r4
#if defined(RAMPZ)
elpm r4,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r4,Z
#elif defined(__AVR_TINY__)
ld r4,Z
#else
lpm
mov r4,r0
#endif
mov r30,r5
#if defined(RAMPZ)
elpm r5,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r5,Z
#elif defined(__AVR_TINY__)
ld r5,Z
#else
lpm
mov r5,r0
#endif
mov r30,r6
#if defined(RAMPZ)
elpm r6,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r6,Z
#elif defined(__AVR_TINY__)
ld r6,Z
#else
lpm
mov r6,r0
#endif
mov r30,r7
#if defined(RAMPZ)
elpm r7,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r7,Z
#elif defined(__AVR_TINY__)
ld r7,Z
#else
lpm
mov r7,r0
#endif
mov r30,r8
#if defined(RAMPZ)
elpm r8,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r8,Z
#elif defined(__AVR_TINY__)
ld r8,Z
#else
lpm
mov r8,r0
#endif
mov r30,r9
#if defined(RAMPZ)
elpm r9,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r9,Z
#elif defined(__AVR_TINY__)
ld r9,Z
#else
lpm
mov r9,r0
#endif
mov r30,r10
#if defined(RAMPZ)
elpm r10,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r10,Z
#elif defined(__AVR_TINY__)
ld r10,Z
#else
lpm
mov r10,r0
#endif
mov r30,r11
#if defined(RAMPZ)
elpm r11,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r11,Z
#elif defined(__AVR_TINY__)
ld r11,Z
#else
lpm
mov r11,r0
#endif
mov r30,r18
#if defined(RAMPZ)
elpm r18,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r18,Z
#elif defined(__AVR_TINY__)
ld r18,Z
#else
lpm
mov r18,r0
#endif
mov r30,r19
#if defined(RAMPZ)
elpm r19,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r19,Z
#elif defined(__AVR_TINY__)
ld r19,Z
#else
lpm
mov r19,r0
#endif
mov r30,r20
#if defined(RAMPZ)
elpm r20,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r20,Z
#elif defined(__AVR_TINY__)
ld r20,Z
#else
lpm
mov r20,r0
#endif
mov r30,r21
#if defined(RAMPZ)
elpm r21,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r21,Z
#elif defined(__AVR_TINY__)
ld r21,Z
#else
lpm
mov r21,r0
#endif
mov r30,r22
#if defined(RAMPZ)
elpm r22,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r22,Z
#elif defined(__AVR_TINY__)
ld r22,Z
#else
lpm
mov r22,r0
#endif
mov r30,r23
#if defined(RAMPZ)
elpm r23,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r23,Z
#elif defined(__AVR_TINY__)
ld r23,Z
#else
lpm
mov r23,r0
#endif
mov r30,r2
#if defined(RAMPZ)
elpm r2,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r2,Z
#elif defined(__AVR_TINY__)
ld r2,Z
#else
lpm
mov r2,r0
#endif
mov r30,r3
#if defined(RAMPZ)
elpm r3,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r3,Z
#elif defined(__AVR_TINY__)
ld r3,Z
#else
lpm
mov r3,r0
#endif
ldi r30,lo8(table_3)
ldi r31,hi8(table_3)
#if defined(RAMPZ)
ldi r27,hh8(table_3)
out _SFR_IO_ADDR(RAMPZ),r27
#endif
ldd r12,Y+1
ldd r13,Y+2
ldd r14,Y+3
ldd r15,Y+4
ldd r24,Y+5
ldd r25,Y+6
ldd r16,Y+7
ldd r17,Y+8
std Y+1,r14
std Y+2,r12
std Y+3,r24
std Y+4,r17
std Y+5,r16
std Y+6,r15
std Y+7,r25
std Y+8,r13
ldd r12,Y+17
ldd r13,Y+18
ldd r14,Y+19
ldd r15,Y+20
ldd r24,Y+21
ldd r25,Y+22
ldd r16,Y+23
ldd r17,Y+24
mov r30,r12
#if defined(RAMPZ)
elpm r12,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r12,Z
#elif defined(__AVR_TINY__)
ld r12,Z
#else
lpm
mov r12,r0
#endif
mov r30,r13
#if defined(RAMPZ)
elpm r13,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r13,Z
#elif defined(__AVR_TINY__)
ld r13,Z
#else
lpm
mov r13,r0
#endif
mov r30,r14
#if defined(RAMPZ)
elpm r14,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r14,Z
#elif defined(__AVR_TINY__)
ld r14,Z
#else
lpm
mov r14,r0
#endif
mov r30,r15
#if defined(RAMPZ)
elpm r15,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r15,Z
#elif defined(__AVR_TINY__)
ld r15,Z
#else
lpm
mov r15,r0
#endif
mov r30,r24
#if defined(RAMPZ)
elpm r24,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r24,Z
#elif defined(__AVR_TINY__)
ld r24,Z
#else
lpm
mov r24,r0
#endif
mov r30,r25
#if defined(RAMPZ)
elpm r25,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r25,Z
#elif defined(__AVR_TINY__)
ld r25,Z
#else
lpm
mov r25,r0
#endif
mov r30,r16
#if defined(RAMPZ)
elpm r16,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r16,Z
#elif defined(__AVR_TINY__)
ld r16,Z
#else
lpm
mov r16,r0
#endif
mov r30,r17
#if defined(RAMPZ)
elpm r17,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r17,Z
#elif defined(__AVR_TINY__)
ld r17,Z
#else
lpm
mov r17,r0
#endif
std Y+17,r14
std Y+18,r12
std Y+19,r24
std Y+20,r17
std Y+21,r16
std Y+22,r15
std Y+23,r25
std Y+24,r13
ldi r30,lo8(table_2)
ldi r31,hi8(table_2)
#if defined(RAMPZ)
ldi r27,hh8(table_2)
out _SFR_IO_ADDR(RAMPZ),r27
#endif
ldd r12,Y+33
ldd r13,Y+34
ldd r14,Y+35
ldd r15,Y+36
ldd r24,Y+37
ldd r25,Y+38
ldd r16,Y+39
ldd r17,Y+40
mov r30,r12
#if defined(RAMPZ)
elpm r12,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r12,Z
#elif defined(__AVR_TINY__)
ld r12,Z
#else
lpm
mov r12,r0
#endif
mov r30,r13
#if defined(RAMPZ)
elpm r13,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r13,Z
#elif defined(__AVR_TINY__)
ld r13,Z
#else
lpm
mov r13,r0
#endif
mov r30,r14
#if defined(RAMPZ)
elpm r14,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r14,Z
#elif defined(__AVR_TINY__)
ld r14,Z
#else
lpm
mov r14,r0
#endif
mov r30,r15
#if defined(RAMPZ)
elpm r15,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r15,Z
#elif defined(__AVR_TINY__)
ld r15,Z
#else
lpm
mov r15,r0
#endif
mov r30,r24
#if defined(RAMPZ)
elpm r24,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r24,Z
#elif defined(__AVR_TINY__)
ld r24,Z
#else
lpm
mov r24,r0
#endif
mov r30,r25
#if defined(RAMPZ)
elpm r25,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r25,Z
#elif defined(__AVR_TINY__)
ld r25,Z
#else
lpm
mov r25,r0
#endif
mov r30,r16
#if defined(RAMPZ)
elpm r16,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r16,Z
#elif defined(__AVR_TINY__)
ld r16,Z
#else
lpm
mov r16,r0
#endif
mov r30,r17
#if defined(RAMPZ)
elpm r17,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r17,Z
#elif defined(__AVR_TINY__)
ld r17,Z
#else
lpm
mov r17,r0
#endif
std Y+33,r14
std Y+34,r12
std Y+35,r24
std Y+36,r17
std Y+37,r16
std Y+38,r15
std Y+39,r25
std Y+40,r13
eor r4,r22
eor r5,r23
eor r6,r2
eor r7,r3
eor r22,r8
eor r23,r9
eor r2,r10
eor r3,r11
eor r18,r22
eor r19,r23
eor r20,r2
eor r21,r3
mov r0,r18
mov r18,r19
mov r19,r20
mov r20,r21
mov r21,r0
mov r0,r22
mov r22,r2
mov r2,r0
mov r0,r23
mov r23,r3
mov r3,r0
mov r0,r7
mov r7,r6
mov r6,r5
mov r5,r4
mov r4,r0
ldd r0,Y+9
eor r8,r0
ldd r0,Y+10
eor r9,r0
ldd r0,Y+11
eor r10,r0
ldd r0,Y+12
eor r11,r0
ldd r0,Y+25
eor r8,r0
ldd r0,Y+26
eor r9,r0
ldd r0,Y+27
eor r10,r0
ldd r0,Y+28
eor r11,r0
ldd r0,Y+41
eor r8,r0
ldd r0,Y+42
eor r9,r0
ldd r0,Y+43
eor r10,r0
ldd r0,Y+44
eor r11,r0
ldd r0,Y+13
eor r18,r0
ldd r0,Y+14
eor r19,r0
ldd r0,Y+15
eor r20,r0
ldd r0,Y+16
eor r21,r0
ldd r0,Y+29
eor r18,r0
ldd r0,Y+30
eor r19,r0
ldd r0,Y+31
eor r20,r0
ldd r0,Y+32
eor r21,r0
ldd r0,Y+45
eor r18,r0
ldd r0,Y+46
eor r19,r0
ldd r0,Y+47
eor r20,r0
ldd r0,Y+48
eor r21,r0
ldi r30,lo8(table_4)
ldi r31,hi8(table_4)
#if defined(RAMPZ)
ldi r24,hh8(table_4)
out _SFR_IO_ADDR(RAMPZ),r24
#endif
dec r26
mov r30,r26
#if defined(RAMPZ)
elpm r27,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r27,Z
#elif defined(__AVR_TINY__)
ld r27,Z
#else
lpm
mov r27,r0
#endif
eor r18,r27
dec r26
mov r30,r26
#if defined(RAMPZ)
elpm r27,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r27,Z
#elif defined(__AVR_TINY__)
ld r27,Z
#else
lpm
mov r27,r0
#endif
eor r8,r27
ldi r27,2
eor r22,r27
ldi r30,lo8(table_1)
ldi r31,hi8(table_1)
#if defined(RAMPZ)
ldi r27,hh8(table_1)
out _SFR_IO_ADDR(RAMPZ),r27
#endif
mov r30,r8
#if defined(RAMPZ)
elpm r8,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r8,Z
#elif defined(__AVR_TINY__)
ld r8,Z
#else
lpm
mov r8,r0
#endif
mov r30,r9
#if defined(RAMPZ)
elpm r9,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r9,Z
#elif defined(__AVR_TINY__)
ld r9,Z
#else
lpm
mov r9,r0
#endif
mov r30,r10
#if defined(RAMPZ)
elpm r10,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r10,Z
#elif defined(__AVR_TINY__)
ld r10,Z
#else
lpm
mov r10,r0
#endif
mov r30,r11
#if defined(RAMPZ)
elpm r11,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r11,Z
#elif defined(__AVR_TINY__)
ld r11,Z
#else
lpm
mov r11,r0
#endif
mov r30,r18
#if defined(RAMPZ)
elpm r18,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r18,Z
#elif defined(__AVR_TINY__)
ld r18,Z
#else
lpm
mov r18,r0
#endif
mov r30,r19
#if defined(RAMPZ)
elpm r19,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r19,Z
#elif defined(__AVR_TINY__)
ld r19,Z
#else
lpm
mov r19,r0
#endif
mov r30,r20
#if defined(RAMPZ)
elpm r20,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r20,Z
#elif defined(__AVR_TINY__)
ld r20,Z
#else
lpm
mov r20,r0
#endif
mov r30,r21
#if defined(RAMPZ)
elpm r21,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r21,Z
#elif defined(__AVR_TINY__)
ld r21,Z
#else
lpm
mov r21,r0
#endif
mov r30,r22
#if defined(RAMPZ)
elpm r22,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r22,Z
#elif defined(__AVR_TINY__)
ld r22,Z
#else
lpm
mov r22,r0
#endif
mov r30,r23
#if defined(RAMPZ)
elpm r23,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r23,Z
#elif defined(__AVR_TINY__)
ld r23,Z
#else
lpm
mov r23,r0
#endif
mov r30,r2
#if defined(RAMPZ)
elpm r2,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r2,Z
#elif defined(__AVR_TINY__)
ld r2,Z
#else
lpm
mov r2,r0
#endif
mov r30,r3
#if defined(RAMPZ)
elpm r3,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r3,Z
#elif defined(__AVR_TINY__)
ld r3,Z
#else
lpm
mov r3,r0
#endif
mov r30,r4
#if defined(RAMPZ)
elpm r4,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r4,Z
#elif defined(__AVR_TINY__)
ld r4,Z
#else
lpm
mov r4,r0
#endif
mov r30,r5
#if defined(RAMPZ)
elpm r5,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r5,Z
#elif defined(__AVR_TINY__)
ld r5,Z
#else
lpm
mov r5,r0
#endif
mov r30,r6
#if defined(RAMPZ)
elpm r6,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r6,Z
#elif defined(__AVR_TINY__)
ld r6,Z
#else
lpm
mov r6,r0
#endif
mov r30,r7
#if defined(RAMPZ)
elpm r7,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r7,Z
#elif defined(__AVR_TINY__)
ld r7,Z
#else
lpm
mov r7,r0
#endif
ldi r30,lo8(table_3)
ldi r31,hi8(table_3)
#if defined(RAMPZ)
ldi r27,hh8(table_3)
out _SFR_IO_ADDR(RAMPZ),r27
#endif
ldd r12,Y+9
ldd r13,Y+10
ldd r14,Y+11
ldd r15,Y+12
ldd r24,Y+13
ldd r25,Y+14
ldd r16,Y+15
ldd r17,Y+16
std Y+9,r14
std Y+10,r12
std Y+11,r24
std Y+12,r17
std Y+13,r16
std Y+14,r15
std Y+15,r25
std Y+16,r13
ldd r12,Y+25
ldd r13,Y+26
ldd r14,Y+27
ldd r15,Y+28
ldd r24,Y+29
ldd r25,Y+30
ldd r16,Y+31
ldd r17,Y+32
mov r30,r12
#if defined(RAMPZ)
elpm r12,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r12,Z
#elif defined(__AVR_TINY__)
ld r12,Z
#else
lpm
mov r12,r0
#endif
mov r30,r13
#if defined(RAMPZ)
elpm r13,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r13,Z
#elif defined(__AVR_TINY__)
ld r13,Z
#else
lpm
mov r13,r0
#endif
mov r30,r14
#if defined(RAMPZ)
elpm r14,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r14,Z
#elif defined(__AVR_TINY__)
ld r14,Z
#else
lpm
mov r14,r0
#endif
mov r30,r15
#if defined(RAMPZ)
elpm r15,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r15,Z
#elif defined(__AVR_TINY__)
ld r15,Z
#else
lpm
mov r15,r0
#endif
mov r30,r24
#if defined(RAMPZ)
elpm r24,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r24,Z
#elif defined(__AVR_TINY__)
ld r24,Z
#else
lpm
mov r24,r0
#endif
mov r30,r25
#if defined(RAMPZ)
elpm r25,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r25,Z
#elif defined(__AVR_TINY__)
ld r25,Z
#else
lpm
mov r25,r0
#endif
mov r30,r16
#if defined(RAMPZ)
elpm r16,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r16,Z
#elif defined(__AVR_TINY__)
ld r16,Z
#else
lpm
mov r16,r0
#endif
mov r30,r17
#if defined(RAMPZ)
elpm r17,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r17,Z
#elif defined(__AVR_TINY__)
ld r17,Z
#else
lpm
mov r17,r0
#endif
std Y+25,r14
std Y+26,r12
std Y+27,r24
std Y+28,r17
std Y+29,r16
std Y+30,r15
std Y+31,r25
std Y+32,r13
ldi r30,lo8(table_2)
ldi r31,hi8(table_2)
#if defined(RAMPZ)
ldi r27,hh8(table_2)
out _SFR_IO_ADDR(RAMPZ),r27
#endif
ldd r12,Y+41
ldd r13,Y+42
ldd r14,Y+43
ldd r15,Y+44
ldd r24,Y+45
ldd r25,Y+46
ldd r16,Y+47
ldd r17,Y+48
mov r30,r12
#if defined(RAMPZ)
elpm r12,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r12,Z
#elif defined(__AVR_TINY__)
ld r12,Z
#else
lpm
mov r12,r0
#endif
mov r30,r13
#if defined(RAMPZ)
elpm r13,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r13,Z
#elif defined(__AVR_TINY__)
ld r13,Z
#else
lpm
mov r13,r0
#endif
mov r30,r14
#if defined(RAMPZ)
elpm r14,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r14,Z
#elif defined(__AVR_TINY__)
ld r14,Z
#else
lpm
mov r14,r0
#endif
mov r30,r15
#if defined(RAMPZ)
elpm r15,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r15,Z
#elif defined(__AVR_TINY__)
ld r15,Z
#else
lpm
mov r15,r0
#endif
mov r30,r24
#if defined(RAMPZ)
elpm r24,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r24,Z
#elif defined(__AVR_TINY__)
ld r24,Z
#else
lpm
mov r24,r0
#endif
mov r30,r25
#if defined(RAMPZ)
elpm r25,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r25,Z
#elif defined(__AVR_TINY__)
ld r25,Z
#else
lpm
mov r25,r0
#endif
mov r30,r16
#if defined(RAMPZ)
elpm r16,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r16,Z
#elif defined(__AVR_TINY__)
ld r16,Z
#else
lpm
mov r16,r0
#endif
mov r30,r17
#if defined(RAMPZ)
elpm r17,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r17,Z
#elif defined(__AVR_TINY__)
ld r17,Z
#else
lpm
mov r17,r0
#endif
std Y+41,r14
std Y+42,r12
std Y+43,r24
std Y+44,r17
std Y+45,r16
std Y+46,r15
std Y+47,r25
std Y+48,r13
eor r8,r4
eor r9,r5
eor r10,r6
eor r11,r7
eor r4,r18
eor r5,r19
eor r6,r20
eor r7,r21
eor r22,r4
eor r23,r5
eor r2,r6
eor r3,r7
mov r0,r22
mov r22,r23
mov r23,r2
mov r2,r3
mov r3,r0
mov r0,r4
mov r4,r6
mov r6,r0
mov r0,r5
mov r5,r7
mov r7,r0
mov r0,r11
mov r11,r10
mov r10,r9
mov r9,r8
mov r8,r0
ldd r0,Y+1
eor r18,r0
ldd r0,Y+2
eor r19,r0
ldd r0,Y+3
eor r20,r0
ldd r0,Y+4
eor r21,r0
ldd r0,Y+17
eor r18,r0
ldd r0,Y+18
eor r19,r0
ldd r0,Y+19
eor r20,r0
ldd r0,Y+20
eor r21,r0
ldd r0,Y+33
eor r18,r0
ldd r0,Y+34
eor r19,r0
ldd r0,Y+35
eor r20,r0
ldd r0,Y+36
eor r21,r0
ldd r0,Y+5
eor r22,r0
ldd r0,Y+6
eor r23,r0
ldd r0,Y+7
eor r2,r0
ldd r0,Y+8
eor r3,r0
ldd r0,Y+21
eor r22,r0
ldd r0,Y+22
eor r23,r0
ldd r0,Y+23
eor r2,r0
ldd r0,Y+24
eor r3,r0
ldd r0,Y+37
eor r22,r0
ldd r0,Y+38
eor r23,r0
ldd r0,Y+39
eor r2,r0
ldd r0,Y+40
eor r3,r0
ldi r30,lo8(table_4)
ldi r31,hi8(table_4)
#if defined(RAMPZ)
ldi r24,hh8(table_4)
out _SFR_IO_ADDR(RAMPZ),r24
#endif
dec r26
mov r30,r26
#if defined(RAMPZ)
elpm r27,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r27,Z
#elif defined(__AVR_TINY__)
ld r27,Z
#else
lpm
mov r27,r0
#endif
eor r22,r27
dec r26
mov r30,r26
#if defined(RAMPZ)
elpm r27,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r27,Z
#elif defined(__AVR_TINY__)
ld r27,Z
#else
lpm
mov r27,r0
#endif
eor r18,r27
ldi r27,2
eor r4,r27
ldi r30,lo8(table_1)
ldi r31,hi8(table_1)
#if defined(RAMPZ)
ldi r27,hh8(table_1)
out _SFR_IO_ADDR(RAMPZ),r27
#endif
mov r30,r18
#if defined(RAMPZ)
elpm r18,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r18,Z
#elif defined(__AVR_TINY__)
ld r18,Z
#else
lpm
mov r18,r0
#endif
mov r30,r19
#if defined(RAMPZ)
elpm r19,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r19,Z
#elif defined(__AVR_TINY__)
ld r19,Z
#else
lpm
mov r19,r0
#endif
mov r30,r20
#if defined(RAMPZ)
elpm r20,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r20,Z
#elif defined(__AVR_TINY__)
ld r20,Z
#else
lpm
mov r20,r0
#endif
mov r30,r21
#if defined(RAMPZ)
elpm r21,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r21,Z
#elif defined(__AVR_TINY__)
ld r21,Z
#else
lpm
mov r21,r0
#endif
mov r30,r22
#if defined(RAMPZ)
elpm r22,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r22,Z
#elif defined(__AVR_TINY__)
ld r22,Z
#else
lpm
mov r22,r0
#endif
mov r30,r23
#if defined(RAMPZ)
elpm r23,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r23,Z
#elif defined(__AVR_TINY__)
ld r23,Z
#else
lpm
mov r23,r0
#endif
mov r30,r2
#if defined(RAMPZ)
elpm r2,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r2,Z
#elif defined(__AVR_TINY__)
ld r2,Z
#else
lpm
mov r2,r0
#endif
mov r30,r3
#if defined(RAMPZ)
elpm r3,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r3,Z
#elif defined(__AVR_TINY__)
ld r3,Z
#else
lpm
mov r3,r0
#endif
mov r30,r4
#if defined(RAMPZ)
elpm r4,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r4,Z
#elif defined(__AVR_TINY__)
ld r4,Z
#else
lpm
mov r4,r0
#endif
mov r30,r5
#if defined(RAMPZ)
elpm r5,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r5,Z
#elif defined(__AVR_TINY__)
ld r5,Z
#else
lpm
mov r5,r0
#endif
mov r30,r6
#if defined(RAMPZ)
elpm r6,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r6,Z
#elif defined(__AVR_TINY__)
ld r6,Z
#else
lpm
mov r6,r0
#endif
mov r30,r7
#if defined(RAMPZ)
elpm r7,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r7,Z
#elif defined(__AVR_TINY__)
ld r7,Z
#else
lpm
mov r7,r0
#endif
mov r30,r8
#if defined(RAMPZ)
elpm r8,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r8,Z
#elif defined(__AVR_TINY__)
ld r8,Z
#else
lpm
mov r8,r0
#endif
mov r30,r9
#if defined(RAMPZ)
elpm r9,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r9,Z
#elif defined(__AVR_TINY__)
ld r9,Z
#else
lpm
mov r9,r0
#endif
mov r30,r10
#if defined(RAMPZ)
elpm r10,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r10,Z
#elif defined(__AVR_TINY__)
ld r10,Z
#else
lpm
mov r10,r0
#endif
mov r30,r11
#if defined(RAMPZ)
elpm r11,Z
#elif defined(__AVR_HAVE_LPMX__)
lpm r11,Z
#elif defined(__AVR_TINY__)
ld r11,Z
#else
lpm
mov r11,r0
#endif
cp r26,r1
breq 903f
ldi r30,lo8(table_3)
ldi r31,hi8(table_3)
#if defined(RAMPZ)
ldi r27,hh8(table_3)
out _SFR_IO_ADDR(RAMPZ),r27
#endif
rjmp 227b
903:
#if defined(RAMPZ)
pop r0
out _SFR_IO_ADDR(RAMPZ),r0
#endif
ldd r26,Y+49
ldd r27,Y+50
st X+,r18
st X+,r19
st X+,r20
st X+,r21
st X+,r22
st X+,r23
st X+,r2
st X+,r3
st X+,r4
st X+,r5
st X+,r6
st X+,r7
st X+,r8
st X+,r9
st X+,r10
st X+,r11
adiw r28,50
in r0,0x3f
cli
out 0x3e,r29
out 0x3f,r0
out 0x3d,r28
pop r17
pop r16
pop r15
pop r14
pop r13
pop r12
pop r11
pop r10
pop r9
pop r8
pop r7
pop r6
pop r5
pop r4
pop r3
pop r2
pop r29
pop r28
ret
.size skinny_plus_decrypt, .-skinny_plus_decrypt
.global skinny_plus_decrypt_tk_full
.set skinny_plus_decrypt_tk_full,skinny_plus_decrypt
#endif
|
aadomn/cymric
| 11,916
|
artifact_tches2025-3/benchmark_avr/cymric_lwc/cymric_lwc/xoodyak/internal-xoodoo-avr.S
|
#if defined(__AVR__)
/*
* Copyright (C) 2021 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <avr/io.h>
/* Automatically generated - do not edit */
.text
.global xoodoo_permute
.type xoodoo_permute, @function
xoodoo_permute:
push r28
push r29
push r2
push r3
push r4
push r5
push r6
push r7
push r8
push r9
push r10
push r11
push r12
push r13
push r14
push r15
movw r30,r24
.L__stack_usage = 16
ldi r18,88
mov r19,r1
rcall 34f
ldi r18,56
rcall 34f
ldi r18,192
ldi r19,3
rcall 34f
ldi r18,208
mov r19,r1
rcall 34f
ldi r18,32
ldi r19,1
rcall 34f
ldi r18,20
mov r19,r1
rcall 34f
ldi r18,96
rcall 34f
ldi r18,44
rcall 34f
ldi r18,128
ldi r19,3
rcall 34f
ldi r18,240
mov r19,r1
rcall 34f
ldi r18,160
ldi r19,1
rcall 34f
ldi r18,18
mov r19,r1
rcall 34f
rjmp 888f
34:
ldd r6,Z+12
ldd r7,Z+13
ldd r8,Z+14
ldd r9,Z+15
ldd r0,Z+28
eor r6,r0
ldd r0,Z+29
eor r7,r0
ldd r0,Z+30
eor r8,r0
ldd r0,Z+31
eor r9,r0
ldd r0,Z+44
eor r6,r0
ldd r0,Z+45
eor r7,r0
ldd r0,Z+46
eor r8,r0
ldd r0,Z+47
eor r9,r0
ld r20,Z
ldd r21,Z+1
ldd r22,Z+2
ldd r23,Z+3
ldd r26,Z+16
ldd r27,Z+17
ldd r28,Z+18
ldd r29,Z+19
ldd r2,Z+32
ldd r3,Z+33
ldd r4,Z+34
ldd r5,Z+35
movw r10,r20
movw r12,r22
eor r10,r26
eor r11,r27
eor r12,r28
eor r13,r29
eor r10,r2
eor r11,r3
eor r12,r4
eor r13,r5
movw r14,r6
movw r24,r8
mov r0,r1
lsr r9
ror r8
ror r7
ror r6
ror r0
lsr r9
ror r8
ror r7
ror r6
ror r0
lsr r9
ror r8
ror r7
ror r6
ror r0
or r9,r0
mov r0,r1
lsr r25
ror r24
ror r15
ror r14
ror r0
lsr r25
ror r24
ror r15
ror r14
ror r0
or r25,r0
eor r9,r24
eor r6,r25
eor r7,r14
eor r8,r15
movw r14,r10
movw r24,r12
mov r0,r1
lsr r13
ror r12
ror r11
ror r10
ror r0
lsr r13
ror r12
ror r11
ror r10
ror r0
lsr r13
ror r12
ror r11
ror r10
ror r0
or r13,r0
mov r0,r1
lsr r25
ror r24
ror r15
ror r14
ror r0
lsr r25
ror r24
ror r15
ror r14
ror r0
or r25,r0
eor r13,r24
eor r10,r25
eor r11,r14
eor r12,r15
eor r20,r9
eor r21,r6
eor r22,r7
eor r23,r8
eor r26,r9
eor r27,r6
eor r28,r7
eor r29,r8
eor r2,r9
eor r3,r6
eor r4,r7
eor r5,r8
st Z,r20
std Z+1,r21
std Z+2,r22
std Z+3,r23
std Z+16,r26
std Z+17,r27
std Z+18,r28
std Z+19,r29
std Z+32,r2
std Z+33,r3
std Z+34,r4
std Z+35,r5
ldd r20,Z+4
ldd r21,Z+5
ldd r22,Z+6
ldd r23,Z+7
ldd r26,Z+20
ldd r27,Z+21
ldd r28,Z+22
ldd r29,Z+23
ldd r2,Z+36
ldd r3,Z+37
ldd r4,Z+38
ldd r5,Z+39
movw r6,r20
movw r8,r22
eor r6,r26
eor r7,r27
eor r8,r28
eor r9,r29
eor r6,r2
eor r7,r3
eor r8,r4
eor r9,r5
movw r14,r6
movw r24,r8
mov r0,r1
lsr r9
ror r8
ror r7
ror r6
ror r0
lsr r9
ror r8
ror r7
ror r6
ror r0
lsr r9
ror r8
ror r7
ror r6
ror r0
or r9,r0
mov r0,r1
lsr r25
ror r24
ror r15
ror r14
ror r0
lsr r25
ror r24
ror r15
ror r14
ror r0
or r25,r0
eor r9,r24
eor r6,r25
eor r7,r14
eor r8,r15
eor r20,r13
eor r21,r10
eor r22,r11
eor r23,r12
eor r26,r13
eor r27,r10
eor r28,r11
eor r29,r12
eor r2,r13
eor r3,r10
eor r4,r11
eor r5,r12
std Z+4,r20
std Z+5,r21
std Z+6,r22
std Z+7,r23
std Z+20,r26
std Z+21,r27
std Z+22,r28
std Z+23,r29
std Z+36,r2
std Z+37,r3
std Z+38,r4
std Z+39,r5
ldd r20,Z+8
ldd r21,Z+9
ldd r22,Z+10
ldd r23,Z+11
ldd r26,Z+24
ldd r27,Z+25
ldd r28,Z+26
ldd r29,Z+27
ldd r2,Z+40
ldd r3,Z+41
ldd r4,Z+42
ldd r5,Z+43
movw r10,r20
movw r12,r22
eor r10,r26
eor r11,r27
eor r12,r28
eor r13,r29
eor r10,r2
eor r11,r3
eor r12,r4
eor r13,r5
movw r14,r10
movw r24,r12
mov r0,r1
lsr r13
ror r12
ror r11
ror r10
ror r0
lsr r13
ror r12
ror r11
ror r10
ror r0
lsr r13
ror r12
ror r11
ror r10
ror r0
or r13,r0
mov r0,r1
lsr r25
ror r24
ror r15
ror r14
ror r0
lsr r25
ror r24
ror r15
ror r14
ror r0
or r25,r0
eor r13,r24
eor r10,r25
eor r11,r14
eor r12,r15
eor r20,r9
eor r21,r6
eor r22,r7
eor r23,r8
eor r26,r9
eor r27,r6
eor r28,r7
eor r29,r8
eor r2,r9
eor r3,r6
eor r4,r7
eor r5,r8
std Z+8,r20
std Z+9,r21
std Z+10,r22
std Z+11,r23
std Z+24,r26
std Z+25,r27
std Z+26,r28
std Z+27,r29
std Z+40,r2
std Z+41,r3
std Z+42,r4
std Z+43,r5
ldd r0,Z+12
eor r0,r13
std Z+12,r0
ldd r0,Z+13
eor r0,r10
std Z+13,r0
ldd r0,Z+14
eor r0,r11
std Z+14,r0
ldd r0,Z+15
eor r0,r12
std Z+15,r0
ldd r6,Z+28
ldd r7,Z+29
ldd r8,Z+30
ldd r9,Z+31
eor r6,r13
eor r7,r10
eor r8,r11
eor r9,r12
ldd r14,Z+44
ldd r15,Z+45
ldd r24,Z+46
ldd r25,Z+47
eor r14,r13
eor r15,r10
eor r24,r11
eor r25,r12
ldd r10,Z+24
ldd r11,Z+25
ldd r12,Z+26
ldd r13,Z+27
std Z+28,r10
std Z+29,r11
std Z+30,r12
std Z+31,r13
ldd r10,Z+20
ldd r11,Z+21
ldd r12,Z+22
ldd r13,Z+23
std Z+24,r10
std Z+25,r11
std Z+26,r12
std Z+27,r13
ldd r10,Z+16
ldd r11,Z+17
ldd r12,Z+18
ldd r13,Z+19
std Z+20,r10
std Z+21,r11
std Z+22,r12
std Z+23,r13
std Z+16,r6
std Z+17,r7
std Z+18,r8
std Z+19,r9
ldd r6,Z+32
ldd r7,Z+33
ldd r8,Z+34
ldd r9,Z+35
mov r0,r9
mov r9,r8
mov r8,r7
mov r7,r6
mov r6,r0
lsl r6
rol r7
rol r8
rol r9
adc r6,r1
lsl r6
rol r7
rol r8
rol r9
adc r6,r1
lsl r6
rol r7
rol r8
rol r9
adc r6,r1
std Z+32,r6
std Z+33,r7
std Z+34,r8
std Z+35,r9
ldd r6,Z+36
ldd r7,Z+37
ldd r8,Z+38
ldd r9,Z+39
mov r0,r9
mov r9,r8
mov r8,r7
mov r7,r6
mov r6,r0
lsl r6
rol r7
rol r8
rol r9
adc r6,r1
lsl r6
rol r7
rol r8
rol r9
adc r6,r1
lsl r6
rol r7
rol r8
rol r9
adc r6,r1
std Z+36,r6
std Z+37,r7
std Z+38,r8
std Z+39,r9
ldd r6,Z+40
ldd r7,Z+41
ldd r8,Z+42
ldd r9,Z+43
mov r0,r9
mov r9,r8
mov r8,r7
mov r7,r6
mov r6,r0
lsl r6
rol r7
rol r8
rol r9
adc r6,r1
lsl r6
rol r7
rol r8
rol r9
adc r6,r1
lsl r6
rol r7
rol r8
rol r9
adc r6,r1
std Z+40,r6
std Z+41,r7
std Z+42,r8
std Z+43,r9
mov r0,r25
mov r25,r24
mov r24,r15
mov r15,r14
mov r14,r0
lsl r14
rol r15
rol r24
rol r25
adc r14,r1
lsl r14
rol r15
rol r24
rol r25
adc r14,r1
lsl r14
rol r15
rol r24
rol r25
adc r14,r1
std Z+44,r14
std Z+45,r15
std Z+46,r24
std Z+47,r25
ld r20,Z
ldd r21,Z+1
ldd r22,Z+2
ldd r23,Z+3
eor r20,r18
eor r21,r19
ldd r26,Z+16
ldd r27,Z+17
ldd r28,Z+18
ldd r29,Z+19
ldd r2,Z+32
ldd r3,Z+33
ldd r4,Z+34
ldd r5,Z+35
movw r6,r2
movw r8,r4
mov r0,r26
com r0
and r6,r0
mov r0,r27
com r0
and r7,r0
mov r0,r28
com r0
and r8,r0
mov r0,r29
com r0
and r9,r0
eor r20,r6
eor r21,r7
eor r22,r8
eor r23,r9
st Z,r20
std Z+1,r21
std Z+2,r22
std Z+3,r23
movw r6,r20
movw r8,r22
mov r0,r2
com r0
and r6,r0
mov r0,r3
com r0
and r7,r0
mov r0,r4
com r0
and r8,r0
mov r0,r5
com r0
and r9,r0
eor r26,r6
eor r27,r7
eor r28,r8
eor r29,r9
std Z+16,r26
std Z+17,r27
std Z+18,r28
std Z+19,r29
mov r0,r20
com r0
and r26,r0
mov r0,r21
com r0
and r27,r0
mov r0,r22
com r0
and r28,r0
mov r0,r23
com r0
and r29,r0
eor r2,r26
eor r3,r27
eor r4,r28
eor r5,r29
std Z+32,r2
std Z+33,r3
std Z+34,r4
std Z+35,r5
ldd r20,Z+4
ldd r21,Z+5
ldd r22,Z+6
ldd r23,Z+7
ldd r26,Z+20
ldd r27,Z+21
ldd r28,Z+22
ldd r29,Z+23
ldd r2,Z+36
ldd r3,Z+37
ldd r4,Z+38
ldd r5,Z+39
movw r6,r2
movw r8,r4
mov r0,r26
com r0
and r6,r0
mov r0,r27
com r0
and r7,r0
mov r0,r28
com r0
and r8,r0
mov r0,r29
com r0
and r9,r0
eor r20,r6
eor r21,r7
eor r22,r8
eor r23,r9
std Z+4,r20
std Z+5,r21
std Z+6,r22
std Z+7,r23
movw r6,r20
movw r8,r22
mov r0,r2
com r0
and r6,r0
mov r0,r3
com r0
and r7,r0
mov r0,r4
com r0
and r8,r0
mov r0,r5
com r0
and r9,r0
eor r26,r6
eor r27,r7
eor r28,r8
eor r29,r9
std Z+20,r26
std Z+21,r27
std Z+22,r28
std Z+23,r29
mov r0,r20
com r0
and r26,r0
mov r0,r21
com r0
and r27,r0
mov r0,r22
com r0
and r28,r0
mov r0,r23
com r0
and r29,r0
eor r2,r26
eor r3,r27
eor r4,r28
eor r5,r29
std Z+36,r2
std Z+37,r3
std Z+38,r4
std Z+39,r5
ldd r20,Z+8
ldd r21,Z+9
ldd r22,Z+10
ldd r23,Z+11
ldd r26,Z+24
ldd r27,Z+25
ldd r28,Z+26
ldd r29,Z+27
ldd r2,Z+40
ldd r3,Z+41
ldd r4,Z+42
ldd r5,Z+43
movw r6,r2
movw r8,r4
mov r0,r26
com r0
and r6,r0
mov r0,r27
com r0
and r7,r0
mov r0,r28
com r0
and r8,r0
mov r0,r29
com r0
and r9,r0
eor r20,r6
eor r21,r7
eor r22,r8
eor r23,r9
std Z+8,r20
std Z+9,r21
std Z+10,r22
std Z+11,r23
movw r6,r20
movw r8,r22
mov r0,r2
com r0
and r6,r0
mov r0,r3
com r0
and r7,r0
mov r0,r4
com r0
and r8,r0
mov r0,r5
com r0
and r9,r0
eor r26,r6
eor r27,r7
eor r28,r8
eor r29,r9
std Z+24,r26
std Z+25,r27
std Z+26,r28
std Z+27,r29
mov r0,r20
com r0
and r26,r0
mov r0,r21
com r0
and r27,r0
mov r0,r22
com r0
and r28,r0
mov r0,r23
com r0
and r29,r0
eor r2,r26
eor r3,r27
eor r4,r28
eor r5,r29
std Z+40,r2
std Z+41,r3
std Z+42,r4
std Z+43,r5
ldd r20,Z+12
ldd r21,Z+13
ldd r22,Z+14
ldd r23,Z+15
ldd r26,Z+28
ldd r27,Z+29
ldd r28,Z+30
ldd r29,Z+31
ldd r2,Z+44
ldd r3,Z+45
ldd r4,Z+46
ldd r5,Z+47
movw r6,r2
movw r8,r4
mov r0,r26
com r0
and r6,r0
mov r0,r27
com r0
and r7,r0
mov r0,r28
com r0
and r8,r0
mov r0,r29
com r0
and r9,r0
eor r20,r6
eor r21,r7
eor r22,r8
eor r23,r9
std Z+12,r20
std Z+13,r21
std Z+14,r22
std Z+15,r23
movw r6,r20
movw r8,r22
mov r0,r2
com r0
and r6,r0
mov r0,r3
com r0
and r7,r0
mov r0,r4
com r0
and r8,r0
mov r0,r5
com r0
and r9,r0
eor r26,r6
eor r27,r7
eor r28,r8
eor r29,r9
std Z+28,r26
std Z+29,r27
std Z+30,r28
std Z+31,r29
mov r0,r20
com r0
and r26,r0
mov r0,r21
com r0
and r27,r0
mov r0,r22
com r0
and r28,r0
mov r0,r23
com r0
and r29,r0
eor r2,r26
eor r3,r27
eor r4,r28
eor r5,r29
std Z+44,r2
std Z+45,r3
std Z+46,r4
std Z+47,r5
ldd r6,Z+16
ldd r7,Z+17
ldd r8,Z+18
ldd r9,Z+19
lsl r6
rol r7
rol r8
rol r9
adc r6,r1
std Z+16,r6
std Z+17,r7
std Z+18,r8
std Z+19,r9
ldd r6,Z+20
ldd r7,Z+21
ldd r8,Z+22
ldd r9,Z+23
lsl r6
rol r7
rol r8
rol r9
adc r6,r1
std Z+20,r6
std Z+21,r7
std Z+22,r8
std Z+23,r9
ldd r6,Z+24
ldd r7,Z+25
ldd r8,Z+26
ldd r9,Z+27
lsl r6
rol r7
rol r8
rol r9
adc r6,r1
std Z+24,r6
std Z+25,r7
std Z+26,r8
std Z+27,r9
ldd r6,Z+28
ldd r7,Z+29
ldd r8,Z+30
ldd r9,Z+31
lsl r6
rol r7
rol r8
rol r9
adc r6,r1
std Z+28,r6
std Z+29,r7
std Z+30,r8
std Z+31,r9
ldd r6,Z+40
ldd r7,Z+41
ldd r8,Z+42
ldd r9,Z+43
ldd r10,Z+44
ldd r11,Z+45
ldd r12,Z+46
ldd r13,Z+47
ldd r14,Z+32
ldd r15,Z+33
ldd r24,Z+34
ldd r25,Z+35
std Z+40,r25
std Z+41,r14
std Z+42,r15
std Z+43,r24
ldd r14,Z+36
ldd r15,Z+37
ldd r24,Z+38
ldd r25,Z+39
std Z+44,r25
std Z+45,r14
std Z+46,r15
std Z+47,r24
std Z+32,r9
std Z+33,r6
std Z+34,r7
std Z+35,r8
std Z+36,r13
std Z+37,r10
std Z+38,r11
std Z+39,r12
ret
888:
pop r15
pop r14
pop r13
pop r12
pop r11
pop r10
pop r9
pop r8
pop r7
pop r6
pop r5
pop r4
pop r3
pop r2
pop r29
pop r28
ret
.size xoodoo_permute, .-xoodoo_permute
#endif
|
aadomn/cymric
| 20,077
|
artifact_tches2025-3/benchmark_avr/cymric_lwc/cymric_lwc/asconaead/permutations.S
|
///////////////////////////////////////////////////////////////////////////////
// ascon_avr.S: AVR Assembler implementation (for GCC) of the permutation. //
// Version 1.0.5 (20-Sep-2022), see <http://github.com/ascon/> for updates. //
// Author: L. Cardoso and J. Groszschaedl, (DCS, University of Luxembourg). //
// License: CC0-1.0, see <http://creativecommons.org/publicdomain/zero/1.0/> //
// ------------------------------------------------------------------------- //
// This source code is an improved version of the Assembler implementation //
// described in the paper "An Evaluation of the Multi-Platform Efficiency of //
// Lightweight Cryptographic Permutations" (Proceedings of SecITC 2021, //
// Lecture Notes in Computer Science volume 13195, Springer Verlag, 2022). //
///////////////////////////////////////////////////////////////////////////////
// Function prototype:
// -------------------
// void P(state *s, unsigned char r)
//
// Parameters:
// -----------
// `s`: pointer to a struct containing five 64-bit state-words in little
// endian representation
// `r`: number of rounds
//
// Return value:
// -------------
// None
#include "avr/io.h"
.section .text
///////////////////////////////////////////////////////////////////////////////
//////////////////////// REGISTER NAMES AND CONSTANTS /////////////////////////
///////////////////////////////////////////////////////////////////////////////
// Two AVR registers form a Dual-byte Register (DR) that can store a 16-bit
// operand, whereby little-endian format is used, i.e. the least-significant
// byte of the 16-bit operand is in the lower register.
// Twelve dual-byte registers for sbox layer
#define DR0 r2,r3
#define DR1 r4,r5
#define DR2 r6,r7
#define DR3 r8,r9
#define DR4 r10,r11
#define DR5 r12,r13
#define DR6 r14,r15
#define DR7 r16,r17
#define DR8 r18,r19
#define DR9 r20,r21
#define DR10 r22,r23
#define DR11 r24,r25
// Eight AVR registers form an Octa-byte Register (OR) that can store a 64-bit
// operand, whereby little-endian format is used, i.e. the least-significant
// byte of the 64-bit operand is in the lowest register.
// Three octa-byte registers for linear layer
#define OR0 r2,r3,r4,r5,r6,r7,r8,r9
#define OR1 r10,r11,r12,r13,r14,r15,r16,r17
#define OR2 r18,r19,r20,r21,r22,r23,r24,r25
// Octa-byte registers rotated 1 byte right
#define OR0R8 r3,r4,r5,r6,r7,r8,r9,r2
#define OR1R8 r11,r12,r13,r14,r15,r16,r17,r10
#define OR2R8 r19,r20,r21,r22,r23,r24,r25,r18
// Octa-byte registers rotated 2 bytes right
#define OR0R16 r4,r5,r6,r7,r8,r9,r2,r3
#define OR1R16 r12,r13,r14,r15,r16,r17,r10,r11
#define OR2R16 r20,r21,r22,r23,r24,r25,r18,r19
// Octa-byte registers rotated 3 bytes right
#define OR0R24 r5,r6,r7,r8,r9,r2,r3,r4
#define OR1R24 r13,r14,r15,r16,r17,r10,r11,r12
#define OR2R24 r21,r22,r23,r24,r25,r18,r19,r20
// Octa-byte registers rotated 3 bytes left
#define OR0L24 r7,r8,r9,r2,r3,r4,r5,r6
#define OR1L24 r15,r16,r17,r10,r11,r12,r13,r14
#define OR2L24 r23,r24,r25,r18,r19,r20,r21,r22
// Offset values for octa-byte registers
#define OF0 0,1,2,3,4,5,6,7
#define OF8 8,9,10,11,12,13,14,15
#define OF16 16,17,18,19,20,21,22,23
#define OF24 24,25,26,27,28,29,30,31
#define OF32 32,33,34,35,36,37,38,39
// Register to determine loop termination
#define rstop r0
#define sstop r26
// Zero register (for e.g. left-rotation)
#define zero r1
// Temporary register (INITVARS, ADDRCON)
#define tmp r26
// Register for 8-bit round-constant RCON
#define rcon r27
// Least-significat byte of state-word X2
#define x2lsb r18
///////////////////////////////////////////////////////////////////////////////
// MACROS FOR DUAL-BYTE (16-BIT) ARITHMETIC/LOGICAL OPERATIONS AND LOADS/STORES
///////////////////////////////////////////////////////////////////////////////
// The macro `DMOV` moves a dual-byte operand: A = B.
.macro DMOV a0:req, a1:req, b0:req, b1:req
movw \a0, \b0
.endm
// The macro `DAND` bitwise ANDs two dual-byte operands: A = A ^ B. An implicit
// 8-bit rotation of operand B is possible, i.e the macro can also perform an
// operation of the form A = A ^ (B >>> 8).
.macro DAND a0:req, a1:req, b0:req, b1:req
and \a0, \b0
and \a1, \b1
.endm
// The macro `DIOR` bitwise ORs two dual-byte operands: A = A | B. An implicit
// 8-bit rotation of operand B is possible, i.e the macro can also perform an
// operation of the form A = A | (B >>> 8).
.macro DIOR a0:req, a1:req, b0:req, b1:req
or \a0, \b0
or \a1, \b1
.endm
// The macro `DXOR` bitwise XORs two dual-byte operands: A = A ^ B. An implicit
// 8-bit rotation of operand B is possible, i.e the macro can also perform an
// operation of the form A = A ^ (B >>> 8).
.macro DXOR a0:req, a1:req, b0:req, b1:req
eor \a0, \b0
eor \a1, \b1
.endm
// The macro `DINV` bitwise inverts a dual-byte operand, whereby the inversion
// is performed in place: A = ~A.
.macro DINV a0:req, a1:req
com \a0
com \a1
.endm
// Note: The AVR architecture uses little-endian memory format, which means the
// least-significant byte of a 16-bit word is at the lowest address.
// The macro `DLDZO` loads a dual-byte operand from RAM via Z-pointer using the
// base+offset (i.e. displacement) addressing mode: A = RAM[Z+B]. An implicit
// 8-bit rotation of the loaded operand is possible, i.e. the macro can also
// perform an operation of the form A = RAM[Z+B] >>> 8.
.macro DLDZO a0:req, a1:req, b0:req, b1:req
ldd \a0, Z+\b0
ldd \a1, Z+\b1
.endm
// The macro `DSTZI` stores a dual-byte operand to RAM via Z-pointer using the
// auto/increment addressing mode: RAM[Z++] = A. An implicit 8-bit rotation of
// the operand to be stored is possible, i.e. the macro can also perform an
// operation of the form RAM[Z+B] = (A >>> 8).
.macro DSTZI a0:req, a1:req
st Z+, \a0
st Z+, \a1
.endm
// The macro `DSTZO` stores a dual-byte operand to RAM via Z-pointer using the
// base+offset (i.e. displacement) addressing mode: RAM[Z+B] = A. An implicit
// 8-bit rotation of the operand to be stored is possible, i.e. the macro can
// also perform an operation of the form RAM[Z+B] = (A >>> 8).
.macro DSTZO a0:req, a1:req, b0:req, b1:req
std Z+\b0, \a0
std Z+\b1, \a1
.endm
///////////////////////////////////////////////////////////////////////////////
// MACROS FOR OCTA-BYTE (64-BIT) ARITHMETIC/LOGICAL OPERATIONS AND LOADS/STORES
///////////////////////////////////////////////////////////////////////////////
// The macro `OMOV` moves an octa-byte operand: A = B. An implicit 16/32/48-bit
// rotation of operand B is possible, i.e the macro can also perform operations
// of the form A = (B >>> 16) and A = (B <<< 32).
.macro OMOV a0:req, a1:req, a2:req, a3:req, a4:req, a5:req, a6:req, a7:req, \
b0:req, b1:req, b2:req, b3:req, b4:req, b5:req, b6:req, b7:req
movw \a0, \b0
movw \a2, \b2
movw \a4, \b4
movw \a6, \b6
.endm
// The macro `OROL` rotates an octa-byte operand one bit left, whereby the
// rotation is performed in place: A = A <<< 1.
.macro OROL a0:req, a1:req, a2:req, a3:req, a4:req, a5:req, a6:req, a7:req
lsl \a0
rol \a1
rol \a2
rol \a3
rol \a4
rol \a5
rol \a6
rol \a7
adc \a0, zero
.endm
// The macro `OROL2` rotates an octa-byte operand two bits left, whereby the
// rotation is performed in place: A = A <<< 2.
.macro OROL2 a0:req, a1:req, a2:req, a3:req, a4:req, a5:req, a6:req, a7:req
lsl \a0
rol \a1
rol \a2
rol \a3
rol \a4
rol \a5
rol \a6
rol \a7
adc \a0, zero
lsl \a0
rol \a1
rol \a2
rol \a3
rol \a4
rol \a5
rol \a6
rol \a7
adc \a0, zero
.endm
// The macro `OROL3` rotates an octa-byte operand three bits left, whereby the
// rotation is performed in place: A = A <<< 3.
.macro OROL3 a0:req, a1:req, a2:req, a3:req, a4:req, a5:req, a6:req, a7:req
lsl \a0
rol \a1
rol \a2
rol \a3
rol \a4
rol \a5
rol \a6
rol \a7
adc \a0, zero
lsl \a0
rol \a1
rol \a2
rol \a3
rol \a4
rol \a5
rol \a6
rol \a7
adc \a0, zero
lsl \a0
rol \a1
rol \a2
rol \a3
rol \a4
rol \a5
rol \a6
rol \a7
adc \a0, zero
.endm
// The macro `OROR` rotates an octa-byte operand one bit right, whereby the
// rotation is performed in place: A = A >>> 1.
.macro OROR a0:req, a1:req, a2:req, a3:req, a4:req, a5:req, a6:req, a7:req
bst \a0, 0
ror \a7
ror \a6
ror \a5
ror \a4
ror \a3
ror \a2
ror \a1
ror \a0
bld \a7, 7
.endm
// The macro `OROR2` rotates an octa-byte operand two bits right, whereby the
// rotation is performed in place: A = A >>> 2.
.macro OROR2 a0:req, a1:req, a2:req, a3:req, a4:req, a5:req, a6:req, a7:req
bst \a0, 0
ror \a7
ror \a6
ror \a5
ror \a4
ror \a3
ror \a2
ror \a1
ror \a0
bld \a7, 7
bst \a0, 0
ror \a7
ror \a6
ror \a5
ror \a4
ror \a3
ror \a2
ror \a1
ror \a0
bld \a7, 7
.endm
// The macro `OROR3` rotates an octa-byte operand three bits right, whereby the
// rotation is performed in place: A = A >>> 3.
.macro OROR3 a0:req, a1:req, a2:req, a3:req, a4:req, a5:req, a6:req, a7:req
bst \a0, 0
ror \a7
ror \a6
ror \a5
ror \a4
ror \a3
ror \a2
ror \a1
ror \a0
bld \a7, 7
bst \a0, 0
ror \a7
ror \a6
ror \a5
ror \a4
ror \a3
ror \a2
ror \a1
ror \a0
bld \a7, 7
bst \a0, 0
ror \a7
ror \a6
ror \a5
ror \a4
ror \a3
ror \a2
ror \a1
ror \a0
bld \a7, 7
.endm
// The macro `OXOR` bitwise XORs two octa-byte operands: A = A ^ B. An implicit
// bytewise rotation of operand B is possible, i.e the macro can also perform
// operations of the form A = A ^ (B >>> 8) and A = A ^ (B <<< 16).
.macro OXOR a0:req, a1:req, a2:req, a3:req, a4:req, a5:req, a6:req, a7:req, \
b0:req, b1:req, b2:req, b3:req, b4:req, b5:req, b6:req, b7:req
eor \a0, \b0
eor \a1, \b1
eor \a2, \b2
eor \a3, \b3
eor \a4, \b4
eor \a5, \b5
eor \a6, \b6
eor \a7, \b7
.endm
// Note: The AVR architecture uses little-endian memory format, which means the
// least-significant byte of a 64-bit word is at the lowest address.
// The macro `OLDZO` loads an octa-byte operand from RAM via Z-pointer using
// the base+offset (i.e. displacement) addressing mode: A = RAM[Z+B]. An
// implicit bytewise rotation of the loaded operand is possible, i.e. the macro
// can also perform operations of the form A = RAM[Z+B] >>> 8 and
// A = RAM[Z+B] <<< 16.
.macro OLDZO a0:req, a1:req, a2:req, a3:req, a4:req, a5:req, a6:req, a7:req, \
b0:req, b1:req, b2:req, b3:req, b4:req, b5:req, b6:req, b7:req
ldd \a0, Z+\b0
ldd \a1, Z+\b1
ldd \a2, Z+\b2
ldd \a3, Z+\b3
ldd \a4, Z+\b4
ldd \a5, Z+\b5
ldd \a6, Z+\b6
ldd \a7, Z+\b7
.endm
// The macro `OSTZO` stores an octa-byte operand to RAM via Z-pointer using the
// base+offset (i.e. displacement) addressing mode: RAM[Z+B] = A. An implicit
// bytewise rotation of the operand to be stored is possible, i.e. the macro
// can also perform operations of the form RAM[Z+B] = (A >>> 8) and
// RAM[Z+B] = (A <<< 16).
.macro OSTZO a0:req, a1:req, a2:req, a3:req, a4:req, a5:req, a6:req, a7:req, \
b0:req, b1:req, b2:req, b3:req, b4:req, b5:req, b6:req, b7:req
std Z+\b0, \a0
std Z+\b1, \a1
std Z+\b2, \a2
std Z+\b3, \a3
std Z+\b4, \a4
std Z+\b5, \a5
std Z+\b6, \a6
std Z+\b7, \a7
.endm
///////////////////////////////////////////////////////////////////////////////
/////////////////// HELPER MACROS FOR THE ASCON PERMUTATION ///////////////////
///////////////////////////////////////////////////////////////////////////////
// The macro `PROLOGUE` pushes all callee-saved registers on the stack.
.macro PROLOGUE
push r0
push r2
push r3
push r4
push r5
push r6
push r7
push r8
push r9
push r10
push r11
push r12
push r13
push r14
push r15
push r16
push r17
.endm
// The macro `EPILOGUE` pops all callee-saved registers from the stack and
// returns to the caller.
.macro EPILOGUE
pop r17
pop r16
pop r15
pop r14
pop r13
pop r12
pop r11
pop r10
pop r9
pop r8
pop r7
pop r6
pop r5
pop r4
pop r3
pop r2
pop r0
ret
.endm
// The macro `INITVARS` initializes local variables.
.macro INITVARS
// initialize pointers and constants
movw ZL, r24
ldi tmp, 0x3C
mov rstop, tmp
clr zero
// initial RCON = 15*rounds + 0x3C
mov rcon, r22
swap rcon
sub rcon, r22
add rcon, rstop
.endm
// The macro `ADDRCON` XORs an 8-bit round-constant to the state-word X2.
.macro ADDRCON
eor x2lsb, rcon
.endm
///////////////////////////////////////////////////////////////////////////////
/////////////////// MACROS FOR NONLINEAR SUBSTITUTION LAYER ///////////////////
///////////////////////////////////////////////////////////////////////////////
// The macro `LDSLICE` loads a 16-bit slice of each of the four 64-bit state-
// words X0, X1, X3, and X4. The 16-bit slice of state-word X2 does not need to
// be loaded because X2 is always kept in OR2 during the computation of the
// permutation.
.macro LDSLICE
DLDZO DR0, 0,1
DLDZO DR1, 8,9
DLDZO DR3, 24,25
DLDZO DR4, 32,33
.endm
// The macro `SBOXSLICE` computes a 16-bit slice of the non-linear substitution
// operation. This implementation uses the improved formulae of Campos et al
// (Proceedings of CANS 2020). The 16-bit slices of the state-words X0, X1, X2,
// X3, and X4 are stored in the five double-byte registers DR0, DR1, DR8, DR3,
// and DR4, respectively. DR5, DR6, and DR7 serve as temporary registers.
.macro SBOXSLICE
DMOV DR5, DR0 // t5 = i0
DMOV DR6, DR1 // t6 = i1
DMOV DR7, DR3 // t7 = i3
DXOR DR5, DR4 // t5 = t5 ^ i4 = i0 ^ i4
DXOR DR6, DR8 // t6 = t6 ^ i2 = i1 ^ i2
DXOR DR7, DR4 // t7 = t7 ^ i4 = i3 ^ i4
DINV DR4 // i4 = ~i4
DIOR DR4, DR3 // i4 = i4 | i3
DXOR DR4, DR6 // i4 = i4 ^ t6 -> i4 contains o2
DXOR DR3, DR8 // i3 = i3 ^ i2
DIOR DR3, DR6 // i3 = i3 | t6
DXOR DR3, DR5 // i3 = i3 ^ t5 -> i3 contains o1
DXOR DR8, DR5 // i2 = i2 ^ t5
DIOR DR8, DR1 // i2 = i2 | i1
DXOR DR8, DR7 // i2 = i2 ^ t7 -> i2 contains o0
DINV DR5 // t5 = ~t5
DAND DR1, DR5 // i1 = i1 & t5
DXOR DR1, DR7 // i1 = i1 ^ t7 -> i1 contains o4
DIOR DR0, DR7 // i0 = i0 | t7
DXOR DR0, DR6 // i0 = i0 ^ t6 -> i0 contains o3
.endm
// The macro `STSLICE` stores a 16-bit slice of each of the four 64-bit state-
// words X0, X1, X3, and X4. The 16-bit slice of state-word X2 does not need to
// be stored because X2 is always kept in OR2 during the computation of the
// permutation. However, OR2 is rotated by 16 bits so that DR8 contains the
// correct 16-bit slice for the subsequent iteration of the round-loop.
.macro STSLICE
DSTZI DR8
DSTZO DR3, 6,7
DSTZO DR0, 22,23
DSTZO DR1, 30,31
DMOV DR8, DR9
DMOV DR9, DR10
DMOV DR10, DR11
DMOV DR11, DR4
.endm
// The macro `SBOXLAYER` computes the complete non-linear substitution layer in
// a 16-bit slice-wise fashion. This implementation is optimized for small code
// size.
.macro SBOXLAYER
ldi sstop, 8 // initialize loop-stopper for sbox-loop with 8
add sstop, ZL // add ZL to sstop to stop loop after 4 iterations
.LSBOXLOOP: // start of sbox-loop
LDSLICE // load a 16-bit slice of the state from RAM
SBOXSLICE // compute the sbox for this 16-bit slice
STSLICE // store the 16-bit slice to RAM
cpse ZL, sstop // check whether loop has been iterated 4 times
rjmp .LSBOXLOOP // if not then jump back to start of loop
sbiw ZL, 8 // set Z-pointer to start-address of X0
.endm
///////////////////////////////////////////////////////////////////////////////
////////////////////// MACROS FOR LINEAR DIFFUSION LAYER //////////////////////
///////////////////////////////////////////////////////////////////////////////
// The macro `LINSWX0` computes the linear diffusion operation on state-word
// X0: X0 = X0 ^ (X0 >>> 19) ^ (X0 >>> 28).
.macro LINSWX0
OLDZO OR0, OF0
OMOV OR1, OR0
OROR3 OR1
OXOR OR0, OR1R16
OROR OR1
OXOR OR0, OR1R24
OSTZO OR0, OF0
.endm
// The macro `LINSWX1` computes the linear diffusion operation on state-word
// X1: X1 = X1 ^ (X1 >>> 61) ^ (X1 >>> 39).
.macro LINSWX1
OLDZO OR0, OF8
OMOV OR1, OR0
OROL OR1
OXOR OR0, OR1L24
OROL2 OR1
OXOR OR0, OR1
OSTZO OR0, OF8
.endm
// The macro `LINSWX2` computes the linear diffusion operation on state-word
// X2: X2 = X2 ^ (X2 >>> 1) ^ (X2 >>> 6). Note that X2 is not loaded from RAM
// because it is always kept in OR2 during the computation of the permutation.
.macro LINSWX2
OMOV OR1, OR2
OROR OR2
OXOR OR2, OR1
OROL2 OR1
OXOR OR2, OR1R8
.endm
// The macro `LINSWX3` computes the linear diffusion operation on state-word
// X3: X3 = X3 ^ (X3 >>> 10) ^ (X3 >>> 17).
.macro LINSWX3
OLDZO OR0, OF24
OMOV OR1, OR0
OROR OR1
OXOR OR0, OR1R16
OROR OR1
OXOR OR0, OR1R8
OSTZO OR0, OF24
.endm
// The macro `LINSWX4` computes the linear diffusion operation on state-word
// X4: X4 = X4 ^ (X4 >>> 7) ^ (X4 >>> 41).
.macro LINSWX4
OLDZO OR0, OF32
OMOV OR1, OR0
OROR OR1
OXOR OR0, OR1L24
OROL2 OR1
OXOR OR0, OR1R8
OSTZO OR0, OF32
.endm
// The macro `LINLAYER` computes the complete linear diffusion layer.
.macro LINLAYER
LINSWX0
LINSWX1
LINSWX2
LINSWX3
LINSWX4
.endm
///////////////////////////////////////////////////////////////////////////////
////////////////////////////// ASCON PERMUTATION //////////////////////////////
///////////////////////////////////////////////////////////////////////////////
.balign 2
.global P
.type P, @function
.func P
P:
PROLOGUE // push callee-saved registers
INITVARS // initialize local variables
OLDZO OR2, OF16 // load state-word X2 to octa-byte register OR2
.LROUNDLOOP: // start of round-loop
ADDRCON // macro for addition of round-constant
SBOXLAYER // macro for nonlinear substitution layer
LINLAYER // macro for linear diffusion layer
subi rcon, 15 // subtract 15 from round-constant
cpse rcon, rstop // check whether round-constant is below 0x4B
rjmp .LROUNDLOOP // if not then jump back to start of loop
OSTZO OR2, OF16 // store octa-byte register OR2 to state-word X2
EPILOGUE // pop callee-saved registers and return
.endfunc
.size P, .-P
.end
|
aadomn/cymric
| 7,151
|
artifact_tches2025-3/benchmark_avr/cymric_lwc/cymric_lwc/cymric/lea/lea128.S
|
; Argument registers for function calls (the avr-gcc calling convention
; passes the first three pointer/word arguments in r25:r24, r23:r22, r21:r20)
#define ARG1 r24
#define ARG2 r22
#define ARG3 r20
/**
* push_registers macro:
*
* Pushes a given range of registers in ascending order
* To be called like: push_registers 0,15
*/
.macro push_registers from:req, to:req
push \from
.if \to-\from
push_registers "(\from+1)",\to
.endif
.endm
/**
 * ldi_w macro:
 *
 * Load immediate for 16-bit values into the register pair \reg+1:\reg.
 * NOTE(review): `ldi` is only encodable for r16-r31, so \reg must be an
 * even register >= r16 -- TODO confirm (the macro is unused in this file).
 */
.macro ldi_w reg:req, val:req
ldi \reg, lo8(\val)
ldi "(\reg+1)", hi8(\val)
.endm
/**
 * pop_registers macro:
 *
 * Pops a given range of registers in descending order (exact reverse of
 * push_registers, so the same from/to pair restores what was pushed)
 * To be called like: pop_registers 0,15
 */
.macro pop_registers from:req, to:req
pop \to
.if \to-\from
pop_registers \from,"(\to-1)"
.endif
.endm
;-----------------------------------------------------------------------
; void lea128_kexpand(uint8_t *rkeys, const uint8_t *key)
; LEA-128 key schedule: expands the 16-byte key into 24 rounds x 16 bytes
; (384 bytes written through X). Only the four distinct words T[0..3] are
; stored per round; lea128_encrypt reuses the T[1] word where the LEA-128
; schedule repeats it.
; In:   r25:r24 = rkeys (output), r23:r22 = key (input)
; Fix:  r1 is now cleared before returning: `mul` clobbers r1:r0 and the
;       avr-gcc ABI requires r1 == 0 in the caller.
;-----------------------------------------------------------------------
.global lea128_kexpand
lea128_kexpand:
; Save callee-saved r2-r17, r28-r31 (and the caller's r24-r25)
push_registers 2,17
push_registers 28,31
push_registers 24,25
; Save the argument pointers to Z (key) and X (round keys)
movw XL, ARG1
movw ZL, ARG2
; Allocate 16 bytes on the stack (scratch for the 4 round constants),
; keeping the frame pointer in Y (SPL/SPH/SREG at 0x3d/0x3e/0x3f)
in r28, 0x3d
in r29, 0x3e
sbiw r28, 16
in r0, 0x3f
cli
out 0x3e, r29
out 0x3f, r0
out 0x3d, r28
.L__stack_usage = 42
; Load the key given by argument to registers 2-17 instead of 0-15 because
; the mul instruction unconditionally overwrites registers r1:r0.
.irp param,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15,r16,r17
ld \param, Z+
.endr
; Loop counter: 24 rounds
ldi r18, 24
; Constants for efficient bit-rotations via mul (x64 -> rol 6, x8 -> rol 3)
ldi r19, 64
ldi r20, 8
; Store the four LEA key-schedule constants delta[0..3] on the stack,
; pre-rotated so that the `rc <<< 1` performed before each use yields
; the ROL(delta[i mod 4], .) value required by the schedule
ldi r22, lo8(0xf4ed)
ldi r23, hi8(0xf4ed)
ldi r24, lo8(0xe1f7)
ldi r25, hi8(0xe1f7)
std Y+0, r22
std Y+1, r23
std Y+2, r24
std Y+3, r25
ldi r22, lo8(0x6b02)
ldi r23, hi8(0x6b02)
ldi r24, lo8(0x4462)
ldi r25, hi8(0x4462)
std Y+4, r22
std Y+5, r23
std Y+6, r24
std Y+7, r25
ldi r22, lo8(0xf914)
ldi r23, hi8(0xf914)
ldi r24, lo8(0xf3c4)
ldi r25, hi8(0xf3c4)
std Y+8, r22
std Y+9, r23
std Y+10, r24
std Y+11, r25
ldi r22, lo8(0xc3b1)
ldi r23, hi8(0xc3b1)
ldi r24, lo8(0xe37c)
ldi r25, hi8(0xe37c)
std Y+12, r22
std Y+13, r23
std Y+14, r24
std Y+15, r25
loop_kexp:
; Load round constant rc (little-endian in r25:r24:r23:r22)
ldd r22, Y+0
ldd r23, Y+1
ldd r24, Y+2
ldd r25, Y+3
; First round key word
; rc <<< 1 (stray carry shifted into bit 0 is overwritten by bld)
bst r25, 7
rol r22
rol r23
rol r24
rol r25
bld r22, 0
; T[0] = T[0] + rc
add r2, r22
adc r3, r23
adc r4, r24
adc r5, r25
; T[0] <<< 1
bst r5, 7
rol r2
rol r3
rol r4
rol r5
bld r2, 0
; Second round key word
; rc <<< 1
bst r25, 7
rol r22
rol r23
rol r24
rol r25
bld r22, 0
; T[1] = T[1] + rc
add r6, r22
adc r7, r23
adc r8, r24
adc r9, r25
; T[1] <<< 3: multiply byte pairs by 8 and recombine the carried-out bits
mov r30, r7
mov r31, r9
mul r6, r20
movw r6, r0
mul r8, r20
movw r8, r0
mul r30, r20
eor r7, r0
eor r8, r1
mul r31, r20
eor r9, r0
eor r6, r1
; Third round key word
; rc <<< 1
bst r25, 7
rol r22
rol r23
rol r24
rol r25
bld r22, 0
; T[2] = T[2] + rc
add r10, r22
adc r11, r23
adc r12, r24
adc r13, r25
; T[2] <<< 6: same pattern with multiplier 64
mov r30, r11
mov r31, r13
mul r10, r19
movw r10, r0
mul r12, r19
movw r12, r0
mul r30, r19
eor r11, r0
eor r12, r1
mul r31, r19
eor r13, r0
eor r10, r1
; Fourth round key word
; rc <<< 1
bst r25, 7
rol r22
rol r23
rol r24
rol r25
bld r22, 0
; T[3] = T[3] + rc
add r14, r22
adc r15, r23
adc r16, r24
adc r17, r25
; T[3] <<< 11: byte-rotate left by 8, then rotate by 3 via mul-by-8
mov r30, r14
mov r14, r17
mov r17, r16
mov r16, r15
mov r15, r30
mov r31, r17
mul r14, r20
movw r14, r0
mul r16, r20
movw r16, r0
mul r30, r20
eor r15, r0
eor r16, r1
mul r31, r20
eor r17, r0
eor r14, r1
; Store the round key (T[0..3], 16 bytes)
st X+, r2
st X+, r3
st X+, r4
st X+, r5
st X+, r6
st X+, r7
st X+, r8
st X+, r9
st X+, r10
st X+, r11
st X+, r12
st X+, r13
st X+, r14
st X+, r15
st X+, r16
st X+, r17
; Store the updated (now <<< 4) round constant and advance Y to the next one
st Y+, r22
st Y+, r23
st Y+, r24
st Y+, r25
; Decrement loop counter
subi r18, 1
; If counter != 0 mod 4, no need to wrap Y back to the first round constant
mov r21, r18
andi r21, 3
cpi r21, 0
brne no_wrap_rc
sbiw Y, 16
no_wrap_rc:
cpi r18, 0
breq exit_kexp
rjmp loop_kexp
exit_kexp:
; Release the 16-byte stack frame
adiw r28, 16
in r0, 0x3f
cli
out 0x3e, r29
out 0x3f, r0
out 0x3d, r28
pop_registers 24,25
; Restore r28-r31 and r2-r17
pop_registers 28,31
pop_registers 2,17
clr r1 ; avr-gcc ABI: r1 must read as zero in the caller (mul clobbered it)
ret
.size lea128_kexpand, .-lea128_kexpand
;-----------------------------------------------------------------------
; void lea128_encrypt(uint8_t *out, const uint8_t *in, const uint8_t *rkeys)
; LEA-128 encryption: 24 rounds, consuming 16 bytes of round-key material
; per round from `rkeys` (as produced by lea128_kexpand). The state lives
; in r2-r17 (x[0]=r2-r5 ... x[3]=r14-r17); r1:r0 are mul/movw scratch.
; The schedule stores each round's T[1] word only once, so r26-r29 (loaded
; with k[1]) are reused below for the k[3] and k[5] xors.
; Fix:  r1 is now cleared before returning (avr-gcc ABI requires r1 == 0).
;-----------------------------------------------------------------------
.global lea128_encrypt
lea128_encrypt:
; Save callee-saved r2-r17, r28-r29, and the output pointer r24-r25
push_registers 2,17
push_registers 28,29
push_registers 24,25
.L__stack_usage = 20
; Save the argument pointers to X (plaintext) and Z (round keys)
movw XL, ARG2
movw ZL, ARG3
; Load the plaintext given by argument to registers 2-17 instead of 0-15
; because the mul instruction unconditionally overwrites registers r1:r0.
.irp param,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15,r16,r17
ld \param, X+
.endr
; Loop counter (24 rounds) and mul constants (x32 -> rol 5, x8 -> rol 3)
ldi r18, 24
ldi r19, 32
ldi r20, 8
loop:
// save x[0] (it becomes the new x[3] at the end of the round)
movw r22, r2
movw r24, r4
// x[0] ^= k[0]
ld r26, Z+
ld r27, Z+
ld r28, Z+
ld r29, Z+
eor r2, r26
eor r3, r27
eor r4, r28
eor r5, r29
// x[0] += (x[1] ^ k[1])   (32-bit add spread over add/adc)
ld r26, Z+
ld r27, Z+
ld r28, Z+
ld r29, Z+
movw r0, r26
eor r0, r6
eor r1, r7
add r2, r0
adc r3, r1
movw r0, r28
eor r0, r8
eor r1, r9
adc r4, r0
adc r5, r1
// x[1] ^= k[2]
ld r0, Z+
ld r1, Z+
eor r6, r0
eor r7, r1
ld r0, Z+
ld r1, Z+
eor r8, r0
eor r9, r1
// x[1] += (x[2] ^ k[3])   (k[3] word still held in r26-r29)
movw r0, r26
eor r0, r10
eor r1, r11
add r6, r0
adc r7, r1
movw r0, r28
eor r0, r12
eor r1, r13
adc r8, r0
adc r9, r1
// x[3] ^= k[5]            (same word again)
eor r14, r26
eor r15, r27
eor r16, r28
eor r17, r29
// x[2] ^= k[4]
ld r26, Z+
ld r27, Z+
ld r28, Z+
ld r29, Z+
eor r10, r26
eor r11, r27
eor r12, r28
eor r13, r29
// x[2] += x[3]
add r10, r14
adc r11, r15
adc r12, r16
adc r13, r17
// x[0] <<<= 9: byte-rotate by 8, then rotate by 1 (bld fixes bit 0)
mov r28, r5
mov r5, r4
mov r4, r3
mov r3, r2
mov r2, r28
bst r5, 7
rol r2
rol r3
rol r4
rol r5
bld r2, 0
// x[1] <<<= 27 (= ror 5): byte-rotate right by 8, then rol 3 via mul-by-8
mov r28, r6
mov r6, r7
mov r7, r8
mov r8, r9
mov r9, r28
mov r29, r7
mul r6, r20
movw r6, r0
mul r8, r20
movw r8, r0
mul r29, r20
eor r7, r0
eor r8, r1
mul r28, r20
eor r9, r0
eor r6, r1
// x[2] <<<= 29 (= ror 3): byte-rotate right by 8, then rol 5 via mul-by-32
mov r28, r10
mov r10, r11
mov r11, r12
mov r12, r13
mov r13, r28
mov r29, r11
mul r10, r19
movw r10, r0
mul r12, r19
movw r12, r0
mul r29, r19
eor r11, r0
eor r12, r1
mul r28, r19
eor r13, r0
eor r10, r1
// x[3] = x[0] (value saved at the top of the round)
movw r14, r22
movw r16, r24
; Decrement loop counter
subi r18, 1
cpi r18, 0
breq exit
rjmp loop
exit:
; Restore the output pointer and store the state
pop_registers 24,25
movw YL, ARG1
st Y+, r2
st Y+, r3
st Y+, r4
st Y+, r5
st Y+, r6
st Y+, r7
st Y+, r8
st Y+, r9
st Y+, r10
st Y+, r11
st Y+, r12
st Y+, r13
st Y+, r14
st Y+, r15
st Y+, r16
st Y+, r17
; Restore r28-r29 and r2-r17
pop_registers 28,29
pop_registers 2,17
clr r1 ; avr-gcc ABI: r1 must be zero on return (clobbered by mul)
ret
.size lea128_encrypt, .-lea128_encrypt
|
aadomn/cymric
| 15,461
|
artifact_tches2025-3/benchmark_avr/cymric_lwc/cymric_lwc/cymric/gift/gift128.S
|
; Argument registers for function calls (the avr-gcc calling convention
; passes the first three pointer/word arguments in r25:r24, r23:r22, r21:r20)
#define ARG1 r24
#define ARG2 r22
#define ARG3 r20
/**
* push_registers macro:
*
* Pushes a given range of registers in ascending order
* To be called like: push_registers 0,15
*/
.macro push_registers from:req, to:req
push \from
.if \to-\from
push_registers "(\from+1)",\to
.endif
.endm
/**
 * pop_registers macro:
 *
 * Pops a given range of registers in descending order (exact reverse of
 * push_registers, so the same from/to pair restores what was pushed)
 * To be called like: pop_registers 0,15
 */
.macro pop_registers from:req, to:req
pop \to
.if \to-\from
pop_registers \from,"(\to-1)"
.endif
.endm
/**
* sbox macro:
*
* Computes the S-box layer in a bitsliced manner on a quarter of the state
*/
.macro sbox x0, x1, x2, x3
mov r16, \x0
and r16, \x2
eor \x1, r16    // x1 ^= x0 & x2
mov r16, \x1
and r16, \x3
eor \x0, r16    // x0 ^= x1 & x3
mov r16, \x0
or r16, \x1
eor \x2, r16    // x2 ^= x0 | x1
eor \x3, \x2    // x3 ^= x2
eor \x1, \x3    // x1 ^= x3
com \x3         // x3 = ~x3
mov r16, \x0
and r16, \x1
eor \x2, r16    // x2 ^= x0 & x1; in-place on \x0..\x3, clobbers r16
.endm
/**
* llayer1 macro:
*
* Computes the linear layer on a quarter of the state for the 1st round
* within the quintuple round routine
*/
.macro llayer1 x1, x2, x3
// Nibble-wise rotates of \x1/\x2/\x3 by 2/3/1 bits respectively.
// Assumes r17 = 0x33 and r18 = 0x11 (set by the caller); clobbers r16.
// NIBBLE_ROR2
mov r16, \x1
lsr r16
lsr r16
and r16, r17
and \x1, r17
lsl \x1
lsl \x1
or \x1, r16
// NIBBLE_ROR1
mov r16, \x3
lsr r16
cbr r16, 136    // 0x88: drop the bits shifted across nibble boundaries
and \x3, r18
lsl \x3
lsl \x3
lsl \x3
or \x3, r16
// NIBBLE_ROR3
mov r16, \x2
lsr \x2
lsr \x2
lsr \x2
and \x2, r18
cbr r16, 136
lsl r16
or \x2, r16
.endm
/**
* half_ror_4 macro:
*
* Rotates a 16-bit word by 4 bits to the right.
* Assumes r18 contains 0x0f.
*/
.macro half_ror_4 hi, lo
// Assumes r18 = 0x0f and that (\hi,\lo) is an even-aligned adjacent
// register pair (movw copies \hi+1 into r17). Clobbers r16:r17.
swap \hi
swap \lo
movw r16, \hi   // r16 = nibble-swapped \hi, r17 = nibble-swapped \lo
cbr r16, 15
and \hi, r18
cbr r17, 15
and \lo, r18
or \hi, r17     // exchange the cross-byte nibbles to finish the 16-bit ror 4
or \lo, r16
.endm
/**
* half_ror_12 macro:
*
* Rotates a 16-bit word by 12 bits to the right
*/
.macro half_ror_12 hi, lo
// Rotates the 16-bit value 12 bits right (= 4 left). Assumes r18 = 0xf0
// and that (\hi,\lo) is an even-aligned adjacent pair. Clobbers r16:r17.
swap \hi
swap \lo
movw r16, \hi
cbr r16, 240
and \hi, r18
cbr r17, 240
and \lo, r18
or \hi, r17
or \lo, r16
.endm
/**
* byte_ror_2 macro:
*
* Rotates a byte by 2 bits to the right
*/
.macro byte_ror_2 x
// Two 1-bit right rotates via the T flag (bst saves bit 0, bld re-inserts
// it as the new bit 7 after the shift)
bst \x, 0
lsr \x
bld \x, 7
bst \x, 0
lsr \x
bld \x, 7
.endm
/**
* byte_rol_2 macro:
*
* Rotates a byte by 2 bits to the left
*/
.macro byte_rol_2 x, zero
// \zero must hold 0 so that `adc` re-injects each carried-out bit into bit 0
lsl \x
adc \x, \zero
lsl \x
adc \x, \zero
.endm
/**
* add_round_key macro:
*
* Adds a round key to half of the state
*/
.macro add_round_key x0, x1, x2, x3, x4, x5, x6, x7
// XORs 8 round-key bytes (read from X, post-incremented) into half of
// the state. Clobbers r16:r17.
ld r16, X+
ld r17, X+
eor \x0, r16
eor \x1, r17
ld r16, X+
ld r17, X+
eor \x2, r16
eor \x3, r17
ld r16, X+
ld r17, X+
eor \x4, r16
eor \x5, r17
ld r16, X+
ld r17, X+
eor \x6, r16
eor \x7, r17
.endm
/**
* add_rconst macro:
*
* Adds round constants to a quarter of the state
*/
.macro add_rconst x0, x1, x2, x3
// XORs 4 round-constant bytes (read from Z, post-incremented) into a
// quarter of the state. Clobbers r16:r17.
ld r16, Z+
ld r17, Z+
eor \x0, r16
eor \x1, r17
ld r16, Z+
ld r17, Z+
eor \x2, r16
eor \x3, r17
.endm
/**
* add_rconst0 macro:
*
* Same as add_rconst but w/ a specificity for rounds r s.t.
* r = 0 mod 5: the last rconst byte is always 0x10 so we hardcode it
*/
.macro add_rconst0 x0, x1, x2, x3
// Rounds r = 0 mod 5: the last rconst byte is always 0x10, so it is
// hardcoded and only 3 bytes are consumed from Z. Clobbers r16:r17.
ld r16, Z+
ld r17, Z+
eor \x0, r16
eor \x1, r17
ld r16, Z+
ldi r17, 16
eor \x2, r16
eor \x3, r17
.endm
/**
* add_rconst1 macro:
*
* Same as add_rconst but w/ a specificity for rounds r s.t.
* r = 1 mod 5: the 1st and 3rd rconst bytes are always 0x00 and 0x01
* respectively so we hardcode them
*/
.macro add_rconst1 x1, x2, x3
// Rounds r = 1 mod 5: the 1st and 3rd rconst bytes are always 0x00 and
// 0x01, so they are hardcoded and only 2 bytes are consumed from Z.
// Clobbers r16:r17.
ld r16, Z+
ldi r17, 1
eor \x1, r16
eor \x2, r17
ld r16, Z+
eor \x3, r16
.endm
/**
* add_rconst2 macro:
*
* Same as add_rconst but w/ a specificity for rounds r s.t.
* r = 2 mod 5: the first two bytes are always 0x02 and 0x00
* respectively so we hardcode them
*/
.macro add_rconst2 x0, x2, x3
// Rounds r = 2 mod 5: the first two rconst bytes are always 0x02 and
// 0x00, so they are hardcoded and only 2 bytes are consumed from Z.
// Clobbers r16:r17.
ldi r16, 2
ld r17, Z+
eor \x0, r16
eor \x2, r17
ld r16, Z+
eor \x3, r16
.endm
/**
* llayer3 macro:
*
* Computes the linear layer on a quarter of the state for the 3rd round
* within the quintuple round routine
*/
.macro llayer3 x1, x2
// SWAPMOVE with per-byte mask 0x55 and shift 1 on the pair (\x1,\x2).
// \x1 must be even-aligned (movw); clobbers r16:r17 and r28:r29.
movw r16, \x1
movw r28, \x1
lsr r28
lsr r29
eor r16, r28
eor r17, r29
andi r16, 85    // 0x55
andi r17, 85
eor \x1, r16
eor \x2, r17
lsl r16
lsl r17
eor \x1, r16
eor \x2, r17
.endm
.macro kexp_round k0, k1, k2, k3
// GIFT-128 key-state update on the word \k0..\k3: one 16-bit half is
// rotated right by 2, the other left by 4 (mul-by-16, then an xor-swap
// of \k2/\k3). Assumes r20 = 16; clobbers r0, r1, r30.
; k0||k1 >>> 2 (bld repairs the bit that ror pulled in from a stale carry)
bst \k1, 0
ror \k0
ror \k1
bld \k0, 7
bst \k1, 0
ror \k0
ror \k1
bld \k0, 7
; k2||k3 <<< 4
mov r30, \k3
mul \k2, r20
mov \k2, r1
mov \k3, r0
mul r30, r20
or \k2, r0
or \k3, r1
eor \k2, \k3    ; xor-swap \k2 and \k3
eor \k3, \k2
eor \k2, \k3
.endm
.macro rearrange_rkey0 a, b, c, d
// Fixsliced bit-permutation variant 0 of the key word \a..\d. The source
// registers are read only; the permuted copy is built in r22-r25 and its
// 4 bytes are stored to X+ (the interleaved store order implements the
// final SWAPMOVE(x, x, 0x000000ff, 24)). Assumes r18 = 0xf0, r19 = 0x0f;
// clobbers r22-r25, r28-r31.
mov r22, \a
mov r23, \b
mov r24, \c
mov r25, \d
// SWAPMOVE(x, x, 0x00550055, 9) : x & 550055 <-> x & aa00aa00
mov r28, \a
mov r30, \c
lsr r28
lsr r30
eor r28, \b
eor r30, \d
andi r28, 85
andi r30, 85
eor r23, r28
eor r25, r30
lsl r28
lsl r30
eor r22, r28
eor r24, r30
// SWAPMOVE(x, x, 0x000f000f, 12) : x & 000f000f <-> x & f000f000
movw r28, r22
mov r30, r24
mov r31, r25
swap r28
swap r29
swap r30
swap r31
andi r28, 15
andi r29, 240
andi r30, 15
andi r31, 240
and r22, r19
and r23, r18
and r24, r19
and r25, r18
or r22, r29
or r23, r28
or r24, r31
or r25, r30
// SWAPMOVE(x, x, 0x00003333, 18) : x & 00003333 <-> x & cccc0000
movw r30, r22
lsr r30
lsr r30
lsr r31
lsr r31
eor r30, r24
eor r31, r25
andi r30, 51
andi r31, 51
eor r24, r30
eor r25, r31
lsl r30
lsl r30
lsl r31
lsl r31
eor r22, r30
eor r23, r31
// SWAPMOVE(x, x, 0x000000ff, 24) folded into the store order
st X+, r22
st X+, r24
st X+, r23
st X+, r25
.endm
.macro swap_4bits reg, tmp, src0, dst0, src1, dst1
// Exchanges bit \src0 with bit \dst0 and bit \src1 with bit \dst1 inside
// \reg, using the T flag and \tmp as a backup copy of the original value
mov \tmp, \reg
bst \reg, \src0
bld \reg, \dst0
bst \reg, \src1
bld \reg, \dst1
bst \tmp, \dst0
bld \reg, \src0
bst \tmp, \dst1
bld \reg, \src1
.endm
.macro rearrange_rkey1 a, b, c, d
// Fixsliced bit-permutation variant 1; same conventions as rearrange_rkey0
// (sources read only, permuted copy built in r22-r25 and stored to X+,
// assumes r18 = 0xf0 / r19 = 0x0f, clobbers r22-r25, r28-r31).
mov r22, \a
mov r23, \b
mov r24, \c
mov r25, \d
// SWAPMOVE(x, x, 0x11111111, 3) : x & 11111111 <-> x & 88888888
swap_4bits r22, r28, 0, 3, 4, 7
swap_4bits r23, r28, 0, 3, 4, 7
swap_4bits r24, r28, 0, 3, 4, 7
swap_4bits r25, r28, 0, 3, 4, 7
// SWAPMOVE(x, x, 0x03030303, 6) : x & 03030303 <-> x & c0c0c0c0
swap_4bits r22, r28, 0, 6, 1, 7
swap_4bits r23, r28, 0, 6, 1, 7
swap_4bits r24, r28, 0, 6, 1, 7
swap_4bits r25, r28, 0, 6, 1, 7
// SWAPMOVE(x, x, 0x000f000f, 12) : x & 000f000f <-> x & f000f000
movw r28, r22
mov r30, r24
mov r31, r25
swap r28
swap r29
swap r30
swap r31
andi r28, 15
andi r29, 240
andi r30, 15
andi r31, 240
and r22, r19
and r23, r18
and r24, r19
and r25, r18
or r22, r29
or r23, r28
or r24, r31
or r25, r30
// SWAPMOVE(x, x, 0x000000ff, 24) folded into the store order
st X+, r22
st X+, r24
st X+, r23
st X+, r25
.endm
.macro rearrange_rkey2 a, b, c, d
// Fixsliced bit-permutation variant 2; same conventions as rearrange_rkey0.
// \a must be even-aligned for the movw below.
mov r22, \a
mov r23, \b
mov r24, \c
mov r25, \d
// SWAPMOVE(x, x, 0x0000aaaa, 15) : x & 0000aaaa <-> x & 55550000
movw r28, \a
lsl r29
lsl r28
eor r28, \c
eor r29, \d
andi r28, 170
andi r29, 170
eor r24, r28
eor r25, r29
lsr r28
lsr r29
eor r22, r28
eor r23, r29
// SWAPMOVE(x, x, 0x00003333, 18) : x & 00003333 <-> x & cccc0000
movw r30, r22
lsr r30
lsr r30
lsr r31
lsr r31
eor r30, r24
eor r31, r25
andi r30, 51
andi r31, 51
eor r24, r30
eor r25, r31
lsl r30
lsl r30
lsl r31
lsl r31
eor r22, r30
eor r23, r31
// SWAPMOVE(x, x, 0x0000f0f0, 12) : x & 0000f0f0 <-> x & 0f0f0000
movw r28, r22
movw r30, r24
swap r28
swap r29
swap r30
swap r31
andi r28, 240
andi r29, 240
andi r30, 15
andi r31, 15
and r22, r18
and r23, r18
and r24, r19
and r25, r19
or r22, r30
or r23, r31
or r24, r28
or r25, r29
// SWAPMOVE(x, x, 0x000000ff, 24) folded into the store order
st X+, r22
st X+, r24
st X+, r23
st X+, r25
.endm
.macro rearrange_rkey3 a, b, c, d
// Fixsliced bit-permutation variant 3; same conventions as rearrange_rkey0.
mov r22, \a
mov r23, \b
mov r24, \c
mov r25, \d
// SWAPMOVE(x, x, 0x0a0a0a0a, 3) : x & 0a0a0a0a <-> x & 50505050
swap_4bits r22, r28, 1, 4, 3, 6
swap_4bits r23, r28, 1, 4, 3, 6
swap_4bits r24, r28, 1, 4, 3, 6
swap_4bits r25, r28, 1, 4, 3, 6
// SWAPMOVE step: bits 0xcc of bytes 1,3 <-> bits 0x33 of bytes 0,2
mov r28, r22
mov r30, r24
lsl r28
lsl r28
lsl r30
lsl r30
eor r28, r23
eor r30, r25
andi r28, 204
andi r30, 204
eor r23, r28
eor r25, r30
lsr r28
lsr r28
lsr r30
lsr r30
eor r22, r28
eor r24, r30
// SWAPMOVE(x, x, 0x0000f0f0, 12) : x & 0000f0f0 <-> x & 0f0f0000
movw r28, r22
movw r30, r24
swap r28
swap r29
swap r30
swap r31
andi r28, 240
andi r29, 240
andi r30, 15
andi r31, 15
and r22, r18
and r23, r18
and r24, r19
and r25, r19
or r22, r30
or r23, r31
or r24, r28
or r25, r29
// SWAPMOVE(x, x, 0x000000ff, 24) folded into the store order
st X+, r22
st X+, r24
st X+, r23
st X+, r25
.endm
.macro swap_bytes x, y
// Exchanges the contents of two registers with the xor-swap trick
eor \x, \y
eor \y, \x
eor \x, \y
.endm
;-----------------------------------------------------------------------
; void gift128_kexpand(uint8_t *rkeys, const uint8_t *key)
; Fixsliced GIFT-128 key schedule: expands the 16-byte key into 320 bytes
; of round-key material (40 bytes per quintuple round, 8 quintuple rounds)
; written through X, as consumed by gift128_encrypt.
; In:   r25:r24 = rkeys (output), r23:r22 = key (input)
; Fix:  r1 is now cleared before returning; kexp_round uses `mul`, which
;       clobbers r1:r0, and the avr-gcc ABI requires r1 == 0.
;-----------------------------------------------------------------------
.global gift128_kexpand
gift128_kexpand:
; Save callee-saved r2-r17, r28-r31 (and the caller's r24-r25)
push_registers 2,17
push_registers 28,31
push_registers 24,25
.L__stack_usage = 22
; Save the argument pointers to X (round keys) and Y (key)
movw XL, ARG1
movw YL, ARG2
; Load the key given by argument to registers 2-17 instead of 0-15 because
; the mul instruction unconditionally overwrites registers r1:r0.
.irp param,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15,r16,r17
ld \param, Y+
.endr
; Constants used throughout: nibble masks 0xf0/0x0f and mul-by-16
ldi r18, 240
ldi r19, 15
ldi r20, 16
; First block of 40 round-key bytes: rearrange the four key words into
; fixsliced order, interleaved with kexp_round state updates
rearrange_rkey0 r14, r15, r16, r17
rearrange_rkey0 r6, r7, r8, r9
rearrange_rkey1 r10, r11, r12, r13
rearrange_rkey1 r2, r3, r4, r5
rearrange_rkey2 r6, r7, r8, r9
kexp_round r14, r15, r16, r17
rearrange_rkey2 r14, r15, r16, r17
rearrange_rkey3 r2, r3, r4, r5
kexp_round r10, r11, r12, r13
rearrange_rkey3 r10, r11, r12, r13
st X+, r17
st X+, r16
st X+, r15
st X+, r14
kexp_round r6, r7, r8, r9
st X+, r9
st X+, r8
st X+, r7
st X+, r6
; Loop counter: 7 more blocks of 40 round-key bytes
ldi r21, 7
kexp_loop:
cpi r21, 4
brne skip_swap_start
swap_bytes r10, r2
swap_bytes r11, r3
swap_bytes r12, r4
swap_bytes r13, r5
skip_swap_start:
rearrange_rkey0 r10, r11, r12, r13
kexp_round r2, r3, r4, r5
rearrange_rkey0 r2, r3, r4, r5
rearrange_rkey1 r6, r7, r8, r9
kexp_round r14, r15, r16, r17
rearrange_rkey1 r14, r15, r16, r17
rearrange_rkey2 r2, r3, r4, r5
kexp_round r10, r11, r12, r13
rearrange_rkey2 r10, r11, r12, r13
rearrange_rkey3 r14, r15, r16, r17
kexp_round r6, r7, r8, r9
rearrange_rkey3 r6, r7, r8, r9
st X+, r13
st X+, r12
st X+, r11
st X+, r10
kexp_round r2, r3, r4, r5
st X+, r5
st X+, r4
st X+, r3
st X+, r2
; Rotate the word roles for the next iteration
swap_bytes r10, r14
swap_bytes r11, r15
swap_bytes r12, r16
swap_bytes r13, r17
swap_bytes r2, r6
swap_bytes r3, r7
swap_bytes r4, r8
swap_bytes r5, r9
cpi r21, 4
brne skip_swap_end
swap_bytes r10, r2
swap_bytes r11, r3
swap_bytes r12, r4
swap_bytes r13, r5
skip_swap_end:
; Decrement loop counter
subi r21, 1
cpi r21, 0
breq kexp_exit
rjmp kexp_loop
kexp_exit:
; Restore r24-r25, r28-r31 and r2-r17
pop_registers 24,25
pop_registers 28,31
pop_registers 2,17
clr r1 ; avr-gcc ABI: r1 must be zero on return (mul clobbered it)
ret
.size gift128_kexpand, .-gift128_kexpand
;-----------------------------------------------------------------------
; void gift128_encrypt(uint8_t *out, const uint8_t *in, const uint8_t *rkeys)
; Fixsliced GIFT-128 encryption: 8 quintuple rounds, consuming 40 bytes of
; round keys (via X) and 12 round-constant bytes (via Z) per quintuple
; round. The 16-byte state occupies r0-r15 (this function never uses mul,
; so r0:r1 are free to hold state bytes).
; In:   r25:r24 = out, r23:r22 = in, r21:r20 = rkeys
; Fix:  r1 is now cleared before returning; it holds a state byte during
;       the computation and the avr-gcc ABI requires r1 == 0 on return.
;-----------------------------------------------------------------------
.global gift128_encrypt
gift128_encrypt:
; Save callee-saved r2-r17, r28-r29
push_registers 2,17
push_registers 28,29
.L__stack_usage = 18
; Load the plaintext through X into r0-r15
movw XL, ARG2
.irp param,r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15
ld \param, X+
.endr
; Z -> round constants (rconst lives in .data, i.e. in RAM, so `ld` works);
; X -> round keys
ldi ZL, lo8(rconst)
ldi ZH, hi8(rconst)
movw XL, ARG3
// zero register for byte_rol_2
ldi r20, 0
// Save loop counter
ldi r19, 8
quintuple_round:
// 1st round
sbox r0, r4, r8, r12
sbox r1, r5, r9, r13
sbox r2, r6, r10, r14
sbox r3, r7, r11, r15
ldi r17, 51     // 0x33, nibble mask for llayer1
ldi r18, 17     // 0x11
llayer1 r4, r8, r12
llayer1 r5, r9, r13
llayer1 r6, r10, r14
llayer1 r7, r11, r15
add_round_key r4, r5, r6, r7, r8, r9, r10, r11
add_rconst0 r0, r1, r2, r3
// 2nd round
sbox r12, r4, r8, r0
sbox r13, r5, r9, r1
sbox r14, r6, r10, r2
sbox r15, r7, r11, r3
subi r18, 2     // r18 = 0x0f for half_ror_4
half_ror_4 r0, r1
half_ror_4 r2, r3
ldi r18, 240    // r18 = 0xf0 for half_ror_12
half_ror_12 r8, r9
half_ror_12 r10, r11
add_round_key r5, r4, r7, r6, r8, r9, r10, r11
add_rconst1 r13, r14, r15
// 3rd round
sbox r0, r5, r8, r12
sbox r1, r4, r9, r13
sbox r2, r7, r10, r14
sbox r3, r6, r11, r15
llayer3 r4, r5
llayer3 r6, r7
llayer3 r10, r11
llayer3 r12, r13
add_round_key r5, r4, r7, r6, r10, r11, r8, r9
add_rconst2 r0, r2, r3
// 4th round
sbox r14, r5, r10, r0
sbox r15, r4, r11, r1
sbox r12, r7, r8, r2
sbox r13, r6, r9, r3
// byte_ror_6 (implemented as a rotate left by 2)
byte_rol_2 r0, r20
byte_rol_2 r1, r20
byte_rol_2 r2, r20
byte_rol_2 r3, r20
// byte_ror_4
swap r4
swap r5
swap r6
swap r7
// byte_ror_2
byte_ror_2 r8
byte_ror_2 r9
byte_ror_2 r10
byte_ror_2 r11
add_round_key r5, r4, r7, r6, r10, r11, r8, r9
add_rconst r14, r15, r12, r13
// 5th round
sbox r0, r5, r10, r14
sbox r1, r4, r11, r15
sbox r2, r7, r8, r12
sbox r3, r6, r9, r13
// swap state[0] w/ ROR(state[3], 24)
movw r16, r0
mov r0, r13
mov r1, r14
mov r13, r17
mov r14, r2
mov r17, r3
mov r2, r15
mov r3, r12
mov r15, r17
mov r12, r16
// state[1] = ROR(state[1], 16)
movw r16, r4
mov r4, r7
mov r7, r16
mov r5, r6
mov r6, r17
// state[2] = ROR(state[2], 8)
movw r16, r10
mov r10, r9
mov r9, r8
mov r8, r17
mov r11, r16
add_round_key r4, r5, r6, r7, r8, r9, r10, r11
// last rconst is always of the form 0x800000xx: xx comes from the table,
// the 0x80 byte is hardcoded
ld r16, Z+
eor r12, r16
ldi r16, 128
eor r15, r16
// decrement loop counter
subi r19, 1
cpi r19, 0
breq exit
rjmp quintuple_round
exit:
; Store the ciphertext
movw YL, ARG1
st Y+, r0
st Y+, r1
st Y+, r2
st Y+, r3
st Y+, r4
st Y+, r5
st Y+, r6
st Y+, r7
st Y+, r8
st Y+, r9
st Y+, r10
st Y+, r11
st Y+, r12
st Y+, r13
st Y+, r14
st Y+, r15
; Restore r28-r29 and r2-r17
pop_registers 28,29
pop_registers 2,17
clr r1 ; avr-gcc ABI: r1 must be zero on return (it held a state byte)
ret
.size gift128_encrypt, .-gift128_encrypt
.data
; Fixsliced GIFT-128 round constants: 12 bytes per quintuple round
; (3 + 2 + 2 + 4 + 1 bytes consumed by add_rconst0/1/2, add_rconst and
; the hardcoded last-round byte), 8 quintuple rounds = 96 bytes. Kept in
; .data (RAM) so gift128_encrypt can read it with `ld Z+`.
rconst:
.byte 0x08, 0x00, 0x00, 0x80, 0x80, 0x00, 0x54, 0x81, 0x01, 0x01, 0x01, 0x1f
.byte 0x80, 0x88, 0x88, 0xe0, 0x60, 0x50, 0x51, 0x80, 0x01, 0x03, 0x03, 0x2f
.byte 0x80, 0x88, 0x08, 0x60, 0x60, 0x50, 0x41, 0x80, 0x00, 0x03, 0x03, 0x27
.byte 0x80, 0x88, 0x00, 0xe0, 0x40, 0x50, 0x11, 0x80, 0x01, 0x02, 0x03, 0x2b
.byte 0x80, 0x08, 0x08, 0x40, 0x60, 0x40, 0x01, 0x80, 0x00, 0x02, 0x02, 0x21
.byte 0x80, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x51, 0x80, 0x01, 0x01, 0x03, 0x2e
.byte 0x00, 0x88, 0x08, 0x20, 0x60, 0x50, 0x40, 0x80, 0x00, 0x03, 0x01, 0x06
.byte 0x08, 0x88, 0x00, 0xa0, 0xc0, 0x50, 0x14, 0x81, 0x01, 0x02, 0x01, 0x1a
/* Uncompressed 20-byte-per-round table kept for reference:
.byte 0x08, 0x00, 0x00, 0x10, 0x00, 0x80, 0x01, 0x80, 0x02, 0x00, 0x00, 0x54, 0x81, 0x01, 0x01, 0x01, 0x1f, 0x00, 0x00, 0x80
.byte 0x80, 0x88, 0x88, 0x10, 0x00, 0xe0, 0x01, 0x60, 0x02, 0x00, 0x50, 0x51, 0x80, 0x01, 0x03, 0x03, 0x2f, 0x00, 0x00, 0x80
.byte 0x80, 0x88, 0x08, 0x10, 0x00, 0x60, 0x01, 0x60, 0x02, 0x00, 0x50, 0x41, 0x80, 0x00, 0x03, 0x03, 0x27, 0x00, 0x00, 0x80
.byte 0x80, 0x88, 0x00, 0x10, 0x00, 0xe0, 0x01, 0x40, 0x02, 0x00, 0x50, 0x11, 0x80, 0x01, 0x02, 0x03, 0x2b, 0x00, 0x00, 0x80
.byte 0x80, 0x08, 0x08, 0x10, 0x00, 0x40, 0x01, 0x60, 0x02, 0x00, 0x40, 0x01, 0x80, 0x00, 0x02, 0x02, 0x21, 0x00, 0x00, 0x80
.byte 0x80, 0x00, 0x00, 0x10, 0x00, 0xc0, 0x01, 0x00, 0x02, 0x00, 0x00, 0x51, 0x80, 0x01, 0x01, 0x03, 0x2e, 0x00, 0x00, 0x80
.byte 0x00, 0x88, 0x08, 0x10, 0x00, 0x20, 0x01, 0x60, 0x02, 0x00, 0x50, 0x40, 0x80, 0x00, 0x03, 0x01, 0x06, 0x00, 0x00, 0x80
.byte 0x08, 0x88, 0x00, 0x10, 0x00, 0xa0, 0x01, 0xc0, 0x02, 0x00, 0x50, 0x14, 0x81, 0x01, 0x02, 0x01, 0x1a, 0x00, 0x00, 0x80
*/
|
aadomn/cymric
| 24,997
|
artifact_tches2025-3/benchmark_armv7m/lwc/photonbeetle/internal-photon256-armv7m.S
|
#if defined(__ARM_ARCH_ISA_THUMB) && __ARM_ARCH == 7
/*
* Copyright (C) 2021 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
.syntax unified
.thumb
.text
.align 4
/* Bitsliced round constants for the PHOTON-256 permutation: 12 rounds x
 * 8 words (4 bit-planes for each half of the state), read sequentially,
 * 8 words per loop iteration, by photon256_permute below. Placed in the
 * .text section so the function's `adr` can reach the table. */
.type rconst, %object
rconst:
	.word	0x00000001
	.word	0x01010000
	.word	0x01000000
	.word	0x00000000
	.word	0x01010100
	.word	0x00000101
	.word	0x00010101
	.word	0x01010101
	.word	0x00000001
	.word	0x00000101
	.word	0x01000000
	.word	0x00000000
	.word	0x01010100
	.word	0x01010000
	.word	0x00010101
	.word	0x01010101
	.word	0x00000001
	.word	0x00000101
	.word	0x00010101
	.word	0x00000000
	.word	0x01010100
	.word	0x01010000
	.word	0x01000000
	.word	0x01010101
	.word	0x01010100
	.word	0x00000101
	.word	0x00010101
	.word	0x01010101
	.word	0x00000001
	.word	0x01010000
	.word	0x01000000
	.word	0x00000000
	.word	0x00000001
	.word	0x01010000
	.word	0x00010101
	.word	0x01010101
	.word	0x01010100
	.word	0x00000101
	.word	0x01000000
	.word	0x00000000
	.word	0x00000001
	.word	0x00000101
	.word	0x01000000
	.word	0x01010101
	.word	0x01010100
	.word	0x01010000
	.word	0x00010101
	.word	0x00000000
	.word	0x01010100
	.word	0x00000101
	.word	0x00010101
	.word	0x00000000
	.word	0x00000001
	.word	0x01010000
	.word	0x01000000
	.word	0x01010101
	.word	0x01010100
	.word	0x01010000
	.word	0x00010101
	.word	0x01010101
	.word	0x00000001
	.word	0x00000101
	.word	0x01000000
	.word	0x00000000
	.word	0x00000001
	.word	0x01010000
	.word	0x01000000
	.word	0x01010101
	.word	0x01010100
	.word	0x00000101
	.word	0x00010101
	.word	0x00000000
	.word	0x01010100
	.word	0x00000101
	.word	0x01000000
	.word	0x00000000
	.word	0x00000001
	.word	0x01010000
	.word	0x00010101
	.word	0x01010101
	.word	0x00000001
	.word	0x01010000
	.word	0x00010101
	.word	0x00000000
	.word	0x01010100
	.word	0x00000101
	.word	0x01000000
	.word	0x01010101
	.word	0x01010100
	.word	0x00000101
	.word	0x01000000
	.word	0x01010101
	.word	0x00000001
	.word	0x01010000
	.word	0x00010101
	.word	0x00000000
.size rconst, .-rconst
.align 2
.global photon256_permute
.thumb
.thumb_func
.type photon256_permute, %function
photon256_permute:
push {r4, r5, r6, r7, r8, r9, r10, fp, lr}
mov fp, sp
sub sp, sp, #48
ldr r6, [r0, #16]
ldr r7, [r0, #20]
ldr r8, [r0, #24]
ldr r9, [r0, #28]
eor ip, r6, r6, lsr #3
and ip, ip, #168430090
eor r6, r6, ip
eor r6, r6, ip, lsl #3
eor ip, r6, r6, lsr #6
and ip, ip, #13369548
eor r6, r6, ip
eor r6, r6, ip, lsl #6
eor ip, r6, r6, lsr #12
movw lr, #61680
and ip, ip, lr
eor r6, r6, ip
eor r6, r6, ip, lsl #12
eor ip, r6, r6, lsr #8
and ip, ip, #65280
eor r6, r6, ip
eor r6, r6, ip, lsl #8
eor ip, r7, r7, lsr #3
and ip, ip, #168430090
eor r7, r7, ip
eor r7, r7, ip, lsl #3
eor ip, r7, r7, lsr #6
and ip, ip, #13369548
eor r7, r7, ip
eor r7, r7, ip, lsl #6
eor ip, r7, r7, lsr #12
movw lr, #61680
and ip, ip, lr
eor r7, r7, ip
eor r7, r7, ip, lsl #12
eor ip, r7, r7, lsr #8
and ip, ip, #65280
eor r7, r7, ip
eor r7, r7, ip, lsl #8
eor ip, r8, r8, lsr #3
and ip, ip, #168430090
eor r8, r8, ip
eor r8, r8, ip, lsl #3
eor ip, r8, r8, lsr #6
and ip, ip, #13369548
eor r8, r8, ip
eor r8, r8, ip, lsl #6
eor ip, r8, r8, lsr #12
movw lr, #61680
and ip, ip, lr
eor r8, r8, ip
eor r8, r8, ip, lsl #12
eor ip, r8, r8, lsr #8
and ip, ip, #65280
eor r8, r8, ip
eor r8, r8, ip, lsl #8
eor ip, r9, r9, lsr #3
and ip, ip, #168430090
eor r9, r9, ip
eor r9, r9, ip, lsl #3
eor ip, r9, r9, lsr #6
and ip, ip, #13369548
eor r9, r9, ip
eor r9, r9, ip, lsl #6
eor ip, r9, r9, lsr #12
movw lr, #61680
and ip, ip, lr
eor r9, r9, ip
eor r9, r9, ip, lsl #12
eor ip, r9, r9, lsr #8
and ip, ip, #65280
eor r9, r9, ip
eor r9, r9, ip, lsl #8
uxtb r2, r6
uxtb r3, r6, ror #8
uxtb r4, r6, ror #16
uxtb r5, r6, ror #24
bfi r2, r7, #8, #8
bfi r2, r8, #16, #8
bfi r2, r9, #24, #8
lsr r7, r7, #8
lsr r8, r8, #8
lsr r9, r9, #8
bfi r3, r7, #8, #8
bfi r3, r8, #16, #8
bfi r3, r9, #24, #8
lsr r7, r7, #8
lsr r8, r8, #8
lsr r9, r9, #8
bfi r4, r7, #8, #8
bfi r4, r8, #16, #8
bfi r4, r9, #24, #8
lsr r7, r7, #8
lsr r8, r8, #8
lsr r9, r9, #8
bfi r5, r7, #8, #8
bfi r5, r8, #16, #8
bfi r5, r9, #24, #8
str r2, [r0, #16]
str r3, [r0, #20]
str r4, [r0, #24]
str r5, [r0, #28]
ldr r6, [r0, #0]
ldr r7, [r0, #4]
ldr r8, [r0, #8]
ldr r9, [r0, #12]
eor ip, r6, r6, lsr #3
and ip, ip, #168430090
eor r6, r6, ip
eor r6, r6, ip, lsl #3
eor ip, r6, r6, lsr #6
and ip, ip, #13369548
eor r6, r6, ip
eor r6, r6, ip, lsl #6
eor ip, r6, r6, lsr #12
movw lr, #61680
and ip, ip, lr
eor r6, r6, ip
eor r6, r6, ip, lsl #12
eor ip, r6, r6, lsr #8
and ip, ip, #65280
eor r6, r6, ip
eor r6, r6, ip, lsl #8
eor ip, r7, r7, lsr #3
and ip, ip, #168430090
eor r7, r7, ip
eor r7, r7, ip, lsl #3
eor ip, r7, r7, lsr #6
and ip, ip, #13369548
eor r7, r7, ip
eor r7, r7, ip, lsl #6
eor ip, r7, r7, lsr #12
movw lr, #61680
and ip, ip, lr
eor r7, r7, ip
eor r7, r7, ip, lsl #12
eor ip, r7, r7, lsr #8
and ip, ip, #65280
eor r7, r7, ip
eor r7, r7, ip, lsl #8
eor ip, r8, r8, lsr #3
and ip, ip, #168430090
eor r8, r8, ip
eor r8, r8, ip, lsl #3
eor ip, r8, r8, lsr #6
and ip, ip, #13369548
eor r8, r8, ip
eor r8, r8, ip, lsl #6
eor ip, r8, r8, lsr #12
movw lr, #61680
and ip, ip, lr
eor r8, r8, ip
eor r8, r8, ip, lsl #12
eor ip, r8, r8, lsr #8
and ip, ip, #65280
eor r8, r8, ip
eor r8, r8, ip, lsl #8
eor ip, r9, r9, lsr #3
and ip, ip, #168430090
eor r9, r9, ip
eor r9, r9, ip, lsl #3
eor ip, r9, r9, lsr #6
and ip, ip, #13369548
eor r9, r9, ip
eor r9, r9, ip, lsl #6
eor ip, r9, r9, lsr #12
movw lr, #61680
and ip, ip, lr
eor r9, r9, ip
eor r9, r9, ip, lsl #12
eor ip, r9, r9, lsr #8
and ip, ip, #65280
eor r9, r9, ip
eor r9, r9, ip, lsl #8
uxtb r2, r6
uxtb r3, r6, ror #8
uxtb r4, r6, ror #16
uxtb r5, r6, ror #24
bfi r2, r7, #8, #8
bfi r2, r8, #16, #8
bfi r2, r9, #24, #8
lsr r7, r7, #8
lsr r8, r8, #8
lsr r9, r9, #8
bfi r3, r7, #8, #8
bfi r3, r8, #16, #8
bfi r3, r9, #24, #8
lsr r7, r7, #8
lsr r8, r8, #8
lsr r9, r9, #8
bfi r4, r7, #8, #8
bfi r4, r8, #16, #8
bfi r4, r9, #24, #8
lsr r7, r7, #8
lsr r8, r8, #8
lsr r9, r9, #8
bfi r5, r7, #8, #8
bfi r5, r8, #16, #8
bfi r5, r9, #24, #8
adr r10, rconst
movs r1, #12
.L1:
ldr r6, [r10], #4
ldr r7, [r10], #4
ldr r8, [r10], #4
ldr r9, [r10], #4
eors r2, r6
eors r3, r7
eor r4, r8
eor r5, r9
eors r3, r4
and r6, r4, r3
eors r5, r6
movs r7, r5
ands r5, r3
eors r5, r4
mov r8, r5
eors r5, r2
mvns r5, r5
movs r4, r5
orr r8, r2
eors r2, r7
eors r3, r2
orrs r4, r3
eors r4, r7
eor r3, r8
eors r5, r3
eor ip, r2, r2, lsr #4
movw lr, #256
movt lr, #1795
and ip, ip, lr
eor r2, r2, ip
eor r2, r2, ip, lsl #4
eor ip, r2, r2, lsr #2
movw lr, #4352
movt lr, #8755
and ip, ip, lr
eor r2, r2, ip
eor r2, r2, ip, lsl #2
eor ip, r2, r2, lsr #1
and ip, ip, #1426085120
eor r2, r2, ip
eor r2, r2, ip, lsl #1
eor ip, r3, r3, lsr #4
movw lr, #256
movt lr, #1795
and ip, ip, lr
eor r3, r3, ip
eor r3, r3, ip, lsl #4
eor ip, r3, r3, lsr #2
movw lr, #4352
movt lr, #8755
and ip, ip, lr
eor r3, r3, ip
eor r3, r3, ip, lsl #2
eor ip, r3, r3, lsr #1
and ip, ip, #1426085120
eor r3, r3, ip
eor r3, r3, ip, lsl #1
eor ip, r4, r4, lsr #4
movw lr, #256
movt lr, #1795
and ip, ip, lr
eor r4, r4, ip
eor r4, r4, ip, lsl #4
eor ip, r4, r4, lsr #2
movw lr, #4352
movt lr, #8755
and ip, ip, lr
eor r4, r4, ip
eor r4, r4, ip, lsl #2
eor ip, r4, r4, lsr #1
and ip, ip, #1426085120
eor r4, r4, ip
eor r4, r4, ip, lsl #1
eor ip, r5, r5, lsr #4
movw lr, #256
movt lr, #1795
and ip, ip, lr
eor r5, r5, ip
eor r5, r5, ip, lsl #4
eor ip, r5, r5, lsr #2
movw lr, #4352
movt lr, #8755
and ip, ip, lr
eor r5, r5, ip
eor r5, r5, ip, lsl #2
eor ip, r5, r5, lsr #1
and ip, ip, #1426085120
eor r5, r5, ip
eor r5, r5, ip, lsl #1
str r2, [fp, #-48]
str r3, [fp, #-44]
str r4, [fp, #-40]
str r5, [fp, #-36]
ldr r2, [r0, #16]
ldr r3, [r0, #20]
ldr r4, [r0, #24]
ldr r5, [r0, #28]
ldr r6, [r10], #4
ldr r7, [r10], #4
ldr r8, [r10], #4
ldr r9, [r10], #4
eors r2, r6
eors r3, r7
eor r4, r8
eor r5, r9
eors r3, r4
and r6, r4, r3
eors r5, r6
movs r7, r5
ands r5, r3
eors r5, r4
mov r8, r5
eors r5, r2
mvns r5, r5
movs r4, r5
orr r8, r2
eors r2, r7
eors r3, r2
orrs r4, r3
eors r4, r7
eor r3, r8
eors r5, r3
eor ip, r2, r2, lsr #4
movw lr, #3599
movt lr, #2060
and ip, ip, lr
eor r2, r2, ip
eor r2, r2, ip, lsl #4
eor ip, r2, r2, lsr #2
movw lr, #4352
movt lr, #8755
and ip, ip, lr
eor r2, r2, ip
eor r2, r2, ip, lsl #2
eor ip, r2, r2, lsr #1
and ip, ip, #1426085120
eor r2, r2, ip
eor r2, r2, ip, lsl #1
eor ip, r3, r3, lsr #4
movw lr, #3599
movt lr, #2060
and ip, ip, lr
eor r3, r3, ip
eor r3, r3, ip, lsl #4
eor ip, r3, r3, lsr #2
movw lr, #4352
movt lr, #8755
and ip, ip, lr
eor r3, r3, ip
eor r3, r3, ip, lsl #2
eor ip, r3, r3, lsr #1
and ip, ip, #1426085120
eor r3, r3, ip
eor r3, r3, ip, lsl #1
eor ip, r4, r4, lsr #4
movw lr, #3599
movt lr, #2060
and ip, ip, lr
eor r4, r4, ip
eor r4, r4, ip, lsl #4
eor ip, r4, r4, lsr #2
movw lr, #4352
movt lr, #8755
and ip, ip, lr
eor r4, r4, ip
eor r4, r4, ip, lsl #2
eor ip, r4, r4, lsr #1
and ip, ip, #1426085120
eor r4, r4, ip
eor r4, r4, ip, lsl #1
eor ip, r5, r5, lsr #4
movw lr, #3599
movt lr, #2060
and ip, ip, lr
eor r5, r5, ip
eor r5, r5, ip, lsl #4
eor ip, r5, r5, lsr #2
movw lr, #4352
movt lr, #8755
and ip, ip, lr
eor r5, r5, ip
eor r5, r5, ip, lsl #2
eor ip, r5, r5, lsr #1
and ip, ip, #1426085120
eor r5, r5, ip
eor r5, r5, ip, lsl #1
str r2, [fp, #-32]
str r3, [fp, #-28]
str r4, [fp, #-24]
str r5, [fp, #-20]
ldrb r2, [fp, #-48]
ldrb ip, [fp, #-44]
ldrb lr, [fp, #-40]
orr r2, r2, ip, lsl #8
ldrb ip, [fp, #-36]
orr r2, r2, lr, lsl #16
orr r2, r2, ip, lsl #24
ldrb r3, [fp, #-47]
ldrb ip, [fp, #-43]
ldrb lr, [fp, #-39]
orr r3, r3, ip, lsl #8
ldrb ip, [fp, #-35]
orr r3, r3, lr, lsl #16
orr r3, r3, ip, lsl #24
ldrb r4, [fp, #-46]
ldrb ip, [fp, #-42]
ldrb lr, [fp, #-38]
orr r4, r4, ip, lsl #8
ldrb ip, [fp, #-34]
orr r4, r4, lr, lsl #16
orr r4, r4, ip, lsl #24
ldrb r5, [fp, #-45]
ldrb ip, [fp, #-41]
ldrb lr, [fp, #-37]
orr r5, r5, ip, lsl #8
ldrb ip, [fp, #-33]
orr r5, r5, lr, lsl #16
orr r5, r5, ip, lsl #24
eor ip, r2, r2, lsr #24
ror ip, ip, #24
mov r6, ip
eor ip, r3, r3, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r6, ip
eor ip, r4, r4, lsr #24
ror ip, ip, #24
eor r6, ip
eors r6, r5
eor ip, r5, r5, lsr #24
ror ip, ip, #24
eor r6, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r6, ip
eor ip, r2, r2, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
mov r7, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r7, ip
eors r7, r3
eor ip, r3, r3, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r7, ip
eor ip, r4, r4, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r7, ip
eors r7, r5
eor ip, r5, r5, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r7, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r7, ip
eor ip, r2, r2, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
mov r8, ip
eor ip, r3, r3, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r8, ip
eor r8, r4
eor ip, r4, r4, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r8, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r8, ip
eor r8, r5
eor ip, r5, r5, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r8, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r8, ip
mov r9, r2
eor ip, r3, r3, lsr #24
ror ip, ip, #24
eor r9, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r9, ip
eor r9, r4
eor ip, r4, r4, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r9, ip
eor r9, r5
str r6, [fp, #-16]
str r7, [fp, #-12]
str r8, [fp, #-8]
str r9, [fp, #-4]
movs r6, r2
eor ip, r2, r2, lsr #24
ror ip, ip, #24
eor r6, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r6, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r6, ip
eor ip, r3, r3, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r6, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r6, ip
eors r6, r4
eor ip, r4, r4, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r6, ip
eors r6, r5
eor ip, r5, r5, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r6, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r6, ip
movs r7, r2
eor ip, r2, r2, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r7, ip
eor ip, r3, r3, lsr #24
ror ip, ip, #24
eor r7, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r7, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r7, ip
eors r7, r4
eor ip, r4, r4, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r7, ip
eors r7, r5
eor ip, r5, r5, lsr #24
ror ip, ip, #24
eor r7, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r7, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r7, ip
eor ip, r2, r2, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
mov r8, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r8, ip
eor ip, r3, r3, lsr #24
ror ip, ip, #24
eor r8, ip
eor ip, r4, r4, lsr #24
ror ip, ip, #24
eor r8, ip
eor ip, r5, r5, lsr #24
ror ip, ip, #24
eor r8, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r8, ip
mov r9, r2
eor ip, r2, r2, lsr #24
ror ip, ip, #24
eor r9, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r9, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r9, ip
eor r9, r3
eor r9, r4
eor ip, r4, r4, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r9, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r9, ip
eor ip, r5, r5, lsr #24
ror ip, ip, #24
eor r9, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r9, ip
ldrb r2, [fp, #-32]
ldrb ip, [fp, #-28]
ldrb lr, [fp, #-24]
orr r2, r2, ip, lsl #8
ldrb ip, [fp, #-20]
orr r2, r2, lr, lsl #16
orr r2, r2, ip, lsl #24
ldrb r3, [fp, #-31]
ldrb ip, [fp, #-27]
ldrb lr, [fp, #-23]
orr r3, r3, ip, lsl #8
ldrb ip, [fp, #-19]
orr r3, r3, lr, lsl #16
orr r3, r3, ip, lsl #24
ldrb r4, [fp, #-30]
ldrb ip, [fp, #-26]
ldrb lr, [fp, #-22]
orr r4, r4, ip, lsl #8
ldrb ip, [fp, #-18]
orr r4, r4, lr, lsl #16
orr r4, r4, ip, lsl #24
ldrb r5, [fp, #-29]
ldrb ip, [fp, #-25]
ldrb lr, [fp, #-21]
orr r5, r5, ip, lsl #8
ldrb ip, [fp, #-17]
orr r5, r5, lr, lsl #16
orr r5, r5, ip, lsl #24
eor ip, r2, r2, lsr #24
ror ip, ip, #24
eor r6, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r6, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r6, ip
eors r6, r3
eor ip, r3, r3, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r6, ip
eor ip, r4, r4, lsr #24
ror ip, ip, #24
eor r6, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r6, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r6, ip
eors r6, r5
eor ip, r5, r5, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r6, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r6, ip
eor ip, r2, r2, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r7, ip
eor ip, r3, r3, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r7, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r7, ip
eors r7, r4
eor ip, r4, r4, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r7, ip
eor ip, r5, r5, lsr #24
ror ip, ip, #24
eor r7, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r7, ip
eor r8, r2
eor ip, r2, r2, lsr #24
ror ip, ip, #24
eor r8, ip
eor r8, r3
eor r8, r4
eor ip, r5, r5, lsr #24
ror ip, ip, #24
eor r8, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r8, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r8, ip
eor r9, r2
eor ip, r2, r2, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r9, ip
eor ip, r3, r3, lsr #24
ror ip, ip, #24
eor r9, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r9, ip
eor ip, r4, r4, lsr #24
ror ip, ip, #24
eor r9, ip
eor r9, r5
eor ip, r5, r5, lsr #24
ror ip, ip, #24
eor r9, ip
strb r6, [r0, #16]
lsr ip, r6, #8
lsr lr, r6, #16
strb ip, [r0, #20]
strb lr, [r0, #24]
lsr ip, r6, #24
strb ip, [r0, #28]
strb r7, [r0, #17]
lsr ip, r7, #8
lsr lr, r7, #16
strb ip, [r0, #21]
strb lr, [r0, #25]
lsr ip, r7, #24
strb ip, [r0, #29]
strb r8, [r0, #18]
lsr ip, r8, #8
lsr lr, r8, #16
strb ip, [r0, #22]
strb lr, [r0, #26]
lsr ip, r8, #24
strb ip, [r0, #30]
strb r9, [r0, #19]
lsr ip, r9, #8
lsr lr, r9, #16
strb ip, [r0, #23]
strb lr, [r0, #27]
lsr ip, r9, #24
strb ip, [r0, #31]
ldr r6, [fp, #-16]
ldr r7, [fp, #-12]
ldr r8, [fp, #-8]
ldr r9, [fp, #-4]
eor ip, r2, r2, lsr #24
ror ip, ip, #24
eor r6, ip
eor ip, r3, r3, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r6, ip
eors r6, r4
eor ip, r4, r4, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r6, ip
eor ip, r5, r5, lsr #24
ror ip, ip, #24
eor r6, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r6, ip
eors r7, r2
eor ip, r2, r2, lsr #24
ror ip, ip, #24
eor r7, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r7, ip
eors r7, r3
eor ip, r3, r3, lsr #24
ror ip, ip, #24
eor r7, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r7, ip
eors r7, r4
eor ip, r4, r4, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r7, ip
eor ip, r5, r5, lsr #24
ror ip, ip, #24
eor r7, ip
eor r8, r2
eor ip, r2, r2, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r8, ip
eor ip, r3, r3, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r8, ip
eor r8, r4
eor ip, r4, r4, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r8, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r8, ip
eor r8, r5
eor ip, r5, r5, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r8, ip
eor ip, r2, r2, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r9, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r9, ip
eor r9, r3
eor ip, r3, r3, lsr #24
ror ip, ip, #24
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r9, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r9, ip
eor r9, r4
eor ip, r4, r4, lsr #24
ror ip, ip, #24
eor r9, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r9, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r9, ip
eor ip, r5, r5, lsr #24
ror ip, ip, #24
eor r9, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r9, ip
eor ip, ip, ip, lsr #24
ror ip, ip, #24
eor r9, ip
strb r6, [r0, #0]
lsr ip, r6, #8
lsr lr, r6, #16
strb ip, [r0, #4]
strb lr, [r0, #8]
lsr ip, r6, #24
strb ip, [r0, #12]
strb r7, [r0, #1]
lsr ip, r7, #8
lsr lr, r7, #16
strb ip, [r0, #5]
strb lr, [r0, #9]
lsr ip, r7, #24
strb ip, [r0, #13]
strb r8, [r0, #2]
lsr ip, r8, #8
lsr lr, r8, #16
strb ip, [r0, #6]
strb lr, [r0, #10]
lsr ip, r8, #24
strb ip, [r0, #14]
strb r9, [r0, #3]
lsr ip, r9, #8
lsr lr, r9, #16
strb ip, [r0, #7]
strb lr, [r0, #11]
lsr ip, r9, #24
strb ip, [r0, #15]
ldr r2, [r0, #0]
ldr r3, [r0, #4]
ldr r4, [r0, #8]
ldr r5, [r0, #12]
subs r1, r1, #1
bne .L1
uxtb r6, r2
uxtb r7, r2, ror #8
uxtb r8, r2, ror #16
uxtb r9, r2, ror #24
bfi r6, r3, #8, #8
bfi r6, r4, #16, #8
bfi r6, r5, #24, #8
lsr r3, r3, #8
lsr r4, r4, #8
lsr r5, r5, #8
bfi r7, r3, #8, #8
bfi r7, r4, #16, #8
bfi r7, r5, #24, #8
lsr r3, r3, #8
lsr r4, r4, #8
lsr r5, r5, #8
bfi r8, r3, #8, #8
bfi r8, r4, #16, #8
bfi r8, r5, #24, #8
lsr r3, r3, #8
lsr r4, r4, #8
lsr r5, r5, #8
bfi r9, r3, #8, #8
bfi r9, r4, #16, #8
bfi r9, r5, #24, #8
eor ip, r6, r6, lsr #7
and ip, ip, #11141290
eor r6, r6, ip
eor r6, r6, ip, lsl #7
eor ip, r6, r6, lsr #14
movw lr, #52428
and ip, ip, lr
eor r6, r6, ip
eor r6, r6, ip, lsl #14
eor ip, r6, r6, lsr #4
and ip, ip, #15728880
eor r6, r6, ip
eor r6, r6, ip, lsl #4
eor ip, r6, r6, lsr #8
and ip, ip, #65280
eor r6, r6, ip
eor r6, r6, ip, lsl #8
eor ip, r7, r7, lsr #7
and ip, ip, #11141290
eor r7, r7, ip
eor r7, r7, ip, lsl #7
eor ip, r7, r7, lsr #14
movw lr, #52428
and ip, ip, lr
eor r7, r7, ip
eor r7, r7, ip, lsl #14
eor ip, r7, r7, lsr #4
and ip, ip, #15728880
eor r7, r7, ip
eor r7, r7, ip, lsl #4
eor ip, r7, r7, lsr #8
and ip, ip, #65280
eor r7, r7, ip
eor r7, r7, ip, lsl #8
eor ip, r8, r8, lsr #7
and ip, ip, #11141290
eor r8, r8, ip
eor r8, r8, ip, lsl #7
eor ip, r8, r8, lsr #14
movw lr, #52428
and ip, ip, lr
eor r8, r8, ip
eor r8, r8, ip, lsl #14
eor ip, r8, r8, lsr #4
and ip, ip, #15728880
eor r8, r8, ip
eor r8, r8, ip, lsl #4
eor ip, r8, r8, lsr #8
and ip, ip, #65280
eor r8, r8, ip
eor r8, r8, ip, lsl #8
eor ip, r9, r9, lsr #7
and ip, ip, #11141290
eor r9, r9, ip
eor r9, r9, ip, lsl #7
eor ip, r9, r9, lsr #14
movw lr, #52428
and ip, ip, lr
eor r9, r9, ip
eor r9, r9, ip, lsl #14
eor ip, r9, r9, lsr #4
and ip, ip, #15728880
eor r9, r9, ip
eor r9, r9, ip, lsl #4
eor ip, r9, r9, lsr #8
and ip, ip, #65280
eor r9, r9, ip
eor r9, r9, ip, lsl #8
str r6, [r0, #0]
str r7, [r0, #4]
str r8, [r0, #8]
str r9, [r0, #12]
ldr r2, [r0, #16]
ldr r3, [r0, #20]
ldr r4, [r0, #24]
ldr r5, [r0, #28]
uxtb r6, r2
uxtb r7, r2, ror #8
uxtb r8, r2, ror #16
uxtb r9, r2, ror #24
bfi r6, r3, #8, #8
bfi r6, r4, #16, #8
bfi r6, r5, #24, #8
lsr r3, r3, #8
lsr r4, r4, #8
lsr r5, r5, #8
bfi r7, r3, #8, #8
bfi r7, r4, #16, #8
bfi r7, r5, #24, #8
lsr r3, r3, #8
lsr r4, r4, #8
lsr r5, r5, #8
bfi r8, r3, #8, #8
bfi r8, r4, #16, #8
bfi r8, r5, #24, #8
lsr r3, r3, #8
lsr r4, r4, #8
lsr r5, r5, #8
bfi r9, r3, #8, #8
bfi r9, r4, #16, #8
bfi r9, r5, #24, #8
eor ip, r6, r6, lsr #7
and ip, ip, #11141290
eor r6, r6, ip
eor r6, r6, ip, lsl #7
eor ip, r6, r6, lsr #14
movw lr, #52428
and ip, ip, lr
eor r6, r6, ip
eor r6, r6, ip, lsl #14
eor ip, r6, r6, lsr #4
and ip, ip, #15728880
eor r6, r6, ip
eor r6, r6, ip, lsl #4
eor ip, r6, r6, lsr #8
and ip, ip, #65280
eor r6, r6, ip
eor r6, r6, ip, lsl #8
eor ip, r7, r7, lsr #7
and ip, ip, #11141290
eor r7, r7, ip
eor r7, r7, ip, lsl #7
eor ip, r7, r7, lsr #14
movw lr, #52428
and ip, ip, lr
eor r7, r7, ip
eor r7, r7, ip, lsl #14
eor ip, r7, r7, lsr #4
and ip, ip, #15728880
eor r7, r7, ip
eor r7, r7, ip, lsl #4
eor ip, r7, r7, lsr #8
and ip, ip, #65280
eor r7, r7, ip
eor r7, r7, ip, lsl #8
eor ip, r8, r8, lsr #7
and ip, ip, #11141290
eor r8, r8, ip
eor r8, r8, ip, lsl #7
eor ip, r8, r8, lsr #14
movw lr, #52428
and ip, ip, lr
eor r8, r8, ip
eor r8, r8, ip, lsl #14
eor ip, r8, r8, lsr #4
and ip, ip, #15728880
eor r8, r8, ip
eor r8, r8, ip, lsl #4
eor ip, r8, r8, lsr #8
and ip, ip, #65280
eor r8, r8, ip
eor r8, r8, ip, lsl #8
eor ip, r9, r9, lsr #7
and ip, ip, #11141290
eor r9, r9, ip
eor r9, r9, ip, lsl #7
eor ip, r9, r9, lsr #14
movw lr, #52428
and ip, ip, lr
eor r9, r9, ip
eor r9, r9, ip, lsl #14
eor ip, r9, r9, lsr #4
and ip, ip, #15728880
eor r9, r9, ip
eor r9, r9, ip, lsl #4
eor ip, r9, r9, lsr #8
and ip, ip, #65280
eor r9, r9, ip
eor r9, r9, ip, lsl #8
str r6, [r0, #16]
str r7, [r0, #20]
str r8, [r0, #24]
str r9, [r0, #28]
mov sp, fp
pop {r4, r5, r6, r7, r8, r9, r10, fp, pc}
.size photon256_permute, .-photon256_permute
#endif
|
aadomn/cymric
| 19,884
|
artifact_tches2025-3/benchmark_armv7m/lwc/giftcofb/giftb128.s
|
/****************************************************************************
* Compact ARM assembly implementation of the GIFT-128 block cipher. This
* implementation focuses on code size rather than speed.
*
* See "Fixslicing: A New GIFT Representation" paper available at
* https://eprint.iacr.org/2020/412.pdf for more details.
*
* @author Alexandre Adomnicai, Nanyang Technological University
*
* @date July 2021
****************************************************************************/
.syntax unified
.thumb
/*****************************************************************************
* Round constants look-up table according to the fixsliced representation.
*****************************************************************************/
.align 2
.type rconst,%object
// 40 round constants (one 32-bit word per GIFT-128 round), pre-computed in
// the fixsliced representation and consumed sequentially via r0 by the round
// macros: 5 words per 'quintuple_round' call, 8 calls in total.
rconst:
.word 0x10000008, 0x80018000, 0x54000002, 0x01010181
.word 0x8000001f, 0x10888880, 0x6001e000, 0x51500002
.word 0x03030180, 0x8000002f, 0x10088880, 0x60016000
.word 0x41500002, 0x03030080, 0x80000027, 0x10008880
.word 0x4001e000, 0x11500002, 0x03020180, 0x8000002b
.word 0x10080880, 0x60014000, 0x01400002, 0x02020080
.word 0x80000021, 0x10000080, 0x0001c000, 0x51000002
.word 0x03010180, 0x8000002e, 0x10088800, 0x60012000
.word 0x40500002, 0x01030080, 0x80000006, 0x10008808
.word 0xc001a000, 0x14500002, 0x01020181, 0x8000001a
/******************************************************************************
* Macro to compute the SWAPMOVE technique.
* - out0-out1 output registers
* - in0-in1 input registers
* - m mask
* - n shift value
* - tmp temporary register
******************************************************************************/
.macro swpmv out0, out1, in0, in1, m, n, tmp
eor \tmp, \in1, \in0, lsr \n // tmp <- in1 ^ (in0 >> n)
and \tmp, \m // keep only the bits selected by mask m
eor \out1, \in1, \tmp // out1 <- in1 with the selected bits swapped in
eor \out0, \in0, \tmp, lsl \n // out0 <- in0 with the selected bits swapped out
.endm
/******************************************************************************
* Macro to compute a nibble-wise rotation to the right.
* - out output register
* - in input register
* - m0-m1 masks
* - n0-n1 shift value
* - tmp temporary register
******************************************************************************/
.macro nibror out, in, m0, m1, n0, n1, tmp
and \tmp, \m0, \in, lsr \n0 // bits shifted down by n0, masked by m0
and \out, \in, \m1 // bits that wrap around, masked by m1
orr \out, \tmp, \out, lsl \n1 // recombine: per-nibble rotate right by n0
.endm
/******************************************************************************
* Macro to compute the SBox (the NOT operation is included in the round keys).
* - in0-in3 input/output registers
* - tmp temporary register
* - n ror index value to math fixslicing
******************************************************************************/
.macro sbox in0, in1, in2, in3, tmp, n
and \tmp, \in2, \in0, ror \n // tmp <- in2 & (in0 >>> n)
eor \in1, \in1, \tmp // in1 ^= in2 & (in0 >>> n)
and \tmp, \in1, \in3 // tmp <- in1 & in3
eor \in0, \tmp, \in0, ror \n // in0 <- (in0 >>> n) ^ (in1 & in3)
orr \tmp, \in0, \in1 // tmp <- in0 | in1
eor \in2, \tmp, \in2 // in2 ^= in0 | in1
eor \in3, \in3, \in2 // in3 ^= in2
eor \in1, \in1, \in3 // in1 ^= in3
and \tmp, \in0, \in1 // tmp <- in0 & in1
eor \in2, \in2, \tmp // in2 ^= in0 & in1
mvn \in3, \in3 // final NOT on in3
.endm
/******************************************************************************
* Macro to compute the first round within a quintuple round routine.
* - in0-in3 input/output registers
******************************************************************************/
.macro round_0 in0, in1, in2, in3
// Expects r2 = 0x11111111 and r4 = 0x77777777 (set up by the caller).
ldr.w r5, [r0], #4 // load rconst
ldr.w r6, [r1], #4 // load 1st rkey word
ldr.w r7, [r1], #4 // load 2nd rkey word
sbox \in0, \in1, \in2, \in3, r8, #0 // sbox layer
nibror \in3, \in3, r4, r2, 1, 3, r8 // NIBBLE_ROR(in3, 1)
nibror \in2, \in2, r2, r4, 3, 1, r8 // NIBBLE_ROR(in2, 3)
orr r14, r2, r2, lsl #1 // r14 <- 0x33333333 for 'nibror'
nibror \in1, \in1, r14, r14, 2, 2, r8 // NIBBLE_ROR(in1, 2)
eor \in1, \in1, r6 // add 1st rkey word
eor \in2, \in2, r7 // add 2nd rkey word
eor \in0, \in0, r5 // add rconst
.endm
/******************************************************************************
* Macro to compute the second round within a quintuple round routine.
* - in0-in3 input/output registers
******************************************************************************/
.macro round_1 in0, in1, in2, in3
// HALF_ROR(x, n) rotates each 16-bit half of x independently by n.
// Expects r3 = 0x000f000f (set up by the caller).
ldr.w r5, [r0], #4 // load rconst
ldr.w r6, [r1], #4 // load 1st rkey word
ldr.w r7, [r1], #4 // load 2nd rkey word
sbox \in0, \in1, \in2, \in3, r8, #0 // sbox layer
mvn r14, r3, lsl #12 // r14<-0x0fff0fff for HALF_ROR
nibror \in3, \in3, r14, r3, 4, 12, r8 // HALF_ROR(in3, 4)
nibror \in2, \in2, r3, r14, 12, 4, r8 // HALF_ROR(in2, 12)
rev16 \in1, \in1 // HALF_ROR(in1, 8)
eor \in1, \in1, r6 // add 1st rkey word
eor \in2, \in2, r7 // add 2nd rkey word
eor \in0, \in0, r5 // add rconst
.endm
/******************************************************************************
* Macro to compute the third round within a quintuple round routine.
* - in0-in3 input/output registers
******************************************************************************/
.macro round_2 in0, in1, in2, in3
ldr.w r5, [r0], #4 // load rconst
ldr.w r6, [r1], #4 // load 1st rkey word
ldr.w r7, [r1], #4 // load 2nd rkey word
sbox \in0, \in1, \in2, \in3, r8, #0 // sbox layer
orr r14, r2, r2, lsl #2 // r14<-0x55555555 for swpmv
swpmv \in1, \in1, \in1, \in1, r14, #1, r8 // SWAPMOVE(in1,in1,0x55555555,1)
// NOTE(review): below, r14 lsr #16 = 0x00005555 and r14 lsl #16 = 0x55550000,
// i.e. the opposite of the masks quoted in the trailing comments; confirm the
// comments against the reference implementation before relying on them.
eor r8, \in3, \in3, lsr #1
and r8, r8, r14, lsr #16
eor \in3, \in3, r8
eor \in3, \in3, r8, lsl #1 //SWAPMOVE(r12,r12,0x55550000,1)
eor r8, \in2, \in2, lsr #1
and r8, r8, r14, lsl #16
eor \in2, \in2, r8
eor \in2, \in2, r8, lsl #1 //SWAPMOVE(r11,r11,0x00005555,1)
eor \in1, \in1, r6 // add 1st rkey word
eor \in2, r7, \in2, ror #16 // add 2nd rkey word (in2 also rotated by 16)
eor \in0, \in0, r5 // add rconst
.endm
/******************************************************************************
* Macro to compute the fourth round within a quintuple round routine.
* - in0-in3 input/output registers
******************************************************************************/
.macro round_3 in0, in1, in2, in3
// Expects r3 = 0x000f000f (set up by the caller).
ldr.w r6, [r1], #4 // load 1st rkey word
ldr.w r7, [r1], #4 // load 2nd rkey word
sbox \in0, \in1, \in2, \in3, r8, #16 // sbox layer (in0 taken rotated by 16)
eor r14, r3, r3, lsl #8 // r14<-0x0f0f0f0f for nibror
nibror \in1, \in1, r14, r14, #4, #4, r8 // byte-wise rotate of in1 by 4
orr r14, r14, r14, lsl #2 // r14<-0x3f3f3f3f for nibror
mvn r8, r14, lsr #6 // r8<-0xff030303; acts as the 0x03030303
 // mask since the stray high bits are
 // either ANDed with zeros or shifted out
nibror \in2, \in2, r14, r8, #2, #6, r5 // byte-wise rotate of in2 by 2 (r5 scratch)
nibror \in3, \in3, r8, r14, #6, #2, r8 // byte-wise rotate of in3 by 6; r8 is both
 // mask and tmp (safe: mask is read first)
ldr.w r5, [r0], #4 // load rconst (deferred: r5 was scratch above)
eor \in1, \in1, r6 // add 1st rkey word
eor \in2, \in2, r7 // add 2nd rkey word
eor \in0, \in0, r5 // add rconst
.endm
/******************************************************************************
* Macro to compute the fifth round within a quintuple round routine.
* - in0-in3 input/output registers
******************************************************************************/
.macro round_4 in0, in1, in2, in3
ldr.w r5, [r0], #4 // load rconst
ldr.w r6, [r1], #4 // load 1st rkey word
ldr.w r7, [r1], #4 // load 2nd rkey word
sbox \in0, \in1, \in2, \in3, r8, #0 // sbox layer
eor \in1, r6, \in1, ror #16 // add 1st keyword (in1 rotated by 16)
eor \in2, r7, \in2, ror #8 // add 2nd keyword (in2 rotated by 8)
eor \in0, \in0, r5 // add rconst
.endm
/******************************************************************************
* Macro to compute the GIFT-128 key update (in its classical representation).
* Two 16-bit rotations are computed on the 32-bit word 'v' given as input.
* - u 1st round key word as defined in the specification (U <- W2||W3)
* - v 2nd round key word as defined in the specification (V <- W6||W7)
******************************************************************************/
.macro k_upd u, v
// Expects masks preloaded by gift128_kschedule:
// r9 = 0x00000fff, r10 = 0x0000000f, r12 = 0x3fff0000. Clobbers r2, r3.
and r2, r10, \v, lsr #12 // r2 <- (v >> 12) & 0xf (top 4 bits of low half)
and r3, \v, r9 // r3 <- v & 0xfff (low 12 bits of low half)
orr r2, r2, r3, lsl #4 // low halfword rotated right by 12
and r3, r12, \v, lsr #2 // upper 14 bits of high halfword, shifted down by 2
orr r2, r2, r3
and \v, \v, #0x00030000 // the 2 high-half bits wrapped by the rotation
orr \v, r2, \v, lsl #14 // high halfword rotated right by 2
str.w \u, [r1], #4 // store 1st rkey word (pointer auto-incremented)
str.w \v, [r1], #4 // store 2nd (updated) rkey word
.endm
/******************************************************************************
* Macro to rearrange round key words from their classical to fixsliced
* representations.
* - rk0 1st round key word
* - rk1 2nd round key word
* - idx0 index for SWAPMOVE
* - idx1 index for SWAPMOVE
* - tmp temporary register for SWAPMOVE
******************************************************************************/
.macro rearr_rk rk0, rk1, idx0, idx1, tmp
// Four in-place SWAPMOVE pairs; masks in r3, r10, r11 are preloaded by the
// caller and, together with idx0/idx1, select the bit permutation for the
// targeted round family.
swpmv \rk1, \rk1, \rk1, \rk1, r3, \idx0, \tmp
swpmv \rk0, \rk0, \rk0, \rk0, r3, \idx0, \tmp
swpmv \rk1, \rk1, \rk1, \rk1, r10, \idx1, \tmp
swpmv \rk0, \rk0, \rk0, \rk0, r10, \idx1, \tmp
swpmv \rk1, \rk1, \rk1, \rk1, r11, #12, \tmp
swpmv \rk0, \rk0, \rk0, \rk0, r11, #12, \tmp
swpmv \rk1, \rk1, \rk1, \rk1, #0xff, #24, \tmp
swpmv \rk0, \rk0, \rk0, \rk0, #0xff, #24, \tmp
.endm
/******************************************************************************
* Soubroutine to update the rkeys according to the classical representation.
******************************************************************************/
.align 2
classical_key_update:
// Applies 4 successive classical GIFT-128 key updates; key state lives in
// r4-r7 and 8 rkey words are stored at r1 (auto-incremented by k_upd).
k_upd r5, r7 // 1st classical key update
k_upd r4, r6 // 2nd classical key update
k_upd r7, r5 // 3rd classical key update
k_upd r6, r4 // 4th classical key update
bx lr
/******************************************************************************
* Soubroutine to rearrange round key words from classical to fixsliced
* representation for round i s.t. i mod 5 = 0.
******************************************************************************/
.align 2
rearrange_rkey_0:
ldr.w r6, [r1] // load 1st rkey word (classical rep)
ldr.w r4, [r1, #4] // load 2nd rkey word (classical rep)
rearr_rk r4, r6, #9, #18, r12 // rearrange rkey words (rounds i mod 5 = 0)
str.w r4, [r1, #4] // store 2nd rkey word (fixsliced rep)
str.w r6, [r1], #40 // store 1st rkey word; +40B skips to next 5-round group
bx lr
/******************************************************************************
* Soubroutine to rearrange round key words from classical to fixsliced
* representation for round i s.t. i mod 5 = 1 or 3.
******************************************************************************/
.align 2
rearrange_rkey_1:
ldr.w r5, [r1] // load 1st rkey word (classical rep)
ldr.w r7, [r1, #4] // load 2nd rkey word (classical rep)
rearr_rk r5, r7, #3, #6, r8 // rearrange rkey words (rounds i mod 5 = 1 or 3)
str.w r7, [r1, #4] // store 2nd rkey word (fixsliced rep)
str.w r5, [r1], #40 // store 1st rkey word; +40B skips to next 5-round group
bx lr
/******************************************************************************
* Soubroutine to rearrange round key words from classical to fixsliced
* representation for round i s.t. i mod 5 = 2.
******************************************************************************/
.align 2
rearrange_rkey_2:
ldr.w r5, [r1] // load 1st rkey word (classical rep)
ldr.w r7, [r1, #4] // load 2nd rkey word (classical rep)
rearr_rk r5, r7, #15, #18, r8 // rearrange rkey words (rounds i mod 5 = 2)
str.w r7, [r1, #4] // store 2nd rkey word (fixsliced rep)
str.w r5, [r1], #40 // store 1st rkey word; +40B skips to next 5-round group
bx lr
.align 2
/*****************************************************************************
* Implementation of the GIFT-128 key schedule according to fixslicing.
* The entire round key material is first computed according to the classical
* representation before being rearranged according to fixslicing.
*****************************************************************************/
@ void gift128_kschedule(const u8* key, u32* rkey) {
.global gift128_kschedule
.type gift128_kschedule,%function
gift128_kschedule:
push {r1-r12, r14} // save rkey pointer and callee-saved regs
ldr.w r4, [r0] // load key words
ldr.w r5, [r0, #4] // load key words
ldr.w r6, [r0, #8] // load key words
ldr.w r7, [r0, #12] // load key words
rev r4, r4 // endianness (byte-reverse each key word)
rev r5, r5 // endianness
rev r6, r6 // endianness
rev r7, r7 // endianness
str.w r5, [r1, #4]
str.w r7, [r1], #8 //the first rkeys are not updated
str.w r4, [r1, #4]
str.w r6, [r1], #8 //the first rkeys are not updated
movw r12, #0x3fff
lsl r12, r12, #16 //r12<- 0x3fff0000 (mask for k_upd)
movw r10, #0x000f //r10<- 0x0000000f (mask for k_upd)
movw r9, #0x0fff //r9 <- 0x00000fff (mask for k_upd)
// 9 x 4 + 2 = 38 key updates -> 40 rounds of rkeys in classical rep
bl classical_key_update
bl classical_key_update
bl classical_key_update
bl classical_key_update
bl classical_key_update
bl classical_key_update
bl classical_key_update
bl classical_key_update
bl classical_key_update
k_upd r5, r7 // penultimate round key
k_upd r4, r6 // ultimate round key
sub.w r1, r1, #320 // rewind to round 0 (40 rounds * 2 words * 4B)
movw r3, #0x0055
movt r3, #0x0055 //r3 <- 0x00550055
movw r10, #0x3333 //r10<- 0x00003333
movw r11, #0x000f
movt r11, #0x000f //r11<- 0x000f000f
bl rearrange_rkey_0 // fixslice the rkey words for round 0
bl rearrange_rkey_0 // fixslice the rkey words for round 5
bl rearrange_rkey_0 // fixslice the rkey words for round 10
bl rearrange_rkey_0 // fixslice the rkey words for round 15
bl rearrange_rkey_0 // fixslice the rkey words for round 20
bl rearrange_rkey_0 // fixslice the rkey words for round 25
bl rearrange_rkey_0 // fixslice the rkey words for round 30
bl rearrange_rkey_0 // fixslice the rkey words for round 35
sub.w r1, r1, #312 // 320 - 8: point to the rkeys of round 1
movw r3, #0x1111
movt r3, #0x1111 // r3 <- 0x11111111
movw r10, #0x0303
movt r10, #0x0303 // r10<- 0x03030303
bl rearrange_rkey_1 // fixslice the rkey words for round 1
bl rearrange_rkey_1 // fixslice the rkey words for round 6
bl rearrange_rkey_1 // fixslice the rkey words for round 11
bl rearrange_rkey_1 // fixslice the rkey words for round 16
bl rearrange_rkey_1 // fixslice the rkey words for round 21
bl rearrange_rkey_1 // fixslice the rkey words for round 26
bl rearrange_rkey_1 // fixslice the rkey words for round 31
bl rearrange_rkey_1 // fixslice the rkey words for round 36
sub.w r1, r1, #312 // 320 - 8: point to the rkeys of round 2
movw r3, #0xaaaa // r3 <- 0x0000aaaa
movw r10, #0x3333 // r10<- 0x00003333
movw r11, #0xf0f0 // r11<- 0x0000f0f0
bl rearrange_rkey_2 // fixslice the rkey words for round 2
bl rearrange_rkey_2 // fixslice the rkey words for round 7
bl rearrange_rkey_2 // fixslice the rkey words for round 12
bl rearrange_rkey_2 // fixslice the rkey words for round 17
bl rearrange_rkey_2 // fixslice the rkey words for round 22
bl rearrange_rkey_2 // fixslice the rkey words for round 27
bl rearrange_rkey_2 // fixslice the rkey words for round 32
bl rearrange_rkey_2 // fixslice the rkey words for round 37
sub.w r1, r1, #312 // 320 - 8: point to the rkeys of round 3
movw r3, #0x0a0a
movt r3, #0x0a0a // r3 <- 0x0a0a0a0a
movw r10, #0x00cc
movt r10, #0x00cc // r10<- 0x00cc00cc
bl rearrange_rkey_1 // fixslice the rkey words for round 3
bl rearrange_rkey_1 // fixslice the rkey words for round 8
bl rearrange_rkey_1 // fixslice the rkey words for round 13
bl rearrange_rkey_1 // fixslice the rkey words for round 18
bl rearrange_rkey_1 // fixslice the rkey words for round 23
bl rearrange_rkey_1 // fixslice the rkey words for round 28
bl rearrange_rkey_1 // fixslice the rkey words for round 33
bl rearrange_rkey_1 // fixslice the rkey words for round 38
pop {r1-r12,r14}
bx lr
/*****************************************************************************
* Subroutine to implement a quintuple round of GIFT-128.
*****************************************************************************/
.align 2
quintuple_round:
str.w r14, [sp] // spill lr: the round macros clobber r14 (mask reg)
round_0 r9, r10, r11, r12
round_1 r12, r10, r11, r9 // in0/in3 swapped to follow the fixsliced state
round_2 r9, r10, r11, r12
round_3 r12, r10, r11, r9
round_4 r9, r10, r11, r12
ldr.w r14, [sp] // reload lr
eor r9, r9, r12, ror #24 // XOR-swap sequence:
eor r12, r9, r12, ror #24 // r12 <- old r9
eor r9, r9, r12 // r9 <- old r12 >>> 24 (swap r9 with r12)
bx lr
/*****************************************************************************
* Fully unrolled ARM assembly implementation of the GIFTb-128 block cipher.
* This function simply encrypts a 128-bit block, without any operation mode.
*****************************************************************************/
@ void giftb128_encrypt_block(u8 *out, const u32* rkey, const u8 *block)
.global giftb128_encrypt_block
.type giftb128_encrypt_block,%function
giftb128_encrypt_block:
push {r0,r2-r12,r14} // save output ptr, scratch and callee-saved regs
sub.w sp, #4 // to store 'lr' when calling 'quintuple_round'
ldm r2, {r9-r12} // load plaintext words
rev r9, r9 // byte-reverse (block bytes are big-endian)
rev r10, r10
rev r11, r11
rev r12, r12
movw r2, #0x1111
movt r2, #0x1111 // r2 <- 0x11111111 (for NIBBLE_ROR)
movw r3, #0x000f
movt r3, #0x000f // r3 <- 0x000f000f (for HALF_ROR)
mvn r4, r2, lsl #3 // r4 <- 0x77777777 (for NIBBLE_ROR)
adr r0, rconst // r0 <- 'rconst' address
bl quintuple_round // rounds 1-5
bl quintuple_round // rounds 6-10
bl quintuple_round // rounds 11-15
bl quintuple_round // rounds 16-20
bl quintuple_round // rounds 21-25
bl quintuple_round // rounds 26-30
bl quintuple_round // rounds 31-35
bl quintuple_round // rounds 36-40
ldr.w r0, [sp ,#4] // restore 'ctext' address (saved r0, above lr slot)
rev r9, r9 // byte-reverse back to big-endian output
rev r10, r10
rev r11, r11
rev r12, r12
stm r0, {r9-r12}
add.w sp, #4 // discard the lr spill slot
pop {r0,r2-r12,r14}
bx lr
|
aadomn/cymric
| 4,424
|
artifact_tches2025-3/benchmark_armv7m/lwc/romulusn/skinny128_core.s
|
/*******************************************************************************
* ARMv7-M assembly implementation of fixsliced Skinny-128-384+.
*
* For more details, see the paper at
* https://csrc.nist.gov/CSRC/media/Events/lightweight-cryptography-workshop-2020
* /documents/papers/fixslicing-lwc2020.pdf
*
* @author Alexandre Adomnicai
* alex.adomnicai@gmail.com
*
* @date March 2022
*******************************************************************************/
.syntax unified
.thumb
// SWAPMOVE: exchange the bits of in1 with the bits of in0 located sh0
// positions to the left, restricted to the bits selected by (mask >> sh1).
// 'tmp' is clobbered.
.macro swpmv in0, in1, tmp, mask, sh0, sh1
eor \tmp, \in1, \in0, lsr \sh0 // tmp <- in1 ^ (in0 >> sh0)
and \tmp, \tmp, \mask, lsr \sh1 // keep bits selected by (mask >> sh1)
eor \in1, \in1, \tmp // swap selected bits into in1...
eor \in0, \in0, \tmp, lsl \sh0 // ...and out of in0
.endm
// Bitsliced Skinny-128 8-bit S-box layer applied to the packed state
// in0-in3. 'mask' is expected to hold 0x55555555 (preloaded by the caller);
// 'tmp' is clobbered. The structure alternates NOR/XOR stages with
// SWAPMOVEs that shuffle odd/even bit pairs between the slices.
.macro sbox in0, in1, in2, in3, tmp, mask
orr \tmp, \in0, \in1
eor \in3, \in3, \tmp
mvn \in3, \in3 // in3 <- ~(in3 ^ (in0 | in1))
swpmv \in2, \in1, \tmp, \mask, #1, #0
swpmv \in3, \in2, \tmp, \mask, #1, #0
orr \tmp, \in2, \in3
eor \in1, \in1, \tmp
mvn \in1, \in1 // in1 <- ~(in1 ^ (in2 | in3))
swpmv \in1, \in0, \tmp, \mask, #1, #0
swpmv \in0, \in3, \tmp, \mask, #1, #0
orr \tmp, \in0, \in1
eor \in3, \in3, \tmp
mvn \in3, \in3 // in3 <- ~(in3 ^ (in0 | in1))
swpmv \in2, \in1, \tmp, \mask, #1, #0
swpmv \in3, \in2, \tmp, \mask, #1, #0
orr \tmp, \in2, \in3
eor \in1, \in1, \tmp // last stage without the NOT
swpmv \in0, \in3, \tmp, \mask, #0, #0 // shift 0: plain masked bit exchange
.endm
// MixColumns layer applied to each state word r2-r5 in turn: three
// mask-rotate-XOR steps per word. r7 holds the column mask (preloaded by
// the caller) and r8 is scratch. The six rotation amounts select the
// fixsliced variant of the matrix for the current round position.
.macro mixcol idx0, idx1, idx2, idx3, idx4, idx5
and r8, r7, r2, ror \idx0
eor r2, r2, r8, ror \idx1
and r8, r7, r2, ror \idx2
eor r2, r2, r8, ror \idx3
and r8, r7, r2, ror \idx4
eor r2, r2, r8, ror \idx5
and r8, r7, r3, ror \idx0
eor r3, r3, r8, ror \idx1
and r8, r7, r3, ror \idx2
eor r3, r3, r8, ror \idx3
and r8, r7, r3, ror \idx4
eor r3, r3, r8, ror \idx5
and r8, r7, r4, ror \idx0
eor r4, r4, r8, ror \idx1
and r8, r7, r4, ror \idx2
eor r4, r4, r8, ror \idx3
and r8, r7, r4, ror \idx4
eor r4, r4, r8, ror \idx5
and r8, r7, r5, ror \idx0
eor r5, r5, r8, ror \idx1
and r8, r7, r5, ror \idx2
eor r5, r5, r8, ror \idx3
and r8, r7, r5, ror \idx4
eor r5, r5, r8, ror \idx5
.endm
// XOR the next four round-tweakey words from the rtk_23 stream (pointer in
// r1, auto-incremented) into the state r2-r5. Clobbers r8-r11.
.macro rtk23
ldmia.w r1!, {r8-r11}
eor r2, r2, r8
eor r3, r3, r9
eor r4, r4, r10
eor r5, r5, r11
.endm
// XOR the next four round-tweakey words from the rtk_1 stream (pointer in
// r0, auto-incremented) into the state r2-r5. Clobbers r8-r11.
.macro rtk1
ldmia.w r0!,{r8-r11}
eor r2, r2, r8
eor r3, r3, r9
eor r4, r4, r10
eor r5, r5, r11
.endm
quadruple_round:
// Four fixsliced Skinny-128 rounds: S-box, round-tweakey addition,
// MixColumns. rtk23 (r1) is added every round while rtk1 (r0) is only added
// every other round; the alternating sbox argument order tracks the
// fixsliced state layout.
sbox r2, r3, r4, r5, r8, r6
rtk23
rtk1
mixcol #30, #24, #18, #2, #6, #4
sbox r4, r5, r2, r3, r8, r6
rtk23
mixcol #16, #30, #28, #0, #16, #2
sbox r2, r3, r4, r5, r8, r6
rtk23
rtk1
mixcol #10, #4, #6, #6, #26, #0
sbox r4, r5, r2, r3, r8, r6
rtk23
mixcol #4, #26, #0, #4, #4, #22
bx lr
@ void skinny128_384_plus(u8* out, const u8* in, const u8* rtk_1, const u8* rtk_23)
.global skinny128_384_plus
.type skinny128_384_plus,%function
.align 2
skinny128_384_plus:
push {r0-r12, r14} // save args (incl. out ptr in r0) and callee-saved regs
mov.w r0, r2 // r0 <- rtk_1 pointer
// load input words (order chosen for the packing sequence below)
ldr.w r3, [r1, #8]
ldr.w r4, [r1, #4]
ldr.w r5, [r1, #12]
ldr.w r2, [r1]
// preload bitmasks for swapmove (packing into bitsliced)
movw r6, #0x0a0a
movt r6, #0x0a0a // r6 <- 0x0a0a0a0a
movw r7, #0x3030
movt r7, #0x3030 // r7 <- 0x30303030
swpmv r2, r2, r12, r6, #3, #0
swpmv r3, r3, r12, r6, #3, #0
swpmv r4, r4, r12, r6, #3, #0
swpmv r5, r5, r12, r6, #3, #0
swpmv r4, r2, r12, r7, #2, #0
swpmv r3, r2, r12, r7, #4, #2
swpmv r5, r2, r12, r7, #6, #4
swpmv r3, r4, r12, r7, #2, #2
swpmv r5, r4, r12, r7, #4, #4
swpmv r5, r3, r12, r7, #2, #4
// preload bitmasks for swapmove (s-box)
movw r6, #0x5555
movt r6, #0x5555 // r6 <- 0x55555555
// reload pointer to round tweakeys (erased when loading ptext)
ldr.w r1, [sp, #12] // saved r3 = rtk_23 pointer
// run 16 rounds
bl quadruple_round
bl quadruple_round
bl quadruple_round
bl quadruple_round
// run 16 rounds
sub.w r0, #128 // reset rtk1
bl quadruple_round
bl quadruple_round
bl quadruple_round
bl quadruple_round
// 32 rounds done so far
sub.w r0, #128 // reset rtk1
// run 8 rounds (16*2+8 = 40 rounds for skinny128-384+)
bl quadruple_round
bl quadruple_round
// preload bitmasks for swapmove (unpacking)
movw r6, #0x0a0a
movt r6, #0x0a0a // r6 <- 0x0a0a0a0a
// restore output buffer (pop the r0 saved at function entry)
ldr.w r0, [sp], #4
swpmv r5, r3, r12, r7, #2, #4 // inverse of the packing sequence above
swpmv r5, r4, r12, r7, #4, #4
swpmv r3, r4, r12, r7, #2, #2
swpmv r5, r2, r12, r7, #6, #4
swpmv r3, r2, r12, r7, #4, #2
swpmv r4, r2, r12, r7, #2, #0
swpmv r5, r5, r12, r6, #3, #0
swpmv r4, r4, r12, r6, #3, #0
swpmv r3, r3, r12, r6, #3, #0
swpmv r2, r2, r12, r6, #3, #0
str.w r2, [r0]
str.w r4, [r0, #4]
str.w r3, [r0, #8]
str.w r5, [r0, #12]
pop {r1-r12,r14} // r0 was already popped above
bx lr
|
aadomn/cymric
| 3,401
|
artifact_tches2025-3/benchmark_armv7m/lwc/romulusn/skinny128_tks_lfsr.s
|
/*******************************************************************************
* ARMv7-M assembly implementation of fixsliced Skinny-128-384+.
*
* For more details, see the paper at
* https://csrc.nist.gov/CSRC/media/Events/lightweight-cryptography-workshop-2020
* /documents/papers/fixslicing-lwc2020.pdf
*
* @author Alexandre Adomnicai
* alex.adomnicai@gmail.com
*
* @date March 2022
*******************************************************************************/
.syntax unified
.thumb
// swapmove technique for bit manipulations:
//   tmp  = ((\in0 >> \sh0) ^ \in1) & (\mask >> \sh1)
//   \in1 ^= tmp ;  \in0 ^= tmp << \sh0
// i.e. exchange the bit slices of \in0 and \in1 selected by the mask.
.macro swpmv in0, in1, tmp, mask, sh0, sh1
eor \tmp, \in1, \in0, lsr \sh0
and \tmp, \tmp, \mask, lsr \sh1
eor \in1, \in1, \tmp
eor \in0, \in0, \tmp, lsl \sh0
.endm
// packing from byte-array to bitsliced representation
// In:  r2-r5 = 128-bit value, r10 = 0x0a0a0a0a, r11 = 0x30303030
// Out: r2-r5 bitsliced. Clobbers r12.
packing:
swpmv r2, r2, r12, r10, #3, #0
swpmv r3, r3, r12, r10, #3, #0
swpmv r4, r4, r12, r10, #3, #0
swpmv r5, r5, r12, r10, #3, #0
swpmv r4, r2, r12, r11, #2, #0
swpmv r3, r2, r12, r11, #4, #2
swpmv r5, r2, r12, r11, #6, #4
swpmv r3, r4, r12, r11, #2, #2
swpmv r5, r4, r12, r11, #4, #4
swpmv r5, r3, r12, r11, #2, #4
bx lr
// computes lfsr2 on tk2 in a bitsliced fashion
// \out/\in are two bitsliced TK2 words; expects the mask 0xaaaaaaaa in r10
// (set up in tks_lfsr_23). Clobbers r12, r14.
.macro lfsr2 out, in
and r12, \in, r10
eor r12, r12, \out
and r14, r10, r12, lsl #1
and r12, r12, r10
orr \out, r14, r12, lsr #1
.endm
// computes lfsr3 on tk3 in a bitsliced fashion
// \out/\in are two bitsliced TK3 words; expects the mask 0xaaaaaaaa in r10
// (set up in tks_lfsr_23). Clobbers r12, r14.
.macro lfsr3 out, in
and r12, \in, r10
eor r12, \out, r12, lsr #1
and r14, r10, r12, lsl #1
and r12, r12, r10
orr \out, r14, r12, lsr #1
.endm
// computes lfsr2(tk2) ^ lfsr3(tk3) and store the result as round tweakeys;
// \inc is the post-increment applied to the output pointer r0 (the non-16
// increments interleave the two 8-byte halves of each entry).
// Clobbers r11, r12.
.macro strtk tk2a, tk2b, tk3a, tk3b, inc
eor r11, \tk2a, \tk3a //tk2 ^ tk3 (1st word)
eor r12, \tk2b, \tk3b //tk2 ^ tk3 (2nd word)
strd r11, r12, [r0], \inc //store in tk
.endm
/*
 * Computes LFSR2(TK2) ^ LFSR3(TK3) for all rounds.
 * Processing both at the same time allows to save some memory accesses.
 */
@ void tks_lfsr_23(uint32_t* tk, const uint8_t* tk2, const uint8_t* tk3, const int rounds)
@ r0 = output round-tweakey array, r1 = TK2 (16 bytes), r2 = TK3 (16 bytes),
@ r3 = number of rounds; the loop steps by 8, so rounds is assumed to be a
@ multiple of 8 — TODO confirm with callers.
.global tks_lfsr_23
.type tks_lfsr_23,%function
.align 2
tks_lfsr_23:
push {r0-r12, r14}
// load 128-bit tk2
ldr.w r3, [r1, #8]
ldr.w r4, [r1, #4]
ldr.w r5, [r1, #12]
ldr.w r12, [r1]
//move tk3 address in r1
mov r1, r2
//move 1st tk2 word in r2
mov r2, r12
// preload bitmasks for swapmove (packing)
movw r10, #0x0a0a
movt r10, #0x0a0a
movw r11, #0x3030
movt r11, #0x3030
// packing tk2 into bitsliced
bl packing
// move tk2 from r2-r5 to r6-r9
mov r6, r2
mov r7, r3
mov r8, r4
mov r9, r5
// load 128-bit tk3
ldr.w r3, [r1, #8]
ldr.w r4, [r1, #4]
ldr.w r5, [r1, #12]
ldr.w r2, [r1]
// packing tk3 into bitsliced
bl packing
eor r10, r10, r10, lsl #4 // r10 <- 0xaaaaaaaa (mask for lfsr2/lfsr3)
// load loop counter (#rounds) in r1 ([sp, #12] = saved r3 argument)
ldr.w r1, [sp, #12]
// store tk2 ^ tk3 in round tweakeys array (r0)
strtk r2, r3, r6, r7, #8
strtk r4, r5, r8, r9, #8
// Precompute 8 round tweakeys per iteration
loop_2_3:
lfsr2 r6, r8
lfsr3 r5, r3
strtk r5, r2, r7, r8, #8
strtk r3, r4, r9, r6, #24
lfsr2 r7, r9
lfsr3 r4, r2
strtk r4, r5, r8, r9, #8
strtk r2, r3, r6, r7, #24
lfsr2 r8, r6
lfsr3 r3, r5
strtk r3, r4, r9, r6, #8
strtk r5, r2, r7, r8, #24
lfsr2 r9, r7
lfsr3 r2, r4
strtk r2, r3, r6, r7, #8
strtk r4, r5, r8, r9, #24
subs.w r1, r1, #8 //loop counter -= 8
bne loop_2_3
pop {r0-r12, r14}
bx lr
|
aadomn/cymric
| 32,837
|
artifact_tches2025-3/benchmark_armv7m/lwc/romulusn/skinny128_tks_perm.s
|
/*******************************************************************************
* ARMv7-M assembly implementation of fixsliced Skinny-128-384+.
*
* For more details, see the paper at
* https://csrc.nist.gov/CSRC/media/Events/lightweight-cryptography-workshop-2020
* /documents/papers/fixslicing-lwc2020.pdf
*
* @author Alexandre Adomnicai
* alex.adomnicai@gmail.com
*
* @date March 2022
*******************************************************************************/
.syntax unified
.thumb
// swapmove technique for bit manipulations:
//   tmp  = ((\in0 >> \sh0) ^ \in1) & (\mask >> \sh1)
//   \in1 ^= tmp ;  \in0 ^= tmp << \sh0
// i.e. exchange the bit slices of \in0 and \in1 selected by the mask.
.macro swpmv in0, in1, tmp, mask, sh0, sh1
eor \tmp, \in1, \in0, lsr \sh0
and \tmp, \tmp, \mask, lsr \sh1
eor \in1, \in1, \tmp
eor \in0, \in0, \tmp, lsl \sh0
.endm
// tweakey permutation applied twice on 32-bit input
// (P^2 on one bitsliced slice; r1 and r10 must hold the bitmasks set in p2)
.macro perm2 in
and r11, r1, \in, ror #14
bfi r11, \in, #16, #8
and r12, \in, #0xcc000000
orr r11, r11, r12, lsr #2
and r12, r10, \in
orr r11, r11, r12, lsr #8
and r12, \in, #0x00cc0000
orr \in, r11, r12, lsr #18
.endm
// tweakey permutation applied twice on full tweakey
// In/out: r6-r9 = bitsliced tweakey. Clobbers r1, r10, r11, r12.
p2:
// bitmasks for perm2
movw r1, #0xcc00
movt r1, #0xcc00
movw r10, #0xcc00
movt r10, #0x0033
perm2 r6
perm2 r7
perm2 r8
perm2 r9
bx lr
// tweakey permutation applied 4 times on 32-bit input
// (P^4 on one bitsliced slice; r11, r12, r14 must hold the bitmasks set in p4)
.macro perm4 in
and r10, r14, \in, ror #22
and r1, r12, \in, ror #16
orr r10, r10, r1
and r1, \in, r11
orr r10, r10, r1, lsr #2
movw r1, #0xcc33
and \in, \in, r1
orr \in, r10, \in, ror #24
.endm
// tweakey permutation applied 4 times on full tweakey
// In/out: r6-r9 = bitsliced tweakey. Clobbers r1, r10, r11, r12.
// r14 is spilled to the stack slot reserved by the caller (sub.w sp, #4).
p4:
str.w r14, [sp]
// bitmasks for perm4
movw r14, #0x00cc
movt r14, #0xcc00
movw r12, #0xcc00
movt r12, #0x3300
movw r11, #0x00cc
movt r11, #0x00cc
perm4 r6
perm4 r7
perm4 r8
perm4 r9
ldr.w r14, [sp] //restore r14
bx lr
// tweakey permutation applied 6 times on 32-bit input
// (P^6 on one bitsliced slice; r1 and r12 must hold the bitmasks set in p6)
.macro perm6 in
and r10, \in, r1, ror #8
and r11, r12, \in, ror #24
orr r11, r11, r10, ror #6
and r10, r1, \in, ror #10
orr r11, r11, r10
and r10, \in, #0x000000cc
orr r11, r11, r10, lsl #14
and r10, \in, #0x00003300
orr \in, r11, r10, lsl #2
.endm
// tweakey permutation applied 6 times on full tweakey
// In/out: r6-r9 = bitsliced tweakey. Clobbers r1, r10, r11, r12.
p6:
// bitmasks for perm6
movw r1, #0x3333
movw r12, #0x00cc
movt r12, #0x3300
perm6 r6
perm6 r7
perm6 r8
perm6 r9
bx lr
// tweakey permutation applied 8 times on 32-bit input
// (P^8 on one bitsliced slice; r1 and r12 must hold the bitmasks set in p8)
.macro perm8 in
and r10, \in, r1
and r11, r1, \in, ror #8
orr r11, r11, r10, ror #24
and r10, \in, r12, lsl #2
orr r11, r11, r10, ror #26
and r10, \in, r12, lsl #8
orr \in, r11, r10, lsr #6
.endm
// tweakey permutation applied 8 times on full tweakey
// In/out: r6-r9 = bitsliced tweakey. Clobbers r1, r10, r11, r12.
p8:
// bitmasks for perm8
movw r12, #0x3333
movw r1, #0x0000
movt r1, #0x33cc
perm8 r6
perm8 r7
perm8 r8
perm8 r9
bx lr
// tweakey permutation applied 10 times on 32-bit input
// (P^10 on one bitsliced slice; r1 and r12 must hold the bitmasks set in p10)
.macro perm10 in
and r10, \in, r1, ror #8
and r11, r12, \in, ror #26
orr r11, r11, r10, ror #8
and r10, \in, r12, ror #24
orr r11, r11, r10, ror #22
and r10, \in, #0x00330000
orr r11, r11, r10, lsr #14
and r10, \in, #0x0000cc00
orr \in, r11, r10, lsr #2
.endm
// tweakey permutation applied 10 times on full tweakey
// In/out: r6-r9 = bitsliced tweakey. Clobbers r1, r10, r11, r12.
p10:
// bitmasks for perm10
movw r12, #0x0033
movt r12, #0x3300
movw r1, #0xcc33
perm10 r6
perm10 r7
perm10 r8
perm10 r9
bx lr
// tweakey permutation applied 12 times on 32-bit input
// (P^12 on one bitsliced slice; r1, r12, r14 must hold the bitmasks set in p12)
.macro perm12 in
and r10, r14, \in, ror #8
and r11, r12, \in, ror #30
orr r11, r11, r10
and r10, r1, \in, ror #16
orr r11, r11, r10
movw r10, #0xcccc
and r10, \in, r10, ror #8
orr \in, r11, r10, ror #10
.endm
// tweakey permutation applied 12 times on full tweakey
// In/out: r6-r9 = bitsliced tweakey. Clobbers r1, r10, r11, r12.
// r14 is spilled to the stack slot reserved by the caller (sub.w sp, #4).
p12:
str.w r14, [sp]
// bitmasks for perm12
movw r14, #0xcc33
movw r12, #0x00cc
movt r12, #0x00cc
movw r1, #0x3300
movt r1, #0xcc00
perm12 r6
perm12 r7
perm12 r8
perm12 r9
ldr.w r14, [sp] //restore r14
bx lr
// tweakey permutation applied 14 times on 32-bit input
// (P^14 on one bitsliced slice; r1 and r12 must hold the bitmasks set in p14)
.macro perm14 in
and r10, r1, \in, ror #24
and r11, \in, #0x00000033
orr r11, r10, r11, ror #14
and r10, \in, #0x33000000
orr r11, r11, r10, ror #30
and r10, \in, #0x00ff0000
orr r11, r11, r10, ror #16
and r10, \in, r12
orr \in, r11, r10, ror #18
.endm
// tweakey permutation applied 14 times on full tweakey
// In/out: r6-r9 = bitsliced tweakey. Clobbers r1, r10, r11, r12.
p14:
// bitmasks for perm14
movw r1, #0xcc00
movt r1, #0x0033
movw r12, #0xcc00
movt r12, #0xcc00
perm14 r6
perm14 r7
perm14 r8
perm14 r9
bx lr
// tweakey permutation applied 16 times is identity
// (P^16 = Id, so the macro is deliberately empty; kept for symmetry)
.macro perm16 in
.endm
// bitmasks and rotations to match fixslicing (odd rounds)
// Expects r10 = 0xc3c3c3c3 on entry (set by the caller); operates on the
// bitsliced tweakey r6-r9. Clobbers r10, r11.
.macro bs2fs_odd sh0, sh1, sh2
and r10, r10, r10, lsr #6 //r10<- 0x03030303
and r11, r10, r6, ror \sh0
and r6, r6, r10, lsl \sh1
orr r6, r11, r6, ror \sh2
and r11, r10, r7, ror \sh0
and r7, r7, r10, lsl \sh1
orr r7, r11, r7, ror \sh2
and r11, r10, r8, ror \sh0
and r8, r8, r10, lsl \sh1
orr r8, r11, r8, ror \sh2
and r11, r10, r9, ror \sh0
and r9, r9, r10, lsl \sh1
orr r9, r11, r9, ror \sh2
.endm
// bitmasks and rotations to match fixslicing (even rounds)
// Expects r10 = 0xf0f0f0f0 on entry (set by the caller); operates on the
// bitsliced tweakey r6-r9. Clobbers r10, r11.
.macro bs2fs_even sh0, sh1, sh2
and r10, r10, r10, lsr #2 //r10<- 0x30303030
and r11, r10, r6, ror \sh0
and r6, r6, r10, ror \sh1
orr r6, r11, r6, ror \sh2
and r11, r10, r7, ror \sh0
and r7, r7, r10, ror \sh1
orr r7, r11, r7, ror \sh2
and r11, r10, r8, ror \sh0
and r8, r8, r10, ror \sh1
orr r8, r11, r8, ror \sh2
and r11, r10, r9, ror \sh0
and r9, r9, r10, ror \sh1
orr r9, r11, r9, ror \sh2
.endm
/* Apply tweakey permutation to LFSR2(TK2) ^ LFSR2(TK3) for all round tweakeys.
 * Also add the round constant and some NOT to speedup skinny128-384+ core.
 * The function is fully unrolled to add round constants w/ immediate values.
 * r0 points at the tweakey array produced by tks_lfsr_23 and is rewritten
 * in place; r6-r9 hold the current bitsliced tweakey, r10-r12 are masks and
 * scratch; the p2..p14 subroutines spill r14 to the slot reserved below.
 */
@ void tks_perm_23(uint8_t *rtk)
.global tks_perm_23
.type tks_perm_23,%function
.align 2
tks_perm_23:
push {r0-r12, lr}
sub.w sp, #4 //to store r14 in subroutines
ldm r0, {r6-r9} //load tk
movw r10, #0xf0f0
movt r10, #0xf0f0 //r10<- 0xf0f0f0f0
and r6, r6, r10 //tk &= 0xf0f0f0f0 (1st word)
and r7, r7, r10 //tk &= 0xf0f0f0f0 (2nd word)
and r8, r8, r10 //tk &= 0xf0f0f0f0 (3rd word)
and r9, r9, r10 //tk &= 0xf0f0f0f0 (4th word)
eor r8, r8, #0x00000004 //add rconst
eor r9, r9, #0x00000040 //add rconst
mvn r9, r9 //to remove a NOT in sbox calculations
strd r8, r9, [r0], #8 //store 1st half tk for 1st round
strd r6, r7, [r0], #8 //store 2nd half tk for 1st round
ldm r0, {r6-r9} //load tk
bl p2 //apply the permutation twice
movw r10, #0xc3c3
movt r10, #0xc3c3 //r10<- 0xc3c3c3c3
and r11, r10, r6, ror #26 //ror and mask to match fixslicing
and r12, r10, r7, ror #26 //ror and mask to match fixslicing
strd r11, r12, [r0], #8 //store 1st half tk for 2nd round
and r11, r10, r8, ror #26 //ror and mask to match fixslicing
and r12, r10, r9, ror #26 //ror and mask to match fixslicing
eor r11, r11, #0x10000000 //add rconst
eor r11, r11, #0x00000100 //add rconst
eor r12, r12, #0x00000100 //add rconst
mvn r12, r12 //to save a NOT in sbox calculations
strd r11, r12, [r0], #8 //store 2nd half tk for 2nd round
bs2fs_odd 28, 6, 12
eor r7, r7, #0x04000000 //add rconst
eor r8, r8, #0x44000000 //add rconst
eor r9, r9, #0x04000000 //add rconst
mvn r9, r9 //to save a NOT in sbox calculations
strd r8, r9, [r0], #8 //store 1st half tk for 3rd round
strd r6, r7, [r0], #8 //store 2nd half tk for 3rd round
ldm r0, {r6-r9} //load tk
bl p4 //apply the permutation 4 times
movw r10, #0xf0f0
movt r10, #0xf0f0 //r10<- 0xf0f0f0f0
and r11, r10, r6, ror #16 //ror and mask to match fixslicing
and r12, r10, r7, ror #16 //ror and mask to match fixslicing
eor r11, r11, #0x00400000 //add rconst
eor r12, r12, #0x00400000 //add rconst
strd r11, r12, [r0, #24] //store 2nd half tk for 5th round
and r11, r10, r8, ror #16 //ror and mask to match fixslicing
and r12, r10, r9, ror #16 //ror and mask to match fixslicing
eor r11, r11, #0x00440000 //add rconst
eor r12, r12, #0x00500000 //add rconst
mvn r12, r12 //to save a NOT in sbox calculations
strd r11, r12, [r0, #16] //store 1st half tk for 5th round
bs2fs_even 14, 4, 6
eor r6, r6, #0x00100000 //add rconst
eor r7, r7, #0x00100000 //add rconst
eor r8, r8, #0x00100000 //add rconst
eor r8, r8, #0x00000001 //add rconst
eor r9, r9, #0x00100000 //add rconst
mvn r9, r9 //to save a NOT in sbox calculations
strd r6, r7, [r0], #8 //store 1st half tk for 4th round
strd r8, r9, [r0], #24 //store 2nd half tk for 4th round
ldm r0, {r6-r9} //load tk
bl p6 //apply the permutation 6 times
movw r10, #0xc3c3
movt r10, #0xc3c3 //r10<- 0xc3c3c3c3
and r11, r10, r6, ror #10 //ror and mask to match fixslicing
and r12, r10, r7, ror #10 //ror and mask to match fixslicing
eor r11, r11, #0x01000000 //add rconst
eor r12, r12, #0x01000000 //add rconst
strd r11, r12, [r0], #8 //store 1st half tk for 6th round
and r11, r10, r8, ror #10 //ror and mask to match fixslicing
and r12, r10, r9, ror #10 //ror and mask to match fixslicing
eor r11, r11, #0x01400000 //add rconst
eor r11, r11, #0x00001000 //add rconst
eor r12, r12, #0x00400000 //add rconst
mvn r12, r12 //to save a NOT in sbox calculations
strd r11, r12, [r0], #8 //store 2nd half tk for 6th round
bs2fs_odd 12, 6, 28
eor r6, r6, #0x00000400 //add rconst
eor r7, r7, #0x00000400 //add rconst
eor r8, r8, #0x01000000 //add rconst
eor r8, r8, #0x00004000 //add rconst
eor r9, r9, #0x01000000 //add rconst
eor r9, r9, #0x00000400 //add rconst
mvn r9, r9 //to save a NOT in sbox calculations
strd r8, r9, [r0], #8 //store 1st half tk for 7th round
strd r6, r7, [r0], #8 //store 2nd half tk for 7th round
ldm r0, {r6-r9} //load tk
bl p8 //apply the permutation 8 times
movw r10, #0xf0f0
movt r10, #0xf0f0 //r10<- 0xf0f0f0f0
and r11, r10, r6 //ror and mask to match fixslicing
and r12, r10, r7 //ror and mask to match fixslicing
eor r12, r12, #0x00000040 //add rconst
strd r11, r12, [r0, #24] //store 2nd half tk for 9th round
and r11, r10, r8 //ror and mask to match fixslicing
and r12, r10, r9 //ror and mask to match fixslicing
eor r11, r11, #0x00000054 //add rconst
eor r12, r12, #0x00000050 //add rconst
mvn r12, r12 //to save a NOT in sbox calculations
strd r11, r12, [r0, #16] //store 1st half tk for 9th round
bs2fs_even 30, 4, 22
eor r6 ,r6, #0x00000010
eor r8, r8, #0x00010000
eor r8, r8, #0x00000410
eor r9, r9, #0x00000410
mvn r9, r9 //to save a NOT in sbox calculations
strd r6, r7, [r0], #8 //store 1st half tk for 8th round
strd r8, r9, [r0], #24 //store 2nd half tk for 8th round
ldm r0, {r6-r9} //load tk
bl p10 //apply the permutation 10 times
movw r10, #0xc3c3
movt r10, #0xc3c3 //r10<- 0xc3c3c3c3
and r11, r10, r6, ror #26 //ror and mask to match fixslicing
and r12, r10, r7, ror #26 //ror and mask to match fixslicing
eor r11, r11, #0x00000100 //add rconst
eor r12, r12, #0x00000100 //add rconst
strd r11, r12, [r0], #8 //store 1st half tk for 10th round
and r11, r10, r8, ror #26 //ror and mask to match fixslicing
and r12, r10, r9, ror #26 //ror and mask to match fixslicing
eor r11, r11, #0x10000000 //add rconst
eor r11, r11, #0x00000140 //add rconst
eor r12, r12, #0x00000100 //add rconst
mvn r12, r12 //to save a NOT in sbox calculations
strd r11, r12, [r0], #8 //store 2nd half tk for 10th round
bs2fs_odd 28, 6, 12
eor r6, r6, #0x04000000 //add rconst
eor r7, r7, #0x04000000 //add rconst
eor r8, r8, #0x44000000 //add rconst
eor r9, r9, #0x00000100 //add rconst
mvn r9, r9 //to save a NOT in sbox calculations
strd r8, r9, [r0], #8 //store 1st half tk for 11th round
strd r6, r7, [r0], #8 //store 2nd half tk for 11th round
ldm r0, {r6-r9} //load tk
bl p12 //apply the permutation 12 times
movw r10, #0xf0f0
movt r10, #0xf0f0 //r10<- 0xf0f0f0f0
and r11, r10, r6, ror #16 //ror and mask to match fixslicing
and r12, r10, r7, ror #16 //ror and mask to match fixslicing
eor r11, r11, #0x00400000 //add rconst
strd r11, r12, [r0, #24] //store 2nd half tk for 13th round
and r11, r10, r8, ror #16 //ror and mask to match fixslicing
and r12, r10, r9, ror #16 //ror and mask to match fixslicing
eor r11, r11, #0x00140000 //add rconst
eor r12, r12, #0x00500000 //add rconst
mvn r12, r12 //to save a NOT in sbox calculations
strd r11, r12, [r0, #16] //store 1st half tk for 13th round
bs2fs_even 14, 4, 6
eor r6, r6, #0x00100000 //add rconst
eor r7, r7, #0x00100000 //add rconst
eor r8, r8, #0x04000000 //add rconst
eor r8, r8, #0x00000001 //add rconst
eor r9, r9, #0x04000000 //add rconst
mvn r9, r9 //to save a NOT in sbox calculations
strd r6, r7, [r0], #8 //store 1st half tk for 12th round
strd r8, r9, [r0], #24 //store 2nd half tk for 12th round
ldm r0, {r6-r9} //load tk
bl p14 //apply the permutation 14 times
movw r10, #0xc3c3
movt r10, #0xc3c3 //r10<- 0xc3c3c3c3
and r11, r10, r6, ror #10 //ror and mask to match fixslicing
and r12, r10, r7, ror #10 //ror and mask to match fixslicing
strd r11, r12, [r0], #8 //store 1st half tk for 14th round
and r11, r10, r8, ror #10 //ror and mask to match fixslicing
and r12, r10, r9, ror #10 //ror and mask to match fixslicing
eor r11, r11, #0x01400000 //add rconst
eor r11, r11, #0x00001000 //add rconst
eor r12, r12, #0x01400000 //add rconst
mvn r12, r12 //to save a NOT in sbox calculations
strd r11, r12, [r0], #8 //store 2nd half tk for 14th round
bs2fs_odd 12, 6, 28
eor r7, r7, #0x00000400 //add rconst
eor r8, r8, #0x01000000 //add rconst
eor r8, r8, #0x00004400 //add rconst
eor r9, r9, #0x00000400 //add rconst
mvn r9, r9 //to save a NOT in sbox calculations
strd r8, r9, [r0], #8 //store 1st half tk for 15th round
strd r6, r7, [r0], #8 //store 2nd half tk for 15th round
ldm r0, {r6-r9} //load tk
movw r10, #0xf0f0
movt r10, #0xf0f0 //r10<- 0xf0f0f0f0
and r11, r10, r6 //ror and mask to match fixslicing
and r12, r10, r7 //ror and mask to match fixslicing
eor r11, r11, #0x00000040 //add rconst
eor r12, r12, #0x00000040 //add rconst
strd r11, r12, [r0, #24] //store 2nd half tk for 17th round
and r11, r10, r8 //ror and mask to match fixslicing
and r12, r10, r9 //ror and mask to match fixslicing
eor r11, r11, #0x00000004 //add rconst
eor r12, r12, #0x00000050 //add rconst
mvn r12, r12 //to save a NOT in sbox calculations
strd r11, r12, [r0, #16] //store 1st half tk for 17th round
bs2fs_even 30, 4, 22
eor r6 ,r6, #0x00000010
eor r7 ,r7, #0x00000010
eor r8, r8, #0x00000010
eor r8, r8, #0x00010000
mvn r9, r9 //to save a NOT in sbox calculations
strd r6, r7, [r0], #8 //store 1st half tk for 16th round
strd r8, r9, [r0], #24 //store 2nd half tk for 16th round
ldm r0, {r6-r9} //load tk
bl p2 //apply the permutation twice (P^18 = P^2 since P^16 = Id)
movw r10, #0xc3c3
movt r10, #0xc3c3 //r10<- 0xc3c3c3c3
and r11, r10, r6, ror #26 //ror and mask to match fixslicing
and r12, r10, r7, ror #26 //ror and mask to match fixslicing
eor r11, r11, #0x00000100 //add rconst
strd r11, r12, [r0], #8 //store 1st half tk for 18th round
and r11, r10, r8, ror #26 //ror and mask to match fixslicing
and r12, r10, r9, ror #26 //ror and mask to match fixslicing
eor r11, r11, #0x10000000 //add rconst
eor r11, r11, #0x00000140 //add rconst
eor r12, r12, #0x00000040 //add rconst
mvn r12, r12 //to save a NOT in sbox calculations
strd r11, r12, [r0], #8 //store 2nd half tk for 18th round
bs2fs_odd 28, 6, 12
eor r7, r7, #0x04000000 //add rconst
eor r8, r8, #0x40000000 //add rconst
eor r8, r8, #0x00000100 //add rconst
eor r9, r9, #0x04000000 //add rconst
eor r9, r9, #0x00000100 //add rconst
mvn r9, r9 //to save a NOT in sbox calculations
strd r8, r9, [r0], #8 //store 1st half tk for 19th round
strd r6, r7, [r0], #8 //store 2nd half tk for 19th round
ldm r0, {r6-r9} //load tk
bl p4 //apply the permutation 4 times
movw r10, #0xf0f0
movt r10, #0xf0f0 //r10<- 0xf0f0f0f0
and r11, r10, r6, ror #16 //ror and mask to match fixslicing
and r12, r10, r7, ror #16 //ror and mask to match fixslicing
eor r12, r12, #0x00400000 //add rconst
strd r11, r12, [r0, #24] //store 2nd half tk for 21st round
and r11, r10, r8, ror #16 //ror and mask to match fixslicing
and r12, r10, r9, ror #16 //ror and mask to match fixslicing
eor r11, r11, #0x00440000 //add rconst
eor r12, r12, #0x00100000 //add rconst
mvn r12, r12 //to save a NOT in sbox calculations
strd r11, r12, [r0, #16] //store 1st half tk for 21st round
bs2fs_even 14, 4, 6
eor r6, r6, #0x00100000 //add rconst
eor r8, r8, #0x04100000 //add rconst
eor r8, r8, #0x00000001 //add rconst
eor r9, r9, #0x00100000 //add rconst
mvn r9, r9 //to save a NOT in sbox calculations
strd r6, r7, [r0], #8 //store 1st half tk for 20th round
strd r8, r9, [r0], #24 //store 2nd half tk for 20th round
ldm r0, {r6-r9} //load tk
bl p6 //apply the permutation 6 times
movw r10, #0xc3c3
movt r10, #0xc3c3 //r10<- 0xc3c3c3c3
and r11, r10, r6, ror #10 //ror and mask to match fixslicing
and r12, r10, r7, ror #10 //ror and mask to match fixslicing
eor r11, r11, #0x01000000 //add rconst
eor r12, r12, #0x01000000 //add rconst
strd r11, r12, [r0], #8 //store 1st half tk for 22nd round
and r11, r10, r8, ror #10 //ror and mask to match fixslicing
and r12, r10, r9, ror #10 //ror and mask to match fixslicing
eor r11, r11, #0x00400000 //add rconst
eor r11, r11, #0x00001000 //add rconst
mvn r12, r12 //to save a NOT in sbox calculations
strd r11, r12, [r0], #8 //store 2nd half tk for 22nd round
bs2fs_odd 12, 6, 28
eor r6, r6, #0x00000400 //add rconst
eor r8, r8, #0x00004000 //add rconst
eor r9, r9, #0x01000000 //add rconst
mvn r9, r9 //to save a NOT in sbox calculations
strd r8, r9, [r0], #8 //store 1st half tk for 23rd round
strd r6, r7, [r0], #8 //store 2nd half tk for 23rd round
ldm r0, {r6-r9} //load tk
bl p8 //apply the permutation 8 times
movw r10, #0xf0f0
movt r10, #0xf0f0 //r10<- 0xf0f0f0f0
and r11, r10, r6 //ror and mask to match fixslicing
and r12, r10, r7 //ror and mask to match fixslicing
strd r11, r12, [r0, #24] //store 2nd half tk for 25th round
and r11, r10, r8 //ror and mask to match fixslicing
and r12, r10, r9 //ror and mask to match fixslicing
eor r11, r11, #0x00000014 //add rconst
eor r12, r12, #0x00000040 //add rconst
mvn r12, r12 //to save a NOT in sbox calculations
strd r11, r12, [r0, #16] //store 1st half tk for 25th round
bs2fs_even 30, 4, 22
eor r8, r8, #0x00010400
eor r9, r9, #0x00000400
mvn r9, r9 //to save a NOT in sbox calculations
strd r6, r7, [r0], #8 //store 1st half tk for 24th round
strd r8, r9, [r0], #24 //store 2nd half tk for 24th round
ldm r0, {r6-r9} //load tk
bl p10 //apply the permutation 10 times
movw r10, #0xc3c3
movt r10, #0xc3c3 //r10<- 0xc3c3c3c3
and r11, r10, r6, ror #26 //ror and mask to match fixslicing
and r12, r10, r7, ror #26 //ror and mask to match fixslicing
strd r11, r12, [r0], #8 //store 1st half tk for 26th round
and r11, r10, r8, ror #26 //ror and mask to match fixslicing
and r12, r10, r9, ror #26 //ror and mask to match fixslicing
eor r11, r11, #0x10000000 //add rconst
eor r11, r11, #0x00000100 //add rconst
mvn r12, r12 //to save a NOT in sbox calculations
strd r11, r12, [r0], #8 //store 2nd half tk for 26th round
bs2fs_odd 28, 6, 12
eor r7, r7, #0x04000000 //add rconst
eor r8, r8, #0x40000000 //add rconst
eor r9, r9, #0x04000000 //add rconst
mvn r9, r9 //to save a NOT in sbox calculations
strd r8, r9, [r0], #8 //store 1st half tk for 27th round
strd r6, r7, [r0], #8 //store 2nd half tk for 27th round
ldm r0, {r6-r9} //load tk
bl p12 //apply the permutation 12 times
movw r10, #0xf0f0
movt r10, #0xf0f0 //r10<- 0xf0f0f0f0
and r11, r10, r6, ror #16 //ror and mask to match fixslicing
and r12, r10, r7, ror #16 //ror and mask to match fixslicing
eor r12, r12, #0x00400000 //add rconst
strd r11, r12, [r0, #24] //store 2nd half tk for 29th round
and r11, r10, r8, ror #16 //ror and mask to match fixslicing
and r12, r10, r9, ror #16 //ror and mask to match fixslicing
eor r11, r11, #0x00440000 //add rconst
eor r12, r12, #0x00500000 //add rconst
mvn r12, r12 //to save a NOT in sbox calculations
strd r11, r12, [r0, #16] //store 1st half tk for 29th round
bs2fs_even 14, 4, 6
eor r6, r6, #0x00100000 //add rconst
eor r8, r8, #0x00100000 //add rconst
eor r8, r8, #0x00000001 //add rconst
eor r9, r9, #0x00100000 //add rconst
mvn r9, r9 //to save a NOT in sbox calculations
strd r6, r7, [r0], #8 //store 1st half tk for 28th round
strd r8, r9, [r0], #24 //store 2nd half tk for 28th round
ldm r0, {r6-r9} //load tk
bl p14 //apply the permutation 14 times
movw r10, #0xc3c3
movt r10, #0xc3c3 //r10<- 0xc3c3c3c3
and r11, r10, r6, ror #10 //ror and mask to match fixslicing
and r12, r10, r7, ror #10 //ror and mask to match fixslicing
eor r11, r11, #0x01000000 //add rconst
eor r12, r12, #0x01000000 //add rconst
strd r11, r12, [r0], #8 //store 1st half tk for 30th round
and r11, r10, r8, ror #10 //ror and mask to match fixslicing
and r12, r10, r9, ror #10 //ror and mask to match fixslicing
eor r11, r11, #0x01400000 //add rconst
eor r11, r11, #0x00001000 //add rconst
mvn r12, r12 //to save a NOT in sbox calculations
strd r11, r12, [r0], #8 //store 2nd half tk for 30th round
bs2fs_odd 12, 6, 28
eor r6, r6, #0x00000400 //add rconst
eor r7, r7, #0x00000400 //add rconst
eor r8, r8, #0x00004000 //add rconst
eor r9, r9, #0x01000000 //add rconst
mvn r9, r9 //to save a NOT in sbox calculations
strd r8, r9, [r0], #8 //store 1st half tk for 31st round
strd r6, r7, [r0], #8 //store 2nd half tk for 31st round
ldm r0, {r6-r9} //load tk
movw r10, #0xf0f0
movt r10, #0xf0f0 //r10<- 0xf0f0f0f0
and r11, r10, r6 //ror and mask to match fixslicing
and r12, r10, r7 //ror and mask to match fixslicing
strd r11, r12, [r0, #24] //store 2nd half tk for 33rd round
and r11, r10, r8 //ror and mask to match fixslicing
and r12, r10, r9 //ror and mask to match fixslicing
eor r11, r11, #0x00000014 //add rconst
eor r12, r12, #0x00000050 //add rconst
mvn r12, r12 //to save a NOT in sbox calculations
strd r11, r12, [r0, #16] //store 1st half tk for 33rd round
bs2fs_even 30, 4, 22
eor r6 ,r6, #0x00000010
eor r8, r8, #0x00010400
eor r9, r9, #0x00000400
mvn r9, r9 //to save a NOT in sbox calculations
strd r6, r7, [r0], #8 //store 1st half tk for 32nd round
strd r8, r9, [r0], #24 //store 2nd half tk for 32nd round
ldm r0, {r6-r9} //load tk
bl p2 //apply the permutation twice
movw r10, #0xc3c3
movt r10, #0xc3c3 //r10<- 0xc3c3c3c3
and r11, r10, r6, ror #26 //ror and mask to match fixslicing
and r12, r10, r7, ror #26 //ror and mask to match fixslicing
strd r11, r12, [r0], #8 //store 1st half tk for 34th round
and r11, r10, r8, ror #26 //ror and mask to match fixslicing
and r12, r10, r9, ror #26 //ror and mask to match fixslicing
eor r11, r11, #0x10000000 //add rconst
eor r11, r11, #0x00000140 //add rconst
eor r12, r12, #0x00000100 //add rconst
mvn r12, r12 //to save a NOT in sbox calculations
strd r11, r12, [r0], #8 //store 2nd half tk for 34th round
bs2fs_odd 28, 6, 12
eor r7, r7, #0x04000000 //add rconst
eor r8, r8, #0x44000000 //add rconst
mvn r9, r9 //to save a NOT in sbox calculations
strd r8, r9, [r0], #8 //store 1st half tk for 35th round
strd r6, r7, [r0], #8 //store 2nd half tk for 35th round
ldm r0, {r6-r9} //load tk
bl p4 //apply the permutation 4 times
movw r10, #0xf0f0
movt r10, #0xf0f0 //r10<- 0xf0f0f0f0
and r11, r10, r6, ror #16 //ror and mask to match fixslicing
and r12, r10, r7, ror #16 //ror and mask to match fixslicing
eor r11, r11, #0x00400000 //add rconst
strd r11, r12, [r0, #24] //store 2nd half tk for 37th round
and r11, r10, r8, ror #16 //ror and mask to match fixslicing
and r12, r10, r9, ror #16 //ror and mask to match fixslicing
eor r11, r11, #0x00440000 //add rconst
eor r12, r12, #0x00500000 //add rconst
mvn r12, r12 //to save a NOT in sbox calculations
strd r11, r12, [r0, #16] //store 1st half tk for 37th round
bs2fs_even 14, 4, 6
eor r6, r6, #0x00100000 //add rconst
eor r7, r7, #0x00100000 //add rconst
eor r8, r8, #0x00000001 //add rconst
eor r9, r9, #0x00100000 //add rconst
mvn r9, r9 //to save a NOT in sbox calculations
strd r6, r7, [r0], #8 //store 1st half tk for 36th round
strd r8, r9, [r0], #24 //store 2nd half tk for 36th round
ldm r0, {r6-r9} //load tk
bl p6 //apply the permutation 6 times
movw r10, #0xc3c3
movt r10, #0xc3c3 //r10<- 0xc3c3c3c3
and r11, r10, r6, ror #10 //ror and mask to match fixslicing
and r12, r10, r7, ror #10 //ror and mask to match fixslicing
eor r12, r12, #0x01000000 //add rconst
strd r11, r12, [r0], #8 //store 1st half tk for 38th round
and r11, r10, r8, ror #10 //ror and mask to match fixslicing
and r12, r10, r9, ror #10 //ror and mask to match fixslicing
eor r11, r11, #0x01400000 //add rconst
eor r11, r11, #0x00001000 //add rconst
eor r12, r12, #0x00400000 //add rconst
mvn r12, r12 //to save a NOT in sbox calculations
strd r11, r12, [r0], #8 //store 2nd half tk for 38th round
bs2fs_odd 12, 6, 28
eor r6, r6, #0x00000400 //add rconst
eor r7, r7, #0x00000400 //add rconst
eor r8, r8, #0x01000000 //add rconst
eor r8, r8, #0x00004000 //add rconst
eor r9, r9, #0x00000400 //add rconst
mvn r9, r9 //to save a NOT in sbox calculations
strd r8, r9, [r0], #8 //store 1st half tk for 39th round
strd r6, r7, [r0], #8 //store 2nd half tk for 39th round
ldm r0, {r6-r9} //load tk
bl p8 //apply the permutation 8 times
movw r10, #0xf0f0
movt r10, #0xf0f0 //r10<- 0xf0f0f0f0
bs2fs_even 30, 4, 22
eor r6, r6, #0x00000010
eor r8, r8, #0x00010000
eor r8, r8, #0x00000010
eor r9, r9, #0x00000400
mvn r9, r9 //to save a NOT in sbox calculations
strd r6, r7, [r0], #8 //store 1st half tk for 40th round
strd r8, r9, [r0] //store 2nd half tk for 40th round
add.w sp, #4
pop {r0-r12, lr}
bx lr
// Applies the permutations P^2, ..., P^14 to TK1 for rounds 0 to 16.
// Since P^16=Id, we don't need more calculations as no LFSR is applied to TK1
// Produces 8 16-byte entries (128 bytes) in rtk1; r2/r3 cache the masks
// 0xf0f0f0f0 / 0x03030303 used to match the fixsliced representation.
@ void tk_schedule_1(uint8_t* rtk1, const uint8_t* tk1)
.global tk_schedule_1
.type tk_schedule_1,%function
.align 2
tk_schedule_1:
push {r0-r12, lr}
ldr.w r3, [r1, #8] //load tk1 (3rd word)
ldr.w r4, [r1, #4] //load tk1 (2nd word)
ldr.w r5, [r1, #12] //load tk1 (4th word)
ldr.w r2, [r1] //load tk1 (1st word)
movw r10, #0x0a0a
movt r10, #0x0a0a //r10<- 0x0a0a0a0a
movw r11, #0x3030
movt r11, #0x3030 //r11<- 0x30303030
swpmv r2, r2, r12, r10, #3, #0
swpmv r3, r3, r12, r10, #3, #0
swpmv r4, r4, r12, r10, #3, #0
swpmv r5, r5, r12, r10, #3, #0
swpmv r4, r2, r12, r11, #2, #0
swpmv r3, r2, r12, r11, #4, #2
swpmv r5, r2, r12, r11, #6, #4
swpmv r3, r4, r12, r11, #2, #2
swpmv r5, r4, r12, r11, #4, #4
swpmv r5, r3, r12, r11, #2, #4
mov r6, r2 //move tk1 from r2-r5 to r6-r9
mov r7, r3 //move tk1 from r2-r5 to r6-r9
mov r8, r4 //move tk1 from r2-r5 to r6-r9
mov r9, r5 //move tk1 from r2-r5 to r6-r9
movw r2, #0xf0f0
movt r2, #0xf0f0 //r2<- 0xf0f0f0f0
and r11, r8, r2 //tk &= 0xf0f0f0f0 (3rd word)
and r12, r9, r2 //tk &= 0xf0f0f0f0 (4th word)
strd r11, r12, [r0], #8 //store 1st half tk for 1st round
and r11, r6, r2 //tk &= 0xf0f0f0f0 (1st word)
and r12, r7, r2 //tk &= 0xf0f0f0f0 (2nd word)
strd r11, r12, [r0], #8 //store 2nd half tk for 1st round
bl p2 //apply the permutation twice
movw r3, #0x0303
movt r3, #0x0303 //r3<- 0x03030303
and r11, r3, r6, ror #28 //--- ror and masks to match fixslicing
and r12, r6, r3, lsl #6
orr r12, r11, r12, ror #12
str.w r12, [r0, #8]
and r11, r3, r7, ror #28
and r12, r7, r3, lsl #6
orr r12, r11, r12, ror #12
str.w r12, [r0, #12]
and r11, r3, r9, ror #28
and r12, r9, r3, lsl #6
orr r12, r11, r12, ror #12
str.w r12, [r0, #4]
and r11, r3, r8, ror #28
and r12, r8, r3, lsl #6
orr r12, r11, r12, ror #12
str.w r12, [r0], #16 //ror and masks to match fixslicing ---
bl p2 //apply the permutation twice more (P^4 in total)
and r11, r2, r6, ror #16 //ror and mask to match fixslicing
and r12, r2, r7, ror #16 //ror and mask to match fixslicing
strd r11, r12, [r0, #8] //store 2nd half tk for 5th round
and r11, r2, r8, ror #16 //ror and mask to match fixslicing
and r12, r2, r9, ror #16 //ror and mask to match fixslicing
strd r11, r12, [r0], #16 //store 1st half tk for 5th round
bl p2 //apply the permutation twice more (P^6 in total)
and r11, r3, r6, ror #12 //--- ror and masks to match fixslicing
and r12, r6, r3, lsl #6
orr r12, r11, r12, ror #28
str.w r12, [r0, #8]
and r11, r3, r7, ror #12
and r12, r7, r3, lsl #6
orr r12, r11, r12, ror #28
str.w r12, [r0, #12]
and r11, r3, r9, ror #12
and r12, r9, r3, lsl #6
orr r12, r11, r12, ror #28
str.w r12, [r0, #4]
and r11, r3, r8, ror #12
and r12, r8, r3, lsl #6
orr r12, r11, r12, ror #28
str.w r12, [r0], #16 //ror and masks to match fixslicing ---
bl p2 //apply the permutation twice more (P^8 in total)
and r11, r2, r6 //ror and mask to match fixslicing
and r12, r2, r7 //ror and mask to match fixslicing
strd r11, r12, [r0, #8] //store 2nd half tk for 9th round
and r11, r2, r8 //ror and mask to match fixslicing
and r12, r2, r9 //ror and mask to match fixslicing
strd r11, r12, [r0], #16 //store 1st half tk for 9th round
bl p2 //apply the permutation twice more (P^10 in total)
and r11, r3, r6, ror #28 //--- ror and masks to match fixslicing
and r12, r6, r3, lsl #6
orr r12, r11, r12, ror #12
str.w r12, [r0, #8]
and r11, r3, r7, ror #28
and r12, r7, r3, lsl #6
orr r12, r11, r12, ror #12
str.w r12, [r0, #12]
and r11, r3, r9, ror #28
and r12, r9, r3, lsl #6
orr r12, r11, r12, ror #12
str.w r12, [r0, #4]
and r11, r3, r8, ror #28
and r12, r8, r3, lsl #6
orr r12, r11, r12, ror #12
str.w r12, [r0], #16 //ror and masks to match fixslicing ---
bl p2 //apply the permutation twice more (P^12 in total)
and r11, r2, r6, ror #16 //ror and mask to match fixslicing
and r12, r2, r7, ror #16 //ror and mask to match fixslicing
strd r11, r12, [r0, #8] //store 2nd half tk for 13th round
and r11, r2, r8, ror #16 //ror and mask to match fixslicing
and r12, r2, r9, ror #16 //ror and mask to match fixslicing
strd r11, r12, [r0], #16 //store 1st half tk for 13th round
bl p2 //apply the permutation twice more (P^14 in total)
and r11, r3, r6, ror #12 //--- ror and masks to match fixslicing
and r12, r6, r3, lsl #6
orr r12, r11, r12, ror #28
str.w r12, [r0, #8]
and r11, r3, r7, ror #12
and r12, r7, r3, lsl #6
orr r12, r11, r12, ror #28
str.w r12, [r0, #12]
and r11, r3, r9, ror #12
and r12, r9, r3, lsl #6
orr r12, r11, r12, ror #28
str.w r12, [r0, #4]
and r11, r3, r8, ror #12
and r12, r8, r3, lsl #6
orr r12, r11, r12, ror #28
str.w r12, [r0] //ror and masks to match fixslicing ---
pop {r0-r12, lr}
bx lr
|
aadomn/cymric
| 22,476
|
artifact_tches2025-3/benchmark_armv7m/lwc/xoodyak/Xoodoo-uf-armv7m-le-gcc.s
|
@
@ The eXtended Keccak Code Package (XKCP)
@ https://github.com/XKCP/XKCP
@
@ The Xoodoo permutation, designed by Joan Daemen, Seth Hoffert, Gilles Van Assche and Ronny Van Keer.
@
@ Implementation by Ronny Van Keer, hereby denoted as "the implementer".
@
@ For more information, feedback or questions, please refer to the Keccak Team website:
@ https://keccak.team/
@
@ To the extent possible under law, the implementer has waived all copyright
@ and related or neighboring rights to the source code in this file.
@ http://creativecommons.org/publicdomain/zero/1.0/
@
@ WARNING: These functions work only on little-endian CPUs with the ARMv7-M architecture (Cortex-M3, ...).
.thumb
.syntax unified
.text
@ ----------------------------------------------------------------------------
@
@ void Xoodoo_Initialize(void *state)
@
.align 4
.global Xoodoo_Initialize
.type Xoodoo_Initialize, %function;
@ Zeroes the 48-byte (12-word) Xoodoo state.
@ In:       r0 = pointer to state
@ Clobbers: r1-r3, r12 (all caller-saved under AAPCS)
Xoodoo_Initialize:
movs r1, #0
movs r2, #0
movs r3, #0
movs r12, #0
stmia r0!, { r1 - r3, r12 } @ clear words 0-3
stmia r0!, { r1 - r3, r12 } @ clear words 4-7
stmia r0!, { r1 - r3, r12 } @ clear words 8-11
bx lr
.align 4
@ ----------------------------------------------------------------------------
@
@ void Xoodoo_AddBytes(void *state, const unsigned char *data, unsigned int offset, unsigned int length)
@
.global Xoodoo_AddBytes
.type Xoodoo_AddBytes, %function;
@ XORs 'length' bytes of 'data' into the state starting at 'offset'.
@ In: r0 = state, r1 = data, r2 = offset, r3 = length
@ Full 32-bit words first, then a byte-wise tail. Assumes the state
@ words at 'offset' are word-aligned when length >= 4 (unaligned ldr/str
@ are supported on ARMv7-M, so misaligned offsets still work).
Xoodoo_AddBytes:
push {r4,lr}
adds r0, r0, r2 @ state += offset
subs r3, r3, #4 @ .if length >= 4
bcc Xoodoo_AddBytes_Bytes
Xoodoo_AddBytes_LanesLoop: @ then, perform on lanes
ldr r2, [r0]
ldr r4, [r1], #4
eors r2, r2, r4
str r2, [r0], #4
subs r3, r3, #4
bcs Xoodoo_AddBytes_LanesLoop
Xoodoo_AddBytes_Bytes:
adds r3, r3, #3 @ r3 = remaining bytes - 1; CS iff >= 1 byte left
bcc Xoodoo_AddBytes_Exit
Xoodoo_AddBytes_BytesLoop:
ldrb r2, [r0]
ldrb r4, [r1], #1
eors r2, r2, r4
strb r2, [r0], #1
subs r3, r3, #1
bcs Xoodoo_AddBytes_BytesLoop
Xoodoo_AddBytes_Exit:
pop {r4,pc}
.align 4
@ ----------------------------------------------------------------------------
@
@ void Xoodoo_OverwriteBytes(void *state, const unsigned char *data, unsigned int offset, unsigned int length)
@
.global Xoodoo_OverwriteBytes
.type Xoodoo_OverwriteBytes, %function;
@ Copies 'length' bytes of 'data' over the state starting at 'offset'.
@ In: r0 = state, r1 = data, r2 = offset, r3 = length
@ Word copies first, then a byte-wise tail (same loop structure as
@ Xoodoo_AddBytes but with plain stores instead of XOR).
Xoodoo_OverwriteBytes:
adds r0, r0, r2 @ state += offset
subs r3, r3, #4 @ .if length >= 4
bcc Xoodoo_OverwriteBytes_Bytes
Xoodoo_OverwriteBytes_LanesLoop: @ then, perform on words
ldr r2, [r1], #4
str r2, [r0], #4
subs r3, r3, #4
bcs Xoodoo_OverwriteBytes_LanesLoop
Xoodoo_OverwriteBytes_Bytes:
adds r3, r3, #3 @ r3 = remaining bytes - 1; CS iff >= 1 byte left
bcc Xoodoo_OverwriteBytes_Exit
Xoodoo_OverwriteBytes_BytesLoop:
ldrb r2, [r1], #1
strb r2, [r0], #1
subs r3, r3, #1
bcs Xoodoo_OverwriteBytes_BytesLoop
Xoodoo_OverwriteBytes_Exit:
bx lr
.align 4
@ ----------------------------------------------------------------------------
@
@ void Xoodoo_OverwriteWithZeroes(void *state, unsigned int byteCount)
@
.global Xoodoo_OverwriteWithZeroes
.type Xoodoo_OverwriteWithZeroes, %function;
@ Zeroes the first 'byteCount' bytes of the state.
@ In: r0 = state, r1 = byteCount
@ byteCount/4 word stores, then byteCount%4 byte stores.
Xoodoo_OverwriteWithZeroes:
movs r3, #0
lsrs r2, r1, #2 @ r2 = number of whole words
beq Xoodoo_OverwriteWithZeroes_Bytes
Xoodoo_OverwriteWithZeroes_LoopLanes:
str r3, [r0], #4
subs r2, r2, #1
bne Xoodoo_OverwriteWithZeroes_LoopLanes
Xoodoo_OverwriteWithZeroes_Bytes:
ands r1, #3 @ r1 = trailing byte count
beq Xoodoo_OverwriteWithZeroes_Exit
Xoodoo_OverwriteWithZeroes_LoopBytes:
strb r3, [r0], #1
subs r1, r1, #1
bne Xoodoo_OverwriteWithZeroes_LoopBytes
Xoodoo_OverwriteWithZeroes_Exit:
bx lr
.align 4
@ ----------------------------------------------------------------------------
@
@ void Xoodoo_ExtractBytes(const void *state, unsigned char *data, unsigned int offset, unsigned int length)
@
.global Xoodoo_ExtractBytes
.type Xoodoo_ExtractBytes, %function;
@ Copies 'length' bytes of the state (starting at 'offset') into 'data'.
@ In: r0 = state (read only), r1 = data (written), r2 = offset, r3 = length
@ Word copies first, then a byte-wise tail.
Xoodoo_ExtractBytes:
adds r0, r0, r2 @ state += offset
subs r3, r3, #4 @ .if length >= 4
bcc Xoodoo_ExtractBytes_Bytes
Xoodoo_ExtractBytes_LanesLoop: @ then, handle words
ldr r2, [r0], #4
str r2, [r1], #4
subs r3, r3, #4
bcs Xoodoo_ExtractBytes_LanesLoop
Xoodoo_ExtractBytes_Bytes:
adds r3, r3, #3 @ r3 = remaining bytes - 1; CS iff >= 1 byte left
bcc Xoodoo_ExtractBytes_Exit
Xoodoo_ExtractBytes_BytesLoop:
ldrb r2, [r0], #1
strb r2, [r1], #1
subs r3, r3, #1
bcs Xoodoo_ExtractBytes_BytesLoop
Xoodoo_ExtractBytes_Exit:
bx lr
.align 4
@ ----------------------------------------------------------------------------
@
@ void Xoodoo_ExtractAndAddBytes(void *state, const unsigned char *input, unsigned char *output, unsigned int offset, unsigned int length)
@
.global Xoodoo_ExtractAndAddBytes
.type Xoodoo_ExtractAndAddBytes, %function;
@ output[i] = state[offset+i] ^ input[i] for i in [0, length).
@ In: r0 = state, r1 = input, r2 = output, r3 = offset, [sp+8] = length
@ (5th C argument arrives on the stack under AAPCS; +8 accounts for the
@ two registers pushed below).
Xoodoo_ExtractAndAddBytes:
push {r4,r5}
adds r0, r0, r3 @ state += offset (offset register no longer needed, reuse for length)
ldr r3, [sp, #8] @ get length argument from stack
subs r3, r3, #4 @ .if length >= 4
bcc Xoodoo_ExtractAndAddBytes_Bytes
Xoodoo_ExtractAndAddBytes_LanesLoop: @ then, handle words
ldr r5, [r0], #4
ldr r4, [r1], #4
eors r5, r5, r4
str r5, [r2], #4
subs r3, r3, #4
bcs Xoodoo_ExtractAndAddBytes_LanesLoop
Xoodoo_ExtractAndAddBytes_Bytes:
adds r3, r3, #3 @ r3 = remaining bytes - 1; CS iff >= 1 byte left
bcc Xoodoo_ExtractAndAddBytes_Exit
Xoodoo_ExtractAndAddBytes_BytesLoop:
ldrb r5, [r0], #1
ldrb r4, [r1], #1
eors r5, r5, r4
strb r5, [r2], #1
subs r3, r3, #1
bcs Xoodoo_ExtractAndAddBytes_BytesLoop
Xoodoo_ExtractAndAddBytes_Exit:
pop {r4,r5}
bx lr
.align 4
@ ----------------------------------------------------------------------------
@ Xoodoo rotation offsets: theta (_r0, _r1), rho-west (_t3, _w1),
@ rho-east (_e0, _e1).
.equ _r0 , 5
.equ _r1 , 14
.equ _t3 , 1
.equ _w1 , 11
.equ _e0 , 2
.equ _e1 , 8
@ Plain round constants rc12..rc1 (XORed into plane-A word 0).
.equ _rc12 , 0x00000058
.equ _rc11 , 0x00000038
.equ _rc10 , 0x000003C0
.equ _rc9 , 0x000000D0
.equ _rc8 , 0x00000120
.equ _rc7 , 0x00000014
.equ _rc6 , 0x00000060
.equ _rc5 , 0x0000002C
.equ _rc4 , 0x00000380
.equ _rc3 , 0x000000F0
.equ _rc2 , 0x000001A0
.equ _rc1 , 0x00000012
@ Pre-rotated round-constant variants used by the unrolled permutations;
@ the name _rcKxN pairs constant K with unrolled round N. The values are
@ presumably rcK rotated to absorb the lazy in-register rotations of the
@ mRound schedule — confirm against the XKCP reference if modifying.
@ First set: 6-round variant (rounds 1..6).
.equ _rc6x1, 0x00000003
.equ _rc5x2, 0x0b000000
.equ _rc4x3, 0x07000000
.equ _rc3x4, 0x000f0000
.equ _rc2x5, 0x0000d000
.equ _rc1x6, 0x00000048
@ Second set: 12-round variant (rounds 1..12).
.equ _rc12x1, 0xc0000002
.equ _rc11x2, 0x0e000000
.equ _rc10x3, 0x07800000
.equ _rc9x4 , 0x000d0000
.equ _rc8x5 , 0x00009000
.equ _rc7x6 , 0x00000050
.equ _rc6x7 , 0x0000000c
.equ _rc5x8 , 0x2c000000
.equ _rc4x9 , 0x1c000000
.equ _rc3x10, 0x003c0000
.equ _rc2x11, 0x00034000
.equ _rc1x12, 0x00000120
@ ----------------------------------------------------------------------------
@ ro = a0 ^ rol(a1, rho_e1) ^ rol(a2, rho_e2)
@ Left rotations are encoded as ROR #(32-rho); a rho of 0 mod 32
@ degenerates to a plain XOR (flag-setting 16-bit 'eors' encoding).
.macro mXor3 ro, a0, a1, a2, rho_e1, rho_e2
.if ((\rho_e1)%32) == 0
eors \ro, \a0, \a1
.else
eor \ro, \a0, \a1, ROR #(32-(\rho_e1))%32
.endif
.if ((\rho_e2)%32) == 0
eors \ro, \ro, \a2
.else
eor \ro, \ro, \a2, ROR #(32-(\rho_e2))%32
.endif
.endm
@ ro ^= rol(ri, rot): XOR with a left-rotated input; rot 0 mod 32 is a
@ plain XOR.
.macro mRliXor ro, ri, rot
.if ((\rot)%32) == 0
eors \ro, \ro, \ri
.else
eor \ro, \ro, \ri, ROR #(32-(\rot))%32
.endif
.endm
@ ro = ri ^ rol(ro, rot): the rotation is applied to the destination's
@ previous value, which re-aligns a lazily-rotated word while XORing.
.macro mRloXor ro, ri, rot
.if ((\rot)%32) == 0
eors \ro, \ro, \ri
.else
eor \ro, \ri, \ro, ROR #(32-(\rot))%32
.endif
.endm
@ Chi: non-linear layer on one 3-lane column (a0, a1, a2), computed as
@ a ^= ~b & c per lane. The a1 lane is held rotated by _w1 in its
@ register, so the intermediate bic/eor results are counter-rotated to
@ compensate. r0/r1 are scratch.
.macro mChi3 a0,a1,a2,r0,r1
bic \r0, \a2, \a1, ROR #_w1
eors \a0, \a0, \r0, ROR #32-_w1
bic \r1, \a0, \a2, ROR #32-_w1
eors \a1, \a1, \r1
bic \r1, \a1, \a0
eors \a2, \a2, \r1, ROR #_w1
.endm
@ One Xoodoo round (theta, rho-west, iota, chi; rho-east deferred).
@ Lazy-rotation scheme: bit rotations are not applied immediately but
@ folded into the shifted operands of later instructions/rounds.
@ Plane A lives in r2-r5; plane B in the r6i..r9i aliases (r6w..r9w are
@ the same registers in the post-rho-west order); plane C in
@ r10i..r12i, lri. rho_e1/rho_we2 carry the rotation debt of planes B/C
@ from the previous round; rc is the pre-rotated round constant.
@ Clobbers r0 and r1 as scratch.
.macro mRound r6i, r7i, r8i, r9i, r6w, r7w, r8w, r9w, r10i, r11i, r12i, lri, rho_e1, rho_we2, rc
@ Theta: Column Parity Mixer (with late Rho-west, Rho-east bit rotations)
mXor3 r0, r5, \r9i, \lri, \rho_e1, \rho_we2
mXor3 r1, r2, \r6i, \r10i, \rho_e1, \rho_we2
mRliXor r0, r0, _r1-_r0
mRloXor r2, r0, 32-_r0
mRloXor \r6i, r0, \rho_e1-_r0
mRloXor \r10i, r0, \rho_we2-_r0
mXor3 r0, r3, \r7i, \r11i, \rho_e1, \rho_we2
mRliXor r1, r1, _r1-_r0
mRloXor r3, r1, 32-_r0
mRloXor \r7i, r1, \rho_e1-_r0
mRloXor \r11i, r1, \rho_we2-_r0
mXor3 r1, r4, \r8i, \r12i, \rho_e1, \rho_we2
mRliXor r0, r0, _r1-_r0
mRloXor r4, r0, 32-_r0
mRloXor \r8i, r0, \rho_e1-_r0
mRloXor \r12i, r0, \rho_we2-_r0
mRliXor r1, r1, _r1-_r0
mRloXor r5, r1, 32-_r0
mRloXor \r9i, r1, \rho_e1-_r0
mRloXor \lri, r1, \rho_we2-_r0
@ After Theta the whole state is rotated -r0
@ from here we must use a1.w instead of a1.i
@ Iota: round constant
.if \rc == 0xc0000002
@ 0xc0000002 is not a valid Thumb-2 modified immediate; split it.
eor r2, r2, #0x00000002
eor r2, r2, #0xc0000000
.else
eor r2, r2, #\rc
.endif
@ Chi: non linear step, on colums
mChi3 r2, \r6w, \r10i, r0, r1
mChi3 r3, \r7w, \r11i, r0, r1
mChi3 r4, \r8w, \r12i, r0, r1
mChi3 r5, \r9w, \lri, r0, r1
.endm
@ ----------------------------------------------------------------------------
@
@ void Xoodoo_Permute_6rounds( void *state )
@
.global Xoodoo_Permute_6rounds
.type Xoodoo_Permute_6rounds, %function;
@ Applies 6 Xoodoo rounds in place to the 48-byte state at r0.
@ Plane-B words are loaded with halves swapped (r8-r9 before r6-r7) to
@ line up with the register permutation the unrolled rounds expect.
@ After the rounds, the lazy rotations accumulated per plane are undone
@ before the state is stored back. r4 is restored via r1 since r0's
@ stack slot is popped first.
Xoodoo_Permute_6rounds:
push {r0,r4-r11,lr}
ldmia r0!, {r2-r5}
ldmia r0!, {r8-r9}
ldmia r0!, {r6-r7}
ldmia r0, {r10-r12,lr}
mRound r8, r9, r6, r7, r7, r8, r9, r6, r10, r11, r12, lr, 32, 32, _rc6x1
mRound r7, r8, r9, r6, r6, r7, r8, r9, r12, lr, r10, r11, 1, _e1+_w1, _rc5x2
mRound r6, r7, r8, r9, r9, r6, r7, r8, r10, r11, r12, lr, 1, _e1+_w1, _rc4x3
mRound r9, r6, r7, r8, r8, r9, r6, r7, r12, lr, r10, r11, 1, _e1+_w1, _rc3x4
mRound r8, r9, r6, r7, r7, r8, r9, r6, r10, r11, r12, lr, 1, _e1+_w1, _rc2x5
mRound r7, r8, r9, r6, r6, r7, r8, r9, r12, lr, r10, r11, 1, _e1+_w1, _rc1x6
pop {r0,r1} @ r0 = state pointer, r1 = caller's r4
ror r2, r2, #32-(6*_r0)%32 @ undo accumulated rotation, plane A
ror r3, r3, #32-(6*_r0)%32
ror r4, r4, #32-(6*_r0)%32
ror r5, r5, #32-(6*_r0)%32
ror r6, r6, #32-(6*_r0+1)%32 @ undo accumulated rotation, plane B
ror r7, r7, #32-(6*_r0+1)%32
ror r8, r8, #32-(6*_r0+1)%32
ror r9, r9, #32-(6*_r0+1)%32
ror r10, r10, #32-(6*_r0+_e1+_w1)%32 @ undo accumulated rotation, plane C
ror r11, r11, #32-(6*_r0+_e1+_w1)%32
ror r12, r12, #32-(6*_r0+_e1+_w1)%32
ror lr, lr, #32-(6*_r0+_e1+_w1)%32
stmia r0, {r2-r12,lr}
mov r4, r1 @ restore caller's r4
pop {r5-r11,pc}
.align 4
@ ----------------------------------------------------------------------------
@
@ void Xoodoo_Permute_12rounds( void *state )
@
.global Xoodoo_Permute_12rounds
.type Xoodoo_Permute_12rounds, %function;
@ Applies 12 Xoodoo rounds in place to the 48-byte state at r0.
@ Same lazy-rotation scheme as the 6-round variant, but the state is
@ loaded in natural order and the _rcKx1.._rcKx12 constants are used.
@ Accumulated rotations are undone before the state is stored back.
Xoodoo_Permute_12rounds:
push {r0,r4-r11,lr}
ldmia r0, {r2-r12,lr}
mRound r6, r7, r8, r9, r9, r6, r7, r8, r10, r11, r12, lr, 32, 32, _rc12x1
mRound r9, r6, r7, r8, r8, r9, r6, r7, r12, lr, r10, r11, 1, _e1+_w1, _rc11x2
mRound r8, r9, r6, r7, r7, r8, r9, r6, r10, r11, r12, lr, 1, _e1+_w1, _rc10x3
mRound r7, r8, r9, r6, r6, r7, r8, r9, r12, lr, r10, r11, 1, _e1+_w1, _rc9x4
mRound r6, r7, r8, r9, r9, r6, r7, r8, r10, r11, r12, lr, 1, _e1+_w1, _rc8x5
mRound r9, r6, r7, r8, r8, r9, r6, r7, r12, lr, r10, r11, 1, _e1+_w1, _rc7x6
mRound r8, r9, r6, r7, r7, r8, r9, r6, r10, r11, r12, lr, 1, _e1+_w1, _rc6x7
mRound r7, r8, r9, r6, r6, r7, r8, r9, r12, lr, r10, r11, 1, _e1+_w1, _rc5x8
mRound r6, r7, r8, r9, r9, r6, r7, r8, r10, r11, r12, lr, 1, _e1+_w1, _rc4x9
mRound r9, r6, r7, r8, r8, r9, r6, r7, r12, lr, r10, r11, 1, _e1+_w1, _rc3x10
mRound r8, r9, r6, r7, r7, r8, r9, r6, r10, r11, r12, lr, 1, _e1+_w1, _rc2x11
mRound r7, r8, r9, r6, r6, r7, r8, r9, r12, lr, r10, r11, 1, _e1+_w1, _rc1x12
ror r2, r2, #32-(12*_r0)%32 @ undo accumulated rotation, plane A
ror r3, r3, #32-(12*_r0)%32
ror r4, r4, #32-(12*_r0)%32
ror r5, r5, #32-(12*_r0)%32
ror r6, r6, #32-(12*_r0+1)%32 @ undo accumulated rotation, plane B
ror r7, r7, #32-(12*_r0+1)%32
ror r8, r8, #32-(12*_r0+1)%32
ror r9, r9, #32-(12*_r0+1)%32
ror r10, r10, #32-(12*_r0+_e1+_w1)%32 @ undo accumulated rotation, plane C
ror r11, r11, #32-(12*_r0+_e1+_w1)%32
ror r12, r12, #32-(12*_r0+_e1+_w1)%32
ror lr, lr, #32-(12*_r0+_e1+_w1)%32
pop {r0,r1} @ r0 = state pointer, r1 = caller's r4
stmia r0, {r2-r12,lr}
mov r4, r1 @ restore caller's r4
pop {r5-r11,pc}
.align 4
.equ Xoofff_BlockSize , 3*4*4 @ 48 bytes = one Xoodoo state
@ ----------------------------------------------------------------------------
@
@ void Xoofff_AddIs(BitSequence *output, const BitSequence *input, BitLength bitLen)
@
@ output ^= input over bitLen BITS. Processes 48-byte blocks, then
@ 16-byte, 4-byte and 1-byte chunks; a trailing partial byte is XORed
@ and masked so that bits at and above bitLen are cleared in output.
@ In: r0 = output, r1 = input, r2 = bitLen
.global Xoofff_AddIs
.type Xoofff_AddIs, %function;
Xoofff_AddIs:
push {r4-r10,lr}
subs r2, r2, #Xoofff_BlockSize*8 @ whole 48-byte blocks first
bcc Xoofff_AddIs_LessThanBlock
Xoofff_AddIs_BlockLoop:
@ 3 x 16-byte XOR groups per block
ldr r3, [r0, #0]
ldr r4, [r0, #4]
ldr r5, [r0, #8]
ldr r6, [r0, #12]
ldr r7, [r1], #4
ldr r8, [r1], #4
ldr r9, [r1], #4
ldr r10, [r1], #4
eor r3, r3, r7
eor r4, r4, r8
eor r5, r5, r9
eor r6, r6, r10
str r3, [r0], #4
str r4, [r0], #4
str r5, [r0], #4
str r6, [r0], #4
ldr r3, [r0, #0]
ldr r4, [r0, #4]
ldr r5, [r0, #8]
ldr r6, [r0, #12]
ldr r7, [r1], #4
ldr r8, [r1], #4
ldr r9, [r1], #4
ldr r10, [r1], #4
eor r3, r3, r7
eor r4, r4, r8
eor r5, r5, r9
eor r6, r6, r10
str r3, [r0], #4
str r4, [r0], #4
str r5, [r0], #4
str r6, [r0], #4
ldr r3, [r0, #0]
ldr r4, [r0, #4]
ldr r5, [r0, #8]
ldr r6, [r0, #12]
ldr r7, [r1], #4
ldr r8, [r1], #4
ldr r9, [r1], #4
ldr r10, [r1], #4
eor r3, r3, r7
eor r4, r4, r8
eor r5, r5, r9
eor r6, r6, r10
str r3, [r0], #4
str r4, [r0], #4
str r5, [r0], #4
str r6, [r0], #4
subs r2, r2, #Xoofff_BlockSize*8
bcs Xoofff_AddIs_BlockLoop
Xoofff_AddIs_LessThanBlock:
adds r2, r2, #Xoofff_BlockSize*8 @ undo over-subtraction
beq Xoofff_AddIs_Return
subs r2, r2, #16*8 @ 16-byte chunks
bcc Xoofff_AddIs_LessThan16
Xoofff_AddIs_16Loop:
ldr r3, [r0, #0]
ldr r4, [r0, #4]
ldr r5, [r0, #8]
ldr r6, [r0, #12]
ldr r7, [r1], #4
ldr r8, [r1], #4
ldr r9, [r1], #4
ldr r10, [r1], #4
eor r3, r3, r7
eor r4, r4, r8
eor r5, r5, r9
eor r6, r6, r10
str r3, [r0], #4
str r4, [r0], #4
str r5, [r0], #4
str r6, [r0], #4
subs r2, r2, #16*8
bcs Xoofff_AddIs_16Loop
Xoofff_AddIs_LessThan16:
adds r2, r2, #16*8
beq Xoofff_AddIs_Return
subs r2, r2, #4*8 @ 4-byte chunks
bcc Xoofff_AddIs_LessThan4
Xoofff_AddIs_4Loop:
ldr r3, [r0]
ldr r7, [r1], #4
eors r3, r3, r7
str r3, [r0], #4
subs r2, r2, #4*8
bcs Xoofff_AddIs_4Loop
Xoofff_AddIs_LessThan4:
adds r2, r2, #4*8
beq Xoofff_AddIs_Return
subs r2, r2, #8 @ single bytes
bcc Xoofff_AddIs_LessThan1
Xoofff_AddIs_1Loop:
ldrb r3, [r0]
ldrb r7, [r1], #1
eors r3, r3, r7
strb r3, [r0], #1
subs r2, r2, #8
bcs Xoofff_AddIs_1Loop
Xoofff_AddIs_LessThan1:
adds r2, r2, #8 @ r2 = remaining bits (0..7)
beq Xoofff_AddIs_Return
ldrb r3, [r0]
ldrb r7, [r1]
movs r1, #1
eors r3, r3, r7
lsls r1, r1, r2
subs r1, r1, #1 @ r1 = (1 << bits) - 1
ands r3, r3, r1 @ keep only the valid low bits
strb r3, [r0]
Xoofff_AddIs_Return:
pop {r4-r10,pc}
.align 4
@ ----------------------------------------------------------------------------
@
@ size_t Xoofff_CompressFastLoop(unsigned char *kRoll, unsigned char *xAccu, const unsigned char *input, size_t length)
@
@ Stack-frame offsets after the two pushes in the prologue:
@ [sp+0] kRoll ptr, [sp+4] input ptr, [sp+8] saved r1 (xAccu),
@ [sp+12] saved r2 (initial input, for the byte count), [sp+16] length.
.equ Xoofff_Compress_kRoll , 0
.equ Xoofff_Compress_input , 4
.equ Xoofff_Compress_xAccu , 8
.equ Xoofff_Compress_iInput , 12
.equ Xoofff_Compress_length , 16
@ For each full 48-byte input block: state = kRoll ^ block, apply the
@ 6-round Xoodoo permutation (lazy rotations), XOR the result into
@ xAccu (re-aligning rotations on the fly), then roll kRoll.
@ Returns (r0) the number of input bytes consumed.
.global Xoofff_CompressFastLoop
.type Xoofff_CompressFastLoop, %function;
Xoofff_CompressFastLoop:
subs r3, #Xoofff_BlockSize @ length must be greater than block size
push {r1-r12,lr}
push {r0,r2}
ldmia r0, {r2-r12,lr} @ get initial kRoll
Xoofff_CompressFastLoop_Loop:
ldr r0, [sp, #Xoofff_Compress_input] @ add input
ldr r1, [r0], #4
eors r2, r2, r1
ldr r1, [r0], #4
eors r3, r3, r1
ldr r1, [r0], #4
eors r4, r4, r1
ldr r1, [r0], #4
eors r5, r5, r1
ldr r1, [r0], #4
eors r6, r6, r1
ldr r1, [r0], #4
eors r7, r7, r1
ldr r1, [r0], #4
eors r8, r8, r1
ldr r1, [r0], #4
eors r9, r9, r1
ldr r1, [r0], #4
eors r10, r10, r1
ldr r1, [r0], #4
eors r11, r11, r1
ldr r1, [r0], #4
eors r12, r12, r1
ldr r1, [r0], #4
eors lr, lr, r1
str r0, [sp, #Xoofff_Compress_input]
@ permutation
mRound r6, r7, r8, r9, r9, r6, r7, r8, r10, r11, r12, lr, 32, 32, _rc6x1
mRound r9, r6, r7, r8, r8, r9, r6, r7, r12, lr, r10, r11, 1, _e1+_w1, _rc5x2
mRound r8, r9, r6, r7, r7, r8, r9, r6, r10, r11, r12, lr, 1, _e1+_w1, _rc4x3
mRound r7, r8, r9, r6, r6, r7, r8, r9, r12, lr, r10, r11, 1, _e1+_w1, _rc3x4
mRound r6, r7, r8, r9, r9, r6, r7, r8, r10, r11, r12, lr, 1, _e1+_w1, _rc2x5
mRound r9, r6, r7, r8, r8, r9, r6, r7, r12, lr, r10, r11, 1, _e1+_w1, _rc1x6
@ Extract and add into xAccu
ldr r0, [sp, #Xoofff_Compress_xAccu]
ldr r1, [r0]
mRloXor r2, r1, (6*_r0)%32
ldr r1, [r0, #4]
str r2, [r0], #4
mRloXor r3, r1, (6*_r0)%32
ldr r1, [r0, #4]
str r3, [r0], #4
mRloXor r4, r1, (6*_r0)%32
ldr r1, [r0, #4]
str r4, [r0], #4
mRloXor r5, r1, (6*_r0)%32
str r5, [r0], #4
ldm r0, {r2-r5} @ note that r6-r8 and r7-r9 are swapped
mRliXor r2, r8, (6*_r0+1)%32
mRliXor r3, r9, (6*_r0+1)%32
mRliXor r4, r6, (6*_r0+1)%32
mRliXor r5, r7, (6*_r0+1)%32
stm r0!, {r2-r5}
ldm r0, {r2-r5}
mRliXor r2, r10, (6*_r0+_e1+_w1)%32
mRliXor r3, r11, (6*_r0+_e1+_w1)%32
mRliXor r4, r12, (6*_r0+_e1+_w1)%32
mRliXor r5, lr, (6*_r0+_e1+_w1)%32
stm r0!, {r2-r5}
@roll kRoll: shift the 12-word state down one word and append a new
@ word computed from the old first word (x ^ (x<<13) ^ (B0 <<< 3))
ldr r0, [sp, #Xoofff_Compress_kRoll]
ldr lr, [r0], #4
ldmia r0!, {r10-r12}
ldmia r0!, {r2-r9}
eors lr, lr, lr, LSL #13
eors lr, lr, r2, ROR #32-3
sub r0, #Xoofff_BlockSize
stmia r0, {r2-r12,lr} @ lr (new word) lands last by register order
@ loop management
ldr r0, [sp, #Xoofff_Compress_length]
subs r0, #Xoofff_BlockSize
str r0, [sp, #Xoofff_Compress_length]
bcs Xoofff_CompressFastLoop_Loop
@ return number of bytes processed
ldr r0, [sp, #Xoofff_Compress_input]
ldr r1, [sp, #Xoofff_Compress_iInput]
sub r0, r0, r1
pop {r1,r2} @ discard kRoll/input slots
pop {r1-r12,pc}
.align 4
@ ----------------------------------------------------------------------------
@
@ size_t Xoofff_ExpandFastLoop(unsigned char *yAccu, const unsigned char *kRoll, unsigned char *output, size_t length)
@
@ Stack-frame offsets after the two pushes in the prologue:
@ [sp+0] yAccu ptr, [sp+4] output ptr, [sp+8] saved r1 (kRoll),
@ [sp+12] saved r2 (initial output, for the byte count), [sp+16] length.
.equ Xoofff_Expand_yAccu , 0
.equ Xoofff_Expand_output , 4
.equ Xoofff_Expand_kRoll , 8
.equ Xoofff_Expand_iOutput , 12
.equ Xoofff_Expand_length , 16
@ For each 48-byte output block: apply the 6-round Xoodoo permutation
@ to yAccu (lazy rotations), XOR with kRoll while re-aligning rotations
@ and write the result to output, then roll-e yAccu.
@ Returns (r0) the number of output bytes produced.
.global Xoofff_ExpandFastLoop
.type Xoofff_ExpandFastLoop, %function;
Xoofff_ExpandFastLoop:
subs r3, #Xoofff_BlockSize @ length must be greater than block size
push {r1-r12,lr}
push {r0,r2}
ldmia r0, {r2-r12,lr} @ get initial yAccu
Xoofff_ExpandFastLoop_Loop:
@ permutation
mRound r6, r7, r8, r9, r9, r6, r7, r8, r10, r11, r12, lr, 32, 32, _rc6x1
mRound r9, r6, r7, r8, r8, r9, r6, r7, r12, lr, r10, r11, 1, _e1+_w1, _rc5x2
mRound r8, r9, r6, r7, r7, r8, r9, r6, r10, r11, r12, lr, 1, _e1+_w1, _rc4x3
mRound r7, r8, r9, r6, r6, r7, r8, r9, r12, lr, r10, r11, 1, _e1+_w1, _rc3x4
mRound r6, r7, r8, r9, r9, r6, r7, r8, r10, r11, r12, lr, 1, _e1+_w1, _rc2x5
mRound r9, r6, r7, r8, r8, r9, r6, r7, r12, lr, r10, r11, 1, _e1+_w1, _rc1x6
@ Add k and extract
ldr r0, [sp, #Xoofff_Expand_kRoll]
ldr r1, [r0], #4
mRloXor r2, r1, (6*_r0)%32
ldr r1, [sp, #Xoofff_Expand_output]
str r2, [r1], #4
ldr r2, [r0], #4
mRloXor r3, r2, (6*_r0)%32
ldr r2, [r0], #4
str r3, [r1], #4
mRloXor r4, r2, (6*_r0)%32
ldr r2, [r0], #4
str r4, [r1], #4
mRloXor r5, r2, (6*_r0)%32
str r5, [r1], #4
ldm r0!, {r2-r5} @ Note that r6-r8 and r7-r9 are swapped
mRliXor r2, r8, (6*_r0+1)%32
str r2, [r1], #4
mRliXor r3, r9, (6*_r0+1)%32
str r3, [r1], #4
mRliXor r4, r6, (6*_r0+1)%32
str r4, [r1], #4
mRliXor r5, r7, (6*_r0+1)%32
str r5, [r1], #4
ldm r0!, {r2-r5}
mRliXor r2, r10, (6*_r0+_e1+_w1)%32
str r2, [r1], #4
mRliXor r3, r11, (6*_r0+_e1+_w1)%32
str r3, [r1], #4
mRliXor r4, r12, (6*_r0+_e1+_w1)%32
str r4, [r1], #4
mRliXor r5, lr, (6*_r0+_e1+_w1)%32
str r5, [r1], #4
@ roll-e yAccu: shift the 12-word state down one word and append a new
@ word from the old first word (non-linear: AND of two state words)
ldr r0, [sp, #Xoofff_Expand_yAccu]
str r1, [sp, #Xoofff_Expand_output]
ldr lr, [r0], #4
ldmia r0!, {r10-r12}
ldmia r0!, {r2-r9}
and r1, r6, r2
eor lr, r1, lr, ROR #32-5
eor lr, lr, r2, ROR #32-13
eor lr, lr, #7
sub r0, #Xoofff_BlockSize
stmia r0, {r2-r12,lr} @ lr (new word) lands last by register order
@ loop management
ldr r0, [sp, #Xoofff_Expand_length]
subs r0, #Xoofff_BlockSize
str r0, [sp, #Xoofff_Expand_length]
bcs Xoofff_ExpandFastLoop_Loop
@ return number of bytes processed
ldr r0, [sp, #Xoofff_Expand_output]
ldr r1, [sp, #Xoofff_Expand_iOutput]
sub r0, r0, r1
pop {r1,r2} @ discard yAccu/output slots
pop {r1-r12,pc}
.align 4
|
aadomn/cymric
| 2,579
|
artifact_tches2025-3/benchmark_armv7m/lwc/cymric/lea/lea128.S
|
/****************************************************************************
* ARMv7M assembly implementation of the LEA-128 block cipher where key
* expansion is performed on-the-fly.
* @author Alexandre Adomnicai
* @date April 2025
****************************************************************************/
.syntax unified
.thumb
// Argument pointers (only valid until the key/data are loaded:
// 'x' aliases k1, and 'k' aliases the r2 scratch used in lea_round).
k .req r2
x .req r1
// key
k0 .req r0
k1 .req r1
k2 .req r3
k3 .req r4
// data
x0 .req r5
x1 .req r6
x2 .req r7
x3 .req r8
// constants
g0 .req r9
g1 .req r10
g2 .req r11
g3 .req r12
// One LEA-128 round with the round keys derived on-the-fly from the
// key state (k0-k3) and the round constant \rconst. The data words
// x0-x3 are kept lazily rotated across rounds (see the alignment
// rotations in lea128_encrypt). Clobbers r2 and lr; rotates \rconst
// by 28 in place for its next use.
.macro lea_round rconst
// calculate round keys on-the-fly
add k0, \rconst, k0, ror #31
add k1, k1, \rconst, ror #31
ror k1, #29
add k2, k2, \rconst, ror #30
ror k2, #26
add k3, k3, \rconst, ror #29
ror k3, #21
// save x0
ror lr, x0, #23
// x0 = ROTR32((x0 ^ k0) + (x1 ^ k1),23);
eor r2, k1, x1, ror #5
eor x0, x0, k0, ror #8
add x0, r2, x0, ror #23
// x1 = ROTR32((x1 ^ k2) + (x2 ^ k1), 5);
eor r2, k1, x2, ror #3
eor x1, k2, x1, ror #5
add x1, r2
// x2 = ROTR32((x2 ^ k3) + (x3 ^ k1), 3);
eor r2, x3, k1
eor x2, k3, x2, ror #3
add x2, r2
// x3 = x0;
mov x3, lr
ror \rconst, #28 // advance constant for the next round
.endm
// Runs four lea_round steps, one per round constant g0-g3.
// lr is saved/restored because lea_round uses it as scratch.
lea_quadruple_round:
push {lr}
lea_round g0
lea_round g1
lea_round g2
lea_round g3
pop {lr}
bx lr
.size lea_quadruple_round, .-lea_quadruple_round
.global lea128_encrypt
.type lea128_encrypt,%function
.align 4
// Encrypts one 16-byte block with LEA-128 (24 rounds = 6 quadruple
// rounds), expanding the key schedule on-the-fly.
// In:  r0 = output buffer (ciphertext written here)
//      r1 = input block, r2 = 128-bit key
// The key word k0 and the data words are pre-rotated so that the
// rotations folded into lea_round line up; the inverse rotations are
// applied before storing the result.
lea128_encrypt:
// save registers
push {r0-r12, lr}
// load rconsts
movw g0, #0xe9db
movt g0, #0xc3ef
movw g1, #0xd604
movt g1, #0x88c4
movw g2, #0xf229
movt g2, #0xe789
movw g3, #0x8763
movt g3, #0xc6f9
// load ptext
ldr.w x0, [x, #0]
ldr.w x1, [x, #4]
ldr.w x2, [x, #8]
ldr.w x3, [x, #12]
// load key
ldr.w k0, [k, #0]
ldr.w k1, [k, #4]
ldr.w k2, [k, #8]
ldr.w k3, [k, #12]
// rotations to match lea_round alignments
ror k0, #1
ror x0, #9
ror x1, #27
ror x2, #29
// perform encryption
bl lea_quadruple_round
bl lea_quadruple_round
bl lea_quadruple_round
bl lea_quadruple_round
bl lea_quadruple_round
bl lea_quadruple_round
// save 128-bit cipher text
ldr.w r0, [sp], #4 // pop the saved output pointer
ror x0, #23
ror x1, #5
ror x2, #3
str.w x0, [r0, #0]
str.w x1, [r0, #4]
str.w x2, [r0, #8]
str.w x3, [r0, #12]
// restore registers
pop {r1-r12, lr}
bx lr
.size lea128_encrypt, .-lea128_encrypt
|
aadomn/cymric
| 19,756
|
artifact_tches2025-3/benchmark_armv7m/lwc/cymric/gift/gift128.s
|
/****************************************************************************
* Compact ARM assembly implementation of the GIFT-128 block cipher. This
* implementation focuses on code size rather than speed.
*
* See "Fixslicing: A New GIFT Representation" paper available at
* https://eprint.iacr.org/2020/412.pdf for more details.
*
* @author Alexandre Adomnicai, Nanyang Technological University
*
* @date July 2021
****************************************************************************/
.syntax unified
.thumb
/*****************************************************************************
* Round constants look-up table according to the fixsliced representation.
*****************************************************************************/
.align 2
.type rconst,%object
// 40 fixsliced round constants, one per GIFT-128 round, consumed
// sequentially (5 per quintuple round) via post-incremented loads.
rconst:
.word 0x10000008, 0x80018000, 0x54000002, 0x01010181
.word 0x8000001f, 0x10888880, 0x6001e000, 0x51500002
.word 0x03030180, 0x8000002f, 0x10088880, 0x60016000
.word 0x41500002, 0x03030080, 0x80000027, 0x10008880
.word 0x4001e000, 0x11500002, 0x03020180, 0x8000002b
.word 0x10080880, 0x60014000, 0x01400002, 0x02020080
.word 0x80000021, 0x10000080, 0x0001c000, 0x51000002
.word 0x03010180, 0x8000002e, 0x10088800, 0x60012000
.word 0x40500002, 0x01030080, 0x80000006, 0x10008808
.word 0xc001a000, 0x14500002, 0x01020181, 0x8000001a
/******************************************************************************
* Macro to compute the SWAPMOVE technique.
* - out0-out1 output registers
* - in0-in1 input registers
* - m mask
* - n shift value
* - tmp temporary register
******************************************************************************/
// Swaps the bits of in0 selected by (m << n) with the bits of in1
// selected by m, writing to out0/out1 (in-place use is allowed).
.macro swpmv out0, out1, in0, in1, m, n, tmp
eor \tmp, \in1, \in0, lsr \n
and \tmp, \m
eor \out1, \in1, \tmp
eor \out0, \in0, \tmp, lsl \n
.endm
/******************************************************************************
* Macro to compute a nibble-wise rotation to the right.
* - out output register
* - in input register
* - m0-m1 masks
* - n0-n1 shift value
* - tmp temporary register
******************************************************************************/
// out = (in >> n0 masked by m0) | (in << n1 masked-before-shift by m1):
// a rotation right by n0 within each nibble/group defined by the masks
// (n0 + n1 = group width).
.macro nibror out, in, m0, m1, n0, n1, tmp
and \tmp, \m0, \in, lsr \n0
and \out, \in, \m1
orr \out, \tmp, \out, lsl \n1
.endm
/******************************************************************************
* Macro to compute the SBox (the NOT operation is included in the round keys).
* - in0-in3 input/output registers
* - tmp temporary register
* - n ror index value to math fixslicing
******************************************************************************/
// Bitsliced GIFT S-box over the four state slices in0-in3. The final
// NOT of the specification is folded into the round keys, but a 'mvn'
// on in3 remains here as part of this representation. 'n' is a ror
// amount applied to in0 to match the fixsliced alignment.
.macro sbox in0, in1, in2, in3, tmp, n
and \tmp, \in2, \in0, ror \n
eor \in1, \in1, \tmp
and \tmp, \in1, \in3
eor \in0, \tmp, \in0, ror \n
orr \tmp, \in0, \in1
eor \in2, \tmp, \in2
eor \in3, \in3, \in2
eor \in1, \in1, \in3
and \tmp, \in0, \in1
eor \in2, \in2, \tmp
mvn \in3, \in3
.endm
/******************************************************************************
* Macro to compute the first round within a quintuple round routine.
* - in0-in3 input/output registers
******************************************************************************/
// Round i with i mod 5 == 0: S-box then nibble-wise rotations.
// Advances r0 (rconst pointer) and r1 (rkey pointer) by one entry.
// Expects r2 = 0x11111111, r4 = 0x77777777, clobbers r5-r8, r14.
.macro round_0 in0, in1, in2, in3
ldr.w r5, [r0], #4 // load rconst
ldr.w r6, [r1], #4 // load 1st rkey word
ldr.w r7, [r1], #4 // load 2nd rkey word
sbox \in0, \in1, \in2, \in3, r8, #0 // sbox layer
nibror \in3, \in3, r4, r2, 1, 3, r8 // linear layer
nibror \in2, \in2, r2, r4, 3, 1, r8 // linear layer
orr r14, r2, r2, lsl #1 // 0x33333333 for 'nibror'
nibror \in1, \in1, r14, r14, 2, 2, r8 // linear layer
eor \in1, \in1, r6 // add 1st rkey word
eor \in2, \in2, r7 // add 2nd rkey word
eor \in0, \in0, r5 // add rconst
.endm
/******************************************************************************
* Macro to compute the second round within a quintuple round routine.
* - in0-in3 input/output registers
******************************************************************************/
// Round i with i mod 5 == 1: S-box then half-word rotations (HALF_ROR).
// Expects r3 = 0x00000fff-style mask material; clobbers r5-r8, r14.
.macro round_1 in0, in1, in2, in3
ldr.w r5, [r0], #4 // load rconst
ldr.w r6, [r1], #4 // load 1st rkey word
ldr.w r7, [r1], #4 // load 2nd rkey word
sbox \in0, \in1, \in2, \in3, r8, #0 // sbox layer
mvn r14, r3, lsl #12 // r14<-0x0fff0fff for HALF_ROR
nibror \in3, \in3, r14, r3, 4, 12, r8 // HALF_ROR(in3, 4)
nibror \in2, \in2, r3, r14, 12, 4, r8 // HALF_ROR(in2, 12)
rev16 \in1, \in1 // HALF_ROR(in1, 8)
eor \in1, \in1, r6 // add 1st rkey word
eor \in2, \in2, r7 // add 2nd rkey word
eor \in0, \in0, r5 // add rconst
.endm
/******************************************************************************
* Macro to compute the third round within a quintuple round routine.
* - in0-in3 input/output registers
******************************************************************************/
// Round i with i mod 5 == 2: S-box then SWAPMOVE-based bit permutation;
// the 2nd rkey word is added with a 16-bit rotation folded in.
// Expects r2 = 0x11111111; clobbers r5-r8, r14.
.macro round_2 in0, in1, in2, in3
ldr.w r5, [r0], #4 // load rconst
ldr.w r6, [r1], #4 // load 1st rkey word
ldr.w r7, [r1], #4 // load 2nd rkey word
sbox \in0, \in1, \in2, \in3, r8, #0 // sbox layer
orr r14, r2, r2, lsl #2 // r14<-0x55555555 for swpmv
swpmv \in1, \in1, \in1, \in1, r14, #1, r8
eor r8, \in3, \in3, lsr #1
and r8, r8, r14, lsr #16
eor \in3, \in3, r8
eor \in3, \in3, r8, lsl #1 //SWAPMOVE(r12,r12,0x55550000,1)
eor r8, \in2, \in2, lsr #1
and r8, r8, r14, lsl #16
eor \in2, \in2, r8
eor \in2, \in2, r8, lsl #1 //SWAPMOVE(r11,r11,0x00005555,1)
eor \in1, \in1, r6 // add 1st rkey word
eor \in2, r7, \in2, ror #16 // add 2nd rkey word
eor \in0, \in0, r5 // add rconst
.endm
/******************************************************************************
* Macro to compute the fourth round within a quintuple round routine.
* - in0-in3 input/output registers
******************************************************************************/
// Round i with i mod 5 == 3: S-box (input held ror #16) then byte-wise
// rotations. The rconst load is deferred because r5 is needed as
// scratch for the second nibror; r8 doubles as mask and temp in the
// last nibror (safe: the mask is consumed before r8 is overwritten).
// Clobbers r5-r8, r14.
.macro round_3 in0, in1, in2, in3
ldr.w r6, [r1], #4 // load 1st rkey word
ldr.w r7, [r1], #4 // load 2nd rkey word
sbox \in0, \in1, \in2, \in3, r8, #16 // sbox layer
eor r14, r3, r3, lsl #8 // r14<-0x0f0f0f0f for nibror
nibror \in1, \in1, r14, r14, #4, #4, r8
orr r14, r14, r14, lsl #2 // r14<-0x3f3f3f3f for nibror
mvn r8, r14, lsr #6 // r8 <-0xc0c0c0c0 for nibror
nibror \in2, \in2, r14, r8, #2, #6, r5
nibror \in3, \in3, r8, r14, #6, #2, r8
ldr.w r5, [r0], #4 // load rconst
eor \in1, \in1, r6 // add 1st rkey word
eor \in2, \in2, r7 // add 2nd rkey word
eor \in0, \in0, r5 // add rconst
.endm
/******************************************************************************
* Macro to compute the fifth round within a quintuple round routine.
* - in0-in3 input/output registers
******************************************************************************/
// Round i with i mod 5 == 4: S-box only; the linear layer of this round
// is realized entirely by the rotations folded into the key additions
// (ror #16 and ror #8). Clobbers r5-r8.
.macro round_4 in0, in1, in2, in3
ldr.w r5, [r0], #4 // load rconst
ldr.w r6, [r1], #4 // load 1st rkey word
ldr.w r7, [r1], #4 // load 2nd rkey word
sbox \in0, \in1, \in2, \in3, r8, #0 // sbox layer
eor \in1, r6, \in1, ror #16 // add 1st keyword
eor \in2, r7, \in2, ror #8 // add 2nd keyword
eor \in0, \in0, r5 // add rconst
.endm
/******************************************************************************
* Macro to compute the GIFT-128 key update (in its classical representation).
* Two 16-bit rotations are computed on the 32-bit word 'v' given as input.
* - u 1st round key word as defined in the specification (U <- W2||W3)
* - v 2nd round key word as defined in the specification (V <- W6||W7)
******************************************************************************/
// GIFT-128 classical key-state update: stores u unchanged and v after
// a 2-bit right rotation of its low half and a 12-bit right rotation
// of its high half, advancing the rkey pointer r1 by 8 bytes.
// Expects masks r9 = 0x00000fff, r10 = 0x0000000f, r12 = 0x3fff0000;
// clobbers r2, r3.
.macro k_upd u, v
and r2, r10, \v, lsr #12
and r3, \v, r9
orr r2, r2, r3, lsl #4
and r3, r12, \v, lsr #2
orr r2, r2, r3
and \v, \v, #0x00030000
orr \v, r2, \v, lsl #14
str.w \u, [r1], #4
str.w \v, [r1], #4
.endm
/******************************************************************************
* Macro to rearrange round key words from their classical to fixsliced
* representations.
* - rk0 1st round key word
* - rk1 2nd round key word
* - idx0 index for SWAPMOVE
* - idx1 index for SWAPMOVE
* - tmp temporary register for SWAPMOVE
******************************************************************************/
// Converts a pair of round-key words from the classical to the
// fixsliced representation via four SWAPMOVE passes; the masks in
// r3/r10/r11 and the shift amounts idx0/idx1 are set by the caller
// per round-position class.
.macro rearr_rk rk0, rk1, idx0, idx1, tmp
swpmv \rk1, \rk1, \rk1, \rk1, r3, \idx0, \tmp
swpmv \rk0, \rk0, \rk0, \rk0, r3, \idx0, \tmp
swpmv \rk1, \rk1, \rk1, \rk1, r10, \idx1, \tmp
swpmv \rk0, \rk0, \rk0, \rk0, r10, \idx1, \tmp
swpmv \rk1, \rk1, \rk1, \rk1, r11, #12, \tmp
swpmv \rk0, \rk0, \rk0, \rk0, r11, #12, \tmp
swpmv \rk1, \rk1, \rk1, \rk1, #0xff, #24, \tmp
swpmv \rk0, \rk0, \rk0, \rk0, #0xff, #24, \tmp
.endm
/******************************************************************************
* Subroutine to update the rkeys according to the classical representation.
******************************************************************************/
.align 2
classical_key_update:
// Four key-state updates: writes 8 round-key words (4 rounds' worth)
// through r1 while cycling the key words r4-r7.
k_upd r5, r7 // 1st classical key update
k_upd r4, r6 // 2nd classical key update
k_upd r7, r5 // 3rd classical key update
k_upd r6, r4 // 4th classical key update
bx lr
/******************************************************************************
* Subroutine to rearrange round key words from classical to fixsliced
* representation for round i s.t. i mod 5 = 0.
******************************************************************************/
.align 2
rearrange_rkey_0:
// Fixslices the rkey pair of one round with index i mod 5 == 0, then
// advances r1 by 40 bytes to the same-class pair of the next
// quintuple round.
ldr.w r6, [r1] // load 1st rkey word (classical rep)
ldr.w r4, [r1, #4] // load 2nd rkey word (classical rep)
rearr_rk r4, r6, #9, #18, r12 // rearrange rkey words for round 1
str.w r4, [r1, #4] // store 2nd rkey word (fixsliced rep)
str.w r6, [r1], #40 // store 1st rkey word (fixsliced rep)
bx lr
/******************************************************************************
* Subroutine to rearrange round key words from classical to fixsliced
* representation for round i s.t. i mod 5 = 1 or 3.
******************************************************************************/
.align 2
rearrange_rkey_1:
// Fixslices the rkey pair of one round with index i mod 5 == 1 or 3,
// then advances r1 by 40 bytes to the next quintuple round.
ldr.w r5, [r1] // load 3rd rkey word (classical rep)
ldr.w r7, [r1, #4] // load 4th rkey word (classical rep)
rearr_rk r5, r7, #3, #6, r8 // rearrange rkey words for round 2
str.w r7, [r1, #4] // store 4th rkey word (fixsliced rep)
str.w r5, [r1], #40 // store 3rd rkey word (fixsliced rep)
bx lr
/******************************************************************************
* Subroutine to rearrange round key words from classical to fixsliced
* representation for round i s.t. i mod 5 = 2.
******************************************************************************/
.align 2
rearrange_rkey_2:
// Fixslices the rkey pair of one round with index i mod 5 == 2, then
// advances r1 by 40 bytes to the next quintuple round.
ldr.w r5, [r1] // load 5th rkey word (classical rep)
ldr.w r7, [r1, #4] // load 6th rkey word (classical rep)
rearr_rk r5, r7, #15, #18, r8 // rearrange rkey words for round 3
str.w r7, [r1, #4] // store 6th rkey word (fixsliced rep)
str.w r5, [r1], #40 // store 5th rkey word (fixsliced rep)
bx lr
.align 2
/*****************************************************************************
* Implementation of the GIFT-128 key schedule according to fixslicing.
* The entire round key material is first computed according to the classical
* representation before being rearranged according to fixslicing.
*****************************************************************************/
@ void gift128_keyschedule(const u8* key, u32* rkey) {
@ In:  r0 = rkey output buffer, r1 = 128-bit master key (note the swap below:
@      r1 becomes the running output pointer).
@ First the whole round-key material is computed in the classical
@ representation, then rearranged in three passes (one per fixsliced pattern).
.global gift128_keyschedule
.type gift128_keyschedule,%function
gift128_keyschedule:
    push {r1-r12, r14}
    ldm r1, {r4-r7} // load key words
    mov r1, r0 // r1 <- output pointer (r0 kept untouched)
    rev r4, r4 // endianness
    rev r5, r5 // endianness
    rev r6, r6 // endianness
    rev r7, r7 // endianness
    str.w r5, [r1, #4]
    str.w r7, [r1], #8 //the first rkeys are not updated
    str.w r4, [r1, #4]
    str.w r6, [r1], #8 //the first rkeys are not updated
    // masks used by 'classical_key_update' (defined earlier in this file)
    movw r12, #0x3fff
    lsl r12, r12, #16 //r12<- 0x3fff0000
    movw r10, #0x000f //r10<- 0x0000000f
    movw r9, #0x0fff //r9 <- 0x00000fff
    // 9 calls expand the remaining round keys in the classical representation
    bl classical_key_update
    bl classical_key_update
    bl classical_key_update
    bl classical_key_update
    bl classical_key_update
    bl classical_key_update
    bl classical_key_update
    bl classical_key_update
    bl classical_key_update
    sub.w r1, r1, #320 // rewind to the round-0 rkey words
    // masks for rounds i s.t. i mod 5 == 0
    movw r3, #0x0055
    movt r3, #0x0055 //r3 <- 0x00550055
    movw r10, #0x3333 //r10<- 0x00003333
    movw r11, #0x000f
    movt r11, #0x000f //r11<- 0x000f000f
    bl rearrange_rkey_0 // fixslice the rkey words for round 0
    bl rearrange_rkey_0 // fixslice the rkey words for round 5
    bl rearrange_rkey_0 // fixslice the rkey words for round 10
    bl rearrange_rkey_0 // fixslice the rkey words for round 15
    bl rearrange_rkey_0 // fixslice the rkey words for round 20
    bl rearrange_rkey_0 // fixslice the rkey words for round 25
    bl rearrange_rkey_0 // fixslice the rkey words for round 30
    bl rearrange_rkey_0 // fixslice the rkey words for round 35
    sub.w r1, r1, #312 // rewind 320 bytes, advance 8 to the round-1 slot
    // masks for rounds i s.t. i mod 5 == 1
    movw r3, #0x1111
    movt r3, #0x1111 // r3 <- 0x11111111
    movw r10, #0x0303
    movt r10, #0x0303 // r10<- 0x03030303
    bl rearrange_rkey_1 // fixslice the rkey words for round 1
    bl rearrange_rkey_1 // fixslice the rkey words for round 6
    bl rearrange_rkey_1 // fixslice the rkey words for round 11
    bl rearrange_rkey_1 // fixslice the rkey words for round 16
    bl rearrange_rkey_1 // fixslice the rkey words for round 21
    bl rearrange_rkey_1 // fixslice the rkey words for round 26
    bl rearrange_rkey_1 // fixslice the rkey words for round 31
    bl rearrange_rkey_1 // fixslice the rkey words for round 36
    sub.w r1, r1, #312 // rewind 320 bytes, advance 8 to the round-2 slot
    // masks for rounds i s.t. i mod 5 == 2
    movw r3, #0xaaaa // r3 <- 0x0000aaaa
    movw r10, #0x3333 // r10<- 0x00003333
    movw r11, #0xf0f0 // r11<- 0x0000f0f0
    bl rearrange_rkey_2 // fixslice the rkey words for round 2
    bl rearrange_rkey_2 // fixslice the rkey words for round 7
    bl rearrange_rkey_2 // fixslice the rkey words for round 12
    bl rearrange_rkey_2 // fixslice the rkey words for round 17
    bl rearrange_rkey_2 // fixslice the rkey words for round 22
    bl rearrange_rkey_2 // fixslice the rkey words for round 27
    bl rearrange_rkey_2 // fixslice the rkey words for round 32
    bl rearrange_rkey_2 // fixslice the rkey words for round 37
    sub.w r1, r1, #312 // rewind 320 bytes, advance 8 to the round-3 slot
    // masks for rounds i s.t. i mod 5 == 3 (reuses rearrange_rkey_1)
    movw r3, #0x0a0a
    movt r3, #0x0a0a // r3 <- 0x0a0a0a0a
    movw r10, #0x00cc
    movt r10, #0x00cc // r10<- 0x00cc00cc
    bl rearrange_rkey_1 // fixslice the rkey words for round 3
    bl rearrange_rkey_1 // fixslice the rkey words for round 8
    bl rearrange_rkey_1 // fixslice the rkey words for round 13
    bl rearrange_rkey_1 // fixslice the rkey words for round 18
    bl rearrange_rkey_1 // fixslice the rkey words for round 23
    bl rearrange_rkey_1 // fixslice the rkey words for round 28
    bl rearrange_rkey_1 // fixslice the rkey words for round 33
    bl rearrange_rkey_1 // fixslice the rkey words for round 38
    // NOTE(review): rounds i s.t. i mod 5 == 4 are presumably left in the
    // classical representation — confirm against the round function.
    pop {r1-r12,r14}
    bx lr
.size gift128_keyschedule, .-gift128_keyschedule
/*****************************************************************************
* Subroutine to implement a quintuple round of GIFT-128.
*****************************************************************************/
.align 2
// Computes 5 consecutive GIFT-128 rounds on the state in r9-r12.
// The round_0..round_4 macros (defined earlier in this file, not visible
// here) consume the round keys via r1 and the round constants via r0, and
// clobber r14, hence the save/restore through [sp].
quintuple_round:
    str.w r14, [sp] // save lr: the round macros use r14 as scratch
    round_0 r9, r10, r11, r12
    round_1 r12, r10, r11, r9
    round_2 r9, r10, r11, r12
    round_3 r12, r10, r11, r9
    round_4 r9, r10, r11, r12
    ldr.w r14, [sp] // restore lr
    // xor-swap: exchanges r9 with (r12 >>> 24) to realign the state words
    eor r9, r9, r12, ror #24
    eor r12, r9, r12, ror #24
    eor r9, r9, r12 // swap r9 with r12 (with a 24-bit rotation folded in)
    bx lr
.size quintuple_round, .-quintuple_round
/*****************************************************************************
* Fully unrolled ARM assembly implementation of the GIFTb-128 block cipher.
* This function simply encrypts a 128-bit block, without any operation mode.
*****************************************************************************/
@ void giftb128_encrypt(u8 *out, const u8* in, const u32* rkey)
@ Encrypts a single 128-bit block with GIFTb-128 (no operation mode).
@ In:  r0 = ciphertext output, r1 = plaintext input, r2 = fixsliced round keys
.global giftb128_encrypt
.type giftb128_encrypt,%function
giftb128_encrypt:
    push {r0-r12,r14} // 'out' (r0) ends up at [sp, #4] after the sub below
    sub.w sp, #4 // to store 'lr' when calling 'quintuple_round'
    ldm r1, {r9-r12} // load plaintext words
    mov r1, r2 // r1 <- rkey pointer (consumed by the round macros)
    rev r9, r9 // endianness
    rev r10, r10
    rev r11, r11
    rev r12, r12
    movw r2, #0x1111
    movt r2, #0x1111 // r2 <- 0x11111111 (for NIBBLE_ROR)
    movw r3, #0x000f
    movt r3, #0x000f // r3 <- 0x000f000f (for HALF_ROR)
    mvn r4, r2, lsl #3 // r4 <- 0x77777777 (for NIBBLE_ROR)
    adr r0, rconst // r0 <- 'rconst' address
    // 8 x 5 = 40 rounds
    bl quintuple_round
    bl quintuple_round
    bl quintuple_round
    bl quintuple_round
    bl quintuple_round
    bl quintuple_round
    bl quintuple_round
    bl quintuple_round
    ldr.w r0, [sp ,#4] // restore 'ctext' address
    rev r9, r9 // endianness
    rev r10, r10
    rev r11, r11
    rev r12, r12
    stm r0, {r9-r12} // store the ciphertext
    add.w sp, #4
    pop {r0-r12,r14}
    bx lr
.size giftb128_encrypt, .-giftb128_encrypt
|
aadomn/cymric
| 25,378
|
artifact_tches2025-3/benchmark_armv7m/aes/aes/aes_encrypt.s
|
/******************************************************************************
* Assembly fixsliced implementation of AES-128 and AES-256 (encryption only).
*
* Fully-fixsliced implementation runs faster than the semi-fixsliced variant
* at the cost of a larger code size.
*
* See the paper at https://eprint.iacr.org/2020/1123.pdf for more details.
*
* @author Alexandre Adomnicai, Nanyang Technological University, Singapore
* alexandre.adomnicai@ntu.edu.sg
*
* @date October 2020
******************************************************************************/
.syntax unified
.thumb
/******************************************************************************
* Macro to compute the SWAPMOVE technique: swap the bits in 'in1' masked by 'm'
* by the bits in 'in0' masked by 'm << n' and put the results in 'out0', 'out1'
******************************************************************************/
// SWAPMOVE: out1 gets in1 with the bits selected by mask 'm' replaced by the
// bits of in0 at positions 'm << n'; out0 gets the complementary exchange.
// 'tmp' must be distinct from all other operands.
.macro swpmv out0, out1, in0, in1, m, n, tmp
    eor \tmp, \in1, \in0, lsr \n // tmp = in1 ^ (in0 >> n)
    and \tmp, \m // keep only the bits selected by the mask
    eor \out1, \in1, \tmp // exchange into out1
    eor \out0, \in0, \tmp, lsl \n // exchange into out0
.endm
/******************************************************************************
* Rotate all bytes in 'in' by 'n0' bits to the rights and put the results in
* 'out'. 'm' refers to the appropriate bitmask and 'n1' = 8-'n0'.
******************************************************************************/
// BYTE_ROR: rotates each byte of 'in' right by n0 bits into 'out'.
// 'm' is the matching bitmask and n1 = 8 - n0. 'tmp' is scratch.
.macro byteror out, in, m, n0, n1, tmp
    and \out, \m, \in, lsr \n0 // low parts of each rotated byte
    bic \tmp, \in, \m, ror \n1 // bits that wrap around within each byte
    orr \out, \out, \tmp, lsl \n1 // combine into the rotated bytes
.endm
/******************************************************************************
* Compute the MixColumns for rounds i st i%4 == 0 or 2.
* Between the two versions, only the masks and the shifts for the 'byteror' are
* differing.
******************************************************************************/
// MixColumns for fixsliced rounds i s.t. i%4 == 0 or 2; the two variants
// differ only in the 'byteror' mask/shifts (m, n0, n1).
// In:  state slices S0..S7 in r1, r3, r6, r7, r8, r0, r2, r11 (the ark_sbox
//      output mapping); note r1 (S0) is consumed first, then reused as the
//      0x0f0f0f0f mask.
// Out: new slices S'0..S'7 in r4, r5, r6, r7, r8, r9, r10, r11.
// Clobbers: r0, r1, r2, r9, r14.
.macro mc_0_2 m, n0, n1, n2, n3
    byteror r14, r1, \m, \n0, \n1, r9 // r14 <- BYTE_ROR_n0(S0)
    eor r4, r1, r14, ror #8 // r4 <- S0 ^ (BYTE_ROR_n0(S0) >>> 8)
    movw r1, #0x0f0f
    movt r1, #0x0f0f // r1 <- 0x0f0f0f0f (for BYTE_ROR; S0 no longer needed)
    byteror r5, r11, \m, \n0, \n1, r9 // r5 <- BYTE_ROR_n0(S7)
    eor r10, r11, r5, ror #8 // r10<- S7 ^ BYTE_ROR_n0(S7 >>> 8)
    byteror r11, r10, r1, 4, 4, r9 // r11<- BYTE_ROR_4(r10)
    eor r11, r4, r11, ror #16 // r11<- BYTE_ROR_4(r10) ^ (r10 >>> 16)
    eor r11, r11, r5, ror #8 // r11<- S'7
    byteror r5, r2, \m, \n0, \n1, r9 // r5 <- BYTE_ROR_n0(S6)
    eor r2, r2, r5, ror #8 // r2 <- S6 ^ BYTE_ROR_n0(S6 >>> 8)
    eor r10, r10, r5, ror #8 // r10<- r10 ^ (BYTE_ROR_n0(S6) >>> 8)
    byteror r5, r2, r1, 4, 4, r9 // r5 <- BYTE_ROR_4(r2)
    eor r10, r10, r5, ror #16 // r10<- r10 ^ (r5 >>> 16)
    eor r10, r10, r4 // r10<- S'6
    byteror r5, r0, \m, \n0, \n1, r9 // r5 <- BYTE_ROR_n0(S5)
    eor r0, r0, r5, ror #8 // r0 <- S5 ^ BYTE_ROR_n0(S5 >>> 8)
    eor r9, r2, r5, ror #8 // r9 <- r2 ^ (BYTE_ROR_n0(S5) >>> 8)
    byteror r5, r0, r1, 4, 4, r2 // r5 <- BYTE_ROR_4(r0)
    eor r9, r9, r5, ror #16 // r9 <- S'5
    byteror r5, r8, \m, \n0, \n1, r2 // r5 <- BYTE_ROR_n0(S4)
    eor r2, r8, r5, ror #8 // r2 <- S4 ^ BYTE_ROR_n0(S4 >>> 8)
    eor r8, r0, r5, ror #8 // r8 <- r0 ^ (BYTE_ROR_n0(S4) >>> 8)
    byteror r5, r2, r1, 4, 4, r0 // r5 <- BYTE_ROR_4(r2)
    eor r8, r8, r5, ror #16 // r8 <- r8 ^ (r5 >>> 16)
    eor r8, r8, r4 // r8 <- S'4
    byteror r5, r7, \m, \n0, \n1, r0 // r5 <- BYTE_ROR_n0(S3)
    eor r0, r7, r5, ror #8 // r0 <- S3 ^ BYTE_ROR_n0(S3 >>> 8)
    eor r7, r2, r5, ror #8 // r7 <- r2 ^ (BYTE_ROR_n0(S3) >>> 8)
    byteror r5, r0, r1, 4, 4, r2 // r5 <- BYTE_ROR_4(r0)
    eor r7, r7, r5, ror #16 // r7 <- r7 ^ (r5 >>> 16)
    eor r7, r7, r4 // r7 <- S'3
    byteror r5, r6, \m, \n0, \n1, r2 // r5 <- BYTE_ROR_n0(S2)
    eor r2, r6, r5, ror #8 // r2 <- S2 ^ BYTE_ROR_n0(S2 >>> 8)
    eor r6, r0, r5, ror #8 // r6 <- r0 ^ (BYTE_ROR_n0(S2) >>> 8)
    byteror r5, r2, r1, 4, 4, r0 // r5 <- BYTE_ROR_4(r2)
    eor r6, r6, r5, ror #16 // r6 <- S'2
    byteror r5, r3, \m, \n0, \n1, r0 // r5 <- BYTE_ROR_n0(S1)
    eor r0, r3, r5, ror #8 // r0 <- S1 ^ BYTE_ROR_n0(S1 >>> 8)
    eor r3, r2, r5, ror #8 // r3 <- r2 ^ (BYTE_ROR_n0(S1) >>> 8)
    byteror r5, r0, r1, 4, 4, r2 // r5 <- BYTE_ROR_4(r0)
    eor r5, r3, r5, ror #16 // r5 <- S'1
    eor r14, r0, r14, ror #8 // r14<- r0 ^ (BYTE_ROR_n0(S0) >>> 8)
    byteror r0, r4, r1, 4, 4, r2 // r0 <- BYTE_ROR_4(r4)
    eor r4, r14, r0, ror #16 // r4 <- S'0
.endm
/******************************************************************************
 * Packs two 128-bit input blocks stored in r4-r7 and r8-r11, respectively, into
* the 256-bit internal state where the bits are packed as follows:
* r4 = b_24 b_56 b_88 b_120 || ... || b_0 b_32 b_64 b_96
* r5 = b_25 b_57 b_89 b_121 || ... || b_1 b_33 b_65 b_97
* r6 = b_26 b_58 b_90 b_122 || ... || b_2 b_34 b_66 b_98
* r7 = b_27 b_59 b_91 b_123 || ... || b_3 b_35 b_67 b_99
* r8 = b_28 b_60 b_92 b_124 || ... || b_4 b_36 b_68 b_100
* r9 = b_29 b_61 b_93 b_125 || ... || b_5 b_37 b_69 b_101
* r10 = b_30 b_62 b_94 b_126 || ... || b_6 b_38 b_70 b_102
* r11 = b_31 b_63 b_95 b_127 || ... || b_7 b_39 b_71 b_103
******************************************************************************/
.align 2
// Bit-interleaves the two 128-bit blocks in r4-r7 / r8-r11 into the packed
// 256-bit internal state (layout documented in the header above) using three
// SWAPMOVE layers (bit, 2-bit, nibble granularity).
// Clobbers: r0, r1, r2, r3, r12.
packing:
    movw r3, #0x0f0f
    movt r3, #0x0f0f // r3 <- 0x0f0f0f0f (mask for SWAPMOVE)
    eor r2, r3, r3, lsl #2 // r2 <- 0x33333333 (mask for SWAPMOVE)
    eor r1, r2, r2, lsl #1 // r1 <- 0x55555555 (mask for SWAPMOVE)
    swpmv r8, r4, r8, r4, r1, #1, r12
    swpmv r9, r5, r9, r5, r1, #1, r12
    swpmv r10, r6, r10, r6, r1, #1, r12
    swpmv r11, r7, r11, r7, r1, #1, r12
    swpmv r0, r4, r5, r4, r2, #2, r12
    swpmv r9, r5, r9, r8, r2, #2, r12
    swpmv r7, r8, r7, r6, r2, #2, r12
    swpmv r11, r2, r11, r10, r2, #2, r12
    swpmv r8, r4, r8, r4, r3, #4, r12
    swpmv r10, r6, r7, r0, r3, #4, r12
    swpmv r11, r7, r11, r9, r3, #4, r12
    swpmv r9, r5, r2, r5, r3, #4, r12
    bx lr
/******************************************************************************
 * Unpacks the 256-bit internal state into two 128-bit blocks.
******************************************************************************/
.align 2
// Inverse of 'packing': de-interleaves the 256-bit internal state back into
// two 128-bit blocks in r4-r7 / r8-r11 (SWAPMOVE layers in reverse order).
// Clobbers: r1, r2, r3, r12.
unpacking:
    movw r3, #0x0f0f
    movt r3, #0x0f0f // r3 <- 0x0f0f0f0f (mask for SWAPMOVE)
    swpmv r2, r5, r9, r5, r3, #4, r12
    swpmv r11, r9, r11, r7, r3, #4, r12
    swpmv r7, r1, r10, r6, r3, #4, r12
    swpmv r8, r4, r8, r4, r3, #4, r12
    eor r3, r3, r3, lsl #2 // r3 <- 0x33333333 (mask for SWAPMOVE)
    swpmv r11, r10,r11, r2, r3, #2, r12
    swpmv r7, r6, r7, r8, r3, #2, r12
    swpmv r9, r8, r9, r5, r3, #2, r12
    swpmv r5, r4, r1, r4, r3, #2, r12
    eor r1, r3, r3, lsl #1 // r1 <- 0x55555555 (mask for SWAPMOVE)
    swpmv r8, r4, r8, r4, r1, #1, r12
    swpmv r9, r5,r9, r5, r1, #1, r12
    swpmv r10, r6, r10, r6, r1, #1, r12
    swpmv r11, r7, r11, r7, r1, #1, r12
    bx lr
/******************************************************************************
* Subroutine that computes the AddRoundKey and the S-box.
* Credits to https://github.com/Ko-/aes-armcortexm for the S-box implementation
******************************************************************************/
.align 2
// AddRoundKey + bitsliced AES S-box on the packed state in r4-r11.
// [sp, #48] holds the current round-key pointer (advanced by 32 bytes here);
// [sp, #52] saves lr; [sp, #0..#44] are scratch slots for S-box temporaries.
// Output slice mapping (see the comment after 'bx lr'):
//   r1=S0, r3=S1, r6=S2, r7=S3, r8=S4, r0=S5, r2=S6, r11=S7.
// The omitted NOTs of the S-box are compensated in the round keys.
ark_sbox:
    // add round key
    ldr.w r1, [sp, #48] // r1 <- current rkey pointer
    ldmia r1!, {r0,r2,r3,r12}
    eor r4, r0
    eor r5, r2
    eor r6, r3
    eor r7, r12
    ldmia r1!, {r0,r2,r3,r12}
    eor r8, r0
    eor r9, r2
    eor r10, r3
    eor r11, r12
    str.w r1, [sp, #48] // store back the advanced rkey pointer
    str r14, [sp, #52] // save link register
    // sbox: credits to https://github.com/Ko-/aes-armcortexm
    eor r1, r7, r9 //Exec y14 = U3 ^ U5; into r1
    eor r3, r4, r10 //Exec y13 = U0 ^ U6; into r3
    eor r2, r3, r1 //Exec y12 = y13 ^ y14; into r2
    eor r0, r8, r2 //Exec t1 = U4 ^ y12; into r0
    eor r14, r0, r9 //Exec y15 = t1 ^ U5; into r14
    and r12, r2, r14 //Exec t2 = y12 & y15; into r12
    eor r8, r14, r11 //Exec y6 = y15 ^ U7; into r8
    eor r0, r0, r5 //Exec y20 = t1 ^ U1; into r0
    str.w r2, [sp, #44] //Store r2/y12 on stack
    eor r2, r4, r7 //Exec y9 = U0 ^ U3; into r2
    str r0, [sp, #40] //Store r0/y20 on stack
    eor r0, r0, r2 //Exec y11 = y20 ^ y9; into r0
    str r2, [sp, #36] //Store r2/y9 on stack
    and r2, r2, r0 //Exec t12 = y9 & y11; into r2
    str r8, [sp, #32] //Store r8/y6 on stack
    eor r8, r11, r0 //Exec y7 = U7 ^ y11; into r8
    eor r9, r4, r9 //Exec y8 = U0 ^ U5; into r9
    eor r6, r5, r6 //Exec t0 = U1 ^ U2; into r6
    eor r5, r14, r6 //Exec y10 = y15 ^ t0; into r5
    str r14, [sp, #28] //Store r14/y15 on stack
    eor r14, r5, r0 //Exec y17 = y10 ^ y11; into r14
    str.w r1, [sp, #24] //Store r1/y14 on stack
    and r1, r1, r14 //Exec t13 = y14 & y17; into r1
    eor r1, r1, r2 //Exec t14 = t13 ^ t12; into r1
    str r14, [sp, #20] //Store r14/y17 on stack
    eor r14, r5, r9 //Exec y19 = y10 ^ y8; into r14
    str.w r5, [sp, #16] //Store r5/y10 on stack
    and r5, r9, r5 //Exec t15 = y8 & y10; into r5
    eor r2, r5, r2 //Exec t16 = t15 ^ t12; into r2
    eor r5, r6, r0 //Exec y16 = t0 ^ y11; into r5
    str.w r0, [sp, #12] //Store r0/y11 on stack
    eor r0, r3, r5 //Exec y21 = y13 ^ y16; into r0
    str r3, [sp, #8] //Store r3/y13 on stack
    and r3, r3, r5 //Exec t7 = y13 & y16; into r3
    str r5, [sp, #4] //Store r5/y16 on stack
    str r11, [sp, #0] //Store r11/U7 on stack
    eor r5, r4, r5 //Exec y18 = U0 ^ y16; into r5
    eor r6, r6, r11 //Exec y1 = t0 ^ U7; into r6
    eor r7, r6, r7 //Exec y4 = y1 ^ U3; into r7
    and r11, r7, r11 //Exec t5 = y4 & U7; into r11
    eor r11, r11, r12 //Exec t6 = t5 ^ t2; into r11
    eor r11, r11, r2 //Exec t18 = t6 ^ t16; into r11
    eor r14, r11, r14 //Exec t22 = t18 ^ y19; into r14
    eor r4, r6, r4 //Exec y2 = y1 ^ U0; into r4
    and r11, r4, r8 //Exec t10 = y2 & y7; into r11
    eor r11, r11, r3 //Exec t11 = t10 ^ t7; into r11
    eor r2, r11, r2 //Exec t20 = t11 ^ t16; into r2
    eor r2, r2, r5 //Exec t24 = t20 ^ y18; into r2
    eor r10, r6, r10 //Exec y5 = y1 ^ U6; into r10
    and r11, r10, r6 //Exec t8 = y5 & y1; into r11
    eor r3, r11, r3 //Exec t9 = t8 ^ t7; into r3
    eor r3, r3, r1 //Exec t19 = t9 ^ t14; into r3
    eor r3, r3, r0 //Exec t23 = t19 ^ y21; into r3
    eor r0, r10, r9 //Exec y3 = y5 ^ y8; into r0
    ldr r11, [sp, #32] //Load y6 into r11
    and r5, r0, r11 //Exec t3 = y3 & y6; into r5
    eor r12, r5, r12 //Exec t4 = t3 ^ t2; into r12
    ldr r5, [sp, #40] //Load y20 into r5
    str r7, [sp, #32] //Store r7/y4 on stack
    eor r12, r12, r5 //Exec t17 = t4 ^ y20; into r12
    eor r1, r12, r1 //Exec t21 = t17 ^ t14; into r1
    and r12, r1, r3 //Exec t26 = t21 & t23; into r12
    eor r5, r2, r12 //Exec t27 = t24 ^ t26; into r5
    eor r12, r14, r12 //Exec t31 = t22 ^ t26; into r12
    eor r1, r1, r14 //Exec t25 = t21 ^ t22; into r1
    and r7, r1, r5 //Exec t28 = t25 & t27; into r7
    eor r14, r7, r14 //Exec t29 = t28 ^ t22; into r14
    and r4, r14, r4 //Exec z14 = t29 & y2; into r4
    and r8, r14, r8 //Exec z5 = t29 & y7; into r8
    eor r7, r3, r2 //Exec t30 = t23 ^ t24; into r7
    and r12, r12, r7 //Exec t32 = t31 & t30; into r12
    eor r12, r12, r2 //Exec t33 = t32 ^ t24; into r12
    eor r7, r5, r12 //Exec t35 = t27 ^ t33; into r7
    and r2, r2, r7 //Exec t36 = t24 & t35; into r2
    eor r5, r5, r2 //Exec t38 = t27 ^ t36; into r5
    and r5, r14, r5 //Exec t39 = t29 & t38; into r5
    eor r1, r1, r5 //Exec t40 = t25 ^ t39; into r1
    eor r5, r14, r1 //Exec t43 = t29 ^ t40; into r5
    ldr.w r7, [sp, #4] //Load y16 into r7
    and r7, r5, r7 //Exec z3 = t43 & y16; into r7
    eor r8, r7, r8 //Exec tc12 = z3 ^ z5; into r8
    str r8, [sp, #40] //Store r8/tc12 on stack
    ldr r8, [sp, #8] //Load y13 into r8
    and r8, r5, r8 //Exec z12 = t43 & y13; into r8
    and r10, r1, r10 //Exec z13 = t40 & y5; into r10
    and r6, r1, r6 //Exec z4 = t40 & y1; into r6
    eor r6, r7, r6 //Exec tc6 = z3 ^ z4; into r6
    eor r3, r3, r12 //Exec t34 = t23 ^ t33; into r3
    eor r3, r2, r3 //Exec t37 = t36 ^ t34; into r3
    eor r1, r1, r3 //Exec t41 = t40 ^ t37; into r1
    ldr.w r5, [sp, #16] //Load y10 into r5
    and r2, r1, r5 //Exec z8 = t41 & y10; into r2
    and r9, r1, r9 //Exec z17 = t41 & y8; into r9
    str r9, [sp, #16] //Store r9/z17 on stack
    eor r5, r12, r3 //Exec t44 = t33 ^ t37; into r5
    ldr r9, [sp, #28] //Load y15 into r9
    ldr.w r7, [sp, #44] //Load y12 into r7
    and r9, r5, r9 //Exec z0 = t44 & y15; into r9
    and r7, r5, r7 //Exec z9 = t44 & y12; into r7
    and r0, r3, r0 //Exec z10 = t37 & y3; into r0
    and r3, r3, r11 //Exec z1 = t37 & y6; into r3
    eor r3, r3, r9 //Exec tc5 = z1 ^ z0; into r3
    eor r3, r6, r3 //Exec tc11 = tc6 ^ tc5; into r3
    ldr r11, [sp, #32] //Load y4 into r11
    ldr.w r5, [sp, #20] //Load y17 into r5
    and r11, r12, r11 //Exec z11 = t33 & y4; into r11
    eor r14, r14, r12 //Exec t42 = t29 ^ t33; into r14
    eor r1, r14, r1 //Exec t45 = t42 ^ t41; into r1
    and r5, r1, r5 //Exec z7 = t45 & y17; into r5
    eor r6, r5, r6 //Exec tc8 = z7 ^ tc6; into r6
    ldr r5, [sp, #24] //Load y14 into r5
    str r4, [sp, #32] //Store r4/z14 on stack
    and r1, r1, r5 //Exec z16 = t45 & y14; into r1
    ldr r5, [sp, #12] //Load y11 into r5
    ldr r4, [sp, #36] //Load y9 into r4
    and r5, r14, r5 //Exec z6 = t42 & y11; into r5
    eor r5, r5, r6 //Exec tc16 = z6 ^ tc8; into r5
    and r4, r14, r4 //Exec z15 = t42 & y9; into r4
    eor r14, r4, r5 //Exec tc20 = z15 ^ tc16; into r14
    eor r4, r4, r1 //Exec tc1 = z15 ^ z16; into r4
    eor r1, r0, r4 //Exec tc2 = z10 ^ tc1; into r1
    eor r0, r1, r11 //Exec tc21 = tc2 ^ z11; into r0
    eor r7, r7, r1 //Exec tc3 = z9 ^ tc2; into r7
    eor r1, r7, r5 //Exec S0 = tc3 ^ tc16; into r1
    eor r7, r7, r3 //Exec S3 = tc3 ^ tc11; into r7
    eor r3, r7, r5 //Exec S1 = S3 ^ tc16 ^ 1; into r3
    eor r11, r10, r4 //Exec tc13 = z13 ^ tc1; into r11
    ldr.w r4, [sp, #0] //Load U7 into r4
    and r12, r12, r4 //Exec z2 = t33 & U7; into r12
    eor r9, r9, r12 //Exec tc4 = z0 ^ z2; into r9
    eor r12, r8, r9 //Exec tc7 = z12 ^ tc4; into r12
    eor r2, r2, r12 //Exec tc9 = z8 ^ tc7; into r2
    eor r2, r6, r2 //Exec tc10 = tc8 ^ tc9; into r2
    ldr.w r4, [sp, #32] //Load z14 into r4
    eor r12, r4, r2 //Exec tc17 = z14 ^ tc10; into r12
    eor r0, r0, r12 //Exec S5 = tc21 ^ tc17; into r0
    eor r6, r12, r14 //Exec tc26 = tc17 ^ tc20; into r6
    ldr.w r4, [sp, #16] //Load z17 into r4
    ldr r12, [sp, #40] //Load tc12 into r12
    eor r6, r6, r4 //Exec S2 = tc26 ^ z17 ^ 1; into r6
    eor r12, r9, r12 //Exec tc14 = tc4 ^ tc12; into r12
    eor r14, r11, r12 //Exec tc18 = tc13 ^ tc14; into r14
    eor r2, r2, r14 //Exec S6 = tc10 ^ tc18 ^ 1; into r2
    eor r11, r8, r14 //Exec S7 = z12 ^ tc18 ^ 1; into r11
    ldr r14, [sp, #52] // restore link register
    eor r8, r12, r7 //Exec S4 = tc14 ^ S3; into r8
    bx lr
// [('r0', 'S5'), ('r1', 'S0'), ('r2', 'S6'), ('r3', 'S1'),
// ('r6', 'S2'),('r7', 'S3'), ('r8', 'S4'), ('r11', 'S7')]
/******************************************************************************
* Computation of the MixColumns transformation in the fixsliced representation.
* For fully-fixsliced implementations, it is used for rounds i s.t. (i%4) == 0.
* For semi-fixsliced implementations, it is used for rounds i s.t. (i%2) == 0.
******************************************************************************/
.align 2
// MixColumns for fixsliced rounds i s.t. i%4 == 0 (fully-fixsliced) or
// i%2 == 0 (semi-fixsliced). lr is saved because mc_0_2 uses r14 as scratch.
mixcolumns_0:
    str r14, [sp, #52] // store link register
    movw r12, #0x0303
    movt r12, #0x0303 // r12<- 0x03030303 (mask for BYTE_ROR_6)
    mc_0_2 r12, 6, 2, 26, 18 // MixColumns based on BYTE_ROR_6
    ldr r14, [sp, #52] // restore link register
    bx lr
/******************************************************************************
* Computation of the MixColumns transformation in the fixsliced representation.
* For fully-fixsliced implementations, it is used for rounds i s.t. (i%4) == 3.
* For semi-fixsliced implementations, it is used for rounds i s.t. (i%2) == 1.
* Based on Käsper-Schwabe, similar to https://github.com/Ko-/aes-armcortexm.
******************************************************************************/
.align 2
// MixColumns for fixsliced rounds i s.t. i%4 == 3 (fully-fixsliced) or
// i%2 == 1 (semi-fixsliced) — the state is back in the classical word order
// here, so plain 8/16-bit rotations suffice (Käsper-Schwabe style).
// In:  slices S0..S7 in r1, r3, r6, r7, r8, r0, r2, r11 (ark_sbox outputs).
// Out: new slices S'0..S'7 in r4-r11. Clobbers r12.
mixcolumns_3:
    eor r12, r11, r11, ror #8 // r12<- S7 ^ (S7 >>> 8)
    eor r4, r1, r1, ror #8 // r4 <- S0 ^ (S0 >>> 8)
    eor r11, r4, r11, ror #8 // r11<- S0 ^ (S0 >>> 8) ^ (S7 >>> 8)
    eor r11, r11, r12, ror #16 // r11<- r11 ^ (S7 >>> 16) ^ (S7 >>> 24)
    eor r10, r12, r2, ror #8 // r10<- S7 ^ (S7 >>> 8) ^ (S6 >>> 8)
    eor r12, r2, r2, ror #8 // r12<- S6 ^ (S6 >>> 8)
    eor r10, r10, r12, ror #16 // r10<- r10 ^ (S6 >>> 16) ^ (S6 >>> 24)
    eor r10, r4 // r10<- r10 ^ S0 ^ (S0 >>> 8)
    eor r9, r12, r0, ror #8 // r9 <- S6 ^ (S6 >>> 8) ^ (S5 >>> 8)
    eor r12, r0, r0, ror #8 // r12<- S5 ^ (S5 >>> 8)
    eor r9, r9, r12, ror #16 // r9 <- r9 ^ (S5 >>> 16) ^ (S5 >>> 24)
    eor r2, r8, r8, ror #8 // r2 <- S4 ^ (S4 >>> 8)
    eor r8, r12, r8, ror #8 // r8 <- S5 ^ (S5 >>> 8) ^ (S4 >>> 8)
    eor r8, r4 // r8 <- r8 ^ S0 ^ (S0 >>> 8)
    eor r8, r8, r2, ror #16 // r8 <- r8 ^ (S4 >>> 16) ^ (S4 >>> 24)
    eor r12, r7, r7, ror #8 // r12<- S3 ^ (S3 >>> 8)
    eor r7, r2, r7, ror #8 // r7 <- S4 ^ (S4 >>> 8) ^ (S3 >>> 8)
    eor r7, r4 // r7 <- r7 ^ S0 ^ (S0 >>> 8)
    eor r7, r7, r12, ror #16 // r7 <- r7 ^ (S3 >>> 16) ^ (S3 >>> 24)
    eor r2, r6, r6, ror #8 // r2 <- S2 ^ (S2 >>> 8)
    eor r6, r12, r6, ror #8 // r6 <- S3 ^ (S3 >>> 8) ^ (S2 >>> 8)
    eor r6, r6, r2, ror #16 // r6 <- r6 ^ (S2 >>> 16) ^ (S2 >>> 24)
    eor r12, r3, r3, ror #8 // r12<- S1 ^ (S1 >>> 8)
    eor r5, r2, r3, ror #8 // r5 <- S2 ^ (S2 >>> 8) ^ (S1 >>> 8)
    eor r5, r5, r12, ror #16 // r5 <- r5 ^ (S1 >>> 16) ^ (S1 >>> 24)
    eor r4, r12, r4, ror #16 // r4 <- S1 ^ (S1 >>> 8) ^ (r4 >>> 16)
    eor r4, r4, r1, ror #8 // r4 <- r4 ^ (S0 >>> 8)
    bx lr
/******************************************************************************
* Applies the ShiftRows transformation twice (i.e. SR^2) on the internal state.
******************************************************************************/
.align 2
// Applies ShiftRows twice (SR^2) on the internal state, i.e. swaps nibbles
// within the masked byte lanes of each slice via a self-SWAPMOVE.
// Operates on the S-box output registers r0-r3, r6-r8, r11; clobbers r10, r12.
double_shiftrows:
    movw r10, #0x0f00
    movt r10, #0x0f00 // r10<- 0x0f000f00 (mask)
    swpmv r0, r0, r0, r0, r10, #4, r12
    swpmv r1, r1, r1, r1, r10, #4, r12
    swpmv r2, r2, r2, r2, r10, #4, r12
    swpmv r3, r3, r3, r3, r10, #4, r12
    swpmv r6, r6, r6, r6, r10, #4, r12
    swpmv r7, r7, r7, r7, r10, #4, r12
    swpmv r8, r8, r8, r8, r10, #4, r12
    swpmv r11, r11, r11, r11, r10, #4, r12
    bx lr
/******************************************************************************
* Semi-fixsliced implementation of AES-128.
*
* Two blocks are encrypted in parallel.
*
* Note that additional 4 bytes are allocated on the stack as the function takes
* 5 arguments as input.
******************************************************************************/
@ void aes128_encrypt_sfs(u8* ctext, u8* ctext_bis, const u8* ptext,
@       const u8* ptext_bis, const u32* rkey);
@ Semi-fixsliced AES-128 encryption of two blocks in parallel.
@ Stack layout after the prologue: [sp, #0..#47] scratch for ark_sbox,
@ [sp, #48] running rkey pointer, [sp, #52] lr save, [sp, #56]/[sp, #60]
@ saved r0/r1 (output addresses), [sp, #112] the 5th argument (rkey).
.global aes128_encrypt_sfs
.type aes128_encrypt_sfs,%function
.align 2
aes128_encrypt_sfs:
    push {r0-r12,r14}
    sub.w sp, #56 // allow space on the stack for tmp var
    ldr.w r4, [r2] // load the 1st 128-bit blocks in r4-r7
    ldr r5, [r2, #4]
    ldr r6, [r2, #8]
    ldr r7, [r2, #12]
    ldr.w r8, [r3] // load the 2nd 128-bit blocks in r8-r11
    ldr r9, [r3, #4]
    ldr r10,[r3, #8]
    ldr r11,[r3, #12]
    ldr.w r1, [sp, #112] // load 'rkey' argument from the stack
    str.w r1, [sp, #48] // store it there for 'add_round_key'
    bl packing // pack the 2 input blocks
    bl ark_sbox // ark + sbox (round 0)
    bl mixcolumns_0 // mixcolumns (round 0)
    bl ark_sbox // ark + sbox (round 1)
    bl double_shiftrows // to resynchronize with the classical rep
    bl mixcolumns_3 // mixcolumns (round 1)
    bl ark_sbox // ark + sbox (round 2)
    bl mixcolumns_0 // mixcolumns (round 2)
    bl ark_sbox // ark + sbox (round 3)
    bl double_shiftrows // to resynchronize with the classical rep
    bl mixcolumns_3 // mixcolumns (round 3)
    bl ark_sbox // ark + sbox (round 4)
    bl mixcolumns_0 // mixcolumns (round 4)
    bl ark_sbox // ark + sbox (round 5)
    bl double_shiftrows // to resynchronize with the classical rep
    bl mixcolumns_3 // mixcolumns (round 5)
    bl ark_sbox // ark + sbox (round 6)
    bl mixcolumns_0 // mixcolumns (round 6)
    bl ark_sbox // ark + sbox (round 7)
    bl double_shiftrows // to resynchronize with the classical rep
    bl mixcolumns_3 // mixcolumns (round 7)
    bl ark_sbox // ark + sbox (round 8)
    bl mixcolumns_0 // mixcolumns (round 8)
    bl ark_sbox // ark + sbox (round 9)
    bl double_shiftrows // to resynchronize with the classical rep
    // Last AddRoundKey, applied directly on the S-box output registers
    // (r1=S0, r3=S1, r6=S2, r7=S3, r8=S4, r0=S5, r2=S6, r11=S7) so the
    // result lands in r4-r11 in the packed layout expected by 'unpacking'.
    ldr r14, [sp, #48] // ---------------------------------------
    ldmia r14!, {r4,r5,r10,r12} //
    eor r4, r1 //
    eor r5, r3 //
    eor r6, r10 //
    eor r7, r12 // Last add_round_key
    ldmia r14!, {r1,r3,r10,r12} //
    eor r8, r1 //
    eor r9, r0, r3 //
    eor r10, r2 //
    eor r11, r12 // ---------------------------------------
    bl unpacking // unpack the internal state
    ldrd r0, r1, [sp, #56] // restore the addr to store the ciphertext
    add.w sp, #64 // restore the stack pointer
    str.w r4, [r0] // store the ciphertext
    str r5, [r0, #4]
    str r6, [r0, #8]
    str r7, [r0, #12]
    str.w r8, [r1] // store the ciphertext
    str r9, [r1, #4]
    str r10,[r1, #8]
    str r11,[r1, #12]
    pop {r2-r12, r14} // restore context
    bx lr
|
aadomn/cymric
| 11,458
|
artifact_tches2025-3/benchmark_armv7m/aes/aes/aes_keyschedule_lut.s
|
/******************************************************************************
* ARM assembly implementations of the AES-128 and AES-256 key schedules to
* match fixslicing.
* Note that those implementations rely on Look-Up Tables (LUT).
*
* See the paper at https://eprint.iacr.org/2020/1123.pdf for more details.
*
* @author Alexandre Adomnicai, Nanyang Technological University, Singapore
* alexandre.adomnicai@ntu.edu.sg
*
* @date August 2020
******************************************************************************/
.syntax unified
.thumb
/******************************************************************************
* LUT of the AES S-box.
******************************************************************************/
.align 2
// The 256 AES S-box bytes packed 4 per little-endian word: S[0]=0x63 is the
// least-significant byte of the first word. The key-schedule round function
// loads a whole word and then shifts the wanted byte out.
.type AES_Sbox_compact,%object
AES_Sbox_compact:
.word 0x7b777c63, 0xc56f6bf2, 0x2b670130, 0x76abd7fe
.word 0x7dc982ca, 0xf04759fa, 0xafa2d4ad, 0xc072a49c
.word 0x2693fdb7, 0xccf73f36, 0xf1e5a534, 0x1531d871
.word 0xc323c704, 0x9a059618, 0xe2801207, 0x75b227eb
.word 0x1a2c8309, 0xa05a6e1b, 0xb3d63b52, 0x842fe329
.word 0xed00d153, 0x5bb1fc20, 0x39becb6a, 0xcf584c4a
.word 0xfbaaefd0, 0x85334d43, 0x7f02f945, 0xa89f3c50
.word 0x8f40a351, 0xf5389d92, 0x21dab6bc, 0xd2f3ff10
.word 0xec130ccd, 0x1744975f, 0x3d7ea7c4, 0x73195d64
.word 0xdc4f8160, 0x88902a22, 0x14b8ee46, 0xdb0b5ede
.word 0x0a3a32e0, 0x5c240649, 0x62acd3c2, 0x79e49591
.word 0x6d37c8e7, 0xa94ed58d, 0xeaf4566c, 0x08ae7a65
.word 0x2e2578ba, 0xc6b4a61c, 0x1f74dde8, 0x8a8bbd4b
.word 0x66b53e70, 0x0ef60348, 0xb9573561, 0x9e1dc186
.word 0x1198f8e1, 0x948ed969, 0xe9871e9b, 0xdf2855ce
.word 0x0d89a18c, 0x6842e6bf, 0x0f2d9941, 0x16bb54b0
/******************************************************************************
* Round function of the AES-128 key expansion.
* Note that it expects r2 to contain the corresponding round constant and r3 to
* contain the S-box address.
******************************************************************************/
.align 2
// One round of the AES-128 key expansion on the 4 key words in r4-r7.
// In:  r2 = round constant, r3 = AES_Sbox_compact address.
// The S-box is word-addressed: (byte & 0xfc) indexes the word containing the
// byte, then a shift by 8*(byte & 3) extracts it — hence the 0xfc/0x18 masks.
// Out: updated key words in r4-r7, also pushed on the stack (the caller pops
// them back when packing the round keys). Clobbers r1, r8-r12.
aes128_keyschedule_rfunc:
    movw r1, #0xfc // word-aligned byte offset mask
    and r8, r1, r7, lsr #8
    and r9, r1, r7, lsr #16
    and r10, r1, r7, lsr #24
    and r11, r1, r7
    ldr r8, [r3, r8] // computes the sbox using the LUT
    ldr r9, [r3, r9] // computes the sbox using the LUT
    ldr r10, [r3, r10] // computes the sbox using the LUT
    ldr r11, [r3, r11] // computes the sbox using the LUT
    movw r1, #0x18 // 8*(byte & 3) shift-amount mask
    and r12, r1, r7, lsr #5
    lsr r8, r8, r12
    and r8, #0xff
    and r12, r1, r7, lsr #13
    lsr r9, r9, r12
    and r9, #0xff
    and r12, r1, r7, lsr #21
    lsr r10, r10, r12
    and r10, #0xff
    and r12, r1, r7, lsl #3
    lsr r11, r11, r12
    and r11, #0xff
    eor r4, r2 // adds the first rconst
    eor r4, r8 // xor the columns (1st sbox byte)
    eor r4, r4, r9, ror #24 // xor the columns (2nd sbox byte)
    eor r4, r4, r10, ror #16 // xor the columns (3rd sbox byte)
    eor r4, r4, r11, ror #8 // xor the columns (4th sbox byte)
    eor r5, r4 // xor the columns
    eor r6, r5 // xor the columns
    eor r7, r6 // xor the columns
    push.w {r4-r7} // stash this round's key words for later packing
    bx lr
/******************************************************************************
* Packing routine. Note that it is the same as the one used in the encryption
* function so some code size could be saved by merging the two files.
******************************************************************************/
.align 2
// Packs the two round-key copies in r4-r7 / r8-r11 into the fixsliced
// representation (same SWAPMOVE network as the encryption 'packing' routine),
// applies the NOTs omitted from the S-box, and stores the 8 words at
// descending addresses: r0 is pre-decremented by 32 (the buffer is filled
// backwards). Masks expected in r1 (0x55555555), r2 (0x33333333),
// r3 (0x0f0f0f0f), set by the caller. Clobbers r4-r12.
packing_rkey:
    eor r12, r8, r8, lsr #1 // SWAPMOVE(r8, r4, 0x55555555, 1) ....
    and r12, r1
    eor r4, r8, r12
    eor r8, r8, r12, lsl #1 // .... SWAPMOVE(r8, r4, 0x55555555, 1)
    eor r12, r9, r9, lsr #1 // SWAPMOVE(r9, r5, 0x55555555, 1) ....
    and r12, r1
    eor r5, r9, r12
    eor r9, r9, r12, lsl #1 // .... SWAPMOVE(r9, r5, 0x55555555, 1)
    eor r12, r10, r10, lsr #1 // SWAPMOVE(r10, r6, 0x55555555, 1) ....
    and r12, r1
    eor r6, r10, r12
    eor r10, r10, r12, lsl #1 // .... SWAPMOVE(r10, r6, 0x55555555, 1)
    eor r12, r11, r11, lsr #1 // SWAPMOVE(r11, r7, 0x55555555, 1) ....
    and r12, r1
    eor r7, r11, r12
    eor r11, r11, r12, lsl #1 // .... SWAPMOVE(r11, r7, 0x55555555, 1)
    eor r12, r4, r5, lsr #2 // SWAPMOVE(r5, r4, 0x33333333, 2) ....
    and r12, r2
    eor r4, r12
    eor r5, r5, r12, lsl #2 // .... SWAPMOVE(r5, r4, 0x33333333, 2)
    eor r12, r8, r9, lsr #2 // SWAPMOVE(r9, r8, 0x33333333, 2) ....
    and r12, r2
    eor r8, r8, r12
    eor r9, r9, r12, lsl #2 // .... SWAPMOVE(r9, r8, 0x33333333, 2)
    eor r12, r6, r7, lsr #2 // SWAPMOVE(r7, r6, 0x33333333, 2) ....
    and r12, r2
    eor r6, r6, r12
    eor r7, r7, r12, lsl #2 // .... SWAPMOVE(r7, r6, 0x33333333, 2)
    eor r12, r10, r11, lsr #2 // SWAPMOVE(r11, r10, 0x33333333, 2) ....
    and r12, r2
    eor r10, r10, r12
    eor r11, r11, r12, lsl #2 // .... SWAPMOVE(r11, r10, 0x33333333, 2)
    eor r12, r4, r6, lsr #4 // SWAPMOVE(r6, r4, 0x0f0f0f0f, 4) ....
    and r12, r3
    eor r4, r12
    eor r6, r6, r12, lsl #4 // .... SWAPMOVE(r6, r4, 0x0f0f0f0f,4)
    eor r12, r5, r7, lsr #4 // SWAPMOVE(r7, r5, 0x0f0f0f0f, 4) ....
    and r12, r3
    eor r5, r5, r12
    eor r7, r7, r12, lsl #4 // .... SWAPMOVE(r7, r5, 0x0f0f0f0f, 4)
    eor r12, r8, r10, lsr #4 // SWAPMOVE(r10, r8, 0x0f0f0f0f, 4) ....
    and r12, r3
    eor r8, r8, r12
    eor r10, r10, r12, lsl #4 // .... SWAPMOVE(r10,r8, 0x0f0f0f0f, 4)
    eor r12, r9, r11, lsr #4 // SWAPMOVE(r11, r9, 0x0f0f0f0f, 4) ....
    and r12, r3
    eor r9, r12
    eor r11, r11, r12, lsl #4 // .... SWAPMOVE(r11, r9, 0x0f0f0f0f, 4)
    // NOTs omitted in the S-box, folded into the round keys
    mvn r5, r5
    mvn r8, r8
    mvn r7, r7
    mvn r11, r11
    strd r7, r11, [r0, #-8]
    strd r6, r10, [r0, #-16]
    strd r5, r9, [r0, #-24]
    strd r4, r8, [r0, #-32]! // fill backwards; r0 ends 32 bytes lower
    bx lr
/******************************************************************************
* Applies ShiftRows^(-1) on a round key to match fully/semi-fixslicing.
******************************************************************************/
.align 2
// Applies ShiftRows^(-1) byte-wise on the round-key words r4-r7 to match the
// fully/semi-fixsliced representation: each output word rN (r8-r11) takes
// byte 0 from one input word and bytes 1..3 from the rotated neighbours.
// In:  r4-r7 = round-key words. Out: r8-r11. Clobbers r12.
inv_shiftrows_1:
    and r8, r4, #0xff // r8 <- bytes {r4[0], r7[1], r6[2], r5[3]}
    and r12, r7, #0xff00
    orr r8, r8, r12
    and r12, r6, #0xff0000
    orr r8, r8, r12
    and r12, r5, #0xff000000
    orr r8, r8, r12
    and r9, r5, #0xff // r9 <- bytes {r5[0], r4[1], r7[2], r6[3]}
    and r12, r4, #0xff00
    orr r9, r9, r12
    and r12, r7, #0xff0000
    orr r9, r9, r12
    and r12, r6, #0xff000000
    orr r9, r9, r12
    and r10, r6, #0xff // r10 <- bytes {r6[0], r5[1], r4[2], r7[3]}
    and r12, r5, #0xff00
    orr r10, r10, r12
    and r12, r4, #0xff0000
    orr r10, r10, r12
    and r12, r7, #0xff000000
    orr r10, r10, r12
    and r11, r7, #0xff // r11 <- bytes {r7[0], r6[1], r5[2], r4[3]}
    and r12, r6, #0xff00
    orr r11, r11, r12
    and r12, r5, #0xff0000
    orr r11, r11, r12
    and r12, r4, #0xff000000
    orr r11, r11, r12
    bx lr
/******************************************************************************
* Pre-computes all the round keys for a given encryption key, according to the
* semi-fixsliced (sfs) representation.
* Note that the round keys also include the NOTs omitted in the S-box.
*
* Structure: ten calls to aes128_keyschedule_rfunc (defined elsewhere) expand
* the key; the pop.w {r4-r7} sequences below retrieve one 16-byte round key
* per call, so rfunc is expected to leave each new round key on the stack —
* NOTE(review): confirm against aes128_keyschedule_rfunc's stack discipline.
* The round keys are then packed (bitsliced) back-to-front via packing_rkey,
* with inv_shiftrows_1 applied to every other key to match semi-fixslicing.
* The final block restores rk[0..31] (the unpacked original key) without the
* S-box NOT compensation.
******************************************************************************/
@ void aes128_keyschedule_sfs_lut(u32* rkeys, const u8* key);
.global aes128_keyschedule_sfs_lut
.type aes128_keyschedule_sfs_lut,%function
.align 2
aes128_keyschedule_sfs_lut:
push {r1-r12,r14} // r1 (key ptr) saved first; read back via [sp] below
ldr.w r4, [r1] // load the encryption key
ldr r5, [r1, #4]
ldr r6, [r1, #8]
ldr r7, [r1, #12]
adr r3, AES_Sbox_compact // load the sbox LUT address in r3
movw r2, #0x01 // 1st rconst
bl aes128_keyschedule_rfunc // 1st round
movw r2, #0x02 // 2nd rconst
bl aes128_keyschedule_rfunc // 2nd round
movw r2, #0x04 // 3rd rconst
bl aes128_keyschedule_rfunc // 3rd round
movw r2, #0x08 // 4th rconst
bl aes128_keyschedule_rfunc // 4th round
movw r2, #0x10 // 5th rconst
bl aes128_keyschedule_rfunc // 5th round
movw r2, #0x20 // 6th rconst
bl aes128_keyschedule_rfunc // 6th round
movw r2, #0x40 // 7th rconst
bl aes128_keyschedule_rfunc // 7th round
movw r2, #0x80 // 8th rconst
bl aes128_keyschedule_rfunc // 8th round
movw r2, #0x1b // 9th rconst
bl aes128_keyschedule_rfunc // 9th round
movw r2, #0x36 // 10th rconst
bl aes128_keyschedule_rfunc // 10th round
//done expanding, now start bitslicing
//set r0 to end of rk, to be filled backwards (11 keys * 32 bytes = 352)
add r0, #352
movw r3, #0x0f0f
movt r3, #0x0f0f // r3 <- 0x0f0f0f0f (mask for SWAPMOVE)
eor r2, r3, r3, lsl #2 // r2 <- 0x33333333 (mask for SWAPMOVE)
eor r1, r2, r2, lsl #1 // r1 <- 0x55555555 (mask for SWAPMOVE)
pop.w {r4-r7} // round key 10 (popped in reverse order of expansion)
mov r8, r4 // duplicate: packing_rkey consumes r4-r7 and r8-r11
mov r9, r5
mov r10, r6
mov r11, r7
bl packing_rkey
pop.w {r4-r7} // round key 9: inverse ShiftRows for semi-fixslicing
bl inv_shiftrows_1
bl packing_rkey
pop.w {r4-r7} // round key 8
mov r8, r4
mov r9, r5
mov r10, r6
mov r11, r7
bl packing_rkey
pop.w {r4-r7} // round key 7
bl inv_shiftrows_1
bl packing_rkey
pop.w {r4-r7} // round key 6
mov r8, r4
mov r9, r5
mov r10, r6
mov r11, r7
bl packing_rkey
pop.w {r4-r7} // round key 5
bl inv_shiftrows_1
bl packing_rkey
pop.w {r4-r7} // round key 4
mov r8, r4
mov r9, r5
mov r10, r6
mov r11, r7
bl packing_rkey
pop.w {r4-r7} // round key 3
bl inv_shiftrows_1
bl packing_rkey
pop.w {r4-r7} // round key 2
mov r8, r4
mov r9, r5
mov r10, r6
mov r11, r7
bl packing_rkey
pop.w {r4-r7} // round key 1
bl inv_shiftrows_1
bl packing_rkey
ldr r12, [sp] // recover the key pointer saved by the initial push
ldr.w r4, [r12] // load the encryption key
ldr r5, [r12, #4]
ldr r6, [r12, #8]
ldr r7, [r12, #12]
mov r8, r4
mov r9, r5
mov r10, r6
mov r11, r7
bl packing_rkey // packs rk[0]; r0 now points at rk base
mvn r5, r5 // cancels the NOT applied in 'packing_rkey'
mvn r8, r8 // cancels the NOT applied in 'packing_rkey'
mvn r7, r7 // cancels the NOT applied in 'packing_rkey'
mvn r11, r11 // cancels the NOT applied in 'packing_rkey'
strd r7, r11, [r0, #24] // restore after fix
strd r6, r10, [r0, #16] // restore after fix
strd r5, r9, [r0, #8] // restore after fix
strd r4, r8, [r0] // restore after fix
pop {r1-r12, r14} // restore context
bx lr
|
aadomn/cymric
| 7,425
|
src/cymric-lea128/avr8/lea128.S
|
/****************************************************************************
* AVR assembly implementation of the LEA-128 block cipher.
*
* @author Alexandre Adomnicai
*
* @date April 2025
****************************************************************************/
; Argument registers for function calls
#define ARG1 r24
#define ARG2 r22
#define ARG3 r20
/**
 * push_registers macro:
 *
 * Pushes a given range of registers in ascending order by recursing on the
 * register number. Registers are passed as bare numbers, which avr-as
 * accepts as register operands (the quoted "(\from+1)" expression is
 * re-evaluated on each recursive expansion).
 * To be called like: push_registers 0,15
 */
.macro push_registers from:req, to:req
push \from
.if \to-\from
push_registers "(\from+1)",\to
.endif
.endm
/**
 * ldi_w macro:
 *
 * Load immediate for 16-bit values: writes lo8(val) to \reg and hi8(val)
 * to the next register up.
 * NOTE(review): the second operand relies on the assembler evaluating the
 * quoted "(\reg+1)" expression as a register number, so call sites must
 * pass \reg numerically (as push_registers does) — confirm if used.
 */
.macro ldi_w reg:req, val:req
ldi \reg, lo8(\val)
ldi "(\reg+1)", hi8(\val)
.endm
/**
 * pop_registers macro:
 *
 * Pops a given range of registers in descending order — the exact mirror
 * of push_registers, so the same from,to pair restores what was pushed.
 * To be called like: pop_registers 0,15
 */
.macro pop_registers from:req, to:req
pop \to
.if \to-\from
pop_registers \from,"(\to-1)"
.endif
.endm
; void lea128_kexpand(u32 *rk, const u8 *key)
; Expands a 128-bit LEA key into 24 round keys of 16 bytes each (384 bytes).
; The working key T[0..3] lives in r2-r17 (4 bytes per word, little-endian);
; the four round-constant words are kept on a 16-byte stack frame and
; updated in place.
.global lea128_kexpand
lea128_kexpand:
; Save callee-saved registers r2-r17, r28-r31; r24:r25 is also saved
; (popped untouched before the restore sequence at exit).
push_registers 2,17
push_registers 28,31
push_registers 24,25
; X <- round-key output buffer (ARG1), Z <- 128-bit key (ARG2)
movw XL, ARG1
movw ZL, ARG2
; Allocate 16 bytes on the stack and keep the frame pointer in Y.
; 0x3d/0x3e/0x3f are SPL/SPH/SREG: interrupts are disabled around the
; non-atomic SP update and SREG (including the I flag) is restored via r0.
in r28, 0x3d
in r29, 0x3e
sbiw r28, 16
in r0, 0x3f
cli
out 0x3e, r29
out 0x3f, r0
out 0x3d, r28
.L__stack_usage = 42
; Load the key given by argument to register 2-17 instead of 0-15 because
; the mul instruction unconditionally overwrites registers r1:r0.
.irp param,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15,r16,r17
ld \param, Z+
.endr
; Save loop counter (24 round keys)
ldi r18, 24
; Constants for efficient bitshifts via mul: r19 = 1<<6, r20 = 1<<3
ldi r19, 64
ldi r20, 8
; Save the four 32-bit round-constant seeds to the stack (two 16-bit
; halves each, stored little-endian)
ldi r22, lo8(0xf4ed)
ldi r23, hi8(0xf4ed)
ldi r24, lo8(0xe1f7)
ldi r25, hi8(0xe1f7)
std Y+0, r22
std Y+1, r23
std Y+2, r24
std Y+3, r25
ldi r22, lo8(0x6b02)
ldi r23, hi8(0x6b02)
ldi r24, lo8(0x4462)
ldi r25, hi8(0x4462)
std Y+4, r22
std Y+5, r23
std Y+6, r24
std Y+7, r25
ldi r22, lo8(0xf914)
ldi r23, hi8(0xf914)
ldi r24, lo8(0xf3c4)
ldi r25, hi8(0xf3c4)
std Y+8, r22
std Y+9, r23
std Y+10, r24
std Y+11, r25
ldi r22, lo8(0xc3b1)
ldi r23, hi8(0xc3b1)
ldi r24, lo8(0xe37c)
ldi r25, hi8(0xe37c)
std Y+12, r22
std Y+13, r23
std Y+14, r24
std Y+15, r25
loop_kexp:
; Load round constant rc (r22:r25, little-endian)
ldd r22, Y+0
ldd r23, Y+1
ldd r24, Y+2
ldd r25, Y+3
; First round key word
; rc <<< 1 (bst/rol-chain/bld implements a 32-bit rotate)
bst r25, 7
rol r22
rol r23
rol r24
rol r25
bld r22, 0
; T[0] = T[0] + rc
add r2, r22
adc r3, r23
adc r4, r24
adc r5, r25
; T[0] <<< 1
bst r5, 7
rol r2
rol r3
rol r4
rol r5
bld r2, 0
; Second round key word
; rc <<< 1
bst r25, 7
rol r22
rol r23
rol r24
rol r25
bld r22, 0
; T[1] = T[1] + rc
add r6, r22
adc r7, r23
adc r8, r24
adc r9, r25
; T[1] <<< 3 (mul by 8 per byte pair; r30/r31 save bytes clobbered by movw)
mov r30, r7
mov r31, r9
mul r6, r20
movw r6, r0
mul r8, r20
movw r8, r0
mul r30, r20
eor r7, r0
eor r8, r1
mul r31, r20
eor r9, r0
eor r6, r1
; Third round key word
; rc <<< 1
bst r25, 7
rol r22
rol r23
rol r24
rol r25
bld r22, 0
; T[2] = T[2] + rc
add r10, r22
adc r11, r23
adc r12, r24
adc r13, r25
; T[2] <<< 6 (same scheme, mul by 64)
mov r30, r11
mov r31, r13
mul r10, r19
movw r10, r0
mul r12, r19
movw r12, r0
mul r30, r19
eor r11, r0
eor r12, r1
mul r31, r19
eor r13, r0
eor r10, r1
; Fourth round key word
; rc <<< 1
bst r25, 7
rol r22
rol r23
rol r24
rol r25
bld r22, 0
; T[3] = T[3] + rc
add r14, r22
adc r15, r23
adc r16, r24
adc r17, r25
; T[3] <<< 11 (byte-rotate left by 8, then <<< 3 via mul by 8)
mov r30, r14
mov r14, r17
mov r17, r16
mov r16, r15
mov r15, r30
mov r31, r17
mul r14, r20
movw r14, r0
mul r16, r20
movw r16, r0
mul r30, r20
eor r15, r0
eor r16, r1
mul r31, r20
eor r17, r0
eor r14, r1
; Store the round key
st X+, r2
st X+, r3
st X+, r4
st X+, r5
st X+, r6
st X+, r7
st X+, r8
st X+, r9
st X+, r10
st X+, r11
st X+, r12
st X+, r13
st X+, r14
st X+, r15
st X+, r16
st X+, r17
; Store the updated round constant (Y advances to the next constant)
st Y+, r22
st Y+, r23
st Y+, r24
st Y+, r25
; Decrement loop counter
subi r18, 1
; If counter != 0 mod 4, no need to wrap round constant
mov r21, r18
andi r21, 3
cpi r21, 0
brne no_wrap_rc
sbiw Y, 16
no_wrap_rc:
cpi r18, 0
breq exit_kexp
rjmp loop_kexp
exit_kexp:
; Deallocate the 16-byte frame; same interrupt-safe SP update as at entry
adiw r28, 16
in r0, 0x3f
cli
out 0x3e, r29
out 0x3f, r0
out 0x3d, r28
pop_registers 24,25
; Restore r2-r17, r28-r31
pop_registers 28,31
pop_registers 2,17
ret
.size lea128_kexpand, .-lea128_kexpand
; void lea128_encrypt(u8 *out, const u8 *in, const u8 *rk)
; Encrypts one 16-byte block with 24 LEA-128 rounds, consuming 6 round-key
; words per round from Z. State x[0..3] lives in r2-r17 (little-endian
; 32-bit words). Note: in LEA-128 round keys satisfy rk[1]==rk[3]==rk[5],
; which is why r26:r29 (loaded once for k[1]) is reused for k[3] and k[5].
.global lea128_encrypt
lea128_encrypt:
; Save r2-r17,r28-r29 (callee-saved) plus r24:r25 (ARG1, reloaded at exit)
push_registers 2,17
push_registers 28,29
push_registers 24,25
.L__stack_usage = 20
; Save the argument pointers to Z (key) and X (plaintext)
movw XL, ARG2
movw ZL, ARG3
; Load the plaintext given by argument to register 2-17 instead of 0-15 because
; the mul instruction unconditionally overwrites registers r1:r0.
.irp param,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15,r16,r17
ld \param, X+
.endr
ldi r18, 24 ; round counter
ldi r19, 32 ; 1<<5, for the <<<29 rotation via mul
ldi r20, 8 ; 1<<3, for the <<<27 rotation via mul
loop:
// save x[0] (becomes x[3] at the end of the round)
movw r22, r2
movw r24, r4
// x[0] ^= k[0]
ld r26, Z+
ld r27, Z+
ld r28, Z+
ld r29, Z+
eor r2, r26
eor r3, r27
eor r4, r28
eor r5, r29
// x[0] += (x[1] ^ k[1])  -- k[1] stays in r26:r29 for reuse below
ld r26, Z+
ld r27, Z+
ld r28, Z+
ld r29, Z+
movw r0, r26
eor r0, r6
eor r1, r7
add r2, r0
adc r3, r1
movw r0, r28
eor r0, r8
eor r1, r9
adc r4, r0
adc r5, r1
// x[1] ^= k[2]
ld r0, Z+
ld r1, Z+
eor r6, r0
eor r7, r1
ld r0, Z+
ld r1, Z+
eor r8, r0
eor r9, r1
// x[1] += (x[2] ^ k[3])  -- k[3] == k[1], still in r26:r29
movw r0, r26
eor r0, r10
eor r1, r11
add r6, r0
adc r7, r1
movw r0, r28
eor r0, r12
eor r1, r13
adc r8, r0
adc r9, r1
// x[3] ^= k[5]  -- k[5] == k[1], still in r26:r29
eor r14, r26
eor r15, r27
eor r16, r28
eor r17, r29
// x[2] ^= k[4]
ld r26, Z+
ld r27, Z+
ld r28, Z+
ld r29, Z+
eor r10, r26
eor r11, r27
eor r12, r28
eor r13, r29
// x[2] += x[3]  (x[3] already holds x[3]^k[5])
add r10, r14
adc r11, r15
adc r12, r16
adc r13, r17
// x[0] <<<= 9  (byte-rotate by 8, then bst/rol/bld rotate by 1)
mov r28, r5
mov r5, r4
mov r4, r3
mov r3, r2
mov r2, r28
bst r5, 7
rol r2
rol r3
rol r4
rol r5
bld r2, 0
// x[1] <<<= 27  (byte-rotate right by 8, then <<<3 via mul by 8)
mov r28, r6
mov r6, r7
mov r7, r8
mov r8, r9
mov r9, r28
mov r29, r7
mul r6, r20
movw r6, r0
mul r8, r20
movw r8, r0
mul r29, r20
eor r7, r0
eor r8, r1
mul r28, r20
eor r9, r0
eor r6, r1
// x[2] <<<= 29  (byte-rotate right by 8, then <<<5 via mul by 32)
mov r28, r10
mov r10, r11
mov r11, r12
mov r12, r13
mov r13, r28
mov r29, r11
mul r10, r19
movw r10, r0
mul r12, r19
movw r12, r0
mul r29, r19
eor r11, r0
eor r12, r1
mul r28, r19
eor r13, r0
eor r10, r1
// x[3] = x[0]  (the value of x[0] saved at the top of the round)
movw r14, r22
movw r16, r24
; Decrement loop counter
subi r18, 1
cpi r18, 0
breq exit
rjmp loop
exit:
; Store output
pop_registers 24,25
movw YL, ARG1
st Y+, r2
st Y+, r3
st Y+, r4
st Y+, r5
st Y+, r6
st Y+, r7
st Y+, r8
st Y+, r9
st Y+, r10
st Y+, r11
st Y+, r12
st Y+, r13
st Y+, r14
st Y+, r15
st Y+, r16
st Y+, r17
; Restore r2-r17, r28-r29
pop_registers 28,29
pop_registers 2,17
ret
.size lea128_encrypt, .-lea128_encrypt
|
aadomn/cymric
| 2,579
|
src/cymric-lea128/armv7m/lea128.S
|
/****************************************************************************
* ARMv7M assembly implementation of the LEA-128 block cipher where key
* expansion is performed on-the-fly.
* @author Alexandre Adomnicai
* @date April 2025
****************************************************************************/
.syntax unified
.thumb
k .req r2
x .req r1
// key
k0 .req r0
k1 .req r1
k2 .req r3
k3 .req r4
// data
x0 .req r5
x1 .req r6
x2 .req r7
x3 .req r8
// constants
g0 .req r9
g1 .req r10
g2 .req r11
g3 .req r12
; One LEA round with on-the-fly key schedule.
; \rconst holds the current round-constant word; the macro both derives the
; round keys from it and rotates it (ror #28) at the end so the next
; invocation sees the next constant. r2 and lr are used as scratch (the k/x
; argument pointers aliased to r2/r1 are dead once the key and plaintext
; are loaded). The x words are kept in a rotated representation between
; rounds — the ror fix-ups in lea128_encrypt undo it before storing.
.macro lea_round rconst
// calculate round keys on-the-fly
add k0, \rconst, k0, ror #31
add k1, k1, \rconst, ror #31
ror k1, #29
add k2, k2, \rconst, ror #30
ror k2, #26
add k3, k3, \rconst, ror #29
ror k3, #21
// save x0
ror lr, x0, #23
// x0 = ROTR32((x0 ^ k0) + (x1 ^ k1),23);
eor r2, k1, x1, ror #5
eor x0, x0, k0, ror #8
add x0, r2, x0, ror #23
// x1 = ROTR32((x1 ^ k2) + (x2 ^ k1), 5);
eor r2, k1, x2, ror #3
eor x1, k2, x1, ror #5
add x1, r2
// x2 = ROTR32((x2 ^ k3) + (x3 ^ k1), 3);
eor r2, x3, k1
eor x2, k3, x2, ror #3
add x2, r2
// x3 = x0;
mov x3, lr
ror \rconst, #28 // advance the round constant for the next round
.endm
; Runs four consecutive LEA rounds, one per round-constant register g0-g3.
; lr must be saved/restored because lea_round uses it as scratch.
lea_quadruple_round:
push {lr}
lea_round g0
lea_round g1
lea_round g2
lea_round g3
pop {lr}
bx lr
.size lea_quadruple_round, .-lea_quadruple_round
; void lea128_encrypt(u8 *out, const u8 *in, const u8 *rk)
; (register aliases declared above: x = input ptr, k = key ptr)
; Encrypts one block with 24 LEA-128 rounds (6 calls of 4 rounds each).
; The key and state words are pre-rotated so lea_round can fold its
; rotations into the flexible second operand; the inverse rotations are
; applied before the ciphertext is stored.
.global lea128_encrypt
.type lea128_encrypt,%function
.align 4
lea128_encrypt:
// save registers; r0 (output pointer) is recovered from the stack below
push {r0-r12, lr}
// load rconsts (the four LEA-128 round-constant seeds)
movw g0, #0xe9db
movt g0, #0xc3ef
movw g1, #0xd604
movt g1, #0x88c4
movw g2, #0xf229
movt g2, #0xe789
movw g3, #0x8763
movt g3, #0xc6f9
// load ptext
ldr.w x0, [x, #0]
ldr.w x1, [x, #4]
ldr.w x2, [x, #8]
ldr.w x3, [x, #12]
// load key (note x and k1 alias r1; x is dead once the plaintext is read)
ldr.w k0, [k, #0]
ldr.w k1, [k, #4]
ldr.w k2, [k, #8]
ldr.w k3, [k, #12]
// rotations to match lea_round alignments
ror k0, #1
ror x0, #9
ror x1, #27
ror x2, #29
// perform encryption: 6 x 4 = 24 rounds
bl lea_quadruple_round
bl lea_quadruple_round
bl lea_quadruple_round
bl lea_quadruple_round
bl lea_quadruple_round
bl lea_quadruple_round
// save 128-bit cipher text
ldr.w r0, [sp], #4 // pop the saved output pointer
ror x0, #23 // undo the rotated representation
ror x1, #5
ror x2, #3
str.w x0, [r0, #0]
str.w x1, [r0, #4]
str.w x2, [r0, #8]
str.w x3, [r0, #12]
// restore registers (r0 already consumed from the stack)
pop {r1-r12, lr}
bx lr
.size lea128_encrypt, .-lea128_encrypt
|
aadomn/cymric
| 28,705
|
src/cymric-aes128/avr8/rijndaelfast.s
|
; Copyright (C) 2003,2006 B. Poettering
;
; This program is free software; you can redistribute and/or modify
; it under the terms of the GNU General Public License as published by
; the Free Software Foundation; either version 2 of the License, or
; (at your option) any later version. Whenever you redistribute a copy
; of this document, make sure to include the copyright and license
; agreement without modification.
;
; This program is distributed in the hope that it will be useful,
; but WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
; GNU General Public License for more details.
;
; You should have received a copy of the GNU General Public License
; along with this program; if not, write to the Free Software
; Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
; The license text can be found here: http://www.gnu.org/licenses/gpl.txt
; http://point-at-infinity.org/avraes/
;
; This AES implementation was written in May 2003 by B. Poettering. It is
; published under the terms of the GNU General Public License. If you need
; AES code, but this license is unsuitable for your project, feel free to
; contact me: avraes AT point-at-infinity.org
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;
; RijndaelFast
;
; This is a microcontroller implementation of the Rijndael block cipher, better
; known as AES. The target device class is Atmel's AVR, a family of very fast
; and very powerful flash MCUs, operating at clock rates up to 16 MHz,
; executing one instruction per clock cycle (16 MIPS). The implementation
; given here is optimized for speed (versus codesize), and achieves an
; encryption rate of more than 100 kByte per second (on a 16MHz MCU).
; The decryption performs about 40% slower than encryption (typical for
; Rijndael).
;
; The implemented algorithm is restricted to block and key sizes of 128 bit.
; Larger key sizes can be obtained by altering the key scheduling code, which
; should be easy. As the cipher's state is completely kept in registers
; (which are limited in number), the block size is not that easy to enlarge.
;
; This implementation makes extensive use of the AVR's "lpm" instruction,
; which loads data bytes from program memory at given addresses (the s-boxes
; are realized that way). Some members of the AVR family don't offer that
; instruction at all (e.g. AT90S1200), others only in a restricted way
; (forcing the target register to be r0). The code below requires the least
; restricted lpm instruction (with free choice of the target register).
; The ATmega161 devices meet the above mentioned requirements.
;
; Statistics:
;
; 16 MHz MCU | clock cycles | blocks per second | bytes per second
; -----------+--------------+-------------------+------------------
; encryption | 2474 | 6467 | 103476
; decryption | 3411 | 4691 | 75051
;
; KEY SETUP TIME
; encryption: 756 clock cycles
; decryption: 756 + 4221 = 4977 clock cycles
;
; CODE SIZE
; instructions: 1306 byte ( 653 words)
; sboxes: 1792 byte ( 896 words) = 7 * 256 byte
; total: 3098 byte (1549 words)
;
; RAM REQUIREMENTS
; 16 * 11 = 176 byte for each expanded key
;
;
; This source code consists of four routines and an example application,
; which encrypts a certain plaintext and decrypts it afterwards with the
; same key. Comments in the code clarify the interaction between the key
; expansion and the encryption/decryption routines.
;
; I encourage to read the following Rijndael-related papers/books/sites:
; [1] "The Design of Rijndael", Daemen & Rijmen, Springer, ISBN 3-540-42580-2
; [2] http://www.esat.kuleuven.ac.be/~rijmen/rijndael/
; [3] http://www.esat.kuleuven.ac.be/~rijmen/rijndael/rijndaeldocV2.zip
; [4] http://www.esat.kuleuven.ac.be/~rijmen/rijndael/atmal.zip
; [5] http://csrc.nist.gov/CryptoToolkit/aes/rijndael/
;
; [1] is *the* book about Rijndael, [2] is the official Rijndael homepage,
; [3] contains the complete Rijndael AES specification, [4] is another
; Rijndael-implementation for AVR MCUs (but much slower than this one,
; taking 3815 clock cycles per encryption), [5] is the official NIST AES
; site with further links.
;
; AVR and ATmega are registered trademarks by the ATMEL corporation.
; See http://www.atmel.com and http://www.atmel.com/products/avr/ for
; further details.
;;; ***************************************************************************
;;; The Rijndael cipher acts on a so-called (128 bit) "state matrix",
;;; represented here by the 4x4 state bytes ST11-ST44. To guarantee maximum
;;; performance on AVR MCUs, these bytes are kept in registers (defaulted to
;;; the 16 low order registers r0-r15, but this may be changed if required).
;;;
;;; The implementation makes use of six auxiliary registers (H1-H5 and I),
;;; some of which must reside in the upper registers (r16-r31). In addition
;;; ramp-registers YH:YL and ZH:ZL are used.
;;;
;;; If the context *really* requires more registers than the remaining ones,
;;; it seems promising to move the I-register to a (fixed) ram location.
;;; In the time critical routines the I-value is rarely used, thus the
;;; speed loss obtained by dropping it from the register file is acceptable.
#include <avr/io.h>
#define ST11 r0
#define ST21 r1
#define ST31 r2
#define ST41 r3
#define ST12 r4
#define ST22 r5
#define ST32 r6
#define ST42 r7
#define ST13 r8
#define ST23 r9
#define ST33 r10
#define ST43 r11
#define ST14 r12
#define ST24 r13
#define ST34 r14
#define ST44 r15
#define H1 r16
#define H2 r17
#define H3 r18
#define H4 r19
#define H5 r20
#define I r21
; Argument registers for function calls
#define ARG1 r24
#define ARG2 r22
#define ARG3 r20
/**
 * push_registers macro:
 *
 * Pushes a given range of registers in ascending order by recursive
 * expansion; registers are given as bare numbers (avr-as accepts them
 * as register operands).
 * To be called like: push_registers 0,15
 */
.macro push_registers from:req, to:req
push \from
.if \to-\from
push_registers "(\from+1)",\to
.endif
.endm
/**
 * pop_registers macro:
 *
 * Pops a given range of registers in descending order — the mirror of
 * push_registers, so identical from,to arguments restore what was pushed.
 * To be called like: pop_registers 0,15
 */
.macro pop_registers from:req, to:req
pop \to
.if \to-\from
pop_registers \from,"(\to-1)"
.endif
.endm
; void expand_key(u8 *rk, const u8 *key)
; AES-128 key expansion: writes 11 round keys (176 bytes) to rk.
; Saves callee-saved registers so the avr-gcc calling convention is followed.
.global expand_key
expand_key:
; Save r2-r17,r28-r29
push_registers 2,17
push_registers 28,29
; X <- 16-byte cipher key (ARG2), Y <- expanded-key output buffer (ARG1)
movw XL, ARG2
movw YL, ARG1
; Load the 16-byte key into the state registers r0-r15
.irp param,r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15
ld \param, X+
.endr
; Core function: H1 = rcon (starts at 1), H2 = 0x1b (AES xtime reduction)
ldi H1, 1
ldi H2, 0x1b
ldi ZH, hi8(sbox) ; sbox is 256-byte aligned, so ZH selects the table
rjmp keyexp1 ; round key 0 is the cipher key itself
keyexp0:
; w[0] ^= SubWord(RotWord(w[3])) ^ rcon
mov ZL, ST24
ld H3, Z
eor ST11, H3
eor ST11, H1
mov ZL, ST34
ld H3, Z
eor ST21, H3
mov ZL, ST44
ld H3, Z
eor ST31, H3
mov ZL, ST14
ld H3, Z
eor ST41, H3
; w[1] ^= w[0]; w[2] ^= w[1]; w[3] ^= w[2]
eor ST12, ST11
eor ST22, ST21
eor ST32, ST31
eor ST42, ST41
eor ST13, ST12
eor ST23, ST22
eor ST33, ST32
eor ST43, ST42
eor ST14, ST13
eor ST24, ST23
eor ST34, ST33
eor ST44, ST43
; rcon = xtime(rcon) in GF(2^8)
lsl H1
brcc keyexp1
eor H1, H2
keyexp1:
st Y+, ST11
st Y+, ST21
st Y+, ST31
st Y+, ST41
st Y+, ST12
st Y+, ST22
st Y+, ST32
st Y+, ST42
st Y+, ST13
st Y+, ST23
st Y+, ST33
st Y+, ST43
st Y+, ST14
st Y+, ST24
st Y+, ST34
st Y+, ST44
; rcon sequence is 1,2,...,0x80,0x1b,0x36; after the 10th iteration
; xtime(0x36) = 0x6c terminates the loop
cpi H1, 0x6c
brne keyexp0
; Restore r2-r17,r28-r29
pop_registers 28,29
pop_registers 2,17
clr r1 ; avr-gcc ABI: r1 must be zero on return
ret
.size expand_key, .-expand_key
; void encrypt_data(u8 *out, const u8 *in, const u8 *rk)
; Encrypts one 16-byte block with the expanded key produced by expand_key.
; Fix: dropped the two dead "in r26/r27, SPL/SPH" instructions — they read
; SP into X, but X is unconditionally overwritten by "movw XL, ARG2" below,
; so the reads (and the stale ";mov SP to X" comment) had no effect.
.global encrypt_data
encrypt_data:
; Save callee-saved registers r2-r17, r28-r29 (avr-gcc ABI)
push_registers 2,17
push_registers 28,29
; X <- plaintext (ARG2), Y <- expanded key (ARG3)
movw XL, ARG2
movw YL, ARG3
; Load the 16-byte plaintext into the state registers r0-r15
.irp param,r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15
ld \param, X+
.endr
rcall encrypt
; Save the final state from the registers to the output buffer (ARG1)
movw YL, ARG1
.irp param,r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15
st Y+, \param
.endr
; Restore callee-saved registers
pop_registers 28,29
pop_registers 2,17
clr r1 ; avr-gcc ABI: r1 must be zero on return
ret
.size encrypt_data, .-encrypt_data
;;; ***************************************************************************
;;;
;;; ENCRYPT
;;; This routine encrypts a 128 bit plaintext block (supplied in ST11-ST44),
;;; using an expanded key given in YH:YL. The resulting 128 bit ciphertext
;;; block is stored in ST11-ST44.
;;;
;;; Round structure: encryp1 applies the initial AddRoundKey; the encryp0
;;; loop then runs 9 combined SubBytes/ShiftRows/MixColumns/AddRoundKey
;;; rounds (I counts 8 down to 0) using the sbox and sbox02 (= 2*sbox)
;;; tables, whose 256-byte alignment lets ZH select the table and ZL the
;;; entry; the final round omits MixColumns per the AES specification.
;;;
;;; Parameters:
;;;     YH:YL:  pointer to expanded key
;;;     ST11-ST44: 128 bit plaintext block
;;; Touched registers:
;;;     ST11-ST41,H1-H5,I,ZH,ZL,YH,YL
;;; Clock cycles: 2474
encrypt:
rcall encryp1 ; initial AddRoundKey (round key 0)
ldi ZH, hi8(sbox)
ldi I, 8 ; 9 loop iterations: I = 8 down to 0
encryp0:mov ZL, ST11 ; 1
ld H2, Z
mov H3, H2
mov H4, H2
ldi ZH, hi8(sbox02)
ld H1, Z
eor H4, H1
mov ZL, ST22
ld H5, Z
eor H1, H5
eor H2, H5
ldi ZH, hi8(sbox)
ld H5, Z
eor H1, H5
eor H3, H5
eor H4, H5
mov ZL, ST33
ld H5, Z
eor H1, H5
eor H2, H5
eor H4, H5
ldi ZH, hi8(sbox02)
ld H5, Z
eor H2, H5
eor H3, H5
mov ZL, ST44
ld H5, Z
eor H3, H5
eor H4, H5
ldi ZH, hi8(sbox)
ld H5, Z
eor H1, H5
eor H2, H5
eor H3, H5
ldd ST11, Y+0
eor ST11, H1
mov ZL, ST41 ; 2
ldd ST41, Y+3
eor ST41, H4
ld H1, Z
mov H4, H1
mov ST33, H1
ldi ZH, hi8(sbox02)
ld ST44, Z
eor ST33, ST44
mov ZL, ST12
ld H5, Z
eor H1, H5
eor ST44, H5
ldi ZH, hi8(sbox)
ld H5, Z
eor H4, H5
eor ST33, H5
eor ST44, H5
mov ZL, ST23
ld H5, Z
eor H1, H5
eor ST33, H5
eor ST44, H5
ldi ZH, hi8(sbox02)
ld H5, Z
eor H1, H5
eor H4, H5
mov ZL, ST34
ld H5, Z
eor H4, H5
eor ST33, H5
ldi ZH, hi8(sbox)
ld H5, Z
eor H1, H5
eor H4, H5
eor ST44, H5
ldd ST12, Y+4
eor ST12, H1
ldd ST22, Y+5
eor ST22, H4
mov ZL, ST31 ; 3
ldd ST31, Y+2
eor ST31, H3
ld ST34, Z
mov H3, ST34
mov H1, ST34
ldi ZH, hi8(sbox02)
ld H4, Z
eor H3, H4
mov ZL, ST42
ldd ST42, Y+7
eor ST42, ST44
ld H5, Z
eor H4, H5
eor H1, H5
ldi ZH, hi8(sbox)
ld H5, Z
eor ST34, H5
eor H3, H5
eor H4, H5
mov ZL, ST13
ld H5, Z
eor H3, H5
eor H4, H5
eor H1, H5
ldi ZH, hi8(sbox02)
ld H5, Z
eor ST34, H5
eor H1, H5
mov ZL, ST24
ld H5, Z
eor ST34, H5
eor H3, H5
ldi ZH, hi8(sbox)
ld H5, Z
eor ST34, H5
eor H4, H5
eor H1, H5
ldd ST13, Y+8
eor ST13, ST34
ldd ST23, Y+9
eor ST23, H3
mov ZL, ST32 ; 4
ldd ST32, Y+6
eor ST32, ST33
ldd ST33, Y+10
eor ST33, H4
ld ST24, Z
mov ST34, ST24
mov H4, ST24
ldi ZH, hi8(sbox02)
ld H3, Z
eor ST34, H3
mov ZL, ST43
ldd ST43, Y+11
eor ST43, H1
ld H5, Z
eor H3, H5
eor H4, H5
ldi ZH, hi8(sbox)
ld H5, Z
eor ST24, H5
eor ST34, H5
eor H3, H5
mov ZL, ST14
ld H5, Z
eor ST34, H5
eor H3, H5
eor H4, H5
ldi ZH, hi8(sbox02)
ld H5, Z
eor ST24, H5
eor H4, H5
mov ZL, ST21
ld H5, Z
eor ST24, H5
eor ST34, H5
ldi ZH, hi8(sbox)
ld H5, Z
eor ST24, H5
eor H3, H5
eor H4, H5
ldd ST21, Y+1
eor ST21, H2
ldd ST14, Y+12
eor ST14, ST24
ldd ST24, Y+13
eor ST24, ST34
ldd ST34, Y+14
eor ST34, H3
ldd ST44, Y+15
eor ST44, H4
adiw Y, 16 ; advance to the next round key
dec I
sbrs I,7 ; loop until I wraps below zero
jmp encryp0
; Omit MixColumns for the last round: SubBytes + ShiftRows only
mov ZL, ST11
ld ST11, Z
mov ZL, ST12
ld ST12, Z
mov ZL, ST13
ld ST13, Z
mov ZL, ST14
ld ST14, Z
mov H1, ST21
mov ZL, ST22
ld ST21, Z
mov ZL, ST23
ld ST22, Z
mov ZL, ST24
ld ST23, Z
mov ZL, H1
ld ST24, Z
mov H1, ST31
mov ZL, ST33
ld ST31, Z
mov ZL, H1
ld ST33, Z
mov H1, ST32
mov ZL, ST34
ld ST32, Z
mov ZL, H1
ld ST34, Z
mov H1, ST41
mov ZL, ST44
ld ST41, Z
mov ZL, ST43
ld ST44, Z
mov ZL, ST42
ld ST43, Z
mov ZL, H1
ld ST42, Z
encryp1:
; AddRoundKey: XOR the 16-byte round key at Y into the state, advancing Y
ld H1, Y+
eor ST11, H1
ld H1, Y+
eor ST21, H1
ld H1, Y+
eor ST31, H1
ld H1, Y+
eor ST41, H1
ld H1, Y+
eor ST12, H1
ld H1, Y+
eor ST22, H1
ld H1, Y+
eor ST32, H1
ld H1, Y+
eor ST42, H1
ld H1, Y+
eor ST13, H1
ld H1, Y+
eor ST23, H1
ld H1, Y+
eor ST33, H1
ld H1, Y+
eor ST43, H1
ld H1, Y+
eor ST14, H1
ld H1, Y+
eor ST24, H1
ld H1, Y+
eor ST34, H1
ld H1, Y+
eor ST44, H1
ret
.size encrypt, .-encrypt
; void decrypt_data(u8 *out, const u8 *in, const u8 *rk_end)
; Decrypts one 16-byte block; ARG3 must point BEHIND the patched expanded
; key (see the "decrypt" routine's header below).
; Fixes: dropped the two dead "in r26/r27, SPL/SPH" instructions (X was
; immediately overwritten by "movw XL, ARG2", so reading SP had no effect)
; and added the .size directive for consistency with the sibling routines.
.global decrypt_data
decrypt_data:
; Save callee-saved registers r2-r17, r28-r29 (avr-gcc ABI)
push_registers 2,17
push_registers 28,29
; X <- ciphertext (ARG2), Y <- end of patched expanded key (ARG3)
movw XL, ARG2
movw YL, ARG3
; Load the 16-byte ciphertext into the state registers r0-r15
.irp param,r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15
ld \param, X+
.endr
rcall decrypt
; Save the final state from the registers to the output buffer (ARG1)
movw YL, ARG1
.irp param,r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15
st Y+, \param
.endr
; Restore callee-saved registers
pop_registers 28,29
pop_registers 2,17
clr r1 ; avr-gcc ABI: r1 must be zero on return
ret
.size decrypt_data, .-decrypt_data
;;; ***************************************************************************
;;;
;;; DECRYPT
;;; This routine decrypts a 128 bit ciphertext block (given in ST11-ST44),
;;; using an expanded (and patched) key supplied in the 16*11 memory locations
;;; BEFORE YH:YL (YH:YL points behind the last byte of key material!). The
;;; resulting 128 bit plaintext block is stored in ST11-ST44. The "equivalent
;;; decryption algorithm" of Rijndael is implemented, so the MixColumns
;;; diffusion operator has to be applied to the expanded key (done
;;; with the routine patch_decryption_key) before calling the "decrypt"
;;; routine.
;;;
;;; Round structure: decryp1 applies the final round key (walking Y
;;; backwards); the decryp0 loop then runs 9 equivalent-inverse rounds
;;; (I = 8 down to 0) using the isbox09/0b/0d/0e tables (inverse sbox
;;; scaled by the InvMixColumns coefficients 9, 0x0b, 0x0d, 0x0e); the
;;; last round applies plain InvSubBytes/InvShiftRows via isbox.
;;;
;;; Parameters:
;;;     YH:YL:  pointer behind patched key
;;;     ST11-ST44: 128 bit ciphertext block
;;; Touched registers:
;;;     ST11-ST41,H1-H5,I,ZH,ZL,YH,YL
;;; Clock cycles: 3411
decrypt:rcall decryp1 ; AddRoundKey with the last round key
ldi I, 8 ; 9 loop iterations: I = 8 down to 0
ldi ZH, hi8(isbox0e)
decryp0:sbiw Y, 16 ; step back one round key
mov ZL, ST11 ; 1
ld H1, Z
ldi ZH, hi8(isbox09)
ld H2, Z
ldi ZH, hi8(isbox0d)
ld H3, Z
ldi ZH, hi8(isbox0b)
ld H4, Z
mov ZL, ST24
ld H5, Z
eor H1, H5
ldi ZH, hi8(isbox0d)
ld H5, Z
eor H4, H5
ldi ZH, hi8(isbox09)
ld H5, Z
eor H3, H5
ldi ZH, hi8(isbox0e)
ld H5, Z
eor H2, H5
mov ZL, ST33
ld H5, Z
eor H3, H5
ldi ZH, hi8(isbox09)
ld H5, Z
eor H4, H5
ldi ZH, hi8(isbox0d)
ld H5, Z
eor H1, H5
ldi ZH, hi8(isbox0b)
ld H5, Z
eor H2, H5
mov ZL, ST42
ld H5, Z
eor H3, H5
ldi ZH, hi8(isbox0d)
ld H5, Z
eor H2, H5
ldi ZH, hi8(isbox09)
ld H5, Z
eor H1, H5
ldi ZH, hi8(isbox0e)
ld H5, Z
eor H4, H5
ldd ST11, Y+0
eor ST11, H1
mov ZL, ST21 ; 2
ldd ST21, Y+1
eor ST21, H2
ld H2, Z
ldi ZH, hi8(isbox09)
ld ST24, Z
ldi ZH, hi8(isbox0d)
ld ST33, Z
ldi ZH, hi8(isbox0b)
ld H1, Z
mov ZL, ST12
ld H5, Z
eor ST33, H5
ldi ZH, hi8(isbox0d)
ld H5, Z
eor ST24, H5
ldi ZH, hi8(isbox09)
ld H5, Z
eor H2, H5
ldi ZH, hi8(isbox0e)
ld H5, Z
eor H1, H5
mov ZL, ST34
ld H5, Z
eor ST24, H5
ldi ZH, hi8(isbox09)
ld H5, Z
eor ST33, H5
ldi ZH, hi8(isbox0d)
ld H5, Z
eor H1, H5
ldi ZH, hi8(isbox0b)
ld H5, Z
eor H2, H5
mov ZL, ST43
ld H5, Z
eor ST24, H5
ldi ZH, hi8(isbox0d)
ld H5, Z
eor H2, H5
ldi ZH, hi8(isbox09)
ld H5, Z
eor H1, H5
ldi ZH, hi8(isbox0e)
ld H5, Z
eor ST33, H5
ldd ST12, Y+4
eor ST12, H1
ldd ST42, Y+7
eor ST42, ST33
mov ZL, ST31 ; 3
ldd ST31, Y+2
eor ST31, H3
ld ST34, Z
ldi ZH, hi8(isbox09)
ld H3, Z
ldi ZH, hi8(isbox0d)
ld H1, Z
ldi ZH, hi8(isbox0b)
ld ST33, Z
mov ZL, ST13
ld H5, Z
eor H3, H5
ldi ZH, hi8(isbox0d)
ld H5, Z
eor ST34, H5
ldi ZH, hi8(isbox09)
ld H5, Z
eor ST33, H5
ldi ZH, hi8(isbox0e)
ld H5, Z
eor H1, H5
mov ZL, ST22
ld H5, Z
eor ST33, H5
ldi ZH, hi8(isbox09)
ld H5, Z
eor ST34, H5
ldi ZH, hi8(isbox0d)
ld H5, Z
eor H3, H5
ldi ZH, hi8(isbox0b)
ld H5, Z
eor H1, H5
mov ZL, ST44
ld H5, Z
eor ST34, H5
ldi ZH, hi8(isbox0d)
ld H5, Z
eor ST33, H5
ldi ZH, hi8(isbox09)
ld H5, Z
eor H1, H5
ldi ZH, hi8(isbox0e)
ld H5, Z
eor H3, H5
ldd ST13, Y+8
eor ST13, H1
ldd ST43, Y+11
eor ST43, H3
ldd ST22, Y+5
eor ST22, H2
mov ZL, ST41 ; 4
ldd ST41, Y+3
eor ST41, H4
ld H4, Z
ldi ZH, hi8(isbox09)
ld H1, Z
ldi ZH, hi8(isbox0d)
ld H2, Z
ldi ZH, hi8(isbox0b)
ld H3, Z
mov ZL, ST14
ld H5, Z
eor H4, H5
ldi ZH, hi8(isbox0d)
ld H5, Z
eor H3, H5
ldi ZH, hi8(isbox09)
ld H5, Z
eor H2, H5
ldi ZH, hi8(isbox0e)
ld H5, Z
eor H1, H5
mov ZL, ST23
ld H5, Z
eor H2, H5
ldi ZH, hi8(isbox09)
ld H5, Z
eor H3, H5
ldi ZH, hi8(isbox0d)
ld H5, Z
eor H4, H5
ldi ZH, hi8(isbox0b)
ld H5, Z
eor H1, H5
mov ZL, ST32
ld H5, Z
eor H2, H5
ldi ZH, hi8(isbox0d)
ld H5, Z
eor H1, H5
ldi ZH, hi8(isbox09)
ld H5, Z
eor H4, H5
ldi ZH, hi8(isbox0e)
ld H5, Z
eor H3, H5
ldd ST14, Y+12
eor ST14, H1
ldd ST23, Y+9
eor ST23, ST33
ldd ST32, Y+6
eor ST32, ST24
ldd ST33, Y+10
eor ST33, ST34
ldd ST34, Y+14
eor ST34, H3
ldd ST44, Y+15
eor ST44, H4
ldd ST24, Y+13
eor ST24, H2
dec I
sbrs I,7 ; loop until I wraps below zero
jmp decryp0
; Last round: plain InvSubBytes + InvShiftRows (no InvMixColumns)
ldi ZH, hi8(isbox)
mov ZL, ST11
ld ST11, Z
mov ZL, ST12
ld ST12, Z
mov ZL, ST13
ld ST13, Z
mov ZL, ST14
ld ST14, Z
mov H1, ST24
mov ZL, ST23
ld ST24, Z
mov ZL, ST22
ld ST23, Z
mov ZL, ST21
ld ST22, Z
mov ZL, H1
ld ST21, Z
mov H1, ST31
mov ZL, ST33
ld ST31, Z
mov ZL, H1
ld ST33, Z
mov H1, ST32
mov ZL, ST34
ld ST32, Z
mov ZL, H1
ld ST34, Z
mov H1, ST41
mov ZL, ST42
ld ST41, Z
mov ZL, ST43
ld ST42, Z
mov ZL, ST44
ld ST43, Z
mov ZL, H1
ld ST44, Z
; AddRoundKey walking Y backwards (pre-decrement loads)
decryp1:ld H1, -Y
eor ST44, H1
ld H1, -Y
eor ST34, H1
ld H1, -Y
eor ST24, H1
ld H1, -Y
eor ST14, H1
ld H1, -Y
eor ST43, H1
ld H1, -Y
eor ST33, H1
ld H1, -Y
eor ST23, H1
ld H1, -Y
eor ST13, H1
ld H1, -Y
eor ST42, H1
ld H1, -Y
eor ST32, H1
ld H1, -Y
eor ST22, H1
ld H1, -Y
eor ST12, H1
ld H1, -Y
eor ST41, H1
ld H1, -Y
eor ST31, H1
ld H1, -Y
eor ST21, H1
ld H1, -Y
eor ST11, H1
ret
;;; ***************************************************************************
;;;
;;; S-BOX
;;; Rijndael consists of a non-linear step in its rounds (called "sbox step"),
;;; generally implemented with hard-coded lookup tables. The implementation
;;; given above makes use of seven lookup tables in total: the sbox itself,
;;; its inverse, and scaled versions of both (e.g. sbox02[] = 2*sbox[]).
;;;
;;; This generous employment of expensive space of flash memory has two
;;; important advantages: excellent performance and protection against
;;; timing and power measurement attacks.
;;;
;;; The seven tables have to be aligned to a flash position with its lower
;;; address byte equal to 0x00. In assembler syntax: lo8(sbox<<1) == 0.
;;; To ensure the proper alignment of the sboxes, the assembler directive
;;; .ORG is used (below the sboxes are defined to begin at 0x800). Note, that
;;; any other address can be used as well, as long as the lower byte is equal
;;; to 0x00.
;;;
;;; The order of the sboxes is totally arbitrary. They even do not have to be
;;; allocated in adjacent memory areas.
.data
.balign 256
sbox:
.byte 0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5,0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76
.byte 0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0,0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0
.byte 0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc,0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15
.byte 0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a,0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75
.byte 0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0,0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84
.byte 0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b,0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf
.byte 0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85,0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8
.byte 0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5,0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2
.byte 0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17,0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73
.byte 0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88,0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb
.byte 0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c,0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79
.byte 0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9,0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08
.byte 0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6,0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a
.byte 0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e,0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e
.byte 0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94,0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf
.byte 0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68,0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16
.size sbox, .-sbox
sbox02:
.byte 0xc6,0xf8,0xee,0xf6,0xff,0xd6,0xde,0x91,0x60,0x02,0xce,0x56,0xe7,0xb5,0x4d,0xec
.byte 0x8f,0x1f,0x89,0xfa,0xef,0xb2,0x8e,0xfb,0x41,0xb3,0x5f,0x45,0x23,0x53,0xe4,0x9b
.byte 0x75,0xe1,0x3d,0x4c,0x6c,0x7e,0xf5,0x83,0x68,0x51,0xd1,0xf9,0xe2,0xab,0x62,0x2a
.byte 0x08,0x95,0x46,0x9d,0x30,0x37,0x0a,0x2f,0x0e,0x24,0x1b,0xdf,0xcd,0x4e,0x7f,0xea
.byte 0x12,0x1d,0x58,0x34,0x36,0xdc,0xb4,0x5b,0xa4,0x76,0xb7,0x7d,0x52,0xdd,0x5e,0x13
.byte 0xa6,0xb9,0x00,0xc1,0x40,0xe3,0x79,0xb6,0xd4,0x8d,0x67,0x72,0x94,0x98,0xb0,0x85
.byte 0xbb,0xc5,0x4f,0xed,0x86,0x9a,0x66,0x11,0x8a,0xe9,0x04,0xfe,0xa0,0x78,0x25,0x4b
.byte 0xa2,0x5d,0x80,0x05,0x3f,0x21,0x70,0xf1,0x63,0x77,0xaf,0x42,0x20,0xe5,0xfd,0xbf
.byte 0x81,0x18,0x26,0xc3,0xbe,0x35,0x88,0x2e,0x93,0x55,0xfc,0x7a,0xc8,0xba,0x32,0xe6
.byte 0xc0,0x19,0x9e,0xa3,0x44,0x54,0x3b,0x0b,0x8c,0xc7,0x6b,0x28,0xa7,0xbc,0x16,0xad
.byte 0xdb,0x64,0x74,0x14,0x92,0x0c,0x48,0xb8,0x9f,0xbd,0x43,0xc4,0x39,0x31,0xd3,0xf2
.byte 0xd5,0x8b,0x6e,0xda,0x01,0xb1,0x9c,0x49,0xd8,0xac,0xf3,0xcf,0xca,0xf4,0x47,0x10
.byte 0x6f,0xf0,0x4a,0x5c,0x38,0x57,0x73,0x97,0xcb,0xa1,0xe8,0x3e,0x96,0x61,0x0d,0x0f
.byte 0xe0,0x7c,0x71,0xcc,0x90,0x06,0xf7,0x1c,0xc2,0x6a,0xae,0x69,0x17,0x99,0x3a,0x27
.byte 0xd9,0xeb,0x2b,0x22,0xd2,0xa9,0x07,0x33,0x2d,0x3c,0x15,0xc9,0x87,0xaa,0x50,0xa5
.byte 0x03,0x59,0x09,0x1a,0x65,0xd7,0x84,0xd0,0x82,0x29,0x5a,0x1e,0x7b,0xa8,0x6d,0x2c
.size sbox02, .-sbox02
// 256-byte LUT: the AES inverse S-box (InvSubBytes) — the first entries
// 0x52,0x09,0x6a,0xd5 match the FIPS-197 InvSbox table.
isbox:
.byte 0x52,0x09,0x6a,0xd5,0x30,0x36,0xa5,0x38,0xbf,0x40,0xa3,0x9e,0x81,0xf3,0xd7,0xfb
.byte 0x7c,0xe3,0x39,0x82,0x9b,0x2f,0xff,0x87,0x34,0x8e,0x43,0x44,0xc4,0xde,0xe9,0xcb
.byte 0x54,0x7b,0x94,0x32,0xa6,0xc2,0x23,0x3d,0xee,0x4c,0x95,0x0b,0x42,0xfa,0xc3,0x4e
.byte 0x08,0x2e,0xa1,0x66,0x28,0xd9,0x24,0xb2,0x76,0x5b,0xa2,0x49,0x6d,0x8b,0xd1,0x25
.byte 0x72,0xf8,0xf6,0x64,0x86,0x68,0x98,0x16,0xd4,0xa4,0x5c,0xcc,0x5d,0x65,0xb6,0x92
.byte 0x6c,0x70,0x48,0x50,0xfd,0xed,0xb9,0xda,0x5e,0x15,0x46,0x57,0xa7,0x8d,0x9d,0x84
.byte 0x90,0xd8,0xab,0x00,0x8c,0xbc,0xd3,0x0a,0xf7,0xe4,0x58,0x05,0xb8,0xb3,0x45,0x06
.byte 0xd0,0x2c,0x1e,0x8f,0xca,0x3f,0x0f,0x02,0xc1,0xaf,0xbd,0x03,0x01,0x13,0x8a,0x6b
.byte 0x3a,0x91,0x11,0x41,0x4f,0x67,0xdc,0xea,0x97,0xf2,0xcf,0xce,0xf0,0xb4,0xe6,0x73
.byte 0x96,0xac,0x74,0x22,0xe7,0xad,0x35,0x85,0xe2,0xf9,0x37,0xe8,0x1c,0x75,0xdf,0x6e
.byte 0x47,0xf1,0x1a,0x71,0x1d,0x29,0xc5,0x89,0x6f,0xb7,0x62,0x0e,0xaa,0x18,0xbe,0x1b
.byte 0xfc,0x56,0x3e,0x4b,0xc6,0xd2,0x79,0x20,0x9a,0xdb,0xc0,0xfe,0x78,0xcd,0x5a,0xf4
.byte 0x1f,0xdd,0xa8,0x33,0x88,0x07,0xc7,0x31,0xb1,0x12,0x10,0x59,0x27,0x80,0xec,0x5f
.byte 0x60,0x51,0x7f,0xa9,0x19,0xb5,0x4a,0x0d,0x2d,0xe5,0x7a,0x9f,0x93,0xc9,0x9c,0xef
.byte 0xa0,0xe0,0x3b,0x4d,0xae,0x2a,0xf5,0xb0,0xc8,0xeb,0xbb,0x3c,0x83,0x53,0x99,0x61
.byte 0x17,0x2b,0x04,0x7e,0xba,0x77,0xd6,0x26,0xe1,0x69,0x14,0x63,0x55,0x21,0x0c,0x7d
// 256-byte LUT: InvSbox[x] multiplied by 0x0e in GF(2^8) — entry 0 is
// 0x51 = 0x0e * 0x52. 0x0e is an InvMixColumns coefficient (FIPS-197),
// so this is a combined InvSubBytes/InvMixColumns table.
isbox0e:
.byte 0x51,0x7e,0x1a,0x3a,0x3b,0x1f,0xac,0x4b,0x20,0xad,0x88,0xf5,0x4f,0xc5,0x26,0xb5
.byte 0xde,0x25,0x45,0x5d,0xc3,0x81,0x8d,0x6b,0x03,0x15,0xbf,0x95,0xd4,0x58,0x49,0x8e
.byte 0x75,0xf4,0x99,0x27,0xbe,0xf0,0xc9,0x7d,0x63,0xe5,0x97,0x62,0xb1,0xbb,0xfe,0xf9
.byte 0x70,0x8f,0x94,0x52,0xab,0x72,0xe3,0x66,0xb2,0x2f,0x86,0xd3,0x30,0x23,0x02,0xed
.byte 0x8a,0xa7,0xf3,0x4e,0x65,0x06,0xd1,0xc4,0x34,0xa2,0x05,0xa4,0x0b,0x40,0x5e,0xbd
.byte 0x3e,0x96,0xdd,0x4d,0x91,0x71,0x04,0x60,0x19,0xd6,0x89,0x67,0xb0,0x07,0xe7,0x79
.byte 0xa1,0x7c,0xf8,0x00,0x09,0x32,0x1e,0x6c,0xfd,0x0f,0x3d,0x36,0x0a,0x68,0x9b,0x24
.byte 0x0c,0x93,0xb4,0x1b,0x80,0x61,0x5a,0x1c,0xe2,0xc0,0x3c,0x12,0x0e,0xf2,0x2d,0x14
.byte 0x57,0xaf,0xee,0xa3,0xf7,0x5c,0x44,0x5b,0x8b,0xcb,0xb6,0xb8,0xd7,0x42,0x13,0x84
.byte 0x85,0xd2,0xae,0xc7,0x1d,0xdc,0x0d,0x77,0x2b,0xa9,0x11,0x47,0xa8,0xa0,0x56,0x22
.byte 0x87,0xd9,0x8c,0x98,0xa6,0xa5,0xda,0x3f,0x2c,0x50,0x6a,0x54,0xf6,0x90,0x2e,0x82
.byte 0x9f,0x69,0x6f,0xcf,0xc8,0x10,0xe8,0xdb,0xcd,0x6e,0xec,0x83,0xe6,0xaa,0x21,0xef
.byte 0xba,0x4a,0xea,0x29,0x31,0x2a,0xc6,0x35,0x74,0xfc,0xe0,0x33,0xf1,0x41,0x7f,0x17
.byte 0x76,0x43,0xcc,0xe4,0x9e,0x4c,0xc1,0x46,0x9d,0x01,0xfa,0xfb,0xb3,0x92,0xe9,0x6d
.byte 0x9a,0x37,0x59,0xeb,0xce,0xb7,0xe1,0x7a,0x9c,0x55,0x18,0x73,0x53,0x5f,0xdf,0x78
.byte 0xca,0xb9,0x38,0xc2,0x16,0xbc,0x28,0xff,0x39,0x08,0xd8,0x64,0x7b,0xd5,0x48,0xd0
// 256-byte LUT: InvSbox[x] multiplied by 0x09 in GF(2^8) — entry 0 is
// 0xf4 = 0x09 * 0x52. 0x09 is an InvMixColumns coefficient (FIPS-197).
isbox09:
.byte 0xf4,0x41,0x17,0x27,0xab,0x9d,0xfa,0xe3,0x30,0x76,0xcc,0x02,0xe5,0x2a,0x35,0x62
.byte 0xb1,0xba,0xea,0xfe,0x2f,0x4c,0x46,0xd3,0x8f,0x92,0x6d,0x52,0xbe,0x74,0xe0,0xc9
.byte 0xc2,0x8e,0x58,0xb9,0xe1,0x88,0x20,0xce,0xdf,0x1a,0x51,0x53,0x64,0x6b,0x81,0x08
.byte 0x48,0x45,0xde,0x7b,0x73,0x4b,0x1f,0x55,0xeb,0xb5,0xc5,0x37,0x28,0xbf,0x03,0x16
.byte 0xcf,0x79,0x07,0x69,0xda,0x05,0x34,0xa6,0x2e,0xf3,0x8a,0xf6,0x83,0x60,0x71,0x6e
.byte 0x21,0xdd,0x3e,0xe6,0x54,0xc4,0x06,0x50,0x98,0xbd,0x40,0xd9,0xe8,0x89,0x19,0xc8
.byte 0x7c,0x42,0x84,0x00,0x80,0x2b,0x11,0x5a,0x0e,0x85,0xae,0x2d,0x0f,0x5c,0x5b,0x36
.byte 0x0a,0x57,0xee,0x9b,0xc0,0xdc,0x77,0x12,0x93,0xa0,0x22,0x1b,0x09,0x8b,0xb6,0x1e
.byte 0xf1,0x75,0x99,0x7f,0x01,0x72,0x66,0xfb,0x43,0x23,0xed,0xe4,0x31,0x63,0x97,0xc6
.byte 0x4a,0xbb,0xf9,0x29,0x9e,0xb2,0x86,0xc1,0xb3,0x70,0x94,0xe9,0xfc,0xf0,0x7d,0x33
.byte 0x49,0x38,0xca,0xd4,0xf5,0x7a,0xb7,0xad,0x3a,0x78,0x5f,0x7e,0x8d,0xd8,0x39,0xc3
.byte 0x5d,0xd0,0xd5,0x25,0xac,0x18,0x9c,0x3b,0x26,0x59,0x9a,0x4f,0x95,0xff,0xbc,0x15
.byte 0xe7,0x6f,0x9f,0xb0,0xa4,0x3f,0xa5,0xa2,0x4e,0x82,0x90,0xa7,0x04,0xec,0xcd,0x91
.byte 0x4d,0xef,0xaa,0x96,0xd1,0x6a,0x2c,0x65,0x5e,0x8c,0x87,0x0b,0x67,0xdb,0x10,0xd6
.byte 0xd7,0xa1,0xf8,0x13,0xa9,0x61,0x1c,0x47,0xd2,0xf2,0x14,0xc7,0xf7,0xfd,0x3d,0x44
.byte 0xaf,0x68,0x24,0xa3,0x1d,0xe2,0x3c,0x0d,0xa8,0x0c,0xb4,0x56,0xcb,0x32,0x6c,0xb8
// 256-byte LUT: InvSbox[x] multiplied by 0x0d in GF(2^8) — entry 0 is
// 0xa7 = 0x0d * 0x52. 0x0d is an InvMixColumns coefficient (FIPS-197).
isbox0d:
.byte 0xa7,0x65,0xa4,0x5e,0x6b,0x45,0x58,0x03,0xfa,0x6d,0x76,0x4c,0xd7,0xcb,0x44,0xa3
.byte 0x5a,0x1b,0x0e,0xc0,0x75,0xf0,0x97,0xf9,0x5f,0x9c,0x7a,0x59,0x83,0x21,0x69,0xc8
.byte 0x89,0x79,0x3e,0x71,0x4f,0xad,0xac,0x3a,0x4a,0x31,0x33,0x7f,0x77,0xae,0xa0,0x2b
.byte 0x68,0xfd,0x6c,0xf8,0xd3,0x02,0x8f,0xab,0x28,0xc2,0x7b,0x08,0x87,0xa5,0x6a,0x82
.byte 0x1c,0xb4,0xf2,0xe2,0xf4,0xbe,0x62,0xfe,0x53,0x55,0xe1,0xeb,0xec,0xef,0x9f,0x10
.byte 0x8a,0x06,0x05,0xbd,0x8d,0x5d,0xd4,0x15,0xfb,0xe9,0x43,0x9e,0x42,0x8b,0x5b,0xee
.byte 0x0a,0x0f,0x1e,0x00,0x86,0xed,0x70,0x72,0xff,0x38,0xd5,0x39,0xd9,0xa6,0x54,0x2e
.byte 0x67,0xe7,0x96,0x91,0xc5,0x20,0x4b,0x1a,0xba,0x2a,0xe0,0x17,0x0d,0xc7,0xa8,0xa9
.byte 0x19,0x07,0xdd,0x60,0x26,0xf5,0x3b,0x7e,0x29,0xc6,0xfc,0xf1,0xdc,0x85,0x22,0x11
.byte 0x24,0x3d,0x32,0xa1,0x2f,0x30,0x52,0xe3,0x16,0xb9,0x48,0x64,0x8c,0x3f,0x2c,0x90
.byte 0x4e,0xd1,0xa2,0x0b,0x81,0xde,0x8e,0xbf,0x9d,0x92,0xcc,0x46,0x13,0xb8,0xf7,0xaf
.byte 0x80,0x93,0x2d,0x12,0x99,0x7d,0x63,0xbb,0x78,0x18,0xb7,0x9a,0x6e,0xe6,0xcf,0xe8
.byte 0x9b,0x36,0x09,0x7c,0xb2,0x23,0x94,0x66,0xbc,0xca,0xd0,0xd8,0x98,0xda,0x50,0xf6
.byte 0xd6,0xb0,0x4d,0x04,0xb5,0x88,0x1f,0x51,0xea,0x35,0x74,0x41,0x1d,0xd2,0x56,0x47
.byte 0x61,0x0c,0x14,0x3c,0x27,0xc9,0xe5,0xb1,0xdf,0x73,0xce,0x37,0xcd,0xaa,0x6f,0xdb
.byte 0xf3,0xc4,0x34,0x40,0xc3,0x25,0x49,0x95,0x01,0xb3,0xe4,0xc1,0x84,0xb6,0x5c,0x57
// 256-byte LUT: InvSbox[x] multiplied by 0x0b in GF(2^8) — entry 0 is
// 0x50 = 0x0b * 0x52. 0x0b is an InvMixColumns coefficient (FIPS-197).
isbox0b:
.byte 0x50,0x53,0xc3,0x96,0xcb,0xf1,0xab,0x93,0x55,0xf6,0x91,0x25,0xfc,0xd7,0x80,0x8f
.byte 0x49,0x67,0x98,0xe1,0x02,0x12,0xa3,0xc6,0xe7,0x95,0xeb,0xda,0x2d,0xd3,0x29,0x44
.byte 0x6a,0x78,0x6b,0xdd,0xb6,0x17,0x66,0xb4,0x18,0x82,0x60,0x45,0xe0,0x84,0x1c,0x94
.byte 0x58,0x19,0x87,0xb7,0x23,0xe2,0x57,0x2a,0x07,0x03,0x9a,0xa5,0xf2,0xb2,0xba,0x5c
.byte 0x2b,0x92,0xf0,0xa1,0xcd,0xd5,0x1f,0x8a,0x9d,0xa0,0x32,0x75,0x39,0xaa,0x06,0x51
.byte 0xf9,0x3d,0xae,0x46,0xb5,0x05,0x6f,0xff,0x24,0x97,0xcc,0x77,0xbd,0x88,0x38,0xdb
.byte 0x47,0xe9,0xc9,0x00,0x83,0x48,0xac,0x4e,0xfb,0x56,0x1e,0x27,0x64,0x21,0xd1,0x3a
.byte 0xb1,0x0f,0xd2,0x9e,0x4f,0xa2,0x69,0x16,0x0a,0xe5,0x43,0x1d,0x0b,0xad,0xb9,0xc8
.byte 0x85,0x4c,0xbb,0xfd,0x9f,0xbc,0xc5,0x34,0x76,0xdc,0x68,0x63,0xca,0x10,0x40,0x20
.byte 0x7d,0xf8,0x11,0x6d,0x4b,0xf3,0xec,0xd0,0x6c,0x99,0xfa,0x22,0xc4,0x1a,0xd8,0xef
.byte 0xc7,0xc1,0xfe,0x36,0xcf,0x28,0x26,0xa4,0xe4,0x0d,0x9b,0x62,0xc2,0xe8,0x5e,0xf5
.byte 0xbe,0x7c,0xa9,0xb3,0x3b,0xa7,0x6e,0x7b,0x09,0xf4,0x01,0xa8,0x65,0x7e,0x08,0xe6
.byte 0xd9,0xce,0xd4,0xd6,0xaf,0x31,0x30,0xc0,0x37,0xa6,0xb0,0x15,0x4a,0xf7,0x0e,0x2f
.byte 0x8d,0x4d,0x54,0xdf,0xe3,0x1b,0xb8,0x7f,0x04,0x5d,0x73,0x2e,0x5a,0x52,0x33,0x13
.byte 0x8c,0x7a,0x8e,0x89,0xee,0x35,0xed,0x3c,0x59,0x3f,0x79,0xbf,0xea,0x5b,0x14,0x86
.byte 0x81,0x3e,0x2c,0x5f,0x72,0x0c,0x8b,0x41,0x71,0xde,0x9c,0x90,0x61,0x70,0x74,0x42
|
aadomn/cymric
| 25,378
|
src/cymric-aes128/armv7m/aes_encrypt.s
|
/******************************************************************************
* Assembly fixsliced implementation of AES-128 and AES-256 (encryption only).
*
* Fully-fixsliced implementation runs faster than the semi-fixsliced variant
* at the cost of a larger code size.
*
* See the paper at https://eprint.iacr.org/2020/1123.pdf for more details.
*
* @author Alexandre Adomnicai, Nanyang Technological University, Singapore
* alexandre.adomnicai@ntu.edu.sg
*
* @date October 2020
******************************************************************************/
.syntax unified
.thumb
/******************************************************************************
* Macro to compute the SWAPMOVE technique: swap the bits in 'in1' masked by 'm'
* by the bits in 'in0' masked by 'm << n' and put the results in 'out0', 'out1'
* Clobbers 'tmp'. Aliasing out0=in0 and out1=in1 (the common in-place use) is
* safe; 'out1' must NOT alias 'in0' because 'in0' is still read on the last
* line after 'out1' has been written.
******************************************************************************/
.macro swpmv out0, out1, in0, in1, m, n, tmp
eor \tmp, \in1, \in0, lsr \n // tmp <- (in1 ^ (in0 >> n)) ...
and \tmp, \m // ... & m = bits to be exchanged
eor \out1, \in1, \tmp // out1 <- in1 with swapped bits
eor \out0, \in0, \tmp, lsl \n // out0 <- in0 with swapped bits
.endm
/******************************************************************************
* Rotate all bytes in 'in' by 'n0' bits to the rights and put the results in
* 'out'. 'm' refers to the appropriate bitmask and 'n1' = 8-'n0'.
* 'm' must hold (0xff >> n1) replicated in every byte (e.g. 0x03030303 for
* n0=6): the first line keeps the n0 bits rotated down within each byte, the
* bic/orr pair moves the remaining n1 bits up. Clobbers 'tmp'; 'out' must not
* alias 'in' or 'tmp' since 'in' is read again after 'out' is written.
******************************************************************************/
.macro byteror out, in, m, n0, n1, tmp
and \out, \m, \in, lsr \n0 // out <- (in >> n0) & m (low bits of each byte)
bic \tmp, \in, \m, ror \n1 // tmp <- in with each byte's top n0 bits cleared
orr \out, \out, \tmp, lsl \n1 // out <- per-byte rotate-right of in by n0
.endm
/******************************************************************************
* Compute the MixColumns for rounds i st i%4 == 0 or 2.
* Between the two versions, only the masks and the shifts for the 'byteror' are
* differing.
* Register contract (per the inline comments below): input state in r1=S0,
* r3=S1, r6=S2, r7=S3, r8=S4, r0=S5, r2=S6, r11=S7; output state in r4=S'0,
* r5=S'1, r6=S'2, r7=S'3, r8=S'4, r9=S'5, r10=S'6, r11=S'7. r1 is
* overwritten with the 0x0f0f0f0f nibble-rotate mask; r0, r2, r5, r9 and r14
* are used as scratch. Parameters n2/n3 are unused in this macro body.
******************************************************************************/
.macro mc_0_2 m, n0, n1, n2, n3
byteror r14, r1, \m, \n0, \n1, r9 // r14 <- BYTE_ROR_n0(S0)
eor r4, r1, r14, ror #8 // r4 <- S0 ^ (BYTE_ROR_6(S0) >>> 8)
movw r1, #0x0f0f
movt r1, #0x0f0f // r1 <- 0x0f0f0f0f (for BYTE_ROR)
byteror r5, r11, \m, \n0, \n1, r9 // r5 <- BYTE_ROR_n0(S7)
eor r10, r11, r5, ror #8 // r10<- S7 ^ BYTE_ROR_n0(S7 >>> 8)
byteror r11, r10, r1, 4, 4, r9 // r11<- BYTE_ROR_4(r10)
eor r11, r4, r11, ror #16 // r11<- BYTE_ROR_4(r10) ^ (r10 >>> 16)
eor r11, r11, r5, ror #8 // r11<- S'7
byteror r5, r2, \m, \n0, \n1, r9 // r5 <- BYTE_ROR_n0(S6)
eor r2, r2, r5, ror #8 // r2 <- S6 ^ BYTE_ROR_n0(S6 >>> 8)
eor r10, r10, r5, ror #8 // r10<- r10 ^ (BYTE_ROR_n0(S6) >>> 8)
byteror r5, r2, r1, 4, 4, r9 // r5 <- BYTE_ROR_4(r2)
eor r10, r10, r5, ror #16 // r10<- r10 ^ (r5 >>> 16)
eor r10, r10, r4 // r10<- S'6
byteror r5, r0, \m, \n0, \n1, r9 // r5 <- BYTE_ROR_n0(S5)
eor r0, r0, r5, ror #8 // r0 <- S5 ^ BYTE_ROR_6(S5 >>> 8)
eor r9, r2, r5, ror #8 // r9 <- r2 ^ (BYTE_ROR_n0(S5) >>> 8)
byteror r5, r0, r1, 4, 4, r2 // r5 <- BYTE_ROR_4(r0)
eor r9, r9, r5, ror #16 // r9 <- S'5
byteror r5, r8, \m, \n0, \n1, r2 // r5 <- BYTE_ROR_n0(S4)
eor r2, r8, r5, ror #8 // r2 <- S4 ^ BYTE_ROR_6(S4 >>> 8)
eor r8, r0, r5, ror #8 // r8 <- r0 ^ (BYTE_ROR_n0(S4) >>> 8)
byteror r5, r2, r1, 4, 4, r0 // r5 <- BYTE_ROR_4(r2)
eor r8, r8, r5, ror #16 // r8 <- r8 ^ (r5 >>> 16)
eor r8, r8, r4 // r8 <- S'4
byteror r5, r7, \m, \n0, \n1, r0 // r5 <- BYTE_ROR_n0(S3)
eor r0, r7, r5, ror #8 // r0 <- S3 ^ BYTE_ROR_6(S3 >>> 8)
eor r7, r2, r5, ror #8 // r7 <- r2 ^ (BYTE_ROR_n0(S3) >>> 8)
byteror r5, r0, r1, 4, 4, r2 // r5 <- BYTE_ROR_4(r0)
eor r7, r7, r5, ror #16 // r7 <- r7 ^ (r5 >>> 16)
eor r7, r7, r4 // r7 <- S'3
byteror r5, r6, \m, \n0, \n1, r2 // r5 <- BYTE_ROR_n0(S2)
eor r2, r6, r5, ror #8 // r2 <- S2 ^ BYTE_ROR_6(S2 >>> 8)
eor r6, r0, r5, ror #8 // r6 <- r0 ^ (BYTE_ROR_n0(S2) >>> 8)
byteror r5, r2, r1, 4, 4, r0 // r5 <- BYTE_ROR_4(r2)
eor r6, r6, r5, ror #16 // r6 <- S'2
byteror r5, r3, \m, \n0, \n1, r0 // r5 <- BYTE_ROR_n0(S1)
eor r0, r3, r5, ror #8 // r0 <- S1 ^ BYTE_ROR_6(S1 >>> 8)
eor r3, r2, r5, ror #8 // r3 <- r0 ^ (BYTE_ROR_n0(S1) >>> 8)
byteror r5, r0, r1, 4, 4, r2 // r5 <- BYTE_ROR_4(r0)
eor r5, r3, r5, ror #16 // r5 <- S'1
eor r14, r0, r14, ror #8 // r14<- r0 ^ (BYTE_ROR_n0(S0) >>> 8)
byteror r0, r4, r1, 4, 4, r2 // r0 <- BYTE_ROR_4(r4)
eor r4, r14, r0, ror #16 // r4 <- S'0
.endm
/******************************************************************************
* Packs two 128-bit input blocs stored in r4-r7 and r8-r11, respectively, into
* the 256-bit internal state where the bits are packed as follows:
* r4 = b_24 b_56 b_88 b_120 || ... || b_0 b_32 b_64 b_96
* r5 = b_25 b_57 b_89 b_121 || ... || b_1 b_33 b_65 b_97
* r6 = b_26 b_58 b_90 b_122 || ... || b_2 b_34 b_66 b_98
* r7 = b_27 b_59 b_91 b_123 || ... || b_3 b_35 b_67 b_99
* r8 = b_28 b_60 b_92 b_124 || ... || b_4 b_36 b_68 b_100
* r9 = b_29 b_61 b_93 b_125 || ... || b_5 b_37 b_69 b_101
* r10 = b_30 b_62 b_94 b_126 || ... || b_6 b_38 b_70 b_102
* r11 = b_31 b_63 b_95 b_127 || ... || b_7 b_39 b_71 b_103
* Plain subroutine (returns with 'bx lr'); clobbers r0-r3 and r12, which hold
* the SWAPMOVE masks and temporaries.
******************************************************************************/
.align 2
packing:
movw r3, #0x0f0f
movt r3, #0x0f0f // r3 <- 0x0f0f0f0f (mask for SWAPMOVE)
eor r2, r3, r3, lsl #2 // r2 <- 0x33333333 (mask for SWAPMOVE)
eor r1, r2, r2, lsl #1 // r1 <- 0x55555555 (mask for SWAPMOVE)
swpmv r8, r4, r8, r4, r1, #1, r12
swpmv r9, r5, r9, r5, r1, #1, r12
swpmv r10, r6, r10, r6, r1, #1, r12
swpmv r11, r7, r11, r7, r1, #1, r12
swpmv r0, r4, r5, r4, r2, #2, r12
swpmv r9, r5, r9, r8, r2, #2, r12
swpmv r7, r8, r7, r6, r2, #2, r12
swpmv r11, r2, r11, r10, r2, #2, r12
swpmv r8, r4, r8, r4, r3, #4, r12
swpmv r10, r6, r7, r0, r3, #4, r12
swpmv r11, r7, r11, r9, r3, #4, r12
swpmv r9, r5, r2, r5, r3, #4, r12
bx lr
/******************************************************************************
* Unpacks the 256-bit internal state in two 128-bit blocs.
* Inverse of 'packing': rebuilds the two blocks into r4-r7 and r8-r11 by
* applying the SWAPMOVE layers in reverse order. Plain subroutine (returns
* with 'bx lr'); clobbers r1-r3 and r12.
******************************************************************************/
.align 2
unpacking:
movw r3, #0x0f0f
movt r3, #0x0f0f // r3 <- 0x0f0f0f0f (mask for SWAPMOVE)
swpmv r2, r5, r9, r5, r3, #4, r12
swpmv r11, r9, r11, r7, r3, #4, r12
swpmv r7, r1, r10, r6, r3, #4, r12
swpmv r8, r4, r8, r4, r3, #4, r12
eor r3, r3, r3, lsl #2 // r3 <- 0x33333333 (mask for SWAPMOVE)
swpmv r11, r10,r11, r2, r3, #2, r12
swpmv r7, r6, r7, r8, r3, #2, r12
swpmv r9, r8, r9, r5, r3, #2, r12
swpmv r5, r4, r1, r4, r3, #2, r12
eor r1, r3, r3, lsl #1 // r1 <- 0x55555555 (mask for SWAPMOVE)
swpmv r8, r4, r8, r4, r1, #1, r12
swpmv r9, r5,r9, r5, r1, #1, r12
swpmv r10, r6, r10, r6, r1, #1, r12
swpmv r11, r7, r11, r7, r1, #1, r12
bx lr
/******************************************************************************
* Subroutine that computes the AddRoundKey and the S-box.
* Credits to https://github.com/Ko-/aes-armcortexm for the S-box implementation
* In:  bitsliced state in r4-r11; round-key pointer at [sp, #48] (read,
*      advanced by 32 bytes and written back); lr is parked at [sp, #52];
*      [sp, #0..#44] is used as scratch for intermediate S-box signals.
* Out: S-box outputs in r0=S5, r1=S0, r2=S6, r3=S1, r6=S2, r7=S3, r8=S4,
*      r11=S7 (see the register map after the routine). The '^ 1' terms in
*      the comments mark NOT gates omitted here; per the key-schedule code in
*      this repo, they are absorbed into the (complemented) round keys.
******************************************************************************/
.align 2
ark_sbox:
// add round key: state ^= rk[0..7], pointer advanced by 32 bytes
ldr.w r1, [sp, #48]
ldmia r1!, {r0,r2,r3,r12}
eor r4, r0
eor r5, r2
eor r6, r3
eor r7, r12
ldmia r1!, {r0,r2,r3,r12}
eor r8, r0
eor r9, r2
eor r10, r3
eor r11, r12
str.w r1, [sp, #48] // save the advanced round-key pointer
str r14, [sp, #52] // park lr (r14 is reused as scratch below)
// sbox: credits to https://github.com/Ko-/aes-armcortexm
eor r1, r7, r9 //Exec y14 = U3 ^ U5; into r1
eor r3, r4, r10 //Exec y13 = U0 ^ U6; into r3
eor r2, r3, r1 //Exec y12 = y13 ^ y14; into r2
eor r0, r8, r2 //Exec t1 = U4 ^ y12; into r0
eor r14, r0, r9 //Exec y15 = t1 ^ U5; into r14
and r12, r2, r14 //Exec t2 = y12 & y15; into r12
eor r8, r14, r11 //Exec y6 = y15 ^ U7; into r8
eor r0, r0, r5 //Exec y20 = t1 ^ U1; into r0
str.w r2, [sp, #44] //Store r2/y12 on stack
eor r2, r4, r7 //Exec y9 = U0 ^ U3; into r2
str r0, [sp, #40] //Store r0/y20 on stack
eor r0, r0, r2 //Exec y11 = y20 ^ y9; into r0
str r2, [sp, #36] //Store r2/y9 on stack
and r2, r2, r0 //Exec t12 = y9 & y11; into r2
str r8, [sp, #32] //Store r8/y6 on stack
eor r8, r11, r0 //Exec y7 = U7 ^ y11; into r8
eor r9, r4, r9 //Exec y8 = U0 ^ U5; into r9
eor r6, r5, r6 //Exec t0 = U1 ^ U2; into r6
eor r5, r14, r6 //Exec y10 = y15 ^ t0; into r5
str r14, [sp, #28] //Store r14/y15 on stack
eor r14, r5, r0 //Exec y17 = y10 ^ y11; into r14
str.w r1, [sp, #24] //Store r1/y14 on stack
and r1, r1, r14 //Exec t13 = y14 & y17; into r1
eor r1, r1, r2 //Exec t14 = t13 ^ t12; into r1
str r14, [sp, #20] //Store r14/y17 on stack
eor r14, r5, r9 //Exec y19 = y10 ^ y8; into r14
str.w r5, [sp, #16] //Store r5/y10 on stack
and r5, r9, r5 //Exec t15 = y8 & y10; into r5
eor r2, r5, r2 //Exec t16 = t15 ^ t12; into r2
eor r5, r6, r0 //Exec y16 = t0 ^ y11; into r5
str.w r0, [sp, #12] //Store r0/y11 on stack
eor r0, r3, r5 //Exec y21 = y13 ^ y16; into r0
str r3, [sp, #8] //Store r3/y13 on stack
and r3, r3, r5 //Exec t7 = y13 & y16; into r3
str r5, [sp, #4] //Store r5/y16 on stack
str r11, [sp, #0] //Store r11/U7 on stack
eor r5, r4, r5 //Exec y18 = U0 ^ y16; into r5
eor r6, r6, r11 //Exec y1 = t0 ^ U7; into r6
eor r7, r6, r7 //Exec y4 = y1 ^ U3; into r7
and r11, r7, r11 //Exec t5 = y4 & U7; into r11
eor r11, r11, r12 //Exec t6 = t5 ^ t2; into r11
eor r11, r11, r2 //Exec t18 = t6 ^ t16; into r11
eor r14, r11, r14 //Exec t22 = t18 ^ y19; into r14
eor r4, r6, r4 //Exec y2 = y1 ^ U0; into r4
and r11, r4, r8 //Exec t10 = y2 & y7; into r11
eor r11, r11, r3 //Exec t11 = t10 ^ t7; into r11
eor r2, r11, r2 //Exec t20 = t11 ^ t16; into r2
eor r2, r2, r5 //Exec t24 = t20 ^ y18; into r2
eor r10, r6, r10 //Exec y5 = y1 ^ U6; into r10
and r11, r10, r6 //Exec t8 = y5 & y1; into r11
eor r3, r11, r3 //Exec t9 = t8 ^ t7; into r3
eor r3, r3, r1 //Exec t19 = t9 ^ t14; into r3
eor r3, r3, r0 //Exec t23 = t19 ^ y21; into r3
eor r0, r10, r9 //Exec y3 = y5 ^ y8; into r0
ldr r11, [sp, #32] //Load y6 into r11
and r5, r0, r11 //Exec t3 = y3 & y6; into r5
eor r12, r5, r12 //Exec t4 = t3 ^ t2; into r12
ldr r5, [sp, #40] //Load y20 into r5
str r7, [sp, #32] //Store r7/y4 on stack
eor r12, r12, r5 //Exec t17 = t4 ^ y20; into r12
eor r1, r12, r1 //Exec t21 = t17 ^ t14; into r1
and r12, r1, r3 //Exec t26 = t21 & t23; into r12
eor r5, r2, r12 //Exec t27 = t24 ^ t26; into r5
eor r12, r14, r12 //Exec t31 = t22 ^ t26; into r12
eor r1, r1, r14 //Exec t25 = t21 ^ t22; into r1
and r7, r1, r5 //Exec t28 = t25 & t27; into r7
eor r14, r7, r14 //Exec t29 = t28 ^ t22; into r14
and r4, r14, r4 //Exec z14 = t29 & y2; into r4
and r8, r14, r8 //Exec z5 = t29 & y7; into r8
eor r7, r3, r2 //Exec t30 = t23 ^ t24; into r7
and r12, r12, r7 //Exec t32 = t31 & t30; into r12
eor r12, r12, r2 //Exec t33 = t32 ^ t24; into r12
eor r7, r5, r12 //Exec t35 = t27 ^ t33; into r7
and r2, r2, r7 //Exec t36 = t24 & t35; into r2
eor r5, r5, r2 //Exec t38 = t27 ^ t36; into r5
and r5, r14, r5 //Exec t39 = t29 & t38; into r5
eor r1, r1, r5 //Exec t40 = t25 ^ t39; into r1
eor r5, r14, r1 //Exec t43 = t29 ^ t40; into r5
ldr.w r7, [sp, #4] //Load y16 into r7
and r7, r5, r7 //Exec z3 = t43 & y16; into r7
eor r8, r7, r8 //Exec tc12 = z3 ^ z5; into r8
str r8, [sp, #40] //Store r8/tc12 on stack
ldr r8, [sp, #8] //Load y13 into r8
and r8, r5, r8 //Exec z12 = t43 & y13; into r8
and r10, r1, r10 //Exec z13 = t40 & y5; into r10
and r6, r1, r6 //Exec z4 = t40 & y1; into r6
eor r6, r7, r6 //Exec tc6 = z3 ^ z4; into r6
eor r3, r3, r12 //Exec t34 = t23 ^ t33; into r3
eor r3, r2, r3 //Exec t37 = t36 ^ t34; into r3
eor r1, r1, r3 //Exec t41 = t40 ^ t37; into r1
ldr.w r5, [sp, #16] //Load y10 into r5
and r2, r1, r5 //Exec z8 = t41 & y10; into r2
and r9, r1, r9 //Exec z17 = t41 & y8; into r9
str r9, [sp, #16] //Store r9/z17 on stack
eor r5, r12, r3 //Exec t44 = t33 ^ t37; into r5
ldr r9, [sp, #28] //Load y15 into r9
ldr.w r7, [sp, #44] //Load y12 into r7
and r9, r5, r9 //Exec z0 = t44 & y15; into r9
and r7, r5, r7 //Exec z9 = t44 & y12; into r7
and r0, r3, r0 //Exec z10 = t37 & y3; into r0
and r3, r3, r11 //Exec z1 = t37 & y6; into r3
eor r3, r3, r9 //Exec tc5 = z1 ^ z0; into r3
eor r3, r6, r3 //Exec tc11 = tc6 ^ tc5; into r3
ldr r11, [sp, #32] //Load y4 into r11
ldr.w r5, [sp, #20] //Load y17 into r5
and r11, r12, r11 //Exec z11 = t33 & y4; into r11
eor r14, r14, r12 //Exec t42 = t29 ^ t33; into r14
eor r1, r14, r1 //Exec t45 = t42 ^ t41; into r1
and r5, r1, r5 //Exec z7 = t45 & y17; into r5
eor r6, r5, r6 //Exec tc8 = z7 ^ tc6; into r6
ldr r5, [sp, #24] //Load y14 into r5
str r4, [sp, #32] //Store r4/z14 on stack
and r1, r1, r5 //Exec z16 = t45 & y14; into r1
ldr r5, [sp, #12] //Load y11 into r5
ldr r4, [sp, #36] //Load y9 into r4
and r5, r14, r5 //Exec z6 = t42 & y11; into r5
eor r5, r5, r6 //Exec tc16 = z6 ^ tc8; into r5
and r4, r14, r4 //Exec z15 = t42 & y9; into r4
eor r14, r4, r5 //Exec tc20 = z15 ^ tc16; into r14
eor r4, r4, r1 //Exec tc1 = z15 ^ z16; into r4
eor r1, r0, r4 //Exec tc2 = z10 ^ tc1; into r1
eor r0, r1, r11 //Exec tc21 = tc2 ^ z11; into r0
eor r7, r7, r1 //Exec tc3 = z9 ^ tc2; into r7
eor r1, r7, r5 //Exec S0 = tc3 ^ tc16; into r1
eor r7, r7, r3 //Exec S3 = tc3 ^ tc11; into r7
eor r3, r7, r5 //Exec S1 = S3 ^ tc16 ^ 1; into r3
eor r11, r10, r4 //Exec tc13 = z13 ^ tc1; into r11
ldr.w r4, [sp, #0] //Load U7 into r4
and r12, r12, r4 //Exec z2 = t33 & U7; into r12
eor r9, r9, r12 //Exec tc4 = z0 ^ z2; into r9
eor r12, r8, r9 //Exec tc7 = z12 ^ tc4; into r12
eor r2, r2, r12 //Exec tc9 = z8 ^ tc7; into r2
eor r2, r6, r2 //Exec tc10 = tc8 ^ tc9; into r2
ldr.w r4, [sp, #32] //Load z14 into r4
eor r12, r4, r2 //Exec tc17 = z14 ^ tc10; into r12
eor r0, r0, r12 //Exec S5 = tc21 ^ tc17; into r0
eor r6, r12, r14 //Exec tc26 = tc17 ^ tc20; into r6
ldr.w r4, [sp, #16] //Load z17 into r4
ldr r12, [sp, #40] //Load tc12 into r12
eor r6, r6, r4 //Exec S2 = tc26 ^ z17 ^ 1; into r6
eor r12, r9, r12 //Exec tc14 = tc4 ^ tc12; into r12
eor r14, r11, r12 //Exec tc18 = tc13 ^ tc14; into r14
eor r2, r2, r14 //Exec S6 = tc10 ^ tc18 ^ 1; into r2
eor r11, r8, r14 //Exec S7 = z12 ^ tc18 ^ 1; into r11
ldr r14, [sp, #52] // restore link register
eor r8, r12, r7 //Exec S4 = tc14 ^ S3; into r8
bx lr
// [('r0', 'S5'), ('r1', 'S0'), ('r2', 'S6'), ('r3', 'S1'),
// ('r6', 'S2'),('r7', 'S3'), ('r8', 'S4'), ('r11', 'S7')]
/******************************************************************************
* Computation of the MixColumns transformation in the fixsliced representation.
* For fully-fixsliced implementations, it is used for rounds i s.t. (i%4) == 0.
* For semi-fixsliced implementations, it is used for rounds i s.t. (i%2) == 0.
* Thin wrapper around the 'mc_0_2' macro with per-byte mask 0x03030303 and
* rotate amounts (6, 2); register contract is that of 'mc_0_2'. lr is parked
* at [sp, #52] since the macro uses r14 as scratch.
******************************************************************************/
.align 2
mixcolumns_0:
str r14, [sp, #52] // store link register
movw r12, #0x0303
movt r12, #0x0303 // r12<- 0x03030303 (mask for BYTE_ROR)
mc_0_2 r12, 6, 2, 26, 18
ldr r14, [sp, #52] // restore link register
bx lr
/******************************************************************************
* Computation of the MixColumns transformation in the fixsliced representation.
* For fully-fixsliced implementations, it is used for rounds i s.t. (i%4) == 3.
* For semi-fixsliced implementations, it is used for rounds i s.t. (i%2) == 1.
* Based on Käsper-Schwabe, similar to https://github.com/Ko-/aes-armcortexm.
* Register contract (same as 'mc_0_2'): input r1=S0, r3=S1, r6=S2, r7=S3,
* r8=S4, r0=S5, r2=S6, r11=S7; output r4=S'0 .. r11=S'7. Clobbers r2, r12.
* Plain leaf subroutine (r14 untouched, returns with 'bx lr').
******************************************************************************/
.align 2
mixcolumns_3:
eor r12, r11, r11, ror #8 // r12<- S7 ^ (S7 >>> 8)
eor r4, r1, r1, ror #8 // r4 <- S0 ^ (S0 >>> 8)
eor r11, r4, r11, ror #8 // r11<- S0 ^ (S0 >>> 8) ^ (S7 >>> 8)
eor r11, r11, r12, ror #16 // r11<- r11 ^ (S7 >>> 16) ^ (S7 >>> 24)
eor r10, r12, r2, ror #8 // r10<- S7 ^ (S7 >>> 8) ^ (S6 >>> 8)
eor r12, r2, r2, ror #8 // r12<- S6 ^ (S6 >>> 8)
eor r10, r10, r12, ror #16 // r10<- r10 ^ (S6 >>> 16) ^ (S6 >>> 24)
eor r10, r4 // r10<- r10 ^ S0 ^ (S0 >>> 8)
eor r9, r12, r0, ror #8 // r9 <- S6 ^ (S6 >>> 8) ^ (S5 >>> 8)
eor r12, r0, r0, ror #8 // r12<- S5 ^ (S5 >>> 8)
eor r9, r9, r12, ror #16 // r9 <- r9 ^ (S5 >>> 16) ^ (S5 >>> 24)
eor r2, r8, r8, ror #8 // r2 <- S4 ^ (S4 >>> 8)
eor r8, r12, r8, ror #8 // r8 <- S5 ^ (S5 >>> 8) ^ (S4 >>> 8)
eor r8, r4 // r8 <- r8 ^ S0 ^ (S0 >>> 8)
eor r8, r8, r2, ror #16 // r8 <- r8 ^ (S4 >>> 16) ^ (S4 >>> 24)
eor r12, r7, r7, ror #8 // r12<- S3 ^ (S3 >>> 8)
eor r7, r2, r7, ror #8 // r7 <- S4 ^ (S4 >>> 8) ^ (S3 >>> 8)
eor r7, r4 // r7 <- r7 ^ S0 ^ (S0 >>> 8)
eor r7, r7, r12, ror #16 // r7 <- r7 ^ (S3 >>> 16) ^ (S3 >>> 24)
eor r2, r6, r6, ror #8 // r2 <- S2 ^ (S2 >>> 8)
eor r6, r12, r6, ror #8 // r6 <- S3 ^ (S3 >>> 8) ^ (S2 >>> 8)
eor r6, r6, r2, ror #16 // r6 <- r6 ^ (S2 >>> 16) ^ (S2 >>> 24)
eor r12, r3, r3, ror #8 // r12<- S1 ^ (S1 >>> 8)
eor r5, r2, r3, ror #8 // r5 <- S2 ^ (S2 >>> 8) ^ (S1 >>> 8)
eor r5, r5, r12, ror #16 // r5 <- r5 ^ (S1 >>> 16) ^ (S1 >>> 24)
eor r4, r12, r4, ror #16 // r4 <- S1 ^ (S1 >>> 8) ^ (r4 >>> 16)
eor r4, r4, r1, ror #8 // r4 <- r4 ^ (S0 >>> 8)
bx lr
/******************************************************************************
* Applies the ShiftRows transformation twice (i.e. SR^2) on the internal state.
* In the packed representation this is a self-SWAPMOVE on each of the S-box
* output registers (r0-r3, r6-r8, r11): the nibbles selected by 0x0f000f00
* are exchanged with those selected by its shift-by-4, 0xf000f000.
* Clobbers r10 (mask) and r12 (SWAPMOVE temporary); leaf, returns via bx lr.
******************************************************************************/
.align 2
double_shiftrows:
movw r10, #0x0f00
movt r10, #0x0f00 // r10<- 0x0f000f00 (mask)
swpmv r0, r0, r0, r0, r10, #4, r12
swpmv r1, r1, r1, r1, r10, #4, r12
swpmv r2, r2, r2, r2, r10, #4, r12
swpmv r3, r3, r3, r3, r10, #4, r12
swpmv r6, r6, r6, r6, r10, #4, r12
swpmv r7, r7, r7, r7, r10, #4, r12
swpmv r8, r8, r8, r8, r10, #4, r12
swpmv r11, r11, r11, r11, r10, #4, r12
bx lr
/******************************************************************************
* Semi-fixsliced implementation of AES-128.
*
* Two blocks are encrypted in parallel.
*
* Note that additional 4 bytes are allocated on the stack as the function takes
* 5 arguments as input.
*
* Stack layout: 14 pushed registers (56 bytes) + 56 bytes of scratch, so the
* 5th argument 'rkey' sits at [sp, #112] inside the body, and the saved
* 'ctext'/'ctext_bis' pointers (pushed r0/r1) are reloaded from [sp, #56].
* The final AddRoundKey is inlined below using the S-box output register map
* (r1=S0, r3=S1, r6=S2, r7=S3, r8=S4, r0=S5, r2=S6, r11=S7).
******************************************************************************/
@ void aes128_encrypt_sfs(u8* ctext, u8* ctext_bis, const u8* ptext,
@ const u8* ptext_bis, const u32* rkey);
.global aes128_encrypt_sfs
.type aes128_encrypt_sfs,%function
.align 2
aes128_encrypt_sfs:
push {r0-r12,r14}
sub.w sp, #56 // allow space on the stack for tmp var
ldr.w r4, [r2] // load the 1st 128-bit blocks in r4-r7
ldr r5, [r2, #4]
ldr r6, [r2, #8]
ldr r7, [r2, #12]
ldr.w r8, [r3] // load the 2nd 128-bit blocks in r8-r11
ldr r9, [r3, #4]
ldr r10,[r3, #8]
ldr r11,[r3, #12]
ldr.w r1, [sp, #112] // load 'rkey' argument from the stack
str.w r1, [sp, #48] // store it there for 'add_round_key'
bl packing // pack the 2 input blocks
bl ark_sbox // ark + sbox (round 0)
bl mixcolumns_0 // mixcolumns (round 0)
bl ark_sbox // ark + sbox (round 1)
bl double_shiftrows // to resynchronize with the classical rep
bl mixcolumns_3 // mixcolumns (round 1)
bl ark_sbox // ark + sbox (round 2)
bl mixcolumns_0 // mixcolumns (round 2)
bl ark_sbox // ark + sbox (round 3)
bl double_shiftrows // to resynchronize with the classical rep
bl mixcolumns_3 // mixcolumns (round 3)
bl ark_sbox // ark + sbox (round 4)
bl mixcolumns_0 // mixcolumns (round 4)
bl ark_sbox // ark + sbox (round 5)
bl double_shiftrows // to resynchronize with the classical rep
bl mixcolumns_3 // mixcolumns (round 5)
bl ark_sbox // ark + sbox (round 6)
bl mixcolumns_0 // mixcolumns (round 6)
bl ark_sbox // ark + sbox (round 7)
bl double_shiftrows // to resynchronize with the classical rep
bl mixcolumns_3 // mixcolumns (round 7)
bl ark_sbox // ark + sbox (round 8)
bl mixcolumns_0 // mixcolumns (round 8)
bl ark_sbox // ark + sbox (round 9)
bl double_shiftrows // to resynchronize with the classical rep
ldr r14, [sp, #48] // ---------------------------------------
ldmia r14!, {r4,r5,r10,r12} //
eor r4, r1 // r4 <- rk[0] ^ S0
eor r5, r3 // r5 <- rk[1] ^ S1
eor r6, r10 // r6 <- S2 ^ rk[2]
eor r7, r12 // Last add_round_key (r7 <- S3 ^ rk[3])
ldmia r14!, {r1,r3,r10,r12} //
eor r8, r1 // r8 <- S4 ^ rk[4]
eor r9, r0, r3 // r9 <- S5 ^ rk[5]
eor r10, r2 // r10<- rk[6] ^ S6
eor r11, r12 // r11<- S7 ^ rk[7] -----------------------
bl unpacking // unpack the internal state
ldrd r0, r1, [sp, #56] // restore the addr to store the ciphertext
add.w sp, #64 // restore the stack pointer
str.w r4, [r0] // store the ciphertext
str r5, [r0, #4]
str r6, [r0, #8]
str r7, [r0, #12]
str.w r8, [r1] // store the ciphertext
str r9, [r1, #4]
str r10,[r1, #8]
str r11,[r1, #12]
pop {r2-r12, r14} // restore context
bx lr
|
aadomn/cymric
| 11,458
|
src/cymric-aes128/armv7m/aes_keyschedule_lut.s
|
/******************************************************************************
* ARM assembly implementations of the AES-128 and AES-256 key schedules to
* match fixslicing.
* Note that those implementations rely on Look-Up Tables (LUT).
*
* See the paper at https://eprint.iacr.org/2020/1123.pdf for more details.
*
* @author Alexandre Adomnicai, Nanyang Technological University, Singapore
* alexandre.adomnicai@ntu.edu.sg
*
* @date August 2020
******************************************************************************/
.syntax unified
.thumb
/******************************************************************************
* LUT of the AES S-box.
* Stored as 64 little-endian 32-bit words (first word 0x7b777c63 = bytes
* 0x63,0x7c,0x77,0x7b, matching FIPS-197); read word-at-a-time by
* 'aes128_keyschedule_rfunc' which then shifts out the wanted byte.
******************************************************************************/
.align 2
.type AES_Sbox_compact,%object
AES_Sbox_compact:
.word 0x7b777c63, 0xc56f6bf2, 0x2b670130, 0x76abd7fe
.word 0x7dc982ca, 0xf04759fa, 0xafa2d4ad, 0xc072a49c
.word 0x2693fdb7, 0xccf73f36, 0xf1e5a534, 0x1531d871
.word 0xc323c704, 0x9a059618, 0xe2801207, 0x75b227eb
.word 0x1a2c8309, 0xa05a6e1b, 0xb3d63b52, 0x842fe329
.word 0xed00d153, 0x5bb1fc20, 0x39becb6a, 0xcf584c4a
.word 0xfbaaefd0, 0x85334d43, 0x7f02f945, 0xa89f3c50
.word 0x8f40a351, 0xf5389d92, 0x21dab6bc, 0xd2f3ff10
.word 0xec130ccd, 0x1744975f, 0x3d7ea7c4, 0x73195d64
.word 0xdc4f8160, 0x88902a22, 0x14b8ee46, 0xdb0b5ede
.word 0x0a3a32e0, 0x5c240649, 0x62acd3c2, 0x79e49591
.word 0x6d37c8e7, 0xa94ed58d, 0xeaf4566c, 0x08ae7a65
.word 0x2e2578ba, 0xc6b4a61c, 0x1f74dde8, 0x8a8bbd4b
.word 0x66b53e70, 0x0ef60348, 0xb9573561, 0x9e1dc186
.word 0x1198f8e1, 0x948ed969, 0xe9871e9b, 0xdf2855ce
.word 0x0d89a18c, 0x6842e6bf, 0x0f2d9941, 0x16bb54b0
/******************************************************************************
* Round function of the AES-128 key expansion.
* Note that it expects r2 to contain the corresponding round constant and r3 to
* contain the S-box address.
* In:  r4-r7 = the 4 current round-key words, r2 = rcon, r3 = sbox LUT addr.
* Out: r4-r7 = the next 4 round-key words, also pushed onto the stack
*      (push {r4-r7}) for later repacking by the caller.
* Clobbers r1, r8-r12. Computes rk[i] = rk[i-4] ^ SubWord(RotWord(rk[i-1]))
* ^ rcon, then chains r5 ^= r4, r6 ^= r5, r7 ^= r6.
* The LUT holds 32-bit words, so a lookup of byte b loads the aligned word at
* offset (b & 0xfc) and shifts it right by (b & 3)*8.
******************************************************************************/
.align 2
aes128_keyschedule_rfunc:
movw r1, #0xfc // word-offset mask: (b & 0xfc)
and r8, r1, r7, lsr #8 // offset for byte 1 of r7
and r9, r1, r7, lsr #16 // offset for byte 2 of r7
and r10, r1, r7, lsr #24 // offset for byte 3 of r7
and r11, r1, r7 // offset for byte 0 of r7
ldr r8, [r3, r8] // computes the sbox using the LUT
ldr r9, [r3, r9] // computes the sbox using the LUT
ldr r10, [r3, r10] // computes the sbox using the LUT
ldr r11, [r3, r11] // computes the sbox using the LUT
movw r1, #0x18 // within-word shift mask: ((b & 3) << 3)
and r12, r1, r7, lsr #5
lsr r8, r8, r12
and r8, #0xff // r8 <- Sbox(byte 1 of r7)
and r12, r1, r7, lsr #13
lsr r9, r9, r12
and r9, #0xff // r9 <- Sbox(byte 2 of r7)
and r12, r1, r7, lsr #21
lsr r10, r10, r12
and r10, #0xff // r10<- Sbox(byte 3 of r7)
and r12, r1, r7, lsl #3
lsr r11, r11, r12
and r11, #0xff // r11<- Sbox(byte 0 of r7)
eor r4, r2 // adds the first rconst
eor r4, r8 // xor the columns (1st sbox byte)
eor r4, r4, r9, ror #24 // xor the columns (2nd sbox byte)
eor r4, r4, r10, ror #16 // xor the columns (3rd sbox byte)
eor r4, r4, r11, ror #8 // xor the columns (4th sbox byte)
eor r5, r4 // xor the columns
eor r6, r5 // xor the columns
eor r7, r6 // xor the columns
push.w {r4-r7} // stash the new round key for later packing
bx lr
/******************************************************************************
* Packing routine. Note that it is the same as the one used in the encryption
* function so some code size could be saved by merging the two files.
* In:  the two 128-bit halves to pack in r4-r7 and r8-r11; SWAPMOVE masks
*      preloaded by the caller: r1 = 0x55555555, r2 = 0x33333333,
*      r3 = 0x0f0f0f0f.
* Out: 8 packed words stored descending via [r0, #-32]! (r0 is left
*      pre-decremented by 32, ready for the previous round key). Four of the
*      packed rows are complemented (mvn) so the round keys absorb the NOT
*      gates omitted in the encryption S-box. Clobbers r4-r12.
******************************************************************************/
.align 2
packing_rkey:
eor r12, r8, r8, lsr #1 // SWAPMOVE(r8, r4, 0x55555555, 1) ....
and r12, r1
eor r4, r8, r12
eor r8, r8, r12, lsl #1 // .... SWAPMOVE(r8, r4, 0x55555555, 1)
eor r12, r9, r9, lsr #1 // SWAPMOVE(r9, r5, 0x55555555, 1) ....
and r12, r1
eor r5, r9, r12
eor r9, r9, r12, lsl #1 // .... SWAPMOVE(r9, r5, 0x55555555, 1)
eor r12, r10, r10, lsr #1 // SWAPMOVE(r10, r6, 0x55555555, 1) ....
and r12, r1
eor r6, r10, r12
eor r10, r10, r12, lsl #1 // .... SWAPMOVE(r10, r6, 0x55555555, 1)
eor r12, r11, r11, lsr #1 // SWAPMOVE(r11, r7, 0x55555555, 1) ....
and r12, r1
eor r7, r11, r12
eor r11, r11, r12, lsl #1 // .... SWAPMOVE(r11, r7, 0x55555555, 1)
eor r12, r4, r5, lsr #2 // SWAPMOVE(r5, r4, 0x33333333, 2) ....
and r12, r2
eor r4, r12
eor r5, r5, r12, lsl #2 // .... SWAPMOVE(r5, r4, 0x33333333, 2)
eor r12, r8, r9, lsr #2 // SWAPMOVE(r9, r8, 0x33333333, 2) ....
and r12, r2
eor r8, r8, r12
eor r9, r9, r12, lsl #2 // .... SWAPMOVE(r9, r8, 0x33333333, 2)
eor r12, r6, r7, lsr #2 // SWAPMOVE(r7, r6, 0x33333333, 2) ....
and r12, r2
eor r6, r6, r12
eor r7, r7, r12, lsl #2 // .... SWAPMOVE(r7, r6, 0x33333333, 2)
eor r12, r10, r11, lsr #2 // SWAPMOVE(r11, r10, 0x33333333, 2) ....
and r12, r2
eor r10, r10, r12
eor r11, r11, r12, lsl #2 // .... SWAPMOVE(r11, r10, 0x33333333, 2)
eor r12, r4, r6, lsr #4 // SWAPMOVE(r6, r4, 0x0f0f0f0f, 4) ....
and r12, r3
eor r4, r12
eor r6, r6, r12, lsl #4 // .... SWAPMOVE(r6, r4, 0x0f0f0f0f,4)
eor r12, r5, r7, lsr #4 // SWAPMOVE(r7, r5, 0x0f0f0f0f, 4) ....
and r12, r3
eor r5, r5, r12
eor r7, r7, r12, lsl #4 // .... SWAPMOVE(r7, r5, 0x0f0f0f0f, 4)
eor r12, r8, r10, lsr #4 // SWAPMOVE(r10, r8, 0x0f0f0f0f, 4) ....
and r12, r3
eor r8, r8, r12
eor r10, r10, r12, lsl #4 // .... SWAPMOVE(r10,r8, 0x0f0f0f0f, 4)
eor r12, r9, r11, lsr #4 // SWAPMOVE(r11, r9, 0x0f0f0f0f, 4) ....
and r12, r3
eor r9, r12
eor r11, r11, r12, lsl #4 // .... SWAPMOVE(r11, r9, 0x0f0f0f0f, 4)
mvn r5, r5 // complement rows to absorb the S-box NOTs
mvn r8, r8
mvn r7, r7
mvn r11, r11
strd r7, r11, [r0, #-8]
strd r6, r10, [r0, #-16]
strd r5, r9, [r0, #-24]
strd r4, r8, [r0, #-32]! // store 8 words, r0 left decremented by 32
bx lr
/******************************************************************************
* Applies ShiftRows^(-1) on a round key to match fully/semi-fixslicing.
* In:  r4-r7 = the 4 round-key words (preserved).
* Out: r8-r11 where byte k of output word j is byte k of input word
*      (j - k) mod 4, i.e. the byte-wise inverse ShiftRows permutation.
* Clobbers r12; leaf subroutine, returns with 'bx lr'.
******************************************************************************/
.align 2
inv_shiftrows_1:
and r8, r4, #0xff // r8 <- bytes (r4[0], r7[1], r6[2], r5[3])
and r12, r7, #0xff00
orr r8, r8, r12
and r12, r6, #0xff0000
orr r8, r8, r12
and r12, r5, #0xff000000
orr r8, r8, r12
and r9, r5, #0xff // r9 <- bytes (r5[0], r4[1], r7[2], r6[3])
and r12, r4, #0xff00
orr r9, r9, r12
and r12, r7, #0xff0000
orr r9, r9, r12
and r12, r6, #0xff000000
orr r9, r9, r12
and r10, r6, #0xff // r10<- bytes (r6[0], r5[1], r4[2], r7[3])
and r12, r5, #0xff00
orr r10, r10, r12
and r12, r4, #0xff0000
orr r10, r10, r12
and r12, r7, #0xff000000
orr r10, r10, r12
and r11, r7, #0xff // r11<- bytes (r7[0], r6[1], r5[2], r4[3])
and r12, r6, #0xff00
orr r11, r11, r12
and r12, r5, #0xff0000
orr r11, r11, r12
and r12, r4, #0xff000000
orr r11, r11, r12
bx lr
/******************************************************************************
* Pre-computes all the round keys for a given encryption key, according to the
* semi-fixsliced (sfs) representation.
* Note that the round keys also include the NOTs omitted in the S-box.
* In: r0 = rkeys output buffer (11 round keys * 32 bytes), r1 = 16-byte key.
* NOTE(review): relies on 'aes128_keyschedule_rfunc' (defined elsewhere in
* this file) to leave each expanded round key on the stack; the pop.w {r4-r7}
* sequences below consume them in reverse order -- confirm against rfunc.
******************************************************************************/
@ void aes128_keyschedule_sfs_lut(u32* rkeys, const u8* key);
.global aes128_keyschedule_sfs_lut
.type aes128_keyschedule_sfs_lut,%function
.align 2
aes128_keyschedule_sfs_lut:
push {r1-r12,r14}
ldr.w r4, [r1] // load the encryption key
ldr r5, [r1, #4]
ldr r6, [r1, #8]
ldr r7, [r1, #12]
adr r3, AES_Sbox_compact // load the sbox LUT address in r3
movw r2, #0x01 // 1st rconst
bl aes128_keyschedule_rfunc // 1st round
movw r2, #0x02 // 2nd rconst
bl aes128_keyschedule_rfunc // 2nd round
movw r2, #0x04 // 3rd rconst
bl aes128_keyschedule_rfunc // 3rd round
movw r2, #0x08 // 4th rconst
bl aes128_keyschedule_rfunc // 4th round
movw r2, #0x10 // 5th rconst
bl aes128_keyschedule_rfunc // 5th round
movw r2, #0x20 // 6th rconst
bl aes128_keyschedule_rfunc // 6th round
movw r2, #0x40 // 7th rconst
bl aes128_keyschedule_rfunc // 7th round
movw r2, #0x80 // 8th rconst
bl aes128_keyschedule_rfunc // 8th round
movw r2, #0x1b // 9th rconst
bl aes128_keyschedule_rfunc // 9th round
movw r2, #0x36 // 10th rconst
bl aes128_keyschedule_rfunc // 10th round
//done expanding, now start bitslicing
//set r0 to end of rk, to be filled backwards (11 rkeys * 32 bytes = 352)
add r0, #352
movw r3, #0x0f0f
movt r3, #0x0f0f // r3 <- 0x0f0f0f0f (mask for SWAPMOVE)
eor r2, r3, r3, lsl #2 // r2 <- 0x33333333 (mask for SWAPMOVE)
eor r1, r2, r2, lsl #1 // r1 <- 0x55555555 (mask for SWAPMOVE)
pop.w {r4-r7} // pop next expanded round key from the stack
mov r8, r4
mov r9, r5
mov r10, r6
mov r11, r7
bl packing_rkey
pop.w {r4-r7}
bl inv_shiftrows_1 // odd rounds need ShiftRows^(-1) for semi-fixslicing
bl packing_rkey
pop.w {r4-r7}
mov r8, r4
mov r9, r5
mov r10, r6
mov r11, r7
bl packing_rkey
pop.w {r4-r7}
bl inv_shiftrows_1
bl packing_rkey
pop.w {r4-r7}
mov r8, r4
mov r9, r5
mov r10, r6
mov r11, r7
bl packing_rkey
pop.w {r4-r7}
bl inv_shiftrows_1
bl packing_rkey
pop.w {r4-r7}
mov r8, r4
mov r9, r5
mov r10, r6
mov r11, r7
bl packing_rkey
pop.w {r4-r7}
bl inv_shiftrows_1
bl packing_rkey
pop.w {r4-r7}
mov r8, r4
mov r9, r5
mov r10, r6
mov r11, r7
bl packing_rkey
pop.w {r4-r7}
bl inv_shiftrows_1
bl packing_rkey
ldr r12, [sp] // retrieve the original key pointer (pushed r1)
ldr.w r4, [r12] // load the encryption key
ldr r5, [r12, #4]
ldr r6, [r12, #8]
ldr r7, [r12, #12]
mov r8, r4
mov r9, r5
mov r10, r6
mov r11, r7
bl packing_rkey
mvn r5, r5 // cancels the NOT applied in 'packing_rkey'
mvn r8, r8 // cancels the NOT applied in 'packing_rkey'
mvn r7, r7 // cancels the NOT applied in 'packing_rkey'
mvn r11, r11 // cancels the NOT applied in 'packing_rkey'
strd r7, r11, [r0, #24] // restore after fix
strd r6, r10, [r0, #16] // restore after fix
strd r5, r9, [r0, #8] // restore after fix
strd r4, r8, [r0] // restore after fix
pop {r1-r12, r14} // restore context
bx lr
|
aadomn/cymric
| 15,730
|
src/cymric-gift128/avr8/gift128.S
|
/****************************************************************************
* AVR assembly implementation of fixsliced GIFT-128.
*
* @author Alexandre Adomnicai
*
* @date April 2025
****************************************************************************/
; Argument registers for function calls (avr-gcc convention: 1st arg in
; r25:r24, 2nd in r23:r22, 3rd in r21:r20; low byte in the even register)
#define ARG1 r24
#define ARG2 r22
#define ARG3 r20
/**
 * push_registers macro:
 *
 * Pushes a given range of registers in ascending order
 * To be called like: push_registers 0,15
 * (expands recursively, one 'push' per register)
 */
.macro push_registers from:req, to:req
push \from
.if \to-\from
push_registers "(\from+1)",\to
.endif
.endm
/**
 * pop_registers macro:
 *
 * Pops a given range of registers in descending order
 * To be called like: pop_registers 0,15
 * (mirror of push_registers, so push/pop pairs restore correctly)
 */
.macro pop_registers from:req, to:req
pop \to
.if \to-\from
pop_registers \from,"(\to-1)"
.endif
.endm
/**
 * sbox macro:
 *
 * Computes the S-box layer in a bitsliced manner on a quarter of the state
 * (GIFT S-box on the four slice bytes x0..x3). Clobbers r16.
 */
.macro sbox x0, x1, x2, x3
mov r16, \x0
and r16, \x2 // x1 ^= x0 & x2
eor \x1, r16
mov r16, \x1
and r16, \x3 // x0 ^= x1 & x3
eor \x0, r16
mov r16, \x0
or r16, \x1 // x2 ^= x0 | x1
eor \x2, r16
eor \x3, \x2
eor \x1, \x3
com \x3 // NOT on the last slice
mov r16, \x0
and r16, \x1 // x2 ^= x0 & x1
eor \x2, r16
.endm
/**
 * llayer1 macro:
 *
 * Computes the linear layer on a quarter of the state for the 1st round
 * within the quintuple round routine.
 * Assumes r17 = 0x33 and r18 = 0x11 (set by the caller); clobbers r16.
 */
.macro llayer1 x1, x2, x3
// NIBBLE_ROR2: x1 = ((x1 >> 2) & 0x33) | ((x1 & 0x33) << 2)
mov r16, \x1
lsr r16
lsr r16
and r16, r17
and \x1, r17
lsl \x1
lsl \x1
or \x1, r16
// NIBBLE_ROR1: x3 = ((x3 >> 1) & 0x77) | ((x3 & 0x11) << 3)
mov r16, \x3
lsr r16
cbr r16, 136 // clear 0x88, i.e. keep (x3 >> 1) & 0x77
and \x3, r18
lsl \x3
lsl \x3
lsl \x3
or \x3, r16
//NIBBLE_ROR3: x2 = ((x2 >> 3) & 0x11) | ((x2 & 0x77) << 1)
mov r16, \x2
lsr \x2
lsr \x2
lsr \x2
and \x2, r18
cbr r16, 136 // r16 = x2 & 0x77
lsl r16
or \x2, r16
.endm
/**
 * half_ror_4 macro:
 *
 * Rotates a 16-bit word by 4 bits to the right.
 * Assumes r18 contains 0x0f.
 * NOTE(review): 'movw' copies the register pair (\hi+1):(\hi) into r17:r16,
 * so \hi must be even and \lo must equal \hi+1 -- true at all call sites.
 * Clobbers r16-r17.
 */
.macro half_ror_4 hi, lo
swap \hi
swap \lo
movw r16, \hi // r16 <- swapped hi, r17 <- swapped lo
cbr r16, 15
and \hi, r18
cbr r17, 15
and \lo, r18
or \hi, r17 // exchange the nibbles that crossed the byte boundary
or \lo, r16
.endm
/**
 * half_ror_12 macro:
 *
 * Rotates a 16-bit word by 12 bits to the right.
 * Assumes r18 contains 0xf0; same register-pair requirement as half_ror_4.
 * Clobbers r16-r17.
 */
.macro half_ror_12 hi, lo
swap \hi
swap \lo
movw r16, \hi
cbr r16, 240
and \hi, r18
cbr r17, 240
and \lo, r18
or \hi, r17
or \lo, r16
.endm
/**
 * byte_ror_2 macro:
 *
 * Rotates a byte by 2 bits to the right (via two rotate-through-T steps)
 */
.macro byte_ror_2 x
bst \x, 0
lsr \x
bld \x, 7
bst \x, 0
lsr \x
bld \x, 7
.endm
/**
 * byte_rol_2 macro:
 *
 * Rotates a byte by 2 bits to the left ('zero' must hold 0; the shifted-out
 * carry bit is re-added at bit 0 via 'adc')
 */
.macro byte_rol_2 x, zero
lsl \x
adc \x, \zero
lsl \x
adc \x, \zero
.endm
/**
 * add_round_key macro:
 *
 * Adds a round key to half of the state (8 bytes read from X, which is
 * advanced past them). Clobbers r16-r17.
 */
.macro add_round_key x0, x1, x2, x3, x4, x5, x6, x7
ld r16, X+
ld r17, X+
eor \x0, r16
eor \x1, r17
ld r16, X+
ld r17, X+
eor \x2, r16
eor \x3, r17
ld r16, X+
ld r17, X+
eor \x4, r16
eor \x5, r17
ld r16, X+
ld r17, X+
eor \x6, r16
eor \x7, r17
.endm
/**
 * add_rconst macro:
 *
 * Adds round constants to a quarter of the state (4 bytes read from Z,
 * which is advanced past them). Clobbers r16-r17.
 */
.macro add_rconst x0, x1, x2, x3
ld r16, Z+
ld r17, Z+
eor \x0, r16
eor \x1, r17
ld r16, Z+
ld r17, Z+
eor \x2, r16
eor \x3, r17
.endm
/**
 * add_rconst0 macro:
 *
 * Same as add_rconst but w/ a specificity for rounds r s.t.
 * r = 0 mod 5: the last rconst byte is always 0x10 so we hardcode it
 * (saves one byte in the packed rconst table).
 */
.macro add_rconst0 x0, x1, x2, x3
ld r16, Z+
ld r17, Z+
eor \x0, r16
eor \x1, r17
ld r16, Z+
ldi r17, 16
eor \x2, r16
eor \x3, r17
.endm
/**
 * add_rconst1 macro:
 *
 * Same as add_rconst but w/ a specificity for rounds r s.t.
 * r = 1 mod 5: the 1st and 3rd rconst bytes are always 0x00 and 0x01
 * respectively so we hardcode them
 */
.macro add_rconst1 x1, x2, x3
ld r16, Z+
ldi r17, 1
eor \x1, r16
eor \x2, r17
ld r16, Z+
eor \x3, r16
.endm
/**
 * add_rconst2 macro:
 *
 * Same as add_rconst but w/ a specificity for rounds r s.t.
 * r = 2 mod 5: the first two bytes are always 0x02 and 0x00
 * respectively so we hardcode them
 */
.macro add_rconst2 x0, x2, x3
ldi r16, 2
ld r17, Z+
eor \x0, r16
eor \x2, r17
ld r16, Z+
eor \x3, r16
.endm
/**
 * llayer3 macro:
 *
 * Computes the linear layer on a quarter of the state for the 3rd round
 * within the quintuple round routine (a SWAPMOVE with mask 0x55 per byte).
 * Clobbers r16-r17 and r28-r29 (Y); \x1 must be an even register so that
 * 'movw' can copy the \x2:\x1 pair.
 */
.macro llayer3 x1, x2
movw r16, \x1
movw r28, \x1
lsr r28
lsr r29
eor r16, r28
eor r17, r29
andi r16, 85 // 0x55 mask
andi r17, 85
eor \x1, r16
eor \x2, r17
lsl r16
lsl r17
eor \x1, r16
eor \x2, r17
.endm
/**
 * kexp_round macro:
 *
 * One GIFT-128 key-update step on a 4-byte key word:
 * k0||k1 is rotated right by 2 and k2||k3 rotated left by 4.
 * Requires r20 = 16 (the <<< 4 is done with 'mul', which unconditionally
 * overwrites r1:r0). Clobbers r0, r1 and r30.
 */
.macro kexp_round k0, k1, k2, k3
; k0||k1 >>> 2
bst \k1, 0
ror \k0
ror \k1
bld \k0, 7
bst \k1, 0
ror \k0
ror \k1
bld \k0, 7
; k2||k3 <<< 4
mov r30, \k3
mul \k2, r20 // r1:r0 <- k2 * 16
mov \k2, r1
mov \k3, r0
mul r30, r20 // r1:r0 <- k3 * 16
or \k2, r0
or \k3, r1
eor \k2, \k3 // XOR-swap k2 <-> k3
eor \k3, \k2
eor \k2, \k3
.endm
/**
 * rearrange_rkey0 macro:
 *
 * Rearranges a 4-byte round key word (a..d) from the classical to the
 * fixsliced representation for rounds r = 0 mod 5, and stores the result
 * to X (advanced by 4). Clobbers r22-r25, r28-r31.
 */
.macro rearrange_rkey0 a, b, c, d
mov r22, \a
mov r23, \b
mov r24, \c
mov r25, \d
// SWAPMOVE(x, x, 0x00550055, 9) : x & 550055 <-> x & aa00aa00
mov r28, \a
mov r30, \c
lsr r28
lsr r30
eor r28, \b
eor r30, \d
andi r28, 85
andi r30, 85
eor r23, r28
eor r25, r30
lsl r28
lsl r30
eor r22, r28
eor r24, r30
// SWAPMOVE(x, x, 0x000f000f, 12) : x & 000f000f <-> x & f000f000
movw r28, r22
mov r30, r24
mov r31, r25
swap r28
swap r29
swap r30
swap r31
andi r28, 15
andi r29, 240
andi r30, 15
andi r31, 240
and r22, r19
and r23, r18
and r24, r19
and r25, r18
or r22, r29
or r23, r28
or r24, r31
or r25, r30
// SWAPMOVE(x, x, 0x00003333, 18) : x & 00003333 <-> x & cccc0000
movw r30, r22
lsr r30
lsr r30
lsr r31
lsr r31
eor r30, r24
eor r31, r25
andi r30, 51
andi r31, 51
eor r24, r30
eor r25, r31
lsl r30
lsl r30
lsl r31
lsl r31
eor r22, r30
eor r23, r31
// SWAPMOVE(x, x, 0x000000ff, 24) (realized by the interleaved stores below)
st X+, r22
st X+, r24
st X+, r23
st X+, r25
.endm
/**
 * swap_4bits macro:
 *
 * Swaps bit src0 <-> dst0 and bit src1 <-> dst1 within a byte,
 * using the T flag and a scratch copy in \tmp.
 */
.macro swap_4bits reg, tmp, src0, dst0, src1, dst1
mov \tmp, \reg
bst \reg, \src0
bld \reg, \dst0
bst \reg, \src1
bld \reg, \dst1
bst \tmp, \dst0
bld \reg, \src0
bst \tmp, \dst1
bld \reg, \src1
.endm
/**
 * rearrange_rkey1 macro:
 *
 * Classical -> fixsliced round key rearrangement for rounds r = 1 mod 5
 * (also reused for r = 3 mod 5 in the ARM version of this code).
 * Stores the result to X (advanced by 4). Clobbers r22-r25, r28-r31.
 */
.macro rearrange_rkey1 a, b, c, d
mov r22, \a
mov r23, \b
mov r24, \c
mov r25, \d
// SWAPMOVE(x, x, 0x11111111, 3) : x & 11111111 <-> x & 88888888
swap_4bits r22, r28, 0, 3, 4, 7
swap_4bits r23, r28, 0, 3, 4, 7
swap_4bits r24, r28, 0, 3, 4, 7
swap_4bits r25, r28, 0, 3, 4, 7
// SWAPMOVE(x, x, 0x03030303, 6) : x & 03030303 <-> x & c0c0c0c0
swap_4bits r22, r28, 0, 6, 1, 7
swap_4bits r23, r28, 0, 6, 1, 7
swap_4bits r24, r28, 0, 6, 1, 7
swap_4bits r25, r28, 0, 6, 1, 7
// SWAPMOVE(x, x, 0x000f000f, 12) : x & 000f000f <-> x & f000f000
movw r28, r22
mov r30, r24
mov r31, r25
swap r28
swap r29
swap r30
swap r31
andi r28, 15
andi r29, 240
andi r30, 15
andi r31, 240
and r22, r19
and r23, r18
and r24, r19
and r25, r18
or r22, r29
or r23, r28
or r24, r31
or r25, r30
// SWAPMOVE(x, x, 0x000000ff, 24) (realized by the interleaved stores below)
st X+, r22
st X+, r24
st X+, r23
st X+, r25
.endm
/**
 * rearrange_rkey2 macro:
 *
 * Classical -> fixsliced round key rearrangement for rounds r = 2 mod 5.
 * Stores the result to X (advanced by 4). Clobbers r22-r25, r28-r31.
 */
.macro rearrange_rkey2 a, b, c, d
mov r22, \a
mov r23, \b
mov r24, \c
mov r25, \d
// SWAPMOVE(x, x, 0x0000aaaa, 15) : x & 0000aaaa <-> x & 55550000
movw r28, \a
lsl r29
lsl r28
eor r28, \c
eor r29, \d
andi r28, 170
andi r29, 170
eor r24, r28
eor r25, r29
lsr r28
lsr r29
eor r22, r28
eor r23, r29
// SWAPMOVE(x, x, 0x00003333, 18) : x & 00003333 <-> x & cccc0000
movw r30, r22
lsr r30
lsr r30
lsr r31
lsr r31
eor r30, r24
eor r31, r25
andi r30, 51
andi r31, 51
eor r24, r30
eor r25, r31
lsl r30
lsl r30
lsl r31
lsl r31
eor r22, r30
eor r23, r31
// SWAPMOVE(x, x, 0x0000f0f0, 12) : x & 0000f0f0 <-> x & f0f00000
movw r28, r22
movw r30, r24
swap r28
swap r29
swap r30
swap r31
andi r28, 240
andi r29, 240
andi r30, 15
andi r31, 15
and r22, r18
and r23, r18
and r24, r19
and r25, r19
or r22, r30
or r23, r31
or r24, r28
or r25, r29
// SWAPMOVE(x, x, 0x000000ff, 24) (realized by the interleaved stores below)
st X+, r22
st X+, r24
st X+, r23
st X+, r25
.endm
/**
 * rearrange_rkey3 macro:
 *
 * Classical -> fixsliced round key rearrangement for rounds r = 3 mod 5.
 * Stores the result to X (advanced by 4). Clobbers r22-r25, r28-r31.
 */
.macro rearrange_rkey3 a, b, c, d
mov r22, \a
mov r23, \b
mov r24, \c
mov r25, \d
// SWAPMOVE(x, x, 0x0a0a0a0a, 3) : x & 0a0a0a0a <-> x & 50505050
swap_4bits r22, r28, 1, 4, 3, 6
swap_4bits r23, r28, 1, 4, 3, 6
swap_4bits r24, r28, 1, 4, 3, 6
swap_4bits r25, r28, 1, 4, 3, 6
// SWAPMOVE(x, x, 0x00cc00cc, 6) -- TODO(review): confirm mask/shift mapping
mov r28, r22
mov r30, r24
lsl r28
lsl r28
lsl r30
lsl r30
eor r28, r23
eor r30, r25
andi r28, 204
andi r30, 204
eor r23, r28
eor r25, r30
lsr r28
lsr r28
lsr r30
lsr r30
eor r22, r28
eor r24, r30
// SWAPMOVE(x, x, 0x0000f0f0, 12) : x & 0000f0f0 <-> x & f0f00000
movw r28, r22
movw r30, r24
swap r28
swap r29
swap r30
swap r31
andi r28, 240
andi r29, 240
andi r30, 15
andi r31, 15
and r22, r18
and r23, r18
and r24, r19
and r25, r19
or r22, r30
or r23, r31
or r24, r28
or r25, r29
// SWAPMOVE(x, x, 0x000000ff, 24) (realized by the interleaved stores below)
st X+, r22
st X+, r24
st X+, r23
st X+, r25
.endm
/**
 * swap_bytes macro:
 *
 * Exchanges two registers using the XOR-swap trick (no scratch needed).
 */
.macro swap_bytes x, y
eor \x, \y
eor \y, \x
eor \x, \y
.endm
;-----------------------------------------------------------------------------
; void gift128_kexpand(rkeys, key)
; Computes all fixsliced GIFT-128 round keys from the 16-byte key.
; In:   ARG1 (r25:r24) = round key output buffer, ARG2 (r23:r22) = key.
; Uses: X as the round key write pointer, Y as the key read pointer.
; Note: 'mul' (inside kexp_round) unconditionally overwrites r1:r0, so the
;       key material lives in r2-r17 and r1 is re-cleared before returning
;       to honour the avr-gcc zero-register convention.
;-----------------------------------------------------------------------------
.global gift128_kexpand
gift128_kexpand:
; Save callee-saved r2-r17, r28-r31 (Y/Z) and r24-r25
push_registers 2,17
push_registers 28,31
push_registers 24,25
.L__stack_usage = 22
; Set up the argument pointers: X -> round keys, Y -> key
movw XL, ARG1
movw YL, ARG2
; Load the key into registers 2-17 instead of 0-15 because
; the mul instruction unconditionally overwrites registers r1:r0.
.irp param,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15,r16,r17
ld \param, Y+
.endr
; Constants for efficient bitshifts
ldi r18, 240 ; 0xf0 mask
ldi r19, 15 ; 0x0f mask
ldi r20, 16 ; multiplier implementing the <<< 4 rotation in kexp_round
; First two rounds: rearrange only (the first rkeys are not updated)
rearrange_rkey0 r14, r15, r16, r17
rearrange_rkey0 r6, r7, r8, r9
rearrange_rkey1 r10, r11, r12, r13
rearrange_rkey1 r2, r3, r4, r5
rearrange_rkey2 r6, r7, r8, r9
kexp_round r14, r15, r16, r17
rearrange_rkey2 r14, r15, r16, r17
rearrange_rkey3 r2, r3, r4, r5
kexp_round r10, r11, r12, r13
rearrange_rkey3 r10, r11, r12, r13
st X+, r17
st X+, r16
st X+, r15
st X+, r14
kexp_round r6, r7, r8, r9
st X+, r9
st X+, r8
st X+, r7
st X+, r6
; Loop counter: 7 more batches of rounds
ldi r21, 7
kexp_loop:
cpi r21, 4
brne skip_swap_start
swap_bytes r10, r2
swap_bytes r11, r3
swap_bytes r12, r4
swap_bytes r13, r5
skip_swap_start:
rearrange_rkey0 r10, r11, r12, r13
kexp_round r2, r3, r4, r5
rearrange_rkey0 r2, r3, r4, r5
rearrange_rkey1 r6, r7, r8, r9
kexp_round r14, r15, r16, r17
rearrange_rkey1 r14, r15, r16, r17
rearrange_rkey2 r2, r3, r4, r5
kexp_round r10, r11, r12, r13
rearrange_rkey2 r10, r11, r12, r13
rearrange_rkey3 r14, r15, r16, r17
kexp_round r6, r7, r8, r9
rearrange_rkey3 r6, r7, r8, r9
st X+, r13
st X+, r12
st X+, r11
st X+, r10
kexp_round r2, r3, r4, r5
st X+, r5
st X+, r4
st X+, r3
st X+, r2
swap_bytes r10, r14
swap_bytes r11, r15
swap_bytes r12, r16
swap_bytes r13, r17
swap_bytes r2, r6
swap_bytes r3, r7
swap_bytes r4, r8
swap_bytes r5, r9
cpi r21, 4
brne skip_swap_end
swap_bytes r10, r2
swap_bytes r11, r3
swap_bytes r12, r4
swap_bytes r13, r5
skip_swap_end:
; decrement loop counter ('subi' sets Z, no extra compare needed; the loop
; body is too large for brne's range, hence breq + rjmp)
subi r21, 1
breq kexp_exit
rjmp kexp_loop
kexp_exit:
clr r1 ; restore the avr-gcc zero register (clobbered by mul)
; Restore r24-r25, r28-r31, r2-r17
pop_registers 24,25
pop_registers 28,31
pop_registers 2,17
ret
.size gift128_kexpand, .-gift128_kexpand
;-----------------------------------------------------------------------------
; void gift128_encrypt(out, in, rkeys)
; Encrypts one 128-bit block with fixsliced GIFT-128 (no operation mode).
; In:   ARG1 (r25:r24) = output buffer, ARG2 (r23:r22) = 16-byte input,
;       ARG3 (r21:r20) = precomputed round keys.
; Uses: X = round key pointer, Z = rconst pointer, state in r0-r15.
; Note: 'mul' is not used here, so r1:r0 are safe to hold state; r1 is
;       re-cleared before returning to honour the avr-gcc zero-register
;       convention.
;-----------------------------------------------------------------------------
.global gift128_encrypt
gift128_encrypt:
; Save callee-saved r2-r17 and r28-r29 (Y)
push_registers 2,17
push_registers 28,29
.L__stack_usage = 18
; X -> plaintext
movw XL, ARG2
; Load the plaintext into the state registers r0-r15
.irp param,r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15
ld \param, X+
.endr
; Z -> packed round constants, X -> round keys
ldi ZL, lo8(rconst)
ldi ZH, hi8(rconst)
movw XL, ARG3
// zero register for byte_rol_2 (r1 holds state and cannot be used)
ldi r20, 0
// Loop counter: 8 quintuple rounds = 40 rounds
ldi r19, 8
quintuple_round:
// 1st_round
sbox r0, r4, r8, r12
sbox r1, r5, r9, r13
sbox r2, r6, r10, r14
sbox r3, r7, r11, r15
ldi r17, 51 // 0x33 mask for llayer1
ldi r18, 17 // 0x11 mask for llayer1
llayer1 r4, r8, r12
llayer1 r5, r9, r13
llayer1 r6, r10, r14
llayer1 r7, r11, r15
add_round_key r4, r5, r6, r7, r8, r9, r10, r11
add_rconst0 r0, r1, r2, r3
// 2nd round
sbox r12, r4, r8, r0
sbox r13, r5, r9, r1
sbox r14, r6, r10, r2
sbox r15, r7, r11, r3
subi r18, 2 // r18 <- 0x0f for half_ror_4
half_ror_4 r0, r1
half_ror_4 r2, r3
ldi r18, 240 // r18 <- 0xf0 for half_ror_12
half_ror_12 r8, r9
half_ror_12 r10, r11
add_round_key r5, r4, r7, r6, r8, r9, r10, r11
add_rconst1 r13, r14, r15
// 3rd round
sbox r0, r5, r8, r12
sbox r1, r4, r9, r13
sbox r2, r7, r10, r14
sbox r3, r6, r11, r15
llayer3 r4, r5
llayer3 r6, r7
llayer3 r10, r11
llayer3 r12, r13
add_round_key r5, r4, r7, r6, r10, r11, r8, r9
add_rconst2 r0, r2, r3
// 4th round
sbox r14, r5, r10, r0
sbox r15, r4, r11, r1
sbox r12, r7, r8, r2
sbox r13, r6, r9, r3
// byte_ror_6 (implemented as a rotate left by 2)
byte_rol_2 r0, r20
byte_rol_2 r1, r20
byte_rol_2 r2, r20
byte_rol_2 r3, r20
// byte_ror_4 (nibble swap)
swap r4
swap r5
swap r6
swap r7
// byte_ror_2
byte_ror_2 r8
byte_ror_2 r9
byte_ror_2 r10
byte_ror_2 r11
add_round_key r5, r4, r7, r6, r10, r11, r8, r9
add_rconst r14, r15, r12, r13
// 5th round
sbox r0, r5, r10, r14
sbox r1, r4, r11, r15
sbox r2, r7, r8, r12
sbox r3, r6, r9, r13
// swap state[0] w/ ROR(state[3], 24), via r17:r16 scratch
movw r16, r0
mov r0, r13
mov r1, r14
mov r13, r17
mov r14, r2
mov r17, r3
mov r2, r15
mov r3, r12
mov r15, r17
mov r12, r16
// state[1] = ROR(state[1], 16)
movw r16, r4
mov r4, r7
mov r7, r16
mov r5, r6
mov r6, r17
// state[2] = ROR(state[2], 8)
movw r16, r10
mov r10, r9
mov r9, r8
mov r8, r17
mov r11, r16
add_round_key r4, r5, r6, r7, r8, r9, r10, r11
// last rconst is always of the form 800000xx
ld r16, Z+
eor r12, r16
ldi r16, 128
eor r15, r16
// decrement loop counter ('subi' sets Z; breq + rjmp because the loop body
// exceeds a conditional branch's range)
subi r19, 1
breq exit
rjmp quintuple_round
exit:
; Store the ciphertext to the output buffer
movw YL, ARG1
st Y+, r0
st Y+, r1
st Y+, r2
st Y+, r3
st Y+, r4
st Y+, r5
st Y+, r6
st Y+, r7
st Y+, r8
st Y+, r9
st Y+, r10
st Y+, r11
st Y+, r12
st Y+, r13
st Y+, r14
st Y+, r15
clr r1 ; restore the avr-gcc zero register (held state byte 1 until now)
; Restore r28-r29, r2-r17
pop_registers 28,29
pop_registers 2,17
ret
.size gift128_encrypt, .-gift128_encrypt
.data
; Packed fixsliced round constants: 8 quintuple rounds x 12 bytes.
; Bytes that are constant across rounds are hardcoded in the add_rconst*
; macros instead of being stored here (hence 12 bytes, not 20, per batch).
rconst:
.byte 0x08, 0x00, 0x00, 0x80, 0x80, 0x00, 0x54, 0x81, 0x01, 0x01, 0x01, 0x1f
.byte 0x80, 0x88, 0x88, 0xe0, 0x60, 0x50, 0x51, 0x80, 0x01, 0x03, 0x03, 0x2f
.byte 0x80, 0x88, 0x08, 0x60, 0x60, 0x50, 0x41, 0x80, 0x00, 0x03, 0x03, 0x27
.byte 0x80, 0x88, 0x00, 0xe0, 0x40, 0x50, 0x11, 0x80, 0x01, 0x02, 0x03, 0x2b
.byte 0x80, 0x08, 0x08, 0x40, 0x60, 0x40, 0x01, 0x80, 0x00, 0x02, 0x02, 0x21
.byte 0x80, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x51, 0x80, 0x01, 0x01, 0x03, 0x2e
.byte 0x00, 0x88, 0x08, 0x20, 0x60, 0x50, 0x40, 0x80, 0x00, 0x03, 0x01, 0x06
.byte 0x08, 0x88, 0x00, 0xa0, 0xc0, 0x50, 0x14, 0x81, 0x01, 0x02, 0x01, 0x1a
/* Unpacked (20-byte-per-batch) variant kept for reference:
.byte 0x08, 0x00, 0x00, 0x10, 0x00, 0x80, 0x01, 0x80, 0x02, 0x00, 0x00, 0x54, 0x81, 0x01, 0x01, 0x01, 0x1f, 0x00, 0x00, 0x80
.byte 0x80, 0x88, 0x88, 0x10, 0x00, 0xe0, 0x01, 0x60, 0x02, 0x00, 0x50, 0x51, 0x80, 0x01, 0x03, 0x03, 0x2f, 0x00, 0x00, 0x80
.byte 0x80, 0x88, 0x08, 0x10, 0x00, 0x60, 0x01, 0x60, 0x02, 0x00, 0x50, 0x41, 0x80, 0x00, 0x03, 0x03, 0x27, 0x00, 0x00, 0x80
.byte 0x80, 0x88, 0x00, 0x10, 0x00, 0xe0, 0x01, 0x40, 0x02, 0x00, 0x50, 0x11, 0x80, 0x01, 0x02, 0x03, 0x2b, 0x00, 0x00, 0x80
.byte 0x80, 0x08, 0x08, 0x10, 0x00, 0x40, 0x01, 0x60, 0x02, 0x00, 0x40, 0x01, 0x80, 0x00, 0x02, 0x02, 0x21, 0x00, 0x00, 0x80
.byte 0x80, 0x00, 0x00, 0x10, 0x00, 0xc0, 0x01, 0x00, 0x02, 0x00, 0x00, 0x51, 0x80, 0x01, 0x01, 0x03, 0x2e, 0x00, 0x00, 0x80
.byte 0x00, 0x88, 0x08, 0x10, 0x00, 0x20, 0x01, 0x60, 0x02, 0x00, 0x50, 0x40, 0x80, 0x00, 0x03, 0x01, 0x06, 0x00, 0x00, 0x80
.byte 0x08, 0x88, 0x00, 0x10, 0x00, 0xa0, 0x01, 0xc0, 0x02, 0x00, 0x50, 0x14, 0x81, 0x01, 0x02, 0x01, 0x1a, 0x00, 0x00, 0x80
*/
|
aadomn/cymric
| 19,756
|
src/cymric-gift128/armv7m/gift128.s
|
/****************************************************************************
* Compact ARM assembly implementation of the GIFT-128 block cipher. This
* implementation focuses on code size rather than speed.
*
* See "Fixslicing: A New GIFT Representation" paper available at
* https://eprint.iacr.org/2020/412.pdf for more details.
*
* @author Alexandre Adomnicai, Nanyang Technological University
*
* @date July 2021
****************************************************************************/
.syntax unified
.thumb
/*****************************************************************************
* Round constants look-up table according to the fixsliced representation.
* 40 words, one per round, consumed sequentially by the round_* macros.
*****************************************************************************/
.align 2
.type rconst,%object
rconst:
.word 0x10000008, 0x80018000, 0x54000002, 0x01010181
.word 0x8000001f, 0x10888880, 0x6001e000, 0x51500002
.word 0x03030180, 0x8000002f, 0x10088880, 0x60016000
.word 0x41500002, 0x03030080, 0x80000027, 0x10008880
.word 0x4001e000, 0x11500002, 0x03020180, 0x8000002b
.word 0x10080880, 0x60014000, 0x01400002, 0x02020080
.word 0x80000021, 0x10000080, 0x0001c000, 0x51000002
.word 0x03010180, 0x8000002e, 0x10088800, 0x60012000
.word 0x40500002, 0x01030080, 0x80000006, 0x10008808
.word 0xc001a000, 0x14500002, 0x01020181, 0x8000001a
/******************************************************************************
* Macro to compute the SWAPMOVE technique.
* - out0-out1 output registers
* - in0-in1 input registers
* - m mask
* - n shift value
* - tmp temporary register
* Exchanges (in0 >> n) & m with in1 & m.
******************************************************************************/
.macro swpmv out0, out1, in0, in1, m, n, tmp
eor \tmp, \in1, \in0, lsr \n
and \tmp, \m
eor \out1, \in1, \tmp
eor \out0, \in0, \tmp, lsl \n
.endm
/******************************************************************************
* Macro to compute a nibble-wise rotation to the right.
* - out output register
* - in input register
* - m0-m1 masks
* - n0-n1 shift value
* - tmp temporary register
* Computes out = ((in >> n0) & m0) | ((in & m1) << n1).
******************************************************************************/
.macro nibror out, in, m0, m1, n0, n1, tmp
and \tmp, \m0, \in, lsr \n0
and \out, \in, \m1
orr \out, \tmp, \out, lsl \n1
.endm
/******************************************************************************
* Macro to compute the SBox (the NOT operation is included in the round keys).
* - in0-in3 input/output registers
* - tmp temporary register
* - n ror index value to match fixslicing
******************************************************************************/
.macro sbox in0, in1, in2, in3, tmp, n
and \tmp, \in2, \in0, ror \n
eor \in1, \in1, \tmp
and \tmp, \in1, \in3
eor \in0, \tmp, \in0, ror \n
orr \tmp, \in0, \in1
eor \in2, \tmp, \in2
eor \in3, \in3, \in2
eor \in1, \in1, \in3
and \tmp, \in0, \in1
eor \in2, \in2, \tmp
mvn \in3, \in3
.endm
/******************************************************************************
* Macro to compute the first round within a quintuple round routine.
* - in0-in3 input/output registers
* Assumes r0 -> rconst, r1 -> rkeys (both advanced), r2 = 0x11111111,
* r4 = 0x77777777; clobbers r5-r8, r14.
******************************************************************************/
.macro round_0 in0, in1, in2, in3
ldr.w r5, [r0], #4 // load rconst
ldr.w r6, [r1], #4 // load 1st rkey word
ldr.w r7, [r1], #4 // load 2nd rkey word
sbox \in0, \in1, \in2, \in3, r8, #0 // sbox layer
nibror \in3, \in3, r4, r2, 1, 3, r8 // linear layer
nibror \in2, \in2, r2, r4, 3, 1, r8 // linear layer
orr r14, r2, r2, lsl #1 // 0x33333333 for 'nibror'
nibror \in1, \in1, r14, r14, 2, 2, r8 // linear layer
eor \in1, \in1, r6 // add 1st rkey word
eor \in2, \in2, r7 // add 2nd rkey word
eor \in0, \in0, r5 // add rconst
.endm
/******************************************************************************
* Macro to compute the second round within a quintuple round routine.
* - in0-in3 input/output registers
* Assumes r3 = 0x000f000f; clobbers r5-r8, r14.
******************************************************************************/
.macro round_1 in0, in1, in2, in3
ldr.w r5, [r0], #4 // load rconst
ldr.w r6, [r1], #4 // load 1st rkey word
ldr.w r7, [r1], #4 // load 2nd rkey word
sbox \in0, \in1, \in2, \in3, r8, #0 // sbox layer
mvn r14, r3, lsl #12 // r14<-0x0fff0fff for HALF_ROR
nibror \in3, \in3, r14, r3, 4, 12, r8 // HALF_ROR(in3, 4)
nibror \in2, \in2, r3, r14, 12, 4, r8 // HALF_ROR(in2, 12)
rev16 \in1, \in1 // HALF_ROR(in1, 8)
eor \in1, \in1, r6 // add 1st rkey word
eor \in2, \in2, r7 // add 2nd rkey word
eor \in0, \in0, r5 // add rconst
.endm
/******************************************************************************
* Macro to compute the third round within a quintuple round routine.
* - in0-in3 input/output registers
* Assumes r2 = 0x11111111; clobbers r5-r8, r14.
******************************************************************************/
.macro round_2 in0, in1, in2, in3
ldr.w r5, [r0], #4 // load rconst
ldr.w r6, [r1], #4 // load 1st rkey word
ldr.w r7, [r1], #4 // load 2nd rkey word
sbox \in0, \in1, \in2, \in3, r8, #0 // sbox layer
orr r14, r2, r2, lsl #2 // r14<-0x55555555 for swpmv
swpmv \in1, \in1, \in1, \in1, r14, #1, r8
eor r8, \in3, \in3, lsr #1
and r8, r8, r14, lsr #16
eor \in3, \in3, r8
eor \in3, \in3, r8, lsl #1 //SWAPMOVE(r12,r12,0x55550000,1)
eor r8, \in2, \in2, lsr #1
and r8, r8, r14, lsl #16
eor \in2, \in2, r8
eor \in2, \in2, r8, lsl #1 //SWAPMOVE(r11,r11,0x00005555,1)
eor \in1, \in1, r6 // add 1st rkey word
eor \in2, r7, \in2, ror #16 // add 2nd rkey word
eor \in0, \in0, r5 // add rconst
.endm
/******************************************************************************
* Macro to compute the fourth round within a quintuple round routine.
* - in0-in3 input/output registers
* Assumes r3 = 0x000f000f; clobbers r5-r8, r14. Note the sbox is applied
* with a ror #16 on in0 to realign the fixsliced representation.
******************************************************************************/
.macro round_3 in0, in1, in2, in3
ldr.w r6, [r1], #4 // load 1st rkey word
ldr.w r7, [r1], #4 // load 2nd rkey word
sbox \in0, \in1, \in2, \in3, r8, #16 // sbox layer
eor r14, r3, r3, lsl #8 // r14<-0x0f0f0f0f for nibror
nibror \in1, \in1, r14, r14, #4, #4, r8
orr r14, r14, r14, lsl #2 // r14<-0x3f3f3f3f for nibror
mvn r8, r14, lsr #6 // r8 <-0xc0c0c0c0 for nibror
nibror \in2, \in2, r14, r8, #2, #6, r5
nibror \in3, \in3, r8, r14, #6, #2, r8
ldr.w r5, [r0], #4 // load rconst
eor \in1, \in1, r6 // add 1st rkey word
eor \in2, \in2, r7 // add 2nd rkey word
eor \in0, \in0, r5 // add rconst
.endm
/******************************************************************************
* Macro to compute the fifth round within a quintuple round routine.
* - in0-in3 input/output registers
* The linear layer is folded into the rotated round key additions.
* Clobbers r5-r8.
******************************************************************************/
.macro round_4 in0, in1, in2, in3
ldr.w r5, [r0], #4 // load rconst
ldr.w r6, [r1], #4 // load 1st rkey word
ldr.w r7, [r1], #4 // load 2nd rkey word
sbox \in0, \in1, \in2, \in3, r8, #0 // sbox layer
eor \in1, r6, \in1, ror #16 // add 1st keyword
eor \in2, r7, \in2, ror #8 // add 2nd keyword
eor \in0, \in0, r5 // add rconst
.endm
/******************************************************************************
* Macro to compute the GIFT-128 key update (in its classical representation).
* Two 16-bit rotations are computed on the 32-bit word 'v' given as input.
* - u 1st round key word as defined in the specification (U <- W2||W3)
* - v 2nd round key word as defined in the specification (V <- W6||W7)
* Assumes r9 = 0x00000fff, r10 = 0x0000000f, r12 = 0x3fff0000 (set by the
* caller); clobbers r2-r3. Both words are stored to r1 (advanced by 8).
******************************************************************************/
.macro k_upd u, v
and r2, r10, \v, lsr #12
and r3, \v, r9
orr r2, r2, r3, lsl #4
and r3, r12, \v, lsr #2
orr r2, r2, r3
and \v, \v, #0x00030000
orr \v, r2, \v, lsl #14
str.w \u, [r1], #4
str.w \v, [r1], #4
.endm
/******************************************************************************
* Macro to rearrange round key words from their classical to fixsliced
* representations.
* - rk0 1st round key word
* - rk1 2nd round key word
* - idx0 index for SWAPMOVE
* - idx1 index for SWAPMOVE
* - tmp temporary register for SWAPMOVE
* Masks come from r3, r10, r11 (set by the caller per round class).
******************************************************************************/
.macro rearr_rk rk0, rk1, idx0, idx1, tmp
swpmv \rk1, \rk1, \rk1, \rk1, r3, \idx0, \tmp
swpmv \rk0, \rk0, \rk0, \rk0, r3, \idx0, \tmp
swpmv \rk1, \rk1, \rk1, \rk1, r10, \idx1, \tmp
swpmv \rk0, \rk0, \rk0, \rk0, r10, \idx1, \tmp
swpmv \rk1, \rk1, \rk1, \rk1, r11, #12, \tmp
swpmv \rk0, \rk0, \rk0, \rk0, r11, #12, \tmp
swpmv \rk1, \rk1, \rk1, \rk1, #0xff, #24, \tmp
swpmv \rk0, \rk0, \rk0, \rk0, #0xff, #24, \tmp
.endm
/******************************************************************************
* Subroutine to update the rkeys according to the classical representation.
* Performs 4 key updates (one full round batch) and stores them via r1.
******************************************************************************/
.align 2
classical_key_update:
k_upd r5, r7 // 1st classical key update
k_upd r4, r6 // 2nd classical key update
k_upd r7, r5 // 3rd classical key update
k_upd r6, r4 // 4th classical key update
bx lr
/******************************************************************************
* Subroutine to rearrange round key words from classical to fixsliced
* representation for round i s.t. i mod 5 = 0.
******************************************************************************/
.align 2
rearrange_rkey_0:
ldr.w r6, [r1] // load 1st rkey word (classical rep)
ldr.w r4, [r1, #4] // load 2nd rkey word (classical rep)
rearr_rk r4, r6, #9, #18, r12 // rearrange rkey words for round 1
str.w r4, [r1, #4] // store 2nd rkey word (fixsliced rep)
str.w r6, [r1], #40 // store 1st rkey word, advance to next rnd batch
bx lr
/******************************************************************************
* Subroutine to rearrange round key words from classical to fixsliced
* representation for round i s.t. i mod 5 = 1 or 3.
******************************************************************************/
.align 2
rearrange_rkey_1:
ldr.w r5, [r1] // load 3rd rkey word (classical rep)
ldr.w r7, [r1, #4] // load 4th rkey word (classical rep)
rearr_rk r5, r7, #3, #6, r8 // rearrange rkey words for round 2
str.w r7, [r1, #4] // store 4th rkey word (fixsliced rep)
str.w r5, [r1], #40 // store 3rd rkey word, advance to next rnd batch
bx lr
/******************************************************************************
* Subroutine to rearrange round key words from classical to fixsliced
* representation for round i s.t. i mod 5 = 2.
******************************************************************************/
.align 2
rearrange_rkey_2:
ldr.w r5, [r1] // load 5th rkey word (classical rep)
ldr.w r7, [r1, #4] // load 6th rkey word (classical rep)
rearr_rk r5, r7, #15, #18, r8 // rearrange rkey words for round 3
str.w r7, [r1, #4] // store 6th rkey word (fixsliced rep)
str.w r5, [r1], #40 // store 5th rkey word, advance to next rnd batch
bx lr
.align 2
/*****************************************************************************
* Implementation of the GIFT-128 key schedule according to fixslicing.
* The entire round key material is first computed according to the classical
* representation before being rearranged according to fixslicing.
* NOTE(review): the prototype comment below says (key, rkey) but the code
* reads the key through r1 and writes the round keys through r0, i.e. the
* effective signature appears to be (rkey, key) -- confirm against the header.
*****************************************************************************/
@ void gift128_keyschedule(const u8* key, u32* rkey) {
.global gift128_keyschedule
.type gift128_keyschedule,%function
gift128_keyschedule:
push {r1-r12, r14}
ldm r1, {r4-r7} // load key words
mov r1, r0 // r1 <- running round key pointer
rev r4, r4 // endianness
rev r5, r5 // endianness
rev r6, r6 // endianness
rev r7, r7 // endianness
str.w r5, [r1, #4]
str.w r7, [r1], #8 //the first rkeys are not updated
str.w r4, [r1, #4]
str.w r6, [r1], #8 //the first rkeys are not updated
movw r12, #0x3fff
lsl r12, r12, #16 //r12<- 0x3fff0000
movw r10, #0x000f //r10<- 0x0000000f
movw r9, #0x0fff //r9 <- 0x00000fff
// 9 batches x 4 updates = 36 more rounds (40 rounds = 320 bytes in total)
bl classical_key_update
bl classical_key_update
bl classical_key_update
bl classical_key_update
bl classical_key_update
bl classical_key_update
bl classical_key_update
bl classical_key_update
bl classical_key_update
sub.w r1, r1, #320 // rewind to the start of the rkey buffer
movw r3, #0x0055
movt r3, #0x0055 //r3 <- 0x00550055
movw r10, #0x3333 //r10<- 0x00003333
movw r11, #0x000f
movt r11, #0x000f //r11<- 0x000f000f
bl rearrange_rkey_0 // fixslice the rkey words for round 0
bl rearrange_rkey_0 // fixslice the rkey words for round 5
bl rearrange_rkey_0 // fixslice the rkey words for round 10
bl rearrange_rkey_0 // fixslice the rkey words for round 15
bl rearrange_rkey_0 // fixslice the rkey words for round 20
bl rearrange_rkey_0 // fixslice the rkey words for round 25
bl rearrange_rkey_0 // fixslice the rkey words for round 30
bl rearrange_rkey_0 // fixslice the rkey words for round 35
sub.w r1, r1, #312 // rewind to the round-1 rkey words
movw r3, #0x1111
movt r3, #0x1111 // r3 <- 0x11111111
movw r10, #0x0303
movt r10, #0x0303 // r10<- 0x03030303
bl rearrange_rkey_1 // fixslice the rkey words for round 1
bl rearrange_rkey_1 // fixslice the rkey words for round 6
bl rearrange_rkey_1 // fixslice the rkey words for round 11
bl rearrange_rkey_1 // fixslice the rkey words for round 16
bl rearrange_rkey_1 // fixslice the rkey words for round 21
bl rearrange_rkey_1 // fixslice the rkey words for round 26
bl rearrange_rkey_1 // fixslice the rkey words for round 31
bl rearrange_rkey_1 // fixslice the rkey words for round 36
sub.w r1, r1, #312 // rewind to the round-2 rkey words
movw r3, #0xaaaa // r3 <- 0x0000aaaa
movw r10, #0x3333 // r10<- 0x00003333
movw r11, #0xf0f0 // r11<- 0x0000f0f0
bl rearrange_rkey_2 // fixslice the rkey words for round 2
bl rearrange_rkey_2 // fixslice the rkey words for round 7
bl rearrange_rkey_2 // fixslice the rkey words for round 12
bl rearrange_rkey_2 // fixslice the rkey words for round 17
bl rearrange_rkey_2 // fixslice the rkey words for round 22
bl rearrange_rkey_2 // fixslice the rkey words for round 27
bl rearrange_rkey_2 // fixslice the rkey words for round 32
bl rearrange_rkey_2 // fixslice the rkey words for round 37
sub.w r1, r1, #312 // rewind to the round-3 rkey words
movw r3, #0x0a0a
movt r3, #0x0a0a // r3 <- 0x0a0a0a0a
movw r10, #0x00cc
movt r10, #0x00cc // r10<- 0x00cc00cc
bl rearrange_rkey_1 // fixslice the rkey words for round 3
bl rearrange_rkey_1 // fixslice the rkey words for round 8
bl rearrange_rkey_1 // fixslice the rkey words for round 13
bl rearrange_rkey_1 // fixslice the rkey words for round 18
bl rearrange_rkey_1 // fixslice the rkey words for round 23
bl rearrange_rkey_1 // fixslice the rkey words for round 28
bl rearrange_rkey_1 // fixslice the rkey words for round 33
bl rearrange_rkey_1 // fixslice the rkey words for round 38
pop {r1-r12,r14}
bx lr
.size gift128_keyschedule, .-gift128_keyschedule
/*****************************************************************************
* Subroutine to implement a quintuple round of GIFT-128.
* State in r9-r12; r0 = rconst ptr, r1 = rkey ptr (both advanced).
* 'lr' is spilled to [sp] because the round macros clobber r14.
*****************************************************************************/
.align 2
quintuple_round:
str.w r14, [sp]
round_0 r9, r10, r11, r12
round_1 r12, r10, r11, r9
round_2 r9, r10, r11, r12
round_3 r12, r10, r11, r9
round_4 r9, r10, r11, r12
ldr.w r14, [sp]
// XOR-swap with rotation: r9 <- ROR(r12, 24), r12 <- old r9
eor r9, r9, r12, ror #24
eor r12, r9, r12, ror #24
eor r9, r9, r12 // swap r9 with r12
bx lr
.size quintuple_round, .-quintuple_round
/*****************************************************************************
* Fully unrolled ARM assembly implementation of the GIFTb-128 block cipher.
* This function simply encrypts a 128-bit block, without any operation mode.
*****************************************************************************/
@ void giftb128_encrypt(u8 *out, const u8* in, const u32* rkey)
@ Encrypt one 128-bit block with GIFTb-128 (no operation mode).
@ 40 rounds are executed as 8 calls to 'quintuple_round'.
.global giftb128_encrypt
.type giftb128_encrypt,%function
giftb128_encrypt:
    push {r0-r12,r14}
    sub.w sp, #4 // to store 'lr' when calling 'quintuple_round'
    ldm r1, {r9-r12} // load plaintext words
    mov r1, r2 // r1 <- round-key pointer (consumed by the round macros)
    rev r9, r9 // byte-swap each state word (block stored big-endian in memory)
    rev r10, r10
    rev r11, r11
    rev r12, r12
    movw r2, #0x1111
    movt r2, #0x1111 // r2 <- 0x11111111 (for NIBBLE_ROR)
    movw r3, #0x000f
    movt r3, #0x000f // r3 <- 0x000f000f (for HALF_ROR)
    mvn r4, r2, lsl #3 // r4 <- 0x77777777 (~0x88888888, for NIBBLE_ROR)
    adr r0, rconst // r0 <- 'rconst' address (round-constant table)
    bl quintuple_round // rounds 1-5
    bl quintuple_round // rounds 6-10
    bl quintuple_round // rounds 11-15
    bl quintuple_round // rounds 16-20
    bl quintuple_round // rounds 21-25
    bl quintuple_round // rounds 26-30
    bl quintuple_round // rounds 31-35
    bl quintuple_round // rounds 36-40
    ldr.w r0, [sp ,#4] // restore 'ctext' address (saved r0 lies just above the lr slot)
    rev r9, r9 // swap back to big-endian byte order for output
    rev r10, r10
    rev r11, r11
    rev r12, r12
    stm r0, {r9-r12} // store ciphertext
    add.w sp, #4
    pop {r0-r12,r14}
    bx lr
.size giftb128_encrypt, .-giftb128_encrypt
|
a3f/bareDOOM
| 1,652
|
arch/kvx/lib/setjmp.S
|
/* SPDX-License-Identifier: LGPL-2.1 */
/* SPDX-FileCopyrightText: 2021 Jules Maselbas <jmaselbas@kalray.eu>, Kalray Inc. */
#define REG_SIZE 8
#include <linux/linkage.h>
/* jmp_buf layout:
* [0] = $ra, $sp, $cs, $r14,
* [4] = $r20, $r21, $r22, $r23,
* [8] = $r24, $r25, $r26, $r27,
* [12] = $r28, $r29, $r30, $r31,
* [16] = $r18, $r19,
* [18] = $lc, $le, $ls, xxxx
*/
/**
* int initjmp(jmp_buf jmp, void __noreturn (*func)(void), void *stack_top);
*/
ENTRY(initjmp)
	/* Prime a jmp_buf so a later longjmp() enters 'func' ($r1) on the
	 * given stack ($r2): only the $ra and $sp slots matter for that.
	 * Returns 0 in $r0. */
	/* store $ra slot <- func entry point */
	sd (0 * REG_SIZE)[$r0] = $r1
	;;
	/* store $sp slot <- stack_top */
	sd (1 * REG_SIZE)[$r0] = $r2
	make $r0 = 0
	ret
	;;
ENDPROC(initjmp)
/**
* int setjmp(jmp_buf jmp);
*/
ENTRY(setjmp)
	/* Save the callee-saved context into jmp_buf at $r0 following the
	 * layout documented above; returns 0 ($r0).  Stores are interleaved
	 * with SFR reads across VLIW bundles (;;) for throughput. */
	sq (16 * REG_SIZE)[$r0] = $r18r19	/* [16] = $r18, $r19 */
	get $r40 = $ra
	copyd $r41 = $sp
	;;
	so (4 * REG_SIZE)[$r0] = $r20r21r22r23	/* [4] = $r20..$r23 */
	get $r42 = $cs
	copyd $r43 = $r14
	;;
	so (0 * REG_SIZE)[$r0] = $r40r41r42r43	/* [0] = $ra, $sp, $cs, $r14 */
	get $r40 = $lc
	;;
	so (8 * REG_SIZE)[$r0] = $r24r25r26r27	/* [8] = $r24..$r27 */
	get $r41 = $le
	;;
	so (12 * REG_SIZE)[$r0] = $r28r29r30r31	/* [12] = $r28..$r31 */
	get $r42 = $ls
	;;
	/* [18] = $lc, $le, $ls, xxxx — $r43 is stale here, slot is unused */
	so (18 * REG_SIZE)[$r0] = $r40r41r42r43
	make $r0 = 0
	ret
	;;
ENDPROC(setjmp)
/**
* void longjmp(jmp_buf jmp, int ret);
*/
ENTRY(longjmp)
	/* Restore the context saved by setjmp()/initjmp() from jmp_buf ($r0)
	 * and resume at the saved $ra with return value $r1 (forced nonzero). */
	lo $r40r41r42r43 = (0 * REG_SIZE)[$r0]	/* $ra, $sp, $cs, $r14 */
	;;
	lo $r44r45r46r47 = (18 * REG_SIZE)[$r0]	/* $lc, $le, $ls, xxxx */
	set $ra = $r40
	copyd $sp = $r41
	;;
	lo $r20r21r22r23 = (4 * REG_SIZE)[$r0]
	set $cs = $r42
	copyd $r14 = $r43
	;;
	lo $r24r25r26r27 = (8 * REG_SIZE)[$r0]
	set $lc = $r44
	;;
	lo $r28r29r30r31 = (12 * REG_SIZE)[$r0]
	set $le = $r45
	;;
	lq $r18r19 = (16 * REG_SIZE)[$r0]
	set $ls = $r46
	;;
	/* According to man, if retval is equal to 0, then we should return 1 */
	maxud $r0 = $r1, 1	/* unsigned max: 0 -> 1, anything else unchanged */
	ret
	;;
ENDPROC(longjmp)
|
a3f/bareDOOM
| 1,586
|
arch/kvx/cpu/barebox.lds.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2019 Kalray Inc.
*/
#include <asm/common.h>
#include <asm/sys_arch.h>
#include <asm-generic/barebox.lds.h>
OUTPUT_FORMAT("elf64-kvx")
OUTPUT_ARCH("kvx:kv3-1:64")
SECTIONS
{
	. = CONFIG_ARCH_TEXT_BASE;
	.text ALIGN(4) : {
		*(.startup);		/* entry code must come first in the image */
		_stext = .;
		*(.text)
	}
	/* Exception vector must be aligned on a huge frontier */
	.exception ALIGN(EXCEPTION_ALIGNMENT) :
	{
		_exception_start = ABSOLUTE(.);
		/**
		 * First handler is at _exception_start + EXCEPTION_STRIDE
		 * In order to force getting to the next stride, add at
		 * least 1 byte of data. The next ALIGN will then be
		 * forced to get to the next stride.
		 */
		. += 1;
		. = ALIGN(EXCEPTION_STRIDE);
		/* Entry for traps */
		KEEP(*(.exception.trap));
		. += 1;
		/* Entry for interrupts */
		. = ALIGN(EXCEPTION_STRIDE);
		KEEP(*(.exception.interrupt));
		. += 1;
		/* Entry for syscall */
		. = ALIGN(EXCEPTION_STRIDE);
		KEEP(*(.exception.syscall));
	}
	.rodata ALIGN(8) : {
		*(.rodata*)
		. = ALIGN(8);
		RO_DATA_SECTION
	}
	_etext = .;			/* End of text and rodata section */
	.data ALIGN(4): {
		sdata = .;
		_sdata = .;
		*(.data)
		. = ALIGN(8);
		/* stack region lives in .data; it grows downwards, hence
		 * __stack_start (top) is placed above __stack_end (limit) */
		__stack_end = .;
		. += CONFIG_STACK_SIZE;
		__stack_start = .;
	}
	/* dedicated page for the JTAG/gdb debug stub */
	.gdb_page ALIGN(4 * 1024) :
	{
		_debug_start = ABSOLUTE(.);
		_debug_phy_start = ABSOLUTE(.);
		. += 4 * 1024;
	}
	__debug_phy_end = ABSOLUTE(.);
	_edata = .;
	/* We use store quad for bss init so align on 16 bytes */
	.bss ALIGN(16):
	{
		__bss_start = .;
		*(.bss)
		. = ALIGN(16);
		__bss_stop = .;
	}
	__end = .;
}
|
a3f/bareDOOM
| 4,036
|
arch/kvx/cpu/start.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2019 Kalray Inc.
*/
#include <linux/linkage.h>
#include <asm/privilege.h>
#include <asm/sys_arch.h>
#define PS_VAL_WFXL(__field, __val) \
SFR_SET_VAL_WFXL(PS, __field, __val)
#define PS_WFXL_START_VALUE PS_VAL_WFXL(HLE, 1) | \
PS_VAL_WFXL(USE, 1) | \
PS_VAL_WFXL(DCE, 1) | \
PS_VAL_WFXL(ICE, 1) | \
PS_VAL_WFXL(V64, 1) | \
PS_VAL_WFXL(ET, 0)
#define PCR_VAL_WFXM(__field, __val) \
SFR_SET_VAL_WFXM(PCR, __field, __val)
#define PCR_WFXM_START_VALUE PCR_VAL_WFXM(L1CE, 1)
/* Enable STOP in WS */
#define WS_ENABLE_WU2 (KVX_SFR_WS_WU2_MASK)
#define WS_WFXL_VALUE (WS_ENABLE_WU2)
/*
* This is our entry point. When entering from bootloader,
* the following registers are set:
* $r0 is a magic "KALARGV1" (FSBL_PARAM_MAGIC) indicating parameters are passed
* $r1 Device tree pointer
*
* WARNING WARNING WARNING
* ! DO NOT CLOBBER THEM !
* WARNING WARNING WARNING
*
* Try to use register above $r20 to ease parameter adding in future
*/
.section .startup, "ax"
ENTRY(kvx_start)
	/* Reset entry point: set up privilege level, processor status,
	 * caches, BSS and stack, then hand over to kvx_start_barebox().
	 * $r0/$r1 carry the FSBL magic and device-tree pointer and must
	 * not be clobbered (see header comment above). */
	/* (Re)initialize performance counter */
	make $r20 = 0x00000000
	;;
	set $pmc = $r20
	;;
	call asm_init_pl
	;;
	/* Setup default processor status */
	make $r25 = PS_WFXL_START_VALUE
	;;
	wfxl $ps = $r25
	;;
	make $r25 = PCR_WFXM_START_VALUE
	;;
	wfxm $pcr = $r25
	;;
	/* Clear BSS */
	make $r22 = __bss_stop
	make $r21 = __bss_start
	;;
	sbfd $r22 = $r21, $r22		/* $r22 <- byte count (stop - start) */
	make $r24 = 0
	;;
	/* Divide by 16 for hardware loop */
	srld $r22, $r22, 4
	make $r25 = 0
	;;
	/* Clear bss with hardware loop */
	loopdo $r22, clear_bss_done
	;;
	sq 0[$r21] = $r24r25		/* store 16 zero bytes per iteration */
	addd $r21 = $r21, 16
	;;
clear_bss_done:
	/* Setup stack */
	make $sp, __stack_start
	;;
	call kvx_lowlevel_setup
	;;
	call kvx_start_barebox
	;;
	/* kvx_start_barebox should not return; halt the core if it does */
	goto kvx_proc_power_off
	;;
ENDPROC(kvx_start)
/*
 * request_ownership(__pl): claim ownership of the system resources barebox
 * needs (syscalls, hw traps, misc, interrupts, PS fields, debug) for the
 * given target privilege level, by writing the *_WFXL/_WFXM owner values
 * into the corresponding delegation SFRs ($syow, $htow, $mow, $itow,
 * $psow, $dow).  __pl is token-pasted into the value macro names, so it
 * must be PL_CUR or PL_CUR_PLUS_1.  Clobbers $r21/$r22.
 * NOTE(review): each source line ends in ';\' — keep any future edits on
 * single physical lines so the macro continuation is preserved.
 */
#define request_ownership(__pl) ;\
	make $r21 = SYO_WFXL_VALUE_##__pl ;\
	;; ;\
	wfxl $syow = $r21 ;\
	;; ;\
	make $r21 = HTO_WFXL_VALUE_##__pl ;\
	;; ;\
	wfxl $htow = $r21 ;\
	;; ;\
	make $r21 = MO_WFXL_VALUE_##__pl ;\
	make $r22 = MO_WFXM_VALUE_##__pl ;\
	;; ;\
	wfxl $mow = $r21 ;\
	;; ;\
	wfxm $mow = $r22 ;\
	;; ;\
	make $r21 = ITO_WFXL_VALUE_##__pl ;\
	make $r22 = ITO_WFXM_VALUE_##__pl ;\
	;; ;\
	wfxl $itow = $r21 ;\
	;; ;\
	wfxm $itow = $r22 ;\
	;; ;\
	make $r21 = PSO_WFXL_VALUE_##__pl ;\
	make $r22 = PSO_WFXM_VALUE_##__pl ;\
	;; ;\
	wfxl $psow = $r21 ;\
	;; ;\
	wfxm $psow = $r22 ;\
	;; ;\
	make $r21 = DO_WFXL_VALUE_##__pl ;\
	;; ;\
	wfxl $dow = $r21 ;\
	;;
/**
* Initialize privilege level
*/
ENTRY(asm_init_pl)
	/* Ensure barebox runs at PL >= 1.  If we booted at PL0, drop to
	 * PL1 via rfe (leaving PL0 free for a JTAG debug agent); in either
	 * case request the resource ownership barebox needs.
	 * Clobbers $r20-$r22 (plus request_ownership's clobbers). */
	get $r21 = $ps
	;;
	/* Extract privilege level from $ps to check if we need to
	 * lower our privilege level (we might already be in PL1)
	 */
	extfz $r20 = $r21, KVX_SFR_END(PS_PL), KVX_SFR_START(PS_PL)
	;;
	/* If our privilege level is 0, then we need to lower in execution level
	 * to ring 1 in order to let the debug routines be inserted at runtime
	 * by the JTAG. In both case, we will request the resources we need for
	 * barebox to run.
	 */
	cb.deqz $r20? delegate_pl
	;;
	/*
	 * When someone is already above us, request the resources we need to
	 * run barebox . No need to request double exception or ECC traps for
	 * instance. When doing so, the more privileged level will trap for
	 * permission and delegate us the required resources.
	 */
	request_ownership(PL_CUR)
	;;
	ret
	;;
delegate_pl:
	request_ownership(PL_CUR_PLUS_1)
	;;
	/* Copy our $ps into $sps for 1:1 restoration */
	get $r22 = $ps
	;;
	/* We will return to $ra after rfe */
	get $r21 = $ra
	/* Set privilege level to +1 in $sps (relative level from the
	 * current one)
	 */
	addd $r22 = $r22, PL_CUR_PLUS_1
	;;
	set $spc = $r21
	;;
	set $sps = $r22
	;;
	/* When using rfe, $spc and $sps will be restored in $ps and $pc,
	 * We will then return to the caller ($ra) in current PL + 1
	 */
	rfe
	;;
ENDPROC(asm_init_pl)
/*
 * kvx_proc_power_off: halt the core.  Invalidates the data cache, arms
 * wake-up source WU2 in $ws and then spins on 'stop'.  Never returns.
 *
 * Fixes: stray ':' after ENTRY() — the linkage macro already emits the
 * label (see ENTRY(kvx_start) above), so the extra colon does not
 * assemble.  Also terminate the 'dinval' bundle with ';;' like every
 * other instruction in this file.
 */
ENTRY(kvx_proc_power_off)
	dinval
	;;
	make $r1 = WS_WFXL_VALUE
	;;
	/* Enable STOP */
	wfxl $ws, $r1
	;;
1:	stop
	;;
	goto 1b
	;;
ENDPROC(kvx_proc_power_off)
|
a3f/bareDOOM
| 1,075
|
arch/x86/lib/setjmp_32.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Written by H. Peter Anvin <hpa@zytor.com>
* Brought in from Linux v4.4 and modified for U-Boot
* From Linux arch/um/sys-i386/setjmp.S
*/
#define _REGPARM
#include <linux/linkage.h>
.text
.align 8
/*
* The jmp_buf is assumed to contain the following, in order:
* %ebx
* %esp
* %ebp
* %esi
* %edi
* <return address>
*/
/*
 * int setjmp(jmp_buf buf)
 * regparm(3) calling convention (see _REGPARM above): %eax = buf.
 * Saves %ebx/%esp/%ebp/%esi/%edi and the return address per the jmp_buf
 * layout documented above, then returns 0.  The return address is popped
 * first so the recorded %esp is the post-return value.
 * Clobbers: %ecx, %edx.
 */
ENTRY(setjmp)
	movl %eax, %edx
	popl %ecx	/* Return address, and adjust the stack */
	xorl %eax, %eax	/* Return value */
	movl %ebx, (%edx)
	movl %esp, 4(%edx)	/* Post-return %esp! */
	pushl %ecx	/* Make the call/return stack happy */
	movl %ebp, 8(%edx)
	movl %esi, 12(%edx)
	movl %edi, 16(%edx)
	movl %ecx, 20(%edx)	/* Return address */
	ret
ENDPROC(setjmp)
/*
 * void longjmp(jmp_buf buf, int val)
 * regparm(3): %eax = buf, %edx = val.  The xchg puts val into %eax (the
 * apparent setjmp return value) and buf into %edx, then restores the
 * saved registers and jumps to the saved return address.
 * NOTE(review): val is not forced nonzero here — presumably the caller
 * guarantees val != 0; confirm against callers.
 */
ENTRY(longjmp)
	xchgl %eax, %edx	/* %eax <- val, %edx <- buf */
	movl (%edx), %ebx
	movl 4(%edx), %esp
	movl 8(%edx), %ebp
	movl 12(%edx), %esi
	movl 16(%edx), %edi
	jmp *20(%edx)		/* resume at saved return address */
ENDPROC(longjmp)
/*
 * int initjmp(jmp_buf buf, void (*func)(void), void *stack_top)
 * regparm(3): %eax = buf, %edx = func, %ecx = stack_top.
 * Primes buf so a subsequent longjmp() starts 'func' on the given stack;
 * only the return-address and %esp slots are initialized.  Returns 0.
 */
ENTRY(initjmp)
	movl %edx, 20(%eax)	/* Return address */
	movl %ecx, 4(%eax)	/* Post-return %esp! */
	xorl %eax, %eax		/* Return value */
	ret
ENDPROC(initjmp)
|
a3f/bareDOOM
| 2,514
|
arch/x86/mach-efi/crt0-efi-ia32.S
|
/* crt0-efi-ia32.S - x86 EFI startup code.
Copyright (C) 1999 Hewlett-Packard Co.
Contributed by David Mosberger <davidm@hpl.hp.com>.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials
provided with the distribution.
* Neither the name of Hewlett-Packard Co. nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
BE LIABLE FOR ANYDIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
*/
.text
.align 4
.globl _start
/*
 * ia32 EFI entry: cdecl args are 8(%ebp) = image handle, 12(%ebp) = systab.
 * Performs self-relocation via _relocate() before calling efi_main().
 * The call/pop at label 0 obtains the runtime address of label 0 so the
 * load base and _DYNAMIC can be computed position-independently.
 */
_start:
	pushl %ebp
	movl %esp,%ebp
	pushl 12(%ebp)			# copy "image" argument
	pushl 8(%ebp)			# copy "systab" argument
	call 0f
0:	popl %eax			# %eax = runtime address of label 0
	movl %eax,%ebx
	addl $image_base-0b,%eax	# %eax = ldbase
	addl $_DYNAMIC-0b,%ebx		# %ebx = _DYNAMIC
	pushl %ebx			# pass _DYNAMIC as second argument
	pushl %eax			# pass ldbase as first argument
	call _relocate
	popl %ebx			# drop the two _relocate arguments
	popl %ebx
	testl %eax,%eax			# nonzero return -> relocation failed
	jne .exit
	call efi_main			# call app with "image" and "systab" argument
.exit:	leave
	ret

/* hand-craft a dummy .reloc section so EFI knows it's a relocatable executable: */
	.data
dummy:	.long 0

#define IMAGE_REL_ABSOLUTE 0
	.section .reloc
	.long dummy			/* Page RVA */
	.long 10			/* Block Size (2*4+2) */
	.word (IMAGE_REL_ABSOLUTE<<12) + 0 /* reloc for dummy */
|
a3f/bareDOOM
| 2,362
|
arch/x86/mach-efi/crt0-efi-x86_64.S
|
/* crt0-efi-x86_64.S - x86_64 EFI startup code.
Copyright (C) 1999 Hewlett-Packard Co.
Contributed by David Mosberger <davidm@hpl.hp.com>.
Copyright (C) 2005 Intel Co.
Contributed by Fenghua Yu <fenghua.yu@intel.com>.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials
provided with the distribution.
* Neither the name of Hewlett-Packard Co. nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
BE LIABLE FOR ANYDIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
*/
.text
.align 4
.globl _start
/*
 * x86_64 EFI entry.  EFI hands over image handle in %rcx and systab in
 * %rdx (Microsoft x64 ABI).  The pop/push pairs below net-swap the two
 * values on the stack so that after _relocate() the final pops load
 * %rdi = image and %rsi = systab — the SysV argument order efi_main()
 * expects.  The initial 'subq $8' keeps %rsp 16-byte aligned at calls.
 */
_start:
	subq $8, %rsp
	pushq %rcx
	pushq %rdx

0:
	lea image_base(%rip), %rdi	/* first _relocate arg: load base */
	lea _DYNAMIC(%rip), %rsi	/* second _relocate arg: dynamic section */

	popq %rcx			/* %rcx <- systab, %rdx <- image ... */
	popq %rdx
	pushq %rcx			/* ... re-pushed swapped, see header */
	pushq %rdx
	call _relocate

	popq %rdi			/* %rdi <- image handle */
	popq %rsi			/* %rsi <- systab */

	call efi_main
	addq $8, %rsp

.exit:
	ret

/* hand-craft a dummy .reloc section so EFI knows it's a relocatable executable: */
	.data
dummy:	.long 0

#define IMAGE_REL_ABSOLUTE 0
	.section .reloc, "a"
label1:
	.long dummy-label1		/* Page RVA */
	.long 10			/* Block Size (2*4+2) */
	.word (IMAGE_REL_ABSOLUTE<<12) + 0 /* reloc for dummy */
|
a3f/bareDOOM
| 1,267
|
arch/x86/mach-efi/elf_ia32_efi.lds.S
|
#include <asm-generic/barebox.lds.h>
OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
OUTPUT_ARCH(i386)
ENTRY(_start)
SECTIONS
{
	. = 0;				/* image is fully relocatable; base 0 */
	image_base = .;
	.hash : { *(.hash) }	/* this MUST come first! */
	. = ALIGN(4096);
	.text :
	{
		_stext = .;
		_text = .;
		*(.text)
		*(.text.*)
		*(.gnu.linkonce.t.*)
	}
	_etext = .;
	. = ALIGN(4096);
	.sdata : {
		*(.got.plt)
		*(.got)
		*(.srodata)
		*(.sdata)
		*(.sbss)
		*(.scommon)
	}
	. = ALIGN(4096);
	_sdata = .;
	.data : {
		*(.rodata*)
		RO_DATA_SECTION
		*(.data)
		*(.data1)
		*(.data.*)
		*(.sdata)
		*(.got.plt)
		*(.got)
	/* the EFI loader doesn't seem to like a .bss section, so we stick
	 * it all into .data: */
		*(.sbss)
		*(.scommon)
		*(.dynbss)
		*(.bss)
		*(COMMON)
	}
	. = ALIGN(4096);
	.dynamic : { *(.dynamic) }
	. = ALIGN(4096);
	.rel : {
		*(.rel.data)
		*(.rel.data.*)
		*(.rel.got)
		*(.rel.stab)
		*(.data.rel.ro.local)
		*(.data.rel.local)
		*(.data.rel.ro)
		*(.data.rel*)
	}
	. = ALIGN(4096);
	.reloc :	/* This is the PECOFF .reloc section! */
	{
		*(.reloc)
	}
	. = ALIGN(4096);
	.dynsym : { *(.dynsym) }
	. = ALIGN(4096);
	.dynstr : { *(.dynstr) }
	. = ALIGN(4096);
	/DISCARD/ :
	{
		*(.rel.reloc)
		*(.eh_frame)
		*(.note.GNU-stack)
	}
	.comment 0 : { *(.comment) }
}
|
a3f/bareDOOM
| 1,188
|
arch/x86/mach-efi/elf_x86_64_efi.lds.S
|
#include <asm-generic/barebox.lds.h>
/* Same as elf_x86_64_fbsd_efi.lds, except for OUTPUT_FORMAT below - KEEP IN SYNC */
OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
OUTPUT_ARCH(i386:x86-64)
ENTRY(_start)
SECTIONS
{
	. = 0;				/* image is fully relocatable; base 0 */
	image_base = .;
	.hash : { *(.hash) }	/* this MUST come first! */
	. = ALIGN(4096);
	.eh_frame : {
		*(.eh_frame)
	}
	. = ALIGN(4096);
	.text : {
		_stext = .;
		_text = .;
		*(.text)
		*(.text.*)
		*(.gnu.linkonce.t.*)
	}
	_etext = .;
	. = ALIGN(4096);
	.reloc : {			/* PECOFF .reloc section */
		*(.reloc)
	}
	. = ALIGN(4096);
	_sdata = .;
	.data : {
		*(.rodata*)
		RO_DATA_SECTION
		*(.got.plt)
		*(.got)
		*(.data*)
		__bss_start = .;
		*(.sdata)
	/* the EFI loader doesn't seem to like a .bss section, so we stick
	 * it all into .data: */
		*(.sbss)
		*(.scommon)
		*(.dynbss)
		*(.bss)
		*(COMMON)
		*(.rel.local)
		__bss_stop = .;
	}
	_edata = .;
	. = ALIGN(4096);
	.dynamic : { *(.dynamic) }
	. = ALIGN(4096);
	.rela : {
		*(.rela*)
	}
	. = ALIGN(4096);
	.dynsym : { *(.dynsym) }
	. = ALIGN(4096);
	.dynstr : { *(.dynstr) }
	. = ALIGN(4096);
	.ignored.reloc : {
		*(.rela.reloc)
		*(.eh_frame)
		*(.note.GNU-stack)
	}
	.comment 0 : { *(.comment) }
}
|
a3f/bareDOOM
| 1,063
|
arch/mips/lib/barebox.lds.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011 Antony Pavlov <antonynpavlov@gmail.com>
*/
#include <asm-generic/barebox.lds.h>
OUTPUT_ARCH(mips)
ENTRY(_start)
SECTIONS
{
	. = TEXT_BASE;

	. = ALIGN(4);
	.image_start : { *(.__image_start) }
	.text :
	{
		_stext = .;
		_text = .;
		*(.text_entry*)		/* reset/entry code first */
		__bare_init_start = .;
		*(.text_bare_init*)	/* code usable before RAM is set up */
		__bare_init_end = .;
		*(.text*)
	}
	BAREBOX_BARE_INIT_SIZE

	PRE_IMAGE

	. = ALIGN(4);
	.rodata : {
		*(.rodata*)
		RO_DATA_SECTION
	}

	_etext = .;			/* End of text and rodata section */
	_sdata = .;

	. = ALIGN(4);
	.data : { *(.data*) }

	.barebox_imd : { BAREBOX_IMD }

	_edata = .;
	.image_end : { *(.__image_end) }

	. = ALIGN(4);

	/*
	 * .rel must come last so that the mips-relocs tool can shrink
	 * the section size & the PT_LOAD program header filesz.
	 */
	.data.reloc : {
		__rel_start = .;
		BYTE(0x0)
		. += CONFIG_MIPS_RELOCATION_TABLE_SIZE - 1;
	}

	_end = .;

	.bss : {
		__bss_start = .;
		*(.sbss.*)
		*(.bss.*)
		*(COMMON)
		. = ALIGN(4);
		__bss_stop = .;
	}
}
|
a3f/bareDOOM
| 6,606
|
arch/mips/lib/memcpy.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 1998, 99, 2000, 01, 2002 Ralf Baechle (ralf@gnu.org)
* Copyright (C) 1999, 2000, 01, 2002 Silicon Graphics, Inc.
* Copyright (C) 2002 Broadcom, Inc.
* memcpy/copy_user author: Mark Vandevoorde
* Copyright (C) 2007 Maciej W. Rozycki
* Copyright (C) 2014 Imagination Technologies Ltd.
*
* Kernel-mode memcpy function without exceptions for _some_ MIPS CPUs
* by Aleksey Kuleshov (rndfax@yandex.ru), 2015
*
*/
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/regdef.h>
#define dst a0
#define src a1
#define len a2
#define LOADK lw /* No exception */
#define LOAD(reg, addr) lw reg, addr
#define LOADL(reg, addr) lwl reg, addr
#define LOADR(reg, addr) lwr reg, addr
#define STOREL(reg, addr) swl reg, addr
#define STORER(reg, addr) swr reg, addr
#define STORE(reg, addr) sw reg, addr
#define ADD addu
#define SUB subu
#define SRL srl
#define SLL sll
#define SRA sra
#define SLLV sllv
#define SRLV srlv
#define NBYTES 4
#define LOG_NBYTES 2
#define LOADB(reg, addr) lb reg, addr
#define STOREB(reg, addr) sb reg, addr
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define LDFIRST LOADR
#define LDREST LOADL
#define STFIRST STORER
#define STREST STOREL
#define SHIFT_DISCARD SLLV
#else
#define LDFIRST LOADL
#define LDREST LOADR
#define STFIRST STOREL
#define STREST STORER
#define SHIFT_DISCARD SRLV
#endif
#define FIRST(unit) ((unit)*NBYTES)
#define REST(unit) (FIRST(unit)+NBYTES-1)
#define UNIT(unit) FIRST(unit)
#define ADDRMASK (NBYTES-1)
.text
.align 5
.set noreorder
/*
 * void *memcpy(void *dst, const void *src, size_t len)
 * Exception-free MIPS memcpy.  Strategy: align dst first, then copy in
 * 8-word unrolled blocks when src is aligned too (or via lwl/lwr pairs
 * when it is not), then a word loop, then a byte tail.  Written for
 * .set noreorder: the instruction after each branch is its delay slot.
 * Returns dst in v0.  Clobbers t0-t4, t7, t8.
 */
	LEAF(memcpy)	/* a0=dst a1=src a2=len */
	move	v0, dst				/* return value */
/*
 * Note: dst & src may be unaligned, len may be 0
 * Temps
 */
#define rem t8
	/*
	 * The "issue break"s below are very approximate.
	 * Issue delays for dcache fills will perturb the schedule, as will
	 * load queue full replay traps, etc.
	 *
	 * If len < NBYTES use byte operations.
	 */
	sltu	t2, len, NBYTES
	and	t1, dst, ADDRMASK
	bnez	t2, .Lcopy_bytes_checklen
	 and	t0, src, ADDRMASK		/* delay slot */
	bnez	t1, .Ldst_unaligned
	 nop
	bnez	t0, .Lsrc_unaligned_dst_aligned
	/*
	 * use delay slot for fall-through
	 * src and dst are aligned; need to compute rem
	 */
.Lboth_aligned:
	 SRL	t0, len, LOG_NBYTES+3		# +3 for 8 units/iter
	beqz	t0, .Lcleanup_both_aligned	# len < 8*NBYTES
	 and	rem, len, (8*NBYTES-1)		# rem = len % (8*NBYTES)
	.align	4
1:
	/* 8-word block copy; loads are hoisted well before their stores */
	LOAD(t0, UNIT(0)(src))
	LOAD(t1, UNIT(1)(src))
	LOAD(t2, UNIT(2)(src))
	LOAD(t3, UNIT(3)(src))
	SUB	len, len, 8*NBYTES
	LOAD(t4, UNIT(4)(src))
	LOAD(t7, UNIT(5)(src))
	STORE(t0, UNIT(0)(dst))
	STORE(t1, UNIT(1)(dst))
	LOAD(t0, UNIT(6)(src))
	LOAD(t1, UNIT(7)(src))
	ADD	src, src, 8*NBYTES
	ADD	dst, dst, 8*NBYTES
	STORE(t2, UNIT(-6)(dst))
	STORE(t3, UNIT(-5)(dst))
	STORE(t4, UNIT(-4)(dst))
	STORE(t7, UNIT(-3)(dst))
	STORE(t0, UNIT(-2)(dst))
	STORE(t1, UNIT(-1)(dst))
	bne	len, rem, 1b
	 nop
	/*
	 * len == rem == the number of bytes left to copy < 8*NBYTES
	 */
.Lcleanup_both_aligned:
	beqz	len, .Ldone
	 sltu	t0, len, 4*NBYTES
	bnez	t0, .Lless_than_4units
	 and	rem, len, (NBYTES-1)	# rem = len % NBYTES
	/*
	 * len >= 4*NBYTES
	 */
	LOAD( t0, UNIT(0)(src))
	LOAD( t1, UNIT(1)(src))
	LOAD( t2, UNIT(2)(src))
	LOAD( t3, UNIT(3)(src))
	SUB	len, len, 4*NBYTES
	ADD	src, src, 4*NBYTES
	STORE(t0, UNIT(0)(dst))
	STORE(t1, UNIT(1)(dst))
	STORE(t2, UNIT(2)(dst))
	STORE(t3, UNIT(3)(dst))
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, 4*NBYTES
	beqz	len, .Ldone
	.set	noreorder
.Lless_than_4units:
	/*
	 * rem = len % NBYTES
	 */
	beq	rem, len, .Lcopy_bytes
	 nop
1:
	LOAD(t0, 0(src))
	ADD	src, src, NBYTES
	SUB	len, len, NBYTES
	STORE(t0, 0(dst))
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, NBYTES
	bne	rem, len, 1b
	.set	noreorder
	/*
	 * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
	 * A loop would do only a byte at a time with possible branch
	 * mispredicts.  Can't do an explicit LOAD dst,mask,or,STORE
	 * because can't assume read-access to dst.  Instead, use
	 * STREST dst, which doesn't require read access to dst.
	 *
	 * This code should perform better than a simple loop on modern,
	 * wide-issue mips processors because the code has fewer branches and
	 * more instruction-level parallelism.
	 */
#define bits t2
	beqz	len, .Ldone
	 ADD	t1, dst, len	# t1 is just past last byte of dst
	li	bits, 8*NBYTES
	SLL	rem, len, 3	# rem = number of bits to keep
	LOAD(t0, 0(src))
	SUB	bits, bits, rem	# bits = number of bits to discard
	SHIFT_DISCARD t0, t0, bits
	STREST(t0, -1(t1))
	jr	ra
	 move	len, zero	/* delay slot */
.Ldst_unaligned:
	/*
	 * dst is unaligned
	 * t0 = src & ADDRMASK
	 * t1 = dst & ADDRMASK; T1 > 0
	 * len >= NBYTES
	 *
	 * Copy enough bytes to align dst
	 * Set match = (src and dst have same alignment)
	 */
#define match rem
	LDFIRST(t3, FIRST(0)(src))
	ADD	t2, zero, NBYTES
	LDREST(t3, REST(0)(src))
	SUB	t2, t2, t1	# t2 = number of bytes copied
	xor	match, t0, t1
	STFIRST(t3, FIRST(0)(dst))
	beq	len, t2, .Ldone
	 SUB	len, len, t2
	ADD	dst, dst, t2
	beqz	match, .Lboth_aligned
	 ADD	src, src, t2
.Lsrc_unaligned_dst_aligned:
	SRL	t0, len, LOG_NBYTES+2	# +2 for 4 units/iter
	beqz	t0, .Lcleanup_src_unaligned
	 and	rem, len, (4*NBYTES-1)	# rem = len % 4*NBYTES
1:
/*
 * Avoid consecutive LD*'s to the same register since some mips
 * implementations can't issue them in the same cycle.
 * It's OK to load FIRST(N+1) before REST(N) because the two addresses
 * are to the same unit (unless src is aligned, but it's not).
 */
	LDFIRST(t0, FIRST(0)(src))
	LDFIRST(t1, FIRST(1)(src))
	SUB	len, len, 4*NBYTES
	LDREST(t0, REST(0)(src))
	LDREST(t1, REST(1)(src))
	LDFIRST(t2, FIRST(2)(src))
	LDFIRST(t3, FIRST(3)(src))
	LDREST(t2, REST(2)(src))
	LDREST(t3, REST(3)(src))
	ADD	src, src, 4*NBYTES
	STORE(t0, UNIT(0)(dst))
	STORE(t1, UNIT(1)(dst))
	STORE(t2, UNIT(2)(dst))
	STORE(t3, UNIT(3)(dst))
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, 4*NBYTES
	bne	len, rem, 1b
	.set	noreorder
.Lcleanup_src_unaligned:
	beqz	len, .Ldone
	 and	rem, len, NBYTES-1	# rem = len % NBYTES
	beq	rem, len, .Lcopy_bytes
	 nop
1:
	LDFIRST(t0, FIRST(0)(src))
	LDREST(t0, REST(0)(src))
	ADD	src, src, NBYTES
	SUB	len, len, NBYTES
	STORE(t0, 0(dst))
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, NBYTES
	bne	len, rem, 1b
	.set	noreorder
.Lcopy_bytes_checklen:
	beqz	len, .Ldone
	 nop
.Lcopy_bytes:
	/* 0 < len < NBYTES */
#define COPY_BYTE(N)			\
	LOADB(t0, N(src));		\
	SUB	len, len, 1;		\
	beqz	len, .Ldone;		\
	 STOREB(t0, N(dst))

	COPY_BYTE(0)
	COPY_BYTE(1)
	/* at most one byte remains here (NBYTES == 4, len < 4 on entry) */
	LOADB(t0, NBYTES-2(src))
	SUB	len, len, 1
	jr	ra
	 STOREB(t0, NBYTES-2(dst))
.Ldone:
	jr	ra
	 nop
	END(memcpy)
|
a3f/bareDOOM
| 1,030
|
arch/mips/lib/pbl.lds.S
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* (C) Copyright 2018 Oleksij Rempel <o.rempel@pengutronix.de>, Pengutronix
* (C) Copyright 2012 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
*/
#include <asm-generic/barebox.lds.h>
#include <asm-generic/memory_layout.h>
#include <linux/sizes.h>
#define BASE (TEXT_BASE - SZ_2M)
OUTPUT_ARCH("mips")
SECTIONS
{
	. = BASE;

	PRE_IMAGE

	. = ALIGN(4);
	.text :
	{
		_stext = .;
		_text = .;
		*(.text_head_entry*)	/* PBL entry point first */
		__bare_init_start = .;
		*(.text_bare_init*)
		__bare_init_end = .;
		*(.text*)
	}
	BAREBOX_BARE_INIT_SIZE

	. = ALIGN(4);
	.rodata : { *(.rodata*) }

	.barebox_imd : { BAREBOX_IMD }

	_etext = .;			/* End of text and rodata section */

	. = ALIGN(4);
	.data : { *(.data*) }

	pbl_code_size =  . - BASE;

	. = ALIGN(4);
	__piggydata_start = .;
	.piggydata : {
		*(.piggydata)
	}
	/* NOTE(review): __piggydata_end is relative (. - BASE) while
	 * pbl_image_size is absolute (.) — looks inverted compared to the
	 * other *_size symbols; confirm against consumers before changing. */
	__piggydata_end = . - BASE;

	pbl_image_size = .;

	. = ALIGN(4);
	__bss_start = .;
	.bss : { *(.bss*) }
	__bss_stop = .;

	pbl_memory_size = . - BASE;
	_end = .;
}
|
a3f/bareDOOM
| 3,207
|
arch/mips/lib/memset.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 1998, 1999, 2000 by Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 2007 by Maciej W. Rozycki
* Copyright (C) 2011, 2012 MIPS Technologies, Inc.
*
* Kernel-mode memset function without exceptions
* by Aleksey Kuleshov (rndfax@yandex.ru), 2015
*/
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/regdef.h>
#if LONGSIZE == 4
#define LONG_S_L swl
#define LONG_S_R swr
#else
#define LONG_S_L sdl
#define LONG_S_R sdr
#endif
#define STORSIZE LONGSIZE
#define STORMASK LONGMASK
#define FILL64RG a1
#define FILLPTRG t0
/*
* memset(void *s, int c, size_t n)
*
* a0: start of area to clear
* a1: char to fill with
* a2: size of area to clear
*/
/*
 * void *memset(void *s, int c, size_t n)
 * Exception-free MIPS memset.  Spreads the fill byte across a word
 * (skipped when c == 0), aligns the pointer with an unaligned
 * swl/swr-style store, fills 64-byte blocks via f_fill64, then jumps
 * into the middle of a trailing f_fill64 (computed-goto) for the
 * partial block, and finishes with a byte loop.  Returns s in v0.
 * noreorder: instruction after each branch is its delay slot.
 */
	LEAF(memset)
	beqz	a1, 1f			/* c == 0: word already 'spread' */
	 move	v0, a0			/* result */

	andi	a1, 0xff		/* spread fillword */
	LONG_SLL	t1, a1, 8
	or	a1, t1
	LONG_SLL	t1, a1, 16
#if LONGSIZE == 8
	or	a1, t1
	LONG_SLL	t1, a1, 32
#endif
	or	a1, t1

	/* store one 64-byte block of fill words at dst+offset */
	.macro	f_fill64 dst, offset, val
	LONG_S	\val, (\offset +  0 * STORSIZE)(\dst)
	LONG_S	\val, (\offset +  1 * STORSIZE)(\dst)
	LONG_S	\val, (\offset +  2 * STORSIZE)(\dst)
	LONG_S	\val, (\offset +  3 * STORSIZE)(\dst)
	LONG_S	\val, (\offset +  4 * STORSIZE)(\dst)
	LONG_S	\val, (\offset +  5 * STORSIZE)(\dst)
	LONG_S	\val, (\offset +  6 * STORSIZE)(\dst)
	LONG_S	\val, (\offset +  7 * STORSIZE)(\dst)
#if (LONGSIZE == 4)
	LONG_S	\val, (\offset +  8 * STORSIZE)(\dst)
	LONG_S	\val, (\offset +  9 * STORSIZE)(\dst)
	LONG_S	\val, (\offset + 10 * STORSIZE)(\dst)
	LONG_S	\val, (\offset + 11 * STORSIZE)(\dst)
	LONG_S	\val, (\offset + 12 * STORSIZE)(\dst)
	LONG_S	\val, (\offset + 13 * STORSIZE)(\dst)
	LONG_S	\val, (\offset + 14 * STORSIZE)(\dst)
	LONG_S	\val, (\offset + 15 * STORSIZE)(\dst)
#endif
	.endm

	.set	noreorder
	.align	5
1:
	sltiu	t0, a2, STORSIZE	/* very small region? */
	bnez	t0, .Lsmall_memset
	 andi	t0, a0, STORMASK	/* aligned? */

	beqz	t0, 1f
	 PTR_SUBU	t0, STORSIZE	/* alignment in bytes */

#ifdef __MIPSEB__
	LONG_S_L	a1, (a0)	/* make word/dword aligned */
#else
	LONG_S_R	a1, (a0)	/* make word/dword aligned */
#endif
	PTR_SUBU	a0, t0		/* long align ptr */
	PTR_ADDU	a2, t0		/* correct size */

1:	ori	t1, a2, 0x3f		/* # of full blocks */
	xori	t1, 0x3f
	beqz	t1, .Lmemset_partial	/* no block to fill */
	 andi	t0, a2, 0x40-STORSIZE

	PTR_ADDU	t1, a0		/* end address */
	.set	reorder
1:	PTR_ADDIU	a0, 64
	f_fill64 a0, -64, FILL64RG
	bne	t1, a0, 1b
	.set	noreorder

.Lmemset_partial:
	PTR_LA	t1, 2f			/* where to start */
#if LONGSIZE == 4
	PTR_SUBU	t1, FILLPTRG	/* back up one LONG_S per remaining word */
#else
	.set	noat
	LONG_SRL	AT, FILLPTRG, 1
	PTR_SUBU	t1, AT
	.set	at
#endif
	jr	t1			/* computed jump into f_fill64 below */
	 PTR_ADDU	a0, t0		/* dest ptr */

	.set	push
	.set	noreorder
	.set	nomacro
	/* ... but first do longs ... */
	f_fill64 a0, -64, FILL64RG
2:	.set	pop
	andi	a2, STORMASK		/* At most one long to go */

	beqz	a2, 1f
	 PTR_ADDU	a0, a2		/* What's left */
#ifdef __MIPSEB__
	LONG_S_R	a1, -1(a0)
#else
	LONG_S_L	a1, -1(a0)
#endif
1:	jr	ra
	 move	a2, zero

.Lsmall_memset:
	beqz	a2, 2f
	 PTR_ADDU	t1, a0, a2

1:	PTR_ADDIU	a0, 1		/* fill bytewise */
	bne	t1, a0, 1b
	 sb	a1, -1(a0)		/* delay slot stores the byte */

2:	jr	ra			/* done */
	 move	a2, zero
	END(memset)
|
a3f/bareDOOM
| 1,312
|
arch/mips/boards/netgear-wg102/lowlevel.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2013 Antony Pavlov <antonynpavlov@gmail.com>
* Copyright (C) 2013 Oleksij Rempel <linux@rempel-privat.de>
*/
#define BOARD_PBL_START start_netgear_wg102
#include <asm/pbl_macros.h>
#include <mach/pbl_macros.h>
#include <mach/ar2312_regs.h>
#include <mach/debug_ll.h>
ENTRY_FUNCTION(BOARD_PBL_START)
	/* PBL entry for the Netgear WG102 (AR2312): bring up PLL and UART0,
	 * configure SDRAM unless a first-stage loader already did, then
	 * relocate and jump into the C-level pbl_main_entry. */
	mips_barebox_10h

	mips_disable_interrupts

	pbl_ar2312_pll
	pbl_ar2312_rst_uart0
	debug_ll_ns16550_init

	debug_ll_outc 'a'		/* progress marker: early init done */
	debug_ll_ns16550_outnl

	/* check if SDRAM is already configured,
	 * if yes, we are probably starting
	 * as second stage loader and can skip configuration */
	la	t0, KSEG1 | AR2312_MEM_CFG1
	lw	t1, 0(t0)
	and	t0, t1, MEM_CFG1_E0	/* bank-0 enable bit */
	beq	zero, t0, 1f
	 nop

	pbl_probe_mem t0, t1, KSEG1
	beq	t0, t1, sdram_configured
	 nop

1:
	/* start SDRAM configuration */
	pbl_ar2312_x16_sdram

	/* check one more time. if some thing wrong,
	 * we don't need to continue */
	pbl_probe_mem t0, t1, KSEG1
	beq	t0, t1, sdram_configured
	 nop

	debug_ll_outc '#'		/* marker: SDRAM probe failed */
	debug_ll_ns16550_outnl
1:
	b	1b			/* dead end */
	 nop

sdram_configured:
	debug_ll_outc 'b'		/* marker: SDRAM usable */
	debug_ll_ns16550_outnl

	copy_to_link_location	BOARD_PBL_START;

	stack_setup;

	/* NOTE(review): loads a0 from address 0 — presumably the argument
	 * expected by pbl_main_entry; confirm against the macro/C side. */
	lw	a0, 0;
	la	v0, pbl_main_entry;
	jal	v0;
	 nop;

	/* No return */
__error:
	b	__error;
	 nop;
|
a3f/bareDOOM
| 1,884
|
arch/mips/boards/qemu-malta/lowlevel.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Startup Code for MIPS CPU
*
* Copyright (C) 2012 Antony Pavlov <antonynpavlov@gmail.com>
*/
#define BOARD_PBL_START start_qemu_malta
#include <asm/asm.h>
#include <asm/pbl_macros.h>
#include <asm/pbl_nmon.h>
#include <linux/sizes.h>
#include <asm/addrspace.h>
#include <asm/gt64120.h>
#include <mach/mach-gt64120.h>
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define GT_CPU_TO_LE32(x) (x)
#elif defined CONFIG_CPU_BIG_ENDIAN
#define GT_CPU_TO_LE32(x) ( \
(((x) & 0x000000ff) << 24) | \
(((x) & 0x0000ff00) << 8) | \
(((x) & 0x00ff0000) >> 8) | \
(((x) & 0xff000000) >> 24))
#else
#error "could not determine byte order"
#endif
#define GT_LD(x) (GT_CPU_TO_LE32(((x) >> 21) & 0x7fff))
#define GT_HD(x) (GT_CPU_TO_LE32(((x) >> 21) & 0x7f))
ENTRY_FUNCTION(BOARD_PBL_START)
	b	__start
	 nop

	/*
	   On MIPS Technologies boards
	   0x1fc00010 address is reserved for BoardID
	*/
	.org	0x10
	.asciiz	"barebox"

__start:
	mips_disable_interrupts

	/* cpu specific setup ... */
	/* ... absent */

	/*
	 * Load BAR registers of GT64120 as done by YAMON
	 *
	 * based on write_bootloader() in qemu.git/hw/mips/mips_malta.c
	 * see GT64120 manual and qemu.git/hw/mips/gt64xxx_pci.c for details
	 */

	/* move GT64120 registers to 0x1be00000 */
	li	t1, KSEG1ADDR(GT_DEF_BASE)
	li	t0, GT_LD(MIPS_GT_BASE)
	sw	t0, GT_ISD_OFS(t1)

	/*
	 * setup MEM-to-PCI0 mapping
	 */
	li	t1, KSEG1ADDR(MIPS_GT_BASE)	/* registers now at their new base */

	/* setup PCI0 io window */
	li	t0, GT_LD(0x18000000)
	sw	t0, GT_PCI0IOLD_OFS(t1)
	li	t0, GT_HD(0x181fffff)
	sw	t0, GT_PCI0IOHD_OFS(t1)

	/* setup PCI0 mem windows */
	li	t0, GT_LD(0x10000000)
	sw	t0, GT_PCI0M0LD_OFS(t1)
	li	t0, GT_HD(0x17efffff)
	sw	t0, GT_PCI0M0HD_OFS(t1)

	li	t0, GT_LD(0x18200000)
	sw	t0, GT_PCI0M1LD_OFS(t1)
	/* NOTE(review): M1 high-decode uses GT_LD() while the other *HD
	 * registers use GT_HD() — verify against the GT64120 manual. */
	li	t0, GT_LD(0x1bdfffff)
	sw	t0, GT_PCI0M1HD_OFS(t1)

ENTRY_FUNCTION_END(BOARD_PBL_START, qemu_malta, SZ_256M)
|
a3f/bareDOOM
| 3,386
|
arch/powerpc/mach-mpc85xx/barebox.lds.S
|
/*
* Copyright 2013 GE Intelligent Platforms, Inc.
* Copyright 2007-2009, 2011 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <asm-generic/barebox.lds.h>
#ifdef CONFIG_RESET_VECTOR_ADDRESS
#define RESET_VECTOR_ADDRESS CONFIG_RESET_VECTOR_ADDRESS
#else
#define RESET_VECTOR_ADDRESS 0xfffffffc
#endif
/*
 * Link map for MPC85xx barebox: relocatable text/data, the GOT/fixup
 * tables consumed by the runtime relocation code, and the boot page and
 * reset vector pinned at RESET_VECTOR_ADDRESS.
 */
OUTPUT_ARCH("powerpc")
ENTRY(_start_e500)

/* Two loadable segments: image proper, and zero-initialized bss. */
PHDRS
{
	text PT_LOAD;
	bss PT_LOAD;
}

SECTIONS
{
	/* Link (and initial run) address of the image. */
	. = TEXT_BASE;

	_stext = .;
	PROVIDE (stext = .);

	/* Dynamic-linking / relocation sections; usually empty, listed so
	   the linker has a defined place for them if they appear. */
	.interp : { *(.interp) }
	.hash : { *(.hash) }
	.dynsym : { *(.dynsym) }
	.dynstr : { *(.dynstr) }
	.rel.text : { *(.rel.text) }
	.rela.text : { *(.rela.text) }
	.rel.data : { *(.rel.data) }
	.rela.data : { *(.rela.data) }
	.rel.rodata : { *(.rel.rodata) }
	.rela.rodata : { *(.rela.rodata)}
	.rel.got : { *(.rel.got) }
	.rela.got : { *(.rela.got) }
	.rel.ctors : { *(.rel.ctors) }
	.rela.ctors : { *(.rela.ctors) }
	.rel.dtors : { *(.rel.dtors) }
	.rela.dtors : { *(.rela.dtors) }
	.rel.bss : { *(.rel.bss) }
	.rela.bss : { *(.rela.bss) }
	.rel.plt : { *(.rel.plt) }
	.rela.plt : { *(.rela.plt) }
	.init : { *(.init) }
	.plt : { *(.plt) }

	.text :
	{
		*(.text*)
		*(.got1*)
	} :text

	_etext = .;
	PROVIDE (etext = .);
	_sdata = .;

	.rodata :
	{
		*(SORT_BY_ALIGNMENT(SORT_BY_NAME(.rodata*)))
		RO_DATA_SECTION
	} :text

	/* Read-write section, merged into data segment: */
	/* round up to the next 256-byte boundary */
	. = (. + 0x00FF) & 0xFFFFFF00;
	_erotext = .;
	PROVIDE (erotext = .);

	/*
	 * GOT and fixup tables walked by the relocation code; the entry
	 * counts are computed right below and referenced from start.S.
	 */
	.reloc :
	{
		KEEP(*(.got))
		_GOT2_TABLE_ = .;
		KEEP(*(.got2))
		PROVIDE(_GLOBAL_OFFSET_TABLE_ = . + 4);
		_FIXUP_TABLE_ = .;
		KEEP(*(.fixup))
	}
	__got2_entries = ((_FIXUP_TABLE_ - _GOT2_TABLE_) >> 2);
	__fixup_entries = (. - _FIXUP_TABLE_) >> 2;

	.data :
	{
		*(.data*)
		*(.data1*)
		*(.sdata*)
		*(.sdata2*)
		*(.dynamic*)
		CONSTRUCTORS
	}
	. = .;

	__start___ex_table = .;
	__ex_table : { *(__ex_table) }
	__stop___ex_table = .;

	_edata = .;
	PROVIDE (edata = .);

	. = ALIGN(256);
	__init_begin = .;
	.text.init : { *(.text.init) }
	.data.init : { *(.data.init) }
	. = ALIGN(256);
	__init_end = .;
	__init_size = __init_end - _start;

	/* Boot page placed in the last 4 KiB before the reset vector. */
	.bootpg RESET_VECTOR_ADDRESS - 0xffc :
	{
		_text = .;
		arch/powerpc/cpu-85xx/start.o (.bootpg)
	} :text = 0xffff

	/* e500 reset vector, fixed at RESET_VECTOR_ADDRESS. */
	.resetvec RESET_VECTOR_ADDRESS :
	{
		KEEP(*(.resetvec))
		arch/powerpc/cpu-85xx/resetvec.o (.resetvec)
	} :text = 0xffff

	. = RESET_VECTOR_ADDRESS + 0x4;

#if (RESET_VECTOR_ADDRESS == 0xfffffffc)
	/* This avoids wrapping around to offset 0 */
	. |= 0x10;
#endif

	__bss_start = .;
	.bss :
	{
		*(.sbss*) *(.scommon*)
		*(.dynbss*)
		*(.bss*)
		*(COMMON)
	} :bss
	. = ALIGN(4);
	__bss_stop = .;
	_end = . ;
	PROVIDE (end = .);
}
|
a3f/bareDOOM
| 1,065
|
arch/powerpc/lib/reloc.S
|
/*
* Copyright (C) 2009 Wolfgang Denk <wd@denx.de>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <asm/ppc_asm.tmpl>
.file "reloc.S"
.text
/*
* Function: relocate entries for one exception vector
*/
/*
 * trap_reloc - relocate the three code pointers stored behind one
 * exception vector: hdlr, int_return and transfer_to_handler.
 *
 * In:	r3 = relocation offset (destination address) added to each word
 *	r7 = address of the 3-word pointer block of the vector
 * Clobbers r0; returns via blr.  Called in a loop from trap_init.
 */
.globl trap_reloc
.type trap_reloc, @function
trap_reloc:
	lwz	r0, 0(r7)		/* hdlr ... */
	add	r0, r0, r3		/* ... += dest_addr */
	stw	r0, 0(r7)

	lwz	r0, 4(r7)		/* int_return ... */
	add	r0, r0, r3		/* ... += dest_addr */
	stw	r0, 4(r7)

	lwz	r0, 8(r7)		/* transfer_to_handler ...*/
	add	r0, r0, r3		/* ... += dest_addr */
	stw	r0, 8(r7)

	blr
.size trap_reloc, .-trap_reloc
|
a3f/bareDOOM
| 1,930
|
arch/powerpc/lib/setjmp.S
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* This is a simple version of setjmp and longjmp for the PowerPC.
* Ian Lance Taylor, Cygnus Support, 9 Feb 1994.
*/
#include <linux/linkage.h>
#include <asm/ppc_asm.tmpl>
/*
 * int setjmp(jmp_buf buf)
 *
 * buf (r3) is rounded up to an 8-byte boundary first.  Saved layout,
 * in word offsets from the aligned base:
 *   0 = r1 (stack), 4 = r2, 8..80 = r13-r31, 84 = LR, 88 = CR.
 * Always returns 0 (the non-zero return happens in longjmp).
 */
ENTRY(setjmp)
	addi	r3,r3,7		# align to 8 byte boundary
	rlwinm	r3,r3,0,0,28
	stw	r1,0(r3)	# offset 0
	stwu	r2,4(r3)	# offset 4
	stwu	r13,4(r3)	# offset 8
	stwu	r14,4(r3)	# offset 12
	stwu	r15,4(r3)	# offset 16
	stwu	r16,4(r3)	# offset 20
	stwu	r17,4(r3)	# offset 24
	stwu	r18,4(r3)	# offset 28
	stwu	r19,4(r3)	# offset 32
	stwu	r20,4(r3)	# offset 36
	stwu	r21,4(r3)	# offset 40
	stwu	r22,4(r3)	# offset 44
	stwu	r23,4(r3)	# offset 48
	stwu	r24,4(r3)	# offset 52
	stwu	r25,4(r3)	# offset 56
	stwu	r26,4(r3)	# offset 60
	stwu	r27,4(r3)	# offset 64
	stwu	r28,4(r3)	# offset 68
	stwu	r29,4(r3)	# offset 72
	stwu	r30,4(r3)	# offset 76
	stwu	r31,4(r3)	# offset 80
	mflr	r4
	stwu	r4,4(r3)	# offset 84: link register
	mfcr	r4
	stwu	r4,4(r3)	# offset 88: condition register
	li	r3,0
	blr
END(setjmp)
/*
 * void longjmp(jmp_buf buf, int val)
 *
 * Restores the register set saved by setjmp (same 8-byte-aligned
 * layout: r1 at 0, r2 at 4, r13-r31, LR at 84, CR at 88) and resumes
 * at the saved LR.  Returns val (r4) there, substituting 1 when val
 * is 0, as setjmp's result - the mr./bclr+ pair branches directly
 * when r4 != 0 and falls through to load 1 otherwise.
 */
ENTRY(longjmp)
	addi	r3,r3,7		# align to 8 byte boundary
	rlwinm	r3,r3,0,0,28
	lwz	r1,0(r3)	# offset 0
	lwzu	r2,4(r3)	# offset 4
	lwzu	r13,4(r3)	# offset 8
	lwzu	r14,4(r3)	# offset 12
	lwzu	r15,4(r3)	# offset 16
	lwzu	r16,4(r3)	# offset 20
	lwzu	r17,4(r3)	# offset 24
	lwzu	r18,4(r3)	# offset 28
	lwzu	r19,4(r3)	# offset 32
	lwzu	r20,4(r3)	# offset 36
	lwzu	r21,4(r3)	# offset 40
	lwzu	r22,4(r3)	# offset 44
	lwzu	r23,4(r3)	# offset 48
	lwzu	r24,4(r3)	# offset 52
	lwzu	r25,4(r3)	# offset 56
	lwzu	r26,4(r3)	# offset 60
	lwzu	r27,4(r3)	# offset 64
	lwzu	r28,4(r3)	# offset 68
	lwzu	r29,4(r3)	# offset 72
	lwzu	r30,4(r3)	# offset 76
	lwzu	r31,4(r3)	# offset 80
	lwzu	r5,4(r3)	# offset 84: link register
	mtlr	r5
	lwzu	r5,4(r3)	# offset 88: condition register
	mtcrf	255,r5
	mr.	r3,r4		# r3 = val, cr0 = (val == 0)
	bclr+	4,2		# return via LR if val != 0
	li	r3,1		# val == 0: return 1 instead
	blr
END(longjmp)
/*
 * int initjmp(jmp_buf buf, void (*func)(void), void *stack_top)
 *
 * Prime a buffer so that a subsequent longjmp() starts func on the
 * given stack.  The layout must match setjmp/longjmp above: the stack
 * pointer lives at word offset 0 and the resume address - fed to mtlr
 * by longjmp - at word offset 84.
 *
 * Fix: the original stored func at offset 88, which longjmp feeds to
 * mtcrf (condition register), leaving the restored LR undefined and
 * the jump target garbage.  Store it in the LR slot (84) instead.
 */
ENTRY(initjmp)
	addi	r3,r3,7		# align to 8 byte boundary
	rlwinm	r3,r3,0,0,28
	stw	r5,0(r3)	# offset 0: initial stack pointer
	stwu	r4,84(r3)	# offset 84: link register = entry point
	li	r3,0
	blr
END(initjmp)
|
a3f/bareDOOM
| 1,186
|
arch/powerpc/lib/misc.S
|
#include <asm/ppc_asm.tmpl>
#include <asm-generic/errno.h>
/*
 * long long __ashrdi3(long long val, int count)
 * 64-bit arithmetic shift right; MSW in r3, LSW in r4, count in r5.
 * Branch-free: the count<32 and count>=32 cases are merged via masks.
 */
.globl __ashrdi3
__ashrdi3:
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	rlwinm	r8,r7,0,32	# t3 = (count < 32) ? 32 : 0
	sraw	r7,r3,r7	# t2 = MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	slw	r7,r7,r8	# t2 = (count < 32) ? 0 : t2
	sraw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr
/*
 * long long __ashldi3(long long val, int count)
 * 64-bit shift left; MSW in r3, LSW in r4, count in r5.  Branch-free.
 */
.globl __ashldi3
__ashldi3:
	subfic	r6,r5,32
	slw	r3,r3,r5	# MSW = count > 31 ? 0 : MSW << count
	addi	r7,r5,32	# could be xori, or addi with -32
	srw	r6,r4,r6	# t1 = count > 31 ? 0 : LSW >> (32-count)
	slw	r7,r4,r7	# t2 = count < 32 ? 0 : LSW << (count-32)
	or	r3,r3,r6	# MSW |= t1
	slw	r4,r4,r5	# LSW = LSW << count
	or	r3,r3,r7	# MSW |= t2
	blr
/*
 * unsigned long long __lshrdi3(unsigned long long val, int count)
 * 64-bit logical shift right; MSW in r3, LSW in r4, count in r5.
 */
.globl __lshrdi3
__lshrdi3:
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	srw	r7,r3,r7	# t2 = count < 32 ? 0 : MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	srw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr
|
a3f/bareDOOM
| 3,045
|
arch/powerpc/lib/ppcstring.S
|
/*
* String handling functions for PowerPC.
*
* Copyright (C) 1996 Paul Mackerras.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/ppc_asm.tmpl>
#include <asm-generic/errno.h>
/*
 * char *strcpy(char *dest, const char *src)
 * Copies bytes up to and including the NUL; returns dest (r3 untouched).
 */
.globl strcpy
strcpy:
	addi	r5,r3,-1
	addi	r4,r4,-1
1:	lbzu	r0,1(r4)
	cmpwi	0,r0,0
	stbu	r0,1(r5)
	bne	1b
	blr
/*
 * char *strncpy(char *dest, const char *src, size_t n)
 * Copies at most n bytes, stopping after the terminating NUL.
 * NOTE(review): unlike ISO strncpy this does not zero-pad dest out to
 * n bytes when src is shorter - callers must not rely on padding.
 */
.globl strncpy
strncpy:
	cmpwi	0,r5,0
	beqlr
	mtctr	r5
	addi	r6,r3,-1
	addi	r4,r4,-1
1:	lbzu	r0,1(r4)
	cmpwi	0,r0,0
	stbu	r0,1(r6)
	bdnzf	2,1b		/* dec ctr, branch if ctr != 0 && !cr0.eq */
	blr
/*
 * char *strcat(char *dest, const char *src)
 * Scans to dest's NUL, then appends src (including its NUL).
 * Returns dest (r3 untouched).
 */
.globl strcat
strcat:
	addi	r5,r3,-1
	addi	r4,r4,-1
1:	lbzu	r0,1(r5)	/* find end of dest */
	cmpwi	0,r0,0
	bne	1b
	addi	r5,r5,-1	/* back up onto the NUL */
1:	lbzu	r0,1(r4)	/* copy src including NUL */
	cmpwi	0,r0,0
	stbu	r0,1(r5)
	bne	1b
	blr
/*
 * int strcmp(const char *a, const char *b)
 * Returns a[i] - b[i] at the first difference, 0 if equal (cr1 tracks
 * the end-of-string condition, cr0 the byte difference).
 */
.globl strcmp
strcmp:
	addi	r5,r3,-1
	addi	r4,r4,-1
1:	lbzu	r3,1(r5)
	cmpwi	1,r3,0		/* cr1: hit NUL in a? */
	lbzu	r0,1(r4)
	subf.	r3,r0,r3	/* r3 = a[i] - b[i], cr0 set */
	beqlr	1		/* end of a: return difference (0 or a-b) */
	beq	1b		/* bytes equal: keep scanning */
	blr
/*
 * size_t strlen(const char *s)
 * Returns the number of bytes before the NUL terminator.
 */
.globl strlen
strlen:
	addi	r4,r3,-1
1:	lbzu	r0,1(r4)
	cmpwi	0,r0,0
	bne	1b
	subf	r3,r3,r4	/* length = &NUL - s */
	blr
/*
 * void *memset(void *s, int c, size_t n)
 * r3 = s (returned unchanged), r4 = fill byte, r5 = count.
 * Replicates c into all four bytes of r4, stores whole words after
 * aligning the pointer, then finishes the trailing 0-3 bytes.
 */
.globl memset
memset:
	rlwimi	r4,r4,8,16,23	/* replicate byte into halfword */
	rlwimi	r4,r4,16,0,15	/* ... and into the full word */
	addi	r6,r3,-4
	cmplwi	0,r5,4
	blt	7f		/* fewer than 4 bytes: byte loop only */
	stwu	r4,4(r6)	/* one unaligned word to cover the head */
	beqlr			/* exactly 4 bytes: done */
	andi.	r0,r6,3		/* realign to word boundary */
	add	r5,r0,r5
	subf	r6,r0,r6
	rlwinm	r0,r5,32-2,2,31	/* word count = n >> 2 */
	mtctr	r0
	bdz	6f
1:	stwu	r4,4(r6)
	bdnz	1b
6:	andi.	r5,r5,3		/* leftover bytes */
7:	cmpwi	0,r5,0
	beqlr
	mtctr	r5
	addi	r6,r6,3
8:	stbu	r4,1(r6)
	bdnz	8b
	blr
/*
 * bcopy(src, dest, n) - legacy BSD argument order; swaps the first two
 * arguments and tail-calls memcpy.
 */
.globl bcopy
bcopy:
	mr	r6,r3
	mr	r3,r4
	mr	r4,r6
	b	memcpy
/*
 * void *memmove(void *dest, const void *src, size_t n)
 * Overlap-safe: copies backwards when dest > src, otherwise falls
 * through into memcpy directly below.
 */
.globl memmove
memmove:
	cmplw	0,r3,r4
	bgt	backwards_memcpy
	/* fall through */
/*
 * void *memcpy(void *dest, const void *src, size_t n)
 * Forward copy: unrolled 8-byte word loop once dest is word aligned,
 * then a word for the 4..7 remainder, then a byte loop for the tail.
 * Returns dest (r3 untouched).
 */
.globl memcpy
memcpy:
	rlwinm.	r7,r5,32-3,3,31	/* r7 = n >> 3 (8-byte chunks) */
	addi	r6,r3,-4
	addi	r4,r4,-4
	beq	2f		/* if less than 8 bytes to do */
	andi.	r0,r6,3		/* get dest word aligned */
	mtctr	r7
	bne	5f
1:	lwz	r7,4(r4)	/* main loop: 8 bytes per iteration */
	lwzu	r8,8(r4)
	stw	r7,4(r6)
	stwu	r8,8(r6)
	bdnz	1b
	andi.	r5,r5,7
2:	cmplwi	0,r5,4
	blt	3f
	lwzu	r0,4(r4)	/* one word of the remainder */
	addi	r5,r5,-4
	stwu	r0,4(r6)
3:	cmpwi	0,r5,0
	beqlr
	mtctr	r5		/* trailing 1-3 bytes */
	addi	r4,r4,3
	addi	r6,r6,3
4:	lbzu	r0,1(r4)
	stbu	r0,1(r6)
	bdnz	4b
	blr
5:	subfic	r0,r0,4		/* copy 1-3 head bytes to align dest */
	mtctr	r0
6:	lbz	r7,4(r4)
	addi	r4,r4,1
	stb	r7,4(r6)
	addi	r6,r6,1
	bdnz	6b
	subf	r5,r0,r5
	rlwinm.	r7,r5,32-3,3,31	/* recompute chunk count */
	beq	2b
	mtctr	r7
	b	1b
/*
 * backwards_memcpy(dest, src, n) - copy from the top down, used by
 * memmove when dest > src (overlapping forward copy would corrupt).
 * Same structure as memcpy, mirrored.
 */
.globl backwards_memcpy
backwards_memcpy:
	rlwinm.	r7,r5,32-3,3,31	/* r7 = n >> 3 (8-byte chunks) */
	add	r6,r3,r5	/* point just past the ends */
	add	r4,r4,r5
	beq	2f
	andi.	r0,r6,3
	mtctr	r7
	bne	5f
1:	lwz	r7,-4(r4)	/* main loop: 8 bytes per iteration */
	lwzu	r8,-8(r4)
	stw	r7,-4(r6)
	stwu	r8,-8(r6)
	bdnz	1b
	andi.	r5,r5,7
2:	cmplwi	0,r5,4
	blt	3f
	lwzu	r0,-4(r4)
	subi	r5,r5,4
	stwu	r0,-4(r6)
3:	cmpwi	0,r5,0
	beqlr
	mtctr	r5		/* remaining 1-3 bytes */
4:	lbzu	r0,-1(r4)
	stbu	r0,-1(r6)
	bdnz	4b
	blr
5:	mtctr	r0		/* copy tail bytes to align dest */
6:	lbzu	r7,-1(r4)
	stbu	r7,-1(r6)
	bdnz	6b
	subf	r5,r0,r5
	rlwinm.	r7,r5,32-3,3,31
	beq	2b
	mtctr	r7
	b	1b
/*
 * int memcmp(const void *a, const void *b, size_t n)
 * Returns a[i] - b[i] at the first differing byte, 0 when equal;
 * also returns 0 for n <= 0.
 */
.globl memcmp
memcmp:
	cmpwi	0,r5,0
	ble-	2f
	mtctr	r5
	addi	r6,r3,-1
	addi	r4,r4,-1
1:	lbzu	r3,1(r6)
	lbzu	r0,1(r4)
	subf.	r3,r0,r3
	bdnzt	2,1b		/* continue while bytes equal and count left */
	blr
2:	li	r3,0
	blr
/*
 * void *memchr(const void *s, int c, size_t n)
 * Returns a pointer to the first byte equal to c, or NULL when not
 * found within n bytes (or n <= 0).
 */
.global memchr
memchr:
	cmpwi	0,r5,0
	ble-	2f
	mtctr	r5
	addi	r3,r3,-1
1:	lbzu	r0,1(r3)
	cmpw	0,r0,r4
	bdnzf	2,1b		/* loop while not found and count left */
	beqlr			/* found: r3 already points at the byte */
2:	li	r3,0
	blr
|
a3f/bareDOOM
| 5,942
|
arch/powerpc/lib/crtsavres.S
|
/*
* Special support for eabi and SVR4
*
* Copyright (C) 1995, 1996, 1998, 2000, 2001 Free Software Foundation, Inc.
* Copyright 2008 Freescale Semiconductor, Inc.
* Written By Michael Meissner
*
* Based on gcc/config/rs6000/crtsavres.asm from gcc
* 64 bit additions from reading the PPC elf64abi document.
*
* This file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* In addition to the permissions in the GNU General Public License, the
* Free Software Foundation gives you unlimited permission to link the
* compiled version of this file with other programs, and to distribute
* those programs without any restriction coming from the use of this
* file. (The General Public License restrictions do apply in other
* respects; for example, they cover modification of the file, and
* distribution when not linked into another program.)
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* As a special exception, if you link this library with files
* compiled with GCC to produce an executable, this does not cause
* the resulting executable to be covered by the GNU General Public License.
* This exception does not however invalidate any other reasons why
* the executable file might be covered by the GNU General Public License.
*/
#include <linux/stringify.h>
#include <asm/ppc_asm.tmpl>
/* Stabs symbol type for a function entry. */
#define N_FUN		36

/*
 * _GLOBAL(n): emit a global function label together with a stabs debug
 * record so debuggers can identify these compiler-support routines.
 */
#define _GLOBAL(n)	\
	.text; \
	.stabs __stringify(n:F-1),N_FUN,0,0,n;\
	.globl n; \
n:

	.file  "crtsavres.S"
	.section ".text"
/* Routines for saving integer registers, called by the compiler. */
/* Called with r11 pointing to the stack header word of the caller of the */
/* function, just beyond the end of the integer save area. */
/*
 * Each entry point falls through to the next, so calling _savegpr_N
 * stores r<N>..r31.  The _save32gpr_* names are alternate entry names
 * used by some GCC configurations.  Do not reorder these labels.
 */
_GLOBAL(_savegpr_14)
_GLOBAL(_save32gpr_14)
	stw	14,-72(11)	/* save gp registers */
_GLOBAL(_savegpr_15)
_GLOBAL(_save32gpr_15)
	stw	15,-68(11)
_GLOBAL(_savegpr_16)
_GLOBAL(_save32gpr_16)
	stw	16,-64(11)
_GLOBAL(_savegpr_17)
_GLOBAL(_save32gpr_17)
	stw	17,-60(11)
_GLOBAL(_savegpr_18)
_GLOBAL(_save32gpr_18)
	stw	18,-56(11)
_GLOBAL(_savegpr_19)
_GLOBAL(_save32gpr_19)
	stw	19,-52(11)
_GLOBAL(_savegpr_20)
_GLOBAL(_save32gpr_20)
	stw	20,-48(11)
_GLOBAL(_savegpr_21)
_GLOBAL(_save32gpr_21)
	stw	21,-44(11)
_GLOBAL(_savegpr_22)
_GLOBAL(_save32gpr_22)
	stw	22,-40(11)
_GLOBAL(_savegpr_23)
_GLOBAL(_save32gpr_23)
	stw	23,-36(11)
_GLOBAL(_savegpr_24)
_GLOBAL(_save32gpr_24)
	stw	24,-32(11)
_GLOBAL(_savegpr_25)
_GLOBAL(_save32gpr_25)
	stw	25,-28(11)
_GLOBAL(_savegpr_26)
_GLOBAL(_save32gpr_26)
	stw	26,-24(11)
_GLOBAL(_savegpr_27)
_GLOBAL(_save32gpr_27)
	stw	27,-20(11)
_GLOBAL(_savegpr_28)
_GLOBAL(_save32gpr_28)
	stw	28,-16(11)
_GLOBAL(_savegpr_29)
_GLOBAL(_save32gpr_29)
	stw	29,-12(11)
_GLOBAL(_savegpr_30)
_GLOBAL(_save32gpr_30)
	stw	30,-8(11)
_GLOBAL(_savegpr_31)
_GLOBAL(_save32gpr_31)
	stw	31,-4(11)
	blr
/* Routines for restoring integer registers, called by the compiler. */
/* Called with r11 pointing to the stack header word of the caller of the */
/* function, just beyond the end of the integer restore area. */
/*
 * Fall-through chain: _restgpr_N reloads r<N>..r31 and returns.
 * Do not reorder these labels.
 */
_GLOBAL(_restgpr_14)
_GLOBAL(_rest32gpr_14)
	lwz	14,-72(11)	/* restore gp registers */
_GLOBAL(_restgpr_15)
_GLOBAL(_rest32gpr_15)
	lwz	15,-68(11)
_GLOBAL(_restgpr_16)
_GLOBAL(_rest32gpr_16)
	lwz	16,-64(11)
_GLOBAL(_restgpr_17)
_GLOBAL(_rest32gpr_17)
	lwz	17,-60(11)
_GLOBAL(_restgpr_18)
_GLOBAL(_rest32gpr_18)
	lwz	18,-56(11)
_GLOBAL(_restgpr_19)
_GLOBAL(_rest32gpr_19)
	lwz	19,-52(11)
_GLOBAL(_restgpr_20)
_GLOBAL(_rest32gpr_20)
	lwz	20,-48(11)
_GLOBAL(_restgpr_21)
_GLOBAL(_rest32gpr_21)
	lwz	21,-44(11)
_GLOBAL(_restgpr_22)
_GLOBAL(_rest32gpr_22)
	lwz	22,-40(11)
_GLOBAL(_restgpr_23)
_GLOBAL(_rest32gpr_23)
	lwz	23,-36(11)
_GLOBAL(_restgpr_24)
_GLOBAL(_rest32gpr_24)
	lwz	24,-32(11)
_GLOBAL(_restgpr_25)
_GLOBAL(_rest32gpr_25)
	lwz	25,-28(11)
_GLOBAL(_restgpr_26)
_GLOBAL(_rest32gpr_26)
	lwz	26,-24(11)
_GLOBAL(_restgpr_27)
_GLOBAL(_rest32gpr_27)
	lwz	27,-20(11)
_GLOBAL(_restgpr_28)
_GLOBAL(_rest32gpr_28)
	lwz	28,-16(11)
_GLOBAL(_restgpr_29)
_GLOBAL(_rest32gpr_29)
	lwz	29,-12(11)
_GLOBAL(_restgpr_30)
_GLOBAL(_rest32gpr_30)
	lwz	30,-8(11)
_GLOBAL(_restgpr_31)
_GLOBAL(_rest32gpr_31)
	lwz	31,-4(11)
	blr
/* Routines for restoring integer registers, called by the compiler. */
/* Called with r11 pointing to the stack header word of the caller of the */
/* function, just beyond the end of the integer restore area. */
/*
 * "_x" (exit) variants: like _restgpr_N, but the common tail after
 * _restgpr_31_x also reloads the saved LR from 4(r11), restores the
 * caller's stack pointer from r11 and returns on the caller's behalf.
 * Fall-through chain - do not reorder these labels.
 */
_GLOBAL(_restgpr_14_x)
_GLOBAL(_rest32gpr_14_x)
	lwz	14,-72(11)	/* restore gp registers */
_GLOBAL(_restgpr_15_x)
_GLOBAL(_rest32gpr_15_x)
	lwz	15,-68(11)
_GLOBAL(_restgpr_16_x)
_GLOBAL(_rest32gpr_16_x)
	lwz	16,-64(11)
_GLOBAL(_restgpr_17_x)
_GLOBAL(_rest32gpr_17_x)
	lwz	17,-60(11)
_GLOBAL(_restgpr_18_x)
_GLOBAL(_rest32gpr_18_x)
	lwz	18,-56(11)
_GLOBAL(_restgpr_19_x)
_GLOBAL(_rest32gpr_19_x)
	lwz	19,-52(11)
_GLOBAL(_restgpr_20_x)
_GLOBAL(_rest32gpr_20_x)
	lwz	20,-48(11)
_GLOBAL(_restgpr_21_x)
_GLOBAL(_rest32gpr_21_x)
	lwz	21,-44(11)
_GLOBAL(_restgpr_22_x)
_GLOBAL(_rest32gpr_22_x)
	lwz	22,-40(11)
_GLOBAL(_restgpr_23_x)
_GLOBAL(_rest32gpr_23_x)
	lwz	23,-36(11)
_GLOBAL(_restgpr_24_x)
_GLOBAL(_rest32gpr_24_x)
	lwz	24,-32(11)
_GLOBAL(_restgpr_25_x)
_GLOBAL(_rest32gpr_25_x)
	lwz	25,-28(11)
_GLOBAL(_restgpr_26_x)
_GLOBAL(_rest32gpr_26_x)
	lwz	26,-24(11)
_GLOBAL(_restgpr_27_x)
_GLOBAL(_rest32gpr_27_x)
	lwz	27,-20(11)
_GLOBAL(_restgpr_28_x)
_GLOBAL(_rest32gpr_28_x)
	lwz	28,-16(11)
_GLOBAL(_restgpr_29_x)
_GLOBAL(_rest32gpr_29_x)
	lwz	29,-12(11)
_GLOBAL(_restgpr_30_x)
_GLOBAL(_rest32gpr_30_x)
	lwz	30,-8(11)
_GLOBAL(_restgpr_31_x)
_GLOBAL(_rest32gpr_31_x)
	lwz	0,4(11)		/* caller's saved LR */
	lwz	31,-4(11)
	mtlr	0
	mr	1,11		/* pop the frame */
	blr
|
a3f/bareDOOM
| 17,007
|
arch/powerpc/mach-mpc5xxx/start.S
|
/*
* Copyright (C) 1998 Dan Malek <dmalek@jlc.net>
* Copyright (C) 1999 Magnus Damm <kieraypc01.p.y.kie.era.ericsson.se>
* Copyright (C) 2000 - 2003 Wolfgang Denk <wd@denx.de>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
/*
* barebox - Startup Code for MPC5xxx CPUs
*/
#include <config.h>
#include <asm/ppc_asm.tmpl>
#include <asm/ppc_defs.h>
#include <asm/cache.h>
#include <asm/mmu.h>
/* We don't want the MMU yet.
*/
#undef MSR_KERNEL
/* Floating Point enable, Machine Check and Recoverable Interr. */
#ifdef DEBUG
#define MSR_KERNEL (MSR_FP|MSR_RI)
#else
#define MSR_KERNEL (MSR_FP|MSR_ME|MSR_RI)
#endif
/*
* Set up GOT: Global Offset Table
*
* Use r14 to access the GOT
*/
/* Symbols resolved through the GOT so the code stays relocatable. */
START_GOT
	GOT_ENTRY(_GOT2_TABLE_)
	GOT_ENTRY(_FIXUP_TABLE_)
	GOT_ENTRY(_start)
	GOT_ENTRY(_start_of_vectors)
	GOT_ENTRY(_end_of_vectors)
	GOT_ENTRY(transfer_to_handler)
	GOT_ENTRY(__init_end)
	GOT_ENTRY(_end)
	GOT_ENTRY(__bss_start)
END_GOT
/*
* Exception vectors
*/
	.text

/*
 * Second stage loader entry. When entered here we assume that spr 311
 * is set to the current MBAR address.
 */
	mfspr	r4, MBAR
	b	setup_mbar

	. = EXC_OFF_SYS_RESET
	.globl	_start
_start:
	/*
	 * Reset entry. When entered here we assume that MBAR is at reset default
	 * 0x80000000.
	 */
	lis	r4, 0x80000000@h
	ori	r4, r4, 0x80000000@l

setup_mbar:
	/* r4 == current MBAR */
	mfmsr	r5		/* save msr contents */

	/* Switch MBAR to 0xf0000000 */
	lis	r3, 0xf0000000@h
	ori	r3, r3, 0xf0000000@l
	mtspr	MBAR, r3
	rlwinm	r3, r3, 16, 16, 31	/* base bits 31:16 of the new address */
	stw	r3, 0(r4)	/* write new base into MBAR reg at old base */

	/* Initialise the MPC5xxx processor core */
	/*--------------------------------------------------------------*/
	bl	init_5xxx_core

	/* initialize some things that are hard to access from C */
	/*--------------------------------------------------------------*/
	/* set up stack in on-chip SRAM */
	lis	r1, (MPC5XXX_SRAM + MPC5XXX_SRAM_SIZE)@h
	ori	r1, r1, (MPC5XXX_SRAM + MPC5XXX_SRAM_SIZE)@l
	li	r0, 0		/* Make room for stack frame header and */
	stwu	r0, -4(r1)	/* clear final stack frame so that */
	stwu	r0, -4(r1)	/* stack backtraces terminate cleanly */

	/* let the C-code set up the rest */
	/* */
	/* Be careful to keep code relocatable ! */
	/*--------------------------------------------------------------*/
	GET_GOT			/* initialize GOT access */

	/* r3: IMMR */
	bl	cpu_init	/* run low-level CPU init code (in Flash)*/
	mr	r3, r21
	/* r3: BOOTFLAG */
	bl	initdram	/* initialize sdram */
	/* r3: End of RAM */
	b	_continue_init
/*
* Vector Table
*/
/*
 * Exception vector table.  Each STD_EXCEPTION() expands to a complete
 * vector at the given offset; Alignment and ProgramCheck are open-coded
 * because they capture extra state (DAR/DSISR) before dispatch.
 */
	.globl	_start_of_vectors
_start_of_vectors:

/* Machine check */
	STD_EXCEPTION(0x200, MachineCheck, MachineCheckException)

/* Data Storage exception. */
	STD_EXCEPTION(0x300, DataStorage, UnknownException)

/* Instruction Storage exception. */
	STD_EXCEPTION(0x400, InstStorage, UnknownException)

/* Alignment exception. */
	. = 0x600
Alignment:
	EXCEPTION_PROLOG(SRR0, SRR1)
	mfspr	r4,DAR		/* faulting data address */
	stw	r4,_DAR(r21)
	mfspr	r5,DSISR	/* fault cause bits */
	stw	r5,_DSISR(r21)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_TEMPLATE(Alignment, AlignmentException, MSR_KERNEL, COPY_EE)

/* Program check exception */
	. = 0x700
ProgramCheck:
	EXCEPTION_PROLOG(SRR0, SRR1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_TEMPLATE(ProgramCheck, ProgramCheckException,
		MSR_KERNEL, COPY_EE)

	STD_EXCEPTION(0x800, FPUnavailable, UnknownException)

	/* I guess we could implement decrementer, and may have
	 * to someday for timekeeping.
	 */
	STD_EXCEPTION(0x900, Decrementer, UnknownException)
	STD_EXCEPTION(0xa00, Trap_0a, UnknownException)
	STD_EXCEPTION(0xb00, Trap_0b, UnknownException)
	STD_EXCEPTION(0xc00, SystemCall, UnknownException)
	STD_EXCEPTION(0xd00, SingleStep, UnknownException)
	STD_EXCEPTION(0xe00, Trap_0e, UnknownException)
	STD_EXCEPTION(0xf00, Trap_0f, UnknownException)
	STD_EXCEPTION(0x1000, InstructionTLBMiss, UnknownException)
	STD_EXCEPTION(0x1100, DataLoadTLBMiss, UnknownException)
	STD_EXCEPTION(0x1200, DataStoreTLBMiss, UnknownException)
#ifdef DEBUG
	. = 0x1300
	/*
	 * This exception occurs when the program counter matches the
	 * Instruction Address Breakpoint Register (IABR).
	 *
	 * I want the cpu to halt if this occurs so I can hunt around
	 * with the debugger and look at things.
	 *
	 * When DEBUG is defined, both machine check enable (in the MSR)
	 * and checkstop reset enable (in the reset mode register) are
	 * turned off and so a checkstop condition will result in the cpu
	 * halting.
	 *
	 * I force the cpu into a checkstop condition by putting an illegal
	 * instruction here (at least this is the theory).
	 *
	 * well - that didnt work, so just do an infinite loop!
	 */
1:	b	1b
#else
	STD_EXCEPTION(0x1300, InstructionBreakpoint, DebugException)
#endif
	STD_EXCEPTION(0x1400, SMI, UnknownException)

	STD_EXCEPTION(0x1500, Trap_15, UnknownException)
	STD_EXCEPTION(0x1600, Trap_16, UnknownException)
	STD_EXCEPTION(0x1700, Trap_17, UnknownException)
	STD_EXCEPTION(0x1800, Trap_18, UnknownException)
	STD_EXCEPTION(0x1900, Trap_19, UnknownException)
	STD_EXCEPTION(0x1a00, Trap_1a, UnknownException)
	STD_EXCEPTION(0x1b00, Trap_1b, UnknownException)
	STD_EXCEPTION(0x1c00, Trap_1c, UnknownException)
	STD_EXCEPTION(0x1d00, Trap_1d, UnknownException)
	STD_EXCEPTION(0x1e00, Trap_1e, UnknownException)
	STD_EXCEPTION(0x1f00, Trap_1f, UnknownException)
	STD_EXCEPTION(0x2000, Trap_20, UnknownException)
	STD_EXCEPTION(0x2100, Trap_21, UnknownException)
	STD_EXCEPTION(0x2200, Trap_22, UnknownException)
	STD_EXCEPTION(0x2300, Trap_23, UnknownException)
	STD_EXCEPTION(0x2400, Trap_24, UnknownException)
	STD_EXCEPTION(0x2500, Trap_25, UnknownException)
	STD_EXCEPTION(0x2600, Trap_26, UnknownException)
	STD_EXCEPTION(0x2700, Trap_27, UnknownException)
	STD_EXCEPTION(0x2800, Trap_28, UnknownException)
	STD_EXCEPTION(0x2900, Trap_29, UnknownException)
	STD_EXCEPTION(0x2a00, Trap_2a, UnknownException)
	STD_EXCEPTION(0x2b00, Trap_2b, UnknownException)
	STD_EXCEPTION(0x2c00, Trap_2c, UnknownException)
	STD_EXCEPTION(0x2d00, Trap_2d, UnknownException)
	STD_EXCEPTION(0x2e00, Trap_2e, UnknownException)
	STD_EXCEPTION(0x2f00, Trap_2f, UnknownException)

	.globl	_end_of_vectors
_end_of_vectors:
	. = 0x3000

/*
 * Continue after early init: relocate the image to its final RAM
 * address, flush caches over the copied range, fix up the GOT and
 * fixup tables for the new location, clear BSS and enter C again.
 * r3 on entry = end of RAM (from initdram).
 */
_continue_init:
	mr	r9, r3		/* Save copy of end of RAM */
#ifdef CONFIG_RELOCATABLE
	lis	r10, (_end - _start)@h	/* Size */
	ori	r10, r10, (_end - _start)@l
	sub	r3, r3, r10	/* relocate to top of RAM */
	subi	r3, r3, 0x100
#else
	lis	r3, (TEXT_BASE)@h	/* Destination Address */
	ori	r3, r3, (TEXT_BASE)@l
#endif
	mr	r1, r3		/* Set new stack just below barebox code */
	subi	r1, r1, 0x10
	mr	r10, r3		/* Save copy of Destination Address */

	bl	calc_source	/* Calculate Source Address */
calc_source:
	mfspr	r4, LR		/* PC-relative: where are we running now? */
	subi	r4, r4, (calc_source - _start)
	subi	r4, r4, 0x100
	lis	r5, __init_size@h	/* Size */
	ori	r5, r5, __init_size@l

before_relocate:
/*
 * We are now ready to copy barebox to RAM.
 *
 * destination = r3
 * source = r4
 * size = r5
 *
 */
	li	r6, CACHELINE_SIZE

/*
 * Fix GOT pointer:
 *
 * New GOT-PTR = (old GOT-PTR - CFG_MONITOR_BASE) + Destination Address
 *
 * Offset:
 */
	sub	r15, r10, r4

	/* First our own GOT */
	add	r14, r14, r15
	/* then the one used by the C code */
	add	r30, r30, r15

/*
 * Now relocate code
 */
	cmplw	cr1,r3,r4
	addi	r0,r5,3		/* word count, rounded up */
	srwi.	r0,r0,2
	beq	cr1,4f		/* In place copy is not necessary */
	beq	7f		/* Protect against 0 count */
	mtctr	r0
	bge	cr1,2f		/* dest above source: copy backwards */

	la	r8,-4(r4)	/* forward word copy */
	la	r7,-4(r3)
1:	lwzu	r0,4(r8)
	stwu	r0,4(r7)
	bdnz	1b
	b	4f

2:	slwi	r0,r0,2		/* backward word copy from the top */
	add	r8,r4,r0
	add	r7,r3,r0
3:	lwzu	r0,-4(r8)
	stwu	r0,-4(r7)
	bdnz	3b

/*
 * Now flush the cache: note that we must start from a cache aligned
 * address. Otherwise we might miss one cache line.
 */
4:	cmpwi	r6,0
	add	r5,r3,r5
	beq	7f		/* Always flush prefetch queue in any case */
	subi	r0,r6,1
	andc	r3,r3,r0	/* align start down to a cache line */
	mfspr	r7,HID0		/* don't do dcbst if dcache is disabled */
	rlwinm	r7,r7,HID0_DCE_BITPOS+1,31,31
	cmpwi	r7,0
	beq	9f
	mr	r4,r3
5:	dcbst	0,r4		/* push dirty lines to memory */
	add	r4,r4,r6
	cmplw	r4,r5
	blt	5b
	sync			/* Wait for all dcbst to complete on bus */
9:	mfspr	r7,HID0		/* don't do icbi if icache is disabled */
	rlwinm	r7,r7,HID0_ICE_BITPOS+1,31,31
	cmpwi	r7,0
	beq	7f
	mr	r4,r3
6:	icbi	0,r4		/* invalidate stale instructions */
	add	r4,r4,r6
	cmplw	r4,r5
	blt	6b
7:	sync			/* Wait for all icbi to complete on bus */
	isync

/*
 * We are done. Do not return, instead branch to second part of board
 * initialization, now running from RAM.
 */
	addi	r0, r10, in_ram - _start + EXC_OFF_SYS_RESET
	mtlr	r0
	blr

in_ram:
	/*
	 * Relocation Function, r14 point to got2+0x8000
	 *
	 * Adjust got2 pointers, no need to check for 0, this code
	 * already puts a few entries in the table.
	 */
	li	r0,__got2_entries@sectoff@l
	la	r3,GOT(_GOT2_TABLE_)
	lwz	r11,GOT(_GOT2_TABLE_)
	mtctr	r0
	sub	r11,r3,r11	/* r11 = relocation delta */
	addi	r3,r3,-4
1:	lwzu	r0,4(r3)
	add	r0,r0,r11
	stw	r0,0(r3)
	bdnz	1b

	/*
	 * Now adjust the fixups and the pointers to the fixups
	 * in case we need to move ourselves again.
	 */
2:	li	r0,__fixup_entries@sectoff@l
	lwz	r3,GOT(_FIXUP_TABLE_)
	cmpwi	r0,0
	mtctr	r0
	addi	r3,r3,-4
	beq	4f
3:	lwzu	r4,4(r3)
	lwzux	r0,r4,r11
	add	r0,r0,r11
	stw	r10,0(r3)
	stw	r0,0(r4)
	bdnz	3b
4:
clear_bss:
	/*
	 * Now clear BSS segment
	 */
	lwz	r3,GOT(__bss_start)
	lwz	r4,GOT(_end)

	cmplw	0, r3, r4
	beq	6f

	li	r0, 0
5:
	stw	r0, 0(r3)
	addi	r3, r3, 4
	cmplw	0, r3, r4
	bne	5b
6:
	mr	r3, r9		/* end of RAM */
	bl	board_init_r
/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception.
 * Register r21 is pointer into trap frame, r1 has new stack pointer.
 */
	.globl	transfer_to_handler
transfer_to_handler:
	stw	r22,_NIP(r21)	/* saved PC of interrupted code */
	lis	r22,MSR_POW@h
	andc	r23,r23,r22	/* strip power-save bit from saved MSR */
	stw	r23,_MSR(r21)
	SAVE_GPR(7, r21)
	SAVE_4GPRS(8, r21)
	SAVE_8GPRS(12, r21)
	SAVE_8GPRS(24, r21)
	mflr	r23		/* LR points into the vector's data block */
	andi.	r24,r23,0x3f00	/* get vector offset */
	stw	r24,TRAP(r21)
	li	r22,0
	stw	r22,RESULT(r21)
	lwz	r24,0(r23)	/* virtual address of handler */
	lwz	r23,4(r23)	/* where to go when done */
	mtspr	SRR0,r24
	/* NOTE(review): r20 appears to carry the handler MSR set up by
	 * EXC_XFER_TEMPLATE before branching here - confirm in macro. */
	mtspr	SRR1,r20
	mtlr	r23
	SYNC
	rfi			/* jump to handler, enable MMU */

int_return:
	mfmsr	r28		/* Disable interrupts */
	li	r4,0
	ori	r4,r4,MSR_EE
	andc	r28,r28,r4
	SYNC			/* Some chip revs need this... */
	mtmsr	r28
	SYNC
	lwz	r2,_CTR(r1)	/* restore special registers */
	lwz	r0,_LINK(r1)
	mtctr	r2
	mtlr	r0
	lwz	r2,_XER(r1)
	lwz	r0,_CCR(r1)
	mtspr	XER,r2
	mtcrf	0xFF,r0
	REST_10GPRS(3, r1)
	REST_10GPRS(13, r1)
	REST_8GPRS(23, r1)
	REST_GPR(31, r1)
	lwz	r2,_NIP(r1)	/* Restore environment */
	lwz	r0,_MSR(r1)
	mtspr	SRR0,r2
	mtspr	SRR1,r0
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	SYNC
	rfi			/* return to interrupted code */
/*
 * This code initialises the MPC5xxx processor core
 * (conforms to PowerPC 603e spec)
 * Note: expects original MSR contents to be in r5.
 *
 * Fix: the .globl directive named "init_5xx_core" (one 'x' short),
 * a symbol that is never defined; the label - and the caller at the
 * top of this file - use "init_5xxx_core".
 */
	.globl	init_5xxx_core
init_5xxx_core:
	/* Initialize machine status; enable machine check interrupt */
	/*--------------------------------------------------------------*/
	li	r3, MSR_KERNEL		/* Set ME and RI flags */
	rlwimi	r3, r5, 0, 25, 25	/* preserve IP bit set by HRCW */
#ifdef DEBUG
	rlwimi	r3, r5, 0, 21, 22	/* debugger might set SE & BE bits */
#endif
	SYNC				/* Some chip revs need this... */
	mtmsr	r3
	SYNC
	mtspr	SRR1, r3		/* Make SRR1 match MSR */

	/* Initialize the Hardware Implementation-dependent Registers */
	/* HID0 also contains cache control */
	/*--------------------------------------------------------------*/
	lis	r3, CFG_HID0_INIT@h
	ori	r3, r3, CFG_HID0_INIT@l
	SYNC
	mtspr	HID0, r3

	lis	r3, CFG_HID0_FINAL@h
	ori	r3, r3, CFG_HID0_FINAL@l
	SYNC
	mtspr	HID0, r3

	/* clear all BAT's */
	/*--------------------------------------------------------------*/
	li	r0, 0
	mtspr	DBAT0U, r0
	mtspr	DBAT0L, r0
	mtspr	DBAT1U, r0
	mtspr	DBAT1L, r0
	mtspr	DBAT2U, r0
	mtspr	DBAT2L, r0
	mtspr	DBAT3U, r0
	mtspr	DBAT3L, r0
	mtspr	DBAT4U, r0
	mtspr	DBAT4L, r0
	mtspr	DBAT5U, r0
	mtspr	DBAT5L, r0
	mtspr	DBAT6U, r0
	mtspr	DBAT6L, r0
	mtspr	DBAT7U, r0
	mtspr	DBAT7L, r0
	mtspr	IBAT0U, r0
	mtspr	IBAT0L, r0
	mtspr	IBAT1U, r0
	mtspr	IBAT1L, r0
	mtspr	IBAT2U, r0
	mtspr	IBAT2L, r0
	mtspr	IBAT3U, r0
	mtspr	IBAT3L, r0
	mtspr	IBAT4U, r0
	mtspr	IBAT4L, r0
	mtspr	IBAT5U, r0
	mtspr	IBAT5L, r0
	mtspr	IBAT6U, r0
	mtspr	IBAT6L, r0
	mtspr	IBAT7U, r0
	mtspr	IBAT7L, r0
	SYNC

	/* invalidate all tlb's */
	/* */
	/* From the 603e User Manual: "The 603e provides the ability to */
	/* invalidate a TLB entry. The TLB Invalidate Entry (tlbie) */
	/* instruction invalidates the TLB entry indexed by the EA, and */
	/* operates on both the instruction and data TLBs simultaneously*/
	/* invalidating four TLB entries (both sets in each TLB). The */
	/* index corresponds to bits 15-19 of the EA. To invalidate all */
	/* entries within both TLBs, 32 tlbie instructions should be */
	/* issued, incrementing this field by one each time." */
	/* */
	/* "Note that the tlbia instruction is not implemented on the */
	/* 603e." */
	/* */
	/* bits 15-19 correspond to addresses 0x00000000 to 0x0001F000 */
	/* incrementing by 0x1000 each time. The code below is sort of */
	/* based on code in "flush_tlbs" from arch/ppc/kernel/head.S */
	/* */
	/*--------------------------------------------------------------*/
	li	r3, 32
	mtctr	r3
	li	r3, 0
1:	tlbie	r3
	addi	r3, r3, 0x1000
	bdnz	1b
	SYNC

	/* Done! */
	/*--------------------------------------------------------------*/
	blr
/* Cache functions.
*
* Note: requires that all cache bits in
* HID0 are in the low half word.
*/
/*
 * icache_enable - enable the instruction cache via HID0: set ICE,
 * clear ILOCK, and pulse ICFI to invalidate the whole cache first.
 */
.globl icache_enable
icache_enable:
	mfspr	r3, HID0
	ori	r3, r3, HID0_ICE
	lis	r4, 0
	ori	r4, r4, HID0_ILOCK
	andc	r3, r3, r4
	ori	r4, r3, HID0_ICFI
	isync
	mtspr	HID0, r4	/* sets enable and invalidate, clears lock */
	isync
	mtspr	HID0, r3	/* clears invalidate */
	blr
/*
 * icache_disable - clear ICE and ILOCK in HID0, pulsing ICFI so the
 * cache is left invalidated.
 */
.globl icache_disable
icache_disable:
	mfspr	r3, HID0
	lis	r4, 0
	ori	r4, r4, HID0_ICE|HID0_ILOCK
	andc	r3, r3, r4
	ori	r4, r3, HID0_ICFI
	isync
	mtspr	HID0, r4	/* sets invalidate, clears enable and lock */
	isync
	mtspr	HID0, r3	/* clears invalidate */
	blr
/* icache_status - return the HID0 ICE bit (1 = icache enabled). */
.globl icache_status
icache_status:
	mfspr	r3, HID0
	rlwinm	r3, r3, HID0_ICE_BITPOS + 1, 31, 31
	blr
/*
 * dcache_enable - enable the data cache via HID0: set DCE, clear
 * DLOCK, and pulse DCI to invalidate the whole cache first.
 */
.globl dcache_enable
dcache_enable:
	mfspr	r3, HID0
	ori	r3, r3, HID0_DCE
	lis	r4, 0
	ori	r4, r4, HID0_DLOCK
	andc	r3, r3, r4
	ori	r4, r3, HID0_DCI
	sync
	mtspr	HID0, r4	/* sets enable and invalidate, clears lock */
	sync
	mtspr	HID0, r3	/* clears invalidate */
	blr
/*
 * dcache_disable - clear DCE and DLOCK in HID0, pulsing DCI so the
 * cache is left invalidated.
 */
.globl dcache_disable
dcache_disable:
	mfspr	r3, HID0
	lis	r4, 0
	ori	r4, r4, HID0_DCE|HID0_DLOCK
	andc	r3, r3, r4
	ori	r4, r3, HID0_DCI
	sync
	mtspr	HID0, r4	/* sets invalidate, clears enable and lock */
	sync
	mtspr	HID0, r3	/* clears invalidate */
	blr
/* dcache_status - return the HID0 DCE bit (1 = dcache enabled). */
.globl dcache_status
dcache_status:
	mfspr	r3, HID0
	rlwinm	r3, r3, HID0_DCE_BITPOS + 1, 31, 31
	blr
/* get_svr - return the System Version Register in r3. */
.globl get_svr
get_svr:
	mfspr	r3, SVR
	blr
/* get_pvr - return the Processor Version Register in r3. */
.globl get_pvr
get_pvr:
	mfspr	r3, PVR
	blr
/*
* Copy exception vector code to low memory
*
* r3: dest_addr
* r7: source address, r8: end address, r9: target address
*/
/*
 * trap_init - copy the exception vectors to low memory (from 0x100 up)
 * and relocate each vector's handler pointers via trap_reloc, then
 * clear MSR[IP] so exceptions are taken from low memory.
 *
 * In:  r3 = dest_addr (relocation offset passed through to trap_reloc)
 * Uses r7/r8 as source cursor/limit and r9 as the low-memory target.
 */
.globl trap_init
trap_init:
	lwz	r7, GOT(_start)
	lwz	r8, GOT(_end_of_vectors)

	li	r9, 0x100	/* reset vector always at 0x100 */

	cmplw	0, r7, r8
	bgelr			/* return if r7>=r8 - just in case */

	mflr	r4		/* save link register */
1:
	lwz	r0, 0(r7)	/* copy vectors word by word */
	stw	r0, 0(r9)
	addi	r7, r7, 4
	addi	r9, r9, 4
	cmplw	0, r7, r8
	bne	1b

	/*
	 * relocate `hdlr' and `int_return' entries
	 */
	li	r7, .L_MachineCheck - _start + EXC_OFF_SYS_RESET
	li	r8, Alignment - _start + EXC_OFF_SYS_RESET
2:
	bl	trap_reloc
	addi	r7, r7, 0x100	/* next exception vector */
	cmplw	0, r7, r8
	blt	2b

	li	r7, .L_Alignment - _start + EXC_OFF_SYS_RESET
	bl	trap_reloc

	li	r7, .L_ProgramCheck - _start + EXC_OFF_SYS_RESET
	bl	trap_reloc

	li	r7, .L_FPUnavailable - _start + EXC_OFF_SYS_RESET
	li	r8, SystemCall - _start + EXC_OFF_SYS_RESET
3:
	bl	trap_reloc
	addi	r7, r7, 0x100	/* next exception vector */
	cmplw	0, r7, r8
	blt	3b

	li	r7, .L_SingleStep - _start + EXC_OFF_SYS_RESET
	li	r8, _end_of_vectors - _start + EXC_OFF_SYS_RESET
4:
	bl	trap_reloc
	addi	r7, r7, 0x100	/* next exception vector */
	cmplw	0, r7, r8
	blt	4b

	mfmsr	r3		/* now that the vectors have */
	lis	r7, MSR_IP@h	/* relocated into low memory */
	ori	r7, r7, MSR_IP@l	/* MSR[IP] can be turned off */
	andc	r3, r3, r7	/* (if it was on) */
	SYNC			/* Some chip revs need this... */
	mtmsr	r3
	SYNC

	mtlr	r4		/* restore link register */
	blr
/* Linker-symbol addresses exported as data words for use from C. */
.globl _text_base
_text_base:
	.long	TEXT_BASE

.globl _barebox_start
_barebox_start:
	.long	_start

.globl _bss_start
_bss_start:
	.long	__bss_start

.globl _bss_end
_bss_end:
	.long	_end
|
a3f/bareDOOM
| 14,375
|
arch/powerpc/mach-mpc5xxx/firmware_sc_task_bestcomm.impl.S
|
/*
* Copyright (C) 2001, Software Center, Motorola China.
*
* This file contains microcode for the FEC controller of the MPC5200 CPU.
*/
/* sas/sccg, gas target */
/*
 * BestComm (SmartDMA) task table: one 8-word entry per DMA task.
 * Per the generated comments, each entry holds: TDT start, TDT end,
 * variable table, FDT address ORed with flag bits, two reserved words,
 * the context-save area, and a final 0xf0000000 word (purpose not
 * documented here -- confirm against the BestComm documentation).
 * All table addresses are stored as offsets relative to taskTable.
 */
.section smartdmaInitData,"aw",@progbits /* Initialized data for task variables */
.section smartdmaTaskTable,"aw",@progbits /* Task tables */
.align 9
.globl taskTable
taskTable:
.globl scEthernetRecv_Entry
scEthernetRecv_Entry: /* Task 0 */
.long scEthernetRecv_TDT - taskTable /* Task 0 Descriptor Table */
.long scEthernetRecv_TDT - taskTable + 0x000000a4
.long scEthernetRecv_VarTab - taskTable /* Task 0 Variable Table */
.long scEthernetRecv_FDT - taskTable + 0x03 /* Task 0 Function Descriptor Table & Flags */
.long 0x00000000
.long 0x00000000
.long scEthernetRecv_CSave - taskTable /* Task 0 context save space */
.long 0xf0000000
.globl scEthernetXmit_Entry
scEthernetXmit_Entry: /* Task 1 */
.long scEthernetXmit_TDT - taskTable /* Task 1 Descriptor Table */
.long scEthernetXmit_TDT - taskTable + 0x000000d0
.long scEthernetXmit_VarTab - taskTable /* Task 1 Variable Table */
.long scEthernetXmit_FDT - taskTable + 0x03 /* Task 1 Function Descriptor Table & Flags */
.long 0x00000000
.long 0x00000000
.long scEthernetXmit_CSave - taskTable /* Task 1 context save space */
.long 0xf0000000
/*
 * Task 0 (FEC receive) descriptor table.  Machine-generated SmartDMA
 * microcode (see the "sas/sccg, gas target" note in the file header):
 * each word encodes an LCD (loop control) or DRD (data routing)
 * descriptor, decoded in the trailing comment.  Do not hand-edit the
 * opcode words; regenerate them with the BestComm tools instead.
 */
.globl scEthernetRecv_TDT
scEthernetRecv_TDT: /* Task 0 Descriptor Table */
.long 0xc4c50000 /* 0000: LCDEXT: idx0 = var9 + var10; idx0 once var0; idx0 += inc0 */
.long 0x84c5e000 /* 0004: LCD: idx1 = var9 + var11; ; idx1 += inc0 */
.long 0x10001f08 /* 0008: DRD1A: var7 = idx1; FN=0 MORE init=0 WS=0 RS=0 */
.long 0x10000380 /* 000C: DRD1A: var0 = *idx0; FN=0 MORE init=0 WS=0 RS=0 */
.long 0x00000f88 /* 0010: DRD1A: var3 = *idx1; FN=0 init=0 WS=0 RS=0 */
.long 0x81980000 /* 0014: LCD: idx0 = var3; idx0 once var0; idx0 += inc0 */
.long 0x10000780 /* 0018: DRD1A: var1 = *idx0; FN=0 MORE init=0 WS=0 RS=0 */
.long 0x60000000 /* 001C: DRD2A: EU0=0 EU1=0 EU2=0 EU3=0 EXT init=0 WS=0 RS=0 */
.long 0x010cf04c /* 0020: DRD2B1: var4 = EU3(); EU3(var1,var12) */
.long 0x82180349 /* 0024: LCD: idx0 = var4; idx0 != var13; idx0 += inc1 */
.long 0x81c68004 /* 0028: LCD: idx1 = var3 + var13 + 4; idx1 once var0; idx1 += inc0 */
.long 0x70000000 /* 002C: DRD2A: EU0=0 EU1=0 EU2=0 EU3=0 EXT MORE init=0 WS=0 RS=0 */
.long 0x018cf04e /* 0030: DRD2B1: var6 = EU3(); EU3(var1,var14) */
.long 0x70000000 /* 0034: DRD2A: EU0=0 EU1=0 EU2=0 EU3=0 EXT MORE init=0 WS=0 RS=0 */
.long 0x020cf04f /* 0038: DRD2B1: var8 = EU3(); EU3(var1,var15) */
.long 0x00000b88 /* 003C: DRD1A: var2 = *idx1; FN=0 init=0 WS=0 RS=0 */
.long 0x8000d184 /* 0040: LCDEXT: idx1 = 0xf0003184; ; */
.long 0xc6990452 /* 0044: LCDEXT: idx2 = var13; idx2 < var17; idx2 += inc2 */
.long 0x81486010 /* 0048: LCD: idx3 = var2 + var16; ; idx3 += inc2 */
.long 0x006acf88 /* 004C: DRD1A: *idx3 = *idx1; FN=0 init=3 WS=1 RS=1 */
.long 0x8000d184 /* 0050: LCDEXT: idx1 = 0xf0003184; ; */
.long 0x86810492 /* 0054: LCD: idx2 = var13, idx3 = var2; idx2 < var18; idx2 += inc2, idx3 += inc2 */
.long 0x006acf88 /* 0058: DRD1A: *idx3 = *idx1; FN=0 init=3 WS=1 RS=1 */
.long 0x8000d184 /* 005C: LCDEXT: idx1 = 0xf0003184; ; */
.long 0x868184d2 /* 0060: LCD: idx2 = var13, idx3 = var3; idx2 < var19; idx2 += inc2, idx3 += inc2 */
.long 0x000acf88 /* 0064: DRD1A: *idx3 = *idx1; FN=0 init=0 WS=1 RS=1 */
.long 0xc318839b /* 0068: LCDEXT: idx1 = var6; idx1 == var14; idx1 += inc3 */
.long 0x80190000 /* 006C: LCD: idx2 = var0; idx2 once var0; idx2 += inc0 */
.long 0x04008468 /* 0070: DRD1A: idx1 = var13; FN=0 INT init=0 WS=0 RS=0 */
.long 0xc4038358 /* 0074: LCDEXT: idx1 = var8, idx2 = var7; idx1 == var13; idx1 += inc3, idx2 += inc0 */
.long 0x81c50000 /* 0078: LCD: idx3 = var3 + var10; idx3 once var0; idx3 += inc0 */
.long 0x1000cb18 /* 007C: DRD1A: *idx2 = idx3; FN=0 MORE init=0 WS=0 RS=0 */
.long 0x00000f18 /* 0080: DRD1A: var3 = idx3; FN=0 init=0 WS=0 RS=0 */
.long 0xc4188364 /* 0084: LCDEXT: idx1 = var8; idx1 > var13; idx1 += inc4 */
.long 0x83990000 /* 0088: LCD: idx2 = var7; idx2 once var0; idx2 += inc0 */
.long 0x10000c00 /* 008C: DRD1A: var3 = var0; FN=0 MORE init=0 WS=0 RS=0 */
.long 0x0000c800 /* 0090: DRD1A: *idx2 = var0; FN=0 init=0 WS=0 RS=0 */
.long 0x81988000 /* 0094: LCD: idx1 = var3; idx1 once var0; idx1 += inc0 */
.long 0x10000788 /* 0098: DRD1A: var1 = *idx1; FN=0 MORE init=0 WS=0 RS=0 */
.long 0x60000000 /* 009C: DRD2A: EU0=0 EU1=0 EU2=0 EU3=0 EXT init=0 WS=0 RS=0 */
.long 0x080cf04c /* 00A0: DRD2B1: idx0 = EU3(); EU3(var1,var12) */
.long 0x000001f8 /* 00A4(:0): NOP */
/*
 * Task 1 (FEC transmit) descriptor table.  Machine-generated SmartDMA
 * microcode in the same LCD/DRD encoding as the receive task above;
 * each word's decoded form is in its trailing comment.  Do not
 * hand-edit the opcode words; regenerate with the BestComm tools.
 */
.globl scEthernetXmit_TDT
scEthernetXmit_TDT: /* Task 1 Descriptor Table */
.long 0x80024800 /* 0000: LCDEXT: idx0 = 0xf0008800; ; */
.long 0x85c60004 /* 0004: LCD: idx1 = var11 + var12 + 4; idx1 once var0; idx1 += inc0 */
.long 0x10002308 /* 0008: DRD1A: var8 = idx1; FN=0 MORE init=0 WS=0 RS=0 */
.long 0x10000f88 /* 000C: DRD1A: var3 = *idx1; FN=0 MORE init=0 WS=0 RS=0 */
.long 0x00000380 /* 0010: DRD1A: var0 = *idx0; FN=0 init=0 WS=0 RS=0 */
.long 0x81980000 /* 0014: LCD: idx0 = var3; idx0 once var0; idx0 += inc0 */
.long 0x10000780 /* 0018: DRD1A: var1 = *idx0; FN=0 MORE init=0 WS=0 RS=0 */
.long 0x60000000 /* 001C: DRD2A: EU0=0 EU1=0 EU2=0 EU3=0 EXT init=0 WS=0 RS=0 */
.long 0x024cf04d /* 0020: DRD2B1: var9 = EU3(); EU3(var1,var13) */
.long 0x84980309 /* 0024: LCD: idx0 = var9; idx0 != var12; idx0 += inc1 */
.long 0xc0004003 /* 0028: LCDEXT: idx1 = 0x00000003; ; */
.long 0x81c60004 /* 002C: LCD: idx2 = var3 + var12 + 4; idx2 once var0; idx2 += inc0 */
.long 0x70000000 /* 0030: DRD2A: EU0=0 EU1=0 EU2=0 EU3=0 EXT MORE init=0 WS=0 RS=0 */
.long 0x010cf04e /* 0034: DRD2B1: var4 = EU3(); EU3(var1,var14) */
.long 0x70000000 /* 0038: DRD2A: EU0=0 EU1=0 EU2=0 EU3=0 EXT MORE init=0 WS=0 RS=0 */
.long 0x014cf04f /* 003C: DRD2B1: var5 = EU3(); EU3(var1,var15) */
.long 0x70000000 /* 0040: DRD2A: EU0=0 EU1=0 EU2=0 EU3=0 EXT MORE init=0 WS=0 RS=0 */
.long 0x028cf050 /* 0044: DRD2B1: var10 = EU3(); EU3(var1,var16) */
.long 0x70000000 /* 0048: DRD2A: EU0=0 EU1=0 EU2=0 EU3=0 EXT MORE init=0 WS=0 RS=0 */
.long 0x018cf051 /* 004C: DRD2B1: var6 = EU3(); EU3(var1,var17) */
.long 0x10000b90 /* 0050: DRD1A: var2 = *idx2; FN=0 MORE init=0 WS=0 RS=0 */
.long 0x60000000 /* 0054: DRD2A: EU0=0 EU1=0 EU2=0 EU3=0 EXT init=0 WS=0 RS=0 */
.long 0x01ccf0a1 /* 0058: DRD2B1: var7 = EU3(); EU3(var2,idx1) */
.long 0xc2988312 /* 005C: LCDEXT: idx1 = var5; idx1 > var12; idx1 += inc2 */
.long 0x83490000 /* 0060: LCD: idx2 = var6 + var18; idx2 once var0; idx2 += inc0 */
.long 0x00001b10 /* 0064: DRD1A: var6 = idx2; FN=0 init=0 WS=0 RS=0 */
.long 0x8000d1a4 /* 0068: LCDEXT: idx1 = 0xf00031a4; ; */
.long 0x8301031c /* 006C: LCD: idx2 = var6, idx3 = var2; idx2 > var12; idx2 += inc3, idx3 += inc4 */
.long 0x008ac798 /* 0070: DRD1A: *idx1 = *idx3; FN=0 init=4 WS=1 RS=1 */
.long 0x8000d1a4 /* 0074: LCDEXT: idx1 = 0xf00031a4; ; */
.long 0xc1430000 /* 0078: LCDEXT: idx2 = var2 + var6; idx2 once var0; idx2 += inc0 */
.long 0x82998312 /* 007C: LCD: idx3 = var5; idx3 > var12; idx3 += inc2 */
.long 0x088ac790 /* 0080: DRD1A: *idx1 = *idx2; FN=0 TFD init=4 WS=1 RS=1 */
.long 0x81988000 /* 0084: LCD: idx1 = var3; idx1 once var0; idx1 += inc0 */
.long 0x60000001 /* 0088: DRD2A: EU0=0 EU1=0 EU2=0 EU3=1 EXT init=0 WS=0 RS=0 */
.long 0x0c4cfc4d /* 008C: DRD2B1: *idx1 = EU3(); EU3(*idx1,var13) */
.long 0xc21883ad /* 0090: LCDEXT: idx1 = var4; idx1 == var14; idx1 += inc5 */
.long 0x80190000 /* 0094: LCD: idx2 = var0; idx2 once var0; idx2 += inc0 */
.long 0x04008460 /* 0098: DRD1A: idx1 = var12; FN=0 INT init=0 WS=0 RS=0 */
.long 0xc4052305 /* 009C: LCDEXT: idx1 = var8, idx2 = var10; idx2 == var12; idx1 += inc0, idx2 += inc5 */
.long 0x81c98000 /* 00A0: LCD: idx3 = var3 + var19; idx3 once var0; idx3 += inc0 */
.long 0x1000c718 /* 00A4: DRD1A: *idx1 = idx3; FN=0 MORE init=0 WS=0 RS=0 */
.long 0x00000f18 /* 00A8: DRD1A: var3 = idx3; FN=0 init=0 WS=0 RS=0 */
.long 0xc4188000 /* 00AC: LCDEXT: idx1 = var8; idx1 once var0; idx1 += inc0 */
.long 0x85190312 /* 00B0: LCD: idx2 = var10; idx2 > var12; idx2 += inc2 */
.long 0x10000c00 /* 00B4: DRD1A: var3 = var0; FN=0 MORE init=0 WS=0 RS=0 */
.long 0x1000c400 /* 00B8: DRD1A: *idx1 = var0; FN=0 MORE init=0 WS=0 RS=0 */
.long 0x00008860 /* 00BC: DRD1A: idx2 = var12; FN=0 init=0 WS=0 RS=0 */
.long 0x81988000 /* 00C0: LCD: idx1 = var3; idx1 once var0; idx1 += inc0 */
.long 0x10000788 /* 00C4: DRD1A: var1 = *idx1; FN=0 MORE init=0 WS=0 RS=0 */
.long 0x60000000 /* 00C8: DRD2A: EU0=0 EU1=0 EU2=0 EU3=0 EXT init=0 WS=0 RS=0 */
.long 0x080cf04d /* 00CC: DRD2B1: idx0 = EU3(); EU3(var1,var13) */
.long 0x000001f8 /* 00D0(:0): NOP */
.align 8
/*
 * Task 0 variable table: 24 variable words (var[0..23]) followed by
 * 8 increment words (inc[0..7]), referenced by the descriptors above
 * as varN/incN.  Values are generated together with the TDT; the
 * meanings of the non-zero constants are not documented here --
 * confirm against the BestComm task sources before changing any.
 */
.globl scEthernetRecv_VarTab
scEthernetRecv_VarTab: /* Task 0 Variable Table */
.long 0x00000000 /* var[0] */
.long 0x00000000 /* var[1] */
.long 0x00000000 /* var[2] */
.long 0x00000000 /* var[3] */
.long 0x00000000 /* var[4] */
.long 0x00000000 /* var[5] */
.long 0x00000000 /* var[6] */
.long 0x00000000 /* var[7] */
.long 0x00000000 /* var[8] */
.long 0xf0008800 /* var[9] */
.long 0x00000008 /* var[10] */
.long 0x0000000c /* var[11] */
.long 0x80000000 /* var[12] */
.long 0x00000000 /* var[13] */
.long 0x10000000 /* var[14] */
.long 0x20000000 /* var[15] */
.long 0x000005e4 /* var[16] */
.long 0x0000000e /* var[17] */
.long 0x000005e0 /* var[18] */
.long 0x00000004 /* var[19] */
.long 0x00000000 /* var[20] */
.long 0x00000000 /* var[21] */
.long 0x00000000 /* var[22] */
.long 0x00000000 /* var[23] */
.long 0x00000000 /* inc[0] */
.long 0x60000000 /* inc[1] */
.long 0x20000001 /* inc[2] */
.long 0x80000000 /* inc[3] */
.long 0x40000000 /* inc[4] */
.long 0x00000000 /* inc[5] */
.long 0x00000000 /* inc[6] */
.long 0x00000000 /* inc[7] */
.align 8
/*
 * Task 1 variable table: 24 variable words followed by 8 increment
 * words, referenced by the transmit descriptors as varN/incN.
 * Generated data -- keep in sync with the TDT above.
 */
.globl scEthernetXmit_VarTab
scEthernetXmit_VarTab: /* Task 1 Variable Table */
.long 0x00000000 /* var[0] */
.long 0x00000000 /* var[1] */
.long 0x00000000 /* var[2] */
.long 0x00000000 /* var[3] */
.long 0x00000000 /* var[4] */
.long 0x00000000 /* var[5] */
.long 0x00000000 /* var[6] */
.long 0x00000000 /* var[7] */
.long 0x00000000 /* var[8] */
.long 0x00000000 /* var[9] */
.long 0x00000000 /* var[10] */
.long 0xf0008800 /* var[11] */
.long 0x00000000 /* var[12] */
.long 0x80000000 /* var[13] */
.long 0x10000000 /* var[14] */
.long 0x08000000 /* var[15] */
.long 0x20000000 /* var[16] */
.long 0x0000ffff /* var[17] */
.long 0xffffffff /* var[18] */
.long 0x00000008 /* var[19] */
.long 0x00000000 /* var[20] */
.long 0x00000000 /* var[21] */
.long 0x00000000 /* var[22] */
.long 0x00000000 /* var[23] */
.long 0x00000000 /* inc[0] */
.long 0x60000000 /* inc[1] */
.long 0x40000000 /* inc[2] */
.long 0x4000ffff /* inc[3] */
.long 0xe0000001 /* inc[4] */
.long 0x80000000 /* inc[5] */
.long 0x00000000 /* inc[6] */
.long 0x00000000 /* inc[7] */
.align 8
/*
 * Task 0 function descriptor table: 64 slots, all empty except slots
 * 48 and 49, which bind the and()/andn() operations to EU#3 (used by
 * the EU3(...) descriptors in the TDT above).
 */
.globl scEthernetRecv_FDT
scEthernetRecv_FDT: /* Task 0 Function Descriptor Table */
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x21800000 /* and(), EU# 3 */
.long 0x21400000 /* andn(), EU# 3 */
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.align 8
/*
 * Task 1 function descriptor table: identical layout to the receive
 * FDT -- 64 slots, only slots 48/49 populated with and()/andn() on
 * EU#3 for the transmit task's EU3(...) descriptors.
 */
.globl scEthernetXmit_FDT
scEthernetXmit_FDT: /* Task 1 Function Descriptor Table */
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x21800000 /* and(), EU# 3 */
.long 0x21400000 /* andn(), EU# 3 */
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
.long 0x00000000
/*
 * Per-task context-save areas (128 zeroed bytes each), pointed to by
 * the task-table entries; the BestComm engine spills task state here.
 */
.globl scEthernetRecv_CSave
scEthernetRecv_CSave: /* Task 0 context save space */
.space 128, 0x0
.globl scEthernetXmit_CSave
scEthernetXmit_CSave: /* Task 1 context save space */
.space 128, 0x0
|
a3f/bareDOOM
| 27,513
|
arch/powerpc/cpu-85xx/start.S
|
/*
* Copyright 2004, 2007-2012 Freescale Semiconductor, Inc.
* Copyright (C) 2003 Motorola,Inc.
*
* SPDX-License-Identifier: GPL-2.0+
*/
/*
* U-Boot Startup Code for Motorola 85xx PowerPC based Embedded Boards
*
* The processor starts at 0xfffffffc and the code is first executed in the
* last 4K page(0xfffff000-0xffffffff) in flash/rom.
*/
#include <config.h>
#include <asm/config.h>
#include <asm/processor.h>
#include <asm/ppc_asm.tmpl>
#include <asm/ppc_defs.h>
#include <asm/cache.h>
#include <mach/mpc85xx.h>
#include <mach/mmu.h>
#undef MSR_KERNEL
#define MSR_KERNEL ( MSR_ME ) /* Machine Check */
/*
* Set up GOT: Global Offset Table
*
* Use r14 to access the GOT
*/
/*
 * GOT entries accessed via r14 (see GET_GOT); relocate_code fixes
 * r14 up after the copy so these resolve at the new address.
 */
START_GOT
GOT_ENTRY(_GOT2_TABLE_)
GOT_ENTRY(_FIXUP_TABLE_)
GOT_ENTRY(_start)
GOT_ENTRY(_start_of_vectors)
GOT_ENTRY(_end_of_vectors)
GOT_ENTRY(transfer_to_handler)
GOT_ENTRY(__init_end)
GOT_ENTRY(__bss_start)
GOT_ENTRY(__bss_stop)
END_GOT
/*
* e500 Startup -- after reset only the last 4KB of the effective
* address space is mapped in the MMU L2 TLB1 Entry0. The .bootpg
* section is located at THIS LAST page and basically does three
* things: clear some registers, set up exception tables and
* add more TLB entries for 'larger spaces'(e.g. the boot rom) to
* continue the boot procedure.
* Once the boot rom is mapped by TLB entries we can proceed
* with normal startup.
*/
/*
 * Reset entry point, executed from the boot 4K page mapped by the
 * hardware-initialized TLB1 entry.  Enables the debug exception in
 * MSR, applies the A005125 erratum workaround if configured, then
 * invalidates both L1 caches and clears stale DBSR status bits.
 */
.section .bootpg,"ax"
.globl _start_e500
_start_e500:
/* Enable debug exception */
li r1,MSR_DE
mtmsr r1
#ifdef FSL_ERRATUM_A005125
msync
isync
mfspr r3, SPRN_HDBCR0
oris r3, r3, 0x0080
mtspr SPRN_HDBCR0, r3
#endif
/* clear registers/arrays not reset by hardware */
/* L1 */
li r0,2
mtspr L1CSR0,r0 /* invalidate d-cache */
mtspr L1CSR1,r0 /* invalidate i-cache */
mfspr r1,DBSR
mtspr DBSR,r1 /* Clear all valid bits */
/*
 * create_tlb1_entry -- program one TLB1 entry via the MAS registers.
 *   esel     : TLB1 entry index (MAS0)
 *   ts       : translation space (AS bit, MAS1)
 *   tsize    : BOOKE_PAGESZ_* size code (MAS1)
 *   epn/wimg : effective page number and WIMG attributes (MAS2)
 *   rpn/perm : real page number and permission bits (MAS3)
 *   phy_high : upper physical-address bits (MAS7)
 *   scratch  : GPR fully clobbered by the macro
 * The leading (1, 1, ...) arguments to FSL_BOOKE_MAS1 are presumably
 * the Valid and IPROT flags -- confirm against mach/mmu.h.
 * Ends with isync/msync/tlbwe/isync so the entry is visible on return.
 */
.macro create_tlb1_entry esel ts tsize epn wimg rpn \
perm phy_high scratch
lis \scratch, FSL_BOOKE_MAS0(1, \esel, 0)@h
ori \scratch, \scratch, FSL_BOOKE_MAS0(1, \esel, 0)@l
mtspr MAS0, \scratch
lis \scratch, FSL_BOOKE_MAS1(1, 1, 0, \ts, \tsize)@h
ori \scratch, \scratch, FSL_BOOKE_MAS1(1, 1, 0, \ts, \tsize)@l
mtspr MAS1, \scratch
lis \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@h
ori \scratch, \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@l
mtspr MAS2, \scratch
lis \scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@h
ori \scratch, \scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@l
mtspr MAS3, \scratch
lis \scratch, \phy_high@h
ori \scratch, \scratch, \phy_high@l
mtspr MAS7, \scratch
isync
msync
tlbwe
isync
.endm
/*
 * create_tlb0_entry -- same parameters as create_tlb1_entry, but
 * selects TLB0 (FSL_BOOKE_MAS0 first argument 0) and passes 0 for the
 * second FSL_BOOKE_MAS1 flag (the one set in the TLB1 variant --
 * presumably IPROT, which TLB0 entries do not get; confirm against
 * mach/mmu.h).  Used for short-lived mappings such as the temporary
 * CCSR windows below.  \scratch is fully clobbered.
 */
.macro create_tlb0_entry esel ts tsize epn wimg rpn perm phy_high \
scratch
lis \scratch, FSL_BOOKE_MAS0(0, \esel, 0)@h
ori \scratch, \scratch, FSL_BOOKE_MAS0(0, \esel, 0)@l
mtspr MAS0, \scratch
lis \scratch, FSL_BOOKE_MAS1(1, 0, 0, \ts, \tsize)@h
ori \scratch, \scratch, FSL_BOOKE_MAS1(1, 0, 0, \ts, \tsize)@l
mtspr MAS1, \scratch
lis \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@h
ori \scratch, \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@l
mtspr MAS2, \scratch
lis \scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@h
ori \scratch, \scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@l
mtspr MAS3, \scratch
lis \scratch, \phy_high@h
ori \scratch, \scratch, \phy_high@l
mtspr MAS7, \scratch
isync
msync
tlbwe
isync
.endm
/*
 * Install exception vectors: IVPR gets the high half of TEXT_BASE,
 * each IVORn gets the low-16-bit offset of its handler (hence the
 * "TEXT_BASE & 0xffff" base added to each handler's offset).  Then
 * clear/initialize the timer, syndrome and XER registers, enable the
 * timebase and machine check in HID0, set address streaming/broadcast
 * in HID1 (plus MBDD on rev >= 5.0 parts), and optionally enable the
 * branch target buffer.
 */
/* Setup interrupt vectors */
lis r1,TEXT_BASE@h
mtspr IVPR,r1
lis r3,(TEXT_BASE & 0xffff)@h
ori r3,r3,(TEXT_BASE & 0xffff)@l
addi r4,r3,CriticalInput - _start + _START_OFFSET
mtspr IVOR0,r4 /* 0: Critical input */
addi r4,r3,MachineCheck - _start + _START_OFFSET
mtspr IVOR1,r4 /* 1: Machine check */
addi r4,r3,DataStorage - _start + _START_OFFSET
mtspr IVOR2,r4 /* 2: Data storage */
addi r4,r3,InstStorage - _start + _START_OFFSET
mtspr IVOR3,r4 /* 3: Instruction storage */
addi r4,r3,ExtInterrupt - _start + _START_OFFSET
mtspr IVOR4,r4 /* 4: External interrupt */
addi r4,r3,Alignment - _start + _START_OFFSET
mtspr IVOR5,r4 /* 5: Alignment */
addi r4,r3,ProgramCheck - _start + _START_OFFSET
mtspr IVOR6,r4 /* 6: Program check */
addi r4,r3,FPUnavailable - _start + _START_OFFSET
mtspr IVOR7,r4 /* 7: floating point unavailable */
addi r4,r3,SystemCall - _start + _START_OFFSET
mtspr IVOR8,r4 /* 8: System call */
/* 9: Auxiliary processor unavailable(unsupported) */
addi r4,r3,Decrementer - _start + _START_OFFSET
mtspr IVOR10,r4 /* 10: Decrementer */
addi r4,r3,IntervalTimer - _start + _START_OFFSET
mtspr IVOR11,r4 /* 11: Interval timer */
addi r4,r3,WatchdogTimer - _start + _START_OFFSET
mtspr IVOR12,r4 /* 12: Watchdog timer */
addi r4,r3,DataTLBError - _start + _START_OFFSET
mtspr IVOR13,r4 /* 13: Data TLB error */
addi r4,r3,InstructionTLBError - _start + _START_OFFSET
mtspr IVOR14,r4 /* 14: Instruction TLB error */
addi r4,r3,DebugBreakpoint - _start + _START_OFFSET
mtspr IVOR15,r4 /* 15: Debug */
/* Clear and set up some registers. */
li r0,0x0000
lis r1,0xffff
mtspr DEC,r0 /* prevent dec exceptions */
mttbl r0 /* prevent fit & wdt exceptions */
mttbu r0
mtspr TSR,r1 /* clear all timer exception status */
mtspr TCR,r0 /* disable all */
mtspr ESR,r0 /* clear exception syndrome register */
mtspr MCSR,r0 /* machine check syndrome register */
mtxer r0 /* clear integer exception register */
/* Enable Time Base and Select Time Base Clock */
lis r0,HID0_EMCP@h /* Enable machine check */
ori r0,r0,HID0_TBEN@l /* Enable Timebase */
mtspr HID0,r0
li r0,(HID1_ASTME|HID1_ABE)@l /* Addr streaming & broadcast */
mfspr r3,PVR
andi. r3,r3, 0xff
cmpwi r3,0x50@l /* if we are rev 5.0 or greater set MBDD */
blt 1f
/* Set MBDD bit also */
ori r0, r0, HID1_MBDD@l
1:
mtspr HID1,r0
/* Enable Branch Prediction */
#if defined(CONFIG_BTB)
lis r0,BUCSR_ENABLE@h
ori r0,r0,BUCSR_ENABLE@l
mtspr SPRN_BUCSR,r0
#endif
/*
 * Search for the TLB that covers the code we're executing, and shrink it
 * so that it covers only this 4K page. That will ensure that any other
 * TLB we create won't interfere with it. We assume that the TLB exists,
 * which is why we don't check the Valid bit of MAS1. We also assume
 * it is in TLB1.
 *
 * This is necessary, for example, when booting from the on-chip ROM,
 * which (oddly) creates a single 4GB TLB that covers CCSR and DDR.
 */
bl nexti /* Find our address */
nexti: mflr r1 /* R1 = our PC */
li r2, 0
mtspr MAS6, r2 /* Assume the current PID and AS are 0 */
isync
msync
tlbsx 0, r1 /* This must succeed */
mfspr r14, MAS0 /* Save ESEL for later */
/* rotate MAS0 so the ESEL field lands in the low 12 bits of r14 */
rlwinm r14, r14, 16, 0xfff
/* Set the size of the TLB to 4KB */
mfspr r3, MAS1
li r2, 0xF80
andc r3, r3, r2 /* Clear the TSIZE bits */
ori r3, r3, MAS1_TSIZE(BOOKE_PAGESZ_4K)@l
oris r3, r3, MAS1_IPROT@h
mtspr MAS1, r3
/*
 * Set the base address of the TLB to our PC. We assume that
 * virtual == physical. We also assume that MAS2_EPN == MAS3_RPN.
 */
lis r3, MAS2_EPN@h
ori r3, r3, MAS2_EPN@l /* R3 = MAS2_EPN */
and r1, r1, r3 /* Our PC, rounded down to the nearest page */
mfspr r2, MAS2
andc r2, r2, r3
or r2, r2, r1
mtspr MAS2, r2 /* Set the EPN to our PC base address */
mfspr r2, MAS3
andc r2, r2, r3
or r2, r2, r1
mtspr MAS3, r2 /* Set the RPN to our PC base address */
isync
msync
tlbwe
/*
 * Clear out any other TLB entries that may exist, to avoid conflicts.
 * Our TLB entry is in r14.
 */
li r0, TLBIVAX_ALL | TLBIVAX_TLB0
tlbivax 0, r0
tlbsync
mfspr r4, SPRN_TLB1CFG
rlwinm r4, r4, 0, TLBnCFG_NENTRY_MASK
li r3, 0
/* MAS1 stays 0 (invalid) for every tlbwe in the loop below */
mtspr MAS1, r3
1: cmpw r3, r14
rlwinm r5, r3, 16, MAS0_ESEL_MSK
addi r3, r3, 1
beq 2f /* skip the entry we're executing from */
oris r5, r5, MAS0_TLBSEL(1)@h
mtspr MAS0, r5
isync
tlbwe
isync
msync
2: cmpw r3, r4
blt 1b
#if defined(PPC_E500_DEBUG_TLB)
/*
 * TLB entry for debuggging in AS1
 * Create temporary TLB entry in AS0 to handle debug exception
 * As on debug exception MSR is cleared i.e. Address space is changed
 * to 0. A TLB entry (in AS0) is required to handle debug exception generated
 * in AS1.
 *
 * TLB entry is created for IVPR + IVOR15 to map on valid OP code address
 * because flash's virtual address maps to 0xff800000 - 0xffffffff.
 * and this window is outside of 4K boot window.
 */
/* 4M AS0 identity-style mapping of the boot flash window, r6 scratch */
create_tlb1_entry PPC_E500_DEBUG_TLB, \
0, BOOKE_PAGESZ_4M, \
TEXT_BASE & 0xffc00000, MAS2_I|MAS2_G, \
0xffc00000, MAS3_SX|MAS3_SW|MAS3_SR, \
0, r6
#endif
/*
 * Relocate CCSR, if necessary. We relocate CCSR if (obviously) the default
 * location is not where we want it. This typically happens on a 36-bit
 * system, where we want to move CCSR to near the top of 36-bit address space.
 *
 * To move CCSR, we create two temporary TLBs, one for the old location, and
 * another for the new location. On CoreNet systems, we also need to create
 * a special, temporary LAW.
 *
 * As a general rule, TLB0 is used for short-term TLBs, and TLB1 is used for
 * long-term TLBs, so we use TLB0 here.
 */
#if (CFG_CCSRBAR_DEFAULT != CFG_CCSRBAR_PHYS)
create_ccsr_new_tlb:
/*
 * Create a TLB for the new location of CCSR. Register R8 is reserved
 * for the virtual address of this TLB (CFG_CCSRBAR).
 */
lis r8, CFG_CCSRBAR@h
ori r8, r8, CFG_CCSRBAR@l
lis r9, (CFG_CCSRBAR + 0x1000)@h
ori r9, r9, (CFG_CCSRBAR + 0x1000)@l
create_tlb0_entry 0, \
0, BOOKE_PAGESZ_4K, \
CFG_CCSRBAR, MAS2_I|MAS2_G, \
CFG_CCSRBAR_PHYS, MAS3_SW|MAS3_SR, \
0, r3
/*
 * Create a TLB for the current location of CCSR. Register R9 is
 * reserved for the virtual address of this TLB (CFG_CCSRBAR + 0x1000).
 */
create_ccsr_old_tlb:
create_tlb0_entry 1, \
0, BOOKE_PAGESZ_4K, \
CFG_CCSRBAR + 0x1000, MAS2_I|MAS2_G, \
CFG_CCSRBAR_DEFAULT, MAS3_SW|MAS3_SR, \
0, r3
/*
 * We have a TLB for what we think is the current (old) CCSR. Let's
 * verify that, otherwise we won't be able to move it.
 * CFG_CCSRBAR_DEFAULT is always a 32-bit number, so we only
 * need to compare the lower 32 bits of CCSRBAR on CoreNet systems.
 */
verify_old_ccsr:
lis r0, CFG_CCSRBAR_DEFAULT@h
ori r0, r0, CFG_CCSRBAR_DEFAULT@l
lwz r1, 0(r9)
/* CCSRBAR holds base >> 12 (cf. the write below), shift back up */
slwi r1, r1, 12
cmpl 0, r0, r1
/*
 * If the value we read from CCSRBAR is not what we expect, then
 * enter an infinite loop. This will at least allow a debugger to
 * halt execution and examine TLBs, etc. There's no point in going
 * on.
 */
infinite_debug_loop:
bne infinite_debug_loop
/*
 * Read the current value of CCSRBAR using a load word instruction
 * followed by an isync. This forces all accesses to configuration
 * space to complete.
 */
write_new_ccsrbar:
sync
lwz r0, 0(r9)
isync
lis r0, (CFG_CCSRBAR_PHYS >> 12)@h
ori r0, r0, (CFG_CCSRBAR_PHYS >> 12)@l
stw r0, 0(r9)
sync
isync
/*
 * Read the contents of CCSRBAR from its new location, followed by
 * another isync.
 */
lwz r0, 0(r8)
isync
#endif
/*
 * Invalidate then enable the L1 caches.  For each cache: start a flash
 * invalidate (ICFI/DCFI + lock-flash), spin until the hardware clears
 * those self-clearing bits, then set the parity and enable bits and
 * spin until the enable bit reads back set.
 */
/* Enable/invalidate the I-Cache */
lis r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@h
ori r2,r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@l
mtspr SPRN_L1CSR1,r2
1:
mfspr r3,SPRN_L1CSR1
and. r1,r3,r2
bne 1b
lis r3,(L1CSR1_CPE|L1CSR1_ICE)@h
ori r3,r3,(L1CSR1_CPE|L1CSR1_ICE)@l
mtspr SPRN_L1CSR1,r3
isync
2:
mfspr r3,SPRN_L1CSR1
andi. r1,r3,L1CSR1_ICE@l
beq 2b
/* Enable/invalidate the D-Cache */
lis r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@h
ori r2,r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@l
mtspr SPRN_L1CSR0,r2
1:
mfspr r3,SPRN_L1CSR0
and. r1,r3,r2
bne 1b
lis r3,(L1CSR0_CPE|L1CSR0_DCE)@h
ori r3,r3,(L1CSR0_CPE|L1CSR0_DCE)@l
mtspr SPRN_L1CSR0,r3
isync
2:
mfspr r3,SPRN_L1CSR0
andi. r1,r3,L1CSR0_DCE@l
beq 2b
/*
 * Create AS1 mappings for the boot flash window and the initial-RAM
 * (stack) area, then rfi into switch_as with MSR[IS]=MSR[DS]=1.
 * switch_as turns the L1 D-cache into initial RAM by zeroing and
 * locking one half of its lines, then jumps to _start_cont in flash.
 */
create_init_ram_area:
/*
 * NOTE(review): the two instructions below load r6 but r6 is then
 * used as the macro's scratch register and fully overwritten --
 * they look like dead leftovers; confirm before removing.
 */
lis r6,FSL_BOOKE_MAS0(1, 15, 0)@h
ori r6,r6,FSL_BOOKE_MAS0(1, 15, 0)@l
/* create a temp mapping in AS=1 to the 4M boot window */
create_tlb1_entry 15, \
1, BOOKE_PAGESZ_4M, \
TEXT_BASE & 0xffc00000, MAS2_I|MAS2_G, \
0xffc00000, MAS3_SX|MAS3_SW|MAS3_SR, \
0, r6
/* create a temp mapping in AS=1 to the stack */
create_tlb1_entry 14, \
1, BOOKE_PAGESZ_16K, \
CFG_INIT_RAM_ADDR, 0, \
CFG_INIT_RAM_ADDR, MAS3_SX|MAS3_SW|MAS3_SR, \
0, r6
/*
 * NOTE(review): without parentheses, @h/@l may bind only to MSR_DE in
 * "MSR_IS|MSR_DS|MSR_DE@h" -- (MSR_IS|MSR_DS|MSR_DE)@h looks intended;
 * confirm how the assembler parses this before changing it.
 */
lis r6,MSR_IS|MSR_DS|MSR_DE@h
ori r6,r6,MSR_IS|MSR_DS|MSR_DE@l
lis r7,switch_as@h
ori r7,r7,switch_as@l
mtspr SPRN_SRR0,r7
mtspr SPRN_SRR1,r6
rfi
switch_as:
/* L1 DCache is used for initial RAM */
/* Allocate initial RAM in data cache. */
lis r3,CFG_INIT_RAM_ADDR@h
ori r3,r3,CFG_INIT_RAM_ADDR@l
mfspr r2, L1CFG0
andi. r2, r2, 0x1ff
/* cache size * 1024 / (2 * L1 line size) */
slwi r2, r2, (10 - 1 - L1_CACHE_SHIFT)
mtctr r2
li r0,0
1:
dcbz r0,r3 /* establish the line with zeros (no memory behind it) */
dcbtls 0,r0,r3 /* lock it so it cannot be evicted */
addi r3,r3,CACHELINE_SIZE
bdnz 1b
/*
 * Jump out the last 4K page and continue to 'normal' start.
 * Calculate absolute address in FLASH and jump there.
 */
lis r3,TEXT_BASE@h
ori r3,r3,TEXT_BASE@l
addi r3,r3,_start_cont - _start + _START_OFFSET
mtlr r3
blr
.text
.globl _start
_start:
.long 0x62626F78 /* Magic Number ("bbox" in ASCII) */
.align 4
.globl _start_cont
_start_cont:
/* Setup the stack in initial RAM,could be L2-as-SRAM or L1 dcache */
lis r3,(CFG_INIT_RAM_ADDR)@h
ori r3,r3,((CFG_INIT_SP_OFFSET-16)&~0xf)@l /* 16-byte aligned SP */
li r0,0
stw r0,0(r3) /* Terminate Back Chain */
stw r0,+4(r3) /* NULL return address. */
mr r1,r3 /* Transfer to SP(r1) */
GET_GOT
bl cpu_init_early_f
/* switch back to AS = 0 */
lis r3,(MSR_CE|MSR_ME|MSR_DE)@h
ori r3,r3,(MSR_CE|MSR_ME|MSR_DE)@l
mtmsr r3
isync
bl initdram
b relocate_code /* does not return here */
isync
/*
 * Exception vector table.  Fixed-offset vectors (0x100, 0x200, ...)
 * are laid out with "." assignments; most are the STD/MCK/CRIT
 * exception macros, while Alignment and ProgramCheck are expanded by
 * hand to capture DAR/DSISR and pass a stack-frame pointer.
 */
. = EXC_OFF_SYS_RESET
.globl _start_of_vectors
_start_of_vectors:
/* Critical input. */
CRIT_EXCEPTION(0x0100, CriticalInput, CritcalInputException)
/* Machine check */
MCK_EXCEPTION(0x200, MachineCheck, MachineCheckException)
/* Data Storage exception. */
STD_EXCEPTION(0x0300, DataStorage, UnknownException)
/* Instruction Storage exception. */
STD_EXCEPTION(0x0400, InstStorage, UnknownException)
/* External Interrupt exception. */
STD_EXCEPTION(0x0500, ExtInterrupt, UnknownException)
/* Alignment exception. */
. = 0x0600
Alignment:
EXCEPTION_PROLOG(SRR0, SRR1)
mfspr r4,DAR
stw r4,_DAR(r21)
mfspr r5,DSISR
stw r5,_DSISR(r21)
addi r3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_TEMPLATE(Alignment, AlignmentException, MSR_KERNEL, COPY_EE)
/* Program check exception */
. = 0x0700
ProgramCheck:
EXCEPTION_PROLOG(SRR0, SRR1)
addi r3,r1,STACK_FRAME_OVERHEAD
/* NOTE(review): macro invocation split across two lines -- confirm
   the assembler accepts this without a line continuation */
EXC_XFER_TEMPLATE(ProgramCheck, ProgramCheckException,
MSR_KERNEL, COPY_EE)
/* No FPU on MPC85xx. This exception is not supposed to happen. */
STD_EXCEPTION(0x0800, FPUnavailable, UnknownException)
. = 0x0900
/*
 * r0 - SYSCALL number
 * r3-... arguments
 *
 * The two addis/ori pairs below are patched at run time (see the
 * "patched in trap_init" note) with the function-table address and
 * entry count.  A small private stack whose pointer lives at
 * 0xd00-4 saves LR/SRR0/SRR1 so the handler can return through
 * _back, which restores them and rfi's back to the caller.
 */
SystemCall:
addis r11,r0,0 /* get functions table addr */
ori r11,r11,0 /* Note: this code is patched in trap_init */
addis r12,r0,0 /* get number of functions */
ori r12,r12,0
cmplw 0,r0,r12 /* syscall number out of range? */
bge 1f
rlwinm r0,r0,2,0,31 /* fn_addr = fn_tbl[r0] */
add r11,r11,r0
lwz r11,0(r11)
li r20,0xd00-4 /* Get stack pointer */
lwz r12,0(r20)
subi r12,r12,12 /* Adjust stack pointer */
li r0,0xc00+_end_back-SystemCall
cmplw 0,r0,r12 /* Check stack overflow */
bgt 1f
stw r12,0(r20)
mflr r0
stw r0,0(r12)
mfspr r0,SRR0
stw r0,4(r12)
mfspr r0,SRR1
stw r0,8(r12)
li r12,0xc00+_back-SystemCall
mtlr r12 /* handler returns into _back */
mtspr SRR0,r11
1: SYNC
rfi
_back:
mfmsr r11 /* Disable interrupts */
li r12,0
ori r12,r12,MSR_EE
andc r11,r11,r12
SYNC /* Some chip revs need this... */
mtmsr r11
SYNC
li r12,0xd00-4 /* restore regs */
lwz r12,0(r12)
lwz r11,0(r12)
mtlr r11
lwz r11,4(r12)
mtspr SRR0,r11
lwz r11,8(r12)
mtspr SRR1,r11
addi r12,r12,12 /* Adjust stack pointer */
li r20,0xd00-4
stw r12,0(r20)
SYNC
rfi
_end_back:
/* Remaining fixed-offset vectors (0x0a00 - 0x0f00). */
STD_EXCEPTION(0x0a00, Decrementer, UnknownException)
STD_EXCEPTION(0x0b00, IntervalTimer, UnknownException)
STD_EXCEPTION(0x0c00, WatchdogTimer, UnknownException)
STD_EXCEPTION(0x0d00, DataTLBError, UnknownException)
STD_EXCEPTION(0x0e00, InstructionTLBError, UnknownException)
CRIT_EXCEPTION(0x0f00, DebugBreakpoint, DebugException )
.globl _end_of_vectors
_end_of_vectors:
. = . + (0x100 - ( . & 0xff )) /* align for debug */
/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception.
 * Register r21 is pointer into trap frame, r1 has new stack pointer.
 * On entry LR points at a two-word table: [handler, return address]
 * (cf. int_return/crit_return below).
 */
.globl transfer_to_handler
transfer_to_handler:
stw r22,_NIP(r21)
lis r22,MSR_POW@h
andc r23,r23,r22 /* make sure POW is cleared in saved MSR */
stw r23,_MSR(r21)
SAVE_GPR(7, r21)
SAVE_4GPRS(8, r21)
SAVE_8GPRS(12, r21)
SAVE_8GPRS(24, r21)
mflr r23
andi. r24,r23,0x3f00 /* get vector offset */
stw r24,TRAP(r21)
li r22,0
stw r22,RESULT(r21)
mtspr SPRG2,r22 /* r1 is now kernel sp */
lwz r24,0(r23) /* virtual address of handler */
lwz r23,4(r23) /* where to go when done */
mtspr SRR0,r24
mtspr SRR1,r20
mtlr r23
SYNC
rfi /* jump to handler, enable MMU */
/*
 * Return path for standard exceptions: disable EE, restore CTR/LR/
 * XER/CR, all GPRs and NIP/MSR from the exception frame at r1, and
 * rfi back.  crit_return/mck_return below are identical except for
 * the return SPRs (CSRR/MCSRR) and instruction (rfci/rfmci).
 */
int_return:
mfmsr r28 /* Disable interrupts */
li r4,0
ori r4,r4,MSR_EE
andc r28,r28,r4
SYNC /* Some chip revs need this... */
mtmsr r28
SYNC
lwz r2,_CTR(r1)
lwz r0,_LINK(r1)
mtctr r2
mtlr r0
lwz r2,_XER(r1)
lwz r0,_CCR(r1)
mtspr XER,r2
mtcrf 0xFF,r0
REST_10GPRS(3, r1)
REST_10GPRS(13, r1)
REST_8GPRS(23, r1)
REST_GPR(31, r1)
lwz r2,_NIP(r1) /* Restore environment */
lwz r0,_MSR(r1)
mtspr SRR0,r2
mtspr SRR1,r0
lwz r0,GPR0(r1)
lwz r2,GPR2(r1)
lwz r1,GPR1(r1)
SYNC
rfi
/*
 * Return path for critical exceptions: same frame restore as
 * int_return, but returns through CSRR0/CSRR1 with rfci.
 */
crit_return:
mfmsr r28 /* Disable interrupts */
li r4,0
ori r4,r4,MSR_EE
andc r28,r28,r4
SYNC /* Some chip revs need this... */
mtmsr r28
SYNC
lwz r2,_CTR(r1)
lwz r0,_LINK(r1)
mtctr r2
mtlr r0
lwz r2,_XER(r1)
lwz r0,_CCR(r1)
mtspr XER,r2
mtcrf 0xFF,r0
REST_10GPRS(3, r1)
REST_10GPRS(13, r1)
REST_8GPRS(23, r1)
REST_GPR(31, r1)
lwz r2,_NIP(r1) /* Restore environment */
lwz r0,_MSR(r1)
mtspr SPRN_CSRR0,r2
mtspr SPRN_CSRR1,r0
lwz r0,GPR0(r1)
lwz r2,GPR2(r1)
lwz r1,GPR1(r1)
SYNC
rfci
/*
 * Return path for machine-check exceptions: same frame restore as
 * int_return, but returns through MCSRR0/MCSRR1 with rfmci.
 */
mck_return:
mfmsr r28 /* Disable interrupts */
li r4,0
ori r4,r4,MSR_EE
andc r28,r28,r4
SYNC /* Some chip revs need this... */
mtmsr r28
SYNC
lwz r2,_CTR(r1)
lwz r0,_LINK(r1)
mtctr r2
mtlr r0
lwz r2,_XER(r1)
lwz r0,_CCR(r1)
mtspr XER,r2
mtcrf 0xFF,r0
REST_10GPRS(3, r1)
REST_10GPRS(13, r1)
REST_8GPRS(23, r1)
REST_GPR(31, r1)
lwz r2,_NIP(r1) /* Restore environment */
lwz r0,_MSR(r1)
mtspr SPRN_MCSRR0,r2
mtspr SPRN_MCSRR1,r0
lwz r0,GPR0(r1)
lwz r2,GPR2(r1)
lwz r1,GPR1(r1)
SYNC
rfmci
/*
 * Cache functions.
 */
/* Flash-invalidate the L1 I-cache (sets the self-clearing ICFI bit).
 * Clobbers r0. */
.globl invalidate_icache
invalidate_icache:
mfspr r0,L1CSR1
ori r0,r0,L1CSR1_ICFI
msync
isync
mtspr L1CSR1,r0
isync
blr
/* Flash-invalidate the L1 D-cache (sets the self-clearing DCFI bit).
 * Clobbers r0. */
.globl invalidate_dcache
invalidate_dcache:
mfspr r0,L1CSR0
ori r0,r0,L1CSR0_DCFI
msync
isync
mtspr L1CSR0,r0
isync
blr
/* Invalidate then enable the I-cache: ori 0x0001 sets ICE, oris
 * 0x0001 sets the 0x00010000 bit (CPE, cf. the named-constant form
 * used at startup).  Clobbers r0, r4, r8. */
.globl icache_enable
icache_enable:
mflr r8
bl invalidate_icache
mtlr r8
isync
mfspr r4,L1CSR1
ori r4,r4,0x0001
oris r4,r4,0x0001
mtspr L1CSR1,r4
isync
blr
/* Clear ICE to disable the I-cache.  Clobbers r0, r3. */
.globl icache_disable
icache_disable:
mfspr r0,L1CSR1
lis r3,0
ori r3,r3,L1CSR1_ICE
andc r0,r0,r3
mtspr L1CSR1,r0
isync
blr
/* Return the ICE bit of L1CSR1 in r3 (non-zero = enabled). */
.globl icache_status
icache_status:
mfspr r3,L1CSR1
andi. r3,r3,L1CSR1_ICE
blr
/* Invalidate then enable the D-cache (DCE plus the 0x00010000 bit,
 * CPE).  Clobbers r0, r8. */
.globl dcache_enable
dcache_enable:
mflr r8
bl invalidate_dcache
mtlr r8
isync
mfspr r0,L1CSR0
ori r0,r0,0x0001
oris r0,r0,0x0001
msync
isync
mtspr L1CSR0,r0
isync
blr
/* Clear DCE to disable the D-cache.  Clobbers r3, r4. */
.globl dcache_disable
dcache_disable:
mfspr r3,L1CSR0
lis r4,0
ori r4,r4,L1CSR0_DCE
andc r3,r3,r4
mtspr L1CSR0,r3
isync
blr
/* Return the DCE bit of L1CSR0 in r3 (non-zero = enabled). */
.globl dcache_status
dcache_status:
mfspr r3,L1CSR0
andi. r3,r3,L1CSR0_DCE
blr
/* Trivial SPR accessors: return PIR/PVR/SVR in r3, or write TCR
 * from r3. */
.globl get_pir
get_pir:
mfspr r3,PIR
blr
.globl get_pvr
get_pvr:
mfspr r3,PVR
blr
.globl get_svr
get_svr:
mfspr r3,SVR
blr
.globl wr_tcr
wr_tcr:
mtspr TCR,r3
blr
/*
 * Function: in8
 * Description: Input 8 bits
 * In: r3 = address; Out: r3 = zero-extended byte
 */
.globl in8
in8:
lbz r3,0x0000(r3)
blr
/*
 * Function: out8
 * Description: Output 8 bits
 * In: r3 = address, r4 = value; sync makes the store visible before
 * returning
 */
.globl out8
out8:
stb r4,0x0000(r3)
sync
blr
/*
 * Function: out16
 * Description: Output 16 bits
 * In: r3 = address, r4 = value
 */
.globl out16
out16:
sth r4,0x0000(r3)
sync
blr
/*
 * Function: out16r
 * Description: Byte reverse and output 16 bits
 * (r0 in the RA slot of sthbrx reads as literal 0, so the store goes
 * to the address in r3)
 */
.globl out16r
out16r:
sthbrx r4,r0,r3
sync
blr
/*
 * Function: out32
 * Description: Output 32 bits
 * In: r3 = address, r4 = value
 */
.globl out32
out32:
stw r4,0x0000(r3)
sync
blr
/*
 * Function: out32r
 * Description: Byte reverse and output 32 bits
 * (r0 in the RA slot reads as literal 0)
 */
.globl out32r
out32r:
stwbrx r4,r0,r3
sync
blr
/*
 * Function: in16
 * Description: Input 16 bits
 * In: r3 = address; Out: r3 = zero-extended halfword
 */
.globl in16
in16:
lhz r3,0x0000(r3)
blr
/*
 * Function: in16r
 * Description: Input 16 bits and byte reverse
 * (r0 in the RA slot of lhbrx reads as literal 0)
 */
.globl in16r
in16r:
lhbrx r3,r0,r3
blr
/*
 * Function: in32
 * Description: Input 32 bits
 *
 * r3 = address on entry; r3 = *(u32 *)r3 on return.
 * Fixed to use the symbolic register name r3 instead of the bare number 3,
 * matching every other accessor in this file.
 */
	.globl in32
in32:
	lwz	r3,0x0000(r3)
	blr
/*
 * Function: in32r
 * Description: Input 32 bits and byte reverse
 *
 * r3 = address on entry; r3 = byte-swapped *(u32 *)r3 on return.
 */
	.globl in32r
in32r:
	lwbrx	r3,r0,r3		/* rA=0 in indexed form: EA is just r3 */
	blr
/*
 * void e500_write_tlb(mas0, mas1, mas2, mas3, mas7)
 *
 * Writes one TLB entry described by MAS0..MAS3 (r3..r6).
 * NOTE(review): the prototype mentions mas7 (r7), but MAS7 is never
 * written here — presumably 36-bit physical addressing is not used;
 * confirm against the callers. Returns 0 in r3.
 */
	.globl e500_write_tlb
e500_write_tlb:
	mtspr	MAS0,r3
	mtspr	MAS1,r4
	mtspr	MAS2,r5
	mtspr	MAS3,r6
	li	r3,0			/* return value */
	isync				/* ensure MAS writes complete first */
	tlbwe
	msync
	isync
	blr
/*
 * void relocate_code (end of ram)
 *
 * This "function" does not return, instead it continues in RAM
 * after relocating the monitor code.
 *
 * r3 = end_of_ram
 * r4 = src
 * r5 = length in bytes
 * r6 = cachelinesize
 */
	.globl relocate_code
relocate_code:
	mr	r9, r3			/* Save end of RAM */
	GET_GOT
	lis	r4,TEXT_BASE@h
	ori	r4,r4,TEXT_BASE@l	/* r4 = link-time base = copy source */
	lwz	r5,GOT(__bss_stop)	/* size */
	sub	r5,r5,r4		/* full image size incl. bss */
	sub	r3, r3, r5		/* r3 = destination = end_of_ram - size */
	lwz	r5,GOT(__init_end)	/* Copy to init_end only */
	sub	r5,r5,r4		/* r5 = number of bytes to copy */
	mr	r1, r3			/* stack grows down from the new base */
	mr	r10, r3			/* r10 = destination, kept for later */
	li	r6,CACHELINE_SIZE
	/*
	 * Fix GOT pointer:
	 *
	 * New GOT-PTR = (old GOT-PTR - TEXT_BASE) + Destination Address
	 *
	 * Offset:
	 */
	sub	r15,r10,r4		/* r15 = relocation offset (dest - src) */
	/* First our own GOT */
	add	r14,r14,r15
	/* then the one used by the C code */
	add	r30,r30,r15
	/*
	 * Now relocate code
	 */
	cmplw	cr1,r3,r4		/* cr1: dest vs src decides copy direction */
	addi	r0,r5,3
	srwi.	r0,r0,2			/* r0 = word count, rounded up */
	beq	cr1,4f			/* In place copy is not necessary */
	beq	7f			/* Protect against 0 count */
	mtctr	r0
	bge	cr1,2f			/* dest >= src: copy backwards */
	la	r8,-4(r4)
	la	r7,-4(r3)
1:	lwzu	r0,4(r8)		/* forward word copy */
	stwu	r0,4(r7)
	bdnz	1b
	b	4f
2:	slwi	r0,r0,2			/* backward word copy from the top */
	add	r8,r4,r0
	add	r7,r3,r0
3:	lwzu	r0,-4(r8)
	stwu	r0,-4(r7)
	bdnz	3b
	/*
	 * Now flush the cache: note that we must start from a cache aligned
	 * address. Otherwise we might miss one cache line.
	 */
4:	cmpwi	r6,0
	add	r5,r3,r5		/* r5 = end of copied region */
	beq	7f			/* Always flush prefetch queue in any case */
	subi	r0,r6,1
	andc	r3,r3,r0		/* align start down to a cache line */
	mr	r4,r3
5:	dcbst	0,r4			/* write copied code out of the D-cache */
	add	r4,r4,r6
	cmplw	r4,r5
	blt	5b
	sync				/* Wait for all dcbst to complete on bus */
	mr	r4,r3
6:	icbi	0,r4			/* discard stale lines from the I-cache */
	add	r4,r4,r6
	cmplw	r4,r5
	blt	6b
7:	sync				/* Wait for all icbi to complete on bus */
	isync
	/*
	 * We are done. Do not return, instead branch to second part of board
	 * initialization, now running from RAM.
	 */
	addi	r0,r10,in_ram - _start + _START_OFFSET
	/*
	 * As IVPR is going to point RAM address,
	 * Make sure IVOR15 has valid opcode to support debugger
	 */
	mtspr	IVOR15,r0
	/*
	 * Re-point the IVPR at RAM
	 */
	mtspr	IVPR,r10
	mtlr	r0
	blr				/* NEVER RETURNS! */
	/*
	 * in_ram - continuation of relocate_code, now executing from RAM.
	 * Patches GOT2 and fixup tables by the relocation offset, clears
	 * .bss, then tail-calls board_init_r (never returns here).
	 */
	.globl in_ram
in_ram:
	/*
	 * Relocation Function, r14 points to got2+0x8000
	 *
	 * Adjust got2 pointers, no need to check for 0, this code
	 * already puts a few entries in the table.
	 */
	li	r0,__got2_entries@sectoff@l
	la	r3,GOT(_GOT2_TABLE_)	/* relocated address of the table */
	lwz	r11,GOT(_GOT2_TABLE_)	/* link-time address stored in GOT */
	mtctr	r0
	sub	r11,r3,r11		/* r11 = relocation offset */
	addi	r3,r3,-4
1:	lwzu	r0,4(r3)
	cmpwi	r0,0			/* skip NULL entries */
	beq-	2f
	add	r0,r0,r11
	stw	r0,0(r3)
2:	bdnz	1b
	/*
	 * Now adjust the fixups and the pointers to the fixups
	 * in case we need to move ourselves again.
	 */
	li	r0,__fixup_entries@sectoff@l
	lwz	r3,GOT(_FIXUP_TABLE_)
	cmpwi	r0,0			/* empty fixup table? */
	mtctr	r0
	addi	r3,r3,-4
	beq	4f
3:	lwzu	r4,4(r3)		/* r4 = pointer to a word to be fixed */
	lwzux	r0,r4,r11		/* relocate the pointer, load the word */
	cmpwi	r0,0
	add	r0,r0,r11
	stw	r4,0(r3)		/* store back the relocated pointer */
	beq-	5f			/* don't relocate NULL words */
	stw	r0,0(r4)
5:	bdnz	3b
4:
clear_bss:
	/*
	 * Now clear BSS segment
	 */
	lwz	r3,GOT(__bss_start)
	lwz	r4,GOT(__bss_stop)
	cmplw	0,r3,r4			/* empty bss? */
	beq	6f
	li	r0,0
5:
	stw	r0,0(r3)		/* zero one word at a time */
	addi	r3,r3,4
	cmplw	0,r3,r4
	blt	5b
6:
	mr	r3, r10			/* Destination Address */
	bl	board_init_r		/* enter C; does not return */
/*
 * Copy exception vector code to low memory
 *
 * r3: dest_addr
 * r7: source address, r8: end address, r9: target address
 *
 * Copies the vector code to 0x100 upward, relocates the per-vector
 * handler pointers via trap_reloc, then programs IVOR0..IVOR15 with the
 * fixed offsets and points IVPR at 0.
 */
	.globl trap_init
trap_init:
	mflr	r4			/* save link register */
	GET_GOT
	lwz	r7,GOT(_start_of_vectors)
	lwz	r8,GOT(_end_of_vectors)
	li	r9,0x100		/* reset vector always at 0x100 */
	cmplw	0,r7,r8
	bgelr				/* return if r7>=r8 - just in case */
1:
	lwz	r0,0(r7)		/* word-copy the vector code down */
	stw	r0,0(r9)
	addi	r7,r7,4
	addi	r9,r9,4
	cmplw	0,r7,r8
	bne	1b
	/*
	 * relocate `hdlr' and `int_return' entries
	 */
	li	r7,.L_CriticalInput - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_MachineCheck - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_DataStorage - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_InstStorage - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_ExtInterrupt - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_Alignment - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_ProgramCheck - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_FPUnavailable - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_Decrementer - _start + _START_OFFSET
	bl	trap_reloc
	/* remaining vectors are 0x100 apart; loop to the end */
	li	r7,.L_IntervalTimer - _start + _START_OFFSET
	li	r8,_end_of_vectors - _start + _START_OFFSET
2:
	bl	trap_reloc
	addi	r7,r7,0x100		/* next exception vector */
	cmplw	0,r7,r8
	blt	2b
	/* Update IVORs as per relocated vector table address */
	li	r7,0x0100
	mtspr	IVOR0,r7		/* 0: Critical input */
	li	r7,0x0200
	mtspr	IVOR1,r7		/* 1: Machine check */
	li	r7,0x0300
	mtspr	IVOR2,r7		/* 2: Data storage */
	li	r7,0x0400
	mtspr	IVOR3,r7		/* 3: Instruction storage */
	li	r7,0x0500
	mtspr	IVOR4,r7		/* 4: External interrupt */
	li	r7,0x0600
	mtspr	IVOR5,r7		/* 5: Alignment */
	li	r7,0x0700
	mtspr	IVOR6,r7		/* 6: Program check */
	li	r7,0x0800
	mtspr	IVOR7,r7		/* 7: floating point unavailable */
	li	r7,0x0900
	mtspr	IVOR8,r7		/* 8: System call */
	/* 9: Auxiliary processor unavailable (unsupported) */
	li	r7,0x0a00
	mtspr	IVOR10,r7		/* 10: Decrementer */
	li	r7,0x0b00
	mtspr	IVOR11,r7		/* 11: Interval timer */
	li	r7,0x0c00
	mtspr	IVOR12,r7		/* 12: Watchdog timer */
	li	r7,0x0d00
	mtspr	IVOR13,r7		/* 13: Data TLB error */
	li	r7,0x0e00
	mtspr	IVOR14,r7		/* 14: Instruction TLB error */
	li	r7,0x0f00
	mtspr	IVOR15,r7		/* 15: Debug */
	lis	r7,0x0			/* vector base back to address 0 */
	mtspr	IVPR,r7
	mtlr	r4			/* restore link register */
	blr
	/* _text_base - link-time base address of the image, as a data word */
	.globl _text_base
_text_base:
	.long	TEXT_BASE
	/*
	 * unlock_ram_in_cache - tear down the locked-cache init RAM.
	 * Invalidates and unlocks (dcblc) each line of the INIT_RAM region,
	 * then invalidates the four 4 KiB TLB pages that mapped it.
	 * Clobbers: r3, r4, ctr.
	 */
	.globl unlock_ram_in_cache
unlock_ram_in_cache:
	/* invalidate the INIT_RAM section */
	lis	r3,(CFG_INIT_RAM_ADDR & ~(CACHELINE_SIZE-1))@h
	ori	r3,r3,(CFG_INIT_RAM_ADDR & ~(CACHELINE_SIZE-1))@l
	mfspr	r4,L1CFG0
	andi.	r4,r4,0x1ff		/* cache size field from L1CFG0 */
	slwi	r4,r4,(10 - 1 - L1_CACHE_SHIFT)	/* convert to a line count;
					 * presumably KiB -> lines over half the
					 * cache — TODO confirm vs core manual */
	mtctr	r4
1:	dcbi	r0,r3			/* invalidate the line... */
	dcblc	r0,r3			/* ...and clear its lock bit */
	addi	r3,r3,CACHELINE_SIZE
	bdnz	1b
	sync
	/* Invalidate the TLB entries for the cache */
	lis	r3,CFG_INIT_RAM_ADDR@h
	ori	r3,r3,CFG_INIT_RAM_ADDR@l
	tlbivax	0,r3
	addi	r3,r3,0x1000		/* next 4 KiB page */
	tlbivax	0,r3
	addi	r3,r3,0x1000
	tlbivax	0,r3
	addi	r3,r3,0x1000
	tlbivax	0,r3
	isync
	blr
	/*
	 * flush_dcache - flush the entire L1 data cache by loading enough
	 * low memory to displace every line (with HID0[DCFA] forcing cache
	 * allocation), then dcbf-ing the same range.
	 * Clobbers: r3-r9, ctr.
	 */
	.globl flush_dcache
flush_dcache:
	mfspr	r3,SPRN_L1CFG0
	rlwinm	r5,r3,9,3		/* Extract cache block size */
	twlgti	r5,1			/* Only 32 and 64 byte cache blocks
					 * are currently defined.
					 */
	li	r4,32
	subfic	r6,r5,2			/* r6 = log2(1KiB / cache block size) -
					 * log2(number of ways)
					 */
	slw	r5,r4,r5		/* r5 = cache block size */
	rlwinm	r7,r3,0,0xff		/* Extract number of KiB in the cache */
	mulli	r7,r7,13		/* An 8-way cache will require 13
					 * loads per set.
					 */
	slw	r7,r7,r6		/* r7 = total number of loads to issue */
	/* save off HID0 and set DCFA */
	mfspr	r8,SPRN_HID0
	ori	r9,r8,HID0_DCFA@l
	mtspr	SPRN_HID0,r9
	isync
	lis	r4,0			/* sweep from address 0 */
	mtctr	r7
1:	lwz	r3,0(r4)		/* Load... */
	add	r4,r4,r5
	bdnz	1b
	msync
	lis	r4,0
	mtctr	r7
1:	dcbf	0,r4			/* ...and flush. */
	add	r4,r4,r5
	bdnz	1b
	/* restore HID0 */
	mtspr	SPRN_HID0,r8
	isync
	blr
	/*
	 * setup_ivors - program all IVOR/GIVOR registers with the fixed
	 * offsets defined in fixed_ivor.S (included inline). Clobbers r3.
	 */
	.globl setup_ivors
setup_ivors:
#include "fixed_ivor.S"
	blr
|
a3f/bareDOOM
| 1,932
|
arch/powerpc/cpu-85xx/fixed_ivor.S
|
/*
* Copyright 2009 Freescale Semiconductor, Inc.
*
* Kumar Gala <kumar.gala@freescale.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
/* This file is intended to be included by other asm code since
* we will want to execute this on both the primary core when
* it does a bootm and the secondary core's that get released
* out of the spin table.
*/
/*
 * SET_IVOR/SET_GIVOR - load a fixed vector offset into the numbered
 * IVOR/GIVOR SPR. Both clobber r3. This file is included inline (it has
 * no label of its own), so the invocations below execute in sequence.
 */
#define SET_IVOR(vector_number, vector_offset) \
	li	r3,vector_offset@l; \
	mtspr	SPRN_IVOR##vector_number,r3;
#define SET_GIVOR(vector_number, vector_offset) \
	li	r3,vector_offset@l; \
	mtspr	SPRN_GIVOR##vector_number,r3;
	SET_IVOR(0, 0x020) /* Critical Input */
	SET_IVOR(1, 0x000) /* Machine Check */
	SET_IVOR(2, 0x060) /* Data Storage */
	SET_IVOR(3, 0x080) /* Instruction Storage */
	SET_IVOR(4, 0x0a0) /* External Input */
	SET_IVOR(5, 0x0c0) /* Alignment */
	SET_IVOR(6, 0x0e0) /* Program */
	SET_IVOR(7, 0x100) /* FP Unavailable */
	SET_IVOR(8, 0x120) /* System Call */
	SET_IVOR(9, 0x140) /* Auxiliary Processor Unavailable */
	SET_IVOR(10, 0x160) /* Decrementer */
	SET_IVOR(11, 0x180) /* Fixed Interval Timer */
	SET_IVOR(12, 0x1a0) /* Watchdog Timer */
	SET_IVOR(13, 0x1c0) /* Data TLB Error */
	SET_IVOR(14, 0x1e0) /* Instruction TLB Error */
	SET_IVOR(15, 0x040) /* Debug */
	/* e500v1 & e500v2 only */
	SET_IVOR(32, 0x200) /* SPE Unavailable */
	SET_IVOR(33, 0x220) /* Embedded FP Data */
	SET_IVOR(34, 0x240) /* Embedded FP Round */
	SET_IVOR(35, 0x260) /* Performance monitor */
|
a3f/bareDOOM
| 2,866
|
arch/powerpc/boards/pcm030/barebox.lds.S
|
/*
* (C) Copyright 2003
* Wolfgang Denk, DENX Software Engineering, wd@denx.de.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <asm-generic/barebox.lds.h>
OUTPUT_ARCH("powerpc")
ENTRY(_start)
/* Do we need any of these for elf?
__DYNAMIC = 0; */
SECTIONS
{
	. = TEXT_BASE;
	/* start.o must come first: the reset entry point lives there */
	.text :
	{
		_text = .;
		_stext = .;
		arch/powerpc/mach-mpc5xxx/start.o (.text)
		*(.text*)
		*(.got1*)
		. = ALIGN(16);
		*(.rodata*)
		*(.rodata1*)
		*(.rodata.str1.4)
		RO_DATA_SECTION
	}
	/* Read-only sections, merged into text segment: */
	/*
	.interp : { *(.interp) }
	.hash : { *(.hash) }
	.dynsym : { *(.dynsym) }
	.dynstr : { *(.dynstr) }
	.rel.text : { *(.rel.text) }
	.rela.text : { *(.rela.text) }
	.rel.data : { *(.rel.data) }
	.rela.data : { *(.rela.data) }
	.rel.rodata : { *(.rel.rodata) }
	.rela.rodata : { *(.rela.rodata) }
	.rel.got : { *(.rel.got) }
	.rela.got : { *(.rela.got) }
	.rel.ctors : { *(.rel.ctors) }
	.rela.ctors : { *(.rela.ctors) }
	.rel.dtors : { *(.rel.dtors) }
	.rela.dtors : { *(.rela.dtors) }
	.rel.bss : { *(.rel.bss) }
	.rela.bss : { *(.rela.bss) }
	.rel.plt : { *(.rel.plt) }
	.rela.plt : { *(.rela.plt) }
	.init : { *(.init) }
	.plt : { *(.plt) }
	.text :
	.fini : { *(.fini) } =0
	.ctors : { *(.ctors) }
	.dtors : { *(.dtors) }
	*/
	/* Read-write section, merged into data segment: */
	. = (. + 0x0FFF) & 0xFFFFF000;	/* round up to a 4 KiB boundary */
	_etext = .;
	PROVIDE (erotext = .);
	_sdata = .;
	/*
	 * GOT2 and fixup tables must be laid out contiguously here: the
	 * relocation code in start.S walks _GOT2_TABLE_.._FIXUP_TABLE_ and
	 * the __got2_entries/__fixup_entries counts computed below.
	 */
	.reloc :
	{
		*(.got)
		_GOT2_TABLE_ = .;
		*(.got2)
		_FIXUP_TABLE_ = .;
		*(.fixup)
	}
	__got2_entries = (_FIXUP_TABLE_ - _GOT2_TABLE_) >> 2;
	__fixup_entries = (. - _FIXUP_TABLE_) >> 2;
	.data :
	{
		*(.data*)
		*(.data1*)
		*(.sdata*)
		*(.sdata2*)
		*(.dynamic*)
		CONSTRUCTORS
	}
	_edata = .;
	PROVIDE (edata = .);
	__start___ex_table = .;
	__ex_table : { *(__ex_table) }
	__stop___ex_table = .;
	. = ALIGN(4096);
	__init_begin = .;
	.text.init : { *(.text.init) }
	.data.init : { *(.data.init) }
	. = ALIGN(4096);
	__init_end = .;
	__init_size = __init_end - _start;
	__bss_start = .;
	.bss :
	{
		*(.sbss*) *(.scommon*)
		*(.dynbss*)
		*(.bss*)
		*(COMMON)
	}
	__bss_stop = .;
	_end = . ;
	PROVIDE (end = .);
}
|
AAAAyl0n/Lambda0
| 12,458
|
1.Hardware/Screen/1.14寸方形屏/03-程序源码/11-1.14IPS显示屏STM32F103硬件SPI+DMA例程/CORE/startup_stm32f10x_md.s
|
;******************** (C) COPYRIGHT 2011 STMicroelectronics ********************
;* File Name : startup_stm32f10x_md.s
;* Author : MCD Application Team
;* Version : V3.5.0
;* Date : 11-March-2011
;* Description : STM32F10x Medium Density Devices vector table for MDK-ARM
;* toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Configure the clock system
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM3 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
; THE PRESENT FIRMWARE WHICH IS FOR GUIDANCE ONLY AIMS AT PROVIDING CUSTOMERS
; WITH CODING INFORMATION REGARDING THEIR PRODUCTS IN ORDER FOR THEM TO SAVE TIME.
; AS A RESULT, STMICROELECTRONICS SHALL NOT BE HELD LIABLE FOR ANY DIRECT,
; INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING FROM THE
; CONTENT OF SUCH FIRMWARE AND/OR THE USE MADE BY CUSTOMERS OF THE CODING
; INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS.
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; Main stack: 1 KiB, zero-init, 8-byte aligned (ALIGN=3 is a power of two).
; __initial_sp labels the address just past the stack and becomes vector 0.
Stack_Size      EQU     0x00000400

                AREA    STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem       SPACE   Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; C-library heap: 512 bytes, bounded by __heap_base/__heap_limit for
; __user_initial_stackheap below.
Heap_Size       EQU     0x00000200

                AREA    HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem        SPACE   Heap_Size
__heap_limit
                PRESERVE8
                THUMB

; Cortex-M3 exception + IRQ vector table for STM32F10x medium-density
; parts. Placed in the RESET area so it sits at the flash base, which is
; mapped to address 0 at reset; word 0 is the initial MSP, word 1 the
; reset vector.
; Vector Table Mapped to Address 0 at Reset
                AREA    RESET, DATA, READONLY
                EXPORT  __Vectors
                EXPORT  __Vectors_End
                EXPORT  __Vectors_Size

__Vectors       DCD     __initial_sp               ; Top of Stack
                DCD     Reset_Handler              ; Reset Handler
                DCD     NMI_Handler                ; NMI Handler
                DCD     HardFault_Handler          ; Hard Fault Handler
                DCD     MemManage_Handler          ; MPU Fault Handler
                DCD     BusFault_Handler           ; Bus Fault Handler
                DCD     UsageFault_Handler         ; Usage Fault Handler
                DCD     0                          ; Reserved
                DCD     0                          ; Reserved
                DCD     0                          ; Reserved
                DCD     0                          ; Reserved
                DCD     SVC_Handler                ; SVCall Handler
                DCD     DebugMon_Handler           ; Debug Monitor Handler
                DCD     0                          ; Reserved
                DCD     PendSV_Handler             ; PendSV Handler
                DCD     SysTick_Handler            ; SysTick Handler

                ; External Interrupts
                DCD     WWDG_IRQHandler            ; Window Watchdog
                DCD     PVD_IRQHandler             ; PVD through EXTI Line detect
                DCD     TAMPER_IRQHandler          ; Tamper
                DCD     RTC_IRQHandler             ; RTC
                DCD     FLASH_IRQHandler           ; Flash
                DCD     RCC_IRQHandler             ; RCC
                DCD     EXTI0_IRQHandler           ; EXTI Line 0
                DCD     EXTI1_IRQHandler           ; EXTI Line 1
                DCD     EXTI2_IRQHandler           ; EXTI Line 2
                DCD     EXTI3_IRQHandler           ; EXTI Line 3
                DCD     EXTI4_IRQHandler           ; EXTI Line 4
                DCD     DMA1_Channel1_IRQHandler   ; DMA1 Channel 1
                DCD     DMA1_Channel2_IRQHandler   ; DMA1 Channel 2
                DCD     DMA1_Channel3_IRQHandler   ; DMA1 Channel 3
                DCD     DMA1_Channel4_IRQHandler   ; DMA1 Channel 4
                DCD     DMA1_Channel5_IRQHandler   ; DMA1 Channel 5
                DCD     DMA1_Channel6_IRQHandler   ; DMA1 Channel 6
                DCD     DMA1_Channel7_IRQHandler   ; DMA1 Channel 7
                DCD     ADC1_2_IRQHandler          ; ADC1_2
                DCD     USB_HP_CAN1_TX_IRQHandler  ; USB High Priority or CAN1 TX
                DCD     USB_LP_CAN1_RX0_IRQHandler ; USB Low Priority or CAN1 RX0
                DCD     CAN1_RX1_IRQHandler        ; CAN1 RX1
                DCD     CAN1_SCE_IRQHandler        ; CAN1 SCE
                DCD     EXTI9_5_IRQHandler         ; EXTI Line 9..5
                DCD     TIM1_BRK_IRQHandler        ; TIM1 Break
                DCD     TIM1_UP_IRQHandler         ; TIM1 Update
                DCD     TIM1_TRG_COM_IRQHandler    ; TIM1 Trigger and Commutation
                DCD     TIM1_CC_IRQHandler         ; TIM1 Capture Compare
                DCD     TIM2_IRQHandler            ; TIM2
                DCD     TIM3_IRQHandler            ; TIM3
                DCD     TIM4_IRQHandler            ; TIM4
                DCD     I2C1_EV_IRQHandler         ; I2C1 Event
                DCD     I2C1_ER_IRQHandler         ; I2C1 Error
                DCD     I2C2_EV_IRQHandler         ; I2C2 Event
                DCD     I2C2_ER_IRQHandler         ; I2C2 Error
                DCD     SPI1_IRQHandler            ; SPI1
                DCD     SPI2_IRQHandler            ; SPI2
                DCD     USART1_IRQHandler          ; USART1
                DCD     USART2_IRQHandler          ; USART2
                DCD     USART3_IRQHandler          ; USART3
                DCD     EXTI15_10_IRQHandler       ; EXTI Line 15..10
                DCD     RTCAlarm_IRQHandler        ; RTC Alarm through EXTI Line
                DCD     USBWakeUp_IRQHandler       ; USB Wakeup from suspend
__Vectors_End

__Vectors_Size  EQU  __Vectors_End - __Vectors
                AREA    |.text|, CODE, READONLY

; Reset handler
; Calls CMSIS SystemInit (clock setup), then jumps to the C library
; entry __main, which zero-fills/initialises data and calls main().
Reset_Handler   PROC
                EXPORT  Reset_Handler              [WEAK]
                IMPORT  __main
                IMPORT  SystemInit
                LDR     R0, =SystemInit
                BLX     R0                         ; configure clocks first
                LDR     R0, =__main
                BX      R0                         ; never returns
                ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
; All are WEAK so application code can override them by defining a
; function of the same name.

NMI_Handler     PROC
                EXPORT  NMI_Handler                [WEAK]
                B       .
                ENDP
HardFault_Handler\
                PROC
                EXPORT  HardFault_Handler          [WEAK]
                B       .
                ENDP
MemManage_Handler\
                PROC
                EXPORT  MemManage_Handler          [WEAK]
                B       .
                ENDP
BusFault_Handler\
                PROC
                EXPORT  BusFault_Handler           [WEAK]
                B       .
                ENDP
UsageFault_Handler\
                PROC
                EXPORT  UsageFault_Handler         [WEAK]
                B       .
                ENDP
SVC_Handler     PROC
                EXPORT  SVC_Handler                [WEAK]
                B       .
                ENDP
DebugMon_Handler\
                PROC
                EXPORT  DebugMon_Handler           [WEAK]
                B       .
                ENDP
PendSV_Handler  PROC
                EXPORT  PendSV_Handler             [WEAK]
                B       .
                ENDP
SysTick_Handler PROC
                EXPORT  SysTick_Handler            [WEAK]
                B       .
                ENDP
; Default_Handler - shared WEAK target for every peripheral IRQ: all the
; labels below alias the same infinite loop until the application
; provides a real handler of the same name.
Default_Handler PROC

                EXPORT  WWDG_IRQHandler            [WEAK]
                EXPORT  PVD_IRQHandler             [WEAK]
                EXPORT  TAMPER_IRQHandler          [WEAK]
                EXPORT  RTC_IRQHandler             [WEAK]
                EXPORT  FLASH_IRQHandler           [WEAK]
                EXPORT  RCC_IRQHandler             [WEAK]
                EXPORT  EXTI0_IRQHandler           [WEAK]
                EXPORT  EXTI1_IRQHandler           [WEAK]
                EXPORT  EXTI2_IRQHandler           [WEAK]
                EXPORT  EXTI3_IRQHandler           [WEAK]
                EXPORT  EXTI4_IRQHandler           [WEAK]
                EXPORT  DMA1_Channel1_IRQHandler   [WEAK]
                EXPORT  DMA1_Channel2_IRQHandler   [WEAK]
                EXPORT  DMA1_Channel3_IRQHandler   [WEAK]
                EXPORT  DMA1_Channel4_IRQHandler   [WEAK]
                EXPORT  DMA1_Channel5_IRQHandler   [WEAK]
                EXPORT  DMA1_Channel6_IRQHandler   [WEAK]
                EXPORT  DMA1_Channel7_IRQHandler   [WEAK]
                EXPORT  ADC1_2_IRQHandler          [WEAK]
                EXPORT  USB_HP_CAN1_TX_IRQHandler  [WEAK]
                EXPORT  USB_LP_CAN1_RX0_IRQHandler [WEAK]
                EXPORT  CAN1_RX1_IRQHandler        [WEAK]
                EXPORT  CAN1_SCE_IRQHandler        [WEAK]
                EXPORT  EXTI9_5_IRQHandler         [WEAK]
                EXPORT  TIM1_BRK_IRQHandler        [WEAK]
                EXPORT  TIM1_UP_IRQHandler         [WEAK]
                EXPORT  TIM1_TRG_COM_IRQHandler    [WEAK]
                EXPORT  TIM1_CC_IRQHandler         [WEAK]
                EXPORT  TIM2_IRQHandler            [WEAK]
                EXPORT  TIM3_IRQHandler            [WEAK]
                EXPORT  TIM4_IRQHandler            [WEAK]
                EXPORT  I2C1_EV_IRQHandler         [WEAK]
                EXPORT  I2C1_ER_IRQHandler         [WEAK]
                EXPORT  I2C2_EV_IRQHandler         [WEAK]
                EXPORT  I2C2_ER_IRQHandler         [WEAK]
                EXPORT  SPI1_IRQHandler            [WEAK]
                EXPORT  SPI2_IRQHandler            [WEAK]
                EXPORT  USART1_IRQHandler          [WEAK]
                EXPORT  USART2_IRQHandler          [WEAK]
                EXPORT  USART3_IRQHandler          [WEAK]
                EXPORT  EXTI15_10_IRQHandler       [WEAK]
                EXPORT  RTCAlarm_IRQHandler        [WEAK]
                EXPORT  USBWakeUp_IRQHandler       [WEAK]

WWDG_IRQHandler
PVD_IRQHandler
TAMPER_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_IRQHandler
EXTI0_IRQHandler
EXTI1_IRQHandler
EXTI2_IRQHandler
EXTI3_IRQHandler
EXTI4_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_IRQHandler
DMA1_Channel3_IRQHandler
DMA1_Channel4_IRQHandler
DMA1_Channel5_IRQHandler
DMA1_Channel6_IRQHandler
DMA1_Channel7_IRQHandler
ADC1_2_IRQHandler
USB_HP_CAN1_TX_IRQHandler
USB_LP_CAN1_RX0_IRQHandler
CAN1_RX1_IRQHandler
CAN1_SCE_IRQHandler
EXTI9_5_IRQHandler
TIM1_BRK_IRQHandler
TIM1_UP_IRQHandler
TIM1_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM4_IRQHandler
I2C1_EV_IRQHandler
I2C1_ER_IRQHandler
I2C2_EV_IRQHandler
I2C2_ER_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_IRQHandler
EXTI15_10_IRQHandler
RTCAlarm_IRQHandler
USBWakeUp_IRQHandler
                B       .                          ; spin forever

                ENDP
                ALIGN

;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
; With MicroLIB the linker just needs the three symbols; otherwise supply
; the legacy two-region callback: returns heap base/limit in R0/R2 and
; stack top/limit in R1/R3.
                 IF      :DEF:__MICROLIB

                 EXPORT  __initial_sp
                 EXPORT  __heap_base
                 EXPORT  __heap_limit

                 ELSE

                 IMPORT  __use_two_region_memory
                 EXPORT  __user_initial_stackheap

__user_initial_stackheap

                 LDR     R0, =  Heap_Mem
                 LDR     R1, =(Stack_Mem + Stack_Size)
                 LDR     R2, = (Heap_Mem +  Heap_Size)
                 LDR     R3, = Stack_Mem
                 BX      LR

                 ALIGN

                 ENDIF

                 END

;******************* (C) COPYRIGHT 2011 STMicroelectronics *****END OF FILE*****
|
AAAAyl0n/Lambda0
| 15,145
|
1.Hardware/Screen/1.14寸方形屏/03-程序源码/11-1.14IPS显示屏STM32F103硬件SPI+DMA例程/CORE/startup_stm32f10x_hd.s
|
;******************** (C) COPYRIGHT 2011 STMicroelectronics ********************
;* File Name : startup_stm32f10x_hd.s
;* Author : MCD Application Team
;* Version : V3.5.0
;* Date : 11-March-2011
;* Description : STM32F10x High Density Devices vector table for MDK-ARM
;* toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Configure the clock system and also configure the external
;* SRAM mounted on STM3210E-EVAL board to be used as data
;* memory (optional, to be enabled by user)
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM3 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
; THE PRESENT FIRMWARE WHICH IS FOR GUIDANCE ONLY AIMS AT PROVIDING CUSTOMERS
; WITH CODING INFORMATION REGARDING THEIR PRODUCTS IN ORDER FOR THEM TO SAVE TIME.
; AS A RESULT, STMICROELECTRONICS SHALL NOT BE HELD LIABLE FOR ANY DIRECT,
; INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING FROM THE
; CONTENT OF SUCH FIRMWARE AND/OR THE USE MADE BY CUSTOMERS OF THE CODING
; INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS.
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; Main stack: 1 KiB, zero-init, 8-byte aligned; __initial_sp labels its
; top and becomes vector 0.
Stack_Size      EQU     0x00000400

                AREA    STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem       SPACE   Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; C-library heap: 512 bytes, bounded by __heap_base/__heap_limit.
Heap_Size       EQU     0x00000200

                AREA    HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem        SPACE   Heap_Size
__heap_limit
                PRESERVE8
                THUMB

; Cortex-M3 exception + IRQ vector table for STM32F10x high-density
; parts (extends the medium-density table with TIM8/ADC3/FSMC/SDIO/
; TIM5-7/SPI3/UART4-5/DMA2 entries). Word 0 is the initial MSP, word 1
; the reset vector.
; Vector Table Mapped to Address 0 at Reset
                AREA    RESET, DATA, READONLY
                EXPORT  __Vectors
                EXPORT  __Vectors_End
                EXPORT  __Vectors_Size

__Vectors       DCD     __initial_sp               ; Top of Stack
                DCD     Reset_Handler              ; Reset Handler
                DCD     NMI_Handler                ; NMI Handler
                DCD     HardFault_Handler          ; Hard Fault Handler
                DCD     MemManage_Handler          ; MPU Fault Handler
                DCD     BusFault_Handler           ; Bus Fault Handler
                DCD     UsageFault_Handler         ; Usage Fault Handler
                DCD     0                          ; Reserved
                DCD     0                          ; Reserved
                DCD     0                          ; Reserved
                DCD     0                          ; Reserved
                DCD     SVC_Handler                ; SVCall Handler
                DCD     DebugMon_Handler           ; Debug Monitor Handler
                DCD     0                          ; Reserved
                DCD     PendSV_Handler             ; PendSV Handler
                DCD     SysTick_Handler            ; SysTick Handler

                ; External Interrupts
                DCD     WWDG_IRQHandler            ; Window Watchdog
                DCD     PVD_IRQHandler             ; PVD through EXTI Line detect
                DCD     TAMPER_IRQHandler          ; Tamper
                DCD     RTC_IRQHandler             ; RTC
                DCD     FLASH_IRQHandler           ; Flash
                DCD     RCC_IRQHandler             ; RCC
                DCD     EXTI0_IRQHandler           ; EXTI Line 0
                DCD     EXTI1_IRQHandler           ; EXTI Line 1
                DCD     EXTI2_IRQHandler           ; EXTI Line 2
                DCD     EXTI3_IRQHandler           ; EXTI Line 3
                DCD     EXTI4_IRQHandler           ; EXTI Line 4
                DCD     DMA1_Channel1_IRQHandler   ; DMA1 Channel 1
                DCD     DMA1_Channel2_IRQHandler   ; DMA1 Channel 2
                DCD     DMA1_Channel3_IRQHandler   ; DMA1 Channel 3
                DCD     DMA1_Channel4_IRQHandler   ; DMA1 Channel 4
                DCD     DMA1_Channel5_IRQHandler   ; DMA1 Channel 5
                DCD     DMA1_Channel6_IRQHandler   ; DMA1 Channel 6
                DCD     DMA1_Channel7_IRQHandler   ; DMA1 Channel 7
                DCD     ADC1_2_IRQHandler          ; ADC1 & ADC2
                DCD     USB_HP_CAN1_TX_IRQHandler  ; USB High Priority or CAN1 TX
                DCD     USB_LP_CAN1_RX0_IRQHandler ; USB Low Priority or CAN1 RX0
                DCD     CAN1_RX1_IRQHandler        ; CAN1 RX1
                DCD     CAN1_SCE_IRQHandler        ; CAN1 SCE
                DCD     EXTI9_5_IRQHandler         ; EXTI Line 9..5
                DCD     TIM1_BRK_IRQHandler        ; TIM1 Break
                DCD     TIM1_UP_IRQHandler         ; TIM1 Update
                DCD     TIM1_TRG_COM_IRQHandler    ; TIM1 Trigger and Commutation
                DCD     TIM1_CC_IRQHandler         ; TIM1 Capture Compare
                DCD     TIM2_IRQHandler            ; TIM2
                DCD     TIM3_IRQHandler            ; TIM3
                DCD     TIM4_IRQHandler            ; TIM4
                DCD     I2C1_EV_IRQHandler         ; I2C1 Event
                DCD     I2C1_ER_IRQHandler         ; I2C1 Error
                DCD     I2C2_EV_IRQHandler         ; I2C2 Event
                DCD     I2C2_ER_IRQHandler         ; I2C2 Error
                DCD     SPI1_IRQHandler            ; SPI1
                DCD     SPI2_IRQHandler            ; SPI2
                DCD     USART1_IRQHandler          ; USART1
                DCD     USART2_IRQHandler          ; USART2
                DCD     USART3_IRQHandler          ; USART3
                DCD     EXTI15_10_IRQHandler       ; EXTI Line 15..10
                DCD     RTCAlarm_IRQHandler        ; RTC Alarm through EXTI Line
                DCD     USBWakeUp_IRQHandler       ; USB Wakeup from suspend
                DCD     TIM8_BRK_IRQHandler        ; TIM8 Break
                DCD     TIM8_UP_IRQHandler         ; TIM8 Update
                DCD     TIM8_TRG_COM_IRQHandler    ; TIM8 Trigger and Commutation
                DCD     TIM8_CC_IRQHandler         ; TIM8 Capture Compare
                DCD     ADC3_IRQHandler            ; ADC3
                DCD     FSMC_IRQHandler            ; FSMC
                DCD     SDIO_IRQHandler            ; SDIO
                DCD     TIM5_IRQHandler            ; TIM5
                DCD     SPI3_IRQHandler            ; SPI3
                DCD     UART4_IRQHandler           ; UART4
                DCD     UART5_IRQHandler           ; UART5
                DCD     TIM6_IRQHandler            ; TIM6
                DCD     TIM7_IRQHandler            ; TIM7
                DCD     DMA2_Channel1_IRQHandler   ; DMA2 Channel1
                DCD     DMA2_Channel2_IRQHandler   ; DMA2 Channel2
                DCD     DMA2_Channel3_IRQHandler   ; DMA2 Channel3
                DCD     DMA2_Channel4_5_IRQHandler ; DMA2 Channel4 & Channel5
__Vectors_End

__Vectors_Size  EQU  __Vectors_End - __Vectors
                AREA    |.text|, CODE, READONLY

; Reset handler
; Calls CMSIS SystemInit (clock setup), then jumps to the C library
; entry __main, which initialises data/bss and calls main().
Reset_Handler   PROC
                EXPORT  Reset_Handler              [WEAK]
                IMPORT  __main
                IMPORT  SystemInit
                LDR     R0, =SystemInit
                BLX     R0                         ; configure clocks first
                LDR     R0, =__main
                BX      R0                         ; never returns
                ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
; All are WEAK so application code can override them by defining a
; function of the same name.

NMI_Handler     PROC
                EXPORT  NMI_Handler                [WEAK]
                B       .
                ENDP
HardFault_Handler\
                PROC
                EXPORT  HardFault_Handler          [WEAK]
                B       .
                ENDP
MemManage_Handler\
                PROC
                EXPORT  MemManage_Handler          [WEAK]
                B       .
                ENDP
BusFault_Handler\
                PROC
                EXPORT  BusFault_Handler           [WEAK]
                B       .
                ENDP
UsageFault_Handler\
                PROC
                EXPORT  UsageFault_Handler         [WEAK]
                B       .
                ENDP
SVC_Handler     PROC
                EXPORT  SVC_Handler                [WEAK]
                B       .
                ENDP
DebugMon_Handler\
                PROC
                EXPORT  DebugMon_Handler           [WEAK]
                B       .
                ENDP
PendSV_Handler  PROC
                EXPORT  PendSV_Handler             [WEAK]
                B       .
                ENDP
SysTick_Handler PROC
                EXPORT  SysTick_Handler            [WEAK]
                B       .
                ENDP
; Default_Handler - shared WEAK target for every peripheral IRQ: all the
; labels below alias the same infinite loop until the application
; provides a real handler of the same name.
Default_Handler PROC

                EXPORT  WWDG_IRQHandler            [WEAK]
                EXPORT  PVD_IRQHandler             [WEAK]
                EXPORT  TAMPER_IRQHandler          [WEAK]
                EXPORT  RTC_IRQHandler             [WEAK]
                EXPORT  FLASH_IRQHandler           [WEAK]
                EXPORT  RCC_IRQHandler             [WEAK]
                EXPORT  EXTI0_IRQHandler           [WEAK]
                EXPORT  EXTI1_IRQHandler           [WEAK]
                EXPORT  EXTI2_IRQHandler           [WEAK]
                EXPORT  EXTI3_IRQHandler           [WEAK]
                EXPORT  EXTI4_IRQHandler           [WEAK]
                EXPORT  DMA1_Channel1_IRQHandler   [WEAK]
                EXPORT  DMA1_Channel2_IRQHandler   [WEAK]
                EXPORT  DMA1_Channel3_IRQHandler   [WEAK]
                EXPORT  DMA1_Channel4_IRQHandler   [WEAK]
                EXPORT  DMA1_Channel5_IRQHandler   [WEAK]
                EXPORT  DMA1_Channel6_IRQHandler   [WEAK]
                EXPORT  DMA1_Channel7_IRQHandler   [WEAK]
                EXPORT  ADC1_2_IRQHandler          [WEAK]
                EXPORT  USB_HP_CAN1_TX_IRQHandler  [WEAK]
                EXPORT  USB_LP_CAN1_RX0_IRQHandler [WEAK]
                EXPORT  CAN1_RX1_IRQHandler        [WEAK]
                EXPORT  CAN1_SCE_IRQHandler        [WEAK]
                EXPORT  EXTI9_5_IRQHandler         [WEAK]
                EXPORT  TIM1_BRK_IRQHandler        [WEAK]
                EXPORT  TIM1_UP_IRQHandler         [WEAK]
                EXPORT  TIM1_TRG_COM_IRQHandler    [WEAK]
                EXPORT  TIM1_CC_IRQHandler         [WEAK]
                EXPORT  TIM2_IRQHandler            [WEAK]
                EXPORT  TIM3_IRQHandler            [WEAK]
                EXPORT  TIM4_IRQHandler            [WEAK]
                EXPORT  I2C1_EV_IRQHandler         [WEAK]
                EXPORT  I2C1_ER_IRQHandler         [WEAK]
                EXPORT  I2C2_EV_IRQHandler         [WEAK]
                EXPORT  I2C2_ER_IRQHandler         [WEAK]
                EXPORT  SPI1_IRQHandler            [WEAK]
                EXPORT  SPI2_IRQHandler            [WEAK]
                EXPORT  USART1_IRQHandler          [WEAK]
                EXPORT  USART2_IRQHandler          [WEAK]
                EXPORT  USART3_IRQHandler          [WEAK]
                EXPORT  EXTI15_10_IRQHandler       [WEAK]
                EXPORT  RTCAlarm_IRQHandler        [WEAK]
                EXPORT  USBWakeUp_IRQHandler       [WEAK]
                EXPORT  TIM8_BRK_IRQHandler        [WEAK]
                EXPORT  TIM8_UP_IRQHandler         [WEAK]
                EXPORT  TIM8_TRG_COM_IRQHandler    [WEAK]
                EXPORT  TIM8_CC_IRQHandler         [WEAK]
                EXPORT  ADC3_IRQHandler            [WEAK]
                EXPORT  FSMC_IRQHandler            [WEAK]
                EXPORT  SDIO_IRQHandler            [WEAK]
                EXPORT  TIM5_IRQHandler            [WEAK]
                EXPORT  SPI3_IRQHandler            [WEAK]
                EXPORT  UART4_IRQHandler           [WEAK]
                EXPORT  UART5_IRQHandler           [WEAK]
                EXPORT  TIM6_IRQHandler            [WEAK]
                EXPORT  TIM7_IRQHandler            [WEAK]
                EXPORT  DMA2_Channel1_IRQHandler   [WEAK]
                EXPORT  DMA2_Channel2_IRQHandler   [WEAK]
                EXPORT  DMA2_Channel3_IRQHandler   [WEAK]
                EXPORT  DMA2_Channel4_5_IRQHandler [WEAK]

WWDG_IRQHandler
PVD_IRQHandler
TAMPER_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_IRQHandler
EXTI0_IRQHandler
EXTI1_IRQHandler
EXTI2_IRQHandler
EXTI3_IRQHandler
EXTI4_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_IRQHandler
DMA1_Channel3_IRQHandler
DMA1_Channel4_IRQHandler
DMA1_Channel5_IRQHandler
DMA1_Channel6_IRQHandler
DMA1_Channel7_IRQHandler
ADC1_2_IRQHandler
USB_HP_CAN1_TX_IRQHandler
USB_LP_CAN1_RX0_IRQHandler
CAN1_RX1_IRQHandler
CAN1_SCE_IRQHandler
EXTI9_5_IRQHandler
TIM1_BRK_IRQHandler
TIM1_UP_IRQHandler
TIM1_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM4_IRQHandler
I2C1_EV_IRQHandler
I2C1_ER_IRQHandler
I2C2_EV_IRQHandler
I2C2_ER_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_IRQHandler
EXTI15_10_IRQHandler
RTCAlarm_IRQHandler
USBWakeUp_IRQHandler
TIM8_BRK_IRQHandler
TIM8_UP_IRQHandler
TIM8_TRG_COM_IRQHandler
TIM8_CC_IRQHandler
ADC3_IRQHandler
FSMC_IRQHandler
SDIO_IRQHandler
TIM5_IRQHandler
SPI3_IRQHandler
UART4_IRQHandler
UART5_IRQHandler
TIM6_IRQHandler
TIM7_IRQHandler
DMA2_Channel1_IRQHandler
DMA2_Channel2_IRQHandler
DMA2_Channel3_IRQHandler
DMA2_Channel4_5_IRQHandler
                B       .                          ; spin forever

                ENDP
                ALIGN

;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
; With MicroLIB the linker just needs the three symbols; otherwise supply
; the legacy two-region callback: returns heap base/limit in R0/R2 and
; stack top/limit in R1/R3.
                 IF      :DEF:__MICROLIB

                 EXPORT  __initial_sp
                 EXPORT  __heap_base
                 EXPORT  __heap_limit

                 ELSE

                 IMPORT  __use_two_region_memory
                 EXPORT  __user_initial_stackheap

__user_initial_stackheap

                 LDR     R0, =  Heap_Mem
                 LDR     R1, =(Stack_Mem + Stack_Size)
                 LDR     R2, = (Heap_Mem +  Heap_Size)
                 LDR     R3, = Stack_Mem
                 BX      LR

                 ALIGN

                 ENDIF

                 END

;******************* (C) COPYRIGHT 2011 STMicroelectronics *****END OF FILE*****
|
AAAAyl0n/Lambda0
| 29,171
|
1.Hardware/Screen/1.14寸方形屏/03-程序源码/05-1.14IPS显示屏STM32F407ZG_SPI例程/CORE/startup_stm32f40_41xxx.s
|
;******************** (C) COPYRIGHT 2014 STMicroelectronics ********************
;* File Name : startup_stm32f40_41xxx.s
;* Author : MCD Application Team
;* @version : V1.4.0
;* @date : 04-August-2014
;* Description : STM32F40xxx/41xxx devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Configure the system clock and the external SRAM mounted on
;* STM324xG-EVAL board to be used as data memory (optional,
;* to be enabled by user)
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM4 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
;
; Licensed under MCD-ST Liberty SW License Agreement V2, (the "License");
; You may not use this file except in compliance with the License.
; You may obtain a copy of the License at:
;
; http://www.st.com/software_license_agreement_liberty_v2
;
; Unless required by applicable law or agreed to in writing, software
; distributed under the License is distributed on an "AS IS" BASIS,
; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; See the License for the specific language governing permissions and
; limitations under the License.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; Main stack: 1 KiB, zero-init, 8-byte aligned; __initial_sp labels its
; top and becomes vector 0.
Stack_Size      EQU     0x00000400

                AREA    STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem       SPACE   Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; C-library heap: 512 bytes, bounded by __heap_base/__heap_limit.
Heap_Size       EQU     0x00000200

                AREA    HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem        SPACE   Heap_Size
__heap_limit

                PRESERVE8
                THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD MemManage_Handler ; MPU Fault Handler
DCD BusFault_Handler ; Bus Fault Handler
DCD UsageFault_Handler ; Usage Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD DebugMon_Handler ; Debug Monitor Handler
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window WatchDog
DCD PVD_IRQHandler ; PVD through EXTI Line detection
DCD TAMP_STAMP_IRQHandler ; Tamper and TimeStamps through the EXTI line
DCD RTC_WKUP_IRQHandler ; RTC Wakeup through the EXTI line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_IRQHandler ; RCC
DCD EXTI0_IRQHandler ; EXTI Line0
DCD EXTI1_IRQHandler ; EXTI Line1
DCD EXTI2_IRQHandler ; EXTI Line2
DCD EXTI3_IRQHandler ; EXTI Line3
DCD EXTI4_IRQHandler ; EXTI Line4
DCD DMA1_Stream0_IRQHandler ; DMA1 Stream 0
DCD DMA1_Stream1_IRQHandler ; DMA1 Stream 1
DCD DMA1_Stream2_IRQHandler ; DMA1 Stream 2
DCD DMA1_Stream3_IRQHandler ; DMA1 Stream 3
DCD DMA1_Stream4_IRQHandler ; DMA1 Stream 4
DCD DMA1_Stream5_IRQHandler ; DMA1 Stream 5
DCD DMA1_Stream6_IRQHandler ; DMA1 Stream 6
DCD ADC_IRQHandler ; ADC1, ADC2 and ADC3s
DCD CAN1_TX_IRQHandler ; CAN1 TX
DCD CAN1_RX0_IRQHandler ; CAN1 RX0
DCD CAN1_RX1_IRQHandler ; CAN1 RX1
DCD CAN1_SCE_IRQHandler ; CAN1 SCE
DCD EXTI9_5_IRQHandler ; External Line[9:5]s
DCD TIM1_BRK_TIM9_IRQHandler ; TIM1 Break and TIM9
DCD TIM1_UP_TIM10_IRQHandler ; TIM1 Update and TIM10
DCD TIM1_TRG_COM_TIM11_IRQHandler ; TIM1 Trigger and Commutation and TIM11
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM4_IRQHandler ; TIM4
DCD I2C1_EV_IRQHandler ; I2C1 Event
DCD I2C1_ER_IRQHandler ; I2C1 Error
DCD I2C2_EV_IRQHandler ; I2C2 Event
DCD I2C2_ER_IRQHandler ; I2C2 Error
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_IRQHandler ; USART3
DCD EXTI15_10_IRQHandler ; External Line[15:10]s
DCD RTC_Alarm_IRQHandler ; RTC Alarm (A and B) through EXTI Line
DCD OTG_FS_WKUP_IRQHandler ; USB OTG FS Wakeup through EXTI line
DCD TIM8_BRK_TIM12_IRQHandler ; TIM8 Break and TIM12
DCD TIM8_UP_TIM13_IRQHandler ; TIM8 Update and TIM13
DCD TIM8_TRG_COM_TIM14_IRQHandler ; TIM8 Trigger and Commutation and TIM14
DCD TIM8_CC_IRQHandler ; TIM8 Capture Compare
DCD DMA1_Stream7_IRQHandler ; DMA1 Stream7
DCD FSMC_IRQHandler ; FSMC
DCD SDIO_IRQHandler ; SDIO
DCD TIM5_IRQHandler ; TIM5
DCD SPI3_IRQHandler ; SPI3
DCD UART4_IRQHandler ; UART4
DCD UART5_IRQHandler ; UART5
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC1&2 underrun errors
DCD TIM7_IRQHandler ; TIM7
DCD DMA2_Stream0_IRQHandler ; DMA2 Stream 0
DCD DMA2_Stream1_IRQHandler ; DMA2 Stream 1
DCD DMA2_Stream2_IRQHandler ; DMA2 Stream 2
DCD DMA2_Stream3_IRQHandler ; DMA2 Stream 3
DCD DMA2_Stream4_IRQHandler ; DMA2 Stream 4
DCD ETH_IRQHandler ; Ethernet
DCD ETH_WKUP_IRQHandler ; Ethernet Wakeup through EXTI line
DCD CAN2_TX_IRQHandler ; CAN2 TX
DCD CAN2_RX0_IRQHandler ; CAN2 RX0
DCD CAN2_RX1_IRQHandler ; CAN2 RX1
DCD CAN2_SCE_IRQHandler ; CAN2 SCE
DCD OTG_FS_IRQHandler ; USB OTG FS
DCD DMA2_Stream5_IRQHandler ; DMA2 Stream 5
DCD DMA2_Stream6_IRQHandler ; DMA2 Stream 6
DCD DMA2_Stream7_IRQHandler ; DMA2 Stream 7
DCD USART6_IRQHandler ; USART6
DCD I2C3_EV_IRQHandler ; I2C3 event
DCD I2C3_ER_IRQHandler ; I2C3 error
DCD OTG_HS_EP1_OUT_IRQHandler ; USB OTG HS End Point 1 Out
DCD OTG_HS_EP1_IN_IRQHandler ; USB OTG HS End Point 1 In
DCD OTG_HS_WKUP_IRQHandler ; USB OTG HS Wakeup through EXTI
DCD OTG_HS_IRQHandler ; USB OTG HS
DCD DCMI_IRQHandler ; DCMI
DCD CRYP_IRQHandler ; CRYP crypto
DCD HASH_RNG_IRQHandler ; Hash and Rng
DCD FPU_IRQHandler ; FPU
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT SystemInit
IMPORT __main
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
MemManage_Handler\
PROC
EXPORT MemManage_Handler [WEAK]
B .
ENDP
BusFault_Handler\
PROC
EXPORT BusFault_Handler [WEAK]
B .
ENDP
UsageFault_Handler\
PROC
EXPORT UsageFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
DebugMon_Handler\
PROC
EXPORT DebugMon_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_IRQHandler [WEAK]
EXPORT TAMP_STAMP_IRQHandler [WEAK]
EXPORT RTC_WKUP_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_IRQHandler [WEAK]
EXPORT EXTI0_IRQHandler [WEAK]
EXPORT EXTI1_IRQHandler [WEAK]
EXPORT EXTI2_IRQHandler [WEAK]
EXPORT EXTI3_IRQHandler [WEAK]
EXPORT EXTI4_IRQHandler [WEAK]
EXPORT DMA1_Stream0_IRQHandler [WEAK]
EXPORT DMA1_Stream1_IRQHandler [WEAK]
EXPORT DMA1_Stream2_IRQHandler [WEAK]
EXPORT DMA1_Stream3_IRQHandler [WEAK]
EXPORT DMA1_Stream4_IRQHandler [WEAK]
EXPORT DMA1_Stream5_IRQHandler [WEAK]
EXPORT DMA1_Stream6_IRQHandler [WEAK]
EXPORT ADC_IRQHandler [WEAK]
EXPORT CAN1_TX_IRQHandler [WEAK]
EXPORT CAN1_RX0_IRQHandler [WEAK]
EXPORT CAN1_RX1_IRQHandler [WEAK]
EXPORT CAN1_SCE_IRQHandler [WEAK]
EXPORT EXTI9_5_IRQHandler [WEAK]
EXPORT TIM1_BRK_TIM9_IRQHandler [WEAK]
EXPORT TIM1_UP_TIM10_IRQHandler [WEAK]
EXPORT TIM1_TRG_COM_TIM11_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM4_IRQHandler [WEAK]
EXPORT I2C1_EV_IRQHandler [WEAK]
EXPORT I2C1_ER_IRQHandler [WEAK]
EXPORT I2C2_EV_IRQHandler [WEAK]
EXPORT I2C2_ER_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_IRQHandler [WEAK]
EXPORT EXTI15_10_IRQHandler [WEAK]
EXPORT RTC_Alarm_IRQHandler [WEAK]
EXPORT OTG_FS_WKUP_IRQHandler [WEAK]
EXPORT TIM8_BRK_TIM12_IRQHandler [WEAK]
EXPORT TIM8_UP_TIM13_IRQHandler [WEAK]
EXPORT TIM8_TRG_COM_TIM14_IRQHandler [WEAK]
EXPORT TIM8_CC_IRQHandler [WEAK]
EXPORT DMA1_Stream7_IRQHandler [WEAK]
EXPORT FSMC_IRQHandler [WEAK]
EXPORT SDIO_IRQHandler [WEAK]
EXPORT TIM5_IRQHandler [WEAK]
EXPORT SPI3_IRQHandler [WEAK]
EXPORT UART4_IRQHandler [WEAK]
EXPORT UART5_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT DMA2_Stream0_IRQHandler [WEAK]
EXPORT DMA2_Stream1_IRQHandler [WEAK]
EXPORT DMA2_Stream2_IRQHandler [WEAK]
EXPORT DMA2_Stream3_IRQHandler [WEAK]
EXPORT DMA2_Stream4_IRQHandler [WEAK]
EXPORT ETH_IRQHandler [WEAK]
EXPORT ETH_WKUP_IRQHandler [WEAK]
EXPORT CAN2_TX_IRQHandler [WEAK]
EXPORT CAN2_RX0_IRQHandler [WEAK]
EXPORT CAN2_RX1_IRQHandler [WEAK]
EXPORT CAN2_SCE_IRQHandler [WEAK]
EXPORT OTG_FS_IRQHandler [WEAK]
EXPORT DMA2_Stream5_IRQHandler [WEAK]
EXPORT DMA2_Stream6_IRQHandler [WEAK]
EXPORT DMA2_Stream7_IRQHandler [WEAK]
EXPORT USART6_IRQHandler [WEAK]
EXPORT I2C3_EV_IRQHandler [WEAK]
EXPORT I2C3_ER_IRQHandler [WEAK]
EXPORT OTG_HS_EP1_OUT_IRQHandler [WEAK]
EXPORT OTG_HS_EP1_IN_IRQHandler [WEAK]
EXPORT OTG_HS_WKUP_IRQHandler [WEAK]
EXPORT OTG_HS_IRQHandler [WEAK]
EXPORT DCMI_IRQHandler [WEAK]
EXPORT CRYP_IRQHandler [WEAK]
EXPORT HASH_RNG_IRQHandler [WEAK]
EXPORT FPU_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_IRQHandler
TAMP_STAMP_IRQHandler
RTC_WKUP_IRQHandler
FLASH_IRQHandler
RCC_IRQHandler
EXTI0_IRQHandler
EXTI1_IRQHandler
EXTI2_IRQHandler
EXTI3_IRQHandler
EXTI4_IRQHandler
DMA1_Stream0_IRQHandler
DMA1_Stream1_IRQHandler
DMA1_Stream2_IRQHandler
DMA1_Stream3_IRQHandler
DMA1_Stream4_IRQHandler
DMA1_Stream5_IRQHandler
DMA1_Stream6_IRQHandler
ADC_IRQHandler
CAN1_TX_IRQHandler
CAN1_RX0_IRQHandler
CAN1_RX1_IRQHandler
CAN1_SCE_IRQHandler
EXTI9_5_IRQHandler
TIM1_BRK_TIM9_IRQHandler
TIM1_UP_TIM10_IRQHandler
TIM1_TRG_COM_TIM11_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM4_IRQHandler
I2C1_EV_IRQHandler
I2C1_ER_IRQHandler
I2C2_EV_IRQHandler
I2C2_ER_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_IRQHandler
EXTI15_10_IRQHandler
RTC_Alarm_IRQHandler
OTG_FS_WKUP_IRQHandler
TIM8_BRK_TIM12_IRQHandler
TIM8_UP_TIM13_IRQHandler
TIM8_TRG_COM_TIM14_IRQHandler
TIM8_CC_IRQHandler
DMA1_Stream7_IRQHandler
FSMC_IRQHandler
SDIO_IRQHandler
TIM5_IRQHandler
SPI3_IRQHandler
UART4_IRQHandler
UART5_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
DMA2_Stream0_IRQHandler
DMA2_Stream1_IRQHandler
DMA2_Stream2_IRQHandler
DMA2_Stream3_IRQHandler
DMA2_Stream4_IRQHandler
ETH_IRQHandler
ETH_WKUP_IRQHandler
CAN2_TX_IRQHandler
CAN2_RX0_IRQHandler
CAN2_RX1_IRQHandler
CAN2_SCE_IRQHandler
OTG_FS_IRQHandler
DMA2_Stream5_IRQHandler
DMA2_Stream6_IRQHandler
DMA2_Stream7_IRQHandler
USART6_IRQHandler
I2C3_EV_IRQHandler
I2C3_ER_IRQHandler
OTG_HS_EP1_OUT_IRQHandler
OTG_HS_EP1_IN_IRQHandler
OTG_HS_WKUP_IRQHandler
OTG_HS_IRQHandler
DCMI_IRQHandler
CRYP_IRQHandler
HASH_RNG_IRQHandler
FPU_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
AAAAyl0n/Lambda0
| 11,003
|
1.Hardware/Screen/1.14寸方形屏/03-程序源码/08-1.14IPS显示屏STM32L053C8_SPI例程/CORE/startup_stm32l053xx.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32l053xx.s
;* Author : MCD Application Team
;* Version : V1.7.1
;* Date : 25-November-2016
;* Description : STM32l053xx Devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M0+ processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_IRQHandler ; PVD through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_CRS_IRQHandler ; RCC and CRS
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TSC_IRQHandler ; TSC
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_6_7_IRQHandler ; DMA1 Channel 4, Channel 5, Channel 6 and Channel 7
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD LPTIM1_IRQHandler ; LPTIM1
DCD 0 ; Reserved
DCD TIM2_IRQHandler ; TIM2
DCD 0 ; Reserved
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD TIM21_IRQHandler ; TIM21
DCD 0 ; Reserved
DCD TIM22_IRQHandler ; TIM22
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD RNG_LPUART1_IRQHandler ; RNG and LPUART1
DCD LCD_IRQHandler ; LCD
DCD USB_IRQHandler ; USB
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_CRS_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_6_7_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT LPTIM1_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM21_IRQHandler [WEAK]
EXPORT TIM22_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT RNG_LPUART1_IRQHandler [WEAK]
EXPORT LCD_IRQHandler [WEAK]
EXPORT USB_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_CRS_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TSC_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_6_7_IRQHandler
ADC1_COMP_IRQHandler
LPTIM1_IRQHandler
TIM2_IRQHandler
TIM6_DAC_IRQHandler
TIM21_IRQHandler
TIM22_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
RNG_LPUART1_IRQHandler
LCD_IRQHandler
USB_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
AAAAyl0n/Lambda0
| 10,390
|
1.Hardware/Screen/1.14寸方形屏/03-程序源码/06-1.14IPS显示屏STM32F030C8_SPI例程/CORE/startup_stm32f0xx.s
|
;******************** (C) COPYRIGHT 2012 STMicroelectronics ********************
;* File Name : startup_stm32f0xx.s
;* Author : MCD Application Team
;* Version : V1.0.0
;* Date : 23-March-2012
;* Description : STM32F0xx Devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM0 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
; @attention
;
; Licensed under MCD-ST Liberty SW License Agreement V2, (the "License");
; You may not use this file except in compliance with the License.
; You may obtain a copy of the License at:
;
; http://www.st.com/software_license_agreement_liberty_v2
;
; Unless required by applicable law or agreed to in writing, software
; distributed under the License is distributed on an "AS IS" BASIS,
; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; See the License for the specific language governing permissions and
; limitations under the License.
;
;*******************************************************************************
;
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x00000400
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x00000200
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_IRQHandler ; PVD through EXTI Line detect
DCD RTC_IRQHandler ; RTC through EXTI Line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_IRQHandler ; RCC
DCD EXTI0_1_IRQHandler ; EXTI Line 0 and 1
DCD EXTI2_3_IRQHandler ; EXTI Line 2 and 3
DCD EXTI4_15_IRQHandler ; EXTI Line 4 to 15
DCD TS_IRQHandler ; TS
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_3_IRQHandler ; DMA1 Channel 2 and Channel 3
DCD DMA1_Channel4_5_IRQHandler ; DMA1 Channel 4 and Channel 5
DCD ADC1_COMP_IRQHandler ; ADC1, COMP1 and COMP2
DCD TIM1_BRK_UP_TRG_COM_IRQHandler ; TIM1 Break, Update, Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC
DCD 0 ; Reserved
DCD TIM14_IRQHandler ; TIM14
DCD TIM15_IRQHandler ; TIM15
DCD TIM16_IRQHandler ; TIM16
DCD TIM17_IRQHandler ; TIM17
DCD I2C1_IRQHandler ; I2C1
DCD I2C2_IRQHandler ; I2C2
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD 0 ; Reserved
DCD CEC_IRQHandler ; CEC
DCD 0 ; Reserved
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_IRQHandler [WEAK]
EXPORT EXTI0_1_IRQHandler [WEAK]
EXPORT EXTI2_3_IRQHandler [WEAK]
EXPORT EXTI4_15_IRQHandler [WEAK]
EXPORT TS_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_5_IRQHandler [WEAK]
EXPORT ADC1_COMP_IRQHandler [WEAK]
EXPORT TIM1_BRK_UP_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM14_IRQHandler [WEAK]
EXPORT TIM15_IRQHandler [WEAK]
EXPORT TIM16_IRQHandler [WEAK]
EXPORT TIM17_IRQHandler [WEAK]
EXPORT I2C1_IRQHandler [WEAK]
EXPORT I2C2_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT CEC_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_IRQHandler
EXTI0_1_IRQHandler
EXTI2_3_IRQHandler
EXTI4_15_IRQHandler
TS_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_3_IRQHandler
DMA1_Channel4_5_IRQHandler
ADC1_COMP_IRQHandler
TIM1_BRK_UP_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM6_DAC_IRQHandler
TIM14_IRQHandler
TIM15_IRQHandler
TIM16_IRQHandler
TIM17_IRQHandler
I2C1_IRQHandler
I2C2_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
CEC_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
AAAAyl0n/Lambda0
| 12,458
|
1.Hardware/Screen/1.14寸方形屏/03-程序源码/02-1.14IPS显示屏STM32F103C8T6_SPI例程/CORE/startup_stm32f10x_md.s
|
;******************** (C) COPYRIGHT 2011 STMicroelectronics ********************
;* File Name : startup_stm32f10x_md.s
;* Author : MCD Application Team
;* Version : V3.5.0
;* Date : 11-March-2011
;* Description : STM32F10x Medium Density Devices vector table for MDK-ARM
;* toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Configure the clock system
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM3 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
; THE PRESENT FIRMWARE WHICH IS FOR GUIDANCE ONLY AIMS AT PROVIDING CUSTOMERS
; WITH CODING INFORMATION REGARDING THEIR PRODUCTS IN ORDER FOR THEM TO SAVE TIME.
; AS A RESULT, STMICROELECTRONICS SHALL NOT BE HELD LIABLE FOR ANY DIRECT,
; INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING FROM THE
; CONTENT OF SUCH FIRMWARE AND/OR THE USE MADE BY CUSTOMERS OF THE CODING
; INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS.
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x00000400
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x00000200
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD MemManage_Handler ; MPU Fault Handler
DCD BusFault_Handler ; Bus Fault Handler
DCD UsageFault_Handler ; Usage Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD DebugMon_Handler ; Debug Monitor Handler
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_IRQHandler ; PVD through EXTI Line detect
DCD TAMPER_IRQHandler ; Tamper
DCD RTC_IRQHandler ; RTC
DCD FLASH_IRQHandler ; Flash
DCD RCC_IRQHandler ; RCC
DCD EXTI0_IRQHandler ; EXTI Line 0
DCD EXTI1_IRQHandler ; EXTI Line 1
DCD EXTI2_IRQHandler ; EXTI Line 2
DCD EXTI3_IRQHandler ; EXTI Line 3
DCD EXTI4_IRQHandler ; EXTI Line 4
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_IRQHandler ; DMA1 Channel 2
DCD DMA1_Channel3_IRQHandler ; DMA1 Channel 3
DCD DMA1_Channel4_IRQHandler ; DMA1 Channel 4
DCD DMA1_Channel5_IRQHandler ; DMA1 Channel 5
DCD DMA1_Channel6_IRQHandler ; DMA1 Channel 6
DCD DMA1_Channel7_IRQHandler ; DMA1 Channel 7
DCD ADC1_2_IRQHandler ; ADC1_2
DCD USB_HP_CAN1_TX_IRQHandler ; USB High Priority or CAN1 TX
DCD USB_LP_CAN1_RX0_IRQHandler ; USB Low Priority or CAN1 RX0
DCD CAN1_RX1_IRQHandler ; CAN1 RX1
DCD CAN1_SCE_IRQHandler ; CAN1 SCE
DCD EXTI9_5_IRQHandler ; EXTI Line 9..5
DCD TIM1_BRK_IRQHandler ; TIM1 Break
DCD TIM1_UP_IRQHandler ; TIM1 Update
DCD TIM1_TRG_COM_IRQHandler ; TIM1 Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM4_IRQHandler ; TIM4
DCD I2C1_EV_IRQHandler ; I2C1 Event
DCD I2C1_ER_IRQHandler ; I2C1 Error
DCD I2C2_EV_IRQHandler ; I2C2 Event
DCD I2C2_ER_IRQHandler ; I2C2 Error
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_IRQHandler ; USART3
DCD EXTI15_10_IRQHandler ; EXTI Line 15..10
DCD RTCAlarm_IRQHandler ; RTC Alarm through EXTI Line
DCD USBWakeUp_IRQHandler ; USB Wakeup from suspend
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
MemManage_Handler\
PROC
EXPORT MemManage_Handler [WEAK]
B .
ENDP
BusFault_Handler\
PROC
EXPORT BusFault_Handler [WEAK]
B .
ENDP
UsageFault_Handler\
PROC
EXPORT UsageFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
DebugMon_Handler\
PROC
EXPORT DebugMon_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_IRQHandler [WEAK]
EXPORT TAMPER_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_IRQHandler [WEAK]
EXPORT EXTI0_IRQHandler [WEAK]
EXPORT EXTI1_IRQHandler [WEAK]
EXPORT EXTI2_IRQHandler [WEAK]
EXPORT EXTI3_IRQHandler [WEAK]
EXPORT EXTI4_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_IRQHandler [WEAK]
EXPORT DMA1_Channel3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_IRQHandler [WEAK]
EXPORT DMA1_Channel5_IRQHandler [WEAK]
EXPORT DMA1_Channel6_IRQHandler [WEAK]
EXPORT DMA1_Channel7_IRQHandler [WEAK]
EXPORT ADC1_2_IRQHandler [WEAK]
EXPORT USB_HP_CAN1_TX_IRQHandler [WEAK]
EXPORT USB_LP_CAN1_RX0_IRQHandler [WEAK]
EXPORT CAN1_RX1_IRQHandler [WEAK]
EXPORT CAN1_SCE_IRQHandler [WEAK]
EXPORT EXTI9_5_IRQHandler [WEAK]
EXPORT TIM1_BRK_IRQHandler [WEAK]
EXPORT TIM1_UP_IRQHandler [WEAK]
EXPORT TIM1_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM4_IRQHandler [WEAK]
EXPORT I2C1_EV_IRQHandler [WEAK]
EXPORT I2C1_ER_IRQHandler [WEAK]
EXPORT I2C2_EV_IRQHandler [WEAK]
EXPORT I2C2_ER_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_IRQHandler [WEAK]
EXPORT EXTI15_10_IRQHandler [WEAK]
EXPORT RTCAlarm_IRQHandler [WEAK]
EXPORT USBWakeUp_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_IRQHandler
TAMPER_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_IRQHandler
EXTI0_IRQHandler
EXTI1_IRQHandler
EXTI2_IRQHandler
EXTI3_IRQHandler
EXTI4_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_IRQHandler
DMA1_Channel3_IRQHandler
DMA1_Channel4_IRQHandler
DMA1_Channel5_IRQHandler
DMA1_Channel6_IRQHandler
DMA1_Channel7_IRQHandler
ADC1_2_IRQHandler
USB_HP_CAN1_TX_IRQHandler
USB_LP_CAN1_RX0_IRQHandler
CAN1_RX1_IRQHandler
CAN1_SCE_IRQHandler
EXTI9_5_IRQHandler
TIM1_BRK_IRQHandler
TIM1_UP_IRQHandler
TIM1_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM4_IRQHandler
I2C1_EV_IRQHandler
I2C1_ER_IRQHandler
I2C2_EV_IRQHandler
I2C2_ER_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_IRQHandler
EXTI15_10_IRQHandler
RTCAlarm_IRQHandler
USBWakeUp_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;******************* (C) COPYRIGHT 2011 STMicroelectronics *****END OF FILE*****
|
AAAAyl0n/Lambda0
| 15,145
|
1.Hardware/Screen/1.14寸方形屏/03-程序源码/02-1.14IPS显示屏STM32F103C8T6_SPI例程/CORE/startup_stm32f10x_hd.s
|
;******************** (C) COPYRIGHT 2011 STMicroelectronics ********************
;* File Name : startup_stm32f10x_hd.s
;* Author : MCD Application Team
;* Version : V3.5.0
;* Date : 11-March-2011
;* Description : STM32F10x High Density Devices vector table for MDK-ARM
;* toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Configure the clock system and also configure the external
;* SRAM mounted on STM3210E-EVAL board to be used as data
;* memory (optional, to be enabled by user)
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM3 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
; THE PRESENT FIRMWARE WHICH IS FOR GUIDANCE ONLY AIMS AT PROVIDING CUSTOMERS
; WITH CODING INFORMATION REGARDING THEIR PRODUCTS IN ORDER FOR THEM TO SAVE TIME.
; AS A RESULT, STMICROELECTRONICS SHALL NOT BE HELD LIABLE FOR ANY DIRECT,
; INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING FROM THE
; CONTENT OF SUCH FIRMWARE AND/OR THE USE MADE BY CUSTOMERS OF THE CODING
; INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS.
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x00000400 ; main stack: 1 KiB
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp ; label at stack top; loaded into MSP from vector table entry 0
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x00000200 ; C-library heap: 512 bytes
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8 ; code preserves 8-byte stack alignment (AAPCS requirement)
THUMB ; Cortex-M executes Thumb code only
; Vector Table Mapped to Address 0 at Reset
; Entry 0 is the initial MSP value; entry order is fixed by the Cortex-M3 NVIC
; and the STM32F10x high-density interrupt map — do not reorder entries.
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD MemManage_Handler ; MPU Fault Handler
DCD BusFault_Handler ; Bus Fault Handler
DCD UsageFault_Handler ; Usage Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD DebugMon_Handler ; Debug Monitor Handler
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_IRQHandler ; PVD through EXTI Line detect
DCD TAMPER_IRQHandler ; Tamper
DCD RTC_IRQHandler ; RTC
DCD FLASH_IRQHandler ; Flash
DCD RCC_IRQHandler ; RCC
DCD EXTI0_IRQHandler ; EXTI Line 0
DCD EXTI1_IRQHandler ; EXTI Line 1
DCD EXTI2_IRQHandler ; EXTI Line 2
DCD EXTI3_IRQHandler ; EXTI Line 3
DCD EXTI4_IRQHandler ; EXTI Line 4
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_IRQHandler ; DMA1 Channel 2
DCD DMA1_Channel3_IRQHandler ; DMA1 Channel 3
DCD DMA1_Channel4_IRQHandler ; DMA1 Channel 4
DCD DMA1_Channel5_IRQHandler ; DMA1 Channel 5
DCD DMA1_Channel6_IRQHandler ; DMA1 Channel 6
DCD DMA1_Channel7_IRQHandler ; DMA1 Channel 7
DCD ADC1_2_IRQHandler ; ADC1 & ADC2
DCD USB_HP_CAN1_TX_IRQHandler ; USB High Priority or CAN1 TX
DCD USB_LP_CAN1_RX0_IRQHandler ; USB Low Priority or CAN1 RX0
DCD CAN1_RX1_IRQHandler ; CAN1 RX1
DCD CAN1_SCE_IRQHandler ; CAN1 SCE
DCD EXTI9_5_IRQHandler ; EXTI Line 9..5
DCD TIM1_BRK_IRQHandler ; TIM1 Break
DCD TIM1_UP_IRQHandler ; TIM1 Update
DCD TIM1_TRG_COM_IRQHandler ; TIM1 Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM4_IRQHandler ; TIM4
DCD I2C1_EV_IRQHandler ; I2C1 Event
DCD I2C1_ER_IRQHandler ; I2C1 Error
DCD I2C2_EV_IRQHandler ; I2C2 Event
DCD I2C2_ER_IRQHandler ; I2C2 Error
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_IRQHandler ; USART3
DCD EXTI15_10_IRQHandler ; EXTI Line 15..10
DCD RTCAlarm_IRQHandler ; RTC Alarm through EXTI Line
DCD USBWakeUp_IRQHandler ; USB Wakeup from suspend
DCD TIM8_BRK_IRQHandler ; TIM8 Break
DCD TIM8_UP_IRQHandler ; TIM8 Update
DCD TIM8_TRG_COM_IRQHandler ; TIM8 Trigger and Commutation
DCD TIM8_CC_IRQHandler ; TIM8 Capture Compare
DCD ADC3_IRQHandler ; ADC3
DCD FSMC_IRQHandler ; FSMC
DCD SDIO_IRQHandler ; SDIO
DCD TIM5_IRQHandler ; TIM5
DCD SPI3_IRQHandler ; SPI3
DCD UART4_IRQHandler ; UART4
DCD UART5_IRQHandler ; UART5
DCD TIM6_IRQHandler ; TIM6
DCD TIM7_IRQHandler ; TIM7
DCD DMA2_Channel1_IRQHandler ; DMA2 Channel1
DCD DMA2_Channel2_IRQHandler ; DMA2 Channel2
DCD DMA2_Channel3_IRQHandler ; DMA2 Channel3
DCD DMA2_Channel4_5_IRQHandler ; DMA2 Channel4 & Channel5
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors ; table size in bytes
AREA |.text|, CODE, READONLY
; Reset handler: first code executed after reset. Calls SystemInit (clock /
; external-memory setup), then branches to the C library entry __main, which
; eventually calls main() and never returns here.
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit ; configure the clock system before any C code runs
BLX R0
LDR R0, =__main ; C library start-up (eventually calls main())
BX R0 ; does not return
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
; Each core-exception handler is exported WEAK so the application can
; override it by defining a non-weak symbol with the same name.
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
MemManage_Handler\
PROC
EXPORT MemManage_Handler [WEAK]
B .
ENDP
BusFault_Handler\
PROC
EXPORT BusFault_Handler [WEAK]
B .
ENDP
UsageFault_Handler\
PROC
EXPORT UsageFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
DebugMon_Handler\
PROC
EXPORT DebugMon_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
; Default_Handler: shared infinite loop for every peripheral interrupt.
; All IRQ handler symbols below are WEAK aliases of this one label, so an
; application overrides any of them simply by defining the same name.
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_IRQHandler [WEAK]
EXPORT TAMPER_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_IRQHandler [WEAK]
EXPORT EXTI0_IRQHandler [WEAK]
EXPORT EXTI1_IRQHandler [WEAK]
EXPORT EXTI2_IRQHandler [WEAK]
EXPORT EXTI3_IRQHandler [WEAK]
EXPORT EXTI4_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_IRQHandler [WEAK]
EXPORT DMA1_Channel3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_IRQHandler [WEAK]
EXPORT DMA1_Channel5_IRQHandler [WEAK]
EXPORT DMA1_Channel6_IRQHandler [WEAK]
EXPORT DMA1_Channel7_IRQHandler [WEAK]
EXPORT ADC1_2_IRQHandler [WEAK]
EXPORT USB_HP_CAN1_TX_IRQHandler [WEAK]
EXPORT USB_LP_CAN1_RX0_IRQHandler [WEAK]
EXPORT CAN1_RX1_IRQHandler [WEAK]
EXPORT CAN1_SCE_IRQHandler [WEAK]
EXPORT EXTI9_5_IRQHandler [WEAK]
EXPORT TIM1_BRK_IRQHandler [WEAK]
EXPORT TIM1_UP_IRQHandler [WEAK]
EXPORT TIM1_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM4_IRQHandler [WEAK]
EXPORT I2C1_EV_IRQHandler [WEAK]
EXPORT I2C1_ER_IRQHandler [WEAK]
EXPORT I2C2_EV_IRQHandler [WEAK]
EXPORT I2C2_ER_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_IRQHandler [WEAK]
EXPORT EXTI15_10_IRQHandler [WEAK]
EXPORT RTCAlarm_IRQHandler [WEAK]
EXPORT USBWakeUp_IRQHandler [WEAK]
EXPORT TIM8_BRK_IRQHandler [WEAK]
EXPORT TIM8_UP_IRQHandler [WEAK]
EXPORT TIM8_TRG_COM_IRQHandler [WEAK]
EXPORT TIM8_CC_IRQHandler [WEAK]
EXPORT ADC3_IRQHandler [WEAK]
EXPORT FSMC_IRQHandler [WEAK]
EXPORT SDIO_IRQHandler [WEAK]
EXPORT TIM5_IRQHandler [WEAK]
EXPORT SPI3_IRQHandler [WEAK]
EXPORT UART4_IRQHandler [WEAK]
EXPORT UART5_IRQHandler [WEAK]
EXPORT TIM6_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT DMA2_Channel1_IRQHandler [WEAK]
EXPORT DMA2_Channel2_IRQHandler [WEAK]
EXPORT DMA2_Channel3_IRQHandler [WEAK]
EXPORT DMA2_Channel4_5_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_IRQHandler
TAMPER_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_IRQHandler
EXTI0_IRQHandler
EXTI1_IRQHandler
EXTI2_IRQHandler
EXTI3_IRQHandler
EXTI4_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_IRQHandler
DMA1_Channel3_IRQHandler
DMA1_Channel4_IRQHandler
DMA1_Channel5_IRQHandler
DMA1_Channel6_IRQHandler
DMA1_Channel7_IRQHandler
ADC1_2_IRQHandler
USB_HP_CAN1_TX_IRQHandler
USB_LP_CAN1_RX0_IRQHandler
CAN1_RX1_IRQHandler
CAN1_SCE_IRQHandler
EXTI9_5_IRQHandler
TIM1_BRK_IRQHandler
TIM1_UP_IRQHandler
TIM1_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM4_IRQHandler
I2C1_EV_IRQHandler
I2C1_ER_IRQHandler
I2C2_EV_IRQHandler
I2C2_ER_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_IRQHandler
EXTI15_10_IRQHandler
RTCAlarm_IRQHandler
USBWakeUp_IRQHandler
TIM8_BRK_IRQHandler
TIM8_UP_IRQHandler
TIM8_TRG_COM_IRQHandler
TIM8_CC_IRQHandler
ADC3_IRQHandler
FSMC_IRQHandler
SDIO_IRQHandler
TIM5_IRQHandler
SPI3_IRQHandler
UART4_IRQHandler
UART5_IRQHandler
TIM6_IRQHandler
TIM7_IRQHandler
DMA2_Channel1_IRQHandler
DMA2_Channel2_IRQHandler
DMA2_Channel3_IRQHandler
DMA2_Channel4_5_IRQHandler
B . ; unhandled interrupt: spin forever
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
; MicroLIB build: just export the stack/heap boundary symbols for the library.
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
; Standard C library build: report the two-region memory model
; (heap grows up from R0 toward R2, stack grows down from R1 toward R3).
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem ; R0 = heap base
LDR R1, =(Stack_Mem + Stack_Size) ; R1 = initial stack pointer (stack top)
LDR R2, = (Heap_Mem + Heap_Size) ; R2 = heap limit
LDR R3, = Stack_Mem ; R3 = stack limit (lowest stack address)
BX LR
ALIGN
ENDIF
END
;******************* (C) COPYRIGHT 2011 STMicroelectronics *****END OF FILE*****
|
AAAAyl0n/Lambda0
| 18,823
|
1.Hardware/Screen/1.14寸方形屏/03-程序源码/10-1.14IPS显示屏STM32F303RC_SPI例程/CORE/startup_stm32f303xc.s
|
;******************** (C) COPYRIGHT 2016 STMicroelectronics ********************
;* File Name : startup_stm32f303xc.s
;* Author : MCD Application Team
;* Description : STM32F303xB/xC devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM4 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;*
;*******************************************************************************
;
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x00000400 ; main stack: 1 KiB
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp ; label at stack top; loaded into MSP from vector table entry 0
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x00000200 ; C-library heap: 512 bytes
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8 ; code preserves 8-byte stack alignment (AAPCS requirement)
THUMB ; Cortex-M executes Thumb code only
; Vector Table Mapped to Address 0 at Reset
; Entry 0 is the initial MSP value; entry order is fixed by the Cortex-M4 NVIC
; and the STM32F303xB/xC interrupt map — do not reorder entries.
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD MemManage_Handler ; MPU Fault Handler
DCD BusFault_Handler ; Bus Fault Handler
DCD UsageFault_Handler ; Usage Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD DebugMon_Handler ; Debug Monitor Handler
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window WatchDog
DCD PVD_IRQHandler ; PVD through EXTI Line detection
DCD TAMP_STAMP_IRQHandler ; Tamper and TimeStamps through the EXTI line
DCD RTC_WKUP_IRQHandler ; RTC Wakeup through the EXTI line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_IRQHandler ; RCC
DCD EXTI0_IRQHandler ; EXTI Line0
DCD EXTI1_IRQHandler ; EXTI Line1
DCD EXTI2_TSC_IRQHandler ; EXTI Line2 and Touch Sense controller
DCD EXTI3_IRQHandler ; EXTI Line3
DCD EXTI4_IRQHandler ; EXTI Line4
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_IRQHandler ; DMA1 Channel 2
DCD DMA1_Channel3_IRQHandler ; DMA1 Channel 3
DCD DMA1_Channel4_IRQHandler ; DMA1 Channel 4
DCD DMA1_Channel5_IRQHandler ; DMA1 Channel 5
DCD DMA1_Channel6_IRQHandler ; DMA1 Channel 6
DCD DMA1_Channel7_IRQHandler ; DMA1 Channel 7
DCD ADC1_2_IRQHandler ; ADC1 and ADC2
DCD USB_HP_CAN_TX_IRQHandler ; USB Device High Priority or CAN TX
DCD USB_LP_CAN_RX0_IRQHandler ; USB Device Low Priority or CAN RX0
DCD CAN_RX1_IRQHandler ; CAN RX1
DCD CAN_SCE_IRQHandler ; CAN SCE
DCD EXTI9_5_IRQHandler ; External Line[9:5]s
DCD TIM1_BRK_TIM15_IRQHandler ; TIM1 Break and TIM15
DCD TIM1_UP_TIM16_IRQHandler ; TIM1 Update and TIM16
DCD TIM1_TRG_COM_TIM17_IRQHandler ; TIM1 Trigger and Commutation and TIM17
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM4_IRQHandler ; TIM4
DCD I2C1_EV_IRQHandler ; I2C1 Event
DCD I2C1_ER_IRQHandler ; I2C1 Error
DCD I2C2_EV_IRQHandler ; I2C2 Event
DCD I2C2_ER_IRQHandler ; I2C2 Error
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_IRQHandler ; USART3
DCD EXTI15_10_IRQHandler ; External Line[15:10]s
DCD RTC_Alarm_IRQHandler ; RTC Alarm (A and B) through EXTI Line
DCD USBWakeUp_IRQHandler ; USB Wakeup through EXTI line
DCD TIM8_BRK_IRQHandler ; TIM8 Break
DCD TIM8_UP_IRQHandler ; TIM8 Update
DCD TIM8_TRG_COM_IRQHandler ; TIM8 Trigger and Commutation
DCD TIM8_CC_IRQHandler ; TIM8 Capture Compare
DCD ADC3_IRQHandler ; ADC3
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SPI3_IRQHandler ; SPI3
DCD UART4_IRQHandler ; UART4
DCD UART5_IRQHandler ; UART5
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC1&2 underrun errors
DCD TIM7_IRQHandler ; TIM7
DCD DMA2_Channel1_IRQHandler ; DMA2 Channel 1
DCD DMA2_Channel2_IRQHandler ; DMA2 Channel 2
DCD DMA2_Channel3_IRQHandler ; DMA2 Channel 3
DCD DMA2_Channel4_IRQHandler ; DMA2 Channel 4
DCD DMA2_Channel5_IRQHandler ; DMA2 Channel 5
DCD ADC4_IRQHandler ; ADC4
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD COMP1_2_3_IRQHandler ; COMP1, COMP2 and COMP3
DCD COMP4_5_6_IRQHandler ; COMP4, COMP5 and COMP6
DCD COMP7_IRQHandler ; COMP7
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD USB_HP_IRQHandler ; USB High Priority remap
DCD USB_LP_IRQHandler ; USB Low Priority remap
DCD USBWakeUp_RMP_IRQHandler ; USB Wakeup remap through EXTI
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD FPU_IRQHandler ; FPU
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors ; table size in bytes
AREA |.text|, CODE, READONLY
; Reset handler: first code executed after reset. Calls SystemInit, then
; branches to the C library entry __main, which eventually calls main().
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT SystemInit
IMPORT __main
LDR R0, =SystemInit ; configure the clock system before any C code runs
BLX R0
LDR R0, =__main ; C library start-up (eventually calls main())
BX R0 ; does not return
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
; Each core-exception handler is exported WEAK so the application can
; override it by defining a non-weak symbol with the same name.
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
MemManage_Handler\
PROC
EXPORT MemManage_Handler [WEAK]
B .
ENDP
BusFault_Handler\
PROC
EXPORT BusFault_Handler [WEAK]
B .
ENDP
UsageFault_Handler\
PROC
EXPORT UsageFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
DebugMon_Handler\
PROC
EXPORT DebugMon_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
; Default_Handler: shared infinite loop for every peripheral interrupt.
; All IRQ handler symbols below are WEAK aliases of this one label, so an
; application overrides any of them simply by defining the same name.
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_IRQHandler [WEAK]
EXPORT TAMP_STAMP_IRQHandler [WEAK]
EXPORT RTC_WKUP_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_IRQHandler [WEAK]
EXPORT EXTI0_IRQHandler [WEAK]
EXPORT EXTI1_IRQHandler [WEAK]
EXPORT EXTI2_TSC_IRQHandler [WEAK]
EXPORT EXTI3_IRQHandler [WEAK]
EXPORT EXTI4_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_IRQHandler [WEAK]
EXPORT DMA1_Channel3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_IRQHandler [WEAK]
EXPORT DMA1_Channel5_IRQHandler [WEAK]
EXPORT DMA1_Channel6_IRQHandler [WEAK]
EXPORT DMA1_Channel7_IRQHandler [WEAK]
EXPORT ADC1_2_IRQHandler [WEAK]
EXPORT USB_HP_CAN_TX_IRQHandler [WEAK]
EXPORT USB_LP_CAN_RX0_IRQHandler [WEAK]
EXPORT CAN_RX1_IRQHandler [WEAK]
EXPORT CAN_SCE_IRQHandler [WEAK]
EXPORT EXTI9_5_IRQHandler [WEAK]
EXPORT TIM1_BRK_TIM15_IRQHandler [WEAK]
EXPORT TIM1_UP_TIM16_IRQHandler [WEAK]
EXPORT TIM1_TRG_COM_TIM17_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM4_IRQHandler [WEAK]
EXPORT I2C1_EV_IRQHandler [WEAK]
EXPORT I2C1_ER_IRQHandler [WEAK]
EXPORT I2C2_EV_IRQHandler [WEAK]
EXPORT I2C2_ER_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_IRQHandler [WEAK]
EXPORT EXTI15_10_IRQHandler [WEAK]
EXPORT RTC_Alarm_IRQHandler [WEAK]
EXPORT USBWakeUp_IRQHandler [WEAK]
EXPORT TIM8_BRK_IRQHandler [WEAK]
EXPORT TIM8_UP_IRQHandler [WEAK]
EXPORT TIM8_TRG_COM_IRQHandler [WEAK]
EXPORT TIM8_CC_IRQHandler [WEAK]
EXPORT ADC3_IRQHandler [WEAK]
EXPORT SPI3_IRQHandler [WEAK]
EXPORT UART4_IRQHandler [WEAK]
EXPORT UART5_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT DMA2_Channel1_IRQHandler [WEAK]
EXPORT DMA2_Channel2_IRQHandler [WEAK]
EXPORT DMA2_Channel3_IRQHandler [WEAK]
EXPORT DMA2_Channel4_IRQHandler [WEAK]
EXPORT DMA2_Channel5_IRQHandler [WEAK]
EXPORT ADC4_IRQHandler [WEAK]
EXPORT COMP1_2_3_IRQHandler [WEAK]
EXPORT COMP4_5_6_IRQHandler [WEAK]
EXPORT COMP7_IRQHandler [WEAK]
EXPORT USB_HP_IRQHandler [WEAK]
EXPORT USB_LP_IRQHandler [WEAK]
EXPORT USBWakeUp_RMP_IRQHandler [WEAK]
EXPORT FPU_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_IRQHandler
TAMP_STAMP_IRQHandler
RTC_WKUP_IRQHandler
FLASH_IRQHandler
RCC_IRQHandler
EXTI0_IRQHandler
EXTI1_IRQHandler
EXTI2_TSC_IRQHandler
EXTI3_IRQHandler
EXTI4_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_IRQHandler
DMA1_Channel3_IRQHandler
DMA1_Channel4_IRQHandler
DMA1_Channel5_IRQHandler
DMA1_Channel6_IRQHandler
DMA1_Channel7_IRQHandler
ADC1_2_IRQHandler
USB_HP_CAN_TX_IRQHandler
USB_LP_CAN_RX0_IRQHandler
CAN_RX1_IRQHandler
CAN_SCE_IRQHandler
EXTI9_5_IRQHandler
TIM1_BRK_TIM15_IRQHandler
TIM1_UP_TIM16_IRQHandler
TIM1_TRG_COM_TIM17_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM4_IRQHandler
I2C1_EV_IRQHandler
I2C1_ER_IRQHandler
I2C2_EV_IRQHandler
I2C2_ER_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_IRQHandler
EXTI15_10_IRQHandler
RTC_Alarm_IRQHandler
USBWakeUp_IRQHandler
TIM8_BRK_IRQHandler
TIM8_UP_IRQHandler
TIM8_TRG_COM_IRQHandler
TIM8_CC_IRQHandler
ADC3_IRQHandler
SPI3_IRQHandler
UART4_IRQHandler
UART5_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
DMA2_Channel1_IRQHandler
DMA2_Channel2_IRQHandler
DMA2_Channel3_IRQHandler
DMA2_Channel4_IRQHandler
DMA2_Channel5_IRQHandler
ADC4_IRQHandler
COMP1_2_3_IRQHandler
COMP4_5_6_IRQHandler
COMP7_IRQHandler
USB_HP_IRQHandler
USB_LP_IRQHandler
USBWakeUp_RMP_IRQHandler
FPU_IRQHandler
B . ; unhandled interrupt: spin forever
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
; MicroLIB build: just export the stack/heap boundary symbols for the library.
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
; Standard C library build: report the two-region memory model
; (heap grows up from R0 toward R2, stack grows down from R1 toward R3).
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem ; R0 = heap base
LDR R1, =(Stack_Mem + Stack_Size) ; R1 = initial stack pointer (stack top)
LDR R2, = (Heap_Mem + Heap_Size) ; R2 = heap limit
LDR R3, = Stack_Mem ; R3 = stack limit (lowest stack address)
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
AAAAyl0n/Lambda0
| 19,939
|
1.Hardware/Screen/1.14寸方形屏/03-程序源码/09-1.14IPS显示屏STM32L476RG_SPI例程/CORE/startup_stm32l476xx.s
|
;********************** COPYRIGHT(c) 2017 STMicroelectronics ******************
;* File Name : startup_stm32l476xx.s
;* Author : MCD Application Team
;* Description : STM32L476xx Ultra Low Power devices vector table for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M4 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*
;*******************************************************************************
;
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x400; main stack: 1 KiB
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp ; label at stack top; loaded into MSP from vector table entry 0
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x200; C-library heap: 512 bytes
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8 ; code preserves 8-byte stack alignment (AAPCS requirement)
THUMB ; Cortex-M executes Thumb code only
; Vector Table Mapped to Address 0 at Reset
; Entry 0 is the initial MSP value; entry order is fixed by the Cortex-M4 NVIC
; and the STM32L476xx interrupt map — do not reorder entries.
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD MemManage_Handler ; MPU Fault Handler
DCD BusFault_Handler ; Bus Fault Handler
DCD UsageFault_Handler ; Usage Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD DebugMon_Handler ; Debug Monitor Handler
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window WatchDog
DCD PVD_PVM_IRQHandler ; PVD/PVM1/PVM2/PVM3/PVM4 through EXTI Line detection
DCD TAMP_STAMP_IRQHandler ; Tamper and TimeStamps through the EXTI line
DCD RTC_WKUP_IRQHandler ; RTC Wakeup through the EXTI line
DCD FLASH_IRQHandler ; FLASH
DCD RCC_IRQHandler ; RCC
DCD EXTI0_IRQHandler ; EXTI Line0
DCD EXTI1_IRQHandler ; EXTI Line1
DCD EXTI2_IRQHandler ; EXTI Line2
DCD EXTI3_IRQHandler ; EXTI Line3
DCD EXTI4_IRQHandler ; EXTI Line4
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_IRQHandler ; DMA1 Channel 2
DCD DMA1_Channel3_IRQHandler ; DMA1 Channel 3
DCD DMA1_Channel4_IRQHandler ; DMA1 Channel 4
DCD DMA1_Channel5_IRQHandler ; DMA1 Channel 5
DCD DMA1_Channel6_IRQHandler ; DMA1 Channel 6
DCD DMA1_Channel7_IRQHandler ; DMA1 Channel 7
DCD ADC1_2_IRQHandler ; ADC1, ADC2
DCD CAN1_TX_IRQHandler ; CAN1 TX
DCD CAN1_RX0_IRQHandler ; CAN1 RX0
DCD CAN1_RX1_IRQHandler ; CAN1 RX1
DCD CAN1_SCE_IRQHandler ; CAN1 SCE
DCD EXTI9_5_IRQHandler ; External Line[9:5]s
DCD TIM1_BRK_TIM15_IRQHandler ; TIM1 Break and TIM15
DCD TIM1_UP_TIM16_IRQHandler ; TIM1 Update and TIM16
DCD TIM1_TRG_COM_TIM17_IRQHandler ; TIM1 Trigger and Commutation and TIM17
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM4_IRQHandler ; TIM4
DCD I2C1_EV_IRQHandler ; I2C1 Event
DCD I2C1_ER_IRQHandler ; I2C1 Error
DCD I2C2_EV_IRQHandler ; I2C2 Event
DCD I2C2_ER_IRQHandler ; I2C2 Error
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_IRQHandler ; USART3
DCD EXTI15_10_IRQHandler ; External Line[15:10]
DCD RTC_Alarm_IRQHandler ; RTC Alarm (A and B) through EXTI Line
DCD DFSDM1_FLT3_IRQHandler ; DFSDM1 Filter 3 global Interrupt
DCD TIM8_BRK_IRQHandler ; TIM8 Break Interrupt
DCD TIM8_UP_IRQHandler ; TIM8 Update Interrupt
DCD TIM8_TRG_COM_IRQHandler ; TIM8 Trigger and Commutation Interrupt
DCD TIM8_CC_IRQHandler ; TIM8 Capture Compare Interrupt
DCD ADC3_IRQHandler ; ADC3 global Interrupt
DCD FMC_IRQHandler ; FMC
DCD SDMMC1_IRQHandler ; SDMMC1
DCD TIM5_IRQHandler ; TIM5
DCD SPI3_IRQHandler ; SPI3
DCD UART4_IRQHandler ; UART4
DCD UART5_IRQHandler ; UART5
DCD TIM6_DAC_IRQHandler ; TIM6 and DAC1&2 underrun errors
DCD TIM7_IRQHandler ; TIM7
DCD DMA2_Channel1_IRQHandler ; DMA2 Channel 1
DCD DMA2_Channel2_IRQHandler ; DMA2 Channel 2
DCD DMA2_Channel3_IRQHandler ; DMA2 Channel 3
DCD DMA2_Channel4_IRQHandler ; DMA2 Channel 4
DCD DMA2_Channel5_IRQHandler ; DMA2 Channel 5
DCD DFSDM1_FLT0_IRQHandler ; DFSDM1 Filter 0 global Interrupt
DCD DFSDM1_FLT1_IRQHandler ; DFSDM1 Filter 1 global Interrupt
DCD DFSDM1_FLT2_IRQHandler ; DFSDM1 Filter 2 global Interrupt
DCD COMP_IRQHandler ; COMP Interrupt
DCD LPTIM1_IRQHandler ; LP TIM1 interrupt
DCD LPTIM2_IRQHandler ; LP TIM2 interrupt
DCD OTG_FS_IRQHandler ; USB OTG FS
DCD DMA2_Channel6_IRQHandler ; DMA2 Channel 6
DCD DMA2_Channel7_IRQHandler ; DMA2 Channel 7
DCD LPUART1_IRQHandler ; LP UART1 interrupt
DCD QUADSPI_IRQHandler ; Quad SPI global interrupt
DCD I2C3_EV_IRQHandler ; I2C3 event
DCD I2C3_ER_IRQHandler ; I2C3 error
DCD SAI1_IRQHandler ; Serial Audio Interface 1 global interrupt
DCD SAI2_IRQHandler ; Serial Audio Interface 2 global interrupt
DCD SWPMI1_IRQHandler ; Serial Wire Interface 1 global interrupt
DCD TSC_IRQHandler ; Touch Sense Controller global interrupt
DCD LCD_IRQHandler ; LCD global interrupt
DCD 0 ; Reserved
DCD RNG_IRQHandler ; RNG global interrupt
DCD FPU_IRQHandler ; FPU
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors ; table size in bytes
AREA |.text|, CODE, READONLY
; Reset handler
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT SystemInit
IMPORT __main
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
MemManage_Handler\
PROC
EXPORT MemManage_Handler [WEAK]
B .
ENDP
BusFault_Handler\
PROC
EXPORT BusFault_Handler [WEAK]
B .
ENDP
UsageFault_Handler\
PROC
EXPORT UsageFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
DebugMon_Handler\
PROC
EXPORT DebugMon_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_PVM_IRQHandler [WEAK]
EXPORT TAMP_STAMP_IRQHandler [WEAK]
EXPORT RTC_WKUP_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_IRQHandler [WEAK]
EXPORT EXTI0_IRQHandler [WEAK]
EXPORT EXTI1_IRQHandler [WEAK]
EXPORT EXTI2_IRQHandler [WEAK]
EXPORT EXTI3_IRQHandler [WEAK]
EXPORT EXTI4_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_IRQHandler [WEAK]
EXPORT DMA1_Channel3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_IRQHandler [WEAK]
EXPORT DMA1_Channel5_IRQHandler [WEAK]
EXPORT DMA1_Channel6_IRQHandler [WEAK]
EXPORT DMA1_Channel7_IRQHandler [WEAK]
EXPORT ADC1_2_IRQHandler [WEAK]
EXPORT CAN1_TX_IRQHandler [WEAK]
EXPORT CAN1_RX0_IRQHandler [WEAK]
EXPORT CAN1_RX1_IRQHandler [WEAK]
EXPORT CAN1_SCE_IRQHandler [WEAK]
EXPORT EXTI9_5_IRQHandler [WEAK]
EXPORT TIM1_BRK_TIM15_IRQHandler [WEAK]
EXPORT TIM1_UP_TIM16_IRQHandler [WEAK]
EXPORT TIM1_TRG_COM_TIM17_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM4_IRQHandler [WEAK]
EXPORT I2C1_EV_IRQHandler [WEAK]
EXPORT I2C1_ER_IRQHandler [WEAK]
EXPORT I2C2_EV_IRQHandler [WEAK]
EXPORT I2C2_ER_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_IRQHandler [WEAK]
EXPORT EXTI15_10_IRQHandler [WEAK]
EXPORT RTC_Alarm_IRQHandler [WEAK]
EXPORT DFSDM1_FLT3_IRQHandler [WEAK]
EXPORT TIM8_BRK_IRQHandler [WEAK]
EXPORT TIM8_UP_IRQHandler [WEAK]
EXPORT TIM8_TRG_COM_IRQHandler [WEAK]
EXPORT TIM8_CC_IRQHandler [WEAK]
EXPORT ADC3_IRQHandler [WEAK]
EXPORT FMC_IRQHandler [WEAK]
EXPORT SDMMC1_IRQHandler [WEAK]
EXPORT TIM5_IRQHandler [WEAK]
EXPORT SPI3_IRQHandler [WEAK]
EXPORT UART4_IRQHandler [WEAK]
EXPORT UART5_IRQHandler [WEAK]
EXPORT TIM6_DAC_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT DMA2_Channel1_IRQHandler [WEAK]
EXPORT DMA2_Channel2_IRQHandler [WEAK]
EXPORT DMA2_Channel3_IRQHandler [WEAK]
EXPORT DMA2_Channel4_IRQHandler [WEAK]
EXPORT DMA2_Channel5_IRQHandler [WEAK]
EXPORT DFSDM1_FLT0_IRQHandler [WEAK]
EXPORT DFSDM1_FLT1_IRQHandler [WEAK]
EXPORT DFSDM1_FLT2_IRQHandler [WEAK]
EXPORT COMP_IRQHandler [WEAK]
EXPORT LPTIM1_IRQHandler [WEAK]
EXPORT LPTIM2_IRQHandler [WEAK]
EXPORT OTG_FS_IRQHandler [WEAK]
EXPORT DMA2_Channel6_IRQHandler [WEAK]
EXPORT DMA2_Channel7_IRQHandler [WEAK]
EXPORT LPUART1_IRQHandler [WEAK]
EXPORT QUADSPI_IRQHandler [WEAK]
EXPORT I2C3_EV_IRQHandler [WEAK]
EXPORT I2C3_ER_IRQHandler [WEAK]
EXPORT SAI1_IRQHandler [WEAK]
EXPORT SAI2_IRQHandler [WEAK]
EXPORT SWPMI1_IRQHandler [WEAK]
EXPORT TSC_IRQHandler [WEAK]
EXPORT LCD_IRQHandler [WEAK]
EXPORT RNG_IRQHandler [WEAK]
EXPORT FPU_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_PVM_IRQHandler
TAMP_STAMP_IRQHandler
RTC_WKUP_IRQHandler
FLASH_IRQHandler
RCC_IRQHandler
EXTI0_IRQHandler
EXTI1_IRQHandler
EXTI2_IRQHandler
EXTI3_IRQHandler
EXTI4_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_IRQHandler
DMA1_Channel3_IRQHandler
DMA1_Channel4_IRQHandler
DMA1_Channel5_IRQHandler
DMA1_Channel6_IRQHandler
DMA1_Channel7_IRQHandler
ADC1_2_IRQHandler
CAN1_TX_IRQHandler
CAN1_RX0_IRQHandler
CAN1_RX1_IRQHandler
CAN1_SCE_IRQHandler
EXTI9_5_IRQHandler
TIM1_BRK_TIM15_IRQHandler
TIM1_UP_TIM16_IRQHandler
TIM1_TRG_COM_TIM17_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM4_IRQHandler
I2C1_EV_IRQHandler
I2C1_ER_IRQHandler
I2C2_EV_IRQHandler
I2C2_ER_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_IRQHandler
EXTI15_10_IRQHandler
RTC_Alarm_IRQHandler
DFSDM1_FLT3_IRQHandler
TIM8_BRK_IRQHandler
TIM8_UP_IRQHandler
TIM8_TRG_COM_IRQHandler
TIM8_CC_IRQHandler
ADC3_IRQHandler
FMC_IRQHandler
SDMMC1_IRQHandler
TIM5_IRQHandler
SPI3_IRQHandler
UART4_IRQHandler
UART5_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
DMA2_Channel1_IRQHandler
DMA2_Channel2_IRQHandler
DMA2_Channel3_IRQHandler
DMA2_Channel4_IRQHandler
DMA2_Channel5_IRQHandler
DFSDM1_FLT0_IRQHandler
DFSDM1_FLT1_IRQHandler
DFSDM1_FLT2_IRQHandler
COMP_IRQHandler
LPTIM1_IRQHandler
LPTIM2_IRQHandler
OTG_FS_IRQHandler
DMA2_Channel6_IRQHandler
DMA2_Channel7_IRQHandler
LPUART1_IRQHandler
QUADSPI_IRQHandler
I2C3_EV_IRQHandler
I2C3_ER_IRQHandler
SAI1_IRQHandler
SAI2_IRQHandler
SWPMI1_IRQHandler
TSC_IRQHandler
LCD_IRQHandler
RNG_IRQHandler
FPU_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
; With MicroLIB, the C library consumes __initial_sp/__heap_base/__heap_limit
; directly. Otherwise __user_initial_stackheap returns the two-region layout
; to the ARM C library: R0 = heap base, R1 = stack top, R2 = heap limit,
; R3 = stack limit (bottom of stack).
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
AAAAyl0n/Lambda0
| 12,458
|
1.Hardware/Screen/1.14寸方形屏/03-程序源码/03-1.14IPS显示屏STM32F103RC_SPI例程/CORE/startup_stm32f10x_md.s
|
;******************** (C) COPYRIGHT 2011 STMicroelectronics ********************
;* File Name : startup_stm32f10x_md.s
;* Author : MCD Application Team
;* Version : V3.5.0
;* Date : 11-March-2011
;* Description : STM32F10x Medium Density Devices vector table for MDK-ARM
;* toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Configure the clock system
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM3 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
; THE PRESENT FIRMWARE WHICH IS FOR GUIDANCE ONLY AIMS AT PROVIDING CUSTOMERS
; WITH CODING INFORMATION REGARDING THEIR PRODUCTS IN ORDER FOR THEM TO SAVE TIME.
; AS A RESULT, STMICROELECTRONICS SHALL NOT BE HELD LIABLE FOR ANY DIRECT,
; INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING FROM THE
; CONTENT OF SUCH FIRMWARE AND/OR THE USE MADE BY CUSTOMERS OF THE CODING
; INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS.
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; Reserve a 1 KiB stack; __initial_sp labels its top (stack grows downward).
Stack_Size EQU 0x00000400
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; Reserve a 512 B heap; __heap_base/__heap_limit bound it for the C library.
Heap_Size EQU 0x00000200
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8 ; code preserves 8-byte stack alignment (AAPCS)
THUMB ; all following code is Thumb (Cortex-M has no ARM state)
; Vector Table Mapped to Address 0 at Reset
; Entry 0 is the initial MSP value; entries 1..15 are Cortex-M3 system
; exceptions; the remaining entries are the STM32F10x medium-density
; peripheral interrupts in NVIC position order. Order must not change.
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD MemManage_Handler ; MPU Fault Handler
DCD BusFault_Handler ; Bus Fault Handler
DCD UsageFault_Handler ; Usage Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD DebugMon_Handler ; Debug Monitor Handler
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_IRQHandler ; PVD through EXTI Line detect
DCD TAMPER_IRQHandler ; Tamper
DCD RTC_IRQHandler ; RTC
DCD FLASH_IRQHandler ; Flash
DCD RCC_IRQHandler ; RCC
DCD EXTI0_IRQHandler ; EXTI Line 0
DCD EXTI1_IRQHandler ; EXTI Line 1
DCD EXTI2_IRQHandler ; EXTI Line 2
DCD EXTI3_IRQHandler ; EXTI Line 3
DCD EXTI4_IRQHandler ; EXTI Line 4
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_IRQHandler ; DMA1 Channel 2
DCD DMA1_Channel3_IRQHandler ; DMA1 Channel 3
DCD DMA1_Channel4_IRQHandler ; DMA1 Channel 4
DCD DMA1_Channel5_IRQHandler ; DMA1 Channel 5
DCD DMA1_Channel6_IRQHandler ; DMA1 Channel 6
DCD DMA1_Channel7_IRQHandler ; DMA1 Channel 7
DCD ADC1_2_IRQHandler ; ADC1_2
DCD USB_HP_CAN1_TX_IRQHandler ; USB High Priority or CAN1 TX
DCD USB_LP_CAN1_RX0_IRQHandler ; USB Low Priority or CAN1 RX0
DCD CAN1_RX1_IRQHandler ; CAN1 RX1
DCD CAN1_SCE_IRQHandler ; CAN1 SCE
DCD EXTI9_5_IRQHandler ; EXTI Line 9..5
DCD TIM1_BRK_IRQHandler ; TIM1 Break
DCD TIM1_UP_IRQHandler ; TIM1 Update
DCD TIM1_TRG_COM_IRQHandler ; TIM1 Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM4_IRQHandler ; TIM4
DCD I2C1_EV_IRQHandler ; I2C1 Event
DCD I2C1_ER_IRQHandler ; I2C1 Error
DCD I2C2_EV_IRQHandler ; I2C2 Event
DCD I2C2_ER_IRQHandler ; I2C2 Error
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_IRQHandler ; USART3
DCD EXTI15_10_IRQHandler ; EXTI Line 15..10
DCD RTCAlarm_IRQHandler ; RTC Alarm through EXTI Line
DCD USBWakeUp_IRQHandler ; USB Wakeup from suspend
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler
; Entry point after reset: call SystemInit (clock/flash setup), then jump to
; the C library entry __main, which initializes .data/.bss and calls main().
; Never returns.
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
; Each system-exception stub spins forever (B .) and is exported WEAK so an
; application-defined handler of the same name overrides it at link time.
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
MemManage_Handler\
PROC
EXPORT MemManage_Handler [WEAK]
B .
ENDP
BusFault_Handler\
PROC
EXPORT BusFault_Handler [WEAK]
B .
ENDP
UsageFault_Handler\
PROC
EXPORT UsageFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
DebugMon_Handler\
PROC
EXPORT DebugMon_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
; Catch-all IRQ handler: every peripheral IRQ label is exported WEAK and all
; of them fall through to the same infinite loop unless overridden by the
; application.
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_IRQHandler [WEAK]
EXPORT TAMPER_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_IRQHandler [WEAK]
EXPORT EXTI0_IRQHandler [WEAK]
EXPORT EXTI1_IRQHandler [WEAK]
EXPORT EXTI2_IRQHandler [WEAK]
EXPORT EXTI3_IRQHandler [WEAK]
EXPORT EXTI4_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_IRQHandler [WEAK]
EXPORT DMA1_Channel3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_IRQHandler [WEAK]
EXPORT DMA1_Channel5_IRQHandler [WEAK]
EXPORT DMA1_Channel6_IRQHandler [WEAK]
EXPORT DMA1_Channel7_IRQHandler [WEAK]
EXPORT ADC1_2_IRQHandler [WEAK]
EXPORT USB_HP_CAN1_TX_IRQHandler [WEAK]
EXPORT USB_LP_CAN1_RX0_IRQHandler [WEAK]
EXPORT CAN1_RX1_IRQHandler [WEAK]
EXPORT CAN1_SCE_IRQHandler [WEAK]
EXPORT EXTI9_5_IRQHandler [WEAK]
EXPORT TIM1_BRK_IRQHandler [WEAK]
EXPORT TIM1_UP_IRQHandler [WEAK]
EXPORT TIM1_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM4_IRQHandler [WEAK]
EXPORT I2C1_EV_IRQHandler [WEAK]
EXPORT I2C1_ER_IRQHandler [WEAK]
EXPORT I2C2_EV_IRQHandler [WEAK]
EXPORT I2C2_ER_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_IRQHandler [WEAK]
EXPORT EXTI15_10_IRQHandler [WEAK]
EXPORT RTCAlarm_IRQHandler [WEAK]
EXPORT USBWakeUp_IRQHandler [WEAK]
; All labels below alias the same address; the B . traps any unexpected IRQ.
WWDG_IRQHandler
PVD_IRQHandler
TAMPER_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_IRQHandler
EXTI0_IRQHandler
EXTI1_IRQHandler
EXTI2_IRQHandler
EXTI3_IRQHandler
EXTI4_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_IRQHandler
DMA1_Channel3_IRQHandler
DMA1_Channel4_IRQHandler
DMA1_Channel5_IRQHandler
DMA1_Channel6_IRQHandler
DMA1_Channel7_IRQHandler
ADC1_2_IRQHandler
USB_HP_CAN1_TX_IRQHandler
USB_LP_CAN1_RX0_IRQHandler
CAN1_RX1_IRQHandler
CAN1_SCE_IRQHandler
EXTI9_5_IRQHandler
TIM1_BRK_IRQHandler
TIM1_UP_IRQHandler
TIM1_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM4_IRQHandler
I2C1_EV_IRQHandler
I2C1_ER_IRQHandler
I2C2_EV_IRQHandler
I2C2_ER_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_IRQHandler
EXTI15_10_IRQHandler
RTCAlarm_IRQHandler
USBWakeUp_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
; With MicroLIB, the C library consumes __initial_sp/__heap_base/__heap_limit
; directly. Otherwise __user_initial_stackheap returns the two-region layout
; to the ARM C library: R0 = heap base, R1 = stack top, R2 = heap limit,
; R3 = stack limit (bottom of stack).
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;******************* (C) COPYRIGHT 2011 STMicroelectronics *****END OF FILE*****
|
AAAAyl0n/Lambda0
| 15,145
|
1.Hardware/Screen/1.14寸方形屏/03-程序源码/03-1.14IPS显示屏STM32F103RC_SPI例程/CORE/startup_stm32f10x_hd.s
|
;******************** (C) COPYRIGHT 2011 STMicroelectronics ********************
;* File Name : startup_stm32f10x_hd.s
;* Author : MCD Application Team
;* Version : V3.5.0
;* Date : 11-March-2011
;* Description : STM32F10x High Density Devices vector table for MDK-ARM
;* toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Configure the clock system and also configure the external
;* SRAM mounted on STM3210E-EVAL board to be used as data
;* memory (optional, to be enabled by user)
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM3 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
; THE PRESENT FIRMWARE WHICH IS FOR GUIDANCE ONLY AIMS AT PROVIDING CUSTOMERS
; WITH CODING INFORMATION REGARDING THEIR PRODUCTS IN ORDER FOR THEM TO SAVE TIME.
; AS A RESULT, STMICROELECTRONICS SHALL NOT BE HELD LIABLE FOR ANY DIRECT,
; INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING FROM THE
; CONTENT OF SUCH FIRMWARE AND/OR THE USE MADE BY CUSTOMERS OF THE CODING
; INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS.
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; Reserve a 1 KiB stack; __initial_sp labels its top (stack grows downward).
Stack_Size EQU 0x00000400
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; Reserve a 512 B heap; __heap_base/__heap_limit bound it for the C library.
Heap_Size EQU 0x00000200
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8 ; code preserves 8-byte stack alignment (AAPCS)
THUMB ; all following code is Thumb (Cortex-M has no ARM state)
; Vector Table Mapped to Address 0 at Reset
; Entry 0 is the initial MSP value; entries 1..15 are Cortex-M3 system
; exceptions; the remaining entries are the STM32F10x high-density
; peripheral interrupts in NVIC position order. Order must not change.
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD MemManage_Handler ; MPU Fault Handler
DCD BusFault_Handler ; Bus Fault Handler
DCD UsageFault_Handler ; Usage Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD DebugMon_Handler ; Debug Monitor Handler
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_IRQHandler ; PVD through EXTI Line detect
DCD TAMPER_IRQHandler ; Tamper
DCD RTC_IRQHandler ; RTC
DCD FLASH_IRQHandler ; Flash
DCD RCC_IRQHandler ; RCC
DCD EXTI0_IRQHandler ; EXTI Line 0
DCD EXTI1_IRQHandler ; EXTI Line 1
DCD EXTI2_IRQHandler ; EXTI Line 2
DCD EXTI3_IRQHandler ; EXTI Line 3
DCD EXTI4_IRQHandler ; EXTI Line 4
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_IRQHandler ; DMA1 Channel 2
DCD DMA1_Channel3_IRQHandler ; DMA1 Channel 3
DCD DMA1_Channel4_IRQHandler ; DMA1 Channel 4
DCD DMA1_Channel5_IRQHandler ; DMA1 Channel 5
DCD DMA1_Channel6_IRQHandler ; DMA1 Channel 6
DCD DMA1_Channel7_IRQHandler ; DMA1 Channel 7
DCD ADC1_2_IRQHandler ; ADC1 & ADC2
DCD USB_HP_CAN1_TX_IRQHandler ; USB High Priority or CAN1 TX
DCD USB_LP_CAN1_RX0_IRQHandler ; USB Low Priority or CAN1 RX0
DCD CAN1_RX1_IRQHandler ; CAN1 RX1
DCD CAN1_SCE_IRQHandler ; CAN1 SCE
DCD EXTI9_5_IRQHandler ; EXTI Line 9..5
DCD TIM1_BRK_IRQHandler ; TIM1 Break
DCD TIM1_UP_IRQHandler ; TIM1 Update
DCD TIM1_TRG_COM_IRQHandler ; TIM1 Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM4_IRQHandler ; TIM4
DCD I2C1_EV_IRQHandler ; I2C1 Event
DCD I2C1_ER_IRQHandler ; I2C1 Error
DCD I2C2_EV_IRQHandler ; I2C2 Event
DCD I2C2_ER_IRQHandler ; I2C2 Error
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_IRQHandler ; USART3
DCD EXTI15_10_IRQHandler ; EXTI Line 15..10
DCD RTCAlarm_IRQHandler ; RTC Alarm through EXTI Line
DCD USBWakeUp_IRQHandler ; USB Wakeup from suspend
DCD TIM8_BRK_IRQHandler ; TIM8 Break
DCD TIM8_UP_IRQHandler ; TIM8 Update
DCD TIM8_TRG_COM_IRQHandler ; TIM8 Trigger and Commutation
DCD TIM8_CC_IRQHandler ; TIM8 Capture Compare
DCD ADC3_IRQHandler ; ADC3
DCD FSMC_IRQHandler ; FSMC
DCD SDIO_IRQHandler ; SDIO
DCD TIM5_IRQHandler ; TIM5
DCD SPI3_IRQHandler ; SPI3
DCD UART4_IRQHandler ; UART4
DCD UART5_IRQHandler ; UART5
DCD TIM6_IRQHandler ; TIM6
DCD TIM7_IRQHandler ; TIM7
DCD DMA2_Channel1_IRQHandler ; DMA2 Channel1
DCD DMA2_Channel2_IRQHandler ; DMA2 Channel2
DCD DMA2_Channel3_IRQHandler ; DMA2 Channel3
DCD DMA2_Channel4_5_IRQHandler ; DMA2 Channel4 & Channel5
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler
; Entry point after reset: call SystemInit (clock/flash setup), then jump to
; the C library entry __main, which initializes .data/.bss and calls main().
; Never returns.
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
; Each system-exception stub spins forever (B .) and is exported WEAK so an
; application-defined handler of the same name overrides it at link time.
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
MemManage_Handler\
PROC
EXPORT MemManage_Handler [WEAK]
B .
ENDP
BusFault_Handler\
PROC
EXPORT BusFault_Handler [WEAK]
B .
ENDP
UsageFault_Handler\
PROC
EXPORT UsageFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
DebugMon_Handler\
PROC
EXPORT DebugMon_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
; Catch-all IRQ handler: every peripheral IRQ label is exported WEAK and all
; of them fall through to the same infinite loop unless overridden by the
; application.
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_IRQHandler [WEAK]
EXPORT TAMPER_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_IRQHandler [WEAK]
EXPORT EXTI0_IRQHandler [WEAK]
EXPORT EXTI1_IRQHandler [WEAK]
EXPORT EXTI2_IRQHandler [WEAK]
EXPORT EXTI3_IRQHandler [WEAK]
EXPORT EXTI4_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_IRQHandler [WEAK]
EXPORT DMA1_Channel3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_IRQHandler [WEAK]
EXPORT DMA1_Channel5_IRQHandler [WEAK]
EXPORT DMA1_Channel6_IRQHandler [WEAK]
EXPORT DMA1_Channel7_IRQHandler [WEAK]
EXPORT ADC1_2_IRQHandler [WEAK]
EXPORT USB_HP_CAN1_TX_IRQHandler [WEAK]
EXPORT USB_LP_CAN1_RX0_IRQHandler [WEAK]
EXPORT CAN1_RX1_IRQHandler [WEAK]
EXPORT CAN1_SCE_IRQHandler [WEAK]
EXPORT EXTI9_5_IRQHandler [WEAK]
EXPORT TIM1_BRK_IRQHandler [WEAK]
EXPORT TIM1_UP_IRQHandler [WEAK]
EXPORT TIM1_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM4_IRQHandler [WEAK]
EXPORT I2C1_EV_IRQHandler [WEAK]
EXPORT I2C1_ER_IRQHandler [WEAK]
EXPORT I2C2_EV_IRQHandler [WEAK]
EXPORT I2C2_ER_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_IRQHandler [WEAK]
EXPORT EXTI15_10_IRQHandler [WEAK]
EXPORT RTCAlarm_IRQHandler [WEAK]
EXPORT USBWakeUp_IRQHandler [WEAK]
EXPORT TIM8_BRK_IRQHandler [WEAK]
EXPORT TIM8_UP_IRQHandler [WEAK]
EXPORT TIM8_TRG_COM_IRQHandler [WEAK]
EXPORT TIM8_CC_IRQHandler [WEAK]
EXPORT ADC3_IRQHandler [WEAK]
EXPORT FSMC_IRQHandler [WEAK]
EXPORT SDIO_IRQHandler [WEAK]
EXPORT TIM5_IRQHandler [WEAK]
EXPORT SPI3_IRQHandler [WEAK]
EXPORT UART4_IRQHandler [WEAK]
EXPORT UART5_IRQHandler [WEAK]
EXPORT TIM6_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT DMA2_Channel1_IRQHandler [WEAK]
EXPORT DMA2_Channel2_IRQHandler [WEAK]
EXPORT DMA2_Channel3_IRQHandler [WEAK]
EXPORT DMA2_Channel4_5_IRQHandler [WEAK]
; All labels below alias the same address; the B . traps any unexpected IRQ.
WWDG_IRQHandler
PVD_IRQHandler
TAMPER_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_IRQHandler
EXTI0_IRQHandler
EXTI1_IRQHandler
EXTI2_IRQHandler
EXTI3_IRQHandler
EXTI4_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_IRQHandler
DMA1_Channel3_IRQHandler
DMA1_Channel4_IRQHandler
DMA1_Channel5_IRQHandler
DMA1_Channel6_IRQHandler
DMA1_Channel7_IRQHandler
ADC1_2_IRQHandler
USB_HP_CAN1_TX_IRQHandler
USB_LP_CAN1_RX0_IRQHandler
CAN1_RX1_IRQHandler
CAN1_SCE_IRQHandler
EXTI9_5_IRQHandler
TIM1_BRK_IRQHandler
TIM1_UP_IRQHandler
TIM1_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM4_IRQHandler
I2C1_EV_IRQHandler
I2C1_ER_IRQHandler
I2C2_EV_IRQHandler
I2C2_ER_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_IRQHandler
EXTI15_10_IRQHandler
RTCAlarm_IRQHandler
USBWakeUp_IRQHandler
TIM8_BRK_IRQHandler
TIM8_UP_IRQHandler
TIM8_TRG_COM_IRQHandler
TIM8_CC_IRQHandler
ADC3_IRQHandler
FSMC_IRQHandler
SDIO_IRQHandler
TIM5_IRQHandler
SPI3_IRQHandler
UART4_IRQHandler
UART5_IRQHandler
TIM6_IRQHandler
TIM7_IRQHandler
DMA2_Channel1_IRQHandler
DMA2_Channel2_IRQHandler
DMA2_Channel3_IRQHandler
DMA2_Channel4_5_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
; With MicroLIB, the C library consumes __initial_sp/__heap_base/__heap_limit
; directly. Otherwise __user_initial_stackheap returns the two-region layout
; to the ARM C library: R0 = heap base, R1 = stack top, R2 = heap limit,
; R3 = stack limit (bottom of stack).
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;******************* (C) COPYRIGHT 2011 STMicroelectronics *****END OF FILE*****
|
AAAAyl0n/Lambda0
| 13,590
|
1.Hardware/Screen/1.14寸方形屏/03-程序源码/07-1.14IPS显示屏STM32L151AC8T6_SPI例程/CORE/startup_stm32l151xba.s
|
;/******************** (C) COPYRIGHT 2017 STMicroelectronics ********************
;* File Name : startup_stm32l151xba.s
;* Author : MCD Application Team
;* Version : V2.2.1
;* Date : 21-April-2017
;* Description : STM32L151XBA Devices vector for MDK-ARM toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR
;* address.
;* - Configure the system clock
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the Cortex-M3 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;********************************************************************************
;*
;* COPYRIGHT(c) 2017 STMicroelectronics
;*
;* Redistribution and use in source and binary forms, with or without modification,
;* are permitted provided that the following conditions are met:
;* 1. Redistributions of source code must retain the above copyright notice,
;* this list of conditions and the following disclaimer.
;* 2. Redistributions in binary form must reproduce the above copyright notice,
;* this list of conditions and the following disclaimer in the documentation
;* and/or other materials provided with the distribution.
;* 3. Neither the name of STMicroelectronics nor the names of its contributors
;* may be used to endorse or promote products derived from this software
;* without specific prior written permission.
;*
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
;* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; Reserve a 1 KiB stack; __initial_sp labels its top (stack grows downward).
Stack_Size EQU 0x400
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; Reserve a 512 B heap; __heap_base/__heap_limit bound it for the C library.
Heap_Size EQU 0x200
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8 ; code preserves 8-byte stack alignment (AAPCS)
THUMB ; all following code is Thumb (Cortex-M has no ARM state)
; Vector Table Mapped to Address 0 at Reset
; Entry 0 is the initial MSP value; entries 1..15 are Cortex-M3 system
; exceptions; the remaining entries are the STM32L151xBA peripheral
; interrupts in NVIC position order. Order must not change.
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD MemManage_Handler ; MPU Fault Handler
DCD BusFault_Handler ; Bus Fault Handler
DCD UsageFault_Handler ; Usage Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD DebugMon_Handler ; Debug Monitor Handler
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_IRQHandler ; PVD through EXTI Line detect
DCD TAMPER_STAMP_IRQHandler ; Tamper and Time Stamp
DCD RTC_WKUP_IRQHandler ; RTC Wakeup
DCD FLASH_IRQHandler ; FLASH
DCD RCC_IRQHandler ; RCC
DCD EXTI0_IRQHandler ; EXTI Line 0
DCD EXTI1_IRQHandler ; EXTI Line 1
DCD EXTI2_IRQHandler ; EXTI Line 2
DCD EXTI3_IRQHandler ; EXTI Line 3
DCD EXTI4_IRQHandler ; EXTI Line 4
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_IRQHandler ; DMA1 Channel 2
DCD DMA1_Channel3_IRQHandler ; DMA1 Channel 3
DCD DMA1_Channel4_IRQHandler ; DMA1 Channel 4
DCD DMA1_Channel5_IRQHandler ; DMA1 Channel 5
DCD DMA1_Channel6_IRQHandler ; DMA1 Channel 6
DCD DMA1_Channel7_IRQHandler ; DMA1 Channel 7
DCD ADC1_IRQHandler ; ADC1
DCD USB_HP_IRQHandler ; USB High Priority
DCD USB_LP_IRQHandler ; USB Low Priority
DCD DAC_IRQHandler ; DAC
DCD COMP_IRQHandler ; COMP through EXTI Line
DCD EXTI9_5_IRQHandler ; EXTI Line 9..5
DCD 0 ; Reserved
DCD TIM9_IRQHandler ; TIM9
DCD TIM10_IRQHandler ; TIM10
DCD TIM11_IRQHandler ; TIM11
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM4_IRQHandler ; TIM4
DCD I2C1_EV_IRQHandler ; I2C1 Event
DCD I2C1_ER_IRQHandler ; I2C1 Error
DCD I2C2_EV_IRQHandler ; I2C2 Event
DCD I2C2_ER_IRQHandler ; I2C2 Error
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_IRQHandler ; USART3
DCD EXTI15_10_IRQHandler ; EXTI Line 15..10
DCD RTC_Alarm_IRQHandler ; RTC Alarm through EXTI Line
DCD USB_FS_WKUP_IRQHandler ; USB FS Wakeup from suspend
DCD TIM6_IRQHandler ; TIM6
DCD TIM7_IRQHandler ; TIM7
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler routine
; Entry point after reset: call SystemInit (clock/flash setup), then jump to
; the C library entry __main, which initializes .data/.bss and calls main().
; Never returns.
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
; Each system-exception stub spins forever (B .) and is exported WEAK so an
; application-defined handler of the same name overrides it at link time.
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
MemManage_Handler\
PROC
EXPORT MemManage_Handler [WEAK]
B .
ENDP
BusFault_Handler\
PROC
EXPORT BusFault_Handler [WEAK]
B .
ENDP
UsageFault_Handler\
PROC
EXPORT UsageFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
DebugMon_Handler\
PROC
EXPORT DebugMon_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
; Catch-all IRQ handler: every peripheral IRQ label is exported WEAK and all
; of them fall through to the same infinite loop unless overridden by the
; application.
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_IRQHandler [WEAK]
EXPORT TAMPER_STAMP_IRQHandler [WEAK]
EXPORT RTC_WKUP_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_IRQHandler [WEAK]
EXPORT EXTI0_IRQHandler [WEAK]
EXPORT EXTI1_IRQHandler [WEAK]
EXPORT EXTI2_IRQHandler [WEAK]
EXPORT EXTI3_IRQHandler [WEAK]
EXPORT EXTI4_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_IRQHandler [WEAK]
EXPORT DMA1_Channel3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_IRQHandler [WEAK]
EXPORT DMA1_Channel5_IRQHandler [WEAK]
EXPORT DMA1_Channel6_IRQHandler [WEAK]
EXPORT DMA1_Channel7_IRQHandler [WEAK]
EXPORT ADC1_IRQHandler [WEAK]
EXPORT USB_HP_IRQHandler [WEAK]
EXPORT USB_LP_IRQHandler [WEAK]
EXPORT DAC_IRQHandler [WEAK]
EXPORT COMP_IRQHandler [WEAK]
EXPORT EXTI9_5_IRQHandler [WEAK]
EXPORT TIM9_IRQHandler [WEAK]
EXPORT TIM10_IRQHandler [WEAK]
EXPORT TIM11_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM4_IRQHandler [WEAK]
EXPORT I2C1_EV_IRQHandler [WEAK]
EXPORT I2C1_ER_IRQHandler [WEAK]
EXPORT I2C2_EV_IRQHandler [WEAK]
EXPORT I2C2_ER_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_IRQHandler [WEAK]
EXPORT EXTI15_10_IRQHandler [WEAK]
EXPORT RTC_Alarm_IRQHandler [WEAK]
EXPORT USB_FS_WKUP_IRQHandler [WEAK]
EXPORT TIM6_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
; All labels below alias the same address; the B . traps any unexpected IRQ.
WWDG_IRQHandler
PVD_IRQHandler
TAMPER_STAMP_IRQHandler
RTC_WKUP_IRQHandler
FLASH_IRQHandler
RCC_IRQHandler
EXTI0_IRQHandler
EXTI1_IRQHandler
EXTI2_IRQHandler
EXTI3_IRQHandler
EXTI4_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_IRQHandler
DMA1_Channel3_IRQHandler
DMA1_Channel4_IRQHandler
DMA1_Channel5_IRQHandler
DMA1_Channel6_IRQHandler
DMA1_Channel7_IRQHandler
ADC1_IRQHandler
USB_HP_IRQHandler
USB_LP_IRQHandler
DAC_IRQHandler
COMP_IRQHandler
EXTI9_5_IRQHandler
TIM9_IRQHandler
TIM10_IRQHandler
TIM11_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM4_IRQHandler
I2C1_EV_IRQHandler
I2C1_ER_IRQHandler
I2C2_EV_IRQHandler
I2C2_ER_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_IRQHandler
EXTI15_10_IRQHandler
RTC_Alarm_IRQHandler
USB_FS_WKUP_IRQHandler
TIM6_IRQHandler
TIM7_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
; With MicroLIB, the C library consumes __initial_sp/__heap_base/__heap_limit
; directly. Otherwise __user_initial_stackheap returns the two-region layout
; to the ARM C library: R0 = heap base, R1 = stack top, R2 = heap limit,
; R3 = stack limit (bottom of stack).
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE*****
|
AAAAyl0n/Lambda0
| 12,458
|
1.Hardware/Screen/1.14寸方形屏/03-程序源码/04-1.14IPS显示屏STM32F103ZET6_SPI例程/CORE/startup_stm32f10x_md.s
|
;******************** (C) COPYRIGHT 2011 STMicroelectronics ********************
;* File Name : startup_stm32f10x_md.s
;* Author : MCD Application Team
;* Version : V3.5.0
;* Date : 11-March-2011
;* Description : STM32F10x Medium Density Devices vector table for MDK-ARM
;* toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Configure the clock system
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM3 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
; THE PRESENT FIRMWARE WHICH IS FOR GUIDANCE ONLY AIMS AT PROVIDING CUSTOMERS
; WITH CODING INFORMATION REGARDING THEIR PRODUCTS IN ORDER FOR THEM TO SAVE TIME.
; AS A RESULT, STMICROELECTRONICS SHALL NOT BE HELD LIABLE FOR ANY DIRECT,
; INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING FROM THE
; CONTENT OF SUCH FIRMWARE AND/OR THE USE MADE BY CUSTOMERS OF THE CODING
; INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS.
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; Reserve a 1 KiB stack; __initial_sp labels its top (stack grows downward).
Stack_Size EQU 0x00000400
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
; Reserve a 512 B heap; __heap_base/__heap_limit bound it for the C library.
Heap_Size EQU 0x00000200
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8 ; code preserves 8-byte stack alignment (AAPCS)
THUMB ; all following code is Thumb (Cortex-M has no ARM state)
; Vector Table Mapped to Address 0 at Reset
; Entry 0 is the initial MSP value; entries 1..15 are Cortex-M3 system
; exceptions; the remaining entries are the STM32F10x medium-density
; peripheral interrupts in NVIC position order. Order must not change.
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD MemManage_Handler ; MPU Fault Handler
DCD BusFault_Handler ; Bus Fault Handler
DCD UsageFault_Handler ; Usage Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD DebugMon_Handler ; Debug Monitor Handler
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_IRQHandler ; PVD through EXTI Line detect
DCD TAMPER_IRQHandler ; Tamper
DCD RTC_IRQHandler ; RTC
DCD FLASH_IRQHandler ; Flash
DCD RCC_IRQHandler ; RCC
DCD EXTI0_IRQHandler ; EXTI Line 0
DCD EXTI1_IRQHandler ; EXTI Line 1
DCD EXTI2_IRQHandler ; EXTI Line 2
DCD EXTI3_IRQHandler ; EXTI Line 3
DCD EXTI4_IRQHandler ; EXTI Line 4
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_IRQHandler ; DMA1 Channel 2
DCD DMA1_Channel3_IRQHandler ; DMA1 Channel 3
DCD DMA1_Channel4_IRQHandler ; DMA1 Channel 4
DCD DMA1_Channel5_IRQHandler ; DMA1 Channel 5
DCD DMA1_Channel6_IRQHandler ; DMA1 Channel 6
DCD DMA1_Channel7_IRQHandler ; DMA1 Channel 7
DCD ADC1_2_IRQHandler ; ADC1_2
DCD USB_HP_CAN1_TX_IRQHandler ; USB High Priority or CAN1 TX
DCD USB_LP_CAN1_RX0_IRQHandler ; USB Low Priority or CAN1 RX0
DCD CAN1_RX1_IRQHandler ; CAN1 RX1
DCD CAN1_SCE_IRQHandler ; CAN1 SCE
DCD EXTI9_5_IRQHandler ; EXTI Line 9..5
DCD TIM1_BRK_IRQHandler ; TIM1 Break
DCD TIM1_UP_IRQHandler ; TIM1 Update
DCD TIM1_TRG_COM_IRQHandler ; TIM1 Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM4_IRQHandler ; TIM4
DCD I2C1_EV_IRQHandler ; I2C1 Event
DCD I2C1_ER_IRQHandler ; I2C1 Error
DCD I2C2_EV_IRQHandler ; I2C2 Event
DCD I2C2_ER_IRQHandler ; I2C2 Error
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_IRQHandler ; USART3
DCD EXTI15_10_IRQHandler ; EXTI Line 15..10
DCD RTCAlarm_IRQHandler ; RTC Alarm through EXTI Line
DCD USBWakeUp_IRQHandler ; USB Wakeup from suspend
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler
; Entry point after reset: call SystemInit (clock/flash setup), then jump to
; the C library entry __main, which initializes .data/.bss and calls main().
; Never returns.
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
LDR R0, =SystemInit
BLX R0
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
; Each system-exception stub spins forever (B .) and is exported WEAK so an
; application-defined handler of the same name overrides it at link time.
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
MemManage_Handler\
PROC
EXPORT MemManage_Handler [WEAK]
B .
ENDP
BusFault_Handler\
PROC
EXPORT BusFault_Handler [WEAK]
B .
ENDP
UsageFault_Handler\
PROC
EXPORT UsageFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
DebugMon_Handler\
PROC
EXPORT DebugMon_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_IRQHandler [WEAK]
EXPORT TAMPER_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_IRQHandler [WEAK]
EXPORT EXTI0_IRQHandler [WEAK]
EXPORT EXTI1_IRQHandler [WEAK]
EXPORT EXTI2_IRQHandler [WEAK]
EXPORT EXTI3_IRQHandler [WEAK]
EXPORT EXTI4_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_IRQHandler [WEAK]
EXPORT DMA1_Channel3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_IRQHandler [WEAK]
EXPORT DMA1_Channel5_IRQHandler [WEAK]
EXPORT DMA1_Channel6_IRQHandler [WEAK]
EXPORT DMA1_Channel7_IRQHandler [WEAK]
EXPORT ADC1_2_IRQHandler [WEAK]
EXPORT USB_HP_CAN1_TX_IRQHandler [WEAK]
EXPORT USB_LP_CAN1_RX0_IRQHandler [WEAK]
EXPORT CAN1_RX1_IRQHandler [WEAK]
EXPORT CAN1_SCE_IRQHandler [WEAK]
EXPORT EXTI9_5_IRQHandler [WEAK]
EXPORT TIM1_BRK_IRQHandler [WEAK]
EXPORT TIM1_UP_IRQHandler [WEAK]
EXPORT TIM1_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM4_IRQHandler [WEAK]
EXPORT I2C1_EV_IRQHandler [WEAK]
EXPORT I2C1_ER_IRQHandler [WEAK]
EXPORT I2C2_EV_IRQHandler [WEAK]
EXPORT I2C2_ER_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_IRQHandler [WEAK]
EXPORT EXTI15_10_IRQHandler [WEAK]
EXPORT RTCAlarm_IRQHandler [WEAK]
EXPORT USBWakeUp_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_IRQHandler
TAMPER_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_IRQHandler
EXTI0_IRQHandler
EXTI1_IRQHandler
EXTI2_IRQHandler
EXTI3_IRQHandler
EXTI4_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_IRQHandler
DMA1_Channel3_IRQHandler
DMA1_Channel4_IRQHandler
DMA1_Channel5_IRQHandler
DMA1_Channel6_IRQHandler
DMA1_Channel7_IRQHandler
ADC1_2_IRQHandler
USB_HP_CAN1_TX_IRQHandler
USB_LP_CAN1_RX0_IRQHandler
CAN1_RX1_IRQHandler
CAN1_SCE_IRQHandler
EXTI9_5_IRQHandler
TIM1_BRK_IRQHandler
TIM1_UP_IRQHandler
TIM1_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM4_IRQHandler
I2C1_EV_IRQHandler
I2C1_ER_IRQHandler
I2C2_EV_IRQHandler
I2C2_ER_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_IRQHandler
EXTI15_10_IRQHandler
RTCAlarm_IRQHandler
USBWakeUp_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
; ARM C library callback (two-region memory model). Return contract:
;   R0 = heap base, R1 = stack top (initial SP),
;   R2 = heap limit, R3 = stack limit (lowest stack address).
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;******************* (C) COPYRIGHT 2011 STMicroelectronics *****END OF FILE*****
|
AAAAyl0n/Lambda0
| 15,145
|
1.Hardware/Screen/1.14寸方形屏/03-程序源码/04-1.14IPS显示屏STM32F103ZET6_SPI例程/CORE/startup_stm32f10x_hd.s
|
;******************** (C) COPYRIGHT 2011 STMicroelectronics ********************
;* File Name : startup_stm32f10x_hd.s
;* Author : MCD Application Team
;* Version : V3.5.0
;* Date : 11-March-2011
;* Description : STM32F10x High Density Devices vector table for MDK-ARM
;* toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Configure the clock system and also configure the external
;* SRAM mounted on STM3210E-EVAL board to be used as data
;* memory (optional, to be enabled by user)
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM3 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
; THE PRESENT FIRMWARE WHICH IS FOR GUIDANCE ONLY AIMS AT PROVIDING CUSTOMERS
; WITH CODING INFORMATION REGARDING THEIR PRODUCTS IN ORDER FOR THEM TO SAVE TIME.
; AS A RESULT, STMICROELECTRONICS SHALL NOT BE HELD LIABLE FOR ANY DIRECT,
; INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING FROM THE
; CONTENT OF SUCH FIRMWARE AND/OR THE USE MADE BY CUSTOMERS OF THE CODING
; INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS.
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x00000400
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x00000200
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD MemManage_Handler ; MPU Fault Handler
DCD BusFault_Handler ; Bus Fault Handler
DCD UsageFault_Handler ; Usage Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD DebugMon_Handler ; Debug Monitor Handler
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_IRQHandler ; PVD through EXTI Line detect
DCD TAMPER_IRQHandler ; Tamper
DCD RTC_IRQHandler ; RTC
DCD FLASH_IRQHandler ; Flash
DCD RCC_IRQHandler ; RCC
DCD EXTI0_IRQHandler ; EXTI Line 0
DCD EXTI1_IRQHandler ; EXTI Line 1
DCD EXTI2_IRQHandler ; EXTI Line 2
DCD EXTI3_IRQHandler ; EXTI Line 3
DCD EXTI4_IRQHandler ; EXTI Line 4
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_IRQHandler ; DMA1 Channel 2
DCD DMA1_Channel3_IRQHandler ; DMA1 Channel 3
DCD DMA1_Channel4_IRQHandler ; DMA1 Channel 4
DCD DMA1_Channel5_IRQHandler ; DMA1 Channel 5
DCD DMA1_Channel6_IRQHandler ; DMA1 Channel 6
DCD DMA1_Channel7_IRQHandler ; DMA1 Channel 7
DCD ADC1_2_IRQHandler ; ADC1 & ADC2
DCD USB_HP_CAN1_TX_IRQHandler ; USB High Priority or CAN1 TX
DCD USB_LP_CAN1_RX0_IRQHandler ; USB Low Priority or CAN1 RX0
DCD CAN1_RX1_IRQHandler ; CAN1 RX1
DCD CAN1_SCE_IRQHandler ; CAN1 SCE
DCD EXTI9_5_IRQHandler ; EXTI Line 9..5
DCD TIM1_BRK_IRQHandler ; TIM1 Break
DCD TIM1_UP_IRQHandler ; TIM1 Update
DCD TIM1_TRG_COM_IRQHandler ; TIM1 Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM4_IRQHandler ; TIM4
DCD I2C1_EV_IRQHandler ; I2C1 Event
DCD I2C1_ER_IRQHandler ; I2C1 Error
DCD I2C2_EV_IRQHandler ; I2C2 Event
DCD I2C2_ER_IRQHandler ; I2C2 Error
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_IRQHandler ; USART3
DCD EXTI15_10_IRQHandler ; EXTI Line 15..10
DCD RTCAlarm_IRQHandler ; RTC Alarm through EXTI Line
DCD USBWakeUp_IRQHandler ; USB Wakeup from suspend
DCD TIM8_BRK_IRQHandler ; TIM8 Break
DCD TIM8_UP_IRQHandler ; TIM8 Update
DCD TIM8_TRG_COM_IRQHandler ; TIM8 Trigger and Commutation
DCD TIM8_CC_IRQHandler ; TIM8 Capture Compare
DCD ADC3_IRQHandler ; ADC3
DCD FSMC_IRQHandler ; FSMC
DCD SDIO_IRQHandler ; SDIO
DCD TIM5_IRQHandler ; TIM5
DCD SPI3_IRQHandler ; SPI3
DCD UART4_IRQHandler ; UART4
DCD UART5_IRQHandler ; UART5
DCD TIM6_IRQHandler ; TIM6
DCD TIM7_IRQHandler ; TIM7
DCD DMA2_Channel1_IRQHandler ; DMA2 Channel1
DCD DMA2_Channel2_IRQHandler ; DMA2 Channel2
DCD DMA2_Channel3_IRQHandler ; DMA2 Channel3
DCD DMA2_Channel4_5_IRQHandler ; DMA2 Channel4 & Channel5
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
; Configure the clock system (and optional external SRAM on high-density
; parts) via SystemInit() before entering any C code.
LDR R0, =SystemInit
BLX R0
; Hand control to the C library entry __main (scatter-loads data, then
; calls main()); it does not return.
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
MemManage_Handler\
PROC
EXPORT MemManage_Handler [WEAK]
B .
ENDP
BusFault_Handler\
PROC
EXPORT BusFault_Handler [WEAK]
B .
ENDP
UsageFault_Handler\
PROC
EXPORT UsageFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
DebugMon_Handler\
PROC
EXPORT DebugMon_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_IRQHandler [WEAK]
EXPORT TAMPER_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_IRQHandler [WEAK]
EXPORT EXTI0_IRQHandler [WEAK]
EXPORT EXTI1_IRQHandler [WEAK]
EXPORT EXTI2_IRQHandler [WEAK]
EXPORT EXTI3_IRQHandler [WEAK]
EXPORT EXTI4_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_IRQHandler [WEAK]
EXPORT DMA1_Channel3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_IRQHandler [WEAK]
EXPORT DMA1_Channel5_IRQHandler [WEAK]
EXPORT DMA1_Channel6_IRQHandler [WEAK]
EXPORT DMA1_Channel7_IRQHandler [WEAK]
EXPORT ADC1_2_IRQHandler [WEAK]
EXPORT USB_HP_CAN1_TX_IRQHandler [WEAK]
EXPORT USB_LP_CAN1_RX0_IRQHandler [WEAK]
EXPORT CAN1_RX1_IRQHandler [WEAK]
EXPORT CAN1_SCE_IRQHandler [WEAK]
EXPORT EXTI9_5_IRQHandler [WEAK]
EXPORT TIM1_BRK_IRQHandler [WEAK]
EXPORT TIM1_UP_IRQHandler [WEAK]
EXPORT TIM1_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM4_IRQHandler [WEAK]
EXPORT I2C1_EV_IRQHandler [WEAK]
EXPORT I2C1_ER_IRQHandler [WEAK]
EXPORT I2C2_EV_IRQHandler [WEAK]
EXPORT I2C2_ER_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_IRQHandler [WEAK]
EXPORT EXTI15_10_IRQHandler [WEAK]
EXPORT RTCAlarm_IRQHandler [WEAK]
EXPORT USBWakeUp_IRQHandler [WEAK]
EXPORT TIM8_BRK_IRQHandler [WEAK]
EXPORT TIM8_UP_IRQHandler [WEAK]
EXPORT TIM8_TRG_COM_IRQHandler [WEAK]
EXPORT TIM8_CC_IRQHandler [WEAK]
EXPORT ADC3_IRQHandler [WEAK]
EXPORT FSMC_IRQHandler [WEAK]
EXPORT SDIO_IRQHandler [WEAK]
EXPORT TIM5_IRQHandler [WEAK]
EXPORT SPI3_IRQHandler [WEAK]
EXPORT UART4_IRQHandler [WEAK]
EXPORT UART5_IRQHandler [WEAK]
EXPORT TIM6_IRQHandler [WEAK]
EXPORT TIM7_IRQHandler [WEAK]
EXPORT DMA2_Channel1_IRQHandler [WEAK]
EXPORT DMA2_Channel2_IRQHandler [WEAK]
EXPORT DMA2_Channel3_IRQHandler [WEAK]
EXPORT DMA2_Channel4_5_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_IRQHandler
TAMPER_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_IRQHandler
EXTI0_IRQHandler
EXTI1_IRQHandler
EXTI2_IRQHandler
EXTI3_IRQHandler
EXTI4_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_IRQHandler
DMA1_Channel3_IRQHandler
DMA1_Channel4_IRQHandler
DMA1_Channel5_IRQHandler
DMA1_Channel6_IRQHandler
DMA1_Channel7_IRQHandler
ADC1_2_IRQHandler
USB_HP_CAN1_TX_IRQHandler
USB_LP_CAN1_RX0_IRQHandler
CAN1_RX1_IRQHandler
CAN1_SCE_IRQHandler
EXTI9_5_IRQHandler
TIM1_BRK_IRQHandler
TIM1_UP_IRQHandler
TIM1_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM4_IRQHandler
I2C1_EV_IRQHandler
I2C1_ER_IRQHandler
I2C2_EV_IRQHandler
I2C2_ER_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_IRQHandler
EXTI15_10_IRQHandler
RTCAlarm_IRQHandler
USBWakeUp_IRQHandler
TIM8_BRK_IRQHandler
TIM8_UP_IRQHandler
TIM8_TRG_COM_IRQHandler
TIM8_CC_IRQHandler
ADC3_IRQHandler
FSMC_IRQHandler
SDIO_IRQHandler
TIM5_IRQHandler
SPI3_IRQHandler
UART4_IRQHandler
UART5_IRQHandler
TIM6_IRQHandler
TIM7_IRQHandler
DMA2_Channel1_IRQHandler
DMA2_Channel2_IRQHandler
DMA2_Channel3_IRQHandler
DMA2_Channel4_5_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
; ARM C library callback (two-region memory model). Return contract:
;   R0 = heap base, R1 = stack top (initial SP),
;   R2 = heap limit, R3 = stack limit (lowest stack address).
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;******************* (C) COPYRIGHT 2011 STMicroelectronics *****END OF FILE*****
|
AAAAyl0n/Lambda0
| 12,458
|
1.Hardware/Screen/1.28英寸GC9A01圆形TFT资料/代码/STM32/STM32F10x/ASM/startup_stm32f10x_md.s
|
;******************** (C) COPYRIGHT 2011 STMicroelectronics ********************
;* File Name : startup_stm32f10x_md.s
;* Author : MCD Application Team
;* Version : V3.5.0
;* Date : 11-March-2011
;* Description : STM32F10x Medium Density Devices vector table for MDK-ARM
;* toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Configure the clock system
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM3 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
; THE PRESENT FIRMWARE WHICH IS FOR GUIDANCE ONLY AIMS AT PROVIDING CUSTOMERS
; WITH CODING INFORMATION REGARDING THEIR PRODUCTS IN ORDER FOR THEM TO SAVE TIME.
; AS A RESULT, STMICROELECTRONICS SHALL NOT BE HELD LIABLE FOR ANY DIRECT,
; INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING FROM THE
; CONTENT OF SUCH FIRMWARE AND/OR THE USE MADE BY CUSTOMERS OF THE CODING
; INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS.
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size EQU 0x00000400
AREA STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem SPACE Stack_Size
__initial_sp
; <h> Heap Configuration
; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Heap_Size EQU 0x00000200
AREA HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem SPACE Heap_Size
__heap_limit
PRESERVE8
THUMB
; Vector Table Mapped to Address 0 at Reset
AREA RESET, DATA, READONLY
EXPORT __Vectors
EXPORT __Vectors_End
EXPORT __Vectors_Size
__Vectors DCD __initial_sp ; Top of Stack
DCD Reset_Handler ; Reset Handler
DCD NMI_Handler ; NMI Handler
DCD HardFault_Handler ; Hard Fault Handler
DCD MemManage_Handler ; MPU Fault Handler
DCD BusFault_Handler ; Bus Fault Handler
DCD UsageFault_Handler ; Usage Fault Handler
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD 0 ; Reserved
DCD SVC_Handler ; SVCall Handler
DCD DebugMon_Handler ; Debug Monitor Handler
DCD 0 ; Reserved
DCD PendSV_Handler ; PendSV Handler
DCD SysTick_Handler ; SysTick Handler
; External Interrupts
DCD WWDG_IRQHandler ; Window Watchdog
DCD PVD_IRQHandler ; PVD through EXTI Line detect
DCD TAMPER_IRQHandler ; Tamper
DCD RTC_IRQHandler ; RTC
DCD FLASH_IRQHandler ; Flash
DCD RCC_IRQHandler ; RCC
DCD EXTI0_IRQHandler ; EXTI Line 0
DCD EXTI1_IRQHandler ; EXTI Line 1
DCD EXTI2_IRQHandler ; EXTI Line 2
DCD EXTI3_IRQHandler ; EXTI Line 3
DCD EXTI4_IRQHandler ; EXTI Line 4
DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1
DCD DMA1_Channel2_IRQHandler ; DMA1 Channel 2
DCD DMA1_Channel3_IRQHandler ; DMA1 Channel 3
DCD DMA1_Channel4_IRQHandler ; DMA1 Channel 4
DCD DMA1_Channel5_IRQHandler ; DMA1 Channel 5
DCD DMA1_Channel6_IRQHandler ; DMA1 Channel 6
DCD DMA1_Channel7_IRQHandler ; DMA1 Channel 7
DCD ADC1_2_IRQHandler ; ADC1_2
DCD USB_HP_CAN1_TX_IRQHandler ; USB High Priority or CAN1 TX
DCD USB_LP_CAN1_RX0_IRQHandler ; USB Low Priority or CAN1 RX0
DCD CAN1_RX1_IRQHandler ; CAN1 RX1
DCD CAN1_SCE_IRQHandler ; CAN1 SCE
DCD EXTI9_5_IRQHandler ; EXTI Line 9..5
DCD TIM1_BRK_IRQHandler ; TIM1 Break
DCD TIM1_UP_IRQHandler ; TIM1 Update
DCD TIM1_TRG_COM_IRQHandler ; TIM1 Trigger and Commutation
DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare
DCD TIM2_IRQHandler ; TIM2
DCD TIM3_IRQHandler ; TIM3
DCD TIM4_IRQHandler ; TIM4
DCD I2C1_EV_IRQHandler ; I2C1 Event
DCD I2C1_ER_IRQHandler ; I2C1 Error
DCD I2C2_EV_IRQHandler ; I2C2 Event
DCD I2C2_ER_IRQHandler ; I2C2 Error
DCD SPI1_IRQHandler ; SPI1
DCD SPI2_IRQHandler ; SPI2
DCD USART1_IRQHandler ; USART1
DCD USART2_IRQHandler ; USART2
DCD USART3_IRQHandler ; USART3
DCD EXTI15_10_IRQHandler ; EXTI Line 15..10
DCD RTCAlarm_IRQHandler ; RTC Alarm through EXTI Line
DCD USBWakeUp_IRQHandler ; USB Wakeup from suspend
__Vectors_End
__Vectors_Size EQU __Vectors_End - __Vectors
AREA |.text|, CODE, READONLY
; Reset handler
Reset_Handler PROC
EXPORT Reset_Handler [WEAK]
IMPORT __main
IMPORT SystemInit
; Run SystemInit() first: clocks/PLL must be configured before C startup.
LDR R0, =SystemInit
BLX R0
; Branch to the C library entry __main (initialises data sections, then
; calls main()); it never returns.
LDR R0, =__main
BX R0
ENDP
; Dummy Exception Handlers (infinite loops which can be modified)
NMI_Handler PROC
EXPORT NMI_Handler [WEAK]
B .
ENDP
HardFault_Handler\
PROC
EXPORT HardFault_Handler [WEAK]
B .
ENDP
MemManage_Handler\
PROC
EXPORT MemManage_Handler [WEAK]
B .
ENDP
BusFault_Handler\
PROC
EXPORT BusFault_Handler [WEAK]
B .
ENDP
UsageFault_Handler\
PROC
EXPORT UsageFault_Handler [WEAK]
B .
ENDP
SVC_Handler PROC
EXPORT SVC_Handler [WEAK]
B .
ENDP
DebugMon_Handler\
PROC
EXPORT DebugMon_Handler [WEAK]
B .
ENDP
PendSV_Handler PROC
EXPORT PendSV_Handler [WEAK]
B .
ENDP
SysTick_Handler PROC
EXPORT SysTick_Handler [WEAK]
B .
ENDP
Default_Handler PROC
EXPORT WWDG_IRQHandler [WEAK]
EXPORT PVD_IRQHandler [WEAK]
EXPORT TAMPER_IRQHandler [WEAK]
EXPORT RTC_IRQHandler [WEAK]
EXPORT FLASH_IRQHandler [WEAK]
EXPORT RCC_IRQHandler [WEAK]
EXPORT EXTI0_IRQHandler [WEAK]
EXPORT EXTI1_IRQHandler [WEAK]
EXPORT EXTI2_IRQHandler [WEAK]
EXPORT EXTI3_IRQHandler [WEAK]
EXPORT EXTI4_IRQHandler [WEAK]
EXPORT DMA1_Channel1_IRQHandler [WEAK]
EXPORT DMA1_Channel2_IRQHandler [WEAK]
EXPORT DMA1_Channel3_IRQHandler [WEAK]
EXPORT DMA1_Channel4_IRQHandler [WEAK]
EXPORT DMA1_Channel5_IRQHandler [WEAK]
EXPORT DMA1_Channel6_IRQHandler [WEAK]
EXPORT DMA1_Channel7_IRQHandler [WEAK]
EXPORT ADC1_2_IRQHandler [WEAK]
EXPORT USB_HP_CAN1_TX_IRQHandler [WEAK]
EXPORT USB_LP_CAN1_RX0_IRQHandler [WEAK]
EXPORT CAN1_RX1_IRQHandler [WEAK]
EXPORT CAN1_SCE_IRQHandler [WEAK]
EXPORT EXTI9_5_IRQHandler [WEAK]
EXPORT TIM1_BRK_IRQHandler [WEAK]
EXPORT TIM1_UP_IRQHandler [WEAK]
EXPORT TIM1_TRG_COM_IRQHandler [WEAK]
EXPORT TIM1_CC_IRQHandler [WEAK]
EXPORT TIM2_IRQHandler [WEAK]
EXPORT TIM3_IRQHandler [WEAK]
EXPORT TIM4_IRQHandler [WEAK]
EXPORT I2C1_EV_IRQHandler [WEAK]
EXPORT I2C1_ER_IRQHandler [WEAK]
EXPORT I2C2_EV_IRQHandler [WEAK]
EXPORT I2C2_ER_IRQHandler [WEAK]
EXPORT SPI1_IRQHandler [WEAK]
EXPORT SPI2_IRQHandler [WEAK]
EXPORT USART1_IRQHandler [WEAK]
EXPORT USART2_IRQHandler [WEAK]
EXPORT USART3_IRQHandler [WEAK]
EXPORT EXTI15_10_IRQHandler [WEAK]
EXPORT RTCAlarm_IRQHandler [WEAK]
EXPORT USBWakeUp_IRQHandler [WEAK]
WWDG_IRQHandler
PVD_IRQHandler
TAMPER_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_IRQHandler
EXTI0_IRQHandler
EXTI1_IRQHandler
EXTI2_IRQHandler
EXTI3_IRQHandler
EXTI4_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_IRQHandler
DMA1_Channel3_IRQHandler
DMA1_Channel4_IRQHandler
DMA1_Channel5_IRQHandler
DMA1_Channel6_IRQHandler
DMA1_Channel7_IRQHandler
ADC1_2_IRQHandler
USB_HP_CAN1_TX_IRQHandler
USB_LP_CAN1_RX0_IRQHandler
CAN1_RX1_IRQHandler
CAN1_SCE_IRQHandler
EXTI9_5_IRQHandler
TIM1_BRK_IRQHandler
TIM1_UP_IRQHandler
TIM1_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM4_IRQHandler
I2C1_EV_IRQHandler
I2C1_ER_IRQHandler
I2C2_EV_IRQHandler
I2C2_ER_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_IRQHandler
EXTI15_10_IRQHandler
RTCAlarm_IRQHandler
USBWakeUp_IRQHandler
B .
ENDP
ALIGN
;*******************************************************************************
; User Stack and Heap initialization
;*******************************************************************************
IF :DEF:__MICROLIB
EXPORT __initial_sp
EXPORT __heap_base
EXPORT __heap_limit
ELSE
IMPORT __use_two_region_memory
EXPORT __user_initial_stackheap
; ARM C library callback (two-region memory model). Return contract:
;   R0 = heap base, R1 = stack top (initial SP),
;   R2 = heap limit, R3 = stack limit (lowest stack address).
__user_initial_stackheap
LDR R0, = Heap_Mem
LDR R1, =(Stack_Mem + Stack_Size)
LDR R2, = (Heap_Mem + Heap_Size)
LDR R3, = Stack_Mem
BX LR
ALIGN
ENDIF
END
;******************* (C) COPYRIGHT 2011 STMicroelectronics *****END OF FILE*****
|
AAAAyl0n/Lambda0
| 10,004
|
1.Hardware/Screen/1.28英寸GC9A01圆形TFT资料/代码/STM32/STM32F10x/ASM/cortexm3_macro.s
|
;******************** (C) COPYRIGHT 2008 STMicroelectronics ********************
;* File Name : cortexm3_macro.s
;* Author : MCD Application Team
;* Version : V2.0.3
;* Date : 09/22/2008
;* Description : Instruction wrappers for special Cortex-M3 instructions
;* to be used with RVMDK toolchain.
;*******************************************************************************
; THE PRESENT FIRMWARE WHICH IS FOR GUIDANCE ONLY AIMS AT PROVIDING CUSTOMERS
; WITH CODING INFORMATION REGARDING THEIR PRODUCTS IN ORDER FOR THEM TO SAVE TIME.
; AS A RESULT, STMICROELECTRONICS SHALL NOT BE HELD LIABLE FOR ANY DIRECT,
; INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING FROM THE
; CONTENT OF SUCH FIRMWARE AND/OR THE USE MADE BY CUSTOMERS OF THE CODING
; INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS.
;*******************************************************************************
THUMB
REQUIRE8
PRESERVE8
AREA |.text|, CODE, READONLY, ALIGN=2
; Exported functions
EXPORT __WFI
EXPORT __WFE
EXPORT __SEV
EXPORT __ISB
EXPORT __DSB
EXPORT __DMB
EXPORT __SVC
EXPORT __MRS_CONTROL
EXPORT __MSR_CONTROL
EXPORT __MRS_PSP
EXPORT __MSR_PSP
EXPORT __MRS_MSP
EXPORT __MSR_MSP
EXPORT __RESETPRIMASK
EXPORT __SETPRIMASK
EXPORT __READ_PRIMASK
EXPORT __RESETFAULTMASK
EXPORT __SETFAULTMASK
EXPORT __READ_FAULTMASK
EXPORT __BASEPRICONFIG
EXPORT __GetBASEPRI
EXPORT __REV_HalfWord
EXPORT __REV_Word
;*******************************************************************************
; Function Name : __WFI
; Description : Assembler function for the WFI instruction.
; Input : None
; Return : None
;*******************************************************************************
__WFI
WFI
BX r14
;*******************************************************************************
; Function Name : __WFE
; Description : Assembler function for the WFE instruction.
; Input : None
; Return : None
;*******************************************************************************
__WFE
WFE
BX r14
;*******************************************************************************
; Function Name : __SEV
; Description : Assembler function for the SEV instruction.
; Input : None
; Return : None
;*******************************************************************************
__SEV
SEV
BX r14
;*******************************************************************************
; Function Name : __ISB
; Description : Assembler function for the ISB instruction.
; Input : None
; Return : None
;*******************************************************************************
__ISB
ISB
BX r14
;*******************************************************************************
; Function Name : __DSB
; Description : Assembler function for the DSB instruction.
; Input : None
; Return : None
;*******************************************************************************
__DSB
DSB
BX r14
;*******************************************************************************
; Function Name : __DMB
; Description : Assembler function for the DMB instruction.
; Input : None
; Return : None
;*******************************************************************************
__DMB
DMB
BX r14
;*******************************************************************************
; Function Name : __SVC
; Description : Assembler function for the SVC instruction.
; Input : None
; Return : None
;*******************************************************************************
__SVC
; Raises an SVCall exception with hard-coded immediate #0x01; the handler
; can recover this number from the stacked PC if it needs to dispatch.
SVC 0x01
BX r14
;*******************************************************************************
; Function Name : __MRS_CONTROL
; Description : Assembler function for the MRS instruction.
; Input : None
; Return : - r0 : Cortex-M3 CONTROL register value.
;*******************************************************************************
__MRS_CONTROL
MRS r0, CONTROL
BX r14
;*******************************************************************************
; Function Name : __MSR_CONTROL
; Description : Assembler function for the MSR instruction.
; Input : - r0 : Cortex-M3 CONTROL register new value.
; Return : None
;*******************************************************************************
__MSR_CONTROL
MSR CONTROL, r0
; ISB is required after writing CONTROL so the change (e.g. stack pointer
; selection / privilege level) takes effect before subsequent instructions.
ISB
BX r14
;*******************************************************************************
; Function Name : __MRS_PSP
; Description : Assembler function for the MRS instruction.
; Input : None
; Return : - r0 : Process Stack value.
;*******************************************************************************
__MRS_PSP
MRS r0, PSP
BX r14
;*******************************************************************************
; Function Name : __MSR_PSP
; Description : Assembler function for the MSR instruction.
; Input : - r0 : Process Stack new value.
; Return : None
;*******************************************************************************
__MSR_PSP
MSR PSP, r0 ; set Process Stack value
BX r14
;*******************************************************************************
; Function Name : __MRS_MSP
; Description : Assembler function for the MRS instruction.
; Input : None
; Return : - r0 : Main Stack value.
;*******************************************************************************
__MRS_MSP
MRS r0, MSP
BX r14
;*******************************************************************************
; Function Name : __MSR_MSP
; Description : Assembler function for the MSR instruction.
; Input : - r0 : Main Stack new value.
; Return : None
;*******************************************************************************
__MSR_MSP
MSR MSP, r0 ; set Main Stack value
BX r14
;*******************************************************************************
; Function Name : __RESETPRIMASK
; Description : Assembler function to reset the PRIMASK.
; Input : None
; Return : None
;*******************************************************************************
__RESETPRIMASK
CPSIE i
BX r14
;*******************************************************************************
; Function Name : __SETPRIMASK
; Description : Assembler function to set the PRIMASK.
; Input : None
; Return : None
;*******************************************************************************
__SETPRIMASK
CPSID i
BX r14
;*******************************************************************************
; Function Name : __READ_PRIMASK
; Description : Assembler function to get the PRIMASK value.
; Input : None
; Return : - r0 : PRIMASK register value
;*******************************************************************************
__READ_PRIMASK
MRS r0, PRIMASK
BX r14
;*******************************************************************************
; Function Name : __SETFAULTMASK
; Description : Assembler function to set the FAULTMASK.
; Input : None
; Return : None
;*******************************************************************************
__SETFAULTMASK
CPSID f
BX r14
;*******************************************************************************
; Function Name : __RESETFAULTMASK
; Description : Assembler function to reset the FAULTMASK.
; Input : None
; Return : None
;*******************************************************************************
__RESETFAULTMASK
CPSIE f
BX r14
;*******************************************************************************
; Function Name : __READ_FAULTMASK
; Description : Assembler function to get the FAULTMASK value.
; Input : None
; Return : - r0 : FAULTMASK register value
;*******************************************************************************
__READ_FAULTMASK
MRS r0, FAULTMASK
BX r14
;*******************************************************************************
; Function Name : __BASEPRICONFIG
; Description : Assembler function to set the Base Priority.
; Input : - r0 : Base Priority new value
; Return : None
;*******************************************************************************
__BASEPRICONFIG
MSR BASEPRI, r0
BX r14
;*******************************************************************************
; Function Name : __GetBASEPRI
; Description : Assembler function to get the Base Priority value.
; Input : None
; Return : - r0 : Base Priority value
;*******************************************************************************
__GetBASEPRI
; NOTE(review): MRS with BASEPRI_MAX reads the same value as BASEPRI
; (BASEPRI_MAX only differs for MSR writes, where it is a conditional
; write) - so this does return the current Base Priority as documented.
MRS r0, BASEPRI_MAX
BX r14
;*******************************************************************************
; Function Name : __REV_HalfWord
; Description : Reverses the byte order in HalfWord(16-bit) input variable.
; Input : - r0 : specifies the input variable
; Return            : - r0 : holds the variable value after byte reversing.
;*******************************************************************************
__REV_HalfWord
REV16 r0, r0
BX r14
;*******************************************************************************
; Function Name : __REV_Word
; Description : Reverses the byte order in Word(32-bit) input variable.
; Input : - r0 : specifies the input variable
; Return            : - r0 : holds the variable value after byte reversing.
;*******************************************************************************
__REV_Word
REV r0, r0
BX r14
END
;******************* (C) COPYRIGHT 2008 STMicroelectronics *****END OF FILE*****
|
AAAAyl0n/Lambda0
| 12,079
|
1.Hardware/Screen/1.28英寸GC9A01圆形TFT资料/代码/STM32/STM32F10x/ASM/startup_stm32f10x_ld.s
|
;******************** (C) COPYRIGHT 2011 STMicroelectronics ********************
;* File Name : startup_stm32f10x_ld.s
;* Author : MCD Application Team
;* Version : V3.5.0
;* Date : 11-March-2011
;* Description : STM32F10x Low Density Devices vector table for MDK-ARM
;* toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Configure the clock system
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM3 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
; THE PRESENT FIRMWARE WHICH IS FOR GUIDANCE ONLY AIMS AT PROVIDING CUSTOMERS
; WITH CODING INFORMATION REGARDING THEIR PRODUCTS IN ORDER FOR THEM TO SAVE TIME.
; AS A RESULT, STMICROELECTRONICS SHALL NOT BE HELD LIABLE FOR ANY DIRECT,
; INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING FROM THE
; CONTENT OF SUCH FIRMWARE AND/OR THE USE MADE BY CUSTOMERS OF THE CODING
; INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS.
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size      EQU     0x00000400              ; 1 KiB main stack -- tune per application

                AREA    STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem       SPACE   Stack_Size
__initial_sp                                    ; top of stack; loaded into MSP from vector 0

; <h> Heap Configuration
;   <o>  Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>

Heap_Size       EQU     0x00000200              ; 512 B heap for the C library allocator

                AREA    HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem        SPACE   Heap_Size
__heap_limit

                PRESERVE8                       ; keep 8-byte stack alignment (AAPCS requirement)
                THUMB                           ; Cortex-M3 executes Thumb-2 code only
; Vector Table Mapped to Address 0 at Reset
; Entry 0 is the initial MSP value, entry 1 the reset vector; the remaining
; entries are exception and peripheral interrupt handler addresses in the
; order fixed by the Cortex-M3 NVIC for STM32F10x Low Density devices.
                AREA    RESET, DATA, READONLY
                EXPORT  __Vectors
                EXPORT  __Vectors_End
                EXPORT  __Vectors_Size

__Vectors       DCD     __initial_sp               ; Top of Stack
                DCD     Reset_Handler              ; Reset Handler
                DCD     NMI_Handler                ; NMI Handler
                DCD     HardFault_Handler          ; Hard Fault Handler
                DCD     MemManage_Handler          ; MPU Fault Handler
                DCD     BusFault_Handler           ; Bus Fault Handler
                DCD     UsageFault_Handler         ; Usage Fault Handler
                DCD     0                          ; Reserved
                DCD     0                          ; Reserved
                DCD     0                          ; Reserved
                DCD     0                          ; Reserved
                DCD     SVC_Handler                ; SVCall Handler
                DCD     DebugMon_Handler           ; Debug Monitor Handler
                DCD     0                          ; Reserved
                DCD     PendSV_Handler             ; PendSV Handler
                DCD     SysTick_Handler            ; SysTick Handler

                ; External Interrupts
                DCD     WWDG_IRQHandler            ; Window Watchdog
                DCD     PVD_IRQHandler             ; PVD through EXTI Line detect
                DCD     TAMPER_IRQHandler          ; Tamper
                DCD     RTC_IRQHandler             ; RTC
                DCD     FLASH_IRQHandler           ; Flash
                DCD     RCC_IRQHandler             ; RCC
                DCD     EXTI0_IRQHandler           ; EXTI Line 0
                DCD     EXTI1_IRQHandler           ; EXTI Line 1
                DCD     EXTI2_IRQHandler           ; EXTI Line 2
                DCD     EXTI3_IRQHandler           ; EXTI Line 3
                DCD     EXTI4_IRQHandler           ; EXTI Line 4
                DCD     DMA1_Channel1_IRQHandler   ; DMA1 Channel 1
                DCD     DMA1_Channel2_IRQHandler   ; DMA1 Channel 2
                DCD     DMA1_Channel3_IRQHandler   ; DMA1 Channel 3
                DCD     DMA1_Channel4_IRQHandler   ; DMA1 Channel 4
                DCD     DMA1_Channel5_IRQHandler   ; DMA1 Channel 5
                DCD     DMA1_Channel6_IRQHandler   ; DMA1 Channel 6
                DCD     DMA1_Channel7_IRQHandler   ; DMA1 Channel 7
                DCD     ADC1_2_IRQHandler          ; ADC1_2
                DCD     USB_HP_CAN1_TX_IRQHandler  ; USB High Priority or CAN1 TX
                DCD     USB_LP_CAN1_RX0_IRQHandler ; USB Low Priority or CAN1 RX0
                DCD     CAN1_RX1_IRQHandler        ; CAN1 RX1
                DCD     CAN1_SCE_IRQHandler        ; CAN1 SCE
                DCD     EXTI9_5_IRQHandler         ; EXTI Line 9..5
                DCD     TIM1_BRK_IRQHandler        ; TIM1 Break
                DCD     TIM1_UP_IRQHandler         ; TIM1 Update
                DCD     TIM1_TRG_COM_IRQHandler    ; TIM1 Trigger and Commutation
                DCD     TIM1_CC_IRQHandler         ; TIM1 Capture Compare
                DCD     TIM2_IRQHandler            ; TIM2
                DCD     TIM3_IRQHandler            ; TIM3
                DCD     0                          ; Reserved
                DCD     I2C1_EV_IRQHandler         ; I2C1 Event
                DCD     I2C1_ER_IRQHandler         ; I2C1 Error
                DCD     0                          ; Reserved
                DCD     0                          ; Reserved
                DCD     SPI1_IRQHandler            ; SPI1
                DCD     0                          ; Reserved
                DCD     USART1_IRQHandler          ; USART1
                DCD     USART2_IRQHandler          ; USART2
                DCD     0                          ; Reserved
                DCD     EXTI15_10_IRQHandler       ; EXTI Line 15..10
                DCD     RTCAlarm_IRQHandler        ; RTC Alarm through EXTI Line
                DCD     USBWakeUp_IRQHandler       ; USB Wakeup from suspend
__Vectors_End

__Vectors_Size  EQU     __Vectors_End - __Vectors  ; table size in bytes
; Code section
                AREA    |.text|, CODE, READONLY

; Reset handler routine: calls SystemInit (clock/flash configuration) first,
; then jumps to the C library entry __main, which eventually calls main().
; BX to __main never returns.
Reset_Handler   PROC
                EXPORT  Reset_Handler             [WEAK]
                IMPORT  __main
                IMPORT  SystemInit
                LDR     R0, =SystemInit
                BLX     R0                        ; SystemInit()
                LDR     R0, =__main
                BX      R0                        ; hand off to the C runtime
                ENDP

; Dummy Exception Handlers (infinite loops which can be modified).
; Each is exported [WEAK], so an application-defined symbol of the same
; name overrides the stub at link time.
NMI_Handler     PROC
                EXPORT  NMI_Handler               [WEAK]
                B       .                         ; trap: spin here forever
                ENDP
HardFault_Handler\
                PROC
                EXPORT  HardFault_Handler         [WEAK]
                B       .
                ENDP
MemManage_Handler\
                PROC
                EXPORT  MemManage_Handler         [WEAK]
                B       .
                ENDP
BusFault_Handler\
                PROC
                EXPORT  BusFault_Handler          [WEAK]
                B       .
                ENDP
UsageFault_Handler\
                PROC
                EXPORT  UsageFault_Handler        [WEAK]
                B       .
                ENDP
SVC_Handler     PROC
                EXPORT  SVC_Handler               [WEAK]
                B       .
                ENDP
DebugMon_Handler\
                PROC
                EXPORT  DebugMon_Handler          [WEAK]
                B       .
                ENDP
PendSV_Handler  PROC
                EXPORT  PendSV_Handler            [WEAK]
                B       .
                ENDP
SysTick_Handler PROC
                EXPORT  SysTick_Handler           [WEAK]
                B       .
                ENDP

; Default_Handler: single shared trap for all peripheral interrupts.
; Every IRQ label below aliases the same infinite loop; each is [WEAK] so
; the application can override any individual handler by name.
Default_Handler PROC

                EXPORT  WWDG_IRQHandler           [WEAK]
                EXPORT  PVD_IRQHandler            [WEAK]
                EXPORT  TAMPER_IRQHandler         [WEAK]
                EXPORT  RTC_IRQHandler            [WEAK]
                EXPORT  FLASH_IRQHandler          [WEAK]
                EXPORT  RCC_IRQHandler            [WEAK]
                EXPORT  EXTI0_IRQHandler          [WEAK]
                EXPORT  EXTI1_IRQHandler          [WEAK]
                EXPORT  EXTI2_IRQHandler          [WEAK]
                EXPORT  EXTI3_IRQHandler          [WEAK]
                EXPORT  EXTI4_IRQHandler          [WEAK]
                EXPORT  DMA1_Channel1_IRQHandler  [WEAK]
                EXPORT  DMA1_Channel2_IRQHandler  [WEAK]
                EXPORT  DMA1_Channel3_IRQHandler  [WEAK]
                EXPORT  DMA1_Channel4_IRQHandler  [WEAK]
                EXPORT  DMA1_Channel5_IRQHandler  [WEAK]
                EXPORT  DMA1_Channel6_IRQHandler  [WEAK]
                EXPORT  DMA1_Channel7_IRQHandler  [WEAK]
                EXPORT  ADC1_2_IRQHandler         [WEAK]
                EXPORT  USB_HP_CAN1_TX_IRQHandler [WEAK]
                EXPORT  USB_LP_CAN1_RX0_IRQHandler [WEAK]
                EXPORT  CAN1_RX1_IRQHandler       [WEAK]
                EXPORT  CAN1_SCE_IRQHandler       [WEAK]
                EXPORT  EXTI9_5_IRQHandler        [WEAK]
                EXPORT  TIM1_BRK_IRQHandler       [WEAK]
                EXPORT  TIM1_UP_IRQHandler        [WEAK]
                EXPORT  TIM1_TRG_COM_IRQHandler   [WEAK]
                EXPORT  TIM1_CC_IRQHandler        [WEAK]
                EXPORT  TIM2_IRQHandler           [WEAK]
                EXPORT  TIM3_IRQHandler           [WEAK]
                EXPORT  I2C1_EV_IRQHandler        [WEAK]
                EXPORT  I2C1_ER_IRQHandler        [WEAK]
                EXPORT  SPI1_IRQHandler           [WEAK]
                EXPORT  USART1_IRQHandler         [WEAK]
                EXPORT  USART2_IRQHandler         [WEAK]
                EXPORT  EXTI15_10_IRQHandler      [WEAK]
                EXPORT  RTCAlarm_IRQHandler       [WEAK]
                EXPORT  USBWakeUp_IRQHandler      [WEAK]

; All of the labels below share the single B . loop that follows them.
WWDG_IRQHandler
PVD_IRQHandler
TAMPER_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_IRQHandler
EXTI0_IRQHandler
EXTI1_IRQHandler
EXTI2_IRQHandler
EXTI3_IRQHandler
EXTI4_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_IRQHandler
DMA1_Channel3_IRQHandler
DMA1_Channel4_IRQHandler
DMA1_Channel5_IRQHandler
DMA1_Channel6_IRQHandler
DMA1_Channel7_IRQHandler
ADC1_2_IRQHandler
USB_HP_CAN1_TX_IRQHandler
USB_LP_CAN1_RX0_IRQHandler
CAN1_RX1_IRQHandler
CAN1_SCE_IRQHandler
EXTI9_5_IRQHandler
TIM1_BRK_IRQHandler
TIM1_UP_IRQHandler
TIM1_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
I2C1_EV_IRQHandler
I2C1_ER_IRQHandler
SPI1_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
EXTI15_10_IRQHandler
RTCAlarm_IRQHandler
USBWakeUp_IRQHandler
                B       .                         ; unhandled interrupt: spin
                ENDP
; Pad to the next alignment boundary before the stack/heap helper code.
                ALIGN

;*******************************************************************************
; User Stack and Heap initialization
; With MicroLIB the stack/heap symbols are consumed directly; otherwise the
; standard C library calls __user_initial_stackheap to obtain the two regions.
;*******************************************************************************
                IF      :DEF:__MICROLIB

                EXPORT  __initial_sp
                EXPORT  __heap_base
                EXPORT  __heap_limit

                ELSE

                IMPORT  __use_two_region_memory   ; select two-region memory model
                EXPORT  __user_initial_stackheap

; Returns: R0 = heap base, R1 = stack top, R2 = heap limit, R3 = stack limit.
__user_initial_stackheap

                LDR     R0, = Heap_Mem
                LDR     R1, =(Stack_Mem + Stack_Size)
                LDR     R2, = (Heap_Mem + Heap_Size)
                LDR     R3, = Stack_Mem
                BX      LR

                ALIGN

                ENDIF

                END
;******************* (C) COPYRIGHT 2011 STMicroelectronics *****END OF FILE*****
|
AAAAyl0n/Lambda0
| 13,753
|
1.Hardware/Screen/1.28英寸GC9A01圆形TFT资料/代码/STM32/STM32F10x/ASM/startup_stm32f10x_md_vl.s
|
;******************** (C) COPYRIGHT 2010 STMicroelectronics ********************
;* File Name : startup_stm32f10x_md_vl.s
;* Author : MCD Application Team
;* Version : V3.3.0
;* Date : 04/16/2010
;* Description : STM32F10x Medium Density Value Line Devices vector table
;* for RVMDK toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Configure the clock system
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM3 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
; THE PRESENT FIRMWARE WHICH IS FOR GUIDANCE ONLY AIMS AT PROVIDING CUSTOMERS
; WITH CODING INFORMATION REGARDING THEIR PRODUCTS IN ORDER FOR THEM TO SAVE TIME.
; AS A RESULT, STMICROELECTRONICS SHALL NOT BE HELD LIABLE FOR ANY DIRECT,
; INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING FROM THE
; CONTENT OF SUCH FIRMWARE AND/OR THE USE MADE BY CUSTOMERS OF THE CODING
; INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS.
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size      EQU     0x00000400              ; 1 KiB main stack -- tune per application

                AREA    STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem       SPACE   Stack_Size
__initial_sp                                    ; top of stack; loaded into MSP from vector 0

; <h> Heap Configuration
;   <o>  Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>

Heap_Size       EQU     0x00000200              ; 512 B heap for the C library allocator

                AREA    HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem        SPACE   Heap_Size
__heap_limit

                PRESERVE8                       ; keep 8-byte stack alignment (AAPCS requirement)
                THUMB                           ; Cortex-M3 executes Thumb-2 code only
; Vector Table Mapped to Address 0 at Reset
; Entry 0 is the initial MSP value, entry 1 the reset vector; the remaining
; entries are exception and peripheral interrupt handler addresses in the
; order fixed by the Cortex-M3 NVIC for STM32F10x Medium Density Value Line.
                AREA    RESET, DATA, READONLY
                EXPORT  __Vectors
                EXPORT  __Vectors_End
                EXPORT  __Vectors_Size

__Vectors       DCD     __initial_sp               ; Top of Stack
                DCD     Reset_Handler              ; Reset Handler
                DCD     NMI_Handler                ; NMI Handler
                DCD     HardFault_Handler          ; Hard Fault Handler
                DCD     MemManage_Handler          ; MPU Fault Handler
                DCD     BusFault_Handler           ; Bus Fault Handler
                DCD     UsageFault_Handler         ; Usage Fault Handler
                DCD     0                          ; Reserved
                DCD     0                          ; Reserved
                DCD     0                          ; Reserved
                DCD     0                          ; Reserved
                DCD     SVC_Handler                ; SVCall Handler
                DCD     DebugMon_Handler           ; Debug Monitor Handler
                DCD     0                          ; Reserved
                DCD     PendSV_Handler             ; PendSV Handler
                DCD     SysTick_Handler            ; SysTick Handler

                ; External Interrupts
                DCD     WWDG_IRQHandler            ; Window Watchdog
                DCD     PVD_IRQHandler             ; PVD through EXTI Line detect
                DCD     TAMPER_IRQHandler          ; Tamper
                DCD     RTC_IRQHandler             ; RTC
                DCD     FLASH_IRQHandler           ; Flash
                DCD     RCC_IRQHandler             ; RCC
                DCD     EXTI0_IRQHandler           ; EXTI Line 0
                DCD     EXTI1_IRQHandler           ; EXTI Line 1
                DCD     EXTI2_IRQHandler           ; EXTI Line 2
                DCD     EXTI3_IRQHandler           ; EXTI Line 3
                DCD     EXTI4_IRQHandler           ; EXTI Line 4
                DCD     DMA1_Channel1_IRQHandler   ; DMA1 Channel 1
                DCD     DMA1_Channel2_IRQHandler   ; DMA1 Channel 2
                DCD     DMA1_Channel3_IRQHandler   ; DMA1 Channel 3
                DCD     DMA1_Channel4_IRQHandler   ; DMA1 Channel 4
                DCD     DMA1_Channel5_IRQHandler   ; DMA1 Channel 5
                DCD     DMA1_Channel6_IRQHandler   ; DMA1 Channel 6
                DCD     DMA1_Channel7_IRQHandler   ; DMA1 Channel 7
                DCD     ADC1_IRQHandler            ; ADC1
                DCD     0                          ; Reserved
                DCD     0                          ; Reserved
                DCD     0                          ; Reserved
                DCD     0                          ; Reserved
                DCD     EXTI9_5_IRQHandler         ; EXTI Line 9..5
                DCD     TIM1_BRK_TIM15_IRQHandler  ; TIM1 Break and TIM15
                DCD     TIM1_UP_TIM16_IRQHandler   ; TIM1 Update and TIM16
                DCD     TIM1_TRG_COM_TIM17_IRQHandler ; TIM1 Trigger and Commutation and TIM17
                DCD     TIM1_CC_IRQHandler         ; TIM1 Capture Compare
                DCD     TIM2_IRQHandler            ; TIM2
                DCD     TIM3_IRQHandler            ; TIM3
                DCD     TIM4_IRQHandler            ; TIM4
                DCD     I2C1_EV_IRQHandler         ; I2C1 Event
                DCD     I2C1_ER_IRQHandler         ; I2C1 Error
                DCD     I2C2_EV_IRQHandler         ; I2C2 Event
                DCD     I2C2_ER_IRQHandler         ; I2C2 Error
                DCD     SPI1_IRQHandler            ; SPI1
                DCD     SPI2_IRQHandler            ; SPI2
                DCD     USART1_IRQHandler          ; USART1
                DCD     USART2_IRQHandler          ; USART2
                DCD     USART3_IRQHandler          ; USART3
                DCD     EXTI15_10_IRQHandler       ; EXTI Line 15..10
                DCD     RTCAlarm_IRQHandler        ; RTC Alarm through EXTI Line
                DCD     CEC_IRQHandler             ; HDMI-CEC
                DCD     0                          ; Reserved
                DCD     0                          ; Reserved
                DCD     0                          ; Reserved
                DCD     0                          ; Reserved
                DCD     0                          ; Reserved
                DCD     0                          ; Reserved
                DCD     0                          ; Reserved
                DCD     0                          ; Reserved
                DCD     0                          ; Reserved
                DCD     0                          ; Reserved
                DCD     0                          ; Reserved
                DCD     TIM6_DAC_IRQHandler        ; TIM6 and DAC underrun
                DCD     TIM7_IRQHandler            ; TIM7
__Vectors_End

__Vectors_Size  EQU     __Vectors_End - __Vectors  ; table size in bytes
; Code section
                AREA    |.text|, CODE, READONLY

; Reset handler: calls SystemInit (clock/flash configuration) first, then
; jumps to the C library entry __main, which eventually calls main().
; BX to __main never returns.
Reset_Handler   PROC
                EXPORT  Reset_Handler             [WEAK]
                IMPORT  __main
                IMPORT  SystemInit
                LDR     R0, =SystemInit
                BLX     R0                        ; SystemInit()
                LDR     R0, =__main
                BX      R0                        ; hand off to the C runtime
                ENDP

; Dummy Exception Handlers (infinite loops which can be modified).
; Each is exported [WEAK], so an application-defined symbol of the same
; name overrides the stub at link time.
NMI_Handler     PROC
                EXPORT  NMI_Handler               [WEAK]
                B       .                         ; trap: spin here forever
                ENDP
HardFault_Handler\
                PROC
                EXPORT  HardFault_Handler         [WEAK]
                B       .
                ENDP
MemManage_Handler\
                PROC
                EXPORT  MemManage_Handler         [WEAK]
                B       .
                ENDP
BusFault_Handler\
                PROC
                EXPORT  BusFault_Handler          [WEAK]
                B       .
                ENDP
UsageFault_Handler\
                PROC
                EXPORT  UsageFault_Handler        [WEAK]
                B       .
                ENDP
SVC_Handler     PROC
                EXPORT  SVC_Handler               [WEAK]
                B       .
                ENDP
DebugMon_Handler\
                PROC
                EXPORT  DebugMon_Handler          [WEAK]
                B       .
                ENDP
PendSV_Handler  PROC
                EXPORT  PendSV_Handler            [WEAK]
                B       .
                ENDP
SysTick_Handler PROC
                EXPORT  SysTick_Handler           [WEAK]
                B       .
                ENDP

; Default_Handler: single shared trap for all peripheral interrupts.
; Every IRQ label below aliases the same infinite loop; each is [WEAK] so
; the application can override any individual handler by name.
Default_Handler PROC

                EXPORT  WWDG_IRQHandler           [WEAK]
                EXPORT  PVD_IRQHandler            [WEAK]
                EXPORT  TAMPER_IRQHandler         [WEAK]
                EXPORT  RTC_IRQHandler            [WEAK]
                EXPORT  FLASH_IRQHandler          [WEAK]
                EXPORT  RCC_IRQHandler            [WEAK]
                EXPORT  EXTI0_IRQHandler          [WEAK]
                EXPORT  EXTI1_IRQHandler          [WEAK]
                EXPORT  EXTI2_IRQHandler          [WEAK]
                EXPORT  EXTI3_IRQHandler          [WEAK]
                EXPORT  EXTI4_IRQHandler          [WEAK]
                EXPORT  DMA1_Channel1_IRQHandler  [WEAK]
                EXPORT  DMA1_Channel2_IRQHandler  [WEAK]
                EXPORT  DMA1_Channel3_IRQHandler  [WEAK]
                EXPORT  DMA1_Channel4_IRQHandler  [WEAK]
                EXPORT  DMA1_Channel5_IRQHandler  [WEAK]
                EXPORT  DMA1_Channel6_IRQHandler  [WEAK]
                EXPORT  DMA1_Channel7_IRQHandler  [WEAK]
                EXPORT  ADC1_IRQHandler           [WEAK]
                EXPORT  EXTI9_5_IRQHandler        [WEAK]
                EXPORT  TIM1_BRK_TIM15_IRQHandler [WEAK]
                EXPORT  TIM1_UP_TIM16_IRQHandler  [WEAK]
                EXPORT  TIM1_TRG_COM_TIM17_IRQHandler [WEAK]
                EXPORT  TIM1_CC_IRQHandler        [WEAK]
                EXPORT  TIM2_IRQHandler           [WEAK]
                EXPORT  TIM3_IRQHandler           [WEAK]
                EXPORT  TIM4_IRQHandler           [WEAK]
                EXPORT  I2C1_EV_IRQHandler        [WEAK]
                EXPORT  I2C1_ER_IRQHandler        [WEAK]
                EXPORT  I2C2_EV_IRQHandler        [WEAK]
                EXPORT  I2C2_ER_IRQHandler        [WEAK]
                EXPORT  SPI1_IRQHandler           [WEAK]
                EXPORT  SPI2_IRQHandler           [WEAK]
                EXPORT  USART1_IRQHandler         [WEAK]
                EXPORT  USART2_IRQHandler         [WEAK]
                EXPORT  USART3_IRQHandler         [WEAK]
                EXPORT  EXTI15_10_IRQHandler      [WEAK]
                EXPORT  RTCAlarm_IRQHandler       [WEAK]
                EXPORT  CEC_IRQHandler            [WEAK]
                EXPORT  TIM6_DAC_IRQHandler       [WEAK]
                EXPORT  TIM7_IRQHandler           [WEAK]

; All of the labels below share the single B . loop that follows them.
WWDG_IRQHandler
PVD_IRQHandler
TAMPER_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_IRQHandler
EXTI0_IRQHandler
EXTI1_IRQHandler
EXTI2_IRQHandler
EXTI3_IRQHandler
EXTI4_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_IRQHandler
DMA1_Channel3_IRQHandler
DMA1_Channel4_IRQHandler
DMA1_Channel5_IRQHandler
DMA1_Channel6_IRQHandler
DMA1_Channel7_IRQHandler
ADC1_IRQHandler
EXTI9_5_IRQHandler
TIM1_BRK_TIM15_IRQHandler
TIM1_UP_TIM16_IRQHandler
TIM1_TRG_COM_TIM17_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM4_IRQHandler
I2C1_EV_IRQHandler
I2C1_ER_IRQHandler
I2C2_EV_IRQHandler
I2C2_ER_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_IRQHandler
EXTI15_10_IRQHandler
RTCAlarm_IRQHandler
CEC_IRQHandler
TIM6_DAC_IRQHandler
TIM7_IRQHandler
                B       .                         ; unhandled interrupt: spin
                ENDP
; Pad to the next alignment boundary before the stack/heap helper code.
                ALIGN

;*******************************************************************************
; User Stack and Heap initialization
; With MicroLIB the stack/heap symbols are consumed directly; otherwise the
; standard C library calls __user_initial_stackheap to obtain the two regions.
;*******************************************************************************
                IF      :DEF:__MICROLIB

                EXPORT  __initial_sp
                EXPORT  __heap_base
                EXPORT  __heap_limit

                ELSE

                IMPORT  __use_two_region_memory   ; select two-region memory model
                EXPORT  __user_initial_stackheap

; Returns: R0 = heap base, R1 = stack top, R2 = heap limit, R3 = stack limit.
__user_initial_stackheap

                LDR     R0, = Heap_Mem
                LDR     R1, =(Stack_Mem + Stack_Size)
                LDR     R2, = (Heap_Mem + Heap_Size)
                LDR     R3, = Stack_Mem
                BX      LR

                ALIGN

                ENDIF

                END
;******************* (C) COPYRIGHT 2010 STMicroelectronics *****END OF FILE*****
|
AAAAyl0n/Lambda0
| 15,393
|
1.Hardware/Screen/1.28英寸GC9A01圆形TFT资料/代码/STM32/STM32F10x/ASM/startup_stm32f10x_cl.s
|
;******************** (C) COPYRIGHT 2010 STMicroelectronics ********************
;* File Name : startup_stm32f10x_cl.s
;* Author : MCD Application Team
;* Version : V3.3.0
;* Date : 04/16/2010
;* Description : STM32F10x Connectivity line devices vector table for RVMDK
;* toolchain.
;* This module performs:
;* - Set the initial SP
;* - Set the initial PC == Reset_Handler
;* - Set the vector table entries with the exceptions ISR address
;* - Configure the clock system
;* - Branches to __main in the C library (which eventually
;* calls main()).
;* After Reset the CortexM3 processor is in Thread mode,
;* priority is Privileged, and the Stack is set to Main.
;* <<< Use Configuration Wizard in Context Menu >>>
;*******************************************************************************
; THE PRESENT FIRMWARE WHICH IS FOR GUIDANCE ONLY AIMS AT PROVIDING CUSTOMERS
; WITH CODING INFORMATION REGARDING THEIR PRODUCTS IN ORDER FOR THEM TO SAVE TIME.
; AS A RESULT, STMICROELECTRONICS SHALL NOT BE HELD LIABLE FOR ANY DIRECT,
; INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING FROM THE
; CONTENT OF SUCH FIRMWARE AND/OR THE USE MADE BY CUSTOMERS OF THE CODING
; INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS.
;*******************************************************************************
; Amount of memory (in bytes) allocated for Stack
; Tailor this value to your application needs
; <h> Stack Configuration
; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>
Stack_Size      EQU     0x00000400              ; 1 KiB main stack -- tune per application

                AREA    STACK, NOINIT, READWRITE, ALIGN=3
Stack_Mem       SPACE   Stack_Size
__initial_sp                                    ; top of stack; loaded into MSP from vector 0

; <h> Heap Configuration
;   <o>  Heap Size (in Bytes) <0x0-0xFFFFFFFF:8>
; </h>

Heap_Size       EQU     0x00000200              ; 512 B heap for the C library allocator

                AREA    HEAP, NOINIT, READWRITE, ALIGN=3
__heap_base
Heap_Mem        SPACE   Heap_Size
__heap_limit

                PRESERVE8                       ; keep 8-byte stack alignment (AAPCS requirement)
                THUMB                           ; Cortex-M3 executes Thumb-2 code only
; Vector Table Mapped to Address 0 at Reset
; Entry 0 is the initial MSP value, entry 1 the reset vector; the remaining
; entries are exception and peripheral interrupt handler addresses in the
; order fixed by the Cortex-M3 NVIC for STM32F10x Connectivity Line devices.
                AREA    RESET, DATA, READONLY
                EXPORT  __Vectors
                EXPORT  __Vectors_End
                EXPORT  __Vectors_Size

__Vectors       DCD     __initial_sp               ; Top of Stack
                DCD     Reset_Handler              ; Reset Handler
                DCD     NMI_Handler                ; NMI Handler
                DCD     HardFault_Handler          ; Hard Fault Handler
                DCD     MemManage_Handler          ; MPU Fault Handler
                DCD     BusFault_Handler           ; Bus Fault Handler
                DCD     UsageFault_Handler         ; Usage Fault Handler
                DCD     0                          ; Reserved
                DCD     0                          ; Reserved
                DCD     0                          ; Reserved
                DCD     0                          ; Reserved
                DCD     SVC_Handler                ; SVCall Handler
                DCD     DebugMon_Handler           ; Debug Monitor Handler
                DCD     0                          ; Reserved
                DCD     PendSV_Handler             ; PendSV Handler
                DCD     SysTick_Handler            ; SysTick Handler

                ; External Interrupts
                DCD     WWDG_IRQHandler            ; Window Watchdog
                DCD     PVD_IRQHandler             ; PVD through EXTI Line detect
                DCD     TAMPER_IRQHandler          ; Tamper
                DCD     RTC_IRQHandler             ; RTC
                DCD     FLASH_IRQHandler           ; Flash
                DCD     RCC_IRQHandler             ; RCC
                DCD     EXTI0_IRQHandler           ; EXTI Line 0
                DCD     EXTI1_IRQHandler           ; EXTI Line 1
                DCD     EXTI2_IRQHandler           ; EXTI Line 2
                DCD     EXTI3_IRQHandler           ; EXTI Line 3
                DCD     EXTI4_IRQHandler           ; EXTI Line 4
                DCD     DMA1_Channel1_IRQHandler   ; DMA1 Channel 1
                DCD     DMA1_Channel2_IRQHandler   ; DMA1 Channel 2
                DCD     DMA1_Channel3_IRQHandler   ; DMA1 Channel 3
                DCD     DMA1_Channel4_IRQHandler   ; DMA1 Channel 4
                DCD     DMA1_Channel5_IRQHandler   ; DMA1 Channel 5
                DCD     DMA1_Channel6_IRQHandler   ; DMA1 Channel 6
                DCD     DMA1_Channel7_IRQHandler   ; DMA1 Channel 7
                DCD     ADC1_2_IRQHandler          ; ADC1 and ADC2
                DCD     CAN1_TX_IRQHandler         ; CAN1 TX
                DCD     CAN1_RX0_IRQHandler        ; CAN1 RX0
                DCD     CAN1_RX1_IRQHandler        ; CAN1 RX1
                DCD     CAN1_SCE_IRQHandler        ; CAN1 SCE
                DCD     EXTI9_5_IRQHandler         ; EXTI Line 9..5
                DCD     TIM1_BRK_IRQHandler        ; TIM1 Break
                DCD     TIM1_UP_IRQHandler         ; TIM1 Update
                DCD     TIM1_TRG_COM_IRQHandler    ; TIM1 Trigger and Commutation
                DCD     TIM1_CC_IRQHandler         ; TIM1 Capture Compare
                DCD     TIM2_IRQHandler            ; TIM2
                DCD     TIM3_IRQHandler            ; TIM3
                DCD     TIM4_IRQHandler            ; TIM4
                DCD     I2C1_EV_IRQHandler         ; I2C1 Event
                DCD     I2C1_ER_IRQHandler         ; I2C1 Error
                DCD     I2C2_EV_IRQHandler         ; I2C2 Event
                DCD     I2C2_ER_IRQHandler         ; I2C2 Error
                DCD     SPI1_IRQHandler            ; SPI1
                DCD     SPI2_IRQHandler            ; SPI2
                DCD     USART1_IRQHandler          ; USART1
                DCD     USART2_IRQHandler          ; USART2
                DCD     USART3_IRQHandler          ; USART3
                DCD     EXTI15_10_IRQHandler       ; EXTI Line 15..10
                DCD     RTCAlarm_IRQHandler        ; RTC alarm through EXTI line
                DCD     OTG_FS_WKUP_IRQHandler     ; USB OTG FS Wakeup through EXTI line
                DCD     0                          ; Reserved
                DCD     0                          ; Reserved
                DCD     0                          ; Reserved
                DCD     0                          ; Reserved
                DCD     0                          ; Reserved
                DCD     0                          ; Reserved
                DCD     0                          ; Reserved
                DCD     TIM5_IRQHandler            ; TIM5
                DCD     SPI3_IRQHandler            ; SPI3
                DCD     UART4_IRQHandler           ; UART4
                DCD     UART5_IRQHandler           ; UART5
                DCD     TIM6_IRQHandler            ; TIM6
                DCD     TIM7_IRQHandler            ; TIM7
                DCD     DMA2_Channel1_IRQHandler   ; DMA2 Channel1
                DCD     DMA2_Channel2_IRQHandler   ; DMA2 Channel2
                DCD     DMA2_Channel3_IRQHandler   ; DMA2 Channel3
                DCD     DMA2_Channel4_IRQHandler   ; DMA2 Channel4
                DCD     DMA2_Channel5_IRQHandler   ; DMA2 Channel5
                DCD     ETH_IRQHandler             ; Ethernet
                DCD     ETH_WKUP_IRQHandler        ; Ethernet Wakeup through EXTI line
                DCD     CAN2_TX_IRQHandler         ; CAN2 TX
                DCD     CAN2_RX0_IRQHandler        ; CAN2 RX0
                DCD     CAN2_RX1_IRQHandler        ; CAN2 RX1
                DCD     CAN2_SCE_IRQHandler        ; CAN2 SCE
                DCD     OTG_FS_IRQHandler          ; USB OTG FS
__Vectors_End

__Vectors_Size  EQU     __Vectors_End - __Vectors  ; table size in bytes
; Code section
                AREA    |.text|, CODE, READONLY

; Reset handler: calls SystemInit (clock/flash configuration) first, then
; jumps to the C library entry __main, which eventually calls main().
; BX to __main never returns.
Reset_Handler   PROC
                EXPORT  Reset_Handler             [WEAK]
                IMPORT  SystemInit
                IMPORT  __main
                LDR     R0, =SystemInit
                BLX     R0                        ; SystemInit()
                LDR     R0, =__main
                BX      R0                        ; hand off to the C runtime
                ENDP

; Dummy Exception Handlers (infinite loops which can be modified).
; Each is exported [WEAK], so an application-defined symbol of the same
; name overrides the stub at link time.
NMI_Handler     PROC
                EXPORT  NMI_Handler               [WEAK]
                B       .                         ; trap: spin here forever
                ENDP
HardFault_Handler\
                PROC
                EXPORT  HardFault_Handler         [WEAK]
                B       .
                ENDP
MemManage_Handler\
                PROC
                EXPORT  MemManage_Handler         [WEAK]
                B       .
                ENDP
BusFault_Handler\
                PROC
                EXPORT  BusFault_Handler          [WEAK]
                B       .
                ENDP
UsageFault_Handler\
                PROC
                EXPORT  UsageFault_Handler        [WEAK]
                B       .
                ENDP
SVC_Handler     PROC
                EXPORT  SVC_Handler               [WEAK]
                B       .
                ENDP
DebugMon_Handler\
                PROC
                EXPORT  DebugMon_Handler          [WEAK]
                B       .
                ENDP
PendSV_Handler  PROC
                EXPORT  PendSV_Handler            [WEAK]
                B       .
                ENDP
SysTick_Handler PROC
                EXPORT  SysTick_Handler           [WEAK]
                B       .
                ENDP

; Default_Handler: single shared trap for all peripheral interrupts.
; Every IRQ label below aliases the same infinite loop; each is [WEAK] so
; the application can override any individual handler by name.
Default_Handler PROC

                EXPORT  WWDG_IRQHandler           [WEAK]
                EXPORT  PVD_IRQHandler            [WEAK]
                EXPORT  TAMPER_IRQHandler         [WEAK]
                EXPORT  RTC_IRQHandler            [WEAK]
                EXPORT  FLASH_IRQHandler          [WEAK]
                EXPORT  RCC_IRQHandler            [WEAK]
                EXPORT  EXTI0_IRQHandler          [WEAK]
                EXPORT  EXTI1_IRQHandler          [WEAK]
                EXPORT  EXTI2_IRQHandler          [WEAK]
                EXPORT  EXTI3_IRQHandler          [WEAK]
                EXPORT  EXTI4_IRQHandler          [WEAK]
                EXPORT  DMA1_Channel1_IRQHandler  [WEAK]
                EXPORT  DMA1_Channel2_IRQHandler  [WEAK]
                EXPORT  DMA1_Channel3_IRQHandler  [WEAK]
                EXPORT  DMA1_Channel4_IRQHandler  [WEAK]
                EXPORT  DMA1_Channel5_IRQHandler  [WEAK]
                EXPORT  DMA1_Channel6_IRQHandler  [WEAK]
                EXPORT  DMA1_Channel7_IRQHandler  [WEAK]
                EXPORT  ADC1_2_IRQHandler         [WEAK]
                EXPORT  CAN1_TX_IRQHandler        [WEAK]
                EXPORT  CAN1_RX0_IRQHandler       [WEAK]
                EXPORT  CAN1_RX1_IRQHandler       [WEAK]
                EXPORT  CAN1_SCE_IRQHandler       [WEAK]
                EXPORT  EXTI9_5_IRQHandler        [WEAK]
                EXPORT  TIM1_BRK_IRQHandler       [WEAK]
                EXPORT  TIM1_UP_IRQHandler        [WEAK]
                EXPORT  TIM1_TRG_COM_IRQHandler   [WEAK]
                EXPORT  TIM1_CC_IRQHandler        [WEAK]
                EXPORT  TIM2_IRQHandler           [WEAK]
                EXPORT  TIM3_IRQHandler           [WEAK]
                EXPORT  TIM4_IRQHandler           [WEAK]
                EXPORT  I2C1_EV_IRQHandler        [WEAK]
                EXPORT  I2C1_ER_IRQHandler        [WEAK]
                EXPORT  I2C2_EV_IRQHandler        [WEAK]
                EXPORT  I2C2_ER_IRQHandler        [WEAK]
                EXPORT  SPI1_IRQHandler           [WEAK]
                EXPORT  SPI2_IRQHandler           [WEAK]
                EXPORT  USART1_IRQHandler         [WEAK]
                EXPORT  USART2_IRQHandler         [WEAK]
                EXPORT  USART3_IRQHandler         [WEAK]
                EXPORT  EXTI15_10_IRQHandler      [WEAK]
                EXPORT  RTCAlarm_IRQHandler       [WEAK]
                EXPORT  OTG_FS_WKUP_IRQHandler    [WEAK]
                EXPORT  TIM5_IRQHandler           [WEAK]
                EXPORT  SPI3_IRQHandler           [WEAK]
                EXPORT  UART4_IRQHandler          [WEAK]
                EXPORT  UART5_IRQHandler          [WEAK]
                EXPORT  TIM6_IRQHandler           [WEAK]
                EXPORT  TIM7_IRQHandler           [WEAK]
                EXPORT  DMA2_Channel1_IRQHandler  [WEAK]
                EXPORT  DMA2_Channel2_IRQHandler  [WEAK]
                EXPORT  DMA2_Channel3_IRQHandler  [WEAK]
                EXPORT  DMA2_Channel4_IRQHandler  [WEAK]
                EXPORT  DMA2_Channel5_IRQHandler  [WEAK]
                EXPORT  ETH_IRQHandler            [WEAK]
                EXPORT  ETH_WKUP_IRQHandler       [WEAK]
                EXPORT  CAN2_TX_IRQHandler        [WEAK]
                EXPORT  CAN2_RX0_IRQHandler       [WEAK]
                EXPORT  CAN2_RX1_IRQHandler       [WEAK]
                EXPORT  CAN2_SCE_IRQHandler       [WEAK]
                EXPORT  OTG_FS_IRQHandler         [WEAK]

; All of the labels below share the single B . loop that follows them.
WWDG_IRQHandler
PVD_IRQHandler
TAMPER_IRQHandler
RTC_IRQHandler
FLASH_IRQHandler
RCC_IRQHandler
EXTI0_IRQHandler
EXTI1_IRQHandler
EXTI2_IRQHandler
EXTI3_IRQHandler
EXTI4_IRQHandler
DMA1_Channel1_IRQHandler
DMA1_Channel2_IRQHandler
DMA1_Channel3_IRQHandler
DMA1_Channel4_IRQHandler
DMA1_Channel5_IRQHandler
DMA1_Channel6_IRQHandler
DMA1_Channel7_IRQHandler
ADC1_2_IRQHandler
CAN1_TX_IRQHandler
CAN1_RX0_IRQHandler
CAN1_RX1_IRQHandler
CAN1_SCE_IRQHandler
EXTI9_5_IRQHandler
TIM1_BRK_IRQHandler
TIM1_UP_IRQHandler
TIM1_TRG_COM_IRQHandler
TIM1_CC_IRQHandler
TIM2_IRQHandler
TIM3_IRQHandler
TIM4_IRQHandler
I2C1_EV_IRQHandler
I2C1_ER_IRQHandler
I2C2_EV_IRQHandler
I2C2_ER_IRQHandler
SPI1_IRQHandler
SPI2_IRQHandler
USART1_IRQHandler
USART2_IRQHandler
USART3_IRQHandler
EXTI15_10_IRQHandler
RTCAlarm_IRQHandler
OTG_FS_WKUP_IRQHandler
TIM5_IRQHandler
SPI3_IRQHandler
UART4_IRQHandler
UART5_IRQHandler
TIM6_IRQHandler
TIM7_IRQHandler
DMA2_Channel1_IRQHandler
DMA2_Channel2_IRQHandler
DMA2_Channel3_IRQHandler
DMA2_Channel4_IRQHandler
DMA2_Channel5_IRQHandler
ETH_IRQHandler
ETH_WKUP_IRQHandler
CAN2_TX_IRQHandler
CAN2_RX0_IRQHandler
CAN2_RX1_IRQHandler
CAN2_SCE_IRQHandler
OTG_FS_IRQHandler
                B       .                         ; unhandled interrupt: spin
                ENDP
; Pad to the next alignment boundary before the stack/heap helper code.
                ALIGN

;*******************************************************************************
; User Stack and Heap initialization
; With MicroLIB the stack/heap symbols are consumed directly; otherwise the
; standard C library calls __user_initial_stackheap to obtain the two regions.
;*******************************************************************************
                IF      :DEF:__MICROLIB

                EXPORT  __initial_sp
                EXPORT  __heap_base
                EXPORT  __heap_limit

                ELSE

                IMPORT  __use_two_region_memory   ; select two-region memory model
                EXPORT  __user_initial_stackheap

; Returns: R0 = heap base, R1 = stack top, R2 = heap limit, R3 = stack limit.
__user_initial_stackheap

                LDR     R0, = Heap_Mem
                LDR     R1, =(Stack_Mem + Stack_Size)
                LDR     R2, = (Heap_Mem + Heap_Size)
                LDR     R3, = Stack_Mem
                BX      LR

                ALIGN

                ENDIF

                END
;******************* (C) COPYRIGHT 2010 STMicroelectronics *****END OF FILE*****
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.