repo_id (string, length 5-115) | size (int64, 590-5.01M) | file_path (string, length 4-212) | content (string, length 590-5.01M)
aixcc-public/challenge-001-exemplar-source
5,111
arch/s390/kernel/vmlinux.lds.S
/* SPDX-License-Identifier: GPL-2.0 */ /* ld script to make s390 Linux kernel * Written by Martin Schwidefsky (schwidefsky@de.ibm.com) */ #include <asm/thread_info.h> #include <asm/page.h> #include <asm/ftrace.lds.h> /* * Put .bss..swapper_pg_dir as the first thing in .bss. This will * make sure it has 16k alignment. */ #define BSS_FIRST_SECTIONS *(.bss..swapper_pg_dir) \ *(.bss..invalid_pg_dir) /* Handle ro_after_init data on our own. */ #define RO_AFTER_INIT_DATA #define RUNTIME_DISCARD_EXIT #define EMITS_PT_NOTE #include <asm-generic/vmlinux.lds.h> #include <asm/vmlinux.lds.h> OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390") OUTPUT_ARCH(s390:64-bit) ENTRY(startup_continue) jiffies = jiffies_64; PHDRS { text PT_LOAD FLAGS(5); /* R_E */ data PT_LOAD FLAGS(7); /* RWE */ note PT_NOTE FLAGS(0); /* ___ */ } SECTIONS { . = 0x100000; .text : { _stext = .; /* Start of text section */ _text = .; /* Text and read-only data */ HEAD_TEXT TEXT_TEXT SCHED_TEXT CPUIDLE_TEXT LOCK_TEXT KPROBES_TEXT IRQENTRY_TEXT SOFTIRQENTRY_TEXT FTRACE_HOTPATCH_TRAMPOLINES_TEXT *(.text.*_indirect_*) *(.gnu.warning) . = ALIGN(PAGE_SIZE); _etext = .; /* End of text section */ } :text = 0x0700 RO_DATA(PAGE_SIZE) . = ALIGN(PAGE_SIZE); _sdata = .; /* Start of data section */ . = ALIGN(PAGE_SIZE); __start_ro_after_init = .; .data..ro_after_init : { *(.data..ro_after_init) JUMP_TABLE_DATA } :data EXCEPTION_TABLE(16) . = ALIGN(PAGE_SIZE); __end_ro_after_init = .; RW_DATA(0x100, PAGE_SIZE, THREAD_SIZE) BOOT_DATA_PRESERVED . = ALIGN(8); .amode31.refs : { _start_amode31_refs = .; *(.amode31.refs) _end_amode31_refs = .; } . = ALIGN(PAGE_SIZE); _edata = .; /* End of data section */ /* will be freed after init */ . = ALIGN(PAGE_SIZE); /* Init code and data */ __init_begin = .; . = ALIGN(PAGE_SIZE); .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { _sinittext = .; INIT_TEXT . = ALIGN(PAGE_SIZE); _einittext = .; } /* * .exit.text is discarded at runtime, not link time, * to deal with references from __bug_table */ .exit.text : { EXIT_TEXT } .exit.data : { EXIT_DATA } /* * struct alt_inst entries. From the header (alternative.h): * "Alternative instructions for different CPU types or capabilities" * Think locking instructions on spinlocks. * Note, that it is a part of __init region. */ . = ALIGN(8); .altinstructions : { __alt_instructions = .; *(.altinstructions) __alt_instructions_end = .; } /* * And here are the replacement instructions. The linker sticks * them as binary blobs. The .altinstructions has enough data to * get the address and the length of them to patch the kernel safely. * Note, that it is a part of __init region. */ .altinstr_replacement : { *(.altinstr_replacement) } /* * Table with the patch locations to undo expolines */ . = ALIGN(4); .nospec_call_table : { __nospec_call_start = . ; *(.s390_indirect*) __nospec_call_end = . ; } .nospec_return_table : { __nospec_return_start = . ; *(.s390_return*) __nospec_return_end = . ; } BOOT_DATA /* * .amode31 section for code, data, ex_table that need to stay * below 2 GB, even when the kernel is relocated above 2 GB. */ . = ALIGN(PAGE_SIZE); _samode31 = .; .amode31.text : { _stext_amode31 = .; *(.amode31.text) *(.amode31.text.*_indirect_*) . = ALIGN(PAGE_SIZE); _etext_amode31 = .; } . = ALIGN(16); .amode31.ex_table : { _start_amode31_ex_table = .; KEEP(*(.amode31.ex_table)) _stop_amode31_ex_table = .; } . = ALIGN(PAGE_SIZE); .amode31.data : { *(.amode31.data) } . = ALIGN(PAGE_SIZE); _eamode31 = .; /* early.c uses stsi, which requires page aligned data. */ . 
= ALIGN(PAGE_SIZE); INIT_DATA_SECTION(0x100) PERCPU_SECTION(0x100) .dynsym ALIGN(8) : { __dynsym_start = .; *(.dynsym) __dynsym_end = .; } .rela.dyn ALIGN(8) : { __rela_dyn_start = .; *(.rela*) __rela_dyn_end = .; } . = ALIGN(PAGE_SIZE); __init_end = .; /* freed after init ends here */ BSS_SECTION(PAGE_SIZE, 4 * PAGE_SIZE, PAGE_SIZE) . = ALIGN(PAGE_SIZE); _end = . ; /* * uncompressed image info used by the decompressor * it should match struct vmlinux_info */ .vmlinux.info 0 (INFO) : { QUAD(_stext) /* default_lma */ QUAD(startup_continue) /* entry */ QUAD(__bss_start - _stext) /* image_size */ QUAD(__bss_stop - __bss_start) /* bss_size */ QUAD(__boot_data_start) /* bootdata_off */ QUAD(__boot_data_end - __boot_data_start) /* bootdata_size */ QUAD(__boot_data_preserved_start) /* bootdata_preserved_off */ QUAD(__boot_data_preserved_end - __boot_data_preserved_start) /* bootdata_preserved_size */ QUAD(__dynsym_start) /* dynsym_start */ QUAD(__rela_dyn_start) /* rela_dyn_start */ QUAD(__rela_dyn_end) /* rela_dyn_end */ QUAD(_eamode31 - _samode31) /* amode31_size */ } :NONE /* Debugging sections. */ STABS_DEBUG DWARF_DEBUG ELF_DETAILS /* Sections to be discarded */ DISCARDS /DISCARD/ : { *(.eh_frame) *(.interp) } }
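The .vmlinux.info section above emits a fixed sequence of QUAD values that the decompressor later reads back as a structure ("it should match struct vmlinux_info"). A minimal C sketch of a matching layout, one 64-bit field per QUAD in the order emitted; field names follow the comments in the script, and the real struct vmlinux_info lives in the s390 boot headers, so treat this as illustrative only:

#include <stdint.h>

/* Sketch: one 64-bit field per QUAD() emitted by .vmlinux.info, in order. */
struct vmlinux_info_sketch {
        uint64_t default_lma;               /* _stext */
        uint64_t entry;                     /* startup_continue */
        uint64_t image_size;                /* __bss_start - _stext */
        uint64_t bss_size;                  /* __bss_stop - __bss_start */
        uint64_t bootdata_off;
        uint64_t bootdata_size;
        uint64_t bootdata_preserved_off;
        uint64_t bootdata_preserved_size;
        uint64_t dynsym_start;
        uint64_t rela_dyn_start;
        uint64_t rela_dyn_end;
        uint64_t amode31_size;              /* _eamode31 - _samode31 */
};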
aixcc-public/challenge-001-exemplar-source
3,352
arch/s390/kernel/text_amode31.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Code that needs to run below 2 GB. * * Copyright IBM Corp. 2019 */ #include <linux/linkage.h> #include <asm/asm-extable.h> #include <asm/errno.h> #include <asm/sigp.h> .section .amode31.text,"ax" /* * Simplified version of expoline thunk. The normal thunks can not be used here, * because they might be more than 2 GB away, and not reachable by the relative * branch. No comdat, exrl, etc. optimizations used here, because it only * affects a few functions that are not performance-relevant. */ .macro BR_EX_AMODE31_r14 larl %r1,0f ex 0,0(%r1) j . 0: br %r14 .endm /* * int _diag14_amode31(unsigned long rx, unsigned long ry1, unsigned long subcode) */ ENTRY(_diag14_amode31) lgr %r1,%r2 lgr %r2,%r3 lgr %r3,%r4 lhi %r5,-EIO sam31 diag %r1,%r2,0x14 .Ldiag14_ex: ipm %r5 srl %r5,28 .Ldiag14_fault: sam64 lgfr %r2,%r5 BR_EX_AMODE31_r14 EX_TABLE_AMODE31(.Ldiag14_ex, .Ldiag14_fault) ENDPROC(_diag14_amode31) /* * int _diag210_amode31(struct diag210 *addr) */ ENTRY(_diag210_amode31) lgr %r1,%r2 lhi %r2,-1 sam31 diag %r1,%r0,0x210 .Ldiag210_ex: ipm %r2 srl %r2,28 .Ldiag210_fault: sam64 lgfr %r2,%r2 BR_EX_AMODE31_r14 EX_TABLE_AMODE31(.Ldiag210_ex, .Ldiag210_fault) ENDPROC(_diag210_amode31) /* * int _diag26c_amode31(void *req, void *resp, enum diag26c_sc subcode) */ ENTRY(_diag26c_amode31) lghi %r5,-EOPNOTSUPP sam31 diag %r2,%r4,0x26c .Ldiag26c_ex: sam64 lgfr %r2,%r5 BR_EX_AMODE31_r14 EX_TABLE_AMODE31(.Ldiag26c_ex, .Ldiag26c_ex) ENDPROC(_diag26c_amode31) /* * void _diag0c_amode31(struct hypfs_diag0c_entry *entry) */ ENTRY(_diag0c_amode31) sam31 diag %r2,%r2,0x0c sam64 BR_EX_AMODE31_r14 ENDPROC(_diag0c_amode31) /* * void _diag308_reset_amode31(void) * * Calls diag 308 subcode 1 and continues execution */ ENTRY(_diag308_reset_amode31) larl %r4,.Lctlregs # Save control registers stctg %c0,%c15,0(%r4) lg %r2,0(%r4) # Disable lowcore protection nilh %r2,0xefff larl %r4,.Lctlreg0 stg %r2,0(%r4) lctlg %c0,%c0,0(%r4) larl %r4,.Lfpctl # Floating point control register stfpc 0(%r4) larl %r4,.Lprefix # Save prefix register stpx 0(%r4) larl %r4,.Lprefix_zero # Set prefix register to 0 spx 0(%r4) larl %r4,.Lcontinue_psw # Save PSW flags epsw %r2,%r3 stm %r2,%r3,0(%r4) larl %r4,.Lrestart_part2 # Setup restart PSW at absolute 0 larl %r3,.Lrestart_diag308_psw og %r4,0(%r3) # Save PSW lghi %r3,0 sturg %r4,%r3 # Use sturg, because of large pages lghi %r1,1 lghi %r0,0 diag %r0,%r1,0x308 .Lrestart_part2: lhi %r0,0 # Load r0 with zero lhi %r1,2 # Use mode 2 = ESAME (dump) sigp %r1,%r0,SIGP_SET_ARCHITECTURE # Switch to ESAME mode sam64 # Switch to 64 bit addressing mode larl %r4,.Lctlregs # Restore control registers lctlg %c0,%c15,0(%r4) larl %r4,.Lfpctl # Restore floating point ctl register lfpc 0(%r4) larl %r4,.Lprefix # Restore prefix register spx 0(%r4) larl %r4,.Lcontinue_psw # Restore PSW flags larl %r2,.Lcontinue stg %r2,8(%r4) lpswe 0(%r4) .Lcontinue: BR_EX_AMODE31_r14 ENDPROC(_diag308_reset_amode31) .section .amode31.data,"aw",@progbits .align 8 .Lrestart_diag308_psw: .long 0x00080000,0x80000000 .align 8 .Lcontinue_psw: .quad 0,0 .align 8 .Lctlreg0: .quad 0 .Lctlregs: .rept 16 .quad 0 .endr .Lfpctl: .long 0 .Lprefix: .long 0 .Lprefix_zero: .long 0
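Each _diag*_amode31 helper switches to 31-bit addressing with sam31, issues the DIAGNOSE instruction, and relies on the amode31 exception table to recover if the instruction faults. A hedged C model of the _diag14_amode31 return convention only (the function name is real, the body below is purely an illustration of the error handling, not the kernel code):

#include <errno.h>

/*
 * Model: on success the condition code of DIAG 0x14 (0..3) is returned,
 * extracted by ipm/srl; on a program check the exception table branches
 * to the fault label and the preloaded -EIO ("lhi %r5,-EIO") is returned.
 */
static int diag14_return_model(int diag_faulted, int condition_code)
{
        int rc = -EIO;                  /* preloaded error value */

        if (!diag_faulted)
                rc = condition_code;    /* cc extracted on success */
        return rc;
}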
aixcc-public/challenge-001-exemplar-source
1,850
arch/s390/kernel/reipl.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright IBM Corp 2000, 2011 * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>, * Denis Joseph Barrow, */ #include <linux/linkage.h> #include <asm/asm-offsets.h> #include <asm/nospec-insn.h> #include <asm/sigp.h> GEN_BR_THUNK %r9 # # Issue "store status" for the current CPU to its prefix page # and call passed function afterwards # # r2 = Function to be called after store status # r3 = Parameter for function # ENTRY(store_status) /* Save register one and load save area base */ stg %r1,__LC_SAVE_AREA_RESTART /* General purpose registers */ lghi %r1,__LC_GPREGS_SAVE_AREA stmg %r0,%r15,0(%r1) mvc 8(8,%r1),__LC_SAVE_AREA_RESTART /* Control registers */ lghi %r1,__LC_CREGS_SAVE_AREA stctg %c0,%c15,0(%r1) /* Access registers */ lghi %r1,__LC_AREGS_SAVE_AREA stam %a0,%a15,0(%r1) /* Floating point registers */ lghi %r1,__LC_FPREGS_SAVE_AREA std %f0, 0x00(%r1) std %f1, 0x08(%r1) std %f2, 0x10(%r1) std %f3, 0x18(%r1) std %f4, 0x20(%r1) std %f5, 0x28(%r1) std %f6, 0x30(%r1) std %f7, 0x38(%r1) std %f8, 0x40(%r1) std %f9, 0x48(%r1) std %f10,0x50(%r1) std %f11,0x58(%r1) std %f12,0x60(%r1) std %f13,0x68(%r1) std %f14,0x70(%r1) std %f15,0x78(%r1) /* Floating point control register */ lghi %r1,__LC_FP_CREG_SAVE_AREA stfpc 0(%r1) /* CPU timer */ lghi %r1,__LC_CPU_TIMER_SAVE_AREA stpt 0(%r1) /* Store prefix register */ lghi %r1,__LC_PREFIX_SAVE_AREA stpx 0(%r1) /* Clock comparator - seven bytes */ lghi %r1,__LC_CLOCK_COMP_SAVE_AREA larl %r4,.Lclkcmp stckc 0(%r4) mvc 1(7,%r1),1(%r4) /* Program status word */ lghi %r1,__LC_PSW_SAVE_AREA epsw %r4,%r5 st %r4,0(%r1) st %r5,4(%r1) stg %r2,8(%r1) lgr %r9,%r2 lgr %r2,%r3 BR_EX %r9 ENDPROC(store_status) .section .bss .align 8 .Lclkcmp: .quad 0x0000000000000000 .previous
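store_status saves the calling CPU's general, control, access and floating-point registers, timers, prefix and PSW into the lowcore save areas and then branches to the function passed in %r2, handing it %r3 as the argument. Seen from C, the entry point behaves roughly like the hypothetical prototype below (the callback and parameter names are made up for the example; only the assembly above defines the real interface):

/* Hypothetical C-side view of the assembly entry point. */
typedef void (*store_status_fn_t)(unsigned long param);

void store_status(store_status_fn_t fn, unsigned long param);

/* Usage sketch: save the current CPU state, then continue in next_step(). */
static void next_step(unsigned long param)
{
        (void)param;    /* continue with the saved state in lowcore */
}

static void store_status_example(void)
{
        store_status(next_step, 0UL);
}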
aixcc-public/challenge-001-exemplar-source
6,267
arch/s390/crypto/crc32be-vx.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Hardware-accelerated CRC-32 variants for Linux on z Systems * * Use the z/Architecture Vector Extension Facility to accelerate the * computing of CRC-32 checksums. * * This CRC-32 implementation algorithm processes the most-significant * bit first (BE). * * Copyright IBM Corp. 2015 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> */ #include <linux/linkage.h> #include <asm/nospec-insn.h> #include <asm/vx-insn.h> /* Vector register range containing CRC-32 constants */ #define CONST_R1R2 %v9 #define CONST_R3R4 %v10 #define CONST_R5 %v11 #define CONST_R6 %v12 #define CONST_RU_POLY %v13 #define CONST_CRC_POLY %v14 .data .align 8 /* * The CRC-32 constant block contains reduction constants to fold and * process particular chunks of the input data stream in parallel. * * For the CRC-32 variants, the constants are precomputed according to * these definitions: * * R1 = x4*128+64 mod P(x) * R2 = x4*128 mod P(x) * R3 = x128+64 mod P(x) * R4 = x128 mod P(x) * R5 = x96 mod P(x) * R6 = x64 mod P(x) * * Barret reduction constant, u, is defined as floor(x**64 / P(x)). * * where P(x) is the polynomial in the normal domain and the P'(x) is the * polynomial in the reversed (bitreflected) domain. * * Note that the constant definitions below are extended in order to compute * intermediate results with a single VECTOR GALOIS FIELD MULTIPLY instruction. * The righmost doubleword can be 0 to prevent contribution to the result or * can be multiplied by 1 to perform an XOR without the need for a separate * VECTOR EXCLUSIVE OR instruction. * * CRC-32 (IEEE 802.3 Ethernet, ...) polynomials: * * P(x) = 0x04C11DB7 * P'(x) = 0xEDB88320 */ .Lconstants_CRC_32_BE: .quad 0x08833794c, 0x0e6228b11 # R1, R2 .quad 0x0c5b9cd4c, 0x0e8a45605 # R3, R4 .quad 0x0f200aa66, 1 << 32 # R5, x32 .quad 0x0490d678d, 1 # R6, 1 .quad 0x104d101df, 0 # u .quad 0x104C11DB7, 0 # P(x) .previous GEN_BR_THUNK %r14 .text /* * The CRC-32 function(s) use these calling conventions: * * Parameters: * * %r2: Initial CRC value, typically ~0; and final CRC (return) value. * %r3: Input buffer pointer, performance might be improved if the * buffer is on a doubleword boundary. * %r4: Length of the buffer, must be 64 bytes or greater. * * Register usage: * * %r5: CRC-32 constant pool base pointer. * V0: Initial CRC value and intermediate constants and results. * V1..V4: Data for CRC computation. * V5..V8: Next data chunks that are fetched from the input buffer. * * V9..V14: CRC-32 constants. */ ENTRY(crc32_be_vgfm_16) /* Load CRC-32 constants */ larl %r5,.Lconstants_CRC_32_BE VLM CONST_R1R2,CONST_CRC_POLY,0,%r5 /* Load the initial CRC value into the leftmost word of V0. */ VZERO %v0 VLVGF %v0,%r2,0 /* Load a 64-byte data chunk and XOR with CRC */ VLM %v1,%v4,0,%r3 /* 64-bytes into V1..V4 */ VX %v1,%v0,%v1 /* V1 ^= CRC */ aghi %r3,64 /* BUF = BUF + 64 */ aghi %r4,-64 /* LEN = LEN - 64 */ /* Check remaining buffer size and jump to proper folding method */ cghi %r4,64 jl .Lless_than_64bytes .Lfold_64bytes_loop: /* Load the next 64-byte data chunk into V5 to V8 */ VLM %v5,%v8,0,%r3 /* * Perform a GF(2) multiplication of the doublewords in V1 with * the reduction constants in V0. The intermediate result is * then folded (accumulated) with the next data chunk in V5 and * stored in V1. Repeat this step for the register contents * in V2, V3, and V4 respectively. 
*/ VGFMAG %v1,CONST_R1R2,%v1,%v5 VGFMAG %v2,CONST_R1R2,%v2,%v6 VGFMAG %v3,CONST_R1R2,%v3,%v7 VGFMAG %v4,CONST_R1R2,%v4,%v8 /* Adjust buffer pointer and length for next loop */ aghi %r3,64 /* BUF = BUF + 64 */ aghi %r4,-64 /* LEN = LEN - 64 */ cghi %r4,64 jnl .Lfold_64bytes_loop .Lless_than_64bytes: /* Fold V1 to V4 into a single 128-bit value in V1 */ VGFMAG %v1,CONST_R3R4,%v1,%v2 VGFMAG %v1,CONST_R3R4,%v1,%v3 VGFMAG %v1,CONST_R3R4,%v1,%v4 /* Check whether to continue with 64-bit folding */ cghi %r4,16 jl .Lfinal_fold .Lfold_16bytes_loop: VL %v2,0,,%r3 /* Load next data chunk */ VGFMAG %v1,CONST_R3R4,%v1,%v2 /* Fold next data chunk */ /* Adjust buffer pointer and size for folding next data chunk */ aghi %r3,16 aghi %r4,-16 /* Process remaining data chunks */ cghi %r4,16 jnl .Lfold_16bytes_loop .Lfinal_fold: /* * The R5 constant is used to fold a 128-bit value into an 96-bit value * that is XORed with the next 96-bit input data chunk. To use a single * VGFMG instruction, multiply the rightmost 64-bit with x^32 (1<<32) to * form an intermediate 96-bit value (with appended zeros) which is then * XORed with the intermediate reduction result. */ VGFMG %v1,CONST_R5,%v1 /* * Further reduce the remaining 96-bit value to a 64-bit value using a * single VGFMG, the rightmost doubleword is multiplied with 0x1. The * intermediate result is then XORed with the product of the leftmost * doubleword with R6. The result is a 64-bit value and is subject to * the Barret reduction. */ VGFMG %v1,CONST_R6,%v1 /* * The input values to the Barret reduction are the degree-63 polynomial * in V1 (R(x)), degree-32 generator polynomial, and the reduction * constant u. The Barret reduction result is the CRC value of R(x) mod * P(x). * * The Barret reduction algorithm is defined as: * * 1. T1(x) = floor( R(x) / x^32 ) GF2MUL u * 2. T2(x) = floor( T1(x) / x^32 ) GF2MUL P(x) * 3. C(x) = R(x) XOR T2(x) mod x^32 * * Note: To compensate the division by x^32, use the vector unpack * instruction to move the leftmost word into the leftmost doubleword * of the vector register. The rightmost doubleword is multiplied * with zero to not contribute to the intermediate results. */ /* T1(x) = floor( R(x) / x^32 ) GF2MUL u */ VUPLLF %v2,%v1 VGFMG %v2,CONST_RU_POLY,%v2 /* * Compute the GF(2) product of the CRC polynomial in VO with T1(x) in * V2 and XOR the intermediate result, T2(x), with the value in V1. * The final result is in the rightmost word of V2. */ VUPLLF %v2,%v2 VGFMAG %v2,CONST_CRC_POLY,%v2,%v1 .Ldone: VLGVF %r2,%v2,3 BR_EX %r14 ENDPROC(crc32_be_vgfm_16) .previous
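The vector routine above computes the same value as a plain MSB-first CRC-32 over the buffer; it merely folds 64 bytes at a time with VGFM and finishes with a Barrett reduction. A bit-at-a-time C reference using the IEEE 802.3 polynomial 0x04C11DB7, useful for checking results on short inputs (a sketch for verification, not the kernel's implementation):

#include <stddef.h>
#include <stdint.h>

/* MSB-first CRC-32, polynomial P(x) = 0x04C11DB7; no final xor applied here. */
static uint32_t crc32_be_ref(uint32_t crc, const unsigned char *buf, size_t len)
{
        while (len--) {
                crc ^= (uint32_t)*buf++ << 24;
                for (int i = 0; i < 8; i++)
                        crc = (crc & 0x80000000u) ? (crc << 1) ^ 0x04C11DB7u
                                                  : crc << 1;
        }
        return crc;
}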
aixcc-public/challenge-001-exemplar-source
7,965
arch/s390/crypto/crc32le-vx.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Hardware-accelerated CRC-32 variants for Linux on z Systems * * Use the z/Architecture Vector Extension Facility to accelerate the * computing of bitreflected CRC-32 checksums for IEEE 802.3 Ethernet * and Castagnoli. * * This CRC-32 implementation algorithm is bitreflected and processes * the least-significant bit first (Little-Endian). * * Copyright IBM Corp. 2015 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> */ #include <linux/linkage.h> #include <asm/nospec-insn.h> #include <asm/vx-insn.h> /* Vector register range containing CRC-32 constants */ #define CONST_PERM_LE2BE %v9 #define CONST_R2R1 %v10 #define CONST_R4R3 %v11 #define CONST_R5 %v12 #define CONST_RU_POLY %v13 #define CONST_CRC_POLY %v14 .data .align 8 /* * The CRC-32 constant block contains reduction constants to fold and * process particular chunks of the input data stream in parallel. * * For the CRC-32 variants, the constants are precomputed according to * these definitions: * * R1 = [(x4*128+32 mod P'(x) << 32)]' << 1 * R2 = [(x4*128-32 mod P'(x) << 32)]' << 1 * R3 = [(x128+32 mod P'(x) << 32)]' << 1 * R4 = [(x128-32 mod P'(x) << 32)]' << 1 * R5 = [(x64 mod P'(x) << 32)]' << 1 * R6 = [(x32 mod P'(x) << 32)]' << 1 * * The bitreflected Barret reduction constant, u', is defined as * the bit reversal of floor(x**64 / P(x)). * * where P(x) is the polynomial in the normal domain and the P'(x) is the * polynomial in the reversed (bitreflected) domain. * * CRC-32 (IEEE 802.3 Ethernet, ...) polynomials: * * P(x) = 0x04C11DB7 * P'(x) = 0xEDB88320 * * CRC-32C (Castagnoli) polynomials: * * P(x) = 0x1EDC6F41 * P'(x) = 0x82F63B78 */ .Lconstants_CRC_32_LE: .octa 0x0F0E0D0C0B0A09080706050403020100 # BE->LE mask .quad 0x1c6e41596, 0x154442bd4 # R2, R1 .quad 0x0ccaa009e, 0x1751997d0 # R4, R3 .octa 0x163cd6124 # R5 .octa 0x1F7011641 # u' .octa 0x1DB710641 # P'(x) << 1 .Lconstants_CRC_32C_LE: .octa 0x0F0E0D0C0B0A09080706050403020100 # BE->LE mask .quad 0x09e4addf8, 0x740eef02 # R2, R1 .quad 0x14cd00bd6, 0xf20c0dfe # R4, R3 .octa 0x0dd45aab8 # R5 .octa 0x0dea713f1 # u' .octa 0x105ec76f0 # P'(x) << 1 .previous GEN_BR_THUNK %r14 .text /* * The CRC-32 functions use these calling conventions: * * Parameters: * * %r2: Initial CRC value, typically ~0; and final CRC (return) value. * %r3: Input buffer pointer, performance might be improved if the * buffer is on a doubleword boundary. * %r4: Length of the buffer, must be 64 bytes or greater. * * Register usage: * * %r5: CRC-32 constant pool base pointer. * V0: Initial CRC value and intermediate constants and results. * V1..V4: Data for CRC computation. * V5..V8: Next data chunks that are fetched from the input buffer. * V9: Constant for BE->LE conversion and shift operations * * V10..V14: CRC-32 constants. */ ENTRY(crc32_le_vgfm_16) larl %r5,.Lconstants_CRC_32_LE j crc32_le_vgfm_generic ENDPROC(crc32_le_vgfm_16) ENTRY(crc32c_le_vgfm_16) larl %r5,.Lconstants_CRC_32C_LE j crc32_le_vgfm_generic ENDPROC(crc32c_le_vgfm_16) ENTRY(crc32_le_vgfm_generic) /* Load CRC-32 constants */ VLM CONST_PERM_LE2BE,CONST_CRC_POLY,0,%r5 /* * Load the initial CRC value. * * The CRC value is loaded into the rightmost word of the * vector register and is later XORed with the LSB portion * of the loaded input data. 
*/ VZERO %v0 /* Clear V0 */ VLVGF %v0,%r2,3 /* Load CRC into rightmost word */ /* Load a 64-byte data chunk and XOR with CRC */ VLM %v1,%v4,0,%r3 /* 64-bytes into V1..V4 */ VPERM %v1,%v1,%v1,CONST_PERM_LE2BE VPERM %v2,%v2,%v2,CONST_PERM_LE2BE VPERM %v3,%v3,%v3,CONST_PERM_LE2BE VPERM %v4,%v4,%v4,CONST_PERM_LE2BE VX %v1,%v0,%v1 /* V1 ^= CRC */ aghi %r3,64 /* BUF = BUF + 64 */ aghi %r4,-64 /* LEN = LEN - 64 */ cghi %r4,64 jl .Lless_than_64bytes .Lfold_64bytes_loop: /* Load the next 64-byte data chunk into V5 to V8 */ VLM %v5,%v8,0,%r3 VPERM %v5,%v5,%v5,CONST_PERM_LE2BE VPERM %v6,%v6,%v6,CONST_PERM_LE2BE VPERM %v7,%v7,%v7,CONST_PERM_LE2BE VPERM %v8,%v8,%v8,CONST_PERM_LE2BE /* * Perform a GF(2) multiplication of the doublewords in V1 with * the R1 and R2 reduction constants in V0. The intermediate result * is then folded (accumulated) with the next data chunk in V5 and * stored in V1. Repeat this step for the register contents * in V2, V3, and V4 respectively. */ VGFMAG %v1,CONST_R2R1,%v1,%v5 VGFMAG %v2,CONST_R2R1,%v2,%v6 VGFMAG %v3,CONST_R2R1,%v3,%v7 VGFMAG %v4,CONST_R2R1,%v4,%v8 aghi %r3,64 /* BUF = BUF + 64 */ aghi %r4,-64 /* LEN = LEN - 64 */ cghi %r4,64 jnl .Lfold_64bytes_loop .Lless_than_64bytes: /* * Fold V1 to V4 into a single 128-bit value in V1. Multiply V1 with R3 * and R4 and accumulating the next 128-bit chunk until a single 128-bit * value remains. */ VGFMAG %v1,CONST_R4R3,%v1,%v2 VGFMAG %v1,CONST_R4R3,%v1,%v3 VGFMAG %v1,CONST_R4R3,%v1,%v4 cghi %r4,16 jl .Lfinal_fold .Lfold_16bytes_loop: VL %v2,0,,%r3 /* Load next data chunk */ VPERM %v2,%v2,%v2,CONST_PERM_LE2BE VGFMAG %v1,CONST_R4R3,%v1,%v2 /* Fold next data chunk */ aghi %r3,16 aghi %r4,-16 cghi %r4,16 jnl .Lfold_16bytes_loop .Lfinal_fold: /* * Set up a vector register for byte shifts. The shift value must * be loaded in bits 1-4 in byte element 7 of a vector register. * Shift by 8 bytes: 0x40 * Shift by 4 bytes: 0x20 */ VLEIB %v9,0x40,7 /* * Prepare V0 for the next GF(2) multiplication: shift V0 by 8 bytes * to move R4 into the rightmost doubleword and set the leftmost * doubleword to 0x1. */ VSRLB %v0,CONST_R4R3,%v9 VLEIG %v0,1,0 /* * Compute GF(2) product of V1 and V0. The rightmost doubleword * of V1 is multiplied with R4. The leftmost doubleword of V1 is * multiplied by 0x1 and is then XORed with rightmost product. * Implicitly, the intermediate leftmost product becomes padded */ VGFMG %v1,%v0,%v1 /* * Now do the final 32-bit fold by multiplying the rightmost word * in V1 with R5 and XOR the result with the remaining bits in V1. * * To achieve this by a single VGFMAG, right shift V1 by a word * and store the result in V2 which is then accumulated. Use the * vector unpack instruction to load the rightmost half of the * doubleword into the rightmost doubleword element of V1; the other * half is loaded in the leftmost doubleword. * The vector register with CONST_R5 contains the R5 constant in the * rightmost doubleword and the leftmost doubleword is zero to ignore * the leftmost product of V1. */ VLEIB %v9,0x20,7 /* Shift by words */ VSRLB %v2,%v1,%v9 /* Store remaining bits in V2 */ VUPLLF %v1,%v1 /* Split rightmost doubleword */ VGFMAG %v1,CONST_R5,%v1,%v2 /* V1 = (V1 * R5) XOR V2 */ /* * Apply a Barret reduction to compute the final 32-bit CRC value. * * The input values to the Barret reduction are the degree-63 polynomial * in V1 (R(x)), degree-32 generator polynomial, and the reduction * constant u. The Barret reduction result is the CRC value of R(x) mod * P(x). * * The Barret reduction algorithm is defined as: * * 1. 
T1(x) = floor( R(x) / x^32 ) GF2MUL u * 2. T2(x) = floor( T1(x) / x^32 ) GF2MUL P(x) * 3. C(x) = R(x) XOR T2(x) mod x^32 * * Note: The leftmost doubleword of vector register containing * CONST_RU_POLY is zero and, thus, the intermediate GF(2) product * is zero and does not contribute to the final result. */ /* T1(x) = floor( R(x) / x^32 ) GF2MUL u */ VUPLLF %v2,%v1 VGFMG %v2,CONST_RU_POLY,%v2 /* * Compute the GF(2) product of the CRC polynomial with T1(x) in * V2 and XOR the intermediate result, T2(x), with the value in V1. * The final result is stored in word element 2 of V2. */ VUPLLF %v2,%v2 VGFMAG %v2,CONST_CRC_POLY,%v2,%v1 .Ldone: VLGVF %r2,%v2,2 BR_EX %r14 ENDPROC(crc32_le_vgfm_generic) .previous
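Both entry points compute bit-reflected CRCs; only the constant pool differs (IEEE 802.3 Ethernet vs. Castagnoli). An LSB-first C reference that uses the reflected polynomials named above, handy for validating the vector code on small buffers (a sketch, not the accelerated implementation):

#include <stddef.h>
#include <stdint.h>

/*
 * LSB-first (bit-reflected) CRC-32; pass 0xEDB88320 for CRC-32 or
 * 0x82F63B78 for CRC-32C (Castagnoli). No final xor applied here.
 */
static uint32_t crc32_le_ref(uint32_t crc, const unsigned char *buf,
                             size_t len, uint32_t poly_reflected)
{
        while (len--) {
                crc ^= *buf++;
                for (int i = 0; i < 8; i++)
                        crc = (crc & 1) ? (crc >> 1) ^ poly_reflected
                                        : crc >> 1;
        }
        return crc;
}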
aixcc-public/challenge-001-exemplar-source
13,354
arch/s390/crypto/chacha-s390.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Original implementation written by Andy Polyakov, @dot-asm. * This is an adaptation of the original code for kernel use. * * Copyright (C) 2006-2019 CRYPTOGAMS by <appro@openssl.org>. All Rights Reserved. */ #include <linux/linkage.h> #include <asm/nospec-insn.h> #include <asm/vx-insn.h> #define SP %r15 #define FRAME (16 * 8 + 4 * 8) .data .align 32 .Lsigma: .long 0x61707865,0x3320646e,0x79622d32,0x6b206574 # endian-neutral .long 1,0,0,0 .long 2,0,0,0 .long 3,0,0,0 .long 0x03020100,0x07060504,0x0b0a0908,0x0f0e0d0c # byte swap .long 0,1,2,3 .long 0x61707865,0x61707865,0x61707865,0x61707865 # smashed sigma .long 0x3320646e,0x3320646e,0x3320646e,0x3320646e .long 0x79622d32,0x79622d32,0x79622d32,0x79622d32 .long 0x6b206574,0x6b206574,0x6b206574,0x6b206574 .previous GEN_BR_THUNK %r14 .text ############################################################################# # void chacha20_vx_4x(u8 *out, counst u8 *inp, size_t len, # counst u32 *key, const u32 *counter) #define OUT %r2 #define INP %r3 #define LEN %r4 #define KEY %r5 #define COUNTER %r6 #define BEPERM %v31 #define CTR %v26 #define K0 %v16 #define K1 %v17 #define K2 %v18 #define K3 %v19 #define XA0 %v0 #define XA1 %v1 #define XA2 %v2 #define XA3 %v3 #define XB0 %v4 #define XB1 %v5 #define XB2 %v6 #define XB3 %v7 #define XC0 %v8 #define XC1 %v9 #define XC2 %v10 #define XC3 %v11 #define XD0 %v12 #define XD1 %v13 #define XD2 %v14 #define XD3 %v15 #define XT0 %v27 #define XT1 %v28 #define XT2 %v29 #define XT3 %v30 ENTRY(chacha20_vx_4x) stmg %r6,%r7,6*8(SP) larl %r7,.Lsigma lhi %r0,10 lhi %r1,0 VL K0,0,,%r7 # load sigma VL K1,0,,KEY # load key VL K2,16,,KEY VL K3,0,,COUNTER # load counter VL BEPERM,0x40,,%r7 VL CTR,0x50,,%r7 VLM XA0,XA3,0x60,%r7,4 # load [smashed] sigma VREPF XB0,K1,0 # smash the key VREPF XB1,K1,1 VREPF XB2,K1,2 VREPF XB3,K1,3 VREPF XD0,K3,0 VREPF XD1,K3,1 VREPF XD2,K3,2 VREPF XD3,K3,3 VAF XD0,XD0,CTR VREPF XC0,K2,0 VREPF XC1,K2,1 VREPF XC2,K2,2 VREPF XC3,K2,3 .Loop_4x: VAF XA0,XA0,XB0 VX XD0,XD0,XA0 VERLLF XD0,XD0,16 VAF XA1,XA1,XB1 VX XD1,XD1,XA1 VERLLF XD1,XD1,16 VAF XA2,XA2,XB2 VX XD2,XD2,XA2 VERLLF XD2,XD2,16 VAF XA3,XA3,XB3 VX XD3,XD3,XA3 VERLLF XD3,XD3,16 VAF XC0,XC0,XD0 VX XB0,XB0,XC0 VERLLF XB0,XB0,12 VAF XC1,XC1,XD1 VX XB1,XB1,XC1 VERLLF XB1,XB1,12 VAF XC2,XC2,XD2 VX XB2,XB2,XC2 VERLLF XB2,XB2,12 VAF XC3,XC3,XD3 VX XB3,XB3,XC3 VERLLF XB3,XB3,12 VAF XA0,XA0,XB0 VX XD0,XD0,XA0 VERLLF XD0,XD0,8 VAF XA1,XA1,XB1 VX XD1,XD1,XA1 VERLLF XD1,XD1,8 VAF XA2,XA2,XB2 VX XD2,XD2,XA2 VERLLF XD2,XD2,8 VAF XA3,XA3,XB3 VX XD3,XD3,XA3 VERLLF XD3,XD3,8 VAF XC0,XC0,XD0 VX XB0,XB0,XC0 VERLLF XB0,XB0,7 VAF XC1,XC1,XD1 VX XB1,XB1,XC1 VERLLF XB1,XB1,7 VAF XC2,XC2,XD2 VX XB2,XB2,XC2 VERLLF XB2,XB2,7 VAF XC3,XC3,XD3 VX XB3,XB3,XC3 VERLLF XB3,XB3,7 VAF XA0,XA0,XB1 VX XD3,XD3,XA0 VERLLF XD3,XD3,16 VAF XA1,XA1,XB2 VX XD0,XD0,XA1 VERLLF XD0,XD0,16 VAF XA2,XA2,XB3 VX XD1,XD1,XA2 VERLLF XD1,XD1,16 VAF XA3,XA3,XB0 VX XD2,XD2,XA3 VERLLF XD2,XD2,16 VAF XC2,XC2,XD3 VX XB1,XB1,XC2 VERLLF XB1,XB1,12 VAF XC3,XC3,XD0 VX XB2,XB2,XC3 VERLLF XB2,XB2,12 VAF XC0,XC0,XD1 VX XB3,XB3,XC0 VERLLF XB3,XB3,12 VAF XC1,XC1,XD2 VX XB0,XB0,XC1 VERLLF XB0,XB0,12 VAF XA0,XA0,XB1 VX XD3,XD3,XA0 VERLLF XD3,XD3,8 VAF XA1,XA1,XB2 VX XD0,XD0,XA1 VERLLF XD0,XD0,8 VAF XA2,XA2,XB3 VX XD1,XD1,XA2 VERLLF XD1,XD1,8 VAF XA3,XA3,XB0 VX XD2,XD2,XA3 VERLLF XD2,XD2,8 VAF XC2,XC2,XD3 VX XB1,XB1,XC2 VERLLF XB1,XB1,7 VAF XC3,XC3,XD0 VX XB2,XB2,XC3 VERLLF XB2,XB2,7 VAF XC0,XC0,XD1 VX XB3,XB3,XC0 VERLLF XB3,XB3,7 VAF XC1,XC1,XD2 VX XB0,XB0,XC1 VERLLF 
XB0,XB0,7 brct %r0,.Loop_4x VAF XD0,XD0,CTR VMRHF XT0,XA0,XA1 # transpose data VMRHF XT1,XA2,XA3 VMRLF XT2,XA0,XA1 VMRLF XT3,XA2,XA3 VPDI XA0,XT0,XT1,0b0000 VPDI XA1,XT0,XT1,0b0101 VPDI XA2,XT2,XT3,0b0000 VPDI XA3,XT2,XT3,0b0101 VMRHF XT0,XB0,XB1 VMRHF XT1,XB2,XB3 VMRLF XT2,XB0,XB1 VMRLF XT3,XB2,XB3 VPDI XB0,XT0,XT1,0b0000 VPDI XB1,XT0,XT1,0b0101 VPDI XB2,XT2,XT3,0b0000 VPDI XB3,XT2,XT3,0b0101 VMRHF XT0,XC0,XC1 VMRHF XT1,XC2,XC3 VMRLF XT2,XC0,XC1 VMRLF XT3,XC2,XC3 VPDI XC0,XT0,XT1,0b0000 VPDI XC1,XT0,XT1,0b0101 VPDI XC2,XT2,XT3,0b0000 VPDI XC3,XT2,XT3,0b0101 VMRHF XT0,XD0,XD1 VMRHF XT1,XD2,XD3 VMRLF XT2,XD0,XD1 VMRLF XT3,XD2,XD3 VPDI XD0,XT0,XT1,0b0000 VPDI XD1,XT0,XT1,0b0101 VPDI XD2,XT2,XT3,0b0000 VPDI XD3,XT2,XT3,0b0101 VAF XA0,XA0,K0 VAF XB0,XB0,K1 VAF XC0,XC0,K2 VAF XD0,XD0,K3 VPERM XA0,XA0,XA0,BEPERM VPERM XB0,XB0,XB0,BEPERM VPERM XC0,XC0,XC0,BEPERM VPERM XD0,XD0,XD0,BEPERM VLM XT0,XT3,0,INP,0 VX XT0,XT0,XA0 VX XT1,XT1,XB0 VX XT2,XT2,XC0 VX XT3,XT3,XD0 VSTM XT0,XT3,0,OUT,0 la INP,0x40(INP) la OUT,0x40(OUT) aghi LEN,-0x40 VAF XA0,XA1,K0 VAF XB0,XB1,K1 VAF XC0,XC1,K2 VAF XD0,XD1,K3 VPERM XA0,XA0,XA0,BEPERM VPERM XB0,XB0,XB0,BEPERM VPERM XC0,XC0,XC0,BEPERM VPERM XD0,XD0,XD0,BEPERM clgfi LEN,0x40 jl .Ltail_4x VLM XT0,XT3,0,INP,0 VX XT0,XT0,XA0 VX XT1,XT1,XB0 VX XT2,XT2,XC0 VX XT3,XT3,XD0 VSTM XT0,XT3,0,OUT,0 la INP,0x40(INP) la OUT,0x40(OUT) aghi LEN,-0x40 je .Ldone_4x VAF XA0,XA2,K0 VAF XB0,XB2,K1 VAF XC0,XC2,K2 VAF XD0,XD2,K3 VPERM XA0,XA0,XA0,BEPERM VPERM XB0,XB0,XB0,BEPERM VPERM XC0,XC0,XC0,BEPERM VPERM XD0,XD0,XD0,BEPERM clgfi LEN,0x40 jl .Ltail_4x VLM XT0,XT3,0,INP,0 VX XT0,XT0,XA0 VX XT1,XT1,XB0 VX XT2,XT2,XC0 VX XT3,XT3,XD0 VSTM XT0,XT3,0,OUT,0 la INP,0x40(INP) la OUT,0x40(OUT) aghi LEN,-0x40 je .Ldone_4x VAF XA0,XA3,K0 VAF XB0,XB3,K1 VAF XC0,XC3,K2 VAF XD0,XD3,K3 VPERM XA0,XA0,XA0,BEPERM VPERM XB0,XB0,XB0,BEPERM VPERM XC0,XC0,XC0,BEPERM VPERM XD0,XD0,XD0,BEPERM clgfi LEN,0x40 jl .Ltail_4x VLM XT0,XT3,0,INP,0 VX XT0,XT0,XA0 VX XT1,XT1,XB0 VX XT2,XT2,XC0 VX XT3,XT3,XD0 VSTM XT0,XT3,0,OUT,0 .Ldone_4x: lmg %r6,%r7,6*8(SP) BR_EX %r14 .Ltail_4x: VLR XT0,XC0 VLR XT1,XD0 VST XA0,8*8+0x00,,SP VST XB0,8*8+0x10,,SP VST XT0,8*8+0x20,,SP VST XT1,8*8+0x30,,SP lghi %r1,0 .Loop_tail_4x: llgc %r5,0(%r1,INP) llgc %r6,8*8(%r1,SP) xr %r6,%r5 stc %r6,0(%r1,OUT) la %r1,1(%r1) brct LEN,.Loop_tail_4x lmg %r6,%r7,6*8(SP) BR_EX %r14 ENDPROC(chacha20_vx_4x) #undef OUT #undef INP #undef LEN #undef KEY #undef COUNTER #undef BEPERM #undef K0 #undef K1 #undef K2 #undef K3 ############################################################################# # void chacha20_vx(u8 *out, counst u8 *inp, size_t len, # counst u32 *key, const u32 *counter) #define OUT %r2 #define INP %r3 #define LEN %r4 #define KEY %r5 #define COUNTER %r6 #define BEPERM %v31 #define K0 %v27 #define K1 %v24 #define K2 %v25 #define K3 %v26 #define A0 %v0 #define B0 %v1 #define C0 %v2 #define D0 %v3 #define A1 %v4 #define B1 %v5 #define C1 %v6 #define D1 %v7 #define A2 %v8 #define B2 %v9 #define C2 %v10 #define D2 %v11 #define A3 %v12 #define B3 %v13 #define C3 %v14 #define D3 %v15 #define A4 %v16 #define B4 %v17 #define C4 %v18 #define D4 %v19 #define A5 %v20 #define B5 %v21 #define C5 %v22 #define D5 %v23 #define T0 %v27 #define T1 %v28 #define T2 %v29 #define T3 %v30 ENTRY(chacha20_vx) clgfi LEN,256 jle chacha20_vx_4x stmg %r6,%r7,6*8(SP) lghi %r1,-FRAME lgr %r0,SP la SP,0(%r1,SP) stg %r0,0(SP) # back-chain larl %r7,.Lsigma lhi %r0,10 VLM K1,K2,0,KEY,0 # load key VL K3,0,,COUNTER # load counter VLM K0,BEPERM,0,%r7,4 # load sigma, 
increments, ... .Loop_outer_vx: VLR A0,K0 VLR B0,K1 VLR A1,K0 VLR B1,K1 VLR A2,K0 VLR B2,K1 VLR A3,K0 VLR B3,K1 VLR A4,K0 VLR B4,K1 VLR A5,K0 VLR B5,K1 VLR D0,K3 VAF D1,K3,T1 # K[3]+1 VAF D2,K3,T2 # K[3]+2 VAF D3,K3,T3 # K[3]+3 VAF D4,D2,T2 # K[3]+4 VAF D5,D2,T3 # K[3]+5 VLR C0,K2 VLR C1,K2 VLR C2,K2 VLR C3,K2 VLR C4,K2 VLR C5,K2 VLR T1,D1 VLR T2,D2 VLR T3,D3 .Loop_vx: VAF A0,A0,B0 VAF A1,A1,B1 VAF A2,A2,B2 VAF A3,A3,B3 VAF A4,A4,B4 VAF A5,A5,B5 VX D0,D0,A0 VX D1,D1,A1 VX D2,D2,A2 VX D3,D3,A3 VX D4,D4,A4 VX D5,D5,A5 VERLLF D0,D0,16 VERLLF D1,D1,16 VERLLF D2,D2,16 VERLLF D3,D3,16 VERLLF D4,D4,16 VERLLF D5,D5,16 VAF C0,C0,D0 VAF C1,C1,D1 VAF C2,C2,D2 VAF C3,C3,D3 VAF C4,C4,D4 VAF C5,C5,D5 VX B0,B0,C0 VX B1,B1,C1 VX B2,B2,C2 VX B3,B3,C3 VX B4,B4,C4 VX B5,B5,C5 VERLLF B0,B0,12 VERLLF B1,B1,12 VERLLF B2,B2,12 VERLLF B3,B3,12 VERLLF B4,B4,12 VERLLF B5,B5,12 VAF A0,A0,B0 VAF A1,A1,B1 VAF A2,A2,B2 VAF A3,A3,B3 VAF A4,A4,B4 VAF A5,A5,B5 VX D0,D0,A0 VX D1,D1,A1 VX D2,D2,A2 VX D3,D3,A3 VX D4,D4,A4 VX D5,D5,A5 VERLLF D0,D0,8 VERLLF D1,D1,8 VERLLF D2,D2,8 VERLLF D3,D3,8 VERLLF D4,D4,8 VERLLF D5,D5,8 VAF C0,C0,D0 VAF C1,C1,D1 VAF C2,C2,D2 VAF C3,C3,D3 VAF C4,C4,D4 VAF C5,C5,D5 VX B0,B0,C0 VX B1,B1,C1 VX B2,B2,C2 VX B3,B3,C3 VX B4,B4,C4 VX B5,B5,C5 VERLLF B0,B0,7 VERLLF B1,B1,7 VERLLF B2,B2,7 VERLLF B3,B3,7 VERLLF B4,B4,7 VERLLF B5,B5,7 VSLDB C0,C0,C0,8 VSLDB C1,C1,C1,8 VSLDB C2,C2,C2,8 VSLDB C3,C3,C3,8 VSLDB C4,C4,C4,8 VSLDB C5,C5,C5,8 VSLDB B0,B0,B0,4 VSLDB B1,B1,B1,4 VSLDB B2,B2,B2,4 VSLDB B3,B3,B3,4 VSLDB B4,B4,B4,4 VSLDB B5,B5,B5,4 VSLDB D0,D0,D0,12 VSLDB D1,D1,D1,12 VSLDB D2,D2,D2,12 VSLDB D3,D3,D3,12 VSLDB D4,D4,D4,12 VSLDB D5,D5,D5,12 VAF A0,A0,B0 VAF A1,A1,B1 VAF A2,A2,B2 VAF A3,A3,B3 VAF A4,A4,B4 VAF A5,A5,B5 VX D0,D0,A0 VX D1,D1,A1 VX D2,D2,A2 VX D3,D3,A3 VX D4,D4,A4 VX D5,D5,A5 VERLLF D0,D0,16 VERLLF D1,D1,16 VERLLF D2,D2,16 VERLLF D3,D3,16 VERLLF D4,D4,16 VERLLF D5,D5,16 VAF C0,C0,D0 VAF C1,C1,D1 VAF C2,C2,D2 VAF C3,C3,D3 VAF C4,C4,D4 VAF C5,C5,D5 VX B0,B0,C0 VX B1,B1,C1 VX B2,B2,C2 VX B3,B3,C3 VX B4,B4,C4 VX B5,B5,C5 VERLLF B0,B0,12 VERLLF B1,B1,12 VERLLF B2,B2,12 VERLLF B3,B3,12 VERLLF B4,B4,12 VERLLF B5,B5,12 VAF A0,A0,B0 VAF A1,A1,B1 VAF A2,A2,B2 VAF A3,A3,B3 VAF A4,A4,B4 VAF A5,A5,B5 VX D0,D0,A0 VX D1,D1,A1 VX D2,D2,A2 VX D3,D3,A3 VX D4,D4,A4 VX D5,D5,A5 VERLLF D0,D0,8 VERLLF D1,D1,8 VERLLF D2,D2,8 VERLLF D3,D3,8 VERLLF D4,D4,8 VERLLF D5,D5,8 VAF C0,C0,D0 VAF C1,C1,D1 VAF C2,C2,D2 VAF C3,C3,D3 VAF C4,C4,D4 VAF C5,C5,D5 VX B0,B0,C0 VX B1,B1,C1 VX B2,B2,C2 VX B3,B3,C3 VX B4,B4,C4 VX B5,B5,C5 VERLLF B0,B0,7 VERLLF B1,B1,7 VERLLF B2,B2,7 VERLLF B3,B3,7 VERLLF B4,B4,7 VERLLF B5,B5,7 VSLDB C0,C0,C0,8 VSLDB C1,C1,C1,8 VSLDB C2,C2,C2,8 VSLDB C3,C3,C3,8 VSLDB C4,C4,C4,8 VSLDB C5,C5,C5,8 VSLDB B0,B0,B0,12 VSLDB B1,B1,B1,12 VSLDB B2,B2,B2,12 VSLDB B3,B3,B3,12 VSLDB B4,B4,B4,12 VSLDB B5,B5,B5,12 VSLDB D0,D0,D0,4 VSLDB D1,D1,D1,4 VSLDB D2,D2,D2,4 VSLDB D3,D3,D3,4 VSLDB D4,D4,D4,4 VSLDB D5,D5,D5,4 brct %r0,.Loop_vx VAF A0,A0,K0 VAF B0,B0,K1 VAF C0,C0,K2 VAF D0,D0,K3 VAF A1,A1,K0 VAF D1,D1,T1 # +K[3]+1 VPERM A0,A0,A0,BEPERM VPERM B0,B0,B0,BEPERM VPERM C0,C0,C0,BEPERM VPERM D0,D0,D0,BEPERM clgfi LEN,0x40 jl .Ltail_vx VAF D2,D2,T2 # +K[3]+2 VAF D3,D3,T3 # +K[3]+3 VLM T0,T3,0,INP,0 VX A0,A0,T0 VX B0,B0,T1 VX C0,C0,T2 VX D0,D0,T3 VLM K0,T3,0,%r7,4 # re-load sigma and increments VSTM A0,D0,0,OUT,0 la INP,0x40(INP) la OUT,0x40(OUT) aghi LEN,-0x40 je .Ldone_vx VAF B1,B1,K1 VAF C1,C1,K2 VPERM A0,A1,A1,BEPERM VPERM B0,B1,B1,BEPERM VPERM C0,C1,C1,BEPERM VPERM D0,D1,D1,BEPERM clgfi LEN,0x40 jl .Ltail_vx VLM 
A1,D1,0,INP,0 VX A0,A0,A1 VX B0,B0,B1 VX C0,C0,C1 VX D0,D0,D1 VSTM A0,D0,0,OUT,0 la INP,0x40(INP) la OUT,0x40(OUT) aghi LEN,-0x40 je .Ldone_vx VAF A2,A2,K0 VAF B2,B2,K1 VAF C2,C2,K2 VPERM A0,A2,A2,BEPERM VPERM B0,B2,B2,BEPERM VPERM C0,C2,C2,BEPERM VPERM D0,D2,D2,BEPERM clgfi LEN,0x40 jl .Ltail_vx VLM A1,D1,0,INP,0 VX A0,A0,A1 VX B0,B0,B1 VX C0,C0,C1 VX D0,D0,D1 VSTM A0,D0,0,OUT,0 la INP,0x40(INP) la OUT,0x40(OUT) aghi LEN,-0x40 je .Ldone_vx VAF A3,A3,K0 VAF B3,B3,K1 VAF C3,C3,K2 VAF D2,K3,T3 # K[3]+3 VPERM A0,A3,A3,BEPERM VPERM B0,B3,B3,BEPERM VPERM C0,C3,C3,BEPERM VPERM D0,D3,D3,BEPERM clgfi LEN,0x40 jl .Ltail_vx VAF D3,D2,T1 # K[3]+4 VLM A1,D1,0,INP,0 VX A0,A0,A1 VX B0,B0,B1 VX C0,C0,C1 VX D0,D0,D1 VSTM A0,D0,0,OUT,0 la INP,0x40(INP) la OUT,0x40(OUT) aghi LEN,-0x40 je .Ldone_vx VAF A4,A4,K0 VAF B4,B4,K1 VAF C4,C4,K2 VAF D4,D4,D3 # +K[3]+4 VAF D3,D3,T1 # K[3]+5 VAF K3,D2,T3 # K[3]+=6 VPERM A0,A4,A4,BEPERM VPERM B0,B4,B4,BEPERM VPERM C0,C4,C4,BEPERM VPERM D0,D4,D4,BEPERM clgfi LEN,0x40 jl .Ltail_vx VLM A1,D1,0,INP,0 VX A0,A0,A1 VX B0,B0,B1 VX C0,C0,C1 VX D0,D0,D1 VSTM A0,D0,0,OUT,0 la INP,0x40(INP) la OUT,0x40(OUT) aghi LEN,-0x40 je .Ldone_vx VAF A5,A5,K0 VAF B5,B5,K1 VAF C5,C5,K2 VAF D5,D5,D3 # +K[3]+5 VPERM A0,A5,A5,BEPERM VPERM B0,B5,B5,BEPERM VPERM C0,C5,C5,BEPERM VPERM D0,D5,D5,BEPERM clgfi LEN,0x40 jl .Ltail_vx VLM A1,D1,0,INP,0 VX A0,A0,A1 VX B0,B0,B1 VX C0,C0,C1 VX D0,D0,D1 VSTM A0,D0,0,OUT,0 la INP,0x40(INP) la OUT,0x40(OUT) lhi %r0,10 aghi LEN,-0x40 jne .Loop_outer_vx .Ldone_vx: lmg %r6,%r7,FRAME+6*8(SP) la SP,FRAME(SP) BR_EX %r14 .Ltail_vx: VSTM A0,D0,8*8,SP,3 lghi %r1,0 .Loop_tail_vx: llgc %r5,0(%r1,INP) llgc %r6,8*8(%r1,SP) xr %r6,%r5 stc %r6,0(%r1,OUT) la %r1,1(%r1) brct LEN,.Loop_tail_vx lmg %r6,%r7,FRAME+6*8(SP) la SP,FRAME(SP) BR_EX %r14 ENDPROC(chacha20_vx) .previous
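Each VAF/VX/VERLLF group in the loops above is one step of the ChaCha quarter round, applied to four (chacha20_vx_4x) or six (chacha20_vx) blocks in parallel across vector registers. The scalar quarter round being vectorized, in C, shown only as a reference for the rotation amounts 16/12/8/7 (standard ChaCha20 definition, not kernel code):

#include <stdint.h>

static inline uint32_t rotl32(uint32_t v, int n)
{
        return (v << n) | (v >> (32 - n));
}

/* One ChaCha quarter round on four 32-bit state words. */
static void chacha_quarter_round(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
{
        *a += *b; *d ^= *a; *d = rotl32(*d, 16);
        *c += *d; *b ^= *c; *b = rotl32(*b, 12);
        *a += *b; *d ^= *a; *d = rotl32(*d, 8);
        *c += *d; *b ^= *c; *b = rotl32(*b, 7);
}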
aixcc-public/challenge-001-exemplar-source
3,716
arch/s390/lib/mem.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * String handling functions. * * Copyright IBM Corp. 2012 */ #include <linux/linkage.h> #include <asm/export.h> #include <asm/nospec-insn.h> GEN_BR_THUNK %r14 /* * void *memmove(void *dest, const void *src, size_t n) */ WEAK(memmove) ENTRY(__memmove) ltgr %r4,%r4 lgr %r1,%r2 jz .Lmemmove_exit aghi %r4,-1 clgr %r2,%r3 jnh .Lmemmove_forward la %r5,1(%r4,%r3) clgr %r2,%r5 jl .Lmemmove_reverse .Lmemmove_forward: srlg %r0,%r4,8 ltgr %r0,%r0 jz .Lmemmove_forward_remainder .Lmemmove_forward_loop: mvc 0(256,%r1),0(%r3) la %r1,256(%r1) la %r3,256(%r3) brctg %r0,.Lmemmove_forward_loop .Lmemmove_forward_remainder: larl %r5,.Lmemmove_mvc ex %r4,0(%r5) .Lmemmove_exit: BR_EX %r14 .Lmemmove_reverse: ic %r0,0(%r4,%r3) stc %r0,0(%r4,%r1) brctg %r4,.Lmemmove_reverse ic %r0,0(%r4,%r3) stc %r0,0(%r4,%r1) BR_EX %r14 .Lmemmove_mvc: mvc 0(1,%r1),0(%r3) ENDPROC(__memmove) EXPORT_SYMBOL(memmove) /* * memset implementation * * This code corresponds to the C construct below. We do distinguish * between clearing (c == 0) and setting a memory array (c != 0) simply * because nearly all memset invocations in the kernel clear memory and * the xc instruction is preferred in such cases. * * void *memset(void *s, int c, size_t n) * { * if (likely(c == 0)) * return __builtin_memset(s, 0, n); * return __builtin_memset(s, c, n); * } */ WEAK(memset) ENTRY(__memset) ltgr %r4,%r4 jz .Lmemset_exit ltgr %r3,%r3 jnz .Lmemset_fill aghi %r4,-1 srlg %r3,%r4,8 ltgr %r3,%r3 lgr %r1,%r2 jz .Lmemset_clear_remainder .Lmemset_clear_loop: xc 0(256,%r1),0(%r1) la %r1,256(%r1) brctg %r3,.Lmemset_clear_loop .Lmemset_clear_remainder: larl %r3,.Lmemset_xc ex %r4,0(%r3) .Lmemset_exit: BR_EX %r14 .Lmemset_fill: cghi %r4,1 lgr %r1,%r2 je .Lmemset_fill_exit aghi %r4,-2 srlg %r5,%r4,8 ltgr %r5,%r5 jz .Lmemset_fill_remainder .Lmemset_fill_loop: stc %r3,0(%r1) mvc 1(255,%r1),0(%r1) la %r1,256(%r1) brctg %r5,.Lmemset_fill_loop .Lmemset_fill_remainder: stc %r3,0(%r1) larl %r5,.Lmemset_mvc ex %r4,0(%r5) BR_EX %r14 .Lmemset_fill_exit: stc %r3,0(%r1) BR_EX %r14 .Lmemset_xc: xc 0(1,%r1),0(%r1) .Lmemset_mvc: mvc 1(1,%r1),0(%r1) ENDPROC(__memset) EXPORT_SYMBOL(memset) /* * memcpy implementation * * void *memcpy(void *dest, const void *src, size_t n) */ WEAK(memcpy) ENTRY(__memcpy) ltgr %r4,%r4 jz .Lmemcpy_exit aghi %r4,-1 srlg %r5,%r4,8 ltgr %r5,%r5 lgr %r1,%r2 jnz .Lmemcpy_loop .Lmemcpy_remainder: larl %r5,.Lmemcpy_mvc ex %r4,0(%r5) .Lmemcpy_exit: BR_EX %r14 .Lmemcpy_loop: mvc 0(256,%r1),0(%r3) la %r1,256(%r1) la %r3,256(%r3) brctg %r5,.Lmemcpy_loop j .Lmemcpy_remainder .Lmemcpy_mvc: mvc 0(1,%r1),0(%r3) ENDPROC(__memcpy) EXPORT_SYMBOL(memcpy) /* * __memset16/32/64 * * void *__memset16(uint16_t *s, uint16_t v, size_t count) * void *__memset32(uint32_t *s, uint32_t v, size_t count) * void *__memset64(uint64_t *s, uint64_t v, size_t count) */ .macro __MEMSET bits,bytes,insn ENTRY(__memset\bits) ltgr %r4,%r4 jz .L__memset_exit\bits cghi %r4,\bytes je .L__memset_store\bits aghi %r4,-(\bytes+1) srlg %r5,%r4,8 ltgr %r5,%r5 lgr %r1,%r2 jz .L__memset_remainder\bits .L__memset_loop\bits: \insn %r3,0(%r1) mvc \bytes(256-\bytes,%r1),0(%r1) la %r1,256(%r1) brctg %r5,.L__memset_loop\bits .L__memset_remainder\bits: \insn %r3,0(%r1) larl %r5,.L__memset_mvc\bits ex %r4,0(%r5) BR_EX %r14 .L__memset_store\bits: \insn %r3,0(%r2) .L__memset_exit\bits: BR_EX %r14 .L__memset_mvc\bits: mvc \bytes(1,%r1),0(%r1) ENDPROC(__memset\bits) .endm __MEMSET 16,2,sth EXPORT_SYMBOL(__memset16) __MEMSET 32,4,st EXPORT_SYMBOL(__memset32) __MEMSET 64,8,stg 
EXPORT_SYMBOL(__memset64)
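The __MEMSET macro stores the first value with sth/st/stg and then propagates it through overlapping MVC, 256 bytes per iteration. Functionally that is equivalent to the obvious C loop; a sketch of the 32-bit variant's semantics (illustrative only, the exported symbol is implemented by the assembly above):

#include <stddef.h>
#include <stdint.h>

/* C-level semantics of __memset32: store 'count' copies of 'v' starting at s. */
static void *memset32_ref(uint32_t *s, uint32_t v, size_t count)
{
        uint32_t *p = s;

        while (count--)
                *p++ = v;
        return s;
}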
aixcc-public/challenge-001-exemplar-source
6,528
arch/s390/purgatory/head.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Purgatory setup code * * Copyright IBM Corp. 2018 * * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com> */ #include <linux/linkage.h> #include <asm/asm-offsets.h> #include <asm/page.h> #include <asm/sigp.h> #include <asm/ptrace.h> /* The purgatory is the code running between two kernels. It's main purpose * is to verify that the next kernel was not corrupted after load and to * start it. * * If the next kernel is a crash kernel there are some peculiarities to * consider: * * First the purgatory is called twice. Once only to verify the * sha digest. So if the crash kernel got corrupted the old kernel can try * to trigger a stand-alone dumper. And once to actually load the crash kernel. * * Second the purgatory also has to swap the crash memory region with its * destination at address 0. As the purgatory is part of crash memory this * requires some finesse. The tactic here is that the purgatory first copies * itself to the end of the destination and then swaps the rest of the * memory running from there. */ #define bufsz purgatory_end-stack .macro MEMCPY dst,src,len lgr %r0,\dst lgr %r1,\len lgr %r2,\src lgr %r3,\len 20: mvcle %r0,%r2,0 jo 20b .endm .macro MEMSWAP dst,src,buf,len 10: larl %r0,purgatory_end larl %r1,stack slgr %r0,%r1 cgr \len,%r0 jh 11f lgr %r4,\len j 12f 11: lgr %r4,%r0 12: MEMCPY \buf,\dst,%r4 MEMCPY \dst,\src,%r4 MEMCPY \src,\buf,%r4 agr \dst,%r4 agr \src,%r4 sgr \len,%r4 cghi \len,0 jh 10b .endm .macro START_NEXT_KERNEL base subcode lg %r4,kernel_entry-\base(%r13) lg %r5,load_psw_mask-\base(%r13) ogr %r4,%r5 stg %r4,0(%r0) xgr %r0,%r0 lghi %r1,\subcode diag %r0,%r1,0x308 .endm .text .align PAGE_SIZE ENTRY(purgatory_start) /* The purgatory might be called after a diag308 so better set * architecture and addressing mode. */ lhi %r1,1 sigp %r1,%r0,SIGP_SET_ARCHITECTURE sam64 larl %r5,gprregs stmg %r6,%r15,0(%r5) basr %r13,0 .base_crash: /* Setup stack */ larl %r15,purgatory_end-STACK_FRAME_OVERHEAD /* If the next kernel is KEXEC_TYPE_CRASH the purgatory is called * directly with a flag passed in %r2 whether the purgatory shall do * checksum verification only (%r2 = 0 -> verification only). * * Check now and preserve over C function call by storing in * %r10 whith * 1 -> checksum verification only * 0 -> load new kernel */ lghi %r10,0 lg %r11,kernel_type-.base_crash(%r13) cghi %r11,1 /* KEXEC_TYPE_CRASH */ jne .do_checksum_verification cghi %r2,0 /* checksum verification only */ jne .do_checksum_verification lghi %r10,1 .do_checksum_verification: brasl %r14,verify_sha256_digest cghi %r10,1 /* checksum verification only */ je .return_old_kernel cghi %r2,0 /* checksum match */ jne .disabled_wait /* If the next kernel is a crash kernel the purgatory has to swap * the mem regions first. */ cghi %r11,1 /* KEXEC_TYPE_CRASH */ je .start_crash_kernel /* start normal kernel */ START_NEXT_KERNEL .base_crash 0 .return_old_kernel: lmg %r6,%r15,gprregs-.base_crash(%r13) br %r14 .disabled_wait: lpswe disabled_wait_psw-.base_crash(%r13) .start_crash_kernel: /* Location of purgatory_start in crash memory */ larl %r0,.base_crash larl %r1,purgatory_start slgr %r0,%r1 lgr %r8,%r13 sgr %r8,%r0 /* Destination for this code i.e. end of memory to be swapped. */ larl %r0,purgatory_end larl %r1,purgatory_start slgr %r0,%r1 lg %r9,crash_size-.base_crash(%r13) sgr %r9,%r0 /* Destination in crash memory, i.e. same as r9 but in crash memory. */ lg %r10,crash_start-.base_crash(%r13) agr %r10,%r9 /* Buffer location (in crash memory) and size. 
As the purgatory is * behind the point of no return it can re-use the stack as buffer. */ larl %r11,purgatory_end larl %r12,stack slgr %r11,%r12 MEMCPY %r12,%r9,%r11 /* dst -> (crash) buf */ MEMCPY %r9,%r8,%r11 /* self -> dst */ /* Jump to new location. */ lgr %r7,%r9 larl %r0,.jump_to_dst larl %r1,purgatory_start slgr %r0,%r1 agr %r7,%r0 br %r7 .jump_to_dst: basr %r13,0 .base_dst: /* clear buffer */ MEMCPY %r12,%r10,%r11 /* (crash) buf -> (crash) dst */ /* Load new buffer location after jump */ larl %r7,stack lgr %r0,%r7 larl %r1,purgatory_start slgr %r0,%r1 agr %r10,%r0 MEMCPY %r10,%r7,%r11 /* (new) buf -> (crash) buf */ /* Now the code is set up to run from its designated location. Start * swapping the rest of crash memory now. * * The registers will be used as follow: * * %r0-%r4 reserved for macros defined above * %r5-%r6 tmp registers * %r7 pointer to current struct sha region * %r8 index to iterate over all sha regions * %r9 pointer in crash memory * %r10 pointer in old kernel * %r11 total size (still) to be moved * %r12 pointer to buffer */ lgr %r12,%r7 lgr %r11,%r9 lghi %r10,0 lg %r9,crash_start-.base_dst(%r13) lghi %r8,16 /* KEXEC_SEGMENTS_MAX */ larl %r7,purgatory_sha_regions j .loop_first /* Loop over all purgatory_sha_regions. */ .loop_next: aghi %r8,-1 cghi %r8,0 je .loop_out aghi %r7,__KEXEC_SHA_REGION_SIZE .loop_first: lg %r5,__KEXEC_SHA_REGION_START(%r7) cghi %r5,0 je .loop_next /* Copy [end last sha region, start current sha region) */ /* Note: kexec_sha_region->start points in crash memory */ sgr %r5,%r9 MEMCPY %r9,%r10,%r5 agr %r9,%r5 agr %r10,%r5 sgr %r11,%r5 /* Swap sha region */ lg %r6,__KEXEC_SHA_REGION_LEN(%r7) MEMSWAP %r9,%r10,%r12,%r6 sg %r11,__KEXEC_SHA_REGION_LEN(%r7) j .loop_next .loop_out: /* Copy rest of crash memory */ MEMCPY %r9,%r10,%r11 /* start crash kernel */ START_NEXT_KERNEL .base_dst 1 load_psw_mask: .long 0x00080000,0x80000000 .align 8 disabled_wait_psw: .quad 0x0002000180000000 .quad 0x0000000000000000 + .do_checksum_verification gprregs: .rept 10 .quad 0 .endr /* Macro to define a global variable with name and size (in bytes) to be * shared with C code. * * Add the .size and .type attribute to satisfy checks on the Elf_Sym during * purgatory load. */ .macro GLOBAL_VARIABLE name,size \name: .global \name .size \name,\size .type \name,object .skip \size,0 .endm GLOBAL_VARIABLE purgatory_sha256_digest,32 GLOBAL_VARIABLE purgatory_sha_regions,16*__KEXEC_SHA_REGION_SIZE GLOBAL_VARIABLE kernel_entry,8 GLOBAL_VARIABLE kernel_type,8 GLOBAL_VARIABLE crash_start,8 GLOBAL_VARIABLE crash_size,8 .align PAGE_SIZE stack: /* The buffer to move this code must be as big as the code. */ .skip stack-purgatory_start .align PAGE_SIZE purgatory_end:
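The MEMSWAP macro swaps two memory regions through a scratch buffer that may be smaller than the regions, handling at most one buffer-sized chunk per pass. The same idea in C, with memcpy standing in for the MVCLE-based MEMCPY macro (a sketch of the algorithm, not the purgatory code):

#include <stddef.h>
#include <string.h>

/* Swap len bytes between dst and src using a bounded scratch buffer. */
static void memswap_chunked(unsigned char *dst, unsigned char *src,
                            unsigned char *buf, size_t bufsz, size_t len)
{
        while (len) {
                size_t n = len > bufsz ? bufsz : len;

                memcpy(buf, dst, n);    /* dst -> buf */
                memcpy(dst, src, n);    /* src -> dst */
                memcpy(src, buf, n);    /* buf -> src */
                dst += n;
                src += n;
                len -= n;
        }
}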
aixcc-public/challenge-001-exemplar-source
9,081
arch/s390/boot/head.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright IBM Corp. 1999, 2010 * * Author(s): Hartmut Penner <hp@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> * Rob van der Heij <rvdhei@iae.nl> * * There are 5 different IPL methods * 1) load the image directly into ram at address 0 and do an PSW restart * 2) linload will load the image from address 0x10000 to memory 0x10000 * and start the code thru LPSW 0x0008000080010000 (VM only, deprecated) * 3) generate the tape ipl header, store the generated image on a tape * and ipl from it * In case of SL tape you need to IPL 5 times to get past VOL1 etc * 4) generate the vm reader ipl header, move the generated image to the * VM reader (use option NOH!) and do a ipl from reader (VM only) * 5) direct call of start by the SALIPL loader * We use the cpuid to distinguish between VM and native ipl * params for kernel are pushed to 0x10400 (see setup.h) * */ #include <linux/init.h> #include <linux/linkage.h> #include <asm/asm-offsets.h> #include <asm/page.h> #include <asm/ptrace.h> #include <asm/sclp.h> #include "boot.h" #define EP_OFFSET 0x10008 #define EP_STRING "S390EP" #define IPL_BS 0x730 __HEAD ipl_start: mvi __LC_AR_MODE_ID,1 # set esame flag slr %r0,%r0 # set cpuid to zero lhi %r1,2 # mode 2 = esame (dump) sigp %r1,%r0,0x12 # switch to esame mode sam64 # switch to 64 bit addressing mode lgh %r1,__LC_SUBCHANNEL_ID # test if subchannel number brctg %r1,.Lnoload # is valid llgf %r1,__LC_SUBCHANNEL_ID # load ipl subchannel number lghi %r2,IPL_BS # load start address bras %r14,.Lloader # load rest of ipl image larl %r12,parmarea # pointer to parameter area stg %r1,IPL_DEVICE-PARMAREA(%r12) # save ipl device number # # load parameter file from ipl device # .Lagain1: larl %r2,_end # ramdisk loc. is temp bras %r14,.Lloader # load parameter file ltgr %r2,%r2 # got anything ? jz .Lnopf lg %r3,MAX_COMMAND_LINE_SIZE-PARMAREA(%r12) aghi %r3,-1 clgr %r2,%r3 jl .Lnotrunc lgr %r2,%r3 .Lnotrunc: larl %r4,_end larl %r13,.L_hdr clc 0(3,%r4),0(%r13) # if it is HDRx jz .Lagain1 # skip dataset header larl %r13,.L_eof clc 0(3,%r4),0(%r13) # if it is EOFx jz .Lagain1 # skip dateset trailer lgr %r5,%r2 la %r6,COMMAND_LINE-PARMAREA(%r12) lgr %r7,%r2 aghi %r7,1 mvcl %r6,%r4 .Lnopf: # # load ramdisk from ipl device # .Lagain2: larl %r2,_end # addr of ramdisk stg %r2,INITRD_START-PARMAREA(%r12) bras %r14,.Lloader # load ramdisk stg %r2,INITRD_SIZE-PARMAREA(%r12) # store size of rd ltgr %r2,%r2 jnz .Lrdcont stg %r2,INITRD_START-PARMAREA(%r12) # no ramdisk found .Lrdcont: larl %r2,_end larl %r13,.L_hdr # skip HDRx and EOFx clc 0(3,%r2),0(%r13) jz .Lagain2 larl %r13,.L_eof clc 0(3,%r2),0(%r13) jz .Lagain2 # # reset files in VM reader # larl %r13,.Lcpuid stidp 0(%r13) # store cpuid tm 0(%r13),0xff # running VM ? 
jno .Lnoreset larl %r2,.Lreset lghi %r3,26 diag %r2,%r3,8 larl %r5,.Lirb stsch 0(%r5) # check if irq is pending tm 30(%r5),0x0f # by verifying if any of the jnz .Lwaitforirq # activity or status control tm 31(%r5),0xff # bits is set in the schib jz .Lnoreset .Lwaitforirq: bras %r14,.Lirqwait # wait for IO interrupt c %r1,__LC_SUBCHANNEL_ID # compare subchannel number jne .Lwaitforirq larl %r5,.Lirb tsch 0(%r5) .Lnoreset: j .Lnoload # # everything loaded, go for it # .Lnoload: jg startup # # subroutine to wait for end I/O # .Lirqwait: larl %r13,.Lnewpswmask # set up IO interrupt psw mvc __LC_IO_NEW_PSW(8),0(%r13) stg %r14,__LC_IO_NEW_PSW+8 larl %r13,.Lwaitpsw lpswe 0(%r13) .Lioint: # # subroutine for loading cards from the reader # .Lloader: lgr %r4,%r14 larl %r3,.Lorb # r2 = address of orb into r2 larl %r5,.Lirb # r4 = address of irb larl %r6,.Lccws lghi %r7,20 .Linit: st %r2,4(%r6) # initialize CCW data addresses la %r2,0x50(%r2) la %r6,8(%r6) brctg %r7,.Linit larl %r13,.Lcr6 lctlg %c6,%c6,0(%r13) xgr %r2,%r2 .Lldlp: ssch 0(%r3) # load chunk of 1600 bytes jnz .Llderr .Lwait4irq: bras %r14,.Lirqwait c %r1,__LC_SUBCHANNEL_ID # compare subchannel number jne .Lwait4irq tsch 0(%r5) xgr %r0,%r0 ic %r0,8(%r5) # get device status cghi %r0,8 # channel end ? je .Lcont cghi %r0,12 # channel end + device end ? je .Lcont llgf %r0,4(%r5) sgf %r0,8(%r3) # r0/8 = number of ccws executed mghi %r0,10 # *10 = number of bytes in ccws llgh %r3,10(%r5) # get residual count sgr %r0,%r3 # #ccws*80-residual=#bytes read agr %r2,%r0 br %r4 # r2 contains the total size .Lcont: aghi %r2,0x640 # add 0x640 to total size larl %r6,.Lccws lghi %r7,20 .Lincr: l %r0,4(%r6) # update CCW data addresses aghi %r0,0x640 st %r0,4(%r6) aghi %r6,8 brctg %r7,.Lincr j .Lldlp .Llderr: larl %r13,.Lcrash lpsw 0(%r13) .align 8 .Lwaitpsw: .quad 0x0202000180000000,.Lioint .Lnewpswmask: .quad 0x0000000180000000 .align 8 .Lorb: .long 0x00000000,0x0080ff00,.Lccws .Lirb: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 .align 8 .Lcr6: .quad 0x00000000ff000000 .align 8 .Lcrash:.long 0x000a0000,0x00000000 .align 8 .Lccws: .rept 19 .long 0x02600050,0x00000000 .endr .long 0x02200050,0x00000000 .Lreset:.byte 0xc3,0xc8,0xc1,0xd5,0xc7,0xc5,0x40,0xd9,0xc4,0xd9,0x40 .byte 0xc1,0xd3,0xd3,0x40,0xd2,0xc5,0xc5,0xd7,0x40,0xd5,0xd6 .byte 0xc8,0xd6,0xd3,0xc4 # "change rdr all keep nohold" .L_eof: .long 0xc5d6c600 /* C'EOF' */ .L_hdr: .long 0xc8c4d900 /* C'HDR' */ .align 8 .Lcpuid:.fill 8,1,0 # # normal startup-code, running in absolute addressing mode # this is called either by the ipl loader or directly by PSW restart # or linload or SALIPL # .org STARTUP_NORMAL_OFFSET - IPL_START SYM_CODE_START(startup) j startup_normal .org EP_OFFSET - IPL_START # # This is a list of s390 kernel entry points. At address 0x1000f the number of # valid entry points is stored. # # IMPORTANT: Do not change this table, it is s390 kernel ABI! 
# .ascii EP_STRING .byte 0x00,0x01 # # kdump startup-code, running in 64 bit absolute addressing mode # .org STARTUP_KDUMP_OFFSET - IPL_START j startup_kdump SYM_CODE_END(startup) SYM_CODE_START_LOCAL(startup_normal) mvi __LC_AR_MODE_ID,1 # set esame flag slr %r0,%r0 # set cpuid to zero lhi %r1,2 # mode 2 = esame (dump) sigp %r1,%r0,0x12 # switch to esame mode bras %r13,0f .fill 16,4,0x0 0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs sam64 # switch to 64 bit addressing mode larl %r13,.Lext_new_psw mvc __LC_EXT_NEW_PSW(16),0(%r13) larl %r13,.Lpgm_new_psw mvc __LC_PGM_NEW_PSW(16),0(%r13) larl %r13,.Lio_new_psw mvc __LC_IO_NEW_PSW(16),0(%r13) xc 0x200(256),0x200 # partially clear lowcore xc 0x300(256),0x300 xc 0xe00(256),0xe00 xc 0xf00(256),0xf00 larl %r13,.Lctl lctlg %c0,%c15,0(%r13) # load control registers stcke __LC_BOOT_CLOCK mvc __LC_LAST_UPDATE_CLOCK(8),__LC_BOOT_CLOCK+1 larl %r13,6f spt 0(%r13) mvc __LC_LAST_UPDATE_TIMER(8),0(%r13) larl %r15,_stack_end-STACK_FRAME_OVERHEAD brasl %r14,sclp_early_setup_buffer brasl %r14,verify_facilities brasl %r14,startup_kernel SYM_CODE_END(startup_normal) .align 8 6: .long 0x7fffffff,0xffffffff .Lext_new_psw: .quad 0x0002000180000000,0x1b0 # disabled wait .Lpgm_new_psw: .quad 0x0000000180000000,startup_pgm_check_handler .Lio_new_psw: .quad 0x0002000180000000,0x1f0 # disabled wait .Lctl: .quad 0x04040000 # cr0: AFP registers & secondary space .quad 0 # cr1: primary space segment table .quad 0 # cr2: dispatchable unit control table .quad 0 # cr3: instruction authorization .quad 0xffff # cr4: instruction authorization .quad 0 # cr5: primary-aste origin .quad 0 # cr6: I/O interrupts .quad 0 # cr7: secondary space segment table .quad 0x0000000000008000 # cr8: access registers translation .quad 0 # cr9: tracing off .quad 0 # cr10: tracing off .quad 0 # cr11: tracing off .quad 0 # cr12: tracing off .quad 0 # cr13: home space segment table .quad 0xc0000000 # cr14: machine check handling off .quad 0 # cr15: linkage stack operations #include "head_kdump.S" # # This program check is active immediately after kernel start # and until early_pgm_check_handler is set in kernel/early.c # It simply saves general/control registers and psw in # the save area and does disabled wait with a faulty address. # SYM_CODE_START_LOCAL(startup_pgm_check_handler) stmg %r8,%r15,__LC_SAVE_AREA_SYNC la %r8,4095 stctg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r8) stmg %r0,%r7,__LC_GPREGS_SAVE_AREA-4095(%r8) mvc __LC_GPREGS_SAVE_AREA-4095+64(64,%r8),__LC_SAVE_AREA_SYNC mvc __LC_PSW_SAVE_AREA-4095(16,%r8),__LC_PGM_OLD_PSW mvc __LC_RETURN_PSW(16),__LC_PGM_OLD_PSW ni __LC_RETURN_PSW,0xfc # remove IO and EX bits ni __LC_RETURN_PSW+1,0xfb # remove MCHK bit oi __LC_RETURN_PSW+1,0x2 # set wait state bit larl %r9,.Lold_psw_disabled_wait stg %r9,__LC_PGM_NEW_PSW+8 larl %r15,_dump_info_stack_end-STACK_FRAME_OVERHEAD brasl %r14,print_pgm_check_info .Lold_psw_disabled_wait: la %r8,4095 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r8) lpswe __LC_RETURN_PSW # disabled wait SYM_CODE_END(startup_pgm_check_handler)
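After each TSCH in .Lloader the code derives how many bytes the channel program actually read: the difference between the CCW address reported in the IRB and the CCW origin in the ORB, divided by 8, gives the number of executed CCWs; each CCW reads 0x50 (80) bytes, and the residual count is subtracted. A worked C version of that arithmetic (parameter names are illustrative; the real values come from the IRB and ORB at .Lirb/.Lorb):

#include <stdint.h>

/* Bytes read by the channel program: each executed CCW covers 0x50 (80) bytes. */
static uint32_t loader_bytes_read(uint32_t irb_ccw_addr, uint32_t orb_ccw_origin,
                                  uint16_t residual)
{
        uint32_t ccws = (irb_ccw_addr - orb_ccw_origin) / 8;   /* CCWs are 8 bytes each */

        return ccws * 80 - residual;    /* #ccws*80 - residual = #bytes read */
}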
aixcc-public/challenge-001-exemplar-source
2,418
arch/s390/boot/head_kdump.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * S390 kdump lowlevel functions (new kernel) * * Copyright IBM Corp. 2011 * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com> */ #include <asm/sigp.h> #define DATAMOVER_ADDR 0x4000 #define COPY_PAGE_ADDR 0x6000 #ifdef CONFIG_CRASH_DUMP # # kdump entry (new kernel - not yet relocated) # # Note: This code has to be position independent # SYM_CODE_START_LOCAL(startup_kdump) lhi %r1,2 # mode 2 = esame (dump) sigp %r1,%r0,SIGP_SET_ARCHITECTURE # Switch to esame mode sam64 # Switch to 64 bit addressing basr %r13,0 .Lbase: larl %r2,.Lbase_addr # Check, if we have been lg %r2,0(%r2) # already relocated: clgr %r2,%r13 # jne .Lrelocate # No : Start data mover lghi %r2,0 # Yes: Start kdump kernel brasl %r14,startup_kdump_relocated .Lrelocate: larl %r4,startup lg %r2,0x418(%r4) # Get kdump base lg %r3,0x420(%r4) # Get kdump size larl %r10,.Lcopy_start # Source of data mover lghi %r8,DATAMOVER_ADDR # Target of data mover mvc 0(256,%r8),0(%r10) # Copy data mover code agr %r8,%r2 # Copy data mover to mvc 0(256,%r8),0(%r10) # reserved mem lghi %r14,DATAMOVER_ADDR # Jump to copied data mover basr %r14,%r14 .Lbase_addr: .quad .Lbase # # kdump data mover code (runs at address DATAMOVER_ADDR) # # r2: kdump base address # r3: kdump size # .Lcopy_start: basr %r13,0 # Base 0: lgr %r11,%r2 # Save kdump base address lgr %r12,%r2 agr %r12,%r3 # Compute kdump end address lghi %r5,0 lghi %r10,COPY_PAGE_ADDR # Load copy page address 1: mvc 0(256,%r10),0(%r5) # Copy old kernel to tmp mvc 0(256,%r5),0(%r11) # Copy new kernel to old mvc 0(256,%r11),0(%r10) # Copy tmp to new aghi %r11,256 aghi %r5,256 clgr %r11,%r12 jl 1b lg %r14,.Lstartup_kdump-0b(%r13) basr %r14,%r14 # Start relocated kernel .Lstartup_kdump: .long 0x00000000,0x00000000 + startup_kdump_relocated .Lcopy_end: # # Startup of kdump (relocated new kernel) # .align 2 startup_kdump_relocated: basr %r13,0 0: lpswe .Lrestart_psw-0b(%r13) # Start new kernel... SYM_CODE_END(startup_kdump) .align 8 .Lrestart_psw: .quad 0x0000000080000000,0x0000000000000000 + startup #else SYM_CODE_START_LOCAL(startup_kdump) larl %r13,startup_kdump_crash lpswe 0(%r13) SYM_CODE_END(startup_kdump) .align 8 startup_kdump_crash: .quad 0x0002000080000000,0x0000000000000000 + startup_kdump_crash #endif /* CONFIG_CRASH_DUMP */
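The data mover exchanges low memory with the crash region 256 bytes at a time, bouncing each chunk through the copy page at COPY_PAGE_ADDR. The effect of that loop expressed in C (a sketch: the 256-byte MVC stride and the three-way copy follow the assembly, everything else is illustrative):

#include <stddef.h>
#include <string.h>

/* Exchange [low, low+size) with [kdump_base, kdump_base+size) via a bounce page. */
static void kdump_swap(unsigned char *low, unsigned char *kdump_base,
                       unsigned char *copy_page, size_t size)
{
        for (size_t off = 0; off < size; off += 256) {
                memcpy(copy_page, low + off, 256);              /* old kernel -> tmp */
                memcpy(low + off, kdump_base + off, 256);       /* new kernel -> old */
                memcpy(kdump_base + off, copy_page, 256);       /* tmp -> new */
        }
}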
aixcc-public/challenge-001-exemplar-source
2,261
arch/s390/boot/vmlinux.lds.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <asm-generic/vmlinux.lds.h> #include <asm/vmlinux.lds.h> #include <asm/thread_info.h> #include <asm/page.h> #include <asm/sclp.h> #include "boot.h" OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390") OUTPUT_ARCH(s390:64-bit) ENTRY(startup) SECTIONS { . = 0; .ipldata : { *(.ipldata) } . = IPL_START; .head.text : { _head = . ; HEAD_TEXT _ehead = . ; } . = PARMAREA; .parmarea : { *(.parmarea) } .text : { _text = .; /* Text */ *(.text) *(.text.*) _etext = . ; } .rodata : { _rodata = . ; *(.rodata) /* read-only data */ *(.rodata.*) _erodata = . ; } NOTES .data : { _data = . ; *(.data) *(.data.*) _edata = . ; } BOOT_DATA BOOT_DATA_PRESERVED /* * This is the BSS section of the decompressor and not of the decompressed Linux kernel. * It will consume place in the decompressor's image. */ . = ALIGN(8); .bss : { _bss = . ; *(.bss) *(.bss.*) *(COMMON) /* * Stacks for the decompressor */ . = ALIGN(PAGE_SIZE); _dump_info_stack_start = .; . += PAGE_SIZE; _dump_info_stack_end = .; . = ALIGN(PAGE_SIZE); _stack_start = .; . += BOOT_STACK_SIZE; _stack_end = .; _ebss = .; } /* * uncompressed image info used by the decompressor it should match * struct vmlinux_info. It comes from .vmlinux.info section of * uncompressed vmlinux in a form of info.o */ . = ALIGN(8); .vmlinux.info : { _vmlinux_info = .; *(.vmlinux.info) } .decompressor.syms : { . += 1; /* make sure we have \0 before the first entry */ . = ALIGN(2); _decompressor_syms_start = .; *(.decompressor.syms) _decompressor_syms_end = .; } #ifdef CONFIG_KERNEL_UNCOMPRESSED . = 0x100000; #else . = ALIGN(8); #endif .rodata.compressed : { _compressed_start = .; *(.vmlinux.bin.compressed) _compressed_end = .; } #define SB_TRAILER_SIZE 32 /* Trailer needed for Secure Boot */ . += SB_TRAILER_SIZE; /* make sure .sb.trailer does not overwrite the previous section */ . = ALIGN(4096) - SB_TRAILER_SIZE; .sb.trailer : { QUAD(0) QUAD(0) QUAD(0) QUAD(0x000000207a49504c) } _end = .; /* Sections to be discarded */ /DISCARD/ : { *(.eh_frame) *(__ex_table) *(*__ksymtab*) *(___kcrctab*) } }
aixcc-public/challenge-001-exemplar-source
3,632
arch/s390/kernel/vdso32/vdso32.lds.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * This is the infamous ld script for the 64 bits vdso * library */ #include <asm/page.h> #include <asm/vdso.h> OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390") OUTPUT_ARCH(s390:31-bit) ENTRY(_start) SECTIONS { PROVIDE(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE); #ifdef CONFIG_TIME_NS PROVIDE(_timens_data = _vdso_data + PAGE_SIZE); #endif . = VDSO_LBASE + SIZEOF_HEADERS; .hash : { *(.hash) } :text .gnu.hash : { *(.gnu.hash) } .dynsym : { *(.dynsym) } .dynstr : { *(.dynstr) } .gnu.version : { *(.gnu.version) } .gnu.version_d : { *(.gnu.version_d) } .gnu.version_r : { *(.gnu.version_r) } .note : { *(.note.*) } :text :note . = ALIGN(16); .text : { *(.text .stub .text.* .gnu.linkonce.t.*) } :text PROVIDE(__etext = .); PROVIDE(_etext = .); PROVIDE(etext = .); /* * Other stuff is appended to the text segment: */ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } .rodata1 : { *(.rodata1) } .dynamic : { *(.dynamic) } :text :dynamic .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr .eh_frame : { KEEP (*(.eh_frame)) } :text .gcc_except_table : { *(.gcc_except_table .gcc_except_table.*) } .rela.dyn ALIGN(8) : { *(.rela.dyn) } .got ALIGN(8) : { *(.got .toc) } .got.plt ALIGN(8) : { *(.got.plt) } _end = .; PROVIDE(end = .); /* * Stabs debugging sections are here too. */ .stab 0 : { *(.stab) } .stabstr 0 : { *(.stabstr) } .stab.excl 0 : { *(.stab.excl) } .stab.exclstr 0 : { *(.stab.exclstr) } .stab.index 0 : { *(.stab.index) } .stab.indexstr 0 : { *(.stab.indexstr) } .comment 0 : { *(.comment) } /* * DWARF debug sections. * Symbols in the DWARF debugging sections are relative to the * beginning of the section so we begin them at 0. */ /* DWARF 1 */ .debug 0 : { *(.debug) } .line 0 : { *(.line) } /* GNU DWARF 1 extensions */ .debug_srcinfo 0 : { *(.debug_srcinfo) } .debug_sfnames 0 : { *(.debug_sfnames) } /* DWARF 1.1 and DWARF 2 */ .debug_aranges 0 : { *(.debug_aranges) } .debug_pubnames 0 : { *(.debug_pubnames) } /* DWARF 2 */ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } .debug_abbrev 0 : { *(.debug_abbrev) } .debug_line 0 : { *(.debug_line) } .debug_frame 0 : { *(.debug_frame) } .debug_str 0 : { *(.debug_str) } .debug_loc 0 : { *(.debug_loc) } .debug_macinfo 0 : { *(.debug_macinfo) } /* SGI/MIPS DWARF 2 extensions */ .debug_weaknames 0 : { *(.debug_weaknames) } .debug_funcnames 0 : { *(.debug_funcnames) } .debug_typenames 0 : { *(.debug_typenames) } .debug_varnames 0 : { *(.debug_varnames) } /* DWARF 3 */ .debug_pubtypes 0 : { *(.debug_pubtypes) } .debug_ranges 0 : { *(.debug_ranges) } .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } /DISCARD/ : { *(.note.GNU-stack) *(.branch_lt) *(.data .data.* .gnu.linkonce.d.* .sdata*) *(.bss .sbss .dynbss .dynsbss) } } /* * Very old versions of ld do not recognize this name token; use the constant. */ #define PT_GNU_EH_FRAME 0x6474e550 /* * We must supply the ELF program headers explicitly to get just one * PT_LOAD segment, and set the flags explicitly to make segments read-only. */ PHDRS { text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ note PT_NOTE FLAGS(4); /* PF_R */ eh_frame_hdr PT_GNU_EH_FRAME; } /* * This controls what symbols we export from the DSO. */ VERSION { VDSO_VERSION_STRING { global: /* * Has to be there for the kernel to find */ __kernel_compat_restart_syscall; __kernel_compat_rt_sigreturn; __kernel_compat_sigreturn; local: *; }; }
aixcc-public/challenge-001-exemplar-source
3,706
arch/s390/kernel/vdso64/vdso64.lds.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * This is the infamous ld script for the 64 bits vdso * library */ #include <asm/page.h> #include <asm/vdso.h> OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390") OUTPUT_ARCH(s390:64-bit) ENTRY(_start) SECTIONS { PROVIDE(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE); #ifdef CONFIG_TIME_NS PROVIDE(_timens_data = _vdso_data + PAGE_SIZE); #endif . = VDSO_LBASE + SIZEOF_HEADERS; .hash : { *(.hash) } :text .gnu.hash : { *(.gnu.hash) } .dynsym : { *(.dynsym) } .dynstr : { *(.dynstr) } .gnu.version : { *(.gnu.version) } .gnu.version_d : { *(.gnu.version_d) } .gnu.version_r : { *(.gnu.version_r) } .note : { *(.note.*) } :text :note . = ALIGN(16); .text : { *(.text .stub .text.* .gnu.linkonce.t.*) } :text PROVIDE(__etext = .); PROVIDE(_etext = .); PROVIDE(etext = .); /* * Other stuff is appended to the text segment: */ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } .rodata1 : { *(.rodata1) } .dynamic : { *(.dynamic) } :text :dynamic .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr .eh_frame : { KEEP (*(.eh_frame)) } :text .gcc_except_table : { *(.gcc_except_table .gcc_except_table.*) } .rela.dyn ALIGN(8) : { *(.rela.dyn) } .got ALIGN(8) : { *(.got .toc) } .got.plt ALIGN(8) : { *(.got.plt) } _end = .; PROVIDE(end = .); /* * Stabs debugging sections are here too. */ .stab 0 : { *(.stab) } .stabstr 0 : { *(.stabstr) } .stab.excl 0 : { *(.stab.excl) } .stab.exclstr 0 : { *(.stab.exclstr) } .stab.index 0 : { *(.stab.index) } .stab.indexstr 0 : { *(.stab.indexstr) } .comment 0 : { *(.comment) } /* * DWARF debug sections. * Symbols in the DWARF debugging sections are relative to the * beginning of the section so we begin them at 0. */ /* DWARF 1 */ .debug 0 : { *(.debug) } .line 0 : { *(.line) } /* GNU DWARF 1 extensions */ .debug_srcinfo 0 : { *(.debug_srcinfo) } .debug_sfnames 0 : { *(.debug_sfnames) } /* DWARF 1.1 and DWARF 2 */ .debug_aranges 0 : { *(.debug_aranges) } .debug_pubnames 0 : { *(.debug_pubnames) } /* DWARF 2 */ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } .debug_abbrev 0 : { *(.debug_abbrev) } .debug_line 0 : { *(.debug_line) } .debug_frame 0 : { *(.debug_frame) } .debug_str 0 : { *(.debug_str) } .debug_loc 0 : { *(.debug_loc) } .debug_macinfo 0 : { *(.debug_macinfo) } /* SGI/MIPS DWARF 2 extensions */ .debug_weaknames 0 : { *(.debug_weaknames) } .debug_funcnames 0 : { *(.debug_funcnames) } .debug_typenames 0 : { *(.debug_typenames) } .debug_varnames 0 : { *(.debug_varnames) } /* DWARF 3 */ .debug_pubtypes 0 : { *(.debug_pubtypes) } .debug_ranges 0 : { *(.debug_ranges) } .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } /DISCARD/ : { *(.note.GNU-stack) *(.branch_lt) *(.data .data.* .gnu.linkonce.d.* .sdata*) *(.bss .sbss .dynbss .dynsbss) } } /* * Very old versions of ld do not recognize this name token; use the constant. */ #define PT_GNU_EH_FRAME 0x6474e550 /* * We must supply the ELF program headers explicitly to get just one * PT_LOAD segment, and set the flags explicitly to make segments read-only. */ PHDRS { text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ note PT_NOTE FLAGS(4); /* PF_R */ eh_frame_hdr PT_GNU_EH_FRAME; } /* * This controls what symbols we export from the DSO. */ VERSION { VDSO_VERSION_STRING { global: /* * Has to be there for the kernel to find */ __kernel_gettimeofday; __kernel_clock_gettime; __kernel_clock_getres; __kernel_getcpu; __kernel_restart_syscall; __kernel_rt_sigreturn; __kernel_sigreturn; local: *; }; }
aixcc-public/challenge-001-exemplar-source
1,480
arch/s390/kernel/vdso64/vdso_user_wrapper.S
/* SPDX-License-Identifier: GPL-2.0 */
#include <asm/vdso.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
#include <asm/dwarf.h>
#include <asm/ptrace.h>

#define WRAPPER_FRAME_SIZE (STACK_FRAME_OVERHEAD+8)

/*
 * Older glibc versions called the vdso without allocating a stack frame.
 * This wrapper is only used to allocate one. See
 * https://sourceware.org/git/?p=glibc.git;a=commit;h=478593e6374f3818da39332260dc453cb19cfa1e
 * for details.
 */
.macro vdso_func func
	.globl __kernel_\func
	.type  __kernel_\func,@function
	.align 8
__kernel_\func:
	CFI_STARTPROC
	aghi	%r15,-WRAPPER_FRAME_SIZE
	CFI_DEF_CFA_OFFSET (STACK_FRAME_OVERHEAD + WRAPPER_FRAME_SIZE)
	CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
	stg	%r14,STACK_FRAME_OVERHEAD(%r15)
	brasl	%r14,__s390_vdso_\func
	lg	%r14,STACK_FRAME_OVERHEAD(%r15)
	aghi	%r15,WRAPPER_FRAME_SIZE
	CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
	CFI_RESTORE 15
	br	%r14
	CFI_ENDPROC
	.size	__kernel_\func,.-__kernel_\func
.endm

vdso_func gettimeofday
vdso_func clock_getres
vdso_func clock_gettime
vdso_func getcpu

.macro vdso_syscall func,syscall
	.globl __kernel_\func
	.type  __kernel_\func,@function
	.align 8
__kernel_\func:
	CFI_STARTPROC
	svc	\syscall
	/* Make sure we notice when a syscall returns, which shouldn't happen */
	.word	0
	CFI_ENDPROC
	.size	__kernel_\func,.-__kernel_\func
.endm

vdso_syscall restart_syscall,__NR_restart_syscall
vdso_syscall sigreturn,__NR_sigreturn
vdso_syscall rt_sigreturn,__NR_rt_sigreturn
aixcc-public/challenge-001-exemplar-source
7,652
arch/x86/xen/xen-asm.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Asm versions of Xen pv-ops, suitable for direct use. * * We only bother with direct forms (ie, vcpu in percpu data) of the * operations here; the indirect forms are better handled in C. */ #include <asm/errno.h> #include <asm/asm-offsets.h> #include <asm/percpu.h> #include <asm/processor-flags.h> #include <asm/segment.h> #include <asm/thread_info.h> #include <asm/asm.h> #include <asm/frame.h> #include <asm/unwind_hints.h> #include <xen/interface/xen.h> #include <linux/init.h> #include <linux/linkage.h> #include <../entry/calling.h> .pushsection .noinstr.text, "ax" /* * Disabling events is simply a matter of making the event mask * non-zero. */ SYM_FUNC_START(xen_irq_disable_direct) movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask RET SYM_FUNC_END(xen_irq_disable_direct) /* * Force an event check by making a hypercall, but preserve regs * before making the call. */ SYM_FUNC_START(check_events) FRAME_BEGIN push %rax push %rcx push %rdx push %rsi push %rdi push %r8 push %r9 push %r10 push %r11 call xen_force_evtchn_callback pop %r11 pop %r10 pop %r9 pop %r8 pop %rdi pop %rsi pop %rdx pop %rcx pop %rax FRAME_END RET SYM_FUNC_END(check_events) /* * Enable events. This clears the event mask and tests the pending * event status with one and operation. If there are pending events, * then enter the hypervisor to get them handled. */ SYM_FUNC_START(xen_irq_enable_direct) FRAME_BEGIN /* Unmask events */ movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask /* * Preempt here doesn't matter because that will deal with any * pending interrupts. The pending check may end up being run * on the wrong CPU, but that doesn't hurt. */ /* Test for pending */ testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending jz 1f call check_events 1: FRAME_END RET SYM_FUNC_END(xen_irq_enable_direct) /* * (xen_)save_fl is used to get the current interrupt enable status. * Callers expect the status to be in X86_EFLAGS_IF, and other bits * may be set in the return value. We take advantage of this by * making sure that X86_EFLAGS_IF has the right value (and other bits * in that byte are 0), but other bits in the return value are * undefined. We need to toggle the state of the bit, because Xen and * x86 use opposite senses (mask vs enable). 
*/ SYM_FUNC_START(xen_save_fl_direct) testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask setz %ah addb %ah, %ah RET SYM_FUNC_END(xen_save_fl_direct) SYM_FUNC_START(xen_read_cr2) FRAME_BEGIN _ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX _ASM_MOV XEN_vcpu_info_arch_cr2(%_ASM_AX), %_ASM_AX FRAME_END RET SYM_FUNC_END(xen_read_cr2); SYM_FUNC_START(xen_read_cr2_direct) FRAME_BEGIN _ASM_MOV PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_arch_cr2, %_ASM_AX FRAME_END RET SYM_FUNC_END(xen_read_cr2_direct); .popsection .macro xen_pv_trap name SYM_CODE_START(xen_\name) UNWIND_HINT_ENTRY ENDBR pop %rcx pop %r11 jmp \name SYM_CODE_END(xen_\name) _ASM_NOKPROBE(xen_\name) .endm xen_pv_trap asm_exc_divide_error xen_pv_trap asm_xenpv_exc_debug xen_pv_trap asm_exc_int3 xen_pv_trap asm_xenpv_exc_nmi xen_pv_trap asm_exc_overflow xen_pv_trap asm_exc_bounds xen_pv_trap asm_exc_invalid_op xen_pv_trap asm_exc_device_not_available xen_pv_trap asm_xenpv_exc_double_fault xen_pv_trap asm_exc_coproc_segment_overrun xen_pv_trap asm_exc_invalid_tss xen_pv_trap asm_exc_segment_not_present xen_pv_trap asm_exc_stack_segment xen_pv_trap asm_exc_general_protection xen_pv_trap asm_exc_page_fault xen_pv_trap asm_exc_spurious_interrupt_bug xen_pv_trap asm_exc_coprocessor_error xen_pv_trap asm_exc_alignment_check #ifdef CONFIG_X86_KERNEL_IBT xen_pv_trap asm_exc_control_protection #endif #ifdef CONFIG_X86_MCE xen_pv_trap asm_xenpv_exc_machine_check #endif /* CONFIG_X86_MCE */ xen_pv_trap asm_exc_simd_coprocessor_error #ifdef CONFIG_IA32_EMULATION xen_pv_trap entry_INT80_compat #endif xen_pv_trap asm_exc_xen_unknown_trap xen_pv_trap asm_exc_xen_hypervisor_callback __INIT SYM_CODE_START(xen_early_idt_handler_array) i = 0 .rept NUM_EXCEPTION_VECTORS UNWIND_HINT_EMPTY ENDBR pop %rcx pop %r11 jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE i = i + 1 .fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc .endr SYM_CODE_END(xen_early_idt_handler_array) __FINIT hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32 /* * Xen64 iret frame: * * ss * rsp * rflags * cs * rip <-- standard iret frame * * flags * * rcx } * r11 }<-- pushed by hypercall page * rsp->rax } */ SYM_CODE_START(xen_iret) UNWIND_HINT_EMPTY ANNOTATE_NOENDBR pushq $0 jmp hypercall_iret SYM_CODE_END(xen_iret) /* * XEN pv doesn't use trampoline stack, PER_CPU_VAR(cpu_tss_rw + TSS_sp0) is * also the kernel stack. Reusing swapgs_restore_regs_and_return_to_usermode() * in XEN pv would cause %rsp to move up to the top of the kernel stack and * leave the IRET frame below %rsp, which is dangerous to be corrupted if #NMI * interrupts. And swapgs_restore_regs_and_return_to_usermode() pushing the IRET * frame at the same address is useless. */ SYM_CODE_START(xenpv_restore_regs_and_return_to_usermode) UNWIND_HINT_REGS POP_REGS /* stackleak_erase() can work safely on the kernel stack. */ STACKLEAK_ERASE_NOCLOBBER addq $8, %rsp /* skip regs->orig_ax */ jmp xen_iret SYM_CODE_END(xenpv_restore_regs_and_return_to_usermode) /* * Xen handles syscall callbacks much like ordinary exceptions, which * means we have: * - kernel gs * - kernel rsp * - an iret-like stack frame on the stack (including rcx and r11): * ss * rsp * rflags * cs * rip * r11 * rsp->rcx */ /* Normal 64-bit system call target */ SYM_CODE_START(xen_entry_SYSCALL_64) UNWIND_HINT_ENTRY ENDBR popq %rcx popq %r11 /* * Neither Xen nor the kernel really knows what the old SS and * CS were. The kernel expects __USER_DS and __USER_CS, so * report those values even though Xen will guess its own values. 
*/ movq $__USER_DS, 4*8(%rsp) movq $__USER_CS, 1*8(%rsp) jmp entry_SYSCALL_64_after_hwframe SYM_CODE_END(xen_entry_SYSCALL_64) #ifdef CONFIG_IA32_EMULATION /* 32-bit compat syscall target */ SYM_CODE_START(xen_entry_SYSCALL_compat) UNWIND_HINT_ENTRY ENDBR popq %rcx popq %r11 /* * Neither Xen nor the kernel really knows what the old SS and * CS were. The kernel expects __USER32_DS and __USER32_CS, so * report those values even though Xen will guess its own values. */ movq $__USER32_DS, 4*8(%rsp) movq $__USER32_CS, 1*8(%rsp) jmp entry_SYSCALL_compat_after_hwframe SYM_CODE_END(xen_entry_SYSCALL_compat) /* 32-bit compat sysenter target */ SYM_CODE_START(xen_entry_SYSENTER_compat) UNWIND_HINT_ENTRY ENDBR /* * NB: Xen is polite and clears TF from EFLAGS for us. This means * that we don't need to guard against single step exceptions here. */ popq %rcx popq %r11 /* * Neither Xen nor the kernel really knows what the old SS and * CS were. The kernel expects __USER32_DS and __USER32_CS, so * report those values even though Xen will guess its own values. */ movq $__USER32_DS, 4*8(%rsp) movq $__USER32_CS, 1*8(%rsp) jmp entry_SYSENTER_compat_after_hwframe SYM_CODE_END(xen_entry_SYSENTER_compat) #else /* !CONFIG_IA32_EMULATION */ SYM_CODE_START(xen_entry_SYSCALL_compat) SYM_CODE_START(xen_entry_SYSENTER_compat) UNWIND_HINT_ENTRY ENDBR lea 16(%rsp), %rsp /* strip %rcx, %r11 */ mov $-ENOSYS, %rax pushq $0 jmp hypercall_iret SYM_CODE_END(xen_entry_SYSENTER_compat) SYM_CODE_END(xen_entry_SYSCALL_compat) #endif /* CONFIG_IA32_EMULATION */
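xen_save_fl_direct above translates Xen's per-vcpu event mask into an eflags-style answer: events masked means interrupts disabled, so IF is reported as clear. A hedged C equivalent, with the vcpu_info layout reduced to the single byte the code actually tests:

#define X86_EFLAGS_IF 0x200UL

struct vcpu_info {
	unsigned char evtchn_upcall_mask;	/* nonzero = events masked (IRQs off) */
};

/* Report the interrupt state in the X86_EFLAGS_IF bit position, as callers expect. */
static unsigned long xen_save_fl(const struct vcpu_info *v)
{
	return v->evtchn_upcall_mask ? 0 : X86_EFLAGS_IF;
}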
aixcc-public/challenge-001-exemplar-source
3,167
arch/x86/xen/xen-head.S
/* SPDX-License-Identifier: GPL-2.0 */ /* Xen-specific pieces of head.S, intended to be included in the right place in head.S */ #ifdef CONFIG_XEN #include <linux/elfnote.h> #include <linux/init.h> #include <asm/boot.h> #include <asm/asm.h> #include <asm/msr.h> #include <asm/page_types.h> #include <asm/percpu.h> #include <asm/unwind_hints.h> #include <xen/interface/elfnote.h> #include <xen/interface/features.h> #include <xen/interface/xen.h> #include <xen/interface/xen-mca.h> #include <asm/xen/interface.h> .pushsection .noinstr.text, "ax" .balign PAGE_SIZE SYM_CODE_START(hypercall_page) .rept (PAGE_SIZE / 32) UNWIND_HINT_FUNC ANNOTATE_NOENDBR ANNOTATE_UNRET_SAFE ret /* * Xen will write the hypercall page, and sort out ENDBR. */ .skip 31, 0xcc .endr #define HYPERCALL(n) \ .equ xen_hypercall_##n, hypercall_page + __HYPERVISOR_##n * 32; \ .type xen_hypercall_##n, @function; .size xen_hypercall_##n, 32 #include <asm/xen-hypercalls.h> #undef HYPERCALL SYM_CODE_END(hypercall_page) .popsection #ifdef CONFIG_XEN_PV __INIT SYM_CODE_START(startup_xen) UNWIND_HINT_EMPTY ANNOTATE_NOENDBR cld mov initial_stack(%rip), %rsp /* Set up %gs. * * The base of %gs always points to fixed_percpu_data. If the * stack protector canary is enabled, it is located at %gs:40. * Note that, on SMP, the boot cpu uses init data section until * the per cpu areas are set up. */ movl $MSR_GS_BASE,%ecx movq $INIT_PER_CPU_VAR(fixed_percpu_data),%rax cdq wrmsr mov %rsi, %rdi call xen_start_kernel SYM_CODE_END(startup_xen) __FINIT #ifdef CONFIG_XEN_PV_SMP .pushsection .text SYM_CODE_START(asm_cpu_bringup_and_idle) UNWIND_HINT_EMPTY ENDBR call cpu_bringup_and_idle SYM_CODE_END(asm_cpu_bringup_and_idle) .popsection #endif #endif ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "linux") ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz "2.6") ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz "xen-3.0") #ifdef CONFIG_X86_32 ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE, _ASM_PTR __PAGE_OFFSET) #else ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE, _ASM_PTR __START_KERNEL_map) /* Map the p2m table to a 512GB-aligned user address. */ ELFNOTE(Xen, XEN_ELFNOTE_INIT_P2M, .quad (PUD_SIZE * PTRS_PER_PUD)) #endif #ifdef CONFIG_XEN_PV ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, _ASM_PTR startup_xen) #endif ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, _ASM_PTR hypercall_page) ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .ascii "!writable_page_tables|pae_pgdir_above_4gb") ELFNOTE(Xen, XEN_ELFNOTE_SUPPORTED_FEATURES, .long (1 << XENFEAT_writable_page_tables) | \ (1 << XENFEAT_dom0) | \ (1 << XENFEAT_linux_rsdp_unrestricted)) ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz "yes") ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz "generic") ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID, .quad _PAGE_PRESENT; .quad _PAGE_PRESENT) ELFNOTE(Xen, XEN_ELFNOTE_SUSPEND_CANCEL, .long 1) ELFNOTE(Xen, XEN_ELFNOTE_MOD_START_PFN, .long 1) ELFNOTE(Xen, XEN_ELFNOTE_HV_START_LOW, _ASM_PTR __HYPERVISOR_VIRT_START) ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, _ASM_PTR 0) #endif /*CONFIG_XEN */
aixcc-public/challenge-001-exemplar-source
2,412
arch/x86/power/hibernate_asm_32.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * This may not use any stack, nor any variable that is not "NoSave": * * Its rewriting one kernel image with another. What is stack in "old" * image could very well be data page in "new" image, and overwriting * your own stack under you is bad idea. */ #include <linux/linkage.h> #include <asm/segment.h> #include <asm/page_types.h> #include <asm/asm-offsets.h> #include <asm/processor-flags.h> #include <asm/frame.h> .text SYM_FUNC_START(swsusp_arch_suspend) movl %esp, saved_context_esp movl %ebx, saved_context_ebx movl %ebp, saved_context_ebp movl %esi, saved_context_esi movl %edi, saved_context_edi pushfl popl saved_context_eflags /* save cr3 */ movl %cr3, %eax movl %eax, restore_cr3 FRAME_BEGIN call swsusp_save FRAME_END RET SYM_FUNC_END(swsusp_arch_suspend) SYM_CODE_START(restore_image) /* prepare to jump to the image kernel */ movl restore_jump_address, %ebx movl restore_cr3, %ebp movl mmu_cr4_features, %ecx /* jump to relocated restore code */ movl relocated_restore_code, %eax jmpl *%eax SYM_CODE_END(restore_image) /* code below has been relocated to a safe page */ SYM_CODE_START(core_restore_code) movl temp_pgt, %eax movl %eax, %cr3 jecxz 1f # cr4 Pentium and higher, skip if zero andl $~(X86_CR4_PGE), %ecx movl %ecx, %cr4; # turn off PGE movl %cr3, %eax; # flush TLB movl %eax, %cr3 1: movl restore_pblist, %edx .p2align 4,,7 copy_loop: testl %edx, %edx jz done movl pbe_address(%edx), %esi movl pbe_orig_address(%edx), %edi movl $(PAGE_SIZE >> 2), %ecx rep movsl movl pbe_next(%edx), %edx jmp copy_loop .p2align 4,,7 done: jmpl *%ebx SYM_CODE_END(core_restore_code) /* code below belongs to the image kernel */ .align PAGE_SIZE SYM_FUNC_START(restore_registers) /* go back to the original page tables */ movl %ebp, %cr3 movl mmu_cr4_features, %ecx jecxz 1f # cr4 Pentium and higher, skip if zero movl %ecx, %cr4; # turn PGE back on 1: movl saved_context_esp, %esp movl saved_context_ebp, %ebp movl saved_context_ebx, %ebx movl saved_context_esi, %esi movl saved_context_edi, %edi pushl saved_context_eflags popfl /* Saved in save_processor_state. */ movl $saved_context, %eax lgdt saved_context_gdt_desc(%eax) xorl %eax, %eax /* tell the hibernation core that we've just restored the memory */ movl %eax, in_suspend RET SYM_FUNC_END(restore_registers)
aixcc-public/challenge-001-exemplar-source
3,828
arch/x86/power/hibernate_asm_64.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Hibernation support for x86-64 * * Copyright 2007 Rafael J. Wysocki <rjw@sisk.pl> * Copyright 2005 Andi Kleen <ak@suse.de> * Copyright 2004 Pavel Machek <pavel@suse.cz> * * swsusp_arch_resume must not use any stack or any nonlocal variables while * copying pages: * * Its rewriting one kernel image with another. What is stack in "old" * image could very well be data page in "new" image, and overwriting * your own stack under you is bad idea. */ .text #include <linux/linkage.h> #include <asm/segment.h> #include <asm/page_types.h> #include <asm/asm-offsets.h> #include <asm/processor-flags.h> #include <asm/frame.h> #include <asm/nospec-branch.h> /* code below belongs to the image kernel */ .align PAGE_SIZE SYM_FUNC_START(restore_registers) /* go back to the original page tables */ movq %r9, %cr3 /* Flush TLB, including "global" things (vmalloc) */ movq mmu_cr4_features(%rip), %rax movq %rax, %rdx andq $~(X86_CR4_PGE), %rdx movq %rdx, %cr4; # turn off PGE movq %cr3, %rcx; # flush TLB movq %rcx, %cr3 movq %rax, %cr4; # turn PGE back on /* We don't restore %rax, it must be 0 anyway */ movq $saved_context, %rax movq pt_regs_sp(%rax), %rsp movq pt_regs_bp(%rax), %rbp movq pt_regs_si(%rax), %rsi movq pt_regs_di(%rax), %rdi movq pt_regs_bx(%rax), %rbx movq pt_regs_cx(%rax), %rcx movq pt_regs_dx(%rax), %rdx movq pt_regs_r8(%rax), %r8 movq pt_regs_r9(%rax), %r9 movq pt_regs_r10(%rax), %r10 movq pt_regs_r11(%rax), %r11 movq pt_regs_r12(%rax), %r12 movq pt_regs_r13(%rax), %r13 movq pt_regs_r14(%rax), %r14 movq pt_regs_r15(%rax), %r15 pushq pt_regs_flags(%rax) popfq /* Saved in save_processor_state. */ lgdt saved_context_gdt_desc(%rax) xorl %eax, %eax /* tell the hibernation core that we've just restored the memory */ movq %rax, in_suspend(%rip) RET SYM_FUNC_END(restore_registers) SYM_FUNC_START(swsusp_arch_suspend) movq $saved_context, %rax movq %rsp, pt_regs_sp(%rax) movq %rbp, pt_regs_bp(%rax) movq %rsi, pt_regs_si(%rax) movq %rdi, pt_regs_di(%rax) movq %rbx, pt_regs_bx(%rax) movq %rcx, pt_regs_cx(%rax) movq %rdx, pt_regs_dx(%rax) movq %r8, pt_regs_r8(%rax) movq %r9, pt_regs_r9(%rax) movq %r10, pt_regs_r10(%rax) movq %r11, pt_regs_r11(%rax) movq %r12, pt_regs_r12(%rax) movq %r13, pt_regs_r13(%rax) movq %r14, pt_regs_r14(%rax) movq %r15, pt_regs_r15(%rax) pushfq popq pt_regs_flags(%rax) /* save cr3 */ movq %cr3, %rax movq %rax, restore_cr3(%rip) FRAME_BEGIN call swsusp_save FRAME_END RET SYM_FUNC_END(swsusp_arch_suspend) SYM_FUNC_START(restore_image) /* prepare to jump to the image kernel */ movq restore_jump_address(%rip), %r8 movq restore_cr3(%rip), %r9 /* prepare to switch to temporary page tables */ movq temp_pgt(%rip), %rax movq mmu_cr4_features(%rip), %rbx /* prepare to copy image data to their original locations */ movq restore_pblist(%rip), %rdx /* jump to relocated restore code */ movq relocated_restore_code(%rip), %rcx ANNOTATE_RETPOLINE_SAFE jmpq *%rcx SYM_FUNC_END(restore_image) /* code below has been relocated to a safe page */ SYM_FUNC_START(core_restore_code) /* switch to temporary page tables */ movq %rax, %cr3 /* flush TLB */ movq %rbx, %rcx andq $~(X86_CR4_PGE), %rcx movq %rcx, %cr4; # turn off PGE movq %cr3, %rcx; # flush TLB movq %rcx, %cr3; movq %rbx, %cr4; # turn PGE back on .Lloop: testq %rdx, %rdx jz .Ldone /* get addresses from the pbe and copy the page */ movq pbe_address(%rdx), %rsi movq pbe_orig_address(%rdx), %rdi movq $(PAGE_SIZE >> 3), %rcx rep movsq /* progress to the next pbe */ movq pbe_next(%rdx), %rdx jmp .Lloop .Ldone: 
/* jump to the restore_registers address from the image header */ ANNOTATE_RETPOLINE_SAFE jmpq *%r8 SYM_FUNC_END(core_restore_code)
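core_restore_code above walks restore_pblist and copies every saved page back over its original location before jumping to the image kernel. A rough C rendering of that loop; struct pbe is sketched from the pbe_address/pbe_orig_address/pbe_next offsets the assembly uses, not quoted from the hibernation core:

#include <string.h>

#define PAGE_SIZE 4096

/* Page backup entry, simplified to the three fields used by the copy loop. */
struct pbe {
	void *address;		/* where the saved copy of the page lives */
	void *orig_address;	/* where the page belongs in the image    */
	struct pbe *next;	/* next entry in restore_pblist           */
};

/* C equivalent of the .Lloop/.Ldone sequence in core_restore_code. */
static void restore_pages(struct pbe *restore_pblist)
{
	struct pbe *p;

	for (p = restore_pblist; p; p = p->next)
		memcpy(p->orig_address, p->address, PAGE_SIZE);
}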
aixcc-public/challenge-001-exemplar-source
1,041
arch/x86/um/setjmp_64.S
/* SPDX-License-Identifier: GPL-2.0 */
#
# arch/x86_64/setjmp.S
#
# setjmp/longjmp for the x86-64 architecture
#

#
# The jmp_buf is assumed to contain the following, in order:
#	%rbx
#	%rsp (post-return)
#	%rbp
#	%r12
#	%r13
#	%r14
#	%r15
#	<return address>
#

	.text
	.align 4
	.globl kernel_setjmp
	.type kernel_setjmp, @function
kernel_setjmp:
	pop  %rsi			# Return address, and adjust the stack
	xorl %eax,%eax			# Return value
	movq %rbx,(%rdi)
	movq %rsp,8(%rdi)		# Post-return %rsp!
	push %rsi			# Make the call/return stack happy
	movq %rbp,16(%rdi)
	movq %r12,24(%rdi)
	movq %r13,32(%rdi)
	movq %r14,40(%rdi)
	movq %r15,48(%rdi)
	movq %rsi,56(%rdi)		# Return address
	RET

	.size kernel_setjmp,.-kernel_setjmp

	.text
	.align 4
	.globl kernel_longjmp
	.type kernel_longjmp, @function
kernel_longjmp:
	movl %esi,%eax			# Return value (int)
	movq (%rdi),%rbx
	movq 8(%rdi),%rsp
	movq 16(%rdi),%rbp
	movq 24(%rdi),%r12
	movq 32(%rdi),%r13
	movq 40(%rdi),%r14
	movq 48(%rdi),%r15
	jmp *56(%rdi)

	.size kernel_longjmp,.-kernel_longjmp
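kernel_setjmp/kernel_longjmp follow the usual setjmp contract: the direct call returns 0, and a later kernel_longjmp resumes execution at the saved call site with the value passed in. A small usage sketch; the prototypes and the jmp_buf typedef below are assumptions matching the eight slots listed in the header comment, not the exact UML declarations:

/* Room for the eight 64-bit slots described above (rbx, rsp, rbp, r12-r15, return address). */
typedef unsigned long kernel_jmp_buf[8];

int  kernel_setjmp(kernel_jmp_buf buf);		/* returns 0 now, 'val' after a longjmp */
void kernel_longjmp(kernel_jmp_buf buf, int val);	/* restores the saved context */

static void example(void)
{
	static kernel_jmp_buf buf;

	if (kernel_setjmp(buf) == 0) {
		/* first return: context saved, continue normally */
		kernel_longjmp(buf, 1);	/* jumps back into the setjmp above */
	} else {
		/* second return: reached via kernel_longjmp */
	}
}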
aixcc-public/challenge-001-exemplar-source
4,692
arch/x86/um/checksum_32.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * IP/TCP/UDP checksumming routines * * Authors: Jorge Cwik, <jorge@laser.satlink.net> * Arnt Gulbrandsen, <agulbra@nvg.unit.no> * Tom May, <ftom@netcom.com> * Pentium Pro/II routines: * Alexander Kjeldaas <astor@guardian.no> * Finn Arne Gangstad <finnag@guardian.no> * Lots of code moved from tcp.c and ip.c; see those files * for more names. * * Changes: Ingo Molnar, converted csum_partial_copy() to 2.1 exception * handling. * Andi Kleen, add zeroing on error * converted to pure assembler */ #include <asm/errno.h> #include <asm/asm.h> #include <asm/export.h> /* * computes a partial checksum, e.g. for TCP/UDP fragments */ /* unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum) */ .text .align 4 .globl csum_partial #ifndef CONFIG_X86_USE_PPRO_CHECKSUM /* * Experiments with Ethernet and SLIP connections show that buff * is aligned on either a 2-byte or 4-byte boundary. We get at * least a twofold speedup on 486 and Pentium if it is 4-byte aligned. * Fortunately, it is easy to convert 2-byte alignment to 4-byte * alignment for the unrolled loop. */ csum_partial: pushl %esi pushl %ebx movl 20(%esp),%eax # Function arg: unsigned int sum movl 16(%esp),%ecx # Function arg: int len movl 12(%esp),%esi # Function arg: unsigned char *buff testl $2, %esi # Check alignment. jz 2f # Jump if alignment is ok. subl $2, %ecx # Alignment uses up two bytes. jae 1f # Jump if we had at least two bytes. addl $2, %ecx # ecx was < 2. Deal with it. jmp 4f 1: movw (%esi), %bx addl $2, %esi addw %bx, %ax adcl $0, %eax 2: movl %ecx, %edx shrl $5, %ecx jz 2f testl %esi, %esi 1: movl (%esi), %ebx adcl %ebx, %eax movl 4(%esi), %ebx adcl %ebx, %eax movl 8(%esi), %ebx adcl %ebx, %eax movl 12(%esi), %ebx adcl %ebx, %eax movl 16(%esi), %ebx adcl %ebx, %eax movl 20(%esi), %ebx adcl %ebx, %eax movl 24(%esi), %ebx adcl %ebx, %eax movl 28(%esi), %ebx adcl %ebx, %eax lea 32(%esi), %esi dec %ecx jne 1b adcl $0, %eax 2: movl %edx, %ecx andl $0x1c, %edx je 4f shrl $2, %edx # This clears CF 3: adcl (%esi), %eax lea 4(%esi), %esi dec %edx jne 3b adcl $0, %eax 4: andl $3, %ecx jz 7f cmpl $2, %ecx jb 5f movw (%esi),%cx leal 2(%esi),%esi je 6f shll $16,%ecx 5: movb (%esi),%cl 6: addl %ecx,%eax adcl $0, %eax 7: popl %ebx popl %esi RET #else /* Version for PentiumII/PPro */ csum_partial: pushl %esi pushl %ebx movl 20(%esp),%eax # Function arg: unsigned int sum movl 16(%esp),%ecx # Function arg: int len movl 12(%esp),%esi # Function arg: const unsigned char *buf testl $2, %esi jnz 30f 10: movl %ecx, %edx movl %ecx, %ebx andl $0x7c, %ebx shrl $7, %ecx addl %ebx,%esi shrl $2, %ebx negl %ebx lea 45f(%ebx,%ebx,2), %ebx testl %esi, %esi jmp *%ebx # Handle 2-byte-aligned regions 20: addw (%esi), %ax lea 2(%esi), %esi adcl $0, %eax jmp 10b 30: subl $2, %ecx ja 20b je 32f movzbl (%esi),%ebx # csumming 1 byte, 2-aligned addl %ebx, %eax adcl $0, %eax jmp 80f 32: addw (%esi), %ax # csumming 2 bytes, 2-aligned adcl $0, %eax jmp 80f 40: addl -128(%esi), %eax adcl -124(%esi), %eax adcl -120(%esi), %eax adcl -116(%esi), %eax adcl -112(%esi), %eax adcl -108(%esi), %eax adcl -104(%esi), %eax adcl -100(%esi), %eax adcl -96(%esi), %eax adcl -92(%esi), %eax adcl -88(%esi), %eax adcl -84(%esi), %eax adcl -80(%esi), %eax adcl -76(%esi), %eax adcl -72(%esi), %eax adcl -68(%esi), %eax adcl 
-64(%esi), %eax adcl -60(%esi), %eax adcl -56(%esi), %eax adcl -52(%esi), %eax adcl -48(%esi), %eax adcl -44(%esi), %eax adcl -40(%esi), %eax adcl -36(%esi), %eax adcl -32(%esi), %eax adcl -28(%esi), %eax adcl -24(%esi), %eax adcl -20(%esi), %eax adcl -16(%esi), %eax adcl -12(%esi), %eax adcl -8(%esi), %eax adcl -4(%esi), %eax 45: lea 128(%esi), %esi adcl $0, %eax dec %ecx jge 40b movl %edx, %ecx 50: andl $3, %ecx jz 80f # Handle the last 1-3 bytes without jumping notl %ecx # 1->2, 2->1, 3->0, higher bits are masked movl $0xffffff,%ebx # by the shll and shrl instructions shll $3,%ecx shrl %cl,%ebx andl -128(%esi),%ebx # esi is 4-aligned so should be ok addl %ebx,%eax adcl $0,%eax 80: popl %ebx popl %esi RET #endif EXPORT_SYMBOL(csum_partial)
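csum_partial above is an unrolled ones'-complement accumulator for the IP/TCP/UDP checksum. A plain C reference of the same arithmetic, simplified to 16-bit little-endian words with the carries folded at the end; the assembly instead adds aligned 32-bit words with carry, handles 2-byte alignment specially, and returns an unfolded 32-bit partial sum:

#include <stddef.h>
#include <stdint.h>

static uint32_t csum_partial_ref(const uint8_t *buf, size_t len, uint32_t sum)
{
	while (len > 1) {
		sum += buf[0] | (uint32_t)buf[1] << 8;	/* one 16-bit word */
		buf += 2;
		len -= 2;
	}
	if (len)			/* trailing odd byte */
		sum += buf[0];

	while (sum >> 16)		/* fold carries back into the low 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);

	return sum;
}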
aixcc-public/challenge-001-exemplar-source
1,072
arch/x86/um/setjmp_32.S
/* SPDX-License-Identifier: GPL-2.0 */
#
# arch/i386/setjmp.S
#
# setjmp/longjmp for the i386 architecture
#

#
# The jmp_buf is assumed to contain the following, in order:
#	%ebx
#	%esp
#	%ebp
#	%esi
#	%edi
#	<return address>
#

	.text
	.align 4
	.globl kernel_setjmp
	.type kernel_setjmp, @function
kernel_setjmp:
#ifdef _REGPARM
	movl %eax,%edx
#else
	movl 4(%esp),%edx
#endif
	popl %ecx			# Return address, and adjust the stack
	xorl %eax,%eax			# Return value
	movl %ebx,(%edx)
	movl %esp,4(%edx)		# Post-return %esp!
	pushl %ecx			# Make the call/return stack happy
	movl %ebp,8(%edx)
	movl %esi,12(%edx)
	movl %edi,16(%edx)
	movl %ecx,20(%edx)		# Return address
	RET

	.size kernel_setjmp,.-kernel_setjmp

	.text
	.align 4
	.globl kernel_longjmp
	.type kernel_longjmp, @function
kernel_longjmp:
#ifdef _REGPARM
	xchgl %eax,%edx
#else
	movl 4(%esp),%edx		# jmp_ptr address
	movl 8(%esp),%eax		# Return value
#endif
	movl (%edx),%ebx
	movl 4(%edx),%esp
	movl 8(%edx),%ebp
	movl 12(%edx),%esi
	movl 16(%edx),%edi
	jmp *20(%edx)

	.size kernel_longjmp,.-kernel_longjmp
aixcc-public/challenge-001-exemplar-source
3,985
arch/x86/kernel/ftrace_32.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2017 Steven Rostedt, VMware Inc. */ #include <linux/linkage.h> #include <asm/page_types.h> #include <asm/segment.h> #include <asm/export.h> #include <asm/ftrace.h> #include <asm/nospec-branch.h> #include <asm/frame.h> #include <asm/asm-offsets.h> #ifdef CONFIG_FRAME_POINTER # define MCOUNT_FRAME 1 /* using frame = true */ #else # define MCOUNT_FRAME 0 /* using frame = false */ #endif SYM_FUNC_START(__fentry__) RET SYM_FUNC_END(__fentry__) EXPORT_SYMBOL(__fentry__) SYM_CODE_START(ftrace_caller) #ifdef CONFIG_FRAME_POINTER /* * Frame pointers are of ip followed by bp. * Since fentry is an immediate jump, we are left with * parent-ip, function-ip. We need to add a frame with * parent-ip followed by ebp. */ pushl 4(%esp) /* parent ip */ pushl %ebp movl %esp, %ebp pushl 2*4(%esp) /* function ip */ /* For mcount, the function ip is directly above */ pushl %ebp movl %esp, %ebp #endif pushl %eax pushl %ecx pushl %edx pushl $0 /* Pass NULL as regs pointer */ #ifdef CONFIG_FRAME_POINTER /* Load parent ebp into edx */ movl 4*4(%esp), %edx #else /* There's no frame pointer, load the appropriate stack addr instead */ lea 4*4(%esp), %edx #endif movl (MCOUNT_FRAME+4)*4(%esp), %eax /* load the rip */ /* Get the parent ip */ movl 4(%edx), %edx /* edx has ebp */ movl function_trace_op, %ecx subl $MCOUNT_INSN_SIZE, %eax .globl ftrace_call ftrace_call: call ftrace_stub addl $4, %esp /* skip NULL pointer */ popl %edx popl %ecx popl %eax #ifdef CONFIG_FRAME_POINTER popl %ebp addl $4,%esp /* skip function ip */ popl %ebp /* this is the orig bp */ addl $4, %esp /* skip parent ip */ #endif .Lftrace_ret: #ifdef CONFIG_FUNCTION_GRAPH_TRACER .globl ftrace_graph_call ftrace_graph_call: jmp ftrace_stub #endif /* This is weak to keep gas from relaxing the jumps */ SYM_INNER_LABEL_ALIGN(ftrace_stub, SYM_L_WEAK) RET SYM_CODE_END(ftrace_caller) SYM_CODE_START(ftrace_regs_caller) /* * We're here from an mcount/fentry CALL, and the stack frame looks like: * * <previous context> * RET-IP * * The purpose of this function is to call out in an emulated INT3 * environment with a stack frame like: * * <previous context> * gap / RET-IP * gap * gap * gap * pt_regs * * We do _NOT_ restore: ss, flags, cs, gs, fs, es, ds */ subl $3*4, %esp # RET-IP + 3 gaps pushl %ss # ss pushl %esp # points at ss addl $5*4, (%esp) # make it point at <previous context> pushfl # flags pushl $__KERNEL_CS # cs pushl 7*4(%esp) # ip <- RET-IP pushl $0 # orig_eax pushl %gs pushl %fs pushl %es pushl %ds pushl %eax pushl %ebp pushl %edi pushl %esi pushl %edx pushl %ecx pushl %ebx ENCODE_FRAME_POINTER movl PT_EIP(%esp), %eax # 1st argument: IP subl $MCOUNT_INSN_SIZE, %eax movl 21*4(%esp), %edx # 2nd argument: parent ip movl function_trace_op, %ecx # 3rd argument: ftrace_pos pushl %esp # 4th argument: pt_regs SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL) call ftrace_stub addl $4, %esp # skip 4th argument /* place IP below the new SP */ movl PT_OLDESP(%esp), %eax movl PT_EIP(%esp), %ecx movl %ecx, -4(%eax) /* place EAX below that */ movl PT_EAX(%esp), %ecx movl %ecx, -8(%eax) popl %ebx popl %ecx popl %edx popl %esi popl %edi popl %ebp lea -8(%eax), %esp popl %eax jmp .Lftrace_ret SYM_CODE_END(ftrace_regs_caller) #ifdef CONFIG_FUNCTION_GRAPH_TRACER SYM_CODE_START(ftrace_graph_caller) pushl %eax pushl %ecx pushl %edx movl 3*4(%esp), %eax /* Even with frame pointers, fentry doesn't have one here */ lea 4*4(%esp), %edx movl $0, %ecx subl $MCOUNT_INSN_SIZE, %eax call prepare_ftrace_return popl %edx popl %ecx 
popl %eax RET SYM_CODE_END(ftrace_graph_caller) .globl return_to_handler return_to_handler: pushl %eax pushl %edx movl $0, %eax call ftrace_return_to_handler movl %eax, %ecx popl %edx popl %eax JMP_NOSPEC ecx #endif
aixcc-public/challenge-001-exemplar-source
3,745
arch/x86/kernel/verify_cpu.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * * verify_cpu.S - Code for cpu long mode and SSE verification. This * code has been borrowed from boot/setup.S and was introduced by * Andi Kleen. * * Copyright (c) 2007 Andi Kleen (ak@suse.de) * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com) * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com) * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com) * * This is a common code for verification whether CPU supports * long mode and SSE or not. It is not called directly instead this * file is included at various places and compiled in that context. * This file is expected to run in 32bit code. Currently: * * arch/x86/boot/compressed/head_64.S: Boot cpu verification * arch/x86/kernel/trampoline_64.S: secondary processor verification * arch/x86/kernel/head_32.S: processor startup * * verify_cpu, returns the status of longmode and SSE in register %eax. * 0: Success 1: Failure * * On Intel, the XD_DISABLE flag will be cleared as a side-effect. * * The caller needs to check for the error code and take the action * appropriately. Either display a message or halt. */ #include <asm/cpufeatures.h> #include <asm/msr-index.h> SYM_FUNC_START_LOCAL(verify_cpu) pushf # Save caller passed flags push $0 # Kill any dangerous flags popf #ifndef __x86_64__ pushfl # standard way to check for cpuid popl %eax movl %eax,%ebx xorl $0x200000,%eax pushl %eax popfl pushfl popl %eax cmpl %eax,%ebx jz .Lverify_cpu_no_longmode # cpu has no cpuid #endif movl $0x0,%eax # See if cpuid 1 is implemented cpuid cmpl $0x1,%eax jb .Lverify_cpu_no_longmode # no cpuid 1 xor %di,%di cmpl $0x68747541,%ebx # AuthenticAMD jnz .Lverify_cpu_noamd cmpl $0x69746e65,%edx jnz .Lverify_cpu_noamd cmpl $0x444d4163,%ecx jnz .Lverify_cpu_noamd mov $1,%di # cpu is from AMD jmp .Lverify_cpu_check .Lverify_cpu_noamd: cmpl $0x756e6547,%ebx # GenuineIntel? jnz .Lverify_cpu_check cmpl $0x49656e69,%edx jnz .Lverify_cpu_check cmpl $0x6c65746e,%ecx jnz .Lverify_cpu_check # only call IA32_MISC_ENABLE when: # family > 6 || (family == 6 && model >= 0xd) movl $0x1, %eax # check CPU family and model cpuid movl %eax, %ecx andl $0x0ff00f00, %eax # mask family and extended family shrl $8, %eax cmpl $6, %eax ja .Lverify_cpu_clear_xd # family > 6, ok jb .Lverify_cpu_check # family < 6, skip andl $0x000f00f0, %ecx # mask model and extended model shrl $4, %ecx cmpl $0xd, %ecx jb .Lverify_cpu_check # family == 6, model < 0xd, skip .Lverify_cpu_clear_xd: movl $MSR_IA32_MISC_ENABLE, %ecx rdmsr btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE jnc .Lverify_cpu_check # only write MSR if bit was changed wrmsr .Lverify_cpu_check: movl $0x1,%eax # Does the cpu have what it takes cpuid andl $REQUIRED_MASK0,%edx xorl $REQUIRED_MASK0,%edx jnz .Lverify_cpu_no_longmode movl $0x80000000,%eax # See if extended cpuid is implemented cpuid cmpl $0x80000001,%eax jb .Lverify_cpu_no_longmode # no extended cpuid movl $0x80000001,%eax # Does the cpu have what it takes cpuid andl $REQUIRED_MASK1,%edx xorl $REQUIRED_MASK1,%edx jnz .Lverify_cpu_no_longmode .Lverify_cpu_sse_test: movl $1,%eax cpuid andl $SSE_MASK,%edx cmpl $SSE_MASK,%edx je .Lverify_cpu_sse_ok test %di,%di jz .Lverify_cpu_no_longmode # only try to force SSE on AMD movl $MSR_K7_HWCR,%ecx rdmsr btr $15,%eax # enable SSE wrmsr xor %di,%di # don't loop jmp .Lverify_cpu_sse_test # try again .Lverify_cpu_no_longmode: popf # Restore caller passed flags movl $1,%eax RET .Lverify_cpu_sse_ok: popf # Restore caller passed flags xorl %eax, %eax RET SYM_FUNC_END(verify_cpu)
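verify_cpu above boils down to a handful of CPUID checks: leaf 0x80000001 must exist and report long mode, together with the SSE bits encoded in REQUIRED_MASK0/REQUIRED_MASK1, which depend on the kernel configuration. A hedged userspace sketch of the central long-mode test using the compiler's cpuid helper (LM is EDX bit 29 of leaf 0x80000001):

#include <cpuid.h>	/* __get_cpuid() from GCC/clang */
#include <stdio.h>

#define LM_BIT (1u << 29)	/* long mode flag in EDX of CPUID 0x80000001 */

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx)) {
		puts("extended CPUID leaf not implemented: no long mode");
		return 1;
	}
	puts(edx & LM_BIT ? "long mode supported" : "no long mode");
	return 0;
}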
aixcc-public/challenge-001-exemplar-source
8,465
arch/x86/kernel/ftrace_64.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2014 Steven Rostedt, Red Hat Inc */ #include <linux/linkage.h> #include <linux/cfi_types.h> #include <asm/ptrace.h> #include <asm/ftrace.h> #include <asm/export.h> #include <asm/nospec-branch.h> #include <asm/unwind_hints.h> #include <asm/frame.h> .code64 .section .text, "ax" #ifdef CONFIG_FRAME_POINTER /* Save parent and function stack frames (rip and rbp) */ # define MCOUNT_FRAME_SIZE (8+16*2) #else /* No need to save a stack frame */ # define MCOUNT_FRAME_SIZE 0 #endif /* CONFIG_FRAME_POINTER */ /* Size of stack used to save mcount regs in save_mcount_regs */ #define MCOUNT_REG_SIZE (FRAME_SIZE + MCOUNT_FRAME_SIZE) /* * gcc -pg option adds a call to 'mcount' in most functions. * When -mfentry is used, the call is to 'fentry' and not 'mcount' * and is done before the function's stack frame is set up. * They both require a set of regs to be saved before calling * any C code and restored before returning back to the function. * * On boot up, all these calls are converted into nops. When tracing * is enabled, the call can jump to either ftrace_caller or * ftrace_regs_caller. Callbacks (tracing functions) that require * ftrace_regs_caller (like kprobes) need to have pt_regs passed to * it. For this reason, the size of the pt_regs structure will be * allocated on the stack and the required mcount registers will * be saved in the locations that pt_regs has them in. */ /* * @added: the amount of stack added before calling this * * After this is called, the following registers contain: * * %rdi - holds the address that called the trampoline * %rsi - holds the parent function (traced function's return address) * %rdx - holds the original %rbp */ .macro save_mcount_regs added=0 #ifdef CONFIG_FRAME_POINTER /* Save the original rbp */ pushq %rbp /* * Stack traces will stop at the ftrace trampoline if the frame pointer * is not set up properly. If fentry is used, we need to save a frame * pointer for the parent as well as the function traced, because the * fentry is called before the stack frame is set up, where as mcount * is called afterward. */ /* Save the parent pointer (skip orig rbp and our return address) */ pushq \added+8*2(%rsp) pushq %rbp movq %rsp, %rbp /* Save the return address (now skip orig rbp, rbp and parent) */ pushq \added+8*3(%rsp) pushq %rbp movq %rsp, %rbp #endif /* CONFIG_FRAME_POINTER */ /* * We add enough stack to save all regs. */ subq $(FRAME_SIZE), %rsp movq %rax, RAX(%rsp) movq %rcx, RCX(%rsp) movq %rdx, RDX(%rsp) movq %rsi, RSI(%rsp) movq %rdi, RDI(%rsp) movq %r8, R8(%rsp) movq %r9, R9(%rsp) movq $0, ORIG_RAX(%rsp) /* * Save the original RBP. Even though the mcount ABI does not * require this, it helps out callers. */ #ifdef CONFIG_FRAME_POINTER movq MCOUNT_REG_SIZE-8(%rsp), %rdx #else movq %rbp, %rdx #endif movq %rdx, RBP(%rsp) /* Copy the parent address into %rsi (second parameter) */ movq MCOUNT_REG_SIZE+8+\added(%rsp), %rsi /* Move RIP to its proper location */ movq MCOUNT_REG_SIZE+\added(%rsp), %rdi movq %rdi, RIP(%rsp) /* * Now %rdi (the first parameter) has the return address of * where ftrace_call returns. But the callbacks expect the * address of the call itself. 
*/ subq $MCOUNT_INSN_SIZE, %rdi .endm .macro restore_mcount_regs save=0 /* ftrace_regs_caller or frame pointers require this */ movq RBP(%rsp), %rbp movq R9(%rsp), %r9 movq R8(%rsp), %r8 movq RDI(%rsp), %rdi movq RSI(%rsp), %rsi movq RDX(%rsp), %rdx movq RCX(%rsp), %rcx movq RAX(%rsp), %rax addq $MCOUNT_REG_SIZE-\save, %rsp .endm SYM_TYPED_FUNC_START(ftrace_stub) RET SYM_FUNC_END(ftrace_stub) #ifdef CONFIG_FUNCTION_GRAPH_TRACER SYM_TYPED_FUNC_START(ftrace_stub_graph) RET SYM_FUNC_END(ftrace_stub_graph) #endif #ifdef CONFIG_DYNAMIC_FTRACE SYM_FUNC_START(__fentry__) RET SYM_FUNC_END(__fentry__) EXPORT_SYMBOL(__fentry__) SYM_FUNC_START(ftrace_caller) /* save_mcount_regs fills in first two parameters */ save_mcount_regs /* Stack - skipping return address of ftrace_caller */ leaq MCOUNT_REG_SIZE+8(%rsp), %rcx movq %rcx, RSP(%rsp) SYM_INNER_LABEL(ftrace_caller_op_ptr, SYM_L_GLOBAL) ANNOTATE_NOENDBR /* Load the ftrace_ops into the 3rd parameter */ movq function_trace_op(%rip), %rdx /* regs go into 4th parameter */ leaq (%rsp), %rcx /* Only ops with REGS flag set should have CS register set */ movq $0, CS(%rsp) SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL) ANNOTATE_NOENDBR call ftrace_stub /* Handlers can change the RIP */ movq RIP(%rsp), %rax movq %rax, MCOUNT_REG_SIZE(%rsp) restore_mcount_regs /* * The code up to this label is copied into trampolines so * think twice before adding any new code or changing the * layout here. */ SYM_INNER_LABEL(ftrace_caller_end, SYM_L_GLOBAL) ANNOTATE_NOENDBR RET SYM_FUNC_END(ftrace_caller); STACK_FRAME_NON_STANDARD_FP(ftrace_caller) SYM_FUNC_START(ftrace_regs_caller) /* Save the current flags before any operations that can change them */ pushfq /* added 8 bytes to save flags */ save_mcount_regs 8 /* save_mcount_regs fills in first two parameters */ SYM_INNER_LABEL(ftrace_regs_caller_op_ptr, SYM_L_GLOBAL) ANNOTATE_NOENDBR /* Load the ftrace_ops into the 3rd parameter */ movq function_trace_op(%rip), %rdx /* Save the rest of pt_regs */ movq %r15, R15(%rsp) movq %r14, R14(%rsp) movq %r13, R13(%rsp) movq %r12, R12(%rsp) movq %r11, R11(%rsp) movq %r10, R10(%rsp) movq %rbx, RBX(%rsp) /* Copy saved flags */ movq MCOUNT_REG_SIZE(%rsp), %rcx movq %rcx, EFLAGS(%rsp) /* Kernel segments */ movq $__KERNEL_DS, %rcx movq %rcx, SS(%rsp) movq $__KERNEL_CS, %rcx movq %rcx, CS(%rsp) /* Stack - skipping return address and flags */ leaq MCOUNT_REG_SIZE+8*2(%rsp), %rcx movq %rcx, RSP(%rsp) ENCODE_FRAME_POINTER /* regs go into 4th parameter */ leaq (%rsp), %rcx SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL) ANNOTATE_NOENDBR call ftrace_stub /* Copy flags back to SS, to restore them */ movq EFLAGS(%rsp), %rax movq %rax, MCOUNT_REG_SIZE(%rsp) /* Handlers can change the RIP */ movq RIP(%rsp), %rax movq %rax, MCOUNT_REG_SIZE+8(%rsp) /* restore the rest of pt_regs */ movq R15(%rsp), %r15 movq R14(%rsp), %r14 movq R13(%rsp), %r13 movq R12(%rsp), %r12 movq R10(%rsp), %r10 movq RBX(%rsp), %rbx movq ORIG_RAX(%rsp), %rax movq %rax, MCOUNT_REG_SIZE-8(%rsp) /* * If ORIG_RAX is anything but zero, make this a call to that. * See arch_ftrace_set_direct_caller(). */ testq %rax, %rax SYM_INNER_LABEL(ftrace_regs_caller_jmp, SYM_L_GLOBAL) ANNOTATE_NOENDBR jnz 1f restore_mcount_regs /* Restore flags */ popfq /* * The trampoline will add the return. 
*/ SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL) ANNOTATE_NOENDBR RET /* Swap the flags with orig_rax */ 1: movq MCOUNT_REG_SIZE(%rsp), %rdi movq %rdi, MCOUNT_REG_SIZE-8(%rsp) movq %rax, MCOUNT_REG_SIZE(%rsp) restore_mcount_regs 8 /* Restore flags */ popfq UNWIND_HINT_FUNC RET SYM_FUNC_END(ftrace_regs_caller) STACK_FRAME_NON_STANDARD_FP(ftrace_regs_caller) #else /* ! CONFIG_DYNAMIC_FTRACE */ SYM_FUNC_START(__fentry__) cmpq $ftrace_stub, ftrace_trace_function jnz trace RET trace: /* save_mcount_regs fills in first two parameters */ save_mcount_regs /* * When DYNAMIC_FTRACE is not defined, ARCH_SUPPORTS_FTRACE_OPS is not * set (see include/asm/ftrace.h and include/linux/ftrace.h). Only the * ip and parent ip are used and the list function is called when * function tracing is enabled. */ movq ftrace_trace_function, %r8 CALL_NOSPEC r8 restore_mcount_regs jmp ftrace_stub SYM_FUNC_END(__fentry__) EXPORT_SYMBOL(__fentry__) STACK_FRAME_NON_STANDARD_FP(__fentry__) #endif /* CONFIG_DYNAMIC_FTRACE */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER SYM_CODE_START(return_to_handler) UNWIND_HINT_EMPTY ANNOTATE_NOENDBR subq $16, %rsp /* Save the return values */ movq %rax, (%rsp) movq %rdx, 8(%rsp) movq %rbp, %rdi call ftrace_return_to_handler movq %rax, %rdi movq 8(%rsp), %rdx movq (%rsp), %rax addq $16, %rsp /* * Jump back to the old return address. This cannot be JMP_NOSPEC rdi * since IBT would demand that contain ENDBR, which simply isn't so for * return addresses. Use a retpoline here to keep the RSB balanced. */ ANNOTATE_INTRA_FUNCTION_CALL call .Ldo_rop int3 .Ldo_rop: mov %rdi, (%rsp) RET SYM_CODE_END(return_to_handler) #endif
aixcc-public/challenge-001-exemplar-source
14,131
arch/x86/kernel/head_32.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * * Copyright (C) 1991, 1992 Linus Torvalds * * Enhanced CPU detection and feature setting code by Mike Jagdis * and Martin Mares, November 1997. */ .text #include <linux/threads.h> #include <linux/init.h> #include <linux/linkage.h> #include <asm/segment.h> #include <asm/page_types.h> #include <asm/pgtable_types.h> #include <asm/cache.h> #include <asm/thread_info.h> #include <asm/asm-offsets.h> #include <asm/setup.h> #include <asm/processor-flags.h> #include <asm/msr-index.h> #include <asm/cpufeatures.h> #include <asm/percpu.h> #include <asm/nops.h> #include <asm/nospec-branch.h> #include <asm/bootparam.h> #include <asm/export.h> #include <asm/pgtable_32.h> /* Physical address */ #define pa(X) ((X) - __PAGE_OFFSET) /* * References to members of the new_cpu_data structure. */ #define X86 new_cpu_data+CPUINFO_x86 #define X86_VENDOR new_cpu_data+CPUINFO_x86_vendor #define X86_MODEL new_cpu_data+CPUINFO_x86_model #define X86_STEPPING new_cpu_data+CPUINFO_x86_stepping #define X86_HARD_MATH new_cpu_data+CPUINFO_hard_math #define X86_CPUID new_cpu_data+CPUINFO_cpuid_level #define X86_CAPABILITY new_cpu_data+CPUINFO_x86_capability #define X86_VENDOR_ID new_cpu_data+CPUINFO_x86_vendor_id #define SIZEOF_PTREGS 17*4 /* * Worst-case size of the kernel mapping we need to make: * a relocatable kernel can live anywhere in lowmem, so we need to be able * to map all of lowmem. */ KERNEL_PAGES = LOWMEM_PAGES INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE RESERVE_BRK(pagetables, INIT_MAP_SIZE) /* * 32-bit kernel entrypoint; only used by the boot CPU. On entry, * %esi points to the real-mode code as a 32-bit pointer. * CS and DS must be 4 GB flat segments, but we don't depend on * any particular GDT layout, because we load our own as soon as we * can. */ __HEAD SYM_CODE_START(startup_32) movl pa(initial_stack),%ecx /* * Set segments to known values. */ lgdt pa(boot_gdt_descr) movl $(__BOOT_DS),%eax movl %eax,%ds movl %eax,%es movl %eax,%fs movl %eax,%gs movl %eax,%ss leal -__PAGE_OFFSET(%ecx),%esp /* * Clear BSS first so that there are no surprises... */ cld xorl %eax,%eax movl $pa(__bss_start),%edi movl $pa(__bss_stop),%ecx subl %edi,%ecx shrl $2,%ecx rep ; stosl /* * Copy bootup parameters out of the way. * Note: %esi still has the pointer to the real-mode data. * With the kexec as boot loader, parameter segment might be loaded beyond * kernel image and might not even be addressable by early boot page tables. * (kexec on panic case). Hence copy out the parameters before initializing * page tables. */ movl $pa(boot_params),%edi movl $(PARAM_SIZE/4),%ecx cld rep movsl movl pa(boot_params) + NEW_CL_POINTER,%esi andl %esi,%esi jz 1f # No command line movl $pa(boot_command_line),%edi movl $(COMMAND_LINE_SIZE/4),%ecx rep movsl 1: #ifdef CONFIG_OLPC /* save OFW's pgdir table for later use when calling into OFW */ movl %cr3, %eax movl %eax, pa(olpc_ofw_pgd) #endif #ifdef CONFIG_MICROCODE /* Early load ucode on BSP. */ call load_ucode_bsp #endif /* Create early pagetables. */ call mk_early_pgtbl_32 /* Do early initialization of the fixmap area */ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax #ifdef CONFIG_X86_PAE #define KPMDS (((-__PAGE_OFFSET) >> 30) & 3) /* Number of kernel PMDs */ movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8) #else movl %eax,pa(initial_page_table+0xffc) #endif jmp .Ldefault_entry SYM_CODE_END(startup_32) #ifdef CONFIG_HOTPLUG_CPU /* * Boot CPU0 entry point. It's called from play_dead(). Everything has been set * up already except stack. 
We just set up stack here. Then call * start_secondary(). */ SYM_FUNC_START(start_cpu0) movl initial_stack, %ecx movl %ecx, %esp call *(initial_code) 1: jmp 1b SYM_FUNC_END(start_cpu0) #endif /* * Non-boot CPU entry point; entered from trampoline.S * We can't lgdt here, because lgdt itself uses a data segment, but * we know the trampoline has already loaded the boot_gdt for us. * * If cpu hotplug is not supported then this code can go in init section * which will be freed later */ SYM_FUNC_START(startup_32_smp) cld movl $(__BOOT_DS),%eax movl %eax,%ds movl %eax,%es movl %eax,%fs movl %eax,%gs movl pa(initial_stack),%ecx movl %eax,%ss leal -__PAGE_OFFSET(%ecx),%esp #ifdef CONFIG_MICROCODE /* Early load ucode on AP. */ call load_ucode_ap #endif .Ldefault_entry: movl $(CR0_STATE & ~X86_CR0_PG),%eax movl %eax,%cr0 /* * We want to start out with EFLAGS unambiguously cleared. Some BIOSes leave * bits like NT set. This would confuse the debugger if this code is traced. So * initialize them properly now before switching to protected mode. That means * DF in particular (even though we have cleared it earlier after copying the * command line) because GCC expects it. */ pushl $0 popfl /* * New page tables may be in 4Mbyte page mode and may be using the global pages. * * NOTE! If we are on a 486 we may have no cr4 at all! Specifically, cr4 exists * if and only if CPUID exists and has flags other than the FPU flag set. */ movl $-1,pa(X86_CPUID) # preset CPUID level movl $X86_EFLAGS_ID,%ecx pushl %ecx popfl # set EFLAGS=ID pushfl popl %eax # get EFLAGS testl $X86_EFLAGS_ID,%eax # did EFLAGS.ID remained set? jz .Lenable_paging # hw disallowed setting of ID bit # which means no CPUID and no CR4 xorl %eax,%eax cpuid movl %eax,pa(X86_CPUID) # save largest std CPUID function movl $1,%eax cpuid andl $~1,%edx # Ignore CPUID.FPU jz .Lenable_paging # No flags or only CPUID.FPU = no CR4 movl pa(mmu_cr4_features),%eax movl %eax,%cr4 testb $X86_CR4_PAE, %al # check if PAE is enabled jz .Lenable_paging /* Check if extended functions are implemented */ movl $0x80000000, %eax cpuid /* Value must be in the range 0x80000001 to 0x8000ffff */ subl $0x80000001, %eax cmpl $(0x8000ffff-0x80000001), %eax ja .Lenable_paging /* Clear bogus XD_DISABLE bits */ call verify_cpu mov $0x80000001, %eax cpuid /* Execute Disable bit supported? */ btl $(X86_FEATURE_NX & 31), %edx jnc .Lenable_paging /* Setup EFER (Extended Feature Enable Register) */ movl $MSR_EFER, %ecx rdmsr btsl $_EFER_NX, %eax /* Make changes effective */ wrmsr .Lenable_paging: /* * Enable paging */ movl $pa(initial_page_table), %eax movl %eax,%cr3 /* set the page table pointer.. */ movl $CR0_STATE,%eax movl %eax,%cr0 /* ..and set paging (PG) bit */ ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */ 1: /* Shift the stack pointer to a virtual address */ addl $__PAGE_OFFSET, %esp /* * start system 32-bit setup. We need to re-do some of the things done * in 16-bit mode for the "real" operations. */ movl setup_once_ref,%eax andl %eax,%eax jz 1f # Did we do this already? call *%eax 1: /* * Check if it is 486 */ movb $4,X86 # at least 486 cmpl $-1,X86_CPUID je .Lis486 /* get vendor info */ xorl %eax,%eax # call CPUID with 0 -> return vendor ID cpuid movl %eax,X86_CPUID # save CPUID level movl %ebx,X86_VENDOR_ID # lo 4 chars movl %edx,X86_VENDOR_ID+4 # next 4 chars movl %ecx,X86_VENDOR_ID+8 # last 4 chars orl %eax,%eax # do we have processor info as well? 
je .Lis486 movl $1,%eax # Use the CPUID instruction to get CPU type cpuid movb %al,%cl # save reg for future use andb $0x0f,%ah # mask processor family movb %ah,X86 andb $0xf0,%al # mask model shrb $4,%al movb %al,X86_MODEL andb $0x0f,%cl # mask mask revision movb %cl,X86_STEPPING movl %edx,X86_CAPABILITY .Lis486: movl $0x50022,%ecx # set AM, WP, NE and MP movl %cr0,%eax andl $0x80000011,%eax # Save PG,PE,ET orl %ecx,%eax movl %eax,%cr0 lgdt early_gdt_descr ljmp $(__KERNEL_CS),$1f 1: movl $(__KERNEL_DS),%eax # reload all the segment registers movl %eax,%ss # after changing gdt. movl $(__USER_DS),%eax # DS/ES contains default USER segment movl %eax,%ds movl %eax,%es movl $(__KERNEL_PERCPU), %eax movl %eax,%fs # set this cpu's percpu xorl %eax,%eax movl %eax,%gs # clear possible garbage in %gs xorl %eax,%eax # Clear LDT lldt %ax call *(initial_code) 1: jmp 1b SYM_FUNC_END(startup_32_smp) #include "verify_cpu.S" /* * setup_once * * The setup work we only want to run on the BSP. * * Warning: %esi is live across this function. */ __INIT setup_once: andl $0,setup_once_ref /* Once is enough, thanks */ RET SYM_FUNC_START(early_idt_handler_array) # 36(%esp) %eflags # 32(%esp) %cs # 28(%esp) %eip # 24(%rsp) error code i = 0 .rept NUM_EXCEPTION_VECTORS .if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0 pushl $0 # Dummy error code, to make stack frame uniform .endif pushl $i # 20(%esp) Vector number jmp early_idt_handler_common i = i + 1 .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc .endr SYM_FUNC_END(early_idt_handler_array) SYM_CODE_START_LOCAL(early_idt_handler_common) /* * The stack is the hardware frame, an error code or zero, and the * vector number. */ cld incl %ss:early_recursion_flag /* The vector number is in pt_regs->gs */ cld pushl %fs /* pt_regs->fs (__fsh varies by model) */ pushl %es /* pt_regs->es (__esh varies by model) */ pushl %ds /* pt_regs->ds (__dsh varies by model) */ pushl %eax /* pt_regs->ax */ pushl %ebp /* pt_regs->bp */ pushl %edi /* pt_regs->di */ pushl %esi /* pt_regs->si */ pushl %edx /* pt_regs->dx */ pushl %ecx /* pt_regs->cx */ pushl %ebx /* pt_regs->bx */ /* Fix up DS and ES */ movl $(__KERNEL_DS), %ecx movl %ecx, %ds movl %ecx, %es /* Load the vector number into EDX */ movl PT_GS(%esp), %edx /* Load GS into pt_regs->gs (and maybe clobber __gsh) */ movw %gs, PT_GS(%esp) movl %esp, %eax /* args are pt_regs (EAX), trapnr (EDX) */ call early_fixup_exception popl %ebx /* pt_regs->bx */ popl %ecx /* pt_regs->cx */ popl %edx /* pt_regs->dx */ popl %esi /* pt_regs->si */ popl %edi /* pt_regs->di */ popl %ebp /* pt_regs->bp */ popl %eax /* pt_regs->ax */ popl %ds /* pt_regs->ds (always ignores __dsh) */ popl %es /* pt_regs->es (always ignores __esh) */ popl %fs /* pt_regs->fs (always ignores __fsh) */ popl %gs /* pt_regs->gs (always ignores __gsh) */ decl %ss:early_recursion_flag addl $4, %esp /* pop pt_regs->orig_ax */ iret SYM_CODE_END(early_idt_handler_common) /* This is the default interrupt "handler" :-) */ SYM_FUNC_START(early_ignore_irq) cld #ifdef CONFIG_PRINTK pushl %eax pushl %ecx pushl %edx pushl %es pushl %ds movl $(__KERNEL_DS),%eax movl %eax,%ds movl %eax,%es cmpl $2,early_recursion_flag je hlt_loop incl early_recursion_flag pushl 16(%esp) pushl 24(%esp) pushl 32(%esp) pushl 40(%esp) pushl $int_msg call _printk call dump_stack addl $(5*4),%esp popl %ds popl %es popl %edx popl %ecx popl %eax #endif iret hlt_loop: hlt jmp hlt_loop SYM_FUNC_END(early_ignore_irq) __INITDATA .align 4 SYM_DATA(early_recursion_flag, .long 0) __REFDATA .align 4 
SYM_DATA(initial_code, .long i386_start_kernel) SYM_DATA(setup_once_ref, .long setup_once) #ifdef CONFIG_PAGE_TABLE_ISOLATION #define PGD_ALIGN (2 * PAGE_SIZE) #define PTI_USER_PGD_FILL 1024 #else #define PGD_ALIGN (PAGE_SIZE) #define PTI_USER_PGD_FILL 0 #endif /* * BSS section */ __PAGE_ALIGNED_BSS .align PGD_ALIGN #ifdef CONFIG_X86_PAE .globl initial_pg_pmd initial_pg_pmd: .fill 1024*KPMDS,4,0 #else .globl initial_page_table initial_page_table: .fill 1024,4,0 #endif .align PGD_ALIGN initial_pg_fixmap: .fill 1024,4,0 .globl swapper_pg_dir .align PGD_ALIGN swapper_pg_dir: .fill 1024,4,0 .fill PTI_USER_PGD_FILL,4,0 .globl empty_zero_page empty_zero_page: .fill 4096,1,0 EXPORT_SYMBOL(empty_zero_page) /* * This starts the data section. */ #ifdef CONFIG_X86_PAE __PAGE_ALIGNED_DATA /* Page-aligned for the benefit of paravirt? */ .align PGD_ALIGN SYM_DATA_START(initial_page_table) .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */ # if KPMDS == 3 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 .long pa(initial_pg_pmd+PGD_IDENT_ATTR+0x1000),0 .long pa(initial_pg_pmd+PGD_IDENT_ATTR+0x2000),0 # elif KPMDS == 2 .long 0,0 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 .long pa(initial_pg_pmd+PGD_IDENT_ATTR+0x1000),0 # elif KPMDS == 1 .long 0,0 .long 0,0 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 # else # error "Kernel PMDs should be 1, 2 or 3" # endif .align PAGE_SIZE /* needs to be page-sized too */ #ifdef CONFIG_PAGE_TABLE_ISOLATION /* * PTI needs another page so sync_initial_pagetable() works correctly * and does not scribble over the data which is placed behind the * actual initial_page_table. See clone_pgd_range(). */ .fill 1024, 4, 0 #endif SYM_DATA_END(initial_page_table) #endif .data .balign 4 /* * The SIZEOF_PTREGS gap is a convention which helps the in-kernel unwinder * reliably detect the end of the stack. */ SYM_DATA(initial_stack, .long init_thread_union + THREAD_SIZE - SIZEOF_PTREGS - TOP_OF_KERNEL_STACK_PADDING) __INITRODATA int_msg: .asciz "Unknown interrupt or fault at: %p %p %p\n" #include "../../x86/xen/xen-head.S" /* * The IDT and GDT 'descriptors' are a strange 48-bit object * only used by the lidt and lgdt instructions. They are not * like usual segment descriptors - they consist of a 16-bit * segment size, and 32-bit linear address value: */ .data ALIGN # early boot GDT descriptor (must use 1:1 address mapping) .word 0 # 32 bit align gdt_desc.address SYM_DATA_START_LOCAL(boot_gdt_descr) .word __BOOT_DS+7 .long boot_gdt - __PAGE_OFFSET SYM_DATA_END(boot_gdt_descr) # boot GDT descriptor (later on used by CPU#0): .word 0 # 32 bit align gdt_desc.address SYM_DATA_START(early_gdt_descr) .word GDT_ENTRIES*8-1 .long gdt_page /* Overwritten for secondary CPUs */ SYM_DATA_END(early_gdt_descr) /* * The boot_gdt must mirror the equivalent in setup.S and is * used only for booting. */ .align L1_CACHE_BYTES SYM_DATA_START(boot_gdt) .fill GDT_ENTRY_BOOT_CS,8,0 .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */ .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */ SYM_DATA_END(boot_gdt)
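The .Lis486 path in head_32.S above pulls the CPU family, model and stepping out of CPUID leaf 1 with byte-register masks and shifts (the andb/shrb sequence that feeds X86, X86_MODEL and X86_STEPPING). A minimal user-space C sketch of the same legacy-nibble extraction follows; the sample EAX value is invented for the illustration and is not taken from the file above.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t eax = 0x000306a9;                /* hypothetical CPUID(1).EAX */

	unsigned family   = (eax >> 8) & 0x0f;    /* andb $0x0f,%ah */
	unsigned model    = (eax >> 4) & 0x0f;    /* andb $0xf0,%al ; shrb $4,%al */
	unsigned stepping =  eax       & 0x0f;    /* andb $0x0f,%cl */

	printf("family %u, model %u, stepping %u\n", family, model, stepping);
	return 0;
}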
aixcc-public/challenge-001-exemplar-source
2,480
arch/x86/kernel/sev_verify_cbit.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * sev_verify_cbit.S - Code for verification of the C-bit position reported * by the Hypervisor when running with SEV enabled. * * Copyright (c) 2020 Joerg Roedel (jroedel@suse.de) * * sev_verify_cbit() is called before switching to a new long-mode page-table * at boot. * * The C-bit position is verified by writing a random value to * an encrypted memory location while on the current page-table. Then the code * switches to the new page-table to verify that the memory content is still the * same. After that it switches back to the current page-table, and if the * check succeeds it returns. If the check fails, the code invalidates the * stack pointer and goes into a hlt loop. The stack-pointer is invalidated to * make sure no interrupt or exception can get the CPU out of the hlt loop. * * The new page-table pointer is expected in %rdi (first parameter). * */ SYM_FUNC_START(sev_verify_cbit) #ifdef CONFIG_AMD_MEM_ENCRYPT /* First check if a C-bit was detected */ movq sme_me_mask(%rip), %rsi testq %rsi, %rsi jz 3f /* sme_me_mask != 0 could mean SME or SEV - Check also for SEV */ movq sev_status(%rip), %rsi testq %rsi, %rsi jz 3f /* Save CR4 in %rsi */ movq %cr4, %rsi /* Disable Global Pages */ movq %rsi, %rdx andq $(~X86_CR4_PGE), %rdx movq %rdx, %cr4 /* * Verified that we are running under SEV - now get a random value using * RDRAND. This instruction is mandatory when running as an SEV guest. * * Don't bail out of the loop if RDRAND returns errors. It is better to * prevent forward progress than to work with a non-random value here. */ 1: rdrand %rdx jnc 1b /* Store value to memory and keep it in %rdx */ movq %rdx, sev_check_data(%rip) /* Backup current %cr3 value to restore it later */ movq %cr3, %rcx /* Switch to new %cr3 - This might unmap the stack */ movq %rdi, %cr3 /* * Compare value in %rdx with memory location. If C-bit is incorrect * this would read the encrypted data and make the check fail. */ cmpq %rdx, sev_check_data(%rip) /* Restore old %cr3 */ movq %rcx, %cr3 /* Restore previous CR4 */ movq %rsi, %cr4 /* Check CMPQ result */ je 3f /* * The check failed; prevent any forward progress to avoid ROP * attacks: invalidate the stack and go into a hlt loop. */ xorq %rsp, %rsp subq $0x1000, %rsp 2: hlt jmp 2b 3: #endif /* Return page-table pointer */ movq %rdi, %rax RET SYM_FUNC_END(sev_verify_cbit)
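sev_verify_cbit() above is written in assembly because the stack may become unusable across the page-table switch, but the idea behind the check is easy to restate. Below is a minimal user-space simulation of that idea only, not of the real code: a value written before the switch must read back unchanged afterwards, while a hypervisor that lied about the C-bit position would effectively hand back different (encrypted) bytes. The helper names and the scrambling constant are invented for the simulation.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t plaintext_view;   /* what the guest wrote */
static uint64_t scrambled_view;   /* what a wrong C-bit would read back */

static void write_check_value(uint64_t v)
{
	plaintext_view = v;
	scrambled_view = v ^ 0xdeadbeefcafef00dULL; /* stand-in for ciphertext */
}

static uint64_t read_check_value(bool cbit_correct)
{
	return cbit_correct ? plaintext_view : scrambled_view;
}

static bool cbit_check_passes(bool hypervisor_lied)
{
	uint64_t val = 0x1234567890abcdefULL;     /* stand-in for the RDRAND value */

	write_check_value(val);
	/* the real code switches to the new page-table here */
	return read_check_value(!hypervisor_lied) == val;
}

int main(void)
{
	printf("honest hypervisor: %s\n", cbit_check_passes(false) ? "check passes" : "check fails");
	printf("lying hypervisor:  %s\n", cbit_check_passes(true) ? "check passes" : "check fails");
	return 0;
}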
aixcc-public/challenge-001-exemplar-source
6,078
arch/x86/kernel/relocate_kernel_32.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * relocate_kernel.S - put the kernel image in place to boot * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com> */ #include <linux/linkage.h> #include <asm/page_types.h> #include <asm/kexec.h> #include <asm/nospec-branch.h> #include <asm/processor-flags.h> /* * Must be relocatable PIC code callable as a C function, in particular * there must be a plain RET and not jump to return thunk. */ #define PTR(x) (x << 2) /* * control_page + KEXEC_CONTROL_CODE_MAX_SIZE * ~ control_page + PAGE_SIZE are used as data storage and stack for * jumping back */ #define DATA(offset) (KEXEC_CONTROL_CODE_MAX_SIZE+(offset)) /* Minimal CPU state */ #define ESP DATA(0x0) #define CR0 DATA(0x4) #define CR3 DATA(0x8) #define CR4 DATA(0xc) /* other data */ #define CP_VA_CONTROL_PAGE DATA(0x10) #define CP_PA_PGD DATA(0x14) #define CP_PA_SWAP_PAGE DATA(0x18) #define CP_PA_BACKUP_PAGES_MAP DATA(0x1c) .text SYM_CODE_START_NOALIGN(relocate_kernel) /* Save the CPU context, used for jumping back */ pushl %ebx pushl %esi pushl %edi pushl %ebp pushf movl 20+8(%esp), %ebp /* list of pages */ movl PTR(VA_CONTROL_PAGE)(%ebp), %edi movl %esp, ESP(%edi) movl %cr0, %eax movl %eax, CR0(%edi) movl %cr3, %eax movl %eax, CR3(%edi) movl %cr4, %eax movl %eax, CR4(%edi) /* read the arguments and say goodbye to the stack */ movl 20+4(%esp), %ebx /* page_list */ movl 20+8(%esp), %ebp /* list of pages */ movl 20+12(%esp), %edx /* start address */ movl 20+16(%esp), %ecx /* cpu_has_pae */ movl 20+20(%esp), %esi /* preserve_context */ /* zero out flags, and disable interrupts */ pushl $0 popfl /* save some information for jumping back */ movl PTR(VA_CONTROL_PAGE)(%ebp), %edi movl %edi, CP_VA_CONTROL_PAGE(%edi) movl PTR(PA_PGD)(%ebp), %eax movl %eax, CP_PA_PGD(%edi) movl PTR(PA_SWAP_PAGE)(%ebp), %eax movl %eax, CP_PA_SWAP_PAGE(%edi) movl %ebx, CP_PA_BACKUP_PAGES_MAP(%edi) /* * get physical address of control page now * this is impossible after page table switch */ movl PTR(PA_CONTROL_PAGE)(%ebp), %edi /* switch to new set of page tables */ movl PTR(PA_PGD)(%ebp), %eax movl %eax, %cr3 /* setup a new stack at the end of the physical control page */ lea PAGE_SIZE(%edi), %esp /* jump to identity mapped page */ movl %edi, %eax addl $(identity_mapped - relocate_kernel), %eax pushl %eax ANNOTATE_UNRET_SAFE ret int3 SYM_CODE_END(relocate_kernel) SYM_CODE_START_LOCAL_NOALIGN(identity_mapped) /* set return address to 0 if not preserving context */ pushl $0 /* store the start address on the stack */ pushl %edx /* * Set cr0 to a known state: * - Paging disabled * - Alignment check disabled * - Write protect disabled * - No task switch * - Don't do FP software emulation. * - Protected mode enabled */ movl %cr0, %eax andl $~(X86_CR0_PG | X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %eax orl $(X86_CR0_PE), %eax movl %eax, %cr0 /* clear cr4 if applicable */ testl %ecx, %ecx jz 1f /* * Set cr4 to a known state: * Setting everything to zero seems safe. */ xorl %eax, %eax movl %eax, %cr4 jmp 1f 1: /* Flush the TLB (needed?) */ xorl %eax, %eax movl %eax, %cr3 movl CP_PA_SWAP_PAGE(%edi), %eax pushl %eax pushl %ebx call swap_pages addl $8, %esp /* * To be certain of avoiding problems with self-modifying code * I need to execute a serializing instruction here. * So I flush the TLB, it's handy, and not processor dependent. 
*/ xorl %eax, %eax movl %eax, %cr3 /* * set all of the registers to known values * leave %esp alone */ testl %esi, %esi jnz 1f xorl %edi, %edi xorl %eax, %eax xorl %ebx, %ebx xorl %ecx, %ecx xorl %edx, %edx xorl %esi, %esi xorl %ebp, %ebp ANNOTATE_UNRET_SAFE ret int3 1: popl %edx movl CP_PA_SWAP_PAGE(%edi), %esp addl $PAGE_SIZE, %esp 2: ANNOTATE_RETPOLINE_SAFE call *%edx /* get the re-entry point of the peer system */ movl 0(%esp), %ebp call 1f 1: popl %ebx subl $(1b - relocate_kernel), %ebx movl CP_VA_CONTROL_PAGE(%ebx), %edi lea PAGE_SIZE(%ebx), %esp movl CP_PA_SWAP_PAGE(%ebx), %eax movl CP_PA_BACKUP_PAGES_MAP(%ebx), %edx pushl %eax pushl %edx call swap_pages addl $8, %esp movl CP_PA_PGD(%ebx), %eax movl %eax, %cr3 movl %cr0, %eax orl $X86_CR0_PG, %eax movl %eax, %cr0 lea PAGE_SIZE(%edi), %esp movl %edi, %eax addl $(virtual_mapped - relocate_kernel), %eax pushl %eax ANNOTATE_UNRET_SAFE ret int3 SYM_CODE_END(identity_mapped) SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped) movl CR4(%edi), %eax movl %eax, %cr4 movl CR3(%edi), %eax movl %eax, %cr3 movl CR0(%edi), %eax movl %eax, %cr0 movl ESP(%edi), %esp movl %ebp, %eax popf popl %ebp popl %edi popl %esi popl %ebx ANNOTATE_UNRET_SAFE ret int3 SYM_CODE_END(virtual_mapped) /* Do the copies */ SYM_CODE_START_LOCAL_NOALIGN(swap_pages) movl 8(%esp), %edx movl 4(%esp), %ecx pushl %ebp pushl %ebx pushl %edi pushl %esi movl %ecx, %ebx jmp 1f 0: /* top, read another word from the indirection page */ movl (%ebx), %ecx addl $4, %ebx 1: testb $0x1, %cl /* is it a destination page */ jz 2f movl %ecx, %edi andl $0xfffff000, %edi jmp 0b 2: testb $0x2, %cl /* is it an indirection page */ jz 2f movl %ecx, %ebx andl $0xfffff000, %ebx jmp 0b 2: testb $0x4, %cl /* is it the done indicator */ jz 2f jmp 3f 2: testb $0x8, %cl /* is it the source indicator */ jz 0b /* Ignore it otherwise */ movl %ecx, %esi /* For every source page do a copy */ andl $0xfffff000, %esi movl %edi, %eax movl %esi, %ebp movl %edx, %edi movl $1024, %ecx rep ; movsl movl %ebp, %edi movl %eax, %esi movl $1024, %ecx rep ; movsl movl %eax, %edi movl %edx, %esi movl $1024, %ecx rep ; movsl lea PAGE_SIZE(%ebp), %esi jmp 0b 3: popl %esi popl %edi popl %ebx popl %ebp ANNOTATE_UNRET_SAFE ret int3 SYM_CODE_END(swap_pages) .globl kexec_control_code_size .set kexec_control_code_size, . - relocate_kernel
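swap_pages above walks the kexec indirection list and dispatches on the low bits of each entry: testb $0x1 marks a destination page, $0x2 the next indirection page, $0x4 the done marker and $0x8 a source page, with the page-aligned address recovered by masking with 0xfffff000. A small C sketch of that flag decoding follows; the demo list and its addresses are made up, and the sketch deliberately ignores the swap-page copy dance (the three rep movsl sequences) that the real routine performs.

#include <stdint.h>
#include <stdio.h>

#define IND_DESTINATION	0x1
#define IND_INDIRECTION	0x2
#define IND_DONE	0x4
#define IND_SOURCE	0x8
#define ENTRY_ADDR(e)	((e) & 0xfffff000u)	/* page-aligned address part */

static void describe(uint32_t e)
{
	if (e & IND_DESTINATION)
		printf("set destination page 0x%08x\n", ENTRY_ADDR(e));
	else if (e & IND_INDIRECTION)
		printf("continue at indirection page 0x%08x\n", ENTRY_ADDR(e));
	else if (e & IND_DONE)
		printf("done\n");
	else if (e & IND_SOURCE)
		printf("copy source page 0x%08x into place\n", ENTRY_ADDR(e));
	else
		printf("entry 0x%08x ignored\n", e);	/* matches the jz 0b fallthrough */
}

int main(void)
{
	const uint32_t demo[] = {	/* invented example list */
		0x00100000 | IND_DESTINATION,
		0x00804000 | IND_SOURCE,
		0x00805000 | IND_SOURCE,
		IND_DONE,
	};

	for (unsigned i = 0; i < sizeof(demo) / sizeof(demo[0]); i++)
		describe(demo[i]);
	return 0;
}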
aixcc-public/challenge-001-exemplar-source
13,830
arch/x86/kernel/vmlinux.lds.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * ld script for the x86 kernel * * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz> * * Modernisation, unification and other changes and fixes: * Copyright (C) 2007-2009 Sam Ravnborg <sam@ravnborg.org> * * * Don't define absolute symbols until and unless you know that symbol * value is should remain constant even if kernel image is relocated * at run time. Absolute symbols are not relocated. If symbol value should * change if kernel is relocated, make the symbol section relative and * put it inside the section definition. */ #ifdef CONFIG_X86_32 #define LOAD_OFFSET __PAGE_OFFSET #else #define LOAD_OFFSET __START_KERNEL_map #endif #define RUNTIME_DISCARD_EXIT #define EMITS_PT_NOTE #define RO_EXCEPTION_TABLE_ALIGN 16 #include <asm-generic/vmlinux.lds.h> #include <asm/asm-offsets.h> #include <asm/thread_info.h> #include <asm/page_types.h> #include <asm/orc_lookup.h> #include <asm/cache.h> #include <asm/boot.h> #undef i386 /* in case the preprocessor is a 32bit one */ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT) #ifdef CONFIG_X86_32 OUTPUT_ARCH(i386) ENTRY(phys_startup_32) #else OUTPUT_ARCH(i386:x86-64) ENTRY(phys_startup_64) #endif jiffies = jiffies_64; #if defined(CONFIG_X86_64) /* * On 64-bit, align RODATA to 2MB so we retain large page mappings for * boundaries spanning kernel text, rodata and data sections. * * However, kernel identity mappings will have different RWX permissions * to the pages mapping to text and to the pages padding (which are freed) the * text section. Hence kernel identity mappings will be broken to smaller * pages. For 64-bit, kernel text and kernel identity mappings are different, * so we can enable protection checks as well as retain 2MB large page * mappings for kernel text. */ #define X86_ALIGN_RODATA_BEGIN . = ALIGN(HPAGE_SIZE); #define X86_ALIGN_RODATA_END \ . = ALIGN(HPAGE_SIZE); \ __end_rodata_hpage_align = .; \ __end_rodata_aligned = .; #define ALIGN_ENTRY_TEXT_BEGIN . = ALIGN(PMD_SIZE); #define ALIGN_ENTRY_TEXT_END . = ALIGN(PMD_SIZE); /* * This section contains data which will be mapped as decrypted. Memory * encryption operates on a page basis. Make this section PMD-aligned * to avoid splitting the pages while mapping the section early. * * Note: We use a separate section so that only this section gets * decrypted to avoid exposing more than we wish. */ #define BSS_DECRYPTED \ . = ALIGN(PMD_SIZE); \ __start_bss_decrypted = .; \ *(.bss..decrypted); \ . = ALIGN(PAGE_SIZE); \ __start_bss_decrypted_unused = .; \ . = ALIGN(PMD_SIZE); \ __end_bss_decrypted = .; \ #else #define X86_ALIGN_RODATA_BEGIN #define X86_ALIGN_RODATA_END \ . = ALIGN(PAGE_SIZE); \ __end_rodata_aligned = .; #define ALIGN_ENTRY_TEXT_BEGIN #define ALIGN_ENTRY_TEXT_END #define BSS_DECRYPTED #endif PHDRS { text PT_LOAD FLAGS(5); /* R_E */ data PT_LOAD FLAGS(6); /* RW_ */ #ifdef CONFIG_X86_64 #ifdef CONFIG_SMP percpu PT_LOAD FLAGS(6); /* RW_ */ #endif init PT_LOAD FLAGS(7); /* RWE */ #endif note PT_NOTE FLAGS(0); /* ___ */ } SECTIONS { #ifdef CONFIG_X86_32 . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR; phys_startup_32 = ABSOLUTE(startup_32 - LOAD_OFFSET); #else . 
= __START_KERNEL; phys_startup_64 = ABSOLUTE(startup_64 - LOAD_OFFSET); #endif /* Text and read-only data */ .text : AT(ADDR(.text) - LOAD_OFFSET) { _text = .; _stext = .; /* bootstrapping code */ HEAD_TEXT TEXT_TEXT SCHED_TEXT CPUIDLE_TEXT LOCK_TEXT KPROBES_TEXT ALIGN_ENTRY_TEXT_BEGIN #ifdef CONFIG_CPU_SRSO *(.text..__x86.rethunk_untrain) #endif ENTRY_TEXT #ifdef CONFIG_CPU_SRSO /* * See the comment above srso_alias_untrain_ret()'s * definition. */ . = srso_alias_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20); *(.text..__x86.rethunk_safe) #endif ALIGN_ENTRY_TEXT_END SOFTIRQENTRY_TEXT STATIC_CALL_TEXT *(.gnu.warning) #ifdef CONFIG_RETPOLINE __indirect_thunk_start = .; *(.text..__x86.indirect_thunk) *(.text..__x86.return_thunk) __indirect_thunk_end = .; #endif } :text =0xcccc /* End of text section, which should occupy whole number of pages */ _etext = .; . = ALIGN(PAGE_SIZE); X86_ALIGN_RODATA_BEGIN RO_DATA(PAGE_SIZE) X86_ALIGN_RODATA_END /* Data */ .data : AT(ADDR(.data) - LOAD_OFFSET) { /* Start of data section */ _sdata = .; /* init_task */ INIT_TASK_DATA(THREAD_SIZE) #ifdef CONFIG_X86_32 /* 32 bit has nosave before _edata */ NOSAVE_DATA #endif PAGE_ALIGNED_DATA(PAGE_SIZE) CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES) DATA_DATA CONSTRUCTORS /* rarely changed data like cpu maps */ READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES) /* End of data section */ _edata = .; } :data BUG_TABLE ORC_UNWIND_TABLE . = ALIGN(PAGE_SIZE); __vvar_page = .; .vvar : AT(ADDR(.vvar) - LOAD_OFFSET) { /* work around gold bug 13023 */ __vvar_beginning_hack = .; /* Place all vvars at the offsets in asm/vvar.h. */ #define EMIT_VVAR(name, offset) \ . = __vvar_beginning_hack + offset; \ *(.vvar_ ## name) #include <asm/vvar.h> #undef EMIT_VVAR /* * Pad the rest of the page with zeros. Otherwise the loader * can leave garbage here. */ . = __vvar_beginning_hack + PAGE_SIZE; } :data . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE); /* Init code and data - will be freed after init */ . = ALIGN(PAGE_SIZE); .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) { __init_begin = .; /* paired with __init_end */ } #if defined(CONFIG_X86_64) && defined(CONFIG_SMP) /* * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the * output PHDR, so the next output section - .init.text - should * start another segment - init. */ PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu) ASSERT(SIZEOF(.data..percpu) < CONFIG_PHYSICAL_START, "per-CPU data too large - increase CONFIG_PHYSICAL_START") #endif INIT_TEXT_SECTION(PAGE_SIZE) #ifdef CONFIG_X86_64 :init #endif /* * Section for code used exclusively before alternatives are run. All * references to such code must be patched out by alternatives, normally * by using X86_FEATURE_ALWAYS CPU feature bit. * * See static_cpu_has() for an example. */ .altinstr_aux : AT(ADDR(.altinstr_aux) - LOAD_OFFSET) { *(.altinstr_aux) } INIT_DATA_SECTION(16) .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) { __x86_cpu_dev_start = .; *(.x86_cpu_dev.init) __x86_cpu_dev_end = .; } #ifdef CONFIG_X86_INTEL_MID .x86_intel_mid_dev.init : AT(ADDR(.x86_intel_mid_dev.init) - \ LOAD_OFFSET) { __x86_intel_mid_dev_start = .; *(.x86_intel_mid_dev.init) __x86_intel_mid_dev_end = .; } #endif /* * start address and size of operations which during runtime * can be patched with virtualization friendly instructions or * baremetal native ones. Think page table operations. * Details in paravirt_types.h */ . 
= ALIGN(8); .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) { __parainstructions = .; *(.parainstructions) __parainstructions_end = .; } #ifdef CONFIG_RETPOLINE /* * List of instructions that call/jmp/jcc to retpoline thunks * __x86_indirect_thunk_*(). These instructions can be patched along * with alternatives, after which the section can be freed. */ . = ALIGN(8); .retpoline_sites : AT(ADDR(.retpoline_sites) - LOAD_OFFSET) { __retpoline_sites = .; *(.retpoline_sites) __retpoline_sites_end = .; } . = ALIGN(8); .return_sites : AT(ADDR(.return_sites) - LOAD_OFFSET) { __return_sites = .; *(.return_sites) __return_sites_end = .; } #endif #ifdef CONFIG_X86_KERNEL_IBT . = ALIGN(8); .ibt_endbr_seal : AT(ADDR(.ibt_endbr_seal) - LOAD_OFFSET) { __ibt_endbr_seal = .; *(.ibt_endbr_seal) __ibt_endbr_seal_end = .; } #endif /* * struct alt_inst entries. From the header (alternative.h): * "Alternative instructions for different CPU types or capabilities" * Think locking instructions on spinlocks. */ . = ALIGN(8); .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) { __alt_instructions = .; *(.altinstructions) __alt_instructions_end = .; } /* * And here are the replacement instructions. The linker sticks * them as binary blobs. The .altinstructions has enough data to * get the address and the length of them to patch the kernel safely. */ .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) { *(.altinstr_replacement) } . = ALIGN(8); .apicdrivers : AT(ADDR(.apicdrivers) - LOAD_OFFSET) { __apicdrivers = .; *(.apicdrivers); __apicdrivers_end = .; } . = ALIGN(8); /* * .exit.text is discarded at runtime, not link time, to deal with * references from .altinstructions */ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { EXIT_TEXT } .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { EXIT_DATA } #if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP) PERCPU_SECTION(INTERNODE_CACHE_BYTES) #endif . = ALIGN(PAGE_SIZE); /* freed after init ends here */ .init.end : AT(ADDR(.init.end) - LOAD_OFFSET) { __init_end = .; } /* * smp_locks might be freed after init * start/end must be page aligned */ . = ALIGN(PAGE_SIZE); .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) { __smp_locks = .; *(.smp_locks) . = ALIGN(PAGE_SIZE); __smp_locks_end = .; } #ifdef CONFIG_X86_64 .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { NOSAVE_DATA } #endif /* BSS */ . = ALIGN(PAGE_SIZE); .bss : AT(ADDR(.bss) - LOAD_OFFSET) { __bss_start = .; *(.bss..page_aligned) . = ALIGN(PAGE_SIZE); *(BSS_MAIN) BSS_DECRYPTED . = ALIGN(PAGE_SIZE); __bss_stop = .; } /* * The memory occupied from _text to here, __end_of_kernel_reserve, is * automatically reserved in setup_arch(). Anything after here must be * explicitly reserved using memblock_reserve() or it will be discarded * and treated as available memory. */ __end_of_kernel_reserve = .; . = ALIGN(PAGE_SIZE); .brk : AT(ADDR(.brk) - LOAD_OFFSET) { __brk_base = .; . += 64 * 1024; /* 64k alignment slop space */ *(.bss..brk) /* areas brk users have reserved */ __brk_limit = .; } . = ALIGN(PAGE_SIZE); /* keep VO_INIT_SIZE page aligned */ _end = .; #ifdef CONFIG_AMD_MEM_ENCRYPT /* * Early scratch/workarea section: Lives outside of the kernel proper * (_text - _end). * * Resides after _end because even though the .brk section is after * __end_of_kernel_reserve, the .brk section is later reserved as a * part of the kernel. Since it is located after __end_of_kernel_reserve * it will be discarded and become part of the available memory. 
As * such, it can only be used by very early boot code and must not be * needed afterwards. * * Currently used by SME for performing in-place encryption of the * kernel during boot. Resides on a 2MB boundary to simplify the * pagetable setup used for SME in-place encryption. */ . = ALIGN(HPAGE_SIZE); .init.scratch : AT(ADDR(.init.scratch) - LOAD_OFFSET) { __init_scratch_begin = .; *(.init.scratch) . = ALIGN(HPAGE_SIZE); __init_scratch_end = .; } #endif STABS_DEBUG DWARF_DEBUG ELF_DETAILS DISCARDS /* * Make sure that the .got.plt is either completely empty or it * contains only the lazy dispatch entries. */ .got.plt (INFO) : { *(.got.plt) } ASSERT(SIZEOF(.got.plt) == 0 || #ifdef CONFIG_X86_64 SIZEOF(.got.plt) == 0x18, #else SIZEOF(.got.plt) == 0xc, #endif "Unexpected GOT/PLT entries detected!") /* * Sections that should stay zero sized, which is safer to * explicitly check instead of blindly discarding. */ .got : { *(.got) *(.igot.*) } ASSERT(SIZEOF(.got) == 0, "Unexpected GOT entries detected!") .plt : { *(.plt) *(.plt.*) *(.iplt) } ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!") .rel.dyn : { *(.rel.*) *(.rel_*) } ASSERT(SIZEOF(.rel.dyn) == 0, "Unexpected run-time relocations (.rel) detected!") .rela.dyn : { *(.rela.*) *(.rela_*) } ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!") } /* * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility: */ . = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE), "kernel image bigger than KERNEL_IMAGE_SIZE"); #ifdef CONFIG_X86_64 /* * Per-cpu symbols which need to be offset from __per_cpu_load * for the boot processor. */ #define INIT_PER_CPU(x) init_per_cpu__##x = ABSOLUTE(x) + __per_cpu_load INIT_PER_CPU(gdt_page); INIT_PER_CPU(fixed_percpu_data); INIT_PER_CPU(irq_stack_backing_store); #ifdef CONFIG_SMP . = ASSERT((fixed_percpu_data == 0), "fixed_percpu_data is not at start of per-cpu area"); #endif #ifdef CONFIG_RETHUNK . = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned"); . = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned"); #endif #ifdef CONFIG_CPU_SRSO /* * GNU ld cannot do XOR until 2.41. * https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=f6f78318fca803c4907fb8d7f6ded8295f1947b1 * * LLVM lld cannot do XOR until lld-17. * https://github.com/llvm/llvm-project/commit/fae96104d4378166cbe5c875ef8ed808a356f3fb * * Instead do: (A | B) - (A & B) in order to compute the XOR * of the two function addresses: */ . = ASSERT(((ABSOLUTE(srso_alias_untrain_ret) | srso_alias_safe_ret) - (ABSOLUTE(srso_alias_untrain_ret) & srso_alias_safe_ret)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)), "SRSO function pair won't alias"); #endif #endif /* CONFIG_X86_64 */ #ifdef CONFIG_KEXEC_CORE #include <asm/kexec.h> . = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE, "kexec control code size is too big"); #endif
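The CONFIG_CPU_SRSO assertion above works around linkers that cannot evaluate XOR by computing (A | B) - (A & B) instead. The identity holds because A | B sets exactly the bits of A ^ B plus the bits of A & B, and those two bit sets are disjoint, so the subtraction never borrows. A short self-checking C snippet, with sample values chosen arbitrarily:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t xor_via_or_and(uint64_t a, uint64_t b)
{
	return (a | b) - (a & b);	/* same trick as the linker-script comment */
}

int main(void)
{
	const uint64_t samples[] = {
		0, 1, 0xffffffffffffffffULL, 0x123456789abcdef0ULL, 0x0f0f0f0f0f0f0f0fULL,
	};

	for (unsigned i = 0; i < 5; i++)
		for (unsigned j = 0; j < 5; j++)
			assert(xor_via_or_and(samples[i], samples[j]) == (samples[i] ^ samples[j]));

	puts("(A | B) - (A & B) == A ^ B for all sampled pairs");
	return 0;
}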
aixcc-public/challenge-001-exemplar-source
6,526
arch/x86/kernel/relocate_kernel_64.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * relocate_kernel.S - put the kernel image in place to boot * Copyright (C) 2002-2005 Eric Biederman <ebiederm@xmission.com> */ #include <linux/linkage.h> #include <asm/page_types.h> #include <asm/kexec.h> #include <asm/processor-flags.h> #include <asm/pgtable_types.h> #include <asm/nospec-branch.h> #include <asm/unwind_hints.h> /* * Must be relocatable PIC code callable as a C function, in particular * there must be a plain RET and not jump to return thunk. */ #define PTR(x) (x << 3) #define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) /* * control_page + KEXEC_CONTROL_CODE_MAX_SIZE * ~ control_page + PAGE_SIZE are used as data storage and stack for * jumping back */ #define DATA(offset) (KEXEC_CONTROL_CODE_MAX_SIZE+(offset)) /* Minimal CPU state */ #define RSP DATA(0x0) #define CR0 DATA(0x8) #define CR3 DATA(0x10) #define CR4 DATA(0x18) /* other data */ #define CP_PA_TABLE_PAGE DATA(0x20) #define CP_PA_SWAP_PAGE DATA(0x28) #define CP_PA_BACKUP_PAGES_MAP DATA(0x30) .text .align PAGE_SIZE .code64 SYM_CODE_START_NOALIGN(relocate_kernel) UNWIND_HINT_EMPTY ANNOTATE_NOENDBR /* * %rdi indirection_page * %rsi page_list * %rdx start address * %rcx preserve_context * %r8 host_mem_enc_active */ /* Save the CPU context, used for jumping back */ pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 pushf movq PTR(VA_CONTROL_PAGE)(%rsi), %r11 movq %rsp, RSP(%r11) movq %cr0, %rax movq %rax, CR0(%r11) movq %cr3, %rax movq %rax, CR3(%r11) movq %cr4, %rax movq %rax, CR4(%r11) /* Save CR4. Required to enable the right paging mode later. */ movq %rax, %r13 /* zero out flags, and disable interrupts */ pushq $0 popfq /* Save SME active flag */ movq %r8, %r12 /* * get physical address of control page now * this is impossible after page table switch */ movq PTR(PA_CONTROL_PAGE)(%rsi), %r8 /* get physical address of page table now too */ movq PTR(PA_TABLE_PAGE)(%rsi), %r9 /* get physical address of swap page now */ movq PTR(PA_SWAP_PAGE)(%rsi), %r10 /* save some information for jumping back */ movq %r9, CP_PA_TABLE_PAGE(%r11) movq %r10, CP_PA_SWAP_PAGE(%r11) movq %rdi, CP_PA_BACKUP_PAGES_MAP(%r11) /* Switch to the identity mapped page tables */ movq %r9, %cr3 /* setup a new stack at the end of the physical control page */ lea PAGE_SIZE(%r8), %rsp /* jump to identity mapped page */ addq $(identity_mapped - relocate_kernel), %r8 pushq %r8 ANNOTATE_UNRET_SAFE ret int3 SYM_CODE_END(relocate_kernel) SYM_CODE_START_LOCAL_NOALIGN(identity_mapped) UNWIND_HINT_EMPTY /* set return address to 0 if not preserving context */ pushq $0 /* store the start address on the stack */ pushq %rdx /* * Clear X86_CR4_CET (if it was set) such that we can clear CR0_WP * below. */ movq %cr4, %rax andq $~(X86_CR4_CET), %rax movq %rax, %cr4 /* * Set cr0 to a known state: * - Paging enabled * - Alignment check disabled * - Write protect disabled * - No task switch * - Don't do FP software emulation. * - Protected mode enabled */ movq %cr0, %rax andq $~(X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %rax orl $(X86_CR0_PG | X86_CR0_PE), %eax movq %rax, %cr0 /* * Set cr4 to a known state: * - physical address extension enabled * - 5-level paging, if it was enabled before */ movl $X86_CR4_PAE, %eax testq $X86_CR4_LA57, %r13 jz 1f orl $X86_CR4_LA57, %eax 1: movq %rax, %cr4 jmp 1f 1: /* Flush the TLB (needed?) 
*/ movq %r9, %cr3 /* * If SME is active, there could be old encrypted cache line * entries that will conflict with the now unencrypted memory * used by kexec. Flush the caches before copying the kernel. */ testq %r12, %r12 jz 1f wbinvd 1: movq %rcx, %r11 call swap_pages /* * To be certain of avoiding problems with self-modifying code * I need to execute a serializing instruction here. * So I flush the TLB by reloading %cr3 here, it's handy, * and not processor dependent. */ movq %cr3, %rax movq %rax, %cr3 /* * set all of the registers to known values * leave %rsp alone */ testq %r11, %r11 jnz 1f xorl %eax, %eax xorl %ebx, %ebx xorl %ecx, %ecx xorl %edx, %edx xorl %esi, %esi xorl %edi, %edi xorl %ebp, %ebp xorl %r8d, %r8d xorl %r9d, %r9d xorl %r10d, %r10d xorl %r11d, %r11d xorl %r12d, %r12d xorl %r13d, %r13d xorl %r14d, %r14d xorl %r15d, %r15d ANNOTATE_UNRET_SAFE ret int3 1: popq %rdx leaq PAGE_SIZE(%r10), %rsp ANNOTATE_RETPOLINE_SAFE call *%rdx /* get the re-entry point of the peer system */ movq 0(%rsp), %rbp leaq relocate_kernel(%rip), %r8 movq CP_PA_SWAP_PAGE(%r8), %r10 movq CP_PA_BACKUP_PAGES_MAP(%r8), %rdi movq CP_PA_TABLE_PAGE(%r8), %rax movq %rax, %cr3 lea PAGE_SIZE(%r8), %rsp call swap_pages movq $virtual_mapped, %rax pushq %rax ANNOTATE_UNRET_SAFE ret int3 SYM_CODE_END(identity_mapped) SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped) UNWIND_HINT_EMPTY ANNOTATE_NOENDBR // RET target, above movq RSP(%r8), %rsp movq CR4(%r8), %rax movq %rax, %cr4 movq CR3(%r8), %rax movq CR0(%r8), %r8 movq %rax, %cr3 movq %r8, %cr0 movq %rbp, %rax popf popq %r15 popq %r14 popq %r13 popq %r12 popq %rbp popq %rbx ANNOTATE_UNRET_SAFE ret int3 SYM_CODE_END(virtual_mapped) /* Do the copies */ SYM_CODE_START_LOCAL_NOALIGN(swap_pages) UNWIND_HINT_EMPTY movq %rdi, %rcx /* Put the page_list in %rcx */ xorl %edi, %edi xorl %esi, %esi jmp 1f 0: /* top, read another word for the indirection page */ movq (%rbx), %rcx addq $8, %rbx 1: testb $0x1, %cl /* is it a destination page? */ jz 2f movq %rcx, %rdi andq $0xfffffffffffff000, %rdi jmp 0b 2: testb $0x2, %cl /* is it an indirection page? */ jz 2f movq %rcx, %rbx andq $0xfffffffffffff000, %rbx jmp 0b 2: testb $0x4, %cl /* is it the done indicator? */ jz 2f jmp 3f 2: testb $0x8, %cl /* is it the source indicator? */ jz 0b /* Ignore it otherwise */ movq %rcx, %rsi /* For ever source page do a copy */ andq $0xfffffffffffff000, %rsi movq %rdi, %rdx movq %rsi, %rax movq %r10, %rdi movl $512, %ecx rep ; movsq movq %rax, %rdi movq %rdx, %rsi movl $512, %ecx rep ; movsq movq %rdx, %rdi movq %r10, %rsi movl $512, %ecx rep ; movsq lea PAGE_SIZE(%rax), %rsi jmp 0b 3: ANNOTATE_UNRET_SAFE ret int3 SYM_CODE_END(swap_pages) .globl kexec_control_code_size .set kexec_control_code_size, . - relocate_kernel
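Both relocate_kernel variants in this dump use a PTR(x) macro to turn an index into the boot page_list into a byte offset: x << 3 here for 8-byte entries, x << 2 in the 32-bit file earlier for 4-byte entries. A trivial C restatement of that equivalence (nothing below is taken from kernel headers):

#include <assert.h>
#include <stdio.h>

#define PTR64(x)	((x) << 3)	/* 8-byte slots, as in relocate_kernel_64.S */
#define PTR32(x)	((x) << 2)	/* 4-byte slots, as in relocate_kernel_32.S */

int main(void)
{
	for (int idx = 0; idx < 8; idx++) {
		assert(PTR64(idx) == idx * 8);
		assert(PTR32(idx) == idx * 4);
	}
	printf("index 3 -> byte offset %d (64-bit), %d (32-bit)\n", PTR64(3), PTR32(3));
	return 0;
}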
aixcc-public/challenge-001-exemplar-source
18,730
arch/x86/kernel/head_64.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit * * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE * Copyright (C) 2000 Pavel Machek <pavel@suse.cz> * Copyright (C) 2000 Karsten Keil <kkeil@suse.de> * Copyright (C) 2001,2002 Andi Kleen <ak@suse.de> * Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com> */ #include <linux/linkage.h> #include <linux/threads.h> #include <linux/init.h> #include <linux/pgtable.h> #include <asm/segment.h> #include <asm/page.h> #include <asm/msr.h> #include <asm/cache.h> #include <asm/processor-flags.h> #include <asm/percpu.h> #include <asm/nops.h> #include "../entry/calling.h" #include <asm/export.h> #include <asm/nospec-branch.h> #include <asm/fixmap.h> /* * We are not able to switch in one step to the final KERNEL ADDRESS SPACE * because we need identity-mapped pages. */ #define l4_index(x) (((x) >> 39) & 511) #define pud_index(x) (((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) L4_PAGE_OFFSET = l4_index(__PAGE_OFFSET_BASE_L4) L4_START_KERNEL = l4_index(__START_KERNEL_map) L3_START_KERNEL = pud_index(__START_KERNEL_map) .text __HEAD .code64 SYM_CODE_START_NOALIGN(startup_64) UNWIND_HINT_EMPTY /* * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0, * and someone has loaded an identity mapped page table * for us. These identity mapped page tables map all of the * kernel pages and possibly all of memory. * * %rsi holds a physical pointer to real_mode_data. * * We come here either directly from a 64bit bootloader, or from * arch/x86/boot/compressed/head_64.S. * * We only come here initially at boot nothing else comes here. * * Since we may be loaded at an address different from what we were * compiled to run at we first fixup the physical addresses in our page * tables and then reload them. */ /* Set up the stack for verify_cpu(), similar to initial_stack below */ leaq (__end_init_task - FRAME_SIZE)(%rip), %rsp leaq _text(%rip), %rdi /* * initial_gs points to initial fixed_percpu_data struct with storage for * the stack protector canary. Global pointer fixups are needed at this * stage, so apply them as is done in fixup_pointer(), and initialize %gs * such that the canary can be accessed at %gs:40 for subsequent C calls. */ movl $MSR_GS_BASE, %ecx movq initial_gs(%rip), %rax movq $_text, %rdx subq %rdx, %rax addq %rdi, %rax movq %rax, %rdx shrq $32, %rdx wrmsr pushq %rsi call startup_64_setup_env popq %rsi /* Now switch to __KERNEL_CS so IRET works reliably */ pushq $__KERNEL_CS leaq .Lon_kernel_cs(%rip), %rax pushq %rax lretq .Lon_kernel_cs: UNWIND_HINT_EMPTY #ifdef CONFIG_AMD_MEM_ENCRYPT /* * Activate SEV/SME memory encryption if supported/enabled. This needs to * be done now, since this also includes setup of the SEV-SNP CPUID table, * which needs to be done before any CPUID instructions are executed in * subsequent code. */ movq %rsi, %rdi pushq %rsi call sme_enable popq %rsi #endif /* Sanitize CPU configuration */ call verify_cpu /* * Perform pagetable fixups. Additionally, if SME is active, encrypt * the kernel and retrieve the modifier (SME encryption mask if SME * is active) to be added to the initial pgdir entry that will be * programmed into CR3. 
*/ leaq _text(%rip), %rdi pushq %rsi call __startup_64 popq %rsi /* Form the CR3 value being sure to include the CR3 modifier */ addq $(early_top_pgt - __START_KERNEL_map), %rax jmp 1f SYM_CODE_END(startup_64) SYM_CODE_START(secondary_startup_64) UNWIND_HINT_EMPTY ANNOTATE_NOENDBR /* * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0, * and someone has loaded a mapped page table. * * %rsi holds a physical pointer to real_mode_data. * * We come here either from startup_64 (using physical addresses) * or from trampoline.S (using virtual addresses). * * Using virtual addresses from trampoline.S removes the need * to have any identity mapped pages in the kernel page table * after the boot processor executes this code. */ /* Sanitize CPU configuration */ call verify_cpu /* * The secondary_startup_64_no_verify entry point is only used by * SEV-ES guests. In those guests the call to verify_cpu() would cause * #VC exceptions which can not be handled at this stage of secondary * CPU bringup. * * All non SEV-ES systems, especially Intel systems, need to execute * verify_cpu() above to make sure NX is enabled. */ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL) UNWIND_HINT_EMPTY ANNOTATE_NOENDBR /* * Retrieve the modifier (SME encryption mask if SME is active) to be * added to the initial pgdir entry that will be programmed into CR3. */ #ifdef CONFIG_AMD_MEM_ENCRYPT movq sme_me_mask, %rax #else xorq %rax, %rax #endif /* Form the CR3 value being sure to include the CR3 modifier */ addq $(init_top_pgt - __START_KERNEL_map), %rax 1: #ifdef CONFIG_X86_MCE /* * Preserve CR4.MCE if the kernel will enable #MC support. * Clearing MCE may fault in some environments (that also force #MC * support). Any machine check that occurs before #MC support is fully * configured will crash the system regardless of the CR4.MCE value set * here. */ movq %cr4, %rcx andl $X86_CR4_MCE, %ecx #else movl $0, %ecx #endif /* Enable PAE mode, PGE and LA57 */ orl $(X86_CR4_PAE | X86_CR4_PGE), %ecx #ifdef CONFIG_X86_5LEVEL testl $1, __pgtable_l5_enabled(%rip) jz 1f orl $X86_CR4_LA57, %ecx 1: #endif movq %rcx, %cr4 /* Setup early boot stage 4-/5-level pagetables. */ addq phys_base(%rip), %rax /* * For SEV guests: Verify that the C-bit is correct. A malicious * hypervisor could lie about the C-bit position to perform a ROP * attack on the guest by writing to the unencrypted stack and wait for * the next RET instruction. * %rsi carries pointer to realmode data and is callee-clobbered. Save * and restore it. */ pushq %rsi movq %rax, %rdi call sev_verify_cbit popq %rsi /* * Switch to new page-table * * For the boot CPU this switches to early_top_pgt which still has the * indentity mappings present. The secondary CPUs will switch to the * init_top_pgt here, away from the trampoline_pgd and unmap the * indentity mapped ranges. */ movq %rax, %cr3 /* * Do a global TLB flush after the CR3 switch to make sure the TLB * entries from the identity mapping are flushed. */ movq %cr4, %rcx movq %rcx, %rax xorq $X86_CR4_PGE, %rcx movq %rcx, %cr4 movq %rax, %cr4 /* Ensure I am executing from virtual addresses */ movq $1f, %rax ANNOTATE_RETPOLINE_SAFE jmp *%rax 1: UNWIND_HINT_EMPTY ANNOTATE_NOENDBR // above /* * We must switch to a new descriptor in kernel space for the GDT * because soon the kernel won't have access anymore to the userspace * addresses where we're currently running on. We have to do that here * because in 32bit we couldn't load a 64bit linear address. 
*/ lgdt early_gdt_descr(%rip) /* set up data segments */ xorl %eax,%eax movl %eax,%ds movl %eax,%ss movl %eax,%es /* * We don't really need to load %fs or %gs, but load them anyway * to kill any stale realmode selectors. This allows execution * under VT hardware. */ movl %eax,%fs movl %eax,%gs /* Set up %gs. * * The base of %gs always points to fixed_percpu_data. If the * stack protector canary is enabled, it is located at %gs:40. * Note that, on SMP, the boot cpu uses init data section until * the per cpu areas are set up. */ movl $MSR_GS_BASE,%ecx movl initial_gs(%rip),%eax movl initial_gs+4(%rip),%edx wrmsr /* * Setup a boot time stack - Any secondary CPU will have lost its stack * by now because the cr3-switch above unmaps the real-mode stack */ movq initial_stack(%rip), %rsp /* Setup and Load IDT */ pushq %rsi call early_setup_idt popq %rsi /* Check if nx is implemented */ movl $0x80000001, %eax cpuid movl %edx,%edi /* Setup EFER (Extended Feature Enable Register) */ movl $MSR_EFER, %ecx rdmsr /* * Preserve current value of EFER for comparison and to skip * EFER writes if no change was made (for TDX guest) */ movl %eax, %edx btsl $_EFER_SCE, %eax /* Enable System Call */ btl $20,%edi /* No Execute supported? */ jnc 1f btsl $_EFER_NX, %eax btsq $_PAGE_BIT_NX,early_pmd_flags(%rip) /* Avoid writing EFER if no change was made (for TDX guest) */ 1: cmpl %edx, %eax je 1f xor %edx, %edx wrmsr /* Make changes effective */ 1: /* Setup cr0 */ movl $CR0_STATE, %eax /* Make changes effective */ movq %rax, %cr0 /* zero EFLAGS after setting rsp */ pushq $0 popfq /* rsi is pointer to real mode structure with interesting info. pass it to C */ movq %rsi, %rdi .Ljump_to_C_code: /* * Jump to run C code and to be on a real kernel address. * Since we are running on identity-mapped space we have to jump * to the full 64bit address, this is only possible as indirect * jump. In addition we need to ensure %cs is set so we make this * a far return. * * Note: do not change to far jump indirect with 64bit offset. * * AMD does not support far jump indirect with 64bit offset. * AMD64 Architecture Programmer's Manual, Volume 3: states only * JMP FAR mem16:16 FF /5 Far jump indirect, * with the target specified by a far pointer in memory. * JMP FAR mem16:32 FF /5 Far jump indirect, * with the target specified by a far pointer in memory. * * Intel64 does support 64bit offset. * Software Developer Manual Vol 2: states: * FF /5 JMP m16:16 Jump far, absolute indirect, * address given in m16:16 * FF /5 JMP m16:32 Jump far, absolute indirect, * address given in m16:32. * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect, * address given in m16:64. */ pushq $.Lafter_lret # put return address on stack for unwinder xorl %ebp, %ebp # clear frame pointer movq initial_code(%rip), %rax pushq $__KERNEL_CS # set correct cs pushq %rax # target address in negative space lretq .Lafter_lret: ANNOTATE_NOENDBR SYM_CODE_END(secondary_startup_64) #include "verify_cpu.S" #include "sev_verify_cbit.S" #ifdef CONFIG_HOTPLUG_CPU /* * Boot CPU0 entry point. It's called from play_dead(). Everything has been set * up already except stack. We just set up stack here. Then call * start_secondary() via .Ljump_to_C_code. */ SYM_CODE_START(start_cpu0) UNWIND_HINT_EMPTY movq initial_stack(%rip), %rsp jmp .Ljump_to_C_code SYM_CODE_END(start_cpu0) #endif #ifdef CONFIG_AMD_MEM_ENCRYPT /* * VC Exception handler used during early boot when running on kernel * addresses, but before the switch to the idt_table can be made. 
* The early_idt_handler_array can't be used here because it calls into a lot * of __init code and this handler is also used during CPU offlining/onlining. * Therefore this handler ends up in the .text section so that it stays around * when .init.text is freed. */ SYM_CODE_START_NOALIGN(vc_boot_ghcb) UNWIND_HINT_IRET_REGS offset=8 ENDBR ANNOTATE_UNRET_END /* Build pt_regs */ PUSH_AND_CLEAR_REGS /* Call C handler */ movq %rsp, %rdi movq ORIG_RAX(%rsp), %rsi movq initial_vc_handler(%rip), %rax ANNOTATE_RETPOLINE_SAFE call *%rax /* Unwind pt_regs */ POP_REGS /* Remove Error Code */ addq $8, %rsp iretq SYM_CODE_END(vc_boot_ghcb) #endif /* Both SMP bootup and ACPI suspend change these variables */ __REFDATA .balign 8 SYM_DATA(initial_code, .quad x86_64_start_kernel) SYM_DATA(initial_gs, .quad INIT_PER_CPU_VAR(fixed_percpu_data)) #ifdef CONFIG_AMD_MEM_ENCRYPT SYM_DATA(initial_vc_handler, .quad handle_vc_boot_ghcb) #endif /* * The FRAME_SIZE gap is a convention which helps the in-kernel unwinder * reliably detect the end of the stack. */ SYM_DATA(initial_stack, .quad init_thread_union + THREAD_SIZE - FRAME_SIZE) __FINITDATA __INIT SYM_CODE_START(early_idt_handler_array) i = 0 .rept NUM_EXCEPTION_VECTORS .if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0 UNWIND_HINT_IRET_REGS ENDBR pushq $0 # Dummy error code, to make stack frame uniform .else UNWIND_HINT_IRET_REGS offset=8 ENDBR .endif pushq $i # 72(%rsp) Vector number jmp early_idt_handler_common UNWIND_HINT_IRET_REGS i = i + 1 .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc .endr SYM_CODE_END(early_idt_handler_array) ANNOTATE_NOENDBR // early_idt_handler_array[NUM_EXCEPTION_VECTORS] SYM_CODE_START_LOCAL(early_idt_handler_common) UNWIND_HINT_IRET_REGS offset=16 ANNOTATE_UNRET_END /* * The stack is the hardware frame, an error code or zero, and the * vector number. */ cld incl early_recursion_flag(%rip) /* The vector number is currently in the pt_regs->di slot. */ pushq %rsi /* pt_regs->si */ movq 8(%rsp), %rsi /* RSI = vector number */ movq %rdi, 8(%rsp) /* pt_regs->di = RDI */ pushq %rdx /* pt_regs->dx */ pushq %rcx /* pt_regs->cx */ pushq %rax /* pt_regs->ax */ pushq %r8 /* pt_regs->r8 */ pushq %r9 /* pt_regs->r9 */ pushq %r10 /* pt_regs->r10 */ pushq %r11 /* pt_regs->r11 */ pushq %rbx /* pt_regs->bx */ pushq %rbp /* pt_regs->bp */ pushq %r12 /* pt_regs->r12 */ pushq %r13 /* pt_regs->r13 */ pushq %r14 /* pt_regs->r14 */ pushq %r15 /* pt_regs->r15 */ UNWIND_HINT_REGS movq %rsp,%rdi /* RDI = pt_regs; RSI is already trapnr */ call do_early_exception decl early_recursion_flag(%rip) jmp restore_regs_and_return_to_kernel SYM_CODE_END(early_idt_handler_common) #ifdef CONFIG_AMD_MEM_ENCRYPT /* * VC Exception handler used during very early boot. The * early_idt_handler_array can't be used because it returns via the * paravirtualized INTERRUPT_RETURN and pv-ops don't work that early. * * XXX it does, fix this. * * This handler will end up in the .init.text section and not be * available to boot secondary CPUs. 
*/ SYM_CODE_START_NOALIGN(vc_no_ghcb) UNWIND_HINT_IRET_REGS offset=8 ENDBR ANNOTATE_UNRET_END /* Build pt_regs */ PUSH_AND_CLEAR_REGS /* Call C handler */ movq %rsp, %rdi movq ORIG_RAX(%rsp), %rsi call do_vc_no_ghcb /* Unwind pt_regs */ POP_REGS /* Remove Error Code */ addq $8, %rsp /* Pure iret required here - don't use INTERRUPT_RETURN */ iretq SYM_CODE_END(vc_no_ghcb) #endif #define SYM_DATA_START_PAGE_ALIGNED(name) \ SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE) #ifdef CONFIG_PAGE_TABLE_ISOLATION /* * Each PGD needs to be 8k long and 8k aligned. We do not * ever go out to userspace with these, so we do not * strictly *need* the second page, but this allows us to * have a single set_pgd() implementation that does not * need to worry about whether it has 4k or 8k to work * with. * * This ensures PGDs are 8k long: */ #define PTI_USER_PGD_FILL 512 /* This ensures they are 8k-aligned: */ #define SYM_DATA_START_PTI_ALIGNED(name) \ SYM_START(name, SYM_L_GLOBAL, .balign 2 * PAGE_SIZE) #else #define SYM_DATA_START_PTI_ALIGNED(name) \ SYM_DATA_START_PAGE_ALIGNED(name) #define PTI_USER_PGD_FILL 0 #endif /* Automate the creation of 1 to 1 mapping pmd entries */ #define PMDS(START, PERM, COUNT) \ i = 0 ; \ .rept (COUNT) ; \ .quad (START) + (i << PMD_SHIFT) + (PERM) ; \ i = i + 1 ; \ .endr __INITDATA .balign 4 SYM_DATA_START_PTI_ALIGNED(early_top_pgt) .fill 512,8,0 .fill PTI_USER_PGD_FILL,8,0 SYM_DATA_END(early_top_pgt) SYM_DATA_START_PAGE_ALIGNED(early_dynamic_pgts) .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0 SYM_DATA_END(early_dynamic_pgts) SYM_DATA(early_recursion_flag, .long 0) .data #if defined(CONFIG_XEN_PV) || defined(CONFIG_PVH) SYM_DATA_START_PTI_ALIGNED(init_top_pgt) .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC .org init_top_pgt + L4_PAGE_OFFSET*8, 0 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC .org init_top_pgt + L4_START_KERNEL*8, 0 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */ .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC .fill PTI_USER_PGD_FILL,8,0 SYM_DATA_END(init_top_pgt) SYM_DATA_START_PAGE_ALIGNED(level3_ident_pgt) .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC .fill 511, 8, 0 SYM_DATA_END(level3_ident_pgt) SYM_DATA_START_PAGE_ALIGNED(level2_ident_pgt) /* * Since I easily can, map the first 1G. * Don't set NX because code runs from these pages. * * Note: This sets _PAGE_GLOBAL despite whether * the CPU supports it or it is enabled. But, * the CPU should ignore the bit. */ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD) SYM_DATA_END(level2_ident_pgt) #else SYM_DATA_START_PTI_ALIGNED(init_top_pgt) .fill 512,8,0 .fill PTI_USER_PGD_FILL,8,0 SYM_DATA_END(init_top_pgt) #endif #ifdef CONFIG_X86_5LEVEL SYM_DATA_START_PAGE_ALIGNED(level4_kernel_pgt) .fill 511,8,0 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC SYM_DATA_END(level4_kernel_pgt) #endif SYM_DATA_START_PAGE_ALIGNED(level3_kernel_pgt) .fill L3_START_KERNEL,8,0 /* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */ .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC SYM_DATA_END(level3_kernel_pgt) SYM_DATA_START_PAGE_ALIGNED(level2_kernel_pgt) /* * Kernel high mapping. * * The kernel code+data+bss must be located below KERNEL_IMAGE_SIZE in * virtual address space, which is 1 GiB if RANDOMIZE_BASE is enabled, * 512 MiB otherwise. * * (NOTE: after that starts the module area, see MODULES_VADDR.) 
* * This table is eventually used by the kernel during normal runtime. * Care must be taken to clear out undesired bits later, like _PAGE_RW * or _PAGE_GLOBAL in some cases. */ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE) SYM_DATA_END(level2_kernel_pgt) SYM_DATA_START_PAGE_ALIGNED(level2_fixmap_pgt) .fill (512 - 4 - FIXMAP_PMD_NUM),8,0 pgtno = 0 .rept (FIXMAP_PMD_NUM) .quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \ + _PAGE_TABLE_NOENC; pgtno = pgtno + 1 .endr /* 6 MB reserved space + a 2MB hole */ .fill 4,8,0 SYM_DATA_END(level2_fixmap_pgt) SYM_DATA_START_PAGE_ALIGNED(level1_fixmap_pgt) .rept (FIXMAP_PMD_NUM) .fill 512,8,0 .endr SYM_DATA_END(level1_fixmap_pgt) #undef PMDS .data .align 16 SYM_DATA(early_gdt_descr, .word GDT_ENTRIES*8-1) SYM_DATA_LOCAL(early_gdt_descr_base, .quad INIT_PER_CPU_VAR(gdt_page)) .align 16 /* This must match the first entry in level2_kernel_pgt */ SYM_DATA(phys_base, .quad 0x0) EXPORT_SYMBOL(phys_base) #include "../../x86/xen/xen-head.S" __PAGE_ALIGNED_BSS SYM_DATA_START_PAGE_ALIGNED(empty_zero_page) .skip PAGE_SIZE SYM_DATA_END(empty_zero_page) EXPORT_SYMBOL(empty_zero_page)
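head_64.S above computes the page-table slots for the kernel mappings with l4_index() and pud_index(), and its comments assert the results ("... = 511" and "... = 510"). Those can be re-checked in plain C. The numeric values used below for __START_KERNEL_map and __PAGE_OFFSET_BASE_L4, as well as PUD_SHIFT = 30 and 512 entries per table, are the usual non-randomized x86_64 constants and are assumptions of this sketch rather than values quoted from this file.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define L4_INDEX(x)	(((x) >> 39) & 511)	/* PGD (level-4) slot, as in l4_index() */
#define PUD_INDEX(x)	(((x) >> 30) & 511)	/* PUD (level-3) slot, as in pud_index() */

int main(void)
{
	uint64_t start_kernel_map = 0xffffffff80000000ULL;	/* assumed __START_KERNEL_map */
	uint64_t page_offset_l4   = 0xffff888000000000ULL;	/* assumed __PAGE_OFFSET_BASE_L4 */

	assert(L4_INDEX(start_kernel_map) == 511);	/* matches the "= 511" comment */
	assert(PUD_INDEX(start_kernel_map) == 510);	/* matches the "= 510" comment */

	printf("L4_START_KERNEL = %llu\n", (unsigned long long)L4_INDEX(start_kernel_map));
	printf("L3_START_KERNEL = %llu\n", (unsigned long long)PUD_INDEX(start_kernel_map));
	printf("L4_PAGE_OFFSET  = %llu\n", (unsigned long long)L4_INDEX(page_offset_l4));
	return 0;
}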
aixcc-public/challenge-001-exemplar-source
38,603
arch/x86/crypto/aria-aesni-avx-asm_64.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * ARIA Cipher 16-way parallel algorithm (AVX) * * Copyright (c) 2022 Taehee Yoo <ap420073@gmail.com> * */ #include <linux/linkage.h> #include <linux/cfi_types.h> #include <asm/frame.h> /* struct aria_ctx: */ #define enc_key 0 #define dec_key 272 #define rounds 544 /* register macros */ #define CTX %rdi #define BV8(a0, a1, a2, a3, a4, a5, a6, a7) \ ( (((a0) & 1) << 0) | \ (((a1) & 1) << 1) | \ (((a2) & 1) << 2) | \ (((a3) & 1) << 3) | \ (((a4) & 1) << 4) | \ (((a5) & 1) << 5) | \ (((a6) & 1) << 6) | \ (((a7) & 1) << 7) ) #define BM8X8(l0, l1, l2, l3, l4, l5, l6, l7) \ ( ((l7) << (0 * 8)) | \ ((l6) << (1 * 8)) | \ ((l5) << (2 * 8)) | \ ((l4) << (3 * 8)) | \ ((l3) << (4 * 8)) | \ ((l2) << (5 * 8)) | \ ((l1) << (6 * 8)) | \ ((l0) << (7 * 8)) ) #define inc_le128(x, minus_one, tmp) \ vpcmpeqq minus_one, x, tmp; \ vpsubq minus_one, x, x; \ vpslldq $8, tmp, tmp; \ vpsubq tmp, x, x; #define filter_8bit(x, lo_t, hi_t, mask4bit, tmp0) \ vpand x, mask4bit, tmp0; \ vpandn x, mask4bit, x; \ vpsrld $4, x, x; \ \ vpshufb tmp0, lo_t, tmp0; \ vpshufb x, hi_t, x; \ vpxor tmp0, x, x; #define transpose_4x4(x0, x1, x2, x3, t1, t2) \ vpunpckhdq x1, x0, t2; \ vpunpckldq x1, x0, x0; \ \ vpunpckldq x3, x2, t1; \ vpunpckhdq x3, x2, x2; \ \ vpunpckhqdq t1, x0, x1; \ vpunpcklqdq t1, x0, x0; \ \ vpunpckhqdq x2, t2, x3; \ vpunpcklqdq x2, t2, x2; #define byteslice_16x16b(a0, b0, c0, d0, \ a1, b1, c1, d1, \ a2, b2, c2, d2, \ a3, b3, c3, d3, \ st0, st1) \ vmovdqu d2, st0; \ vmovdqu d3, st1; \ transpose_4x4(a0, a1, a2, a3, d2, d3); \ transpose_4x4(b0, b1, b2, b3, d2, d3); \ vmovdqu st0, d2; \ vmovdqu st1, d3; \ \ vmovdqu a0, st0; \ vmovdqu a1, st1; \ transpose_4x4(c0, c1, c2, c3, a0, a1); \ transpose_4x4(d0, d1, d2, d3, a0, a1); \ \ vmovdqu .Lshufb_16x16b, a0; \ vmovdqu st1, a1; \ vpshufb a0, a2, a2; \ vpshufb a0, a3, a3; \ vpshufb a0, b0, b0; \ vpshufb a0, b1, b1; \ vpshufb a0, b2, b2; \ vpshufb a0, b3, b3; \ vpshufb a0, a1, a1; \ vpshufb a0, c0, c0; \ vpshufb a0, c1, c1; \ vpshufb a0, c2, c2; \ vpshufb a0, c3, c3; \ vpshufb a0, d0, d0; \ vpshufb a0, d1, d1; \ vpshufb a0, d2, d2; \ vpshufb a0, d3, d3; \ vmovdqu d3, st1; \ vmovdqu st0, d3; \ vpshufb a0, d3, a0; \ vmovdqu d2, st0; \ \ transpose_4x4(a0, b0, c0, d0, d2, d3); \ transpose_4x4(a1, b1, c1, d1, d2, d3); \ vmovdqu st0, d2; \ vmovdqu st1, d3; \ \ vmovdqu b0, st0; \ vmovdqu b1, st1; \ transpose_4x4(a2, b2, c2, d2, b0, b1); \ transpose_4x4(a3, b3, c3, d3, b0, b1); \ vmovdqu st0, b0; \ vmovdqu st1, b1; \ /* does not adjust output bytes inside vectors */ #define debyteslice_16x16b(a0, b0, c0, d0, \ a1, b1, c1, d1, \ a2, b2, c2, d2, \ a3, b3, c3, d3, \ st0, st1) \ vmovdqu d2, st0; \ vmovdqu d3, st1; \ transpose_4x4(a0, a1, a2, a3, d2, d3); \ transpose_4x4(b0, b1, b2, b3, d2, d3); \ vmovdqu st0, d2; \ vmovdqu st1, d3; \ \ vmovdqu a0, st0; \ vmovdqu a1, st1; \ transpose_4x4(c0, c1, c2, c3, a0, a1); \ transpose_4x4(d0, d1, d2, d3, a0, a1); \ \ vmovdqu .Lshufb_16x16b, a0; \ vmovdqu st1, a1; \ vpshufb a0, a2, a2; \ vpshufb a0, a3, a3; \ vpshufb a0, b0, b0; \ vpshufb a0, b1, b1; \ vpshufb a0, b2, b2; \ vpshufb a0, b3, b3; \ vpshufb a0, a1, a1; \ vpshufb a0, c0, c0; \ vpshufb a0, c1, c1; \ vpshufb a0, c2, c2; \ vpshufb a0, c3, c3; \ vpshufb a0, d0, d0; \ vpshufb a0, d1, d1; \ vpshufb a0, d2, d2; \ vpshufb a0, d3, d3; \ vmovdqu d3, st1; \ vmovdqu st0, d3; \ vpshufb a0, d3, a0; \ vmovdqu d2, st0; \ \ transpose_4x4(c0, d0, a0, b0, d2, d3); \ transpose_4x4(c1, d1, a1, b1, d2, d3); \ vmovdqu st0, d2; \ vmovdqu st1, d3; \ \ vmovdqu 
b0, st0; \ vmovdqu b1, st1; \ transpose_4x4(c2, d2, a2, b2, b0, b1); \ transpose_4x4(c3, d3, a3, b3, b0, b1); \ vmovdqu st0, b0; \ vmovdqu st1, b1; \ /* does not adjust output bytes inside vectors */ /* load blocks to registers and apply pre-whitening */ #define inpack16_pre(x0, x1, x2, x3, \ x4, x5, x6, x7, \ y0, y1, y2, y3, \ y4, y5, y6, y7, \ rio) \ vmovdqu (0 * 16)(rio), x0; \ vmovdqu (1 * 16)(rio), x1; \ vmovdqu (2 * 16)(rio), x2; \ vmovdqu (3 * 16)(rio), x3; \ vmovdqu (4 * 16)(rio), x4; \ vmovdqu (5 * 16)(rio), x5; \ vmovdqu (6 * 16)(rio), x6; \ vmovdqu (7 * 16)(rio), x7; \ vmovdqu (8 * 16)(rio), y0; \ vmovdqu (9 * 16)(rio), y1; \ vmovdqu (10 * 16)(rio), y2; \ vmovdqu (11 * 16)(rio), y3; \ vmovdqu (12 * 16)(rio), y4; \ vmovdqu (13 * 16)(rio), y5; \ vmovdqu (14 * 16)(rio), y6; \ vmovdqu (15 * 16)(rio), y7; /* byteslice pre-whitened blocks and store to temporary memory */ #define inpack16_post(x0, x1, x2, x3, \ x4, x5, x6, x7, \ y0, y1, y2, y3, \ y4, y5, y6, y7, \ mem_ab, mem_cd) \ byteslice_16x16b(x0, x1, x2, x3, \ x4, x5, x6, x7, \ y0, y1, y2, y3, \ y4, y5, y6, y7, \ (mem_ab), (mem_cd)); \ \ vmovdqu x0, 0 * 16(mem_ab); \ vmovdqu x1, 1 * 16(mem_ab); \ vmovdqu x2, 2 * 16(mem_ab); \ vmovdqu x3, 3 * 16(mem_ab); \ vmovdqu x4, 4 * 16(mem_ab); \ vmovdqu x5, 5 * 16(mem_ab); \ vmovdqu x6, 6 * 16(mem_ab); \ vmovdqu x7, 7 * 16(mem_ab); \ vmovdqu y0, 0 * 16(mem_cd); \ vmovdqu y1, 1 * 16(mem_cd); \ vmovdqu y2, 2 * 16(mem_cd); \ vmovdqu y3, 3 * 16(mem_cd); \ vmovdqu y4, 4 * 16(mem_cd); \ vmovdqu y5, 5 * 16(mem_cd); \ vmovdqu y6, 6 * 16(mem_cd); \ vmovdqu y7, 7 * 16(mem_cd); #define write_output(x0, x1, x2, x3, \ x4, x5, x6, x7, \ y0, y1, y2, y3, \ y4, y5, y6, y7, \ mem) \ vmovdqu x0, 0 * 16(mem); \ vmovdqu x1, 1 * 16(mem); \ vmovdqu x2, 2 * 16(mem); \ vmovdqu x3, 3 * 16(mem); \ vmovdqu x4, 4 * 16(mem); \ vmovdqu x5, 5 * 16(mem); \ vmovdqu x6, 6 * 16(mem); \ vmovdqu x7, 7 * 16(mem); \ vmovdqu y0, 8 * 16(mem); \ vmovdqu y1, 9 * 16(mem); \ vmovdqu y2, 10 * 16(mem); \ vmovdqu y3, 11 * 16(mem); \ vmovdqu y4, 12 * 16(mem); \ vmovdqu y5, 13 * 16(mem); \ vmovdqu y6, 14 * 16(mem); \ vmovdqu y7, 15 * 16(mem); \ #define aria_store_state_8way(x0, x1, x2, x3, \ x4, x5, x6, x7, \ mem_tmp, idx) \ vmovdqu x0, ((idx + 0) * 16)(mem_tmp); \ vmovdqu x1, ((idx + 1) * 16)(mem_tmp); \ vmovdqu x2, ((idx + 2) * 16)(mem_tmp); \ vmovdqu x3, ((idx + 3) * 16)(mem_tmp); \ vmovdqu x4, ((idx + 4) * 16)(mem_tmp); \ vmovdqu x5, ((idx + 5) * 16)(mem_tmp); \ vmovdqu x6, ((idx + 6) * 16)(mem_tmp); \ vmovdqu x7, ((idx + 7) * 16)(mem_tmp); #define aria_load_state_8way(x0, x1, x2, x3, \ x4, x5, x6, x7, \ mem_tmp, idx) \ vmovdqu ((idx + 0) * 16)(mem_tmp), x0; \ vmovdqu ((idx + 1) * 16)(mem_tmp), x1; \ vmovdqu ((idx + 2) * 16)(mem_tmp), x2; \ vmovdqu ((idx + 3) * 16)(mem_tmp), x3; \ vmovdqu ((idx + 4) * 16)(mem_tmp), x4; \ vmovdqu ((idx + 5) * 16)(mem_tmp), x5; \ vmovdqu ((idx + 6) * 16)(mem_tmp), x6; \ vmovdqu ((idx + 7) * 16)(mem_tmp), x7; #define aria_ark_8way(x0, x1, x2, x3, \ x4, x5, x6, x7, \ t0, rk, idx, round) \ /* AddRoundKey */ \ vpbroadcastb ((round * 16) + idx + 3)(rk), t0; \ vpxor t0, x0, x0; \ vpbroadcastb ((round * 16) + idx + 2)(rk), t0; \ vpxor t0, x1, x1; \ vpbroadcastb ((round * 16) + idx + 1)(rk), t0; \ vpxor t0, x2, x2; \ vpbroadcastb ((round * 16) + idx + 0)(rk), t0; \ vpxor t0, x3, x3; \ vpbroadcastb ((round * 16) + idx + 7)(rk), t0; \ vpxor t0, x4, x4; \ vpbroadcastb ((round * 16) + idx + 6)(rk), t0; \ vpxor t0, x5, x5; \ vpbroadcastb ((round * 16) + idx + 5)(rk), t0; \ vpxor t0, x6, x6; \ vpbroadcastb ((round * 
16) + idx + 4)(rk), t0; \ vpxor t0, x7, x7; #define aria_sbox_8way_gfni(x0, x1, x2, x3, \ x4, x5, x6, x7, \ t0, t1, t2, t3, \ t4, t5, t6, t7) \ vpbroadcastq .Ltf_s2_bitmatrix, t0; \ vpbroadcastq .Ltf_inv_bitmatrix, t1; \ vpbroadcastq .Ltf_id_bitmatrix, t2; \ vpbroadcastq .Ltf_aff_bitmatrix, t3; \ vpbroadcastq .Ltf_x2_bitmatrix, t4; \ vgf2p8affineinvqb $(tf_s2_const), t0, x1, x1; \ vgf2p8affineinvqb $(tf_s2_const), t0, x5, x5; \ vgf2p8affineqb $(tf_inv_const), t1, x2, x2; \ vgf2p8affineqb $(tf_inv_const), t1, x6, x6; \ vgf2p8affineinvqb $0, t2, x2, x2; \ vgf2p8affineinvqb $0, t2, x6, x6; \ vgf2p8affineinvqb $(tf_aff_const), t3, x0, x0; \ vgf2p8affineinvqb $(tf_aff_const), t3, x4, x4; \ vgf2p8affineqb $(tf_x2_const), t4, x3, x3; \ vgf2p8affineqb $(tf_x2_const), t4, x7, x7; \ vgf2p8affineinvqb $0, t2, x3, x3; \ vgf2p8affineinvqb $0, t2, x7, x7 #define aria_sbox_8way(x0, x1, x2, x3, \ x4, x5, x6, x7, \ t0, t1, t2, t3, \ t4, t5, t6, t7) \ vpxor t7, t7, t7; \ vmovdqa .Linv_shift_row, t0; \ vmovdqa .Lshift_row, t1; \ vpbroadcastd .L0f0f0f0f, t6; \ vmovdqa .Ltf_lo__inv_aff__and__s2, t2; \ vmovdqa .Ltf_hi__inv_aff__and__s2, t3; \ vmovdqa .Ltf_lo__x2__and__fwd_aff, t4; \ vmovdqa .Ltf_hi__x2__and__fwd_aff, t5; \ \ vaesenclast t7, x0, x0; \ vaesenclast t7, x4, x4; \ vaesenclast t7, x1, x1; \ vaesenclast t7, x5, x5; \ vaesdeclast t7, x2, x2; \ vaesdeclast t7, x6, x6; \ \ /* AES inverse shift rows */ \ vpshufb t0, x0, x0; \ vpshufb t0, x4, x4; \ vpshufb t0, x1, x1; \ vpshufb t0, x5, x5; \ vpshufb t1, x3, x3; \ vpshufb t1, x7, x7; \ vpshufb t1, x2, x2; \ vpshufb t1, x6, x6; \ \ /* affine transformation for S2 */ \ filter_8bit(x1, t2, t3, t6, t0); \ /* affine transformation for S2 */ \ filter_8bit(x5, t2, t3, t6, t0); \ \ /* affine transformation for X2 */ \ filter_8bit(x3, t4, t5, t6, t0); \ /* affine transformation for X2 */ \ filter_8bit(x7, t4, t5, t6, t0); \ vaesdeclast t7, x3, x3; \ vaesdeclast t7, x7, x7; #define aria_diff_m(x0, x1, x2, x3, \ t0, t1, t2, t3) \ /* T = rotr32(X, 8); */ \ /* X ^= T */ \ vpxor x0, x3, t0; \ vpxor x1, x0, t1; \ vpxor x2, x1, t2; \ vpxor x3, x2, t3; \ /* X = T ^ rotr(X, 16); */ \ vpxor t2, x0, x0; \ vpxor x1, t3, t3; \ vpxor t0, x2, x2; \ vpxor t1, x3, x1; \ vmovdqu t3, x3; #define aria_diff_word(x0, x1, x2, x3, \ x4, x5, x6, x7, \ y0, y1, y2, y3, \ y4, y5, y6, y7) \ /* t1 ^= t2; */ \ vpxor y0, x4, x4; \ vpxor y1, x5, x5; \ vpxor y2, x6, x6; \ vpxor y3, x7, x7; \ \ /* t2 ^= t3; */ \ vpxor y4, y0, y0; \ vpxor y5, y1, y1; \ vpxor y6, y2, y2; \ vpxor y7, y3, y3; \ \ /* t0 ^= t1; */ \ vpxor x4, x0, x0; \ vpxor x5, x1, x1; \ vpxor x6, x2, x2; \ vpxor x7, x3, x3; \ \ /* t3 ^= t1; */ \ vpxor x4, y4, y4; \ vpxor x5, y5, y5; \ vpxor x6, y6, y6; \ vpxor x7, y7, y7; \ \ /* t2 ^= t0; */ \ vpxor x0, y0, y0; \ vpxor x1, y1, y1; \ vpxor x2, y2, y2; \ vpxor x3, y3, y3; \ \ /* t1 ^= t2; */ \ vpxor y0, x4, x4; \ vpxor y1, x5, x5; \ vpxor y2, x6, x6; \ vpxor y3, x7, x7; #define aria_fe(x0, x1, x2, x3, \ x4, x5, x6, x7, \ y0, y1, y2, y3, \ y4, y5, y6, y7, \ mem_tmp, rk, round) \ aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ y0, rk, 8, round); \ \ aria_sbox_8way(x2, x3, x0, x1, x6, x7, x4, x5, \ y0, y1, y2, y3, y4, y5, y6, y7); \ \ aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \ aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \ aria_store_state_8way(x0, x1, x2, x3, \ x4, x5, x6, x7, \ mem_tmp, 8); \ \ aria_load_state_8way(x0, x1, x2, x3, \ x4, x5, x6, x7, \ mem_tmp, 0); \ aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ y0, rk, 0, round); \ \ aria_sbox_8way(x2, x3, x0, x1, x6, x7, x4, x5, \ y0, 
y1, y2, y3, y4, y5, y6, y7); \ \ aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \ aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \ aria_store_state_8way(x0, x1, x2, x3, \ x4, x5, x6, x7, \ mem_tmp, 0); \ aria_load_state_8way(y0, y1, y2, y3, \ y4, y5, y6, y7, \ mem_tmp, 8); \ aria_diff_word(x0, x1, x2, x3, \ x4, x5, x6, x7, \ y0, y1, y2, y3, \ y4, y5, y6, y7); \ /* aria_diff_byte() \ * T3 = ABCD -> BADC \ * T3 = y4, y5, y6, y7 -> y5, y4, y7, y6 \ * T0 = ABCD -> CDAB \ * T0 = x0, x1, x2, x3 -> x2, x3, x0, x1 \ * T1 = ABCD -> DCBA \ * T1 = x4, x5, x6, x7 -> x7, x6, x5, x4 \ */ \ aria_diff_word(x2, x3, x0, x1, \ x7, x6, x5, x4, \ y0, y1, y2, y3, \ y5, y4, y7, y6); \ aria_store_state_8way(x3, x2, x1, x0, \ x6, x7, x4, x5, \ mem_tmp, 0); #define aria_fo(x0, x1, x2, x3, \ x4, x5, x6, x7, \ y0, y1, y2, y3, \ y4, y5, y6, y7, \ mem_tmp, rk, round) \ aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ y0, rk, 8, round); \ \ aria_sbox_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ y0, y1, y2, y3, y4, y5, y6, y7); \ \ aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \ aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \ aria_store_state_8way(x0, x1, x2, x3, \ x4, x5, x6, x7, \ mem_tmp, 8); \ \ aria_load_state_8way(x0, x1, x2, x3, \ x4, x5, x6, x7, \ mem_tmp, 0); \ aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ y0, rk, 0, round); \ \ aria_sbox_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ y0, y1, y2, y3, y4, y5, y6, y7); \ \ aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \ aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \ aria_store_state_8way(x0, x1, x2, x3, \ x4, x5, x6, x7, \ mem_tmp, 0); \ aria_load_state_8way(y0, y1, y2, y3, \ y4, y5, y6, y7, \ mem_tmp, 8); \ aria_diff_word(x0, x1, x2, x3, \ x4, x5, x6, x7, \ y0, y1, y2, y3, \ y4, y5, y6, y7); \ /* aria_diff_byte() \ * T1 = ABCD -> BADC \ * T1 = x4, x5, x6, x7 -> x5, x4, x7, x6 \ * T2 = ABCD -> CDAB \ * T2 = y0, y1, y2, y3, -> y2, y3, y0, y1 \ * T3 = ABCD -> DCBA \ * T3 = y4, y5, y6, y7 -> y7, y6, y5, y4 \ */ \ aria_diff_word(x0, x1, x2, x3, \ x5, x4, x7, x6, \ y2, y3, y0, y1, \ y7, y6, y5, y4); \ aria_store_state_8way(x3, x2, x1, x0, \ x6, x7, x4, x5, \ mem_tmp, 0); #define aria_ff(x0, x1, x2, x3, \ x4, x5, x6, x7, \ y0, y1, y2, y3, \ y4, y5, y6, y7, \ mem_tmp, rk, round, last_round) \ aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ y0, rk, 8, round); \ \ aria_sbox_8way(x2, x3, x0, x1, x6, x7, x4, x5, \ y0, y1, y2, y3, y4, y5, y6, y7); \ \ aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ y0, rk, 8, last_round); \ \ aria_store_state_8way(x0, x1, x2, x3, \ x4, x5, x6, x7, \ mem_tmp, 8); \ \ aria_load_state_8way(x0, x1, x2, x3, \ x4, x5, x6, x7, \ mem_tmp, 0); \ aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ y0, rk, 0, round); \ \ aria_sbox_8way(x2, x3, x0, x1, x6, x7, x4, x5, \ y0, y1, y2, y3, y4, y5, y6, y7); \ \ aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ y0, rk, 0, last_round); \ \ aria_load_state_8way(y0, y1, y2, y3, \ y4, y5, y6, y7, \ mem_tmp, 8); #define aria_fe_gfni(x0, x1, x2, x3, \ x4, x5, x6, x7, \ y0, y1, y2, y3, \ y4, y5, y6, y7, \ mem_tmp, rk, round) \ aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ y0, rk, 8, round); \ \ aria_sbox_8way_gfni(x2, x3, x0, x1, \ x6, x7, x4, x5, \ y0, y1, y2, y3, \ y4, y5, y6, y7); \ \ aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \ aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \ aria_store_state_8way(x0, x1, x2, x3, \ x4, x5, x6, x7, \ mem_tmp, 8); \ \ aria_load_state_8way(x0, x1, x2, x3, \ x4, x5, x6, x7, \ mem_tmp, 0); \ aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ y0, rk, 0, round); \ \ aria_sbox_8way_gfni(x2, x3, x0, x1, \ x6, x7, 
x4, x5, \ y0, y1, y2, y3, \ y4, y5, y6, y7); \ \ aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \ aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \ aria_store_state_8way(x0, x1, x2, x3, \ x4, x5, x6, x7, \ mem_tmp, 0); \ aria_load_state_8way(y0, y1, y2, y3, \ y4, y5, y6, y7, \ mem_tmp, 8); \ aria_diff_word(x0, x1, x2, x3, \ x4, x5, x6, x7, \ y0, y1, y2, y3, \ y4, y5, y6, y7); \ /* aria_diff_byte() \ * T3 = ABCD -> BADC \ * T3 = y4, y5, y6, y7 -> y5, y4, y7, y6 \ * T0 = ABCD -> CDAB \ * T0 = x0, x1, x2, x3 -> x2, x3, x0, x1 \ * T1 = ABCD -> DCBA \ * T1 = x4, x5, x6, x7 -> x7, x6, x5, x4 \ */ \ aria_diff_word(x2, x3, x0, x1, \ x7, x6, x5, x4, \ y0, y1, y2, y3, \ y5, y4, y7, y6); \ aria_store_state_8way(x3, x2, x1, x0, \ x6, x7, x4, x5, \ mem_tmp, 0); #define aria_fo_gfni(x0, x1, x2, x3, \ x4, x5, x6, x7, \ y0, y1, y2, y3, \ y4, y5, y6, y7, \ mem_tmp, rk, round) \ aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ y0, rk, 8, round); \ \ aria_sbox_8way_gfni(x0, x1, x2, x3, \ x4, x5, x6, x7, \ y0, y1, y2, y3, \ y4, y5, y6, y7); \ \ aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \ aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \ aria_store_state_8way(x0, x1, x2, x3, \ x4, x5, x6, x7, \ mem_tmp, 8); \ \ aria_load_state_8way(x0, x1, x2, x3, \ x4, x5, x6, x7, \ mem_tmp, 0); \ aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ y0, rk, 0, round); \ \ aria_sbox_8way_gfni(x0, x1, x2, x3, \ x4, x5, x6, x7, \ y0, y1, y2, y3, \ y4, y5, y6, y7); \ \ aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \ aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \ aria_store_state_8way(x0, x1, x2, x3, \ x4, x5, x6, x7, \ mem_tmp, 0); \ aria_load_state_8way(y0, y1, y2, y3, \ y4, y5, y6, y7, \ mem_tmp, 8); \ aria_diff_word(x0, x1, x2, x3, \ x4, x5, x6, x7, \ y0, y1, y2, y3, \ y4, y5, y6, y7); \ /* aria_diff_byte() \ * T1 = ABCD -> BADC \ * T1 = x4, x5, x6, x7 -> x5, x4, x7, x6 \ * T2 = ABCD -> CDAB \ * T2 = y0, y1, y2, y3, -> y2, y3, y0, y1 \ * T3 = ABCD -> DCBA \ * T3 = y4, y5, y6, y7 -> y7, y6, y5, y4 \ */ \ aria_diff_word(x0, x1, x2, x3, \ x5, x4, x7, x6, \ y2, y3, y0, y1, \ y7, y6, y5, y4); \ aria_store_state_8way(x3, x2, x1, x0, \ x6, x7, x4, x5, \ mem_tmp, 0); #define aria_ff_gfni(x0, x1, x2, x3, \ x4, x5, x6, x7, \ y0, y1, y2, y3, \ y4, y5, y6, y7, \ mem_tmp, rk, round, last_round) \ aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ y0, rk, 8, round); \ \ aria_sbox_8way_gfni(x2, x3, x0, x1, \ x6, x7, x4, x5, \ y0, y1, y2, y3, \ y4, y5, y6, y7); \ \ aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ y0, rk, 8, last_round); \ \ aria_store_state_8way(x0, x1, x2, x3, \ x4, x5, x6, x7, \ mem_tmp, 8); \ \ aria_load_state_8way(x0, x1, x2, x3, \ x4, x5, x6, x7, \ mem_tmp, 0); \ aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ y0, rk, 0, round); \ \ aria_sbox_8way_gfni(x2, x3, x0, x1, \ x6, x7, x4, x5, \ y0, y1, y2, y3, \ y4, y5, y6, y7); \ \ aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ y0, rk, 0, last_round); \ \ aria_load_state_8way(y0, y1, y2, y3, \ y4, y5, y6, y7, \ mem_tmp, 8); /* NB: section is mergeable, all elements must be aligned 16-byte blocks */ .section .rodata.cst16, "aM", @progbits, 16 .align 16 #define SHUFB_BYTES(idx) \ 0 + (idx), 4 + (idx), 8 + (idx), 12 + (idx) .Lshufb_16x16b: .byte SHUFB_BYTES(0), SHUFB_BYTES(1), SHUFB_BYTES(2), SHUFB_BYTES(3); /* For isolating SubBytes from AESENCLAST, inverse shift row */ .Linv_shift_row: .byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b .byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03 .Lshift_row: .byte 0x00, 0x05, 0x0a, 0x0f, 0x04, 0x09, 0x0e, 0x03 .byte 0x08, 0x0d, 0x02, 0x07, 0x0c, 
0x01, 0x06, 0x0b /* For CTR-mode IV byteswap */ .Lbswap128_mask: .byte 0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08 .byte 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00 /* AES inverse affine and S2 combined: * 1 1 0 0 0 0 0 1 x0 0 * 0 1 0 0 1 0 0 0 x1 0 * 1 1 0 0 1 1 1 1 x2 0 * 0 1 1 0 1 0 0 1 x3 1 * 0 1 0 0 1 1 0 0 * x4 + 0 * 0 1 0 1 1 0 0 0 x5 0 * 0 0 0 0 0 1 0 1 x6 0 * 1 1 1 0 0 1 1 1 x7 1 */ .Ltf_lo__inv_aff__and__s2: .octa 0x92172DA81A9FA520B2370D883ABF8500 .Ltf_hi__inv_aff__and__s2: .octa 0x2B15FFC1AF917B45E6D8320C625CB688 /* X2 and AES forward affine combined: * 1 0 1 1 0 0 0 1 x0 0 * 0 1 1 1 1 0 1 1 x1 0 * 0 0 0 1 1 0 1 0 x2 1 * 0 1 0 0 0 1 0 0 x3 0 * 0 0 1 1 1 0 1 1 * x4 + 0 * 0 1 0 0 1 0 0 0 x5 0 * 1 1 0 1 0 0 1 1 x6 0 * 0 1 0 0 1 0 1 0 x7 0 */ .Ltf_lo__x2__and__fwd_aff: .octa 0xEFAE0544FCBD1657B8F95213ABEA4100 .Ltf_hi__x2__and__fwd_aff: .octa 0x3F893781E95FE1576CDA64D2BA0CB204 .section .rodata.cst8, "aM", @progbits, 8 .align 8 /* AES affine: */ #define tf_aff_const BV8(1, 1, 0, 0, 0, 1, 1, 0) .Ltf_aff_bitmatrix: .quad BM8X8(BV8(1, 0, 0, 0, 1, 1, 1, 1), BV8(1, 1, 0, 0, 0, 1, 1, 1), BV8(1, 1, 1, 0, 0, 0, 1, 1), BV8(1, 1, 1, 1, 0, 0, 0, 1), BV8(1, 1, 1, 1, 1, 0, 0, 0), BV8(0, 1, 1, 1, 1, 1, 0, 0), BV8(0, 0, 1, 1, 1, 1, 1, 0), BV8(0, 0, 0, 1, 1, 1, 1, 1)) /* AES inverse affine: */ #define tf_inv_const BV8(1, 0, 1, 0, 0, 0, 0, 0) .Ltf_inv_bitmatrix: .quad BM8X8(BV8(0, 0, 1, 0, 0, 1, 0, 1), BV8(1, 0, 0, 1, 0, 0, 1, 0), BV8(0, 1, 0, 0, 1, 0, 0, 1), BV8(1, 0, 1, 0, 0, 1, 0, 0), BV8(0, 1, 0, 1, 0, 0, 1, 0), BV8(0, 0, 1, 0, 1, 0, 0, 1), BV8(1, 0, 0, 1, 0, 1, 0, 0), BV8(0, 1, 0, 0, 1, 0, 1, 0)) /* S2: */ #define tf_s2_const BV8(0, 1, 0, 0, 0, 1, 1, 1) .Ltf_s2_bitmatrix: .quad BM8X8(BV8(0, 1, 0, 1, 0, 1, 1, 1), BV8(0, 0, 1, 1, 1, 1, 1, 1), BV8(1, 1, 1, 0, 1, 1, 0, 1), BV8(1, 1, 0, 0, 0, 0, 1, 1), BV8(0, 1, 0, 0, 0, 0, 1, 1), BV8(1, 1, 0, 0, 1, 1, 1, 0), BV8(0, 1, 1, 0, 0, 0, 1, 1), BV8(1, 1, 1, 1, 0, 1, 1, 0)) /* X2: */ #define tf_x2_const BV8(0, 0, 1, 1, 0, 1, 0, 0) .Ltf_x2_bitmatrix: .quad BM8X8(BV8(0, 0, 0, 1, 1, 0, 0, 0), BV8(0, 0, 1, 0, 0, 1, 1, 0), BV8(0, 0, 0, 0, 1, 0, 1, 0), BV8(1, 1, 1, 0, 0, 0, 1, 1), BV8(1, 1, 1, 0, 1, 1, 0, 0), BV8(0, 1, 1, 0, 1, 0, 1, 1), BV8(1, 0, 1, 1, 1, 1, 0, 1), BV8(1, 0, 0, 1, 0, 0, 1, 1)) /* Identity matrix: */ .Ltf_id_bitmatrix: .quad BM8X8(BV8(1, 0, 0, 0, 0, 0, 0, 0), BV8(0, 1, 0, 0, 0, 0, 0, 0), BV8(0, 0, 1, 0, 0, 0, 0, 0), BV8(0, 0, 0, 1, 0, 0, 0, 0), BV8(0, 0, 0, 0, 1, 0, 0, 0), BV8(0, 0, 0, 0, 0, 1, 0, 0), BV8(0, 0, 0, 0, 0, 0, 1, 0), BV8(0, 0, 0, 0, 0, 0, 0, 1)) /* 4-bit mask */ .section .rodata.cst4.L0f0f0f0f, "aM", @progbits, 4 .align 4 .L0f0f0f0f: .long 0x0f0f0f0f .text SYM_FUNC_START_LOCAL(__aria_aesni_avx_crypt_16way) /* input: * %r9: rk * %rsi: dst * %rdx: src * %xmm0..%xmm15: 16 byte-sliced blocks */ FRAME_BEGIN movq %rsi, %rax; leaq 8 * 16(%rax), %r8; inpack16_post(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %r8); aria_fo(%xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %rax, %r9, 0); aria_fe(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %r9, 1); aria_fo(%xmm9, %xmm8, %xmm11, %xmm10, %xmm12, %xmm13, %xmm14, %xmm15, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %rax, %r9, 2); aria_fe(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, 
%r9, 3); aria_fo(%xmm9, %xmm8, %xmm11, %xmm10, %xmm12, %xmm13, %xmm14, %xmm15, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %rax, %r9, 4); aria_fe(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %r9, 5); aria_fo(%xmm9, %xmm8, %xmm11, %xmm10, %xmm12, %xmm13, %xmm14, %xmm15, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %rax, %r9, 6); aria_fe(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %r9, 7); aria_fo(%xmm9, %xmm8, %xmm11, %xmm10, %xmm12, %xmm13, %xmm14, %xmm15, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %rax, %r9, 8); aria_fe(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %r9, 9); aria_fo(%xmm9, %xmm8, %xmm11, %xmm10, %xmm12, %xmm13, %xmm14, %xmm15, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %rax, %r9, 10); cmpl $12, rounds(CTX); jne .Laria_192; aria_ff(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %r9, 11, 12); jmp .Laria_end; .Laria_192: aria_fe(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %r9, 11); aria_fo(%xmm9, %xmm8, %xmm11, %xmm10, %xmm12, %xmm13, %xmm14, %xmm15, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %rax, %r9, 12); cmpl $14, rounds(CTX); jne .Laria_256; aria_ff(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %r9, 13, 14); jmp .Laria_end; .Laria_256: aria_fe(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %r9, 13); aria_fo(%xmm9, %xmm8, %xmm11, %xmm10, %xmm12, %xmm13, %xmm14, %xmm15, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %rax, %r9, 14); aria_ff(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %r9, 15, 16); .Laria_end: debyteslice_16x16b(%xmm8, %xmm12, %xmm1, %xmm4, %xmm9, %xmm13, %xmm0, %xmm5, %xmm10, %xmm14, %xmm3, %xmm6, %xmm11, %xmm15, %xmm2, %xmm7, (%rax), (%r8)); FRAME_END RET; SYM_FUNC_END(__aria_aesni_avx_crypt_16way) SYM_TYPED_FUNC_START(aria_aesni_avx_encrypt_16way) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src */ FRAME_BEGIN leaq enc_key(CTX), %r9; inpack16_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rdx); call __aria_aesni_avx_crypt_16way; write_output(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax); FRAME_END RET; SYM_FUNC_END(aria_aesni_avx_encrypt_16way) SYM_TYPED_FUNC_START(aria_aesni_avx_decrypt_16way) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src */ FRAME_BEGIN leaq dec_key(CTX), %r9; inpack16_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rdx); call __aria_aesni_avx_crypt_16way; write_output(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax); FRAME_END RET; SYM_FUNC_END(aria_aesni_avx_decrypt_16way) SYM_FUNC_START_LOCAL(__aria_aesni_avx_ctr_gen_keystream_16way) /* input: * %rdi: ctx * %rsi: dst * %rdx: src * %rcx: keystream * %r8: iv (big endian, 128bit) */ FRAME_BEGIN 
/* load IV and byteswap */ vmovdqu (%r8), %xmm8; vmovdqa .Lbswap128_mask (%rip), %xmm1; vpshufb %xmm1, %xmm8, %xmm3; /* be => le */ vpcmpeqd %xmm0, %xmm0, %xmm0; vpsrldq $8, %xmm0, %xmm0; /* low: -1, high: 0 */ /* construct IVs */ inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ vpshufb %xmm1, %xmm3, %xmm9; inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ vpshufb %xmm1, %xmm3, %xmm10; inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ vpshufb %xmm1, %xmm3, %xmm11; inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ vpshufb %xmm1, %xmm3, %xmm12; inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ vpshufb %xmm1, %xmm3, %xmm13; inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ vpshufb %xmm1, %xmm3, %xmm14; inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ vpshufb %xmm1, %xmm3, %xmm15; vmovdqu %xmm8, (0 * 16)(%rcx); vmovdqu %xmm9, (1 * 16)(%rcx); vmovdqu %xmm10, (2 * 16)(%rcx); vmovdqu %xmm11, (3 * 16)(%rcx); vmovdqu %xmm12, (4 * 16)(%rcx); vmovdqu %xmm13, (5 * 16)(%rcx); vmovdqu %xmm14, (6 * 16)(%rcx); vmovdqu %xmm15, (7 * 16)(%rcx); inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ vpshufb %xmm1, %xmm3, %xmm8; inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ vpshufb %xmm1, %xmm3, %xmm9; inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ vpshufb %xmm1, %xmm3, %xmm10; inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ vpshufb %xmm1, %xmm3, %xmm11; inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ vpshufb %xmm1, %xmm3, %xmm12; inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ vpshufb %xmm1, %xmm3, %xmm13; inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ vpshufb %xmm1, %xmm3, %xmm14; inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ vpshufb %xmm1, %xmm3, %xmm15; inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ vpshufb %xmm1, %xmm3, %xmm4; vmovdqu %xmm4, (%r8); vmovdqu (0 * 16)(%rcx), %xmm0; vmovdqu (1 * 16)(%rcx), %xmm1; vmovdqu (2 * 16)(%rcx), %xmm2; vmovdqu (3 * 16)(%rcx), %xmm3; vmovdqu (4 * 16)(%rcx), %xmm4; vmovdqu (5 * 16)(%rcx), %xmm5; vmovdqu (6 * 16)(%rcx), %xmm6; vmovdqu (7 * 16)(%rcx), %xmm7; FRAME_END RET; SYM_FUNC_END(__aria_aesni_avx_ctr_gen_keystream_16way) SYM_TYPED_FUNC_START(aria_aesni_avx_ctr_crypt_16way) /* input: * %rdi: ctx * %rsi: dst * %rdx: src * %rcx: keystream * %r8: iv (big endian, 128bit) */ FRAME_BEGIN call __aria_aesni_avx_ctr_gen_keystream_16way; leaq (%rsi), %r10; leaq (%rdx), %r11; leaq (%rcx), %rsi; leaq (%rcx), %rdx; leaq enc_key(CTX), %r9; call __aria_aesni_avx_crypt_16way; vpxor (0 * 16)(%r11), %xmm1, %xmm1; vpxor (1 * 16)(%r11), %xmm0, %xmm0; vpxor (2 * 16)(%r11), %xmm3, %xmm3; vpxor (3 * 16)(%r11), %xmm2, %xmm2; vpxor (4 * 16)(%r11), %xmm4, %xmm4; vpxor (5 * 16)(%r11), %xmm5, %xmm5; vpxor (6 * 16)(%r11), %xmm6, %xmm6; vpxor (7 * 16)(%r11), %xmm7, %xmm7; vpxor (8 * 16)(%r11), %xmm8, %xmm8; vpxor (9 * 16)(%r11), %xmm9, %xmm9; vpxor (10 * 16)(%r11), %xmm10, %xmm10; vpxor (11 * 16)(%r11), %xmm11, %xmm11; vpxor (12 * 16)(%r11), %xmm12, %xmm12; vpxor (13 * 16)(%r11), %xmm13, %xmm13; vpxor (14 * 16)(%r11), %xmm14, %xmm14; vpxor (15 * 16)(%r11), %xmm15, %xmm15; write_output(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %r10); FRAME_END RET; SYM_FUNC_END(aria_aesni_avx_ctr_crypt_16way) SYM_FUNC_START_LOCAL(__aria_aesni_avx_gfni_crypt_16way) /* input: * %r9: rk * %rsi: dst * %rdx: src * %xmm0..%xmm15: 16 byte-sliced blocks */ FRAME_BEGIN movq %rsi, %rax; leaq 8 * 16(%rax), %r8; inpack16_post(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %r8); aria_fo_gfni(%xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, 
%xmm6, %xmm7, %rax, %r9, 0); aria_fe_gfni(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %r9, 1); aria_fo_gfni(%xmm9, %xmm8, %xmm11, %xmm10, %xmm12, %xmm13, %xmm14, %xmm15, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %rax, %r9, 2); aria_fe_gfni(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %r9, 3); aria_fo_gfni(%xmm9, %xmm8, %xmm11, %xmm10, %xmm12, %xmm13, %xmm14, %xmm15, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %rax, %r9, 4); aria_fe_gfni(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %r9, 5); aria_fo_gfni(%xmm9, %xmm8, %xmm11, %xmm10, %xmm12, %xmm13, %xmm14, %xmm15, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %rax, %r9, 6); aria_fe_gfni(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %r9, 7); aria_fo_gfni(%xmm9, %xmm8, %xmm11, %xmm10, %xmm12, %xmm13, %xmm14, %xmm15, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %rax, %r9, 8); aria_fe_gfni(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %r9, 9); aria_fo_gfni(%xmm9, %xmm8, %xmm11, %xmm10, %xmm12, %xmm13, %xmm14, %xmm15, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %rax, %r9, 10); cmpl $12, rounds(CTX); jne .Laria_gfni_192; aria_ff_gfni(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %r9, 11, 12); jmp .Laria_gfni_end; .Laria_gfni_192: aria_fe_gfni(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %r9, 11); aria_fo_gfni(%xmm9, %xmm8, %xmm11, %xmm10, %xmm12, %xmm13, %xmm14, %xmm15, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %rax, %r9, 12); cmpl $14, rounds(CTX); jne .Laria_gfni_256; aria_ff_gfni(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %r9, 13, 14); jmp .Laria_gfni_end; .Laria_gfni_256: aria_fe_gfni(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %r9, 13); aria_fo_gfni(%xmm9, %xmm8, %xmm11, %xmm10, %xmm12, %xmm13, %xmm14, %xmm15, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %rax, %r9, 14); aria_ff_gfni(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %r9, 15, 16); .Laria_gfni_end: debyteslice_16x16b(%xmm8, %xmm12, %xmm1, %xmm4, %xmm9, %xmm13, %xmm0, %xmm5, %xmm10, %xmm14, %xmm3, %xmm6, %xmm11, %xmm15, %xmm2, %xmm7, (%rax), (%r8)); FRAME_END RET; SYM_FUNC_END(__aria_aesni_avx_gfni_crypt_16way) SYM_TYPED_FUNC_START(aria_aesni_avx_gfni_encrypt_16way) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src */ FRAME_BEGIN leaq enc_key(CTX), %r9; inpack16_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rdx); call __aria_aesni_avx_gfni_crypt_16way; write_output(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax); FRAME_END RET; SYM_FUNC_END(aria_aesni_avx_gfni_encrypt_16way) SYM_TYPED_FUNC_START(aria_aesni_avx_gfni_decrypt_16way) /* input: * %rdi: ctx, CTX * %rsi: dst * 
%rdx: src */ FRAME_BEGIN leaq dec_key(CTX), %r9; inpack16_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rdx); call __aria_aesni_avx_gfni_crypt_16way; write_output(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax); FRAME_END RET; SYM_FUNC_END(aria_aesni_avx_gfni_decrypt_16way) SYM_TYPED_FUNC_START(aria_aesni_avx_gfni_ctr_crypt_16way) /* input: * %rdi: ctx * %rsi: dst * %rdx: src * %rcx: keystream * %r8: iv (big endian, 128bit) */ FRAME_BEGIN call __aria_aesni_avx_ctr_gen_keystream_16way leaq (%rsi), %r10; leaq (%rdx), %r11; leaq (%rcx), %rsi; leaq (%rcx), %rdx; leaq enc_key(CTX), %r9; call __aria_aesni_avx_gfni_crypt_16way; vpxor (0 * 16)(%r11), %xmm1, %xmm1; vpxor (1 * 16)(%r11), %xmm0, %xmm0; vpxor (2 * 16)(%r11), %xmm3, %xmm3; vpxor (3 * 16)(%r11), %xmm2, %xmm2; vpxor (4 * 16)(%r11), %xmm4, %xmm4; vpxor (5 * 16)(%r11), %xmm5, %xmm5; vpxor (6 * 16)(%r11), %xmm6, %xmm6; vpxor (7 * 16)(%r11), %xmm7, %xmm7; vpxor (8 * 16)(%r11), %xmm8, %xmm8; vpxor (9 * 16)(%r11), %xmm9, %xmm9; vpxor (10 * 16)(%r11), %xmm10, %xmm10; vpxor (11 * 16)(%r11), %xmm11, %xmm11; vpxor (12 * 16)(%r11), %xmm12, %xmm12; vpxor (13 * 16)(%r11), %xmm13, %xmm13; vpxor (14 * 16)(%r11), %xmm14, %xmm14; vpxor (15 * 16)(%r11), %xmm15, %xmm15; write_output(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %r10); FRAME_END RET; SYM_FUNC_END(aria_aesni_avx_gfni_ctr_crypt_16way)
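The CTR entry points above (aria_aesni_avx_ctr_crypt_16way and its GFNI variant) generate 16 keystream blocks per call by treating the 128-bit IV as a big-endian counter: the IV is byte-swapped with .Lbswap128_mask, stepped with inc_le128, swapped back, encrypted, and XORed into the source data. A minimal serial C sketch of that pattern follows; aria_ctr_crypt_ref and the aria_encrypt_block callback are made-up names for illustration, not kernel APIs.

/*
 * Serial sketch of the CTR pattern used by aria_aesni_avx_ctr_crypt_16way:
 * encrypt successive values of a 128-bit big-endian counter and XOR the
 * result with the input. aria_encrypt_block() is a stand-in for any
 * single-block ARIA primitive.
 */
#include <stddef.h>
#include <stdint.h>

static void ctr128_inc_be(uint8_t ctr[16])
{
	for (int i = 15; i >= 0; i--)
		if (++ctr[i] != 0)		/* propagate carry toward byte 0 */
			break;
}

void aria_ctr_crypt_ref(void (*aria_encrypt_block)(const void *ctx,
						    uint8_t out[16],
						    const uint8_t in[16]),
			const void *ctx, uint8_t *dst, const uint8_t *src,
			size_t nblocks, uint8_t iv[16])
{
	uint8_t ks[16];

	for (size_t n = 0; n < nblocks; n++) {
		aria_encrypt_block(ctx, ks, iv);	/* keystream block */
		for (int i = 0; i < 16; i++)
			dst[16 * n + i] = src[16 * n + i] ^ ks[i];
		ctr128_inc_be(iv);			/* advance counter for next block */
	}
}

The assembly differs only in handling 16 counter values and the final XOR in parallel, and in writing the 16-times-incremented counter back through %r8 as the next IV.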
aixcc-public/challenge-001-exemplar-source
11,133
arch/x86/crypto/crct10dif-pcl-asm_64.S
######################################################################## # Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions # # Copyright (c) 2013, Intel Corporation # # Authors: # Erdinc Ozturk <erdinc.ozturk@intel.com> # Vinodh Gopal <vinodh.gopal@intel.com> # James Guilford <james.guilford@intel.com> # Tim Chen <tim.c.chen@linux.intel.com> # # This software is available to you under a choice of one of two # licenses. You may choose to be licensed under the terms of the GNU # General Public License (GPL) Version 2, available from the file # COPYING in the main directory of this source tree, or the # OpenIB.org BSD license below: # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the # distribution. # # * Neither the name of the Intel Corporation nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # # THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION ""AS IS"" AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Reference paper titled "Fast CRC Computation for Generic # Polynomials Using PCLMULQDQ Instruction" # URL: http://www.intel.com/content/dam/www/public/us/en/documents # /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf # #include <linux/linkage.h> .text #define init_crc %edi #define buf %rsi #define len %rdx #define FOLD_CONSTS %xmm10 #define BSWAP_MASK %xmm11 # Fold reg1, reg2 into the next 32 data bytes, storing the result back into # reg1, reg2. .macro fold_32_bytes offset, reg1, reg2 movdqu \offset(buf), %xmm9 movdqu \offset+16(buf), %xmm12 pshufb BSWAP_MASK, %xmm9 pshufb BSWAP_MASK, %xmm12 movdqa \reg1, %xmm8 movdqa \reg2, %xmm13 pclmulqdq $0x00, FOLD_CONSTS, \reg1 pclmulqdq $0x11, FOLD_CONSTS, %xmm8 pclmulqdq $0x00, FOLD_CONSTS, \reg2 pclmulqdq $0x11, FOLD_CONSTS, %xmm13 pxor %xmm9 , \reg1 xorps %xmm8 , \reg1 pxor %xmm12, \reg2 xorps %xmm13, \reg2 .endm # Fold src_reg into dst_reg. .macro fold_16_bytes src_reg, dst_reg movdqa \src_reg, %xmm8 pclmulqdq $0x11, FOLD_CONSTS, \src_reg pclmulqdq $0x00, FOLD_CONSTS, %xmm8 pxor %xmm8, \dst_reg xorps \src_reg, \dst_reg .endm # # u16 crc_t10dif_pcl(u16 init_crc, const *u8 buf, size_t len); # # Assumes len >= 16. # .align 16 SYM_FUNC_START(crc_t10dif_pcl) movdqa .Lbswap_mask(%rip), BSWAP_MASK # For sizes less than 256 bytes, we can't fold 128 bytes at a time. cmp $256, len jl .Lless_than_256_bytes # Load the first 128 data bytes. 
Byte swapping is necessary to make the # bit order match the polynomial coefficient order. movdqu 16*0(buf), %xmm0 movdqu 16*1(buf), %xmm1 movdqu 16*2(buf), %xmm2 movdqu 16*3(buf), %xmm3 movdqu 16*4(buf), %xmm4 movdqu 16*5(buf), %xmm5 movdqu 16*6(buf), %xmm6 movdqu 16*7(buf), %xmm7 add $128, buf pshufb BSWAP_MASK, %xmm0 pshufb BSWAP_MASK, %xmm1 pshufb BSWAP_MASK, %xmm2 pshufb BSWAP_MASK, %xmm3 pshufb BSWAP_MASK, %xmm4 pshufb BSWAP_MASK, %xmm5 pshufb BSWAP_MASK, %xmm6 pshufb BSWAP_MASK, %xmm7 # XOR the first 16 data *bits* with the initial CRC value. pxor %xmm8, %xmm8 pinsrw $7, init_crc, %xmm8 pxor %xmm8, %xmm0 movdqa .Lfold_across_128_bytes_consts(%rip), FOLD_CONSTS # Subtract 128 for the 128 data bytes just consumed. Subtract another # 128 to simplify the termination condition of the following loop. sub $256, len # While >= 128 data bytes remain (not counting xmm0-7), fold the 128 # bytes xmm0-7 into them, storing the result back into xmm0-7. .Lfold_128_bytes_loop: fold_32_bytes 0, %xmm0, %xmm1 fold_32_bytes 32, %xmm2, %xmm3 fold_32_bytes 64, %xmm4, %xmm5 fold_32_bytes 96, %xmm6, %xmm7 add $128, buf sub $128, len jge .Lfold_128_bytes_loop # Now fold the 112 bytes in xmm0-xmm6 into the 16 bytes in xmm7. # Fold across 64 bytes. movdqa .Lfold_across_64_bytes_consts(%rip), FOLD_CONSTS fold_16_bytes %xmm0, %xmm4 fold_16_bytes %xmm1, %xmm5 fold_16_bytes %xmm2, %xmm6 fold_16_bytes %xmm3, %xmm7 # Fold across 32 bytes. movdqa .Lfold_across_32_bytes_consts(%rip), FOLD_CONSTS fold_16_bytes %xmm4, %xmm6 fold_16_bytes %xmm5, %xmm7 # Fold across 16 bytes. movdqa .Lfold_across_16_bytes_consts(%rip), FOLD_CONSTS fold_16_bytes %xmm6, %xmm7 # Add 128 to get the correct number of data bytes remaining in 0...127 # (not counting xmm7), following the previous extra subtraction by 128. # Then subtract 16 to simplify the termination condition of the # following loop. add $128-16, len # While >= 16 data bytes remain (not counting xmm7), fold the 16 bytes # xmm7 into them, storing the result back into xmm7. jl .Lfold_16_bytes_loop_done .Lfold_16_bytes_loop: movdqa %xmm7, %xmm8 pclmulqdq $0x11, FOLD_CONSTS, %xmm7 pclmulqdq $0x00, FOLD_CONSTS, %xmm8 pxor %xmm8, %xmm7 movdqu (buf), %xmm0 pshufb BSWAP_MASK, %xmm0 pxor %xmm0 , %xmm7 add $16, buf sub $16, len jge .Lfold_16_bytes_loop .Lfold_16_bytes_loop_done: # Add 16 to get the correct number of data bytes remaining in 0...15 # (not counting xmm7), following the previous extra subtraction by 16. add $16, len je .Lreduce_final_16_bytes .Lhandle_partial_segment: # Reduce the last '16 + len' bytes where 1 <= len <= 15 and the first 16 # bytes are in xmm7 and the rest are the remaining data in 'buf'. To do # this without needing a fold constant for each possible 'len', redivide # the bytes into a first chunk of 'len' bytes and a second chunk of 16 # bytes, then fold the first chunk into the second. movdqa %xmm7, %xmm2 # xmm1 = last 16 original data bytes movdqu -16(buf, len), %xmm1 pshufb BSWAP_MASK, %xmm1 # xmm2 = high order part of second chunk: xmm7 left-shifted by 'len' bytes. lea .Lbyteshift_table+16(%rip), %rax sub len, %rax movdqu (%rax), %xmm0 pshufb %xmm0, %xmm2 # xmm7 = first chunk: xmm7 right-shifted by '16-len' bytes. pxor .Lmask1(%rip), %xmm0 pshufb %xmm0, %xmm7 # xmm1 = second chunk: 'len' bytes from xmm1 (low-order bytes), # then '16-len' bytes from xmm2 (high-order bytes). pblendvb %xmm2, %xmm1 #xmm0 is implicit # Fold the first chunk into the second chunk, storing the result in xmm7. 
movdqa %xmm7, %xmm8 pclmulqdq $0x11, FOLD_CONSTS, %xmm7 pclmulqdq $0x00, FOLD_CONSTS, %xmm8 pxor %xmm8, %xmm7 pxor %xmm1, %xmm7 .Lreduce_final_16_bytes: # Reduce the 128-bit value M(x), stored in xmm7, to the final 16-bit CRC # Load 'x^48 * (x^48 mod G(x))' and 'x^48 * (x^80 mod G(x))'. movdqa .Lfinal_fold_consts(%rip), FOLD_CONSTS # Fold the high 64 bits into the low 64 bits, while also multiplying by # x^64. This produces a 128-bit value congruent to x^64 * M(x) and # whose low 48 bits are 0. movdqa %xmm7, %xmm0 pclmulqdq $0x11, FOLD_CONSTS, %xmm7 # high bits * x^48 * (x^80 mod G(x)) pslldq $8, %xmm0 pxor %xmm0, %xmm7 # + low bits * x^64 # Fold the high 32 bits into the low 96 bits. This produces a 96-bit # value congruent to x^64 * M(x) and whose low 48 bits are 0. movdqa %xmm7, %xmm0 pand .Lmask2(%rip), %xmm0 # zero high 32 bits psrldq $12, %xmm7 # extract high 32 bits pclmulqdq $0x00, FOLD_CONSTS, %xmm7 # high 32 bits * x^48 * (x^48 mod G(x)) pxor %xmm0, %xmm7 # + low bits # Load G(x) and floor(x^48 / G(x)). movdqa .Lbarrett_reduction_consts(%rip), FOLD_CONSTS # Use Barrett reduction to compute the final CRC value. movdqa %xmm7, %xmm0 pclmulqdq $0x11, FOLD_CONSTS, %xmm7 # high 32 bits * floor(x^48 / G(x)) psrlq $32, %xmm7 # /= x^32 pclmulqdq $0x00, FOLD_CONSTS, %xmm7 # *= G(x) psrlq $48, %xmm0 pxor %xmm7, %xmm0 # + low 16 nonzero bits # Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of xmm0. pextrw $0, %xmm0, %eax RET .align 16 .Lless_than_256_bytes: # Checksumming a buffer of length 16...255 bytes # Load the first 16 data bytes. movdqu (buf), %xmm7 pshufb BSWAP_MASK, %xmm7 add $16, buf # XOR the first 16 data *bits* with the initial CRC value. pxor %xmm0, %xmm0 pinsrw $7, init_crc, %xmm0 pxor %xmm0, %xmm7 movdqa .Lfold_across_16_bytes_consts(%rip), FOLD_CONSTS cmp $16, len je .Lreduce_final_16_bytes # len == 16 sub $32, len jge .Lfold_16_bytes_loop # 32 <= len <= 255 add $16, len jmp .Lhandle_partial_segment # 17 <= len <= 31 SYM_FUNC_END(crc_t10dif_pcl) .section .rodata, "a", @progbits .align 16 # Fold constants precomputed from the polynomial 0x18bb7 # G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + x^0 .Lfold_across_128_bytes_consts: .quad 0x0000000000006123 # x^(8*128) mod G(x) .quad 0x0000000000002295 # x^(8*128+64) mod G(x) .Lfold_across_64_bytes_consts: .quad 0x0000000000001069 # x^(4*128) mod G(x) .quad 0x000000000000dd31 # x^(4*128+64) mod G(x) .Lfold_across_32_bytes_consts: .quad 0x000000000000857d # x^(2*128) mod G(x) .quad 0x0000000000007acc # x^(2*128+64) mod G(x) .Lfold_across_16_bytes_consts: .quad 0x000000000000a010 # x^(1*128) mod G(x) .quad 0x0000000000001faa # x^(1*128+64) mod G(x) .Lfinal_fold_consts: .quad 0x1368000000000000 # x^48 * (x^48 mod G(x)) .quad 0x2d56000000000000 # x^48 * (x^80 mod G(x)) .Lbarrett_reduction_consts: .quad 0x0000000000018bb7 # G(x) .quad 0x00000001f65a57f8 # floor(x^48 / G(x)) .section .rodata.cst16.mask1, "aM", @progbits, 16 .align 16 .Lmask1: .octa 0x80808080808080808080808080808080 .section .rodata.cst16.mask2, "aM", @progbits, 16 .align 16 .Lmask2: .octa 0x00000000FFFFFFFFFFFFFFFFFFFFFFFF .section .rodata.cst16.bswap_mask, "aM", @progbits, 16 .align 16 .Lbswap_mask: .octa 0x000102030405060708090A0B0C0D0E0F .section .rodata.cst32.byteshift_table, "aM", @progbits, 32 .align 16 # For 1 <= len <= 15, the 16-byte vector beginning at &byteshift_table[16 - len] # is the index vector to shift left by 'len' bytes, and is also {0x80, ..., # 0x80} XOR the index vector to shift right by '16 - len' bytes. 
.Lbyteshift_table: .byte 0x0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87 .byte 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f .byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7 .byte 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0x0
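All of the fold constants above are derived from G(x) = 0x18bb7, so a bit-at-a-time reference is convenient when sanity-checking the PCLMULQDQ path (the routine implemented here corresponds to u16 crc_t10dif_pcl(u16 init_crc, const u8 *buf, size_t len)). A minimal C sketch, with crc_t10dif_ref as a made-up name:

/*
 * Bit-at-a-time CRC-T10DIF reference: 16-bit CRC, polynomial 0x8BB7
 * (G(x) = 0x18BB7 including the x^16 term), MSB-first, no reflection,
 * no final XOR. Intended only as a cross-check for the fast version.
 */
#include <stddef.h>
#include <stdint.h>

uint16_t crc_t10dif_ref(uint16_t crc, const uint8_t *buf, size_t len)
{
	for (size_t i = 0; i < len; i++) {
		crc ^= (uint16_t)buf[i] << 8;	/* feed the next byte, MSB first */
		for (int bit = 0; bit < 8; bit++)
			crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x8BB7)
					     : (uint16_t)(crc << 1);
	}
	return crc;
}

For the usual catalogue test string "123456789" with init_crc = 0 this is commonly listed as producing 0xd0db, and the assembly routine should agree with the reference for any len >= 16.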
aixcc-public/challenge-001-exemplar-source
10,447
arch/x86/crypto/camellia-x86_64-asm_64.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Camellia Cipher Algorithm (x86_64) * * Copyright (C) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> */ #include <linux/linkage.h> .file "camellia-x86_64-asm_64.S" .text .extern camellia_sp10011110; .extern camellia_sp22000222; .extern camellia_sp03303033; .extern camellia_sp00444404; .extern camellia_sp02220222; .extern camellia_sp30333033; .extern camellia_sp44044404; .extern camellia_sp11101110; #define sp10011110 camellia_sp10011110 #define sp22000222 camellia_sp22000222 #define sp03303033 camellia_sp03303033 #define sp00444404 camellia_sp00444404 #define sp02220222 camellia_sp02220222 #define sp30333033 camellia_sp30333033 #define sp44044404 camellia_sp44044404 #define sp11101110 camellia_sp11101110 #define CAMELLIA_TABLE_BYTE_LEN 272 /* struct camellia_ctx: */ #define key_table 0 #define key_length CAMELLIA_TABLE_BYTE_LEN /* register macros */ #define CTX %rdi #define RIO %rsi #define RIOd %esi #define RAB0 %rax #define RCD0 %rcx #define RAB1 %rbx #define RCD1 %rdx #define RAB0d %eax #define RCD0d %ecx #define RAB1d %ebx #define RCD1d %edx #define RAB0bl %al #define RCD0bl %cl #define RAB1bl %bl #define RCD1bl %dl #define RAB0bh %ah #define RCD0bh %ch #define RAB1bh %bh #define RCD1bh %dh #define RT0 %rsi #define RT1 %r12 #define RT2 %r8 #define RT0d %esi #define RT1d %r12d #define RT2d %r8d #define RT2bl %r8b #define RXOR %r9 #define RR12 %r10 #define RDST %r11 #define RXORd %r9d #define RXORbl %r9b #define xor2ror16(T0, T1, tmp1, tmp2, ab, dst) \ movzbl ab ## bl, tmp2 ## d; \ movzbl ab ## bh, tmp1 ## d; \ rorq $16, ab; \ xorq T0(, tmp2, 8), dst; \ xorq T1(, tmp1, 8), dst; /********************************************************************** 1-way camellia **********************************************************************/ #define roundsm(ab, subkey, cd) \ movq (key_table + ((subkey) * 2) * 4)(CTX), RT2; \ \ xor2ror16(sp00444404, sp03303033, RT0, RT1, ab ## 0, cd ## 0); \ xor2ror16(sp22000222, sp10011110, RT0, RT1, ab ## 0, RT2); \ xor2ror16(sp11101110, sp44044404, RT0, RT1, ab ## 0, cd ## 0); \ xor2ror16(sp30333033, sp02220222, RT0, RT1, ab ## 0, RT2); \ \ xorq RT2, cd ## 0; #define fls(l, r, kl, kr) \ movl (key_table + ((kl) * 2) * 4)(CTX), RT0d; \ andl l ## 0d, RT0d; \ roll $1, RT0d; \ shlq $32, RT0; \ xorq RT0, l ## 0; \ movq (key_table + ((kr) * 2) * 4)(CTX), RT1; \ orq r ## 0, RT1; \ shrq $32, RT1; \ xorq RT1, r ## 0; \ \ movq (key_table + ((kl) * 2) * 4)(CTX), RT2; \ orq l ## 0, RT2; \ shrq $32, RT2; \ xorq RT2, l ## 0; \ movl (key_table + ((kr) * 2) * 4)(CTX), RT0d; \ andl r ## 0d, RT0d; \ roll $1, RT0d; \ shlq $32, RT0; \ xorq RT0, r ## 0; #define enc_rounds(i) \ roundsm(RAB, i + 2, RCD); \ roundsm(RCD, i + 3, RAB); \ roundsm(RAB, i + 4, RCD); \ roundsm(RCD, i + 5, RAB); \ roundsm(RAB, i + 6, RCD); \ roundsm(RCD, i + 7, RAB); #define enc_fls(i) \ fls(RAB, RCD, i + 0, i + 1); #define enc_inpack() \ movq (RIO), RAB0; \ bswapq RAB0; \ rolq $32, RAB0; \ movq 4*2(RIO), RCD0; \ bswapq RCD0; \ rorq $32, RCD0; \ xorq key_table(CTX), RAB0; #define enc_outunpack(op, max) \ xorq key_table(CTX, max, 8), RCD0; \ rorq $32, RCD0; \ bswapq RCD0; \ op ## q RCD0, (RIO); \ rolq $32, RAB0; \ bswapq RAB0; \ op ## q RAB0, 4*2(RIO); #define dec_rounds(i) \ roundsm(RAB, i + 7, RCD); \ roundsm(RCD, i + 6, RAB); \ roundsm(RAB, i + 5, RCD); \ roundsm(RCD, i + 4, RAB); \ roundsm(RAB, i + 3, RCD); \ roundsm(RCD, i + 2, RAB); #define dec_fls(i) \ fls(RAB, RCD, i + 1, i + 0); #define dec_inpack(max) \ movq (RIO), RAB0; \ bswapq RAB0; \ rolq $32, 
RAB0; \ movq 4*2(RIO), RCD0; \ bswapq RCD0; \ rorq $32, RCD0; \ xorq key_table(CTX, max, 8), RAB0; #define dec_outunpack() \ xorq key_table(CTX), RCD0; \ rorq $32, RCD0; \ bswapq RCD0; \ movq RCD0, (RIO); \ rolq $32, RAB0; \ bswapq RAB0; \ movq RAB0, 4*2(RIO); SYM_FUNC_START(__camellia_enc_blk) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src * %rcx: bool xor */ movq %r12, RR12; movq %rcx, RXOR; movq %rsi, RDST; movq %rdx, RIO; enc_inpack(); enc_rounds(0); enc_fls(8); enc_rounds(8); enc_fls(16); enc_rounds(16); movl $24, RT1d; /* max */ cmpb $16, key_length(CTX); je .L__enc_done; enc_fls(24); enc_rounds(24); movl $32, RT1d; /* max */ .L__enc_done: testb RXORbl, RXORbl; movq RDST, RIO; jnz .L__enc_xor; enc_outunpack(mov, RT1); movq RR12, %r12; RET; .L__enc_xor: enc_outunpack(xor, RT1); movq RR12, %r12; RET; SYM_FUNC_END(__camellia_enc_blk) SYM_FUNC_START(camellia_dec_blk) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src */ cmpl $16, key_length(CTX); movl $32, RT2d; movl $24, RXORd; cmovel RXORd, RT2d; /* max */ movq %r12, RR12; movq %rsi, RDST; movq %rdx, RIO; dec_inpack(RT2); cmpb $24, RT2bl; je .L__dec_rounds16; dec_rounds(24); dec_fls(24); .L__dec_rounds16: dec_rounds(16); dec_fls(16); dec_rounds(8); dec_fls(8); dec_rounds(0); movq RDST, RIO; dec_outunpack(); movq RR12, %r12; RET; SYM_FUNC_END(camellia_dec_blk) /********************************************************************** 2-way camellia **********************************************************************/ #define roundsm2(ab, subkey, cd) \ movq (key_table + ((subkey) * 2) * 4)(CTX), RT2; \ xorq RT2, cd ## 1; \ \ xor2ror16(sp00444404, sp03303033, RT0, RT1, ab ## 0, cd ## 0); \ xor2ror16(sp22000222, sp10011110, RT0, RT1, ab ## 0, RT2); \ xor2ror16(sp11101110, sp44044404, RT0, RT1, ab ## 0, cd ## 0); \ xor2ror16(sp30333033, sp02220222, RT0, RT1, ab ## 0, RT2); \ \ xor2ror16(sp00444404, sp03303033, RT0, RT1, ab ## 1, cd ## 1); \ xorq RT2, cd ## 0; \ xor2ror16(sp22000222, sp10011110, RT0, RT1, ab ## 1, cd ## 1); \ xor2ror16(sp11101110, sp44044404, RT0, RT1, ab ## 1, cd ## 1); \ xor2ror16(sp30333033, sp02220222, RT0, RT1, ab ## 1, cd ## 1); #define fls2(l, r, kl, kr) \ movl (key_table + ((kl) * 2) * 4)(CTX), RT0d; \ andl l ## 0d, RT0d; \ roll $1, RT0d; \ shlq $32, RT0; \ xorq RT0, l ## 0; \ movq (key_table + ((kr) * 2) * 4)(CTX), RT1; \ orq r ## 0, RT1; \ shrq $32, RT1; \ xorq RT1, r ## 0; \ \ movl (key_table + ((kl) * 2) * 4)(CTX), RT2d; \ andl l ## 1d, RT2d; \ roll $1, RT2d; \ shlq $32, RT2; \ xorq RT2, l ## 1; \ movq (key_table + ((kr) * 2) * 4)(CTX), RT0; \ orq r ## 1, RT0; \ shrq $32, RT0; \ xorq RT0, r ## 1; \ \ movq (key_table + ((kl) * 2) * 4)(CTX), RT1; \ orq l ## 0, RT1; \ shrq $32, RT1; \ xorq RT1, l ## 0; \ movl (key_table + ((kr) * 2) * 4)(CTX), RT2d; \ andl r ## 0d, RT2d; \ roll $1, RT2d; \ shlq $32, RT2; \ xorq RT2, r ## 0; \ \ movq (key_table + ((kl) * 2) * 4)(CTX), RT0; \ orq l ## 1, RT0; \ shrq $32, RT0; \ xorq RT0, l ## 1; \ movl (key_table + ((kr) * 2) * 4)(CTX), RT1d; \ andl r ## 1d, RT1d; \ roll $1, RT1d; \ shlq $32, RT1; \ xorq RT1, r ## 1; #define enc_rounds2(i) \ roundsm2(RAB, i + 2, RCD); \ roundsm2(RCD, i + 3, RAB); \ roundsm2(RAB, i + 4, RCD); \ roundsm2(RCD, i + 5, RAB); \ roundsm2(RAB, i + 6, RCD); \ roundsm2(RCD, i + 7, RAB); #define enc_fls2(i) \ fls2(RAB, RCD, i + 0, i + 1); #define enc_inpack2() \ movq (RIO), RAB0; \ bswapq RAB0; \ rorq $32, RAB0; \ movq 4*2(RIO), RCD0; \ bswapq RCD0; \ rolq $32, RCD0; \ xorq key_table(CTX), RAB0; \ \ movq 8*2(RIO), RAB1; \ bswapq RAB1; \ rorq $32, RAB1; \ movq 
12*2(RIO), RCD1; \ bswapq RCD1; \ rolq $32, RCD1; \ xorq key_table(CTX), RAB1; #define enc_outunpack2(op, max) \ xorq key_table(CTX, max, 8), RCD0; \ rolq $32, RCD0; \ bswapq RCD0; \ op ## q RCD0, (RIO); \ rorq $32, RAB0; \ bswapq RAB0; \ op ## q RAB0, 4*2(RIO); \ \ xorq key_table(CTX, max, 8), RCD1; \ rolq $32, RCD1; \ bswapq RCD1; \ op ## q RCD1, 8*2(RIO); \ rorq $32, RAB1; \ bswapq RAB1; \ op ## q RAB1, 12*2(RIO); #define dec_rounds2(i) \ roundsm2(RAB, i + 7, RCD); \ roundsm2(RCD, i + 6, RAB); \ roundsm2(RAB, i + 5, RCD); \ roundsm2(RCD, i + 4, RAB); \ roundsm2(RAB, i + 3, RCD); \ roundsm2(RCD, i + 2, RAB); #define dec_fls2(i) \ fls2(RAB, RCD, i + 1, i + 0); #define dec_inpack2(max) \ movq (RIO), RAB0; \ bswapq RAB0; \ rorq $32, RAB0; \ movq 4*2(RIO), RCD0; \ bswapq RCD0; \ rolq $32, RCD0; \ xorq key_table(CTX, max, 8), RAB0; \ \ movq 8*2(RIO), RAB1; \ bswapq RAB1; \ rorq $32, RAB1; \ movq 12*2(RIO), RCD1; \ bswapq RCD1; \ rolq $32, RCD1; \ xorq key_table(CTX, max, 8), RAB1; #define dec_outunpack2() \ xorq key_table(CTX), RCD0; \ rolq $32, RCD0; \ bswapq RCD0; \ movq RCD0, (RIO); \ rorq $32, RAB0; \ bswapq RAB0; \ movq RAB0, 4*2(RIO); \ \ xorq key_table(CTX), RCD1; \ rolq $32, RCD1; \ bswapq RCD1; \ movq RCD1, 8*2(RIO); \ rorq $32, RAB1; \ bswapq RAB1; \ movq RAB1, 12*2(RIO); SYM_FUNC_START(__camellia_enc_blk_2way) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src * %rcx: bool xor */ pushq %rbx; movq %r12, RR12; movq %rcx, RXOR; movq %rsi, RDST; movq %rdx, RIO; enc_inpack2(); enc_rounds2(0); enc_fls2(8); enc_rounds2(8); enc_fls2(16); enc_rounds2(16); movl $24, RT2d; /* max */ cmpb $16, key_length(CTX); je .L__enc2_done; enc_fls2(24); enc_rounds2(24); movl $32, RT2d; /* max */ .L__enc2_done: test RXORbl, RXORbl; movq RDST, RIO; jnz .L__enc2_xor; enc_outunpack2(mov, RT2); movq RR12, %r12; popq %rbx; RET; .L__enc2_xor: enc_outunpack2(xor, RT2); movq RR12, %r12; popq %rbx; RET; SYM_FUNC_END(__camellia_enc_blk_2way) SYM_FUNC_START(camellia_dec_blk_2way) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src */ cmpl $16, key_length(CTX); movl $32, RT2d; movl $24, RXORd; cmovel RXORd, RT2d; /* max */ movq %rbx, RXOR; movq %r12, RR12; movq %rsi, RDST; movq %rdx, RIO; dec_inpack2(RT2); cmpb $24, RT2bl; je .L__dec2_rounds16; dec_rounds2(24); dec_fls2(24); .L__dec2_rounds16: dec_rounds2(16); dec_fls2(16); dec_rounds2(8); dec_fls2(8); dec_rounds2(0); movq RDST, RIO; dec_outunpack2(); movq RR12, %r12; movq RXOR, %rbx; RET; SYM_FUNC_END(camellia_dec_blk_2way)
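The fls/fls2 macros above implement Camellia's FL/FL⁻¹ layer that is inserted between the 6-round groups, interleaving the two halves (and, in the 2-way case, both blocks) to hide latency. Setting aside the register and byte-order layout used by the assembly, the underlying transforms are the textbook ones from RFC 3713; a C sketch follows (camellia_fl/camellia_flinv are illustrative names only).

/*
 * Camellia FL and FL^-1 as defined in RFC 3713, operating on one 64-bit
 * value split into 32-bit halves, with a 64-bit subkey ke.
 */
#include <stdint.h>

static uint32_t rol32(uint32_t v, int n)
{
	return (v << n) | (v >> (32 - n));
}

uint64_t camellia_fl(uint64_t x, uint64_t ke)
{
	uint32_t xl = x >> 32, xr = (uint32_t)x;
	uint32_t kl = ke >> 32, kr = (uint32_t)ke;

	xr ^= rol32(xl & kl, 1);	/* right half mixed from left */
	xl ^= (xr | kr);		/* left half mixed from updated right */
	return ((uint64_t)xl << 32) | xr;
}

uint64_t camellia_flinv(uint64_t y, uint64_t ke)
{
	uint32_t yl = y >> 32, yr = (uint32_t)y;
	uint32_t kl = ke >> 32, kr = (uint32_t)ke;

	yl ^= (yr | kr);		/* undo the FL steps in reverse order */
	yr ^= rol32(yl & kl, 1);
	return ((uint64_t)yl << 32) | yr;
}

FL and FL⁻¹ touch independent data halves with independent subkey words, which is what lets the macro interleave the two update sequences freely.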
aixcc-public/challenge-001-exemplar-source
14,288
arch/x86/crypto/sha1_avx2_x86_64_asm.S
/* * Implement fast SHA-1 with AVX2 instructions. (x86_64) * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * Contact Information: * Ilya Albrekht <ilya.albrekht@intel.com> * Maxim Locktyukhin <maxim.locktyukhin@intel.com> * Ronen Zohar <ronen.zohar@intel.com> * Chandramouli Narayanan <mouli@linux.intel.com> * * BSD LICENSE * * Copyright(c) 2014 Intel Corporation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ /* * SHA-1 implementation with Intel(R) AVX2 instruction set extensions. * *This implementation is based on the previous SSSE3 release: *Visit http://software.intel.com/en-us/articles/ *and refer to improving-the-performance-of-the-secure-hash-algorithm-1/ * *Updates 20-byte SHA-1 record at start of 'state', from 'input', for *even number of 'blocks' consecutive 64-byte blocks. 
* *extern "C" void sha1_transform_avx2( * struct sha1_state *state, const u8* input, int blocks ); */ #include <linux/linkage.h> #define CTX %rdi /* arg1 */ #define BUF %rsi /* arg2 */ #define CNT %rdx /* arg3 */ #define REG_A %ecx #define REG_B %esi #define REG_C %edi #define REG_D %eax #define REG_E %edx #define REG_TB %ebx #define REG_TA %r12d #define REG_RA %rcx #define REG_RB %rsi #define REG_RC %rdi #define REG_RD %rax #define REG_RE %rdx #define REG_RTA %r12 #define REG_RTB %rbx #define REG_T1 %r11d #define xmm_mov vmovups #define avx2_zeroupper vzeroupper #define RND_F1 1 #define RND_F2 2 #define RND_F3 3 .macro REGALLOC .set A, REG_A .set B, REG_B .set C, REG_C .set D, REG_D .set E, REG_E .set TB, REG_TB .set TA, REG_TA .set RA, REG_RA .set RB, REG_RB .set RC, REG_RC .set RD, REG_RD .set RE, REG_RE .set RTA, REG_RTA .set RTB, REG_RTB .set T1, REG_T1 .endm #define HASH_PTR %r9 #define BLOCKS_CTR %r8 #define BUFFER_PTR %r10 #define BUFFER_PTR2 %r13 #define PRECALC_BUF %r14 #define WK_BUF %r15 #define W_TMP %xmm0 #define WY_TMP %ymm0 #define WY_TMP2 %ymm9 # AVX2 variables #define WY0 %ymm3 #define WY4 %ymm5 #define WY08 %ymm7 #define WY12 %ymm8 #define WY16 %ymm12 #define WY20 %ymm13 #define WY24 %ymm14 #define WY28 %ymm15 #define YMM_SHUFB_BSWAP %ymm10 /* * Keep 2 iterations precalculated at a time: * - 80 DWORDs per iteration * 2 */ #define W_SIZE (80*2*2 +16) #define WK(t) ((((t) % 80) / 4)*32 + ( (t) % 4)*4 + ((t)/80)*16 )(WK_BUF) #define PRECALC_WK(t) ((t)*2*2)(PRECALC_BUF) .macro UPDATE_HASH hash, val add \hash, \val mov \val, \hash .endm .macro PRECALC_RESET_WY .set WY_00, WY0 .set WY_04, WY4 .set WY_08, WY08 .set WY_12, WY12 .set WY_16, WY16 .set WY_20, WY20 .set WY_24, WY24 .set WY_28, WY28 .set WY_32, WY_00 .endm .macro PRECALC_ROTATE_WY /* Rotate macros */ .set WY_32, WY_28 .set WY_28, WY_24 .set WY_24, WY_20 .set WY_20, WY_16 .set WY_16, WY_12 .set WY_12, WY_08 .set WY_08, WY_04 .set WY_04, WY_00 .set WY_00, WY_32 /* Define register aliases */ .set WY, WY_00 .set WY_minus_04, WY_04 .set WY_minus_08, WY_08 .set WY_minus_12, WY_12 .set WY_minus_16, WY_16 .set WY_minus_20, WY_20 .set WY_minus_24, WY_24 .set WY_minus_28, WY_28 .set WY_minus_32, WY .endm .macro PRECALC_00_15 .if (i == 0) # Initialize and rotate registers PRECALC_RESET_WY PRECALC_ROTATE_WY .endif /* message scheduling pre-compute for rounds 0-15 */ .if ((i & 7) == 0) /* * blended AVX2 and ALU instruction scheduling * 1 vector iteration per 8 rounds */ vmovdqu (i * 2)(BUFFER_PTR), W_TMP .elseif ((i & 7) == 1) vinsertf128 $1, ((i-1) * 2)(BUFFER_PTR2),\ WY_TMP, WY_TMP .elseif ((i & 7) == 2) vpshufb YMM_SHUFB_BSWAP, WY_TMP, WY .elseif ((i & 7) == 4) vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP .elseif ((i & 7) == 7) vmovdqu WY_TMP, PRECALC_WK(i&~7) PRECALC_ROTATE_WY .endif .endm .macro PRECALC_16_31 /* * message scheduling pre-compute for rounds 16-31 * calculating last 32 w[i] values in 8 XMM registers * pre-calculate K+w[i] values and store to mem * for later load by ALU add instruction * * "brute force" vectorization for rounds 16-31 only * due to w[i]->w[i-3] dependency */ .if ((i & 7) == 0) /* * blended AVX2 and ALU instruction scheduling * 1 vector iteration per 8 rounds */ /* w[i-14] */ vpalignr $8, WY_minus_16, WY_minus_12, WY vpsrldq $4, WY_minus_04, WY_TMP /* w[i-3] */ .elseif ((i & 7) == 1) vpxor WY_minus_08, WY, WY vpxor WY_minus_16, WY_TMP, WY_TMP .elseif ((i & 7) == 2) vpxor WY_TMP, WY, WY vpslldq $12, WY, WY_TMP2 .elseif ((i & 7) == 3) vpslld $1, WY, WY_TMP vpsrld $31, WY, WY .elseif ((i & 7) == 4) vpor 
WY, WY_TMP, WY_TMP vpslld $2, WY_TMP2, WY .elseif ((i & 7) == 5) vpsrld $30, WY_TMP2, WY_TMP2 vpxor WY, WY_TMP, WY_TMP .elseif ((i & 7) == 7) vpxor WY_TMP2, WY_TMP, WY vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP vmovdqu WY_TMP, PRECALC_WK(i&~7) PRECALC_ROTATE_WY .endif .endm .macro PRECALC_32_79 /* * in SHA-1 specification: * w[i] = (w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16]) rol 1 * instead we do equal: * w[i] = (w[i-6] ^ w[i-16] ^ w[i-28] ^ w[i-32]) rol 2 * allows more efficient vectorization * since w[i]=>w[i-3] dependency is broken */ .if ((i & 7) == 0) /* * blended AVX2 and ALU instruction scheduling * 1 vector iteration per 8 rounds */ vpalignr $8, WY_minus_08, WY_minus_04, WY_TMP .elseif ((i & 7) == 1) /* W is W_minus_32 before xor */ vpxor WY_minus_28, WY, WY .elseif ((i & 7) == 2) vpxor WY_minus_16, WY_TMP, WY_TMP .elseif ((i & 7) == 3) vpxor WY_TMP, WY, WY .elseif ((i & 7) == 4) vpslld $2, WY, WY_TMP .elseif ((i & 7) == 5) vpsrld $30, WY, WY vpor WY, WY_TMP, WY .elseif ((i & 7) == 7) vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP vmovdqu WY_TMP, PRECALC_WK(i&~7) PRECALC_ROTATE_WY .endif .endm .macro PRECALC r, s .set i, \r .if (i < 40) .set K_XMM, 32*0 .elseif (i < 80) .set K_XMM, 32*1 .elseif (i < 120) .set K_XMM, 32*2 .else .set K_XMM, 32*3 .endif .if (i<32) PRECALC_00_15 \s .elseif (i<64) PRECALC_16_31 \s .elseif (i < 160) PRECALC_32_79 \s .endif .endm .macro ROTATE_STATE .set T_REG, E .set E, D .set D, C .set C, B .set B, TB .set TB, A .set A, T_REG .set T_REG, RE .set RE, RD .set RD, RC .set RC, RB .set RB, RTB .set RTB, RA .set RA, T_REG .endm /* Macro relies on saved ROUND_Fx */ .macro RND_FUN f, r .if (\f == RND_F1) ROUND_F1 \r .elseif (\f == RND_F2) ROUND_F2 \r .elseif (\f == RND_F3) ROUND_F3 \r .endif .endm .macro RR r .set round_id, (\r % 80) .if (round_id == 0) /* Precalculate F for first round */ .set ROUND_FUNC, RND_F1 mov B, TB rorx $(32-30), B, B /* b>>>2 */ andn D, TB, T1 and C, TB xor T1, TB .endif RND_FUN ROUND_FUNC, \r ROTATE_STATE .if (round_id == 18) .set ROUND_FUNC, RND_F2 .elseif (round_id == 38) .set ROUND_FUNC, RND_F3 .elseif (round_id == 58) .set ROUND_FUNC, RND_F2 .endif .set round_id, ( (\r+1) % 80) RND_FUN ROUND_FUNC, (\r+1) ROTATE_STATE .endm .macro ROUND_F1 r add WK(\r), E andn C, A, T1 /* ~b&d */ lea (RE,RTB), E /* Add F from the previous round */ rorx $(32-5), A, TA /* T2 = A >>> 5 */ rorx $(32-30),A, TB /* b>>>2 for next round */ PRECALC (\r) /* msg scheduling for next 2 blocks */ /* * Calculate F for the next round * (b & c) ^ andn[b, d] */ and B, A /* b&c */ xor T1, A /* F1 = (b&c) ^ (~b&d) */ lea (RE,RTA), E /* E += A >>> 5 */ .endm .macro ROUND_F2 r add WK(\r), E lea (RE,RTB), E /* Add F from the previous round */ /* Calculate F for the next round */ rorx $(32-5), A, TA /* T2 = A >>> 5 */ .if ((round_id) < 79) rorx $(32-30), A, TB /* b>>>2 for next round */ .endif PRECALC (\r) /* msg scheduling for next 2 blocks */ .if ((round_id) < 79) xor B, A .endif add TA, E /* E += A >>> 5 */ .if ((round_id) < 79) xor C, A .endif .endm .macro ROUND_F3 r add WK(\r), E PRECALC (\r) /* msg scheduling for next 2 blocks */ lea (RE,RTB), E /* Add F from the previous round */ mov B, T1 or A, T1 rorx $(32-5), A, TA /* T2 = A >>> 5 */ rorx $(32-30), A, TB /* b>>>2 for next round */ /* Calculate F for the next round * (b and c) or (d and (b or c)) */ and C, T1 and B, A or T1, A add TA, E /* E += A >>> 5 */ .endm /* Add constant only if (%2 > %3) condition met (uses RTA as temp) * %1 + %2 >= %3 ? 
%4 : 0 */ .macro ADD_IF_GE a, b, c, d mov \a, RTA add $\d, RTA cmp $\c, \b cmovge RTA, \a .endm /* * macro implements 80 rounds of SHA-1, for multiple blocks with s/w pipelining */ .macro SHA1_PIPELINED_MAIN_BODY REGALLOC mov (HASH_PTR), A mov 4(HASH_PTR), B mov 8(HASH_PTR), C mov 12(HASH_PTR), D mov 16(HASH_PTR), E mov %rsp, PRECALC_BUF lea (2*4*80+32)(%rsp), WK_BUF # Precalc WK for first 2 blocks ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 2, 64 .set i, 0 .rept 160 PRECALC i .set i, i + 1 .endr /* Go to next block if needed */ ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 3, 128 ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128 xchg WK_BUF, PRECALC_BUF .align 32 _loop: /* * code loops through more than one block * we use K_BASE value as a signal of a last block, * it is set below by: cmovae BUFFER_PTR, K_BASE */ test BLOCKS_CTR, BLOCKS_CTR jnz _begin .align 32 jmp _end .align 32 _begin: /* * Do first block * rounds: 0,2,4,6,8 */ .set j, 0 .rept 5 RR j .set j, j+2 .endr jmp _loop0 _loop0: /* * rounds: * 10,12,14,16,18 * 20,22,24,26,28 * 30,32,34,36,38 * 40,42,44,46,48 * 50,52,54,56,58 */ .rept 25 RR j .set j, j+2 .endr /* Update Counter */ sub $1, BLOCKS_CTR /* Move to the next block only if needed*/ ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 4, 128 /* * rounds * 60,62,64,66,68 * 70,72,74,76,78 */ .rept 10 RR j .set j, j+2 .endr UPDATE_HASH (HASH_PTR), A UPDATE_HASH 4(HASH_PTR), TB UPDATE_HASH 8(HASH_PTR), C UPDATE_HASH 12(HASH_PTR), D UPDATE_HASH 16(HASH_PTR), E test BLOCKS_CTR, BLOCKS_CTR jz _loop mov TB, B /* Process second block */ /* * rounds * 0+80, 2+80, 4+80, 6+80, 8+80 * 10+80,12+80,14+80,16+80,18+80 */ .set j, 0 .rept 10 RR j+80 .set j, j+2 .endr jmp _loop1 _loop1: /* * rounds * 20+80,22+80,24+80,26+80,28+80 * 30+80,32+80,34+80,36+80,38+80 */ .rept 10 RR j+80 .set j, j+2 .endr jmp _loop2 _loop2: /* * rounds * 40+80,42+80,44+80,46+80,48+80 * 50+80,52+80,54+80,56+80,58+80 */ .rept 10 RR j+80 .set j, j+2 .endr /* update counter */ sub $1, BLOCKS_CTR /* Move to the next block only if needed*/ ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128 jmp _loop3 _loop3: /* * rounds * 60+80,62+80,64+80,66+80,68+80 * 70+80,72+80,74+80,76+80,78+80 */ .rept 10 RR j+80 .set j, j+2 .endr UPDATE_HASH (HASH_PTR), A UPDATE_HASH 4(HASH_PTR), TB UPDATE_HASH 8(HASH_PTR), C UPDATE_HASH 12(HASH_PTR), D UPDATE_HASH 16(HASH_PTR), E /* Reset state for AVX2 reg permutation */ mov A, TA mov TB, A mov C, TB mov E, C mov D, B mov TA, D REGALLOC xchg WK_BUF, PRECALC_BUF jmp _loop .align 32 _end: .endm /* * macro implements SHA-1 function's body for several 64-byte blocks * param: function's name */ .macro SHA1_VECTOR_ASM name SYM_FUNC_START(\name) push %rbx push %r12 push %r13 push %r14 push %r15 RESERVE_STACK = (W_SIZE*4 + 8+24) /* Align stack */ push %rbp mov %rsp, %rbp and $~(0x20-1), %rsp sub $RESERVE_STACK, %rsp avx2_zeroupper /* Setup initial values */ mov CTX, HASH_PTR mov BUF, BUFFER_PTR mov BUF, BUFFER_PTR2 mov CNT, BLOCKS_CTR xmm_mov BSWAP_SHUFB_CTL(%rip), YMM_SHUFB_BSWAP SHA1_PIPELINED_MAIN_BODY avx2_zeroupper mov %rbp, %rsp pop %rbp pop %r15 pop %r14 pop %r13 pop %r12 pop %rbx RET SYM_FUNC_END(\name) .endm .section .rodata #define K1 0x5a827999 #define K2 0x6ed9eba1 #define K3 0x8f1bbcdc #define K4 0xca62c1d6 .align 128 K_XMM_AR: .long K1, K1, K1, K1 .long K1, K1, K1, K1 .long K2, K2, K2, K2 .long K2, K2, K2, K2 .long K3, K3, K3, K3 .long K3, K3, K3, K3 .long K4, K4, K4, K4 .long K4, K4, K4, K4 BSWAP_SHUFB_CTL: .long 0x00010203 .long 0x04050607 .long 0x08090a0b .long 0x0c0d0e0f .long 0x00010203 .long 0x04050607 .long 0x08090a0b .long 0x0c0d0e0f 
.text SHA1_VECTOR_ASM sha1_transform_avx2
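Editor's note: the scheduling comment in the file above rewrites the SHA-1 expansion w[i] = (w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16]) rol 1 into w[i] = (w[i-6] ^ w[i-16] ^ w[i-28] ^ w[i-32]) rol 2, removing the w[i-3] dependency so a whole vector of words can be computed per iteration. A minimal scalar sketch of that equivalence follows; the helper names (rol, std_w, alt_w) are mine and not part of the kernel source.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static uint32_t rol(uint32_t x, int n)
    {
        return (x << n) | (x >> (32 - n));
    }

    int main(void)
    {
        uint32_t std_w[80], alt_w[80];
        int i;

        srand(1);                       /* any message block will do */
        for (i = 0; i < 16; i++)
            std_w[i] = alt_w[i] = (uint32_t)rand();

        /* expansion exactly as in the SHA-1 specification */
        for (i = 16; i < 80; i++)
            std_w[i] = rol(std_w[i-3] ^ std_w[i-8] ^ std_w[i-14] ^ std_w[i-16], 1);

        /* specification form while fewer than 32 words of history exist,
         * then the rol-2 form with the w[i-3] dependency broken */
        for (i = 16; i < 32; i++)
            alt_w[i] = rol(alt_w[i-3] ^ alt_w[i-8] ^ alt_w[i-14] ^ alt_w[i-16], 1);
        for (i = 32; i < 80; i++)
            alt_w[i] = rol(alt_w[i-6] ^ alt_w[i-16] ^ alt_w[i-28] ^ alt_w[i-32], 2);

        for (i = 0; i < 80; i++)
            assert(std_w[i] == alt_w[i]);
        puts("both expansions agree");
        return 0;
    }

The rol-2 form only becomes valid once 32 words of history exist, which is why the earlier precalc macros still use the specification form.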
aixcc-public/challenge-001-exemplar-source
17,745
arch/x86/crypto/sha256-ssse3-asm.S
######################################################################## # Implement fast SHA-256 with SSSE3 instructions. (x86_64) # # Copyright (C) 2013 Intel Corporation. # # Authors: # James Guilford <james.guilford@intel.com> # Kirk Yap <kirk.s.yap@intel.com> # Tim Chen <tim.c.chen@linux.intel.com> # # This software is available to you under a choice of one of two # licenses. You may choose to be licensed under the terms of the GNU # General Public License (GPL) Version 2, available from the file # COPYING in the main directory of this source tree, or the # OpenIB.org BSD license below: # # Redistribution and use in source and binary forms, with or # without modification, are permitted provided that the following # conditions are met: # # - Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # - Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials # provided with the distribution. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # ######################################################################## # # This code is described in an Intel White-Paper: # "Fast SHA-256 Implementations on Intel Architecture Processors" # # To find it, surf to http://www.intel.com/p/en_US/embedded # and search for that title. 
# ######################################################################## #include <linux/linkage.h> #include <linux/cfi_types.h> ## assume buffers not aligned #define MOVDQ movdqu ################################ Define Macros # addm [mem], reg # Add reg to mem using reg-mem add and store .macro addm p1 p2 add \p1, \p2 mov \p2, \p1 .endm ################################ # COPY_XMM_AND_BSWAP xmm, [mem], byte_flip_mask # Load xmm with mem and byte swap each dword .macro COPY_XMM_AND_BSWAP p1 p2 p3 MOVDQ \p2, \p1 pshufb \p3, \p1 .endm ################################ X0 = %xmm4 X1 = %xmm5 X2 = %xmm6 X3 = %xmm7 XTMP0 = %xmm0 XTMP1 = %xmm1 XTMP2 = %xmm2 XTMP3 = %xmm3 XTMP4 = %xmm8 XFER = %xmm9 SHUF_00BA = %xmm10 # shuffle xBxA -> 00BA SHUF_DC00 = %xmm11 # shuffle xDxC -> DC00 BYTE_FLIP_MASK = %xmm12 NUM_BLKS = %rdx # 3rd arg INP = %rsi # 2nd arg CTX = %rdi # 1st arg SRND = %rsi # clobbers INP c = %ecx d = %r8d e = %edx TBL = %r12 a = %eax b = %ebx f = %r9d g = %r10d h = %r11d y0 = %r13d y1 = %r14d y2 = %r15d _INP_END_SIZE = 8 _INP_SIZE = 8 _XFER_SIZE = 16 _XMM_SAVE_SIZE = 0 _INP_END = 0 _INP = _INP_END + _INP_END_SIZE _XFER = _INP + _INP_SIZE _XMM_SAVE = _XFER + _XFER_SIZE STACK_SIZE = _XMM_SAVE + _XMM_SAVE_SIZE # rotate_Xs # Rotate values of symbols X0...X3 .macro rotate_Xs X_ = X0 X0 = X1 X1 = X2 X2 = X3 X3 = X_ .endm # ROTATE_ARGS # Rotate values of symbols a...h .macro ROTATE_ARGS TMP_ = h h = g g = f f = e e = d d = c c = b b = a a = TMP_ .endm .macro FOUR_ROUNDS_AND_SCHED ## compute s0 four at a time and s1 two at a time ## compute W[-16] + W[-7] 4 at a time movdqa X3, XTMP0 mov e, y0 # y0 = e ror $(25-11), y0 # y0 = e >> (25-11) mov a, y1 # y1 = a palignr $4, X2, XTMP0 # XTMP0 = W[-7] ror $(22-13), y1 # y1 = a >> (22-13) xor e, y0 # y0 = e ^ (e >> (25-11)) mov f, y2 # y2 = f ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6)) movdqa X1, XTMP1 xor a, y1 # y1 = a ^ (a >> (22-13) xor g, y2 # y2 = f^g paddd X0, XTMP0 # XTMP0 = W[-7] + W[-16] xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) and e, y2 # y2 = (f^g)&e ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2)) ## compute s0 palignr $4, X0, XTMP1 # XTMP1 = W[-15] xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) ror $6, y0 # y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) xor g, y2 # y2 = CH = ((f^g)&e)^g movdqa XTMP1, XTMP2 # XTMP2 = W[-15] ror $2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) add y0, y2 # y2 = S1 + CH add _XFER(%rsp) , y2 # y2 = k + w + S1 + CH movdqa XTMP1, XTMP3 # XTMP3 = W[-15] mov a, y0 # y0 = a add y2, h # h = h + S1 + CH + k + w mov a, y2 # y2 = a pslld $(32-7), XTMP1 # or c, y0 # y0 = a|c add h, d # d = d + h + S1 + CH + k + w and c, y2 # y2 = a&c psrld $7, XTMP2 # and b, y0 # y0 = (a|c)&b add y1, h # h = h + S1 + CH + k + w + S0 por XTMP2, XTMP1 # XTMP1 = W[-15] ror 7 or y2, y0 # y0 = MAJ = (a|c)&b)|(a&c) add y0, h # h = h + S1 + CH + k + w + S0 + MAJ # ROTATE_ARGS # movdqa XTMP3, XTMP2 # XTMP2 = W[-15] mov e, y0 # y0 = e mov a, y1 # y1 = a movdqa XTMP3, XTMP4 # XTMP4 = W[-15] ror $(25-11), y0 # y0 = e >> (25-11) xor e, y0 # y0 = e ^ (e >> (25-11)) mov f, y2 # y2 = f ror $(22-13), y1 # y1 = a >> (22-13) pslld $(32-18), XTMP3 # xor a, y1 # y1 = a ^ (a >> (22-13) ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6)) xor g, y2 # y2 = f^g psrld $18, XTMP2 # ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2)) xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) and e, y2 # y2 = (f^g)&e ror $6, y0 # y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) pxor XTMP3, XTMP1 xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) xor g, y2 # y2 = CH = 
((f^g)&e)^g psrld $3, XTMP4 # XTMP4 = W[-15] >> 3 add y0, y2 # y2 = S1 + CH add (1*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH ror $2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) pxor XTMP2, XTMP1 # XTMP1 = W[-15] ror 7 ^ W[-15] ror 18 mov a, y0 # y0 = a add y2, h # h = h + S1 + CH + k + w mov a, y2 # y2 = a pxor XTMP4, XTMP1 # XTMP1 = s0 or c, y0 # y0 = a|c add h, d # d = d + h + S1 + CH + k + w and c, y2 # y2 = a&c ## compute low s1 pshufd $0b11111010, X3, XTMP2 # XTMP2 = W[-2] {BBAA} and b, y0 # y0 = (a|c)&b add y1, h # h = h + S1 + CH + k + w + S0 paddd XTMP1, XTMP0 # XTMP0 = W[-16] + W[-7] + s0 or y2, y0 # y0 = MAJ = (a|c)&b)|(a&c) add y0, h # h = h + S1 + CH + k + w + S0 + MAJ ROTATE_ARGS movdqa XTMP2, XTMP3 # XTMP3 = W[-2] {BBAA} mov e, y0 # y0 = e mov a, y1 # y1 = a ror $(25-11), y0 # y0 = e >> (25-11) movdqa XTMP2, XTMP4 # XTMP4 = W[-2] {BBAA} xor e, y0 # y0 = e ^ (e >> (25-11)) ror $(22-13), y1 # y1 = a >> (22-13) mov f, y2 # y2 = f xor a, y1 # y1 = a ^ (a >> (22-13) ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6)) psrlq $17, XTMP2 # XTMP2 = W[-2] ror 17 {xBxA} xor g, y2 # y2 = f^g psrlq $19, XTMP3 # XTMP3 = W[-2] ror 19 {xBxA} xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) and e, y2 # y2 = (f^g)&e psrld $10, XTMP4 # XTMP4 = W[-2] >> 10 {BBAA} ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2)) xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) xor g, y2 # y2 = CH = ((f^g)&e)^g ror $6, y0 # y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) pxor XTMP3, XTMP2 add y0, y2 # y2 = S1 + CH ror $2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) add (2*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH pxor XTMP2, XTMP4 # XTMP4 = s1 {xBxA} mov a, y0 # y0 = a add y2, h # h = h + S1 + CH + k + w mov a, y2 # y2 = a pshufb SHUF_00BA, XTMP4 # XTMP4 = s1 {00BA} or c, y0 # y0 = a|c add h, d # d = d + h + S1 + CH + k + w and c, y2 # y2 = a&c paddd XTMP4, XTMP0 # XTMP0 = {..., ..., W[1], W[0]} and b, y0 # y0 = (a|c)&b add y1, h # h = h + S1 + CH + k + w + S0 ## compute high s1 pshufd $0b01010000, XTMP0, XTMP2 # XTMP2 = W[-2] {BBAA} or y2, y0 # y0 = MAJ = (a|c)&b)|(a&c) add y0, h # h = h + S1 + CH + k + w + S0 + MAJ # ROTATE_ARGS # movdqa XTMP2, XTMP3 # XTMP3 = W[-2] {DDCC} mov e, y0 # y0 = e ror $(25-11), y0 # y0 = e >> (25-11) mov a, y1 # y1 = a movdqa XTMP2, X0 # X0 = W[-2] {DDCC} ror $(22-13), y1 # y1 = a >> (22-13) xor e, y0 # y0 = e ^ (e >> (25-11)) mov f, y2 # y2 = f ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6)) psrlq $17, XTMP2 # XTMP2 = W[-2] ror 17 {xDxC} xor a, y1 # y1 = a ^ (a >> (22-13) xor g, y2 # y2 = f^g psrlq $19, XTMP3 # XTMP3 = W[-2] ror 19 {xDxC} xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25 and e, y2 # y2 = (f^g)&e ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2)) psrld $10, X0 # X0 = W[-2] >> 10 {DDCC} xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22 ror $6, y0 # y0 = S1 = (e>>6) & (e>>11) ^ (e>>2 xor g, y2 # y2 = CH = ((f^g)&e)^g pxor XTMP3, XTMP2 # ror $2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>2 add y0, y2 # y2 = S1 + CH add (3*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH pxor XTMP2, X0 # X0 = s1 {xDxC} mov a, y0 # y0 = a add y2, h # h = h + S1 + CH + k + w mov a, y2 # y2 = a pshufb SHUF_DC00, X0 # X0 = s1 {DC00} or c, y0 # y0 = a|c add h, d # d = d + h + S1 + CH + k + w and c, y2 # y2 = a&c paddd XTMP0, X0 # X0 = {W[3], W[2], W[1], W[0]} and b, y0 # y0 = (a|c)&b add y1, h # h = h + S1 + CH + k + w + S0 or y2, y0 # y0 = MAJ = (a|c)&b)|(a&c) add y0, h # h = h + S1 + CH + k + w + S0 + MAJ ROTATE_ARGS rotate_Xs .endm ## input is [rsp + _XFER + %1 * 4] .macro DO_ROUND round mov e, y0 # y0 
= e ror $(25-11), y0 # y0 = e >> (25-11) mov a, y1 # y1 = a xor e, y0 # y0 = e ^ (e >> (25-11)) ror $(22-13), y1 # y1 = a >> (22-13) mov f, y2 # y2 = f xor a, y1 # y1 = a ^ (a >> (22-13) ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6)) xor g, y2 # y2 = f^g xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2)) and e, y2 # y2 = (f^g)&e xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) ror $6, y0 # y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) xor g, y2 # y2 = CH = ((f^g)&e)^g add y0, y2 # y2 = S1 + CH ror $2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) offset = \round * 4 + _XFER add offset(%rsp), y2 # y2 = k + w + S1 + CH mov a, y0 # y0 = a add y2, h # h = h + S1 + CH + k + w mov a, y2 # y2 = a or c, y0 # y0 = a|c add h, d # d = d + h + S1 + CH + k + w and c, y2 # y2 = a&c and b, y0 # y0 = (a|c)&b add y1, h # h = h + S1 + CH + k + w + S0 or y2, y0 # y0 = MAJ = (a|c)&b)|(a&c) add y0, h # h = h + S1 + CH + k + w + S0 + MAJ ROTATE_ARGS .endm ######################################################################## ## void sha256_transform_ssse3(struct sha256_state *state, const u8 *data, ## int blocks); ## arg 1 : pointer to state ## (struct sha256_state is assumed to begin with u32 state[8]) ## arg 2 : pointer to input data ## arg 3 : Num blocks ######################################################################## .text SYM_TYPED_FUNC_START(sha256_transform_ssse3) .align 32 pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 pushq %rbp mov %rsp, %rbp subq $STACK_SIZE, %rsp and $~15, %rsp shl $6, NUM_BLKS # convert to bytes jz done_hash add INP, NUM_BLKS mov NUM_BLKS, _INP_END(%rsp) # pointer to end of data ## load initial digest mov 4*0(CTX), a mov 4*1(CTX), b mov 4*2(CTX), c mov 4*3(CTX), d mov 4*4(CTX), e mov 4*5(CTX), f mov 4*6(CTX), g mov 4*7(CTX), h movdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK movdqa _SHUF_00BA(%rip), SHUF_00BA movdqa _SHUF_DC00(%rip), SHUF_DC00 loop0: lea K256(%rip), TBL ## byte swap first 16 dwords COPY_XMM_AND_BSWAP X0, 0*16(INP), BYTE_FLIP_MASK COPY_XMM_AND_BSWAP X1, 1*16(INP), BYTE_FLIP_MASK COPY_XMM_AND_BSWAP X2, 2*16(INP), BYTE_FLIP_MASK COPY_XMM_AND_BSWAP X3, 3*16(INP), BYTE_FLIP_MASK mov INP, _INP(%rsp) ## schedule 48 input dwords, by doing 3 rounds of 16 each mov $3, SRND .align 16 loop1: movdqa (TBL), XFER paddd X0, XFER movdqa XFER, _XFER(%rsp) FOUR_ROUNDS_AND_SCHED movdqa 1*16(TBL), XFER paddd X0, XFER movdqa XFER, _XFER(%rsp) FOUR_ROUNDS_AND_SCHED movdqa 2*16(TBL), XFER paddd X0, XFER movdqa XFER, _XFER(%rsp) FOUR_ROUNDS_AND_SCHED movdqa 3*16(TBL), XFER paddd X0, XFER movdqa XFER, _XFER(%rsp) add $4*16, TBL FOUR_ROUNDS_AND_SCHED sub $1, SRND jne loop1 mov $2, SRND loop2: paddd (TBL), X0 movdqa X0, _XFER(%rsp) DO_ROUND 0 DO_ROUND 1 DO_ROUND 2 DO_ROUND 3 paddd 1*16(TBL), X1 movdqa X1, _XFER(%rsp) add $2*16, TBL DO_ROUND 0 DO_ROUND 1 DO_ROUND 2 DO_ROUND 3 movdqa X2, X0 movdqa X3, X1 sub $1, SRND jne loop2 addm (4*0)(CTX),a addm (4*1)(CTX),b addm (4*2)(CTX),c addm (4*3)(CTX),d addm (4*4)(CTX),e addm (4*5)(CTX),f addm (4*6)(CTX),g addm (4*7)(CTX),h mov _INP(%rsp), INP add $64, INP cmp _INP_END(%rsp), INP jne loop0 done_hash: mov %rbp, %rsp popq %rbp popq %r15 popq %r14 popq %r13 popq %r12 popq %rbx RET SYM_FUNC_END(sha256_transform_ssse3) .section .rodata.cst256.K256, "aM", @progbits, 256 .align 64 K256: .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 
0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .section .rodata.cst16.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 16 .align 16 PSHUFFLE_BYTE_FLIP_MASK: .octa 0x0c0d0e0f08090a0b0405060700010203 .section .rodata.cst16._SHUF_00BA, "aM", @progbits, 16 .align 16 # shuffle xBxA -> 00BA _SHUF_00BA: .octa 0xFFFFFFFFFFFFFFFF0b0a090803020100 .section .rodata.cst16._SHUF_DC00, "aM", @progbits, 16 .align 16 # shuffle xDxC -> DC00 _SHUF_DC00: .octa 0x0b0a090803020100FFFFFFFFFFFFFFFF
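Editor's note: the scalar work that FOUR_ROUNDS_AND_SCHED and DO_ROUND interleave with the SSSE3 message scheduling is the ordinary SHA-256 round plus the small-sigma expansion. A rough C sketch is given below; the names (ror32, sigma0, sigma1, sha256_round, kw) are illustrative rather than taken from the kernel, and it assumes the round constant K[t] and message word W[t] arrive already summed, as the _XFER stack slot holds them in this file.

    #include <stdint.h>

    static uint32_t ror32(uint32_t x, int n)
    {
        return (x >> n) | (x << (32 - n));
    }

    /* message expansion terms, matching the s0/s1 comments above:
     * W[t] = W[t-16] + sigma0(W[t-15]) + W[t-7] + sigma1(W[t-2]) */
    static uint32_t sigma0(uint32_t x) { return ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3); }
    static uint32_t sigma1(uint32_t x) { return ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10); }

    /* one round; kw = K[t] + W[t], already summed as in _XFER */
    static void sha256_round(uint32_t s[8], uint32_t kw)
    {
        uint32_t a = s[0], b = s[1], c = s[2], d = s[3];
        uint32_t e = s[4], f = s[5], g = s[6], h = s[7];

        uint32_t S1  = ror32(e, 6) ^ ror32(e, 11) ^ ror32(e, 25);
        uint32_t ch  = ((f ^ g) & e) ^ g;           /* = (e&f) ^ (~e&g) */
        uint32_t S0  = ror32(a, 2) ^ ror32(a, 13) ^ ror32(a, 22);
        uint32_t maj = ((a | c) & b) | (a & c);     /* = (a&b)^(a&c)^(b&c) */
        uint32_t t1  = h + S1 + ch + kw;

        /* same renaming as ROTATE_ARGS */
        s[7] = g; s[6] = f; s[5] = e; s[4] = d + t1;
        s[3] = c; s[2] = b; s[1] = a; s[0] = t1 + S0 + maj;
    }

    /* unused-function warnings aside, sigma0/sigma1 are shown only to mirror
     * the vectorized schedule; the assembly builds S0/S1 with staged ror
     * amounts (25-11, 11-6, ...) purely to reuse partial results. */

(Void of the staging, the end values are the same rotation sums.)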
aixcc-public/challenge-001-exemplar-source
2,741
arch/x86/crypto/nh-sse2-x86_64.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * NH - ε-almost-universal hash function, x86_64 SSE2 accelerated * * Copyright 2018 Google LLC * * Author: Eric Biggers <ebiggers@google.com> */ #include <linux/linkage.h> #define PASS0_SUMS %xmm0 #define PASS1_SUMS %xmm1 #define PASS2_SUMS %xmm2 #define PASS3_SUMS %xmm3 #define K0 %xmm4 #define K1 %xmm5 #define K2 %xmm6 #define K3 %xmm7 #define T0 %xmm8 #define T1 %xmm9 #define T2 %xmm10 #define T3 %xmm11 #define T4 %xmm12 #define T5 %xmm13 #define T6 %xmm14 #define T7 %xmm15 #define KEY %rdi #define MESSAGE %rsi #define MESSAGE_LEN %rdx #define HASH %rcx .macro _nh_stride k0, k1, k2, k3, offset // Load next message stride movdqu \offset(MESSAGE), T1 // Load next key stride movdqu \offset(KEY), \k3 // Add message words to key words movdqa T1, T2 movdqa T1, T3 paddd T1, \k0 // reuse k0 to avoid a move paddd \k1, T1 paddd \k2, T2 paddd \k3, T3 // Multiply 32x32 => 64 and accumulate pshufd $0x10, \k0, T4 pshufd $0x32, \k0, \k0 pshufd $0x10, T1, T5 pshufd $0x32, T1, T1 pshufd $0x10, T2, T6 pshufd $0x32, T2, T2 pshufd $0x10, T3, T7 pshufd $0x32, T3, T3 pmuludq T4, \k0 pmuludq T5, T1 pmuludq T6, T2 pmuludq T7, T3 paddq \k0, PASS0_SUMS paddq T1, PASS1_SUMS paddq T2, PASS2_SUMS paddq T3, PASS3_SUMS .endm /* * void nh_sse2(const u32 *key, const u8 *message, size_t message_len, * u8 hash[NH_HASH_BYTES]) * * It's guaranteed that message_len % 16 == 0. */ SYM_FUNC_START(nh_sse2) movdqu 0x00(KEY), K0 movdqu 0x10(KEY), K1 movdqu 0x20(KEY), K2 add $0x30, KEY pxor PASS0_SUMS, PASS0_SUMS pxor PASS1_SUMS, PASS1_SUMS pxor PASS2_SUMS, PASS2_SUMS pxor PASS3_SUMS, PASS3_SUMS sub $0x40, MESSAGE_LEN jl .Lloop4_done .Lloop4: _nh_stride K0, K1, K2, K3, 0x00 _nh_stride K1, K2, K3, K0, 0x10 _nh_stride K2, K3, K0, K1, 0x20 _nh_stride K3, K0, K1, K2, 0x30 add $0x40, KEY add $0x40, MESSAGE sub $0x40, MESSAGE_LEN jge .Lloop4 .Lloop4_done: and $0x3f, MESSAGE_LEN jz .Ldone _nh_stride K0, K1, K2, K3, 0x00 sub $0x10, MESSAGE_LEN jz .Ldone _nh_stride K1, K2, K3, K0, 0x10 sub $0x10, MESSAGE_LEN jz .Ldone _nh_stride K2, K3, K0, K1, 0x20 .Ldone: // Sum the accumulators for each pass, then store the sums to 'hash' movdqa PASS0_SUMS, T0 movdqa PASS2_SUMS, T1 punpcklqdq PASS1_SUMS, T0 // => (PASS0_SUM_A PASS1_SUM_A) punpcklqdq PASS3_SUMS, T1 // => (PASS2_SUM_A PASS3_SUM_A) punpckhqdq PASS1_SUMS, PASS0_SUMS // => (PASS0_SUM_B PASS1_SUM_B) punpckhqdq PASS3_SUMS, PASS2_SUMS // => (PASS2_SUM_B PASS3_SUM_B) paddq PASS0_SUMS, T0 paddq PASS2_SUMS, T1 movdqu T0, 0x00(HASH) movdqu T1, 0x10(HASH) RET SYM_FUNC_END(nh_sse2)
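Editor's note: the header comment only states the calling convention, so here is what one 16-byte stride of NH computes, written out in portable C. This is a sketch with my own names (nh_stride, load_le32) and it assumes a little-endian host; each of the four passes adds the same four message words to a key window shifted by four more words, multiplies the 0/2 and 1/3 pairs 32x32->64, and accumulates into one 64-bit sum per pass, which is exactly what the pshufd/pmuludq/paddq sequence above does in parallel.

    #include <stdint.h>
    #include <string.h>

    static uint32_t load_le32(const uint8_t *p)
    {
        uint32_t v;

        memcpy(&v, p, sizeof(v));       /* little-endian host assumed */
        return v;
    }

    /* one 16-byte stride; key points at the key word aligned with this
     * stride and advances by four words per stride, as in the loop above */
    static void nh_stride(const uint32_t *key, const uint8_t *msg, uint64_t sums[4])
    {
        uint32_t m0 = load_le32(msg + 0);
        uint32_t m1 = load_le32(msg + 4);
        uint32_t m2 = load_le32(msg + 8);
        uint32_t m3 = load_le32(msg + 12);
        int pass;

        for (pass = 0; pass < 4; pass++) {
            const uint32_t *k = key + 4 * pass;   /* K0..K3 in the assembly */

            sums[pass] += (uint64_t)(uint32_t)(m0 + k[0]) * (uint32_t)(m2 + k[2]);
            sums[pass] += (uint64_t)(uint32_t)(m1 + k[1]) * (uint32_t)(m3 + k[3]);
        }
    }

The final punpcklqdq/punpckhqdq/paddq block in .Ldone corresponds to adding the two 64-bit lanes each pass accumulated and storing the four resulting sums as the 32-byte hash.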
aixcc-public/challenge-001-exemplar-source
21,581
arch/x86/crypto/serpent-avx2-asm_64.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * x86_64/AVX2 assembler optimized version of Serpent * * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> * * Based on AVX assembler implementation of Serpent by: * Copyright © 2012 Johannes Goetzfried * <Johannes.Goetzfried@informatik.stud.uni-erlangen.de> */ #include <linux/linkage.h> #include <asm/frame.h> #include "glue_helper-asm-avx2.S" .file "serpent-avx2-asm_64.S" .section .rodata.cst16.bswap128_mask, "aM", @progbits, 16 .align 16 .Lbswap128_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 .text #define CTX %rdi #define RNOT %ymm0 #define tp %ymm1 #define RA1 %ymm2 #define RA2 %ymm3 #define RB1 %ymm4 #define RB2 %ymm5 #define RC1 %ymm6 #define RC2 %ymm7 #define RD1 %ymm8 #define RD2 %ymm9 #define RE1 %ymm10 #define RE2 %ymm11 #define RK0 %ymm12 #define RK1 %ymm13 #define RK2 %ymm14 #define RK3 %ymm15 #define RK0x %xmm12 #define RK1x %xmm13 #define RK2x %xmm14 #define RK3x %xmm15 #define S0_1(x0, x1, x2, x3, x4) \ vpor x0, x3, tp; \ vpxor x3, x0, x0; \ vpxor x2, x3, x4; \ vpxor RNOT, x4, x4; \ vpxor x1, tp, x3; \ vpand x0, x1, x1; \ vpxor x4, x1, x1; \ vpxor x0, x2, x2; #define S0_2(x0, x1, x2, x3, x4) \ vpxor x3, x0, x0; \ vpor x0, x4, x4; \ vpxor x2, x0, x0; \ vpand x1, x2, x2; \ vpxor x2, x3, x3; \ vpxor RNOT, x1, x1; \ vpxor x4, x2, x2; \ vpxor x2, x1, x1; #define S1_1(x0, x1, x2, x3, x4) \ vpxor x0, x1, tp; \ vpxor x3, x0, x0; \ vpxor RNOT, x3, x3; \ vpand tp, x1, x4; \ vpor tp, x0, x0; \ vpxor x2, x3, x3; \ vpxor x3, x0, x0; \ vpxor x3, tp, x1; #define S1_2(x0, x1, x2, x3, x4) \ vpxor x4, x3, x3; \ vpor x4, x1, x1; \ vpxor x2, x4, x4; \ vpand x0, x2, x2; \ vpxor x1, x2, x2; \ vpor x0, x1, x1; \ vpxor RNOT, x0, x0; \ vpxor x2, x0, x0; \ vpxor x1, x4, x4; #define S2_1(x0, x1, x2, x3, x4) \ vpxor RNOT, x3, x3; \ vpxor x0, x1, x1; \ vpand x2, x0, tp; \ vpxor x3, tp, tp; \ vpor x0, x3, x3; \ vpxor x1, x2, x2; \ vpxor x1, x3, x3; \ vpand tp, x1, x1; #define S2_2(x0, x1, x2, x3, x4) \ vpxor x2, tp, tp; \ vpand x3, x2, x2; \ vpor x1, x3, x3; \ vpxor RNOT, tp, tp; \ vpxor tp, x3, x3; \ vpxor tp, x0, x4; \ vpxor x2, tp, x0; \ vpor x2, x1, x1; #define S3_1(x0, x1, x2, x3, x4) \ vpxor x3, x1, tp; \ vpor x0, x3, x3; \ vpand x0, x1, x4; \ vpxor x2, x0, x0; \ vpxor tp, x2, x2; \ vpand x3, tp, x1; \ vpxor x3, x2, x2; \ vpor x4, x0, x0; \ vpxor x3, x4, x4; #define S3_2(x0, x1, x2, x3, x4) \ vpxor x0, x1, x1; \ vpand x3, x0, x0; \ vpand x4, x3, x3; \ vpxor x2, x3, x3; \ vpor x1, x4, x4; \ vpand x1, x2, x2; \ vpxor x3, x4, x4; \ vpxor x3, x0, x0; \ vpxor x2, x3, x3; #define S4_1(x0, x1, x2, x3, x4) \ vpand x0, x3, tp; \ vpxor x3, x0, x0; \ vpxor x2, tp, tp; \ vpor x3, x2, x2; \ vpxor x1, x0, x0; \ vpxor tp, x3, x4; \ vpor x0, x2, x2; \ vpxor x1, x2, x2; #define S4_2(x0, x1, x2, x3, x4) \ vpand x0, x1, x1; \ vpxor x4, x1, x1; \ vpand x2, x4, x4; \ vpxor tp, x2, x2; \ vpxor x0, x4, x4; \ vpor x1, tp, x3; \ vpxor RNOT, x1, x1; \ vpxor x0, x3, x3; #define S5_1(x0, x1, x2, x3, x4) \ vpor x0, x1, tp; \ vpxor tp, x2, x2; \ vpxor RNOT, x3, x3; \ vpxor x0, x1, x4; \ vpxor x2, x0, x0; \ vpand x4, tp, x1; \ vpor x3, x4, x4; \ vpxor x0, x4, x4; #define S5_2(x0, x1, x2, x3, x4) \ vpand x3, x0, x0; \ vpxor x3, x1, x1; \ vpxor x2, x3, x3; \ vpxor x1, x0, x0; \ vpand x4, x2, x2; \ vpxor x2, x1, x1; \ vpand x0, x2, x2; \ vpxor x2, x3, x3; #define S6_1(x0, x1, x2, x3, x4) \ vpxor x0, x3, x3; \ vpxor x2, x1, tp; \ vpxor x0, x2, x2; \ vpand x3, x0, x0; \ vpor x3, tp, tp; \ vpxor RNOT, x1, x4; \ vpxor tp, x0, x0; \ vpxor x2, tp, x1; 
#define S6_2(x0, x1, x2, x3, x4) \ vpxor x4, x3, x3; \ vpxor x0, x4, x4; \ vpand x0, x2, x2; \ vpxor x1, x4, x4; \ vpxor x3, x2, x2; \ vpand x1, x3, x3; \ vpxor x0, x3, x3; \ vpxor x2, x1, x1; #define S7_1(x0, x1, x2, x3, x4) \ vpxor RNOT, x1, tp; \ vpxor RNOT, x0, x0; \ vpand x2, tp, x1; \ vpxor x3, x1, x1; \ vpor tp, x3, x3; \ vpxor x2, tp, x4; \ vpxor x3, x2, x2; \ vpxor x0, x3, x3; \ vpor x1, x0, x0; #define S7_2(x0, x1, x2, x3, x4) \ vpand x0, x2, x2; \ vpxor x4, x0, x0; \ vpxor x3, x4, x4; \ vpand x0, x3, x3; \ vpxor x1, x4, x4; \ vpxor x4, x2, x2; \ vpxor x1, x3, x3; \ vpor x0, x4, x4; \ vpxor x1, x4, x4; #define SI0_1(x0, x1, x2, x3, x4) \ vpxor x0, x1, x1; \ vpor x1, x3, tp; \ vpxor x1, x3, x4; \ vpxor RNOT, x0, x0; \ vpxor tp, x2, x2; \ vpxor x0, tp, x3; \ vpand x1, x0, x0; \ vpxor x2, x0, x0; #define SI0_2(x0, x1, x2, x3, x4) \ vpand x3, x2, x2; \ vpxor x4, x3, x3; \ vpxor x3, x2, x2; \ vpxor x3, x1, x1; \ vpand x0, x3, x3; \ vpxor x0, x1, x1; \ vpxor x2, x0, x0; \ vpxor x3, x4, x4; #define SI1_1(x0, x1, x2, x3, x4) \ vpxor x3, x1, x1; \ vpxor x2, x0, tp; \ vpxor RNOT, x2, x2; \ vpor x1, x0, x4; \ vpxor x3, x4, x4; \ vpand x1, x3, x3; \ vpxor x2, x1, x1; \ vpand x4, x2, x2; #define SI1_2(x0, x1, x2, x3, x4) \ vpxor x1, x4, x4; \ vpor x3, x1, x1; \ vpxor tp, x3, x3; \ vpxor tp, x2, x2; \ vpor x4, tp, x0; \ vpxor x4, x2, x2; \ vpxor x0, x1, x1; \ vpxor x1, x4, x4; #define SI2_1(x0, x1, x2, x3, x4) \ vpxor x1, x2, x2; \ vpxor RNOT, x3, tp; \ vpor x2, tp, tp; \ vpxor x3, x2, x2; \ vpxor x0, x3, x4; \ vpxor x1, tp, x3; \ vpor x2, x1, x1; \ vpxor x0, x2, x2; #define SI2_2(x0, x1, x2, x3, x4) \ vpxor x4, x1, x1; \ vpor x3, x4, x4; \ vpxor x3, x2, x2; \ vpxor x2, x4, x4; \ vpand x1, x2, x2; \ vpxor x3, x2, x2; \ vpxor x4, x3, x3; \ vpxor x0, x4, x4; #define SI3_1(x0, x1, x2, x3, x4) \ vpxor x1, x2, x2; \ vpand x2, x1, tp; \ vpxor x0, tp, tp; \ vpor x1, x0, x0; \ vpxor x3, x1, x4; \ vpxor x3, x0, x0; \ vpor tp, x3, x3; \ vpxor x2, tp, x1; #define SI3_2(x0, x1, x2, x3, x4) \ vpxor x3, x1, x1; \ vpxor x2, x0, x0; \ vpxor x3, x2, x2; \ vpand x1, x3, x3; \ vpxor x0, x1, x1; \ vpand x2, x0, x0; \ vpxor x3, x4, x4; \ vpxor x0, x3, x3; \ vpxor x1, x0, x0; #define SI4_1(x0, x1, x2, x3, x4) \ vpxor x3, x2, x2; \ vpand x1, x0, tp; \ vpxor x2, tp, tp; \ vpor x3, x2, x2; \ vpxor RNOT, x0, x4; \ vpxor tp, x1, x1; \ vpxor x2, tp, x0; \ vpand x4, x2, x2; #define SI4_2(x0, x1, x2, x3, x4) \ vpxor x0, x2, x2; \ vpor x4, x0, x0; \ vpxor x3, x0, x0; \ vpand x2, x3, x3; \ vpxor x3, x4, x4; \ vpxor x1, x3, x3; \ vpand x0, x1, x1; \ vpxor x1, x4, x4; \ vpxor x3, x0, x0; #define SI5_1(x0, x1, x2, x3, x4) \ vpor x2, x1, tp; \ vpxor x1, x2, x2; \ vpxor x3, tp, tp; \ vpand x1, x3, x3; \ vpxor x3, x2, x2; \ vpor x0, x3, x3; \ vpxor RNOT, x0, x0; \ vpxor x2, x3, x3; \ vpor x0, x2, x2; #define SI5_2(x0, x1, x2, x3, x4) \ vpxor tp, x1, x4; \ vpxor x4, x2, x2; \ vpand x0, x4, x4; \ vpxor tp, x0, x0; \ vpxor x3, tp, x1; \ vpand x2, x0, x0; \ vpxor x3, x2, x2; \ vpxor x2, x0, x0; \ vpxor x4, x2, x2; \ vpxor x3, x4, x4; #define SI6_1(x0, x1, x2, x3, x4) \ vpxor x2, x0, x0; \ vpand x3, x0, tp; \ vpxor x3, x2, x2; \ vpxor x2, tp, tp; \ vpxor x1, x3, x3; \ vpor x0, x2, x2; \ vpxor x3, x2, x2; \ vpand tp, x3, x3; #define SI6_2(x0, x1, x2, x3, x4) \ vpxor RNOT, tp, tp; \ vpxor x1, x3, x3; \ vpand x2, x1, x1; \ vpxor tp, x0, x4; \ vpxor x4, x3, x3; \ vpxor x2, x4, x4; \ vpxor x1, tp, x0; \ vpxor x0, x2, x2; #define SI7_1(x0, x1, x2, x3, x4) \ vpand x0, x3, tp; \ vpxor x2, x0, x0; \ vpor x3, x2, x2; \ vpxor x1, x3, x4; \ vpxor 
RNOT, x0, x0; \ vpor tp, x1, x1; \ vpxor x0, x4, x4; \ vpand x2, x0, x0; \ vpxor x1, x0, x0; #define SI7_2(x0, x1, x2, x3, x4) \ vpand x2, x1, x1; \ vpxor x2, tp, x3; \ vpxor x3, x4, x4; \ vpand x3, x2, x2; \ vpor x0, x3, x3; \ vpxor x4, x1, x1; \ vpxor x4, x3, x3; \ vpand x0, x4, x4; \ vpxor x2, x4, x4; #define get_key(i,j,t) \ vpbroadcastd (4*(i)+(j))*4(CTX), t; #define K2(x0, x1, x2, x3, x4, i) \ get_key(i, 0, RK0); \ get_key(i, 1, RK1); \ get_key(i, 2, RK2); \ get_key(i, 3, RK3); \ vpxor RK0, x0 ## 1, x0 ## 1; \ vpxor RK1, x1 ## 1, x1 ## 1; \ vpxor RK2, x2 ## 1, x2 ## 1; \ vpxor RK3, x3 ## 1, x3 ## 1; \ vpxor RK0, x0 ## 2, x0 ## 2; \ vpxor RK1, x1 ## 2, x1 ## 2; \ vpxor RK2, x2 ## 2, x2 ## 2; \ vpxor RK3, x3 ## 2, x3 ## 2; #define LK2(x0, x1, x2, x3, x4, i) \ vpslld $13, x0 ## 1, x4 ## 1; \ vpsrld $(32 - 13), x0 ## 1, x0 ## 1; \ vpor x4 ## 1, x0 ## 1, x0 ## 1; \ vpxor x0 ## 1, x1 ## 1, x1 ## 1; \ vpslld $3, x2 ## 1, x4 ## 1; \ vpsrld $(32 - 3), x2 ## 1, x2 ## 1; \ vpor x4 ## 1, x2 ## 1, x2 ## 1; \ vpxor x2 ## 1, x1 ## 1, x1 ## 1; \ vpslld $13, x0 ## 2, x4 ## 2; \ vpsrld $(32 - 13), x0 ## 2, x0 ## 2; \ vpor x4 ## 2, x0 ## 2, x0 ## 2; \ vpxor x0 ## 2, x1 ## 2, x1 ## 2; \ vpslld $3, x2 ## 2, x4 ## 2; \ vpsrld $(32 - 3), x2 ## 2, x2 ## 2; \ vpor x4 ## 2, x2 ## 2, x2 ## 2; \ vpxor x2 ## 2, x1 ## 2, x1 ## 2; \ vpslld $1, x1 ## 1, x4 ## 1; \ vpsrld $(32 - 1), x1 ## 1, x1 ## 1; \ vpor x4 ## 1, x1 ## 1, x1 ## 1; \ vpslld $3, x0 ## 1, x4 ## 1; \ vpxor x2 ## 1, x3 ## 1, x3 ## 1; \ vpxor x4 ## 1, x3 ## 1, x3 ## 1; \ get_key(i, 1, RK1); \ vpslld $1, x1 ## 2, x4 ## 2; \ vpsrld $(32 - 1), x1 ## 2, x1 ## 2; \ vpor x4 ## 2, x1 ## 2, x1 ## 2; \ vpslld $3, x0 ## 2, x4 ## 2; \ vpxor x2 ## 2, x3 ## 2, x3 ## 2; \ vpxor x4 ## 2, x3 ## 2, x3 ## 2; \ get_key(i, 3, RK3); \ vpslld $7, x3 ## 1, x4 ## 1; \ vpsrld $(32 - 7), x3 ## 1, x3 ## 1; \ vpor x4 ## 1, x3 ## 1, x3 ## 1; \ vpslld $7, x1 ## 1, x4 ## 1; \ vpxor x1 ## 1, x0 ## 1, x0 ## 1; \ vpxor x3 ## 1, x0 ## 1, x0 ## 1; \ vpxor x3 ## 1, x2 ## 1, x2 ## 1; \ vpxor x4 ## 1, x2 ## 1, x2 ## 1; \ get_key(i, 0, RK0); \ vpslld $7, x3 ## 2, x4 ## 2; \ vpsrld $(32 - 7), x3 ## 2, x3 ## 2; \ vpor x4 ## 2, x3 ## 2, x3 ## 2; \ vpslld $7, x1 ## 2, x4 ## 2; \ vpxor x1 ## 2, x0 ## 2, x0 ## 2; \ vpxor x3 ## 2, x0 ## 2, x0 ## 2; \ vpxor x3 ## 2, x2 ## 2, x2 ## 2; \ vpxor x4 ## 2, x2 ## 2, x2 ## 2; \ get_key(i, 2, RK2); \ vpxor RK1, x1 ## 1, x1 ## 1; \ vpxor RK3, x3 ## 1, x3 ## 1; \ vpslld $5, x0 ## 1, x4 ## 1; \ vpsrld $(32 - 5), x0 ## 1, x0 ## 1; \ vpor x4 ## 1, x0 ## 1, x0 ## 1; \ vpslld $22, x2 ## 1, x4 ## 1; \ vpsrld $(32 - 22), x2 ## 1, x2 ## 1; \ vpor x4 ## 1, x2 ## 1, x2 ## 1; \ vpxor RK0, x0 ## 1, x0 ## 1; \ vpxor RK2, x2 ## 1, x2 ## 1; \ vpxor RK1, x1 ## 2, x1 ## 2; \ vpxor RK3, x3 ## 2, x3 ## 2; \ vpslld $5, x0 ## 2, x4 ## 2; \ vpsrld $(32 - 5), x0 ## 2, x0 ## 2; \ vpor x4 ## 2, x0 ## 2, x0 ## 2; \ vpslld $22, x2 ## 2, x4 ## 2; \ vpsrld $(32 - 22), x2 ## 2, x2 ## 2; \ vpor x4 ## 2, x2 ## 2, x2 ## 2; \ vpxor RK0, x0 ## 2, x0 ## 2; \ vpxor RK2, x2 ## 2, x2 ## 2; #define KL2(x0, x1, x2, x3, x4, i) \ vpxor RK0, x0 ## 1, x0 ## 1; \ vpxor RK2, x2 ## 1, x2 ## 1; \ vpsrld $5, x0 ## 1, x4 ## 1; \ vpslld $(32 - 5), x0 ## 1, x0 ## 1; \ vpor x4 ## 1, x0 ## 1, x0 ## 1; \ vpxor RK3, x3 ## 1, x3 ## 1; \ vpxor RK1, x1 ## 1, x1 ## 1; \ vpsrld $22, x2 ## 1, x4 ## 1; \ vpslld $(32 - 22), x2 ## 1, x2 ## 1; \ vpor x4 ## 1, x2 ## 1, x2 ## 1; \ vpxor x3 ## 1, x2 ## 1, x2 ## 1; \ vpxor RK0, x0 ## 2, x0 ## 2; \ vpxor RK2, x2 ## 2, x2 ## 2; \ vpsrld $5, x0 ## 2, x4 ## 2; \ vpslld $(32 - 5), x0 
## 2, x0 ## 2; \ vpor x4 ## 2, x0 ## 2, x0 ## 2; \ vpxor RK3, x3 ## 2, x3 ## 2; \ vpxor RK1, x1 ## 2, x1 ## 2; \ vpsrld $22, x2 ## 2, x4 ## 2; \ vpslld $(32 - 22), x2 ## 2, x2 ## 2; \ vpor x4 ## 2, x2 ## 2, x2 ## 2; \ vpxor x3 ## 2, x2 ## 2, x2 ## 2; \ vpxor x3 ## 1, x0 ## 1, x0 ## 1; \ vpslld $7, x1 ## 1, x4 ## 1; \ vpxor x1 ## 1, x0 ## 1, x0 ## 1; \ vpxor x4 ## 1, x2 ## 1, x2 ## 1; \ vpsrld $1, x1 ## 1, x4 ## 1; \ vpslld $(32 - 1), x1 ## 1, x1 ## 1; \ vpor x4 ## 1, x1 ## 1, x1 ## 1; \ vpxor x3 ## 2, x0 ## 2, x0 ## 2; \ vpslld $7, x1 ## 2, x4 ## 2; \ vpxor x1 ## 2, x0 ## 2, x0 ## 2; \ vpxor x4 ## 2, x2 ## 2, x2 ## 2; \ vpsrld $1, x1 ## 2, x4 ## 2; \ vpslld $(32 - 1), x1 ## 2, x1 ## 2; \ vpor x4 ## 2, x1 ## 2, x1 ## 2; \ vpsrld $7, x3 ## 1, x4 ## 1; \ vpslld $(32 - 7), x3 ## 1, x3 ## 1; \ vpor x4 ## 1, x3 ## 1, x3 ## 1; \ vpxor x0 ## 1, x1 ## 1, x1 ## 1; \ vpslld $3, x0 ## 1, x4 ## 1; \ vpxor x4 ## 1, x3 ## 1, x3 ## 1; \ vpsrld $7, x3 ## 2, x4 ## 2; \ vpslld $(32 - 7), x3 ## 2, x3 ## 2; \ vpor x4 ## 2, x3 ## 2, x3 ## 2; \ vpxor x0 ## 2, x1 ## 2, x1 ## 2; \ vpslld $3, x0 ## 2, x4 ## 2; \ vpxor x4 ## 2, x3 ## 2, x3 ## 2; \ vpsrld $13, x0 ## 1, x4 ## 1; \ vpslld $(32 - 13), x0 ## 1, x0 ## 1; \ vpor x4 ## 1, x0 ## 1, x0 ## 1; \ vpxor x2 ## 1, x1 ## 1, x1 ## 1; \ vpxor x2 ## 1, x3 ## 1, x3 ## 1; \ vpsrld $3, x2 ## 1, x4 ## 1; \ vpslld $(32 - 3), x2 ## 1, x2 ## 1; \ vpor x4 ## 1, x2 ## 1, x2 ## 1; \ vpsrld $13, x0 ## 2, x4 ## 2; \ vpslld $(32 - 13), x0 ## 2, x0 ## 2; \ vpor x4 ## 2, x0 ## 2, x0 ## 2; \ vpxor x2 ## 2, x1 ## 2, x1 ## 2; \ vpxor x2 ## 2, x3 ## 2, x3 ## 2; \ vpsrld $3, x2 ## 2, x4 ## 2; \ vpslld $(32 - 3), x2 ## 2, x2 ## 2; \ vpor x4 ## 2, x2 ## 2, x2 ## 2; #define S(SBOX, x0, x1, x2, x3, x4) \ SBOX ## _1(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \ SBOX ## _2(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \ SBOX ## _1(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2); \ SBOX ## _2(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2); #define SP(SBOX, x0, x1, x2, x3, x4, i) \ get_key(i, 0, RK0); \ SBOX ## _1(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \ get_key(i, 2, RK2); \ SBOX ## _2(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \ get_key(i, 3, RK3); \ SBOX ## _1(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2); \ get_key(i, 1, RK1); \ SBOX ## _2(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2); \ #define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \ vpunpckldq x1, x0, t0; \ vpunpckhdq x1, x0, t2; \ vpunpckldq x3, x2, t1; \ vpunpckhdq x3, x2, x3; \ \ vpunpcklqdq t1, t0, x0; \ vpunpckhqdq t1, t0, x1; \ vpunpcklqdq x3, t2, x2; \ vpunpckhqdq x3, t2, x3; #define read_blocks(x0, x1, x2, x3, t0, t1, t2) \ transpose_4x4(x0, x1, x2, x3, t0, t1, t2) #define write_blocks(x0, x1, x2, x3, t0, t1, t2) \ transpose_4x4(x0, x1, x2, x3, t0, t1, t2) .align 8 SYM_FUNC_START_LOCAL(__serpent_enc_blk16) /* input: * %rdi: ctx, CTX * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: plaintext * output: * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: ciphertext */ vpcmpeqd RNOT, RNOT, RNOT; read_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2); read_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2); K2(RA, RB, RC, RD, RE, 0); S(S0, RA, RB, RC, RD, RE); LK2(RC, RB, RD, RA, RE, 1); S(S1, RC, RB, RD, RA, RE); LK2(RE, RD, RA, RC, RB, 2); S(S2, RE, RD, RA, RC, RB); LK2(RB, RD, RE, RC, RA, 3); S(S3, RB, RD, RE, RC, RA); LK2(RC, RA, RD, RB, RE, 4); S(S4, RC, RA, RD, RB, RE); LK2(RA, RD, RB, RE, RC, 5); S(S5, RA, RD, RB, RE, RC); LK2(RC, RA, RD, RE, RB, 6); S(S6, RC, RA, RD, RE, RB); LK2(RD, RB, RA, RE, RC, 7); S(S7, RD, RB, RA, RE, RC); LK2(RC, RA, RE, RD, RB, 
8); S(S0, RC, RA, RE, RD, RB); LK2(RE, RA, RD, RC, RB, 9); S(S1, RE, RA, RD, RC, RB); LK2(RB, RD, RC, RE, RA, 10); S(S2, RB, RD, RC, RE, RA); LK2(RA, RD, RB, RE, RC, 11); S(S3, RA, RD, RB, RE, RC); LK2(RE, RC, RD, RA, RB, 12); S(S4, RE, RC, RD, RA, RB); LK2(RC, RD, RA, RB, RE, 13); S(S5, RC, RD, RA, RB, RE); LK2(RE, RC, RD, RB, RA, 14); S(S6, RE, RC, RD, RB, RA); LK2(RD, RA, RC, RB, RE, 15); S(S7, RD, RA, RC, RB, RE); LK2(RE, RC, RB, RD, RA, 16); S(S0, RE, RC, RB, RD, RA); LK2(RB, RC, RD, RE, RA, 17); S(S1, RB, RC, RD, RE, RA); LK2(RA, RD, RE, RB, RC, 18); S(S2, RA, RD, RE, RB, RC); LK2(RC, RD, RA, RB, RE, 19); S(S3, RC, RD, RA, RB, RE); LK2(RB, RE, RD, RC, RA, 20); S(S4, RB, RE, RD, RC, RA); LK2(RE, RD, RC, RA, RB, 21); S(S5, RE, RD, RC, RA, RB); LK2(RB, RE, RD, RA, RC, 22); S(S6, RB, RE, RD, RA, RC); LK2(RD, RC, RE, RA, RB, 23); S(S7, RD, RC, RE, RA, RB); LK2(RB, RE, RA, RD, RC, 24); S(S0, RB, RE, RA, RD, RC); LK2(RA, RE, RD, RB, RC, 25); S(S1, RA, RE, RD, RB, RC); LK2(RC, RD, RB, RA, RE, 26); S(S2, RC, RD, RB, RA, RE); LK2(RE, RD, RC, RA, RB, 27); S(S3, RE, RD, RC, RA, RB); LK2(RA, RB, RD, RE, RC, 28); S(S4, RA, RB, RD, RE, RC); LK2(RB, RD, RE, RC, RA, 29); S(S5, RB, RD, RE, RC, RA); LK2(RA, RB, RD, RC, RE, 30); S(S6, RA, RB, RD, RC, RE); LK2(RD, RE, RB, RC, RA, 31); S(S7, RD, RE, RB, RC, RA); K2(RA, RB, RC, RD, RE, 32); write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2); write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2); RET; SYM_FUNC_END(__serpent_enc_blk16) .align 8 SYM_FUNC_START_LOCAL(__serpent_dec_blk16) /* input: * %rdi: ctx, CTX * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: ciphertext * output: * RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2: plaintext */ vpcmpeqd RNOT, RNOT, RNOT; read_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2); read_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2); K2(RA, RB, RC, RD, RE, 32); SP(SI7, RA, RB, RC, RD, RE, 31); KL2(RB, RD, RA, RE, RC, 31); SP(SI6, RB, RD, RA, RE, RC, 30); KL2(RA, RC, RE, RB, RD, 30); SP(SI5, RA, RC, RE, RB, RD, 29); KL2(RC, RD, RA, RE, RB, 29); SP(SI4, RC, RD, RA, RE, RB, 28); KL2(RC, RA, RB, RE, RD, 28); SP(SI3, RC, RA, RB, RE, RD, 27); KL2(RB, RC, RD, RE, RA, 27); SP(SI2, RB, RC, RD, RE, RA, 26); KL2(RC, RA, RE, RD, RB, 26); SP(SI1, RC, RA, RE, RD, RB, 25); KL2(RB, RA, RE, RD, RC, 25); SP(SI0, RB, RA, RE, RD, RC, 24); KL2(RE, RC, RA, RB, RD, 24); SP(SI7, RE, RC, RA, RB, RD, 23); KL2(RC, RB, RE, RD, RA, 23); SP(SI6, RC, RB, RE, RD, RA, 22); KL2(RE, RA, RD, RC, RB, 22); SP(SI5, RE, RA, RD, RC, RB, 21); KL2(RA, RB, RE, RD, RC, 21); SP(SI4, RA, RB, RE, RD, RC, 20); KL2(RA, RE, RC, RD, RB, 20); SP(SI3, RA, RE, RC, RD, RB, 19); KL2(RC, RA, RB, RD, RE, 19); SP(SI2, RC, RA, RB, RD, RE, 18); KL2(RA, RE, RD, RB, RC, 18); SP(SI1, RA, RE, RD, RB, RC, 17); KL2(RC, RE, RD, RB, RA, 17); SP(SI0, RC, RE, RD, RB, RA, 16); KL2(RD, RA, RE, RC, RB, 16); SP(SI7, RD, RA, RE, RC, RB, 15); KL2(RA, RC, RD, RB, RE, 15); SP(SI6, RA, RC, RD, RB, RE, 14); KL2(RD, RE, RB, RA, RC, 14); SP(SI5, RD, RE, RB, RA, RC, 13); KL2(RE, RC, RD, RB, RA, 13); SP(SI4, RE, RC, RD, RB, RA, 12); KL2(RE, RD, RA, RB, RC, 12); SP(SI3, RE, RD, RA, RB, RC, 11); KL2(RA, RE, RC, RB, RD, 11); SP(SI2, RA, RE, RC, RB, RD, 10); KL2(RE, RD, RB, RC, RA, 10); SP(SI1, RE, RD, RB, RC, RA, 9); KL2(RA, RD, RB, RC, RE, 9); SP(SI0, RA, RD, RB, RC, RE, 8); KL2(RB, RE, RD, RA, RC, 8); SP(SI7, RB, RE, RD, RA, RC, 7); KL2(RE, RA, RB, RC, RD, 7); SP(SI6, RE, RA, RB, RC, RD, 6); KL2(RB, RD, RC, RE, RA, 6); SP(SI5, RB, RD, RC, RE, RA, 5); KL2(RD, RA, RB, RC, RE, 5); SP(SI4, RD, RA, RB, RC, RE, 4); KL2(RD, RB, RE, RC, RA, 4); 
SP(SI3, RD, RB, RE, RC, RA, 3); KL2(RE, RD, RA, RC, RB, 3); SP(SI2, RE, RD, RA, RC, RB, 2); KL2(RD, RB, RC, RA, RE, 2); SP(SI1, RD, RB, RC, RA, RE, 1); KL2(RE, RB, RC, RA, RD, 1); S(SI0, RE, RB, RC, RA, RD); K2(RC, RD, RB, RE, RA, 0); write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2); write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2); RET; SYM_FUNC_END(__serpent_dec_blk16) SYM_FUNC_START(serpent_ecb_enc_16way) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src */ FRAME_BEGIN vzeroupper; load_16way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); call __serpent_enc_blk16; store_16way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); vzeroupper; FRAME_END RET; SYM_FUNC_END(serpent_ecb_enc_16way) SYM_FUNC_START(serpent_ecb_dec_16way) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src */ FRAME_BEGIN vzeroupper; load_16way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); call __serpent_dec_blk16; store_16way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2); vzeroupper; FRAME_END RET; SYM_FUNC_END(serpent_ecb_dec_16way) SYM_FUNC_START(serpent_cbc_dec_16way) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src */ FRAME_BEGIN vzeroupper; load_16way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); call __serpent_dec_blk16; store_cbc_16way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2, RK0); vzeroupper; FRAME_END RET; SYM_FUNC_END(serpent_cbc_dec_16way)
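Editor's note: the LK2/KL2 macros interleave two 8-block halves and fold the key schedule loads into the rotation sequence, which obscures the underlying operation. Per 128-bit block, LK2 is just Serpent's linear transformation followed by the next round's key XOR. A scalar sketch, with illustrative names (rol32, serpent_lt_xor_key) rather than anything from the kernel's C implementation:

    #include <stdint.h>

    static uint32_t rol32(uint32_t x, int n)
    {
        return (x << n) | (x >> (32 - n));
    }

    /* Serpent linear transformation, then XOR of the round key rk[0..3];
     * b[0..3] are the four 32-bit words of one block */
    static void serpent_lt_xor_key(uint32_t b[4], const uint32_t rk[4])
    {
        b[0] = rol32(b[0], 13);
        b[2] = rol32(b[2], 3);
        b[1] ^= b[0] ^ b[2];
        b[3] ^= b[2] ^ (b[0] << 3);
        b[1] = rol32(b[1], 1);
        b[3] = rol32(b[3], 7);
        b[0] ^= b[1] ^ b[3];
        b[2] ^= b[3] ^ (b[1] << 7);
        b[0] = rol32(b[0], 5);
        b[2] = rol32(b[2], 22);

        b[0] ^= rk[0];
        b[1] ^= rk[1];
        b[2] ^= rk[2];
        b[3] ^= rk[3];
    }

KL2 used on the decryption path is the same thing run backwards: undo the key XOR first, then apply the inverse rotations and XORs in reverse order.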
aixcc-public/challenge-001-exemplar-source
11,411
arch/x86/crypto/sha1_ssse3_asm.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * This is a SIMD SHA-1 implementation. It requires the Intel(R) Supplemental * SSE3 instruction set extensions introduced in Intel Core Microarchitecture * processors. CPUs supporting Intel(R) AVX extensions will get an additional * boost. * * This work was inspired by the vectorized implementation of Dean Gaudet. * Additional information on it can be found at: * http://www.arctic.org/~dean/crypto/sha1.html * * It was improved upon with more efficient vectorization of the message * scheduling. This implementation has also been optimized for all current and * several future generations of Intel CPUs. * * See this article for more information about the implementation details: * http://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1/ * * Copyright (C) 2010, Intel Corp. * Authors: Maxim Locktyukhin <maxim.locktyukhin@intel.com> * Ronen Zohar <ronen.zohar@intel.com> * * Converted to AT&T syntax and adapted for inclusion in the Linux kernel: * Author: Mathias Krause <minipli@googlemail.com> */ #include <linux/linkage.h> #include <linux/cfi_types.h> #define CTX %rdi // arg1 #define BUF %rsi // arg2 #define CNT %rdx // arg3 #define REG_A %ecx #define REG_B %esi #define REG_C %edi #define REG_D %r12d #define REG_E %edx #define REG_T1 %eax #define REG_T2 %ebx #define K_BASE %r8 #define HASH_PTR %r9 #define BUFFER_PTR %r10 #define BUFFER_END %r11 #define W_TMP1 %xmm0 #define W_TMP2 %xmm9 #define W0 %xmm1 #define W4 %xmm2 #define W8 %xmm3 #define W12 %xmm4 #define W16 %xmm5 #define W20 %xmm6 #define W24 %xmm7 #define W28 %xmm8 #define XMM_SHUFB_BSWAP %xmm10 /* we keep window of 64 w[i]+K pre-calculated values in a circular buffer */ #define WK(t) (((t) & 15) * 4)(%rsp) #define W_PRECALC_AHEAD 16 /* * This macro implements the SHA-1 function's body for single 64-byte block * param: function's name */ .macro SHA1_VECTOR_ASM name SYM_TYPED_FUNC_START(\name) push %rbx push %r12 push %rbp mov %rsp, %rbp sub $64, %rsp # allocate workspace and $~15, %rsp # align stack mov CTX, HASH_PTR mov BUF, BUFFER_PTR shl $6, CNT # multiply by 64 add BUF, CNT mov CNT, BUFFER_END lea K_XMM_AR(%rip), K_BASE xmm_mov BSWAP_SHUFB_CTL(%rip), XMM_SHUFB_BSWAP SHA1_PIPELINED_MAIN_BODY # cleanup workspace mov $8, %ecx mov %rsp, %rdi xor %eax, %eax rep stosq mov %rbp, %rsp # deallocate workspace pop %rbp pop %r12 pop %rbx RET SYM_FUNC_END(\name) .endm /* * This macro implements 80 rounds of SHA-1 for one 64-byte block */ .macro SHA1_PIPELINED_MAIN_BODY INIT_REGALLOC mov (HASH_PTR), A mov 4(HASH_PTR), B mov 8(HASH_PTR), C mov 12(HASH_PTR), D mov 16(HASH_PTR), E .set i, 0 .rept W_PRECALC_AHEAD W_PRECALC i .set i, (i+1) .endr .align 4 1: RR F1,A,B,C,D,E,0 RR F1,D,E,A,B,C,2 RR F1,B,C,D,E,A,4 RR F1,E,A,B,C,D,6 RR F1,C,D,E,A,B,8 RR F1,A,B,C,D,E,10 RR F1,D,E,A,B,C,12 RR F1,B,C,D,E,A,14 RR F1,E,A,B,C,D,16 RR F1,C,D,E,A,B,18 RR F2,A,B,C,D,E,20 RR F2,D,E,A,B,C,22 RR F2,B,C,D,E,A,24 RR F2,E,A,B,C,D,26 RR F2,C,D,E,A,B,28 RR F2,A,B,C,D,E,30 RR F2,D,E,A,B,C,32 RR F2,B,C,D,E,A,34 RR F2,E,A,B,C,D,36 RR F2,C,D,E,A,B,38 RR F3,A,B,C,D,E,40 RR F3,D,E,A,B,C,42 RR F3,B,C,D,E,A,44 RR F3,E,A,B,C,D,46 RR F3,C,D,E,A,B,48 RR F3,A,B,C,D,E,50 RR F3,D,E,A,B,C,52 RR F3,B,C,D,E,A,54 RR F3,E,A,B,C,D,56 RR F3,C,D,E,A,B,58 add $64, BUFFER_PTR # move to the next 64-byte block cmp BUFFER_END, BUFFER_PTR # if the current is the last one use cmovae K_BASE, BUFFER_PTR # dummy source to avoid buffer overrun RR F4,A,B,C,D,E,60 RR F4,D,E,A,B,C,62 RR F4,B,C,D,E,A,64 RR 
F4,E,A,B,C,D,66 RR F4,C,D,E,A,B,68 RR F4,A,B,C,D,E,70 RR F4,D,E,A,B,C,72 RR F4,B,C,D,E,A,74 RR F4,E,A,B,C,D,76 RR F4,C,D,E,A,B,78 UPDATE_HASH (HASH_PTR), A UPDATE_HASH 4(HASH_PTR), B UPDATE_HASH 8(HASH_PTR), C UPDATE_HASH 12(HASH_PTR), D UPDATE_HASH 16(HASH_PTR), E RESTORE_RENAMED_REGS cmp K_BASE, BUFFER_PTR # K_BASE means, we reached the end jne 1b .endm .macro INIT_REGALLOC .set A, REG_A .set B, REG_B .set C, REG_C .set D, REG_D .set E, REG_E .set T1, REG_T1 .set T2, REG_T2 .endm .macro RESTORE_RENAMED_REGS # order is important (REG_C is where it should be) mov B, REG_B mov D, REG_D mov A, REG_A mov E, REG_E .endm .macro SWAP_REG_NAMES a, b .set _T, \a .set \a, \b .set \b, _T .endm .macro F1 b, c, d mov \c, T1 SWAP_REG_NAMES \c, T1 xor \d, T1 and \b, T1 xor \d, T1 .endm .macro F2 b, c, d mov \d, T1 SWAP_REG_NAMES \d, T1 xor \c, T1 xor \b, T1 .endm .macro F3 b, c ,d mov \c, T1 SWAP_REG_NAMES \c, T1 mov \b, T2 or \b, T1 and \c, T2 and \d, T1 or T2, T1 .endm .macro F4 b, c, d F2 \b, \c, \d .endm .macro UPDATE_HASH hash, val add \hash, \val mov \val, \hash .endm /* * RR does two rounds of SHA-1 back to back with W[] pre-calc * t1 = F(b, c, d); e += w(i) * e += t1; b <<= 30; d += w(i+1); * t1 = F(a, b, c); * d += t1; a <<= 5; * e += a; * t1 = e; a >>= 7; * t1 <<= 5; * d += t1; */ .macro RR F, a, b, c, d, e, round add WK(\round), \e \F \b, \c, \d # t1 = F(b, c, d); W_PRECALC (\round + W_PRECALC_AHEAD) rol $30, \b add T1, \e add WK(\round + 1), \d \F \a, \b, \c W_PRECALC (\round + W_PRECALC_AHEAD + 1) rol $5, \a add \a, \e add T1, \d ror $7, \a # (a <<r 5) >>r 7) => a <<r 30) mov \e, T1 SWAP_REG_NAMES \e, T1 rol $5, T1 add T1, \d # write: \a, \b # rotate: \a<=\d, \b<=\e, \c<=\a, \d<=\b, \e<=\c .endm .macro W_PRECALC r .set i, \r .if (i < 20) .set K_XMM, 0 .elseif (i < 40) .set K_XMM, 16 .elseif (i < 60) .set K_XMM, 32 .elseif (i < 80) .set K_XMM, 48 .endif .if ((i < 16) || ((i >= 80) && (i < (80 + W_PRECALC_AHEAD)))) .set i, ((\r) % 80) # pre-compute for the next iteration .if (i == 0) W_PRECALC_RESET .endif W_PRECALC_00_15 .elseif (i<32) W_PRECALC_16_31 .elseif (i < 80) // rounds 32-79 W_PRECALC_32_79 .endif .endm .macro W_PRECALC_RESET .set W, W0 .set W_minus_04, W4 .set W_minus_08, W8 .set W_minus_12, W12 .set W_minus_16, W16 .set W_minus_20, W20 .set W_minus_24, W24 .set W_minus_28, W28 .set W_minus_32, W .endm .macro W_PRECALC_ROTATE .set W_minus_32, W_minus_28 .set W_minus_28, W_minus_24 .set W_minus_24, W_minus_20 .set W_minus_20, W_minus_16 .set W_minus_16, W_minus_12 .set W_minus_12, W_minus_08 .set W_minus_08, W_minus_04 .set W_minus_04, W .set W, W_minus_32 .endm .macro W_PRECALC_SSSE3 .macro W_PRECALC_00_15 W_PRECALC_00_15_SSSE3 .endm .macro W_PRECALC_16_31 W_PRECALC_16_31_SSSE3 .endm .macro W_PRECALC_32_79 W_PRECALC_32_79_SSSE3 .endm /* message scheduling pre-compute for rounds 0-15 */ .macro W_PRECALC_00_15_SSSE3 .if ((i & 3) == 0) movdqu (i*4)(BUFFER_PTR), W_TMP1 .elseif ((i & 3) == 1) pshufb XMM_SHUFB_BSWAP, W_TMP1 movdqa W_TMP1, W .elseif ((i & 3) == 2) paddd (K_BASE), W_TMP1 .elseif ((i & 3) == 3) movdqa W_TMP1, WK(i&~3) W_PRECALC_ROTATE .endif .endm /* message scheduling pre-compute for rounds 16-31 * * - calculating last 32 w[i] values in 8 XMM registers * - pre-calculate K+w[i] values and store to mem, for later load by ALU add * instruction * * some "heavy-lifting" vectorization for rounds 16-31 due to w[i]->w[i-3] * dependency, but improves for 32-79 */ .macro W_PRECALC_16_31_SSSE3 # blended scheduling of vector and scalar instruction streams, one 4-wide # vector 
iteration / 4 scalar rounds .if ((i & 3) == 0) movdqa W_minus_12, W palignr $8, W_minus_16, W # w[i-14] movdqa W_minus_04, W_TMP1 psrldq $4, W_TMP1 # w[i-3] pxor W_minus_08, W .elseif ((i & 3) == 1) pxor W_minus_16, W_TMP1 pxor W_TMP1, W movdqa W, W_TMP2 movdqa W, W_TMP1 pslldq $12, W_TMP2 .elseif ((i & 3) == 2) psrld $31, W pslld $1, W_TMP1 por W, W_TMP1 movdqa W_TMP2, W psrld $30, W_TMP2 pslld $2, W .elseif ((i & 3) == 3) pxor W, W_TMP1 pxor W_TMP2, W_TMP1 movdqa W_TMP1, W paddd K_XMM(K_BASE), W_TMP1 movdqa W_TMP1, WK(i&~3) W_PRECALC_ROTATE .endif .endm /* message scheduling pre-compute for rounds 32-79 * * in SHA-1 specification: w[i] = (w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16]) rol 1 * instead we do equal: w[i] = (w[i-6] ^ w[i-16] ^ w[i-28] ^ w[i-32]) rol 2 * allows more efficient vectorization since w[i]=>w[i-3] dependency is broken */ .macro W_PRECALC_32_79_SSSE3 .if ((i & 3) == 0) movdqa W_minus_04, W_TMP1 pxor W_minus_28, W # W is W_minus_32 before xor palignr $8, W_minus_08, W_TMP1 .elseif ((i & 3) == 1) pxor W_minus_16, W pxor W_TMP1, W movdqa W, W_TMP1 .elseif ((i & 3) == 2) psrld $30, W pslld $2, W_TMP1 por W, W_TMP1 .elseif ((i & 3) == 3) movdqa W_TMP1, W paddd K_XMM(K_BASE), W_TMP1 movdqa W_TMP1, WK(i&~3) W_PRECALC_ROTATE .endif .endm .endm // W_PRECALC_SSSE3 #define K1 0x5a827999 #define K2 0x6ed9eba1 #define K3 0x8f1bbcdc #define K4 0xca62c1d6 .section .rodata .align 16 K_XMM_AR: .long K1, K1, K1, K1 .long K2, K2, K2, K2 .long K3, K3, K3, K3 .long K4, K4, K4, K4 BSWAP_SHUFB_CTL: .long 0x00010203 .long 0x04050607 .long 0x08090a0b .long 0x0c0d0e0f .section .text W_PRECALC_SSSE3 .macro xmm_mov a, b movdqu \a,\b .endm /* * SSSE3 optimized implementation: * * extern "C" void sha1_transform_ssse3(struct sha1_state *state, * const u8 *data, int blocks); * * Note that struct sha1_state is assumed to begin with u32 state[5]. 
*/ SHA1_VECTOR_ASM sha1_transform_ssse3 .macro W_PRECALC_AVX .purgem W_PRECALC_00_15 .macro W_PRECALC_00_15 W_PRECALC_00_15_AVX .endm .purgem W_PRECALC_16_31 .macro W_PRECALC_16_31 W_PRECALC_16_31_AVX .endm .purgem W_PRECALC_32_79 .macro W_PRECALC_32_79 W_PRECALC_32_79_AVX .endm .macro W_PRECALC_00_15_AVX .if ((i & 3) == 0) vmovdqu (i*4)(BUFFER_PTR), W_TMP1 .elseif ((i & 3) == 1) vpshufb XMM_SHUFB_BSWAP, W_TMP1, W .elseif ((i & 3) == 2) vpaddd (K_BASE), W, W_TMP1 .elseif ((i & 3) == 3) vmovdqa W_TMP1, WK(i&~3) W_PRECALC_ROTATE .endif .endm .macro W_PRECALC_16_31_AVX .if ((i & 3) == 0) vpalignr $8, W_minus_16, W_minus_12, W # w[i-14] vpsrldq $4, W_minus_04, W_TMP1 # w[i-3] vpxor W_minus_08, W, W vpxor W_minus_16, W_TMP1, W_TMP1 .elseif ((i & 3) == 1) vpxor W_TMP1, W, W vpslldq $12, W, W_TMP2 vpslld $1, W, W_TMP1 .elseif ((i & 3) == 2) vpsrld $31, W, W vpor W, W_TMP1, W_TMP1 vpslld $2, W_TMP2, W vpsrld $30, W_TMP2, W_TMP2 .elseif ((i & 3) == 3) vpxor W, W_TMP1, W_TMP1 vpxor W_TMP2, W_TMP1, W vpaddd K_XMM(K_BASE), W, W_TMP1 vmovdqu W_TMP1, WK(i&~3) W_PRECALC_ROTATE .endif .endm .macro W_PRECALC_32_79_AVX .if ((i & 3) == 0) vpalignr $8, W_minus_08, W_minus_04, W_TMP1 vpxor W_minus_28, W, W # W is W_minus_32 before xor .elseif ((i & 3) == 1) vpxor W_minus_16, W_TMP1, W_TMP1 vpxor W_TMP1, W, W .elseif ((i & 3) == 2) vpslld $2, W, W_TMP1 vpsrld $30, W, W vpor W, W_TMP1, W .elseif ((i & 3) == 3) vpaddd K_XMM(K_BASE), W, W_TMP1 vmovdqu W_TMP1, WK(i&~3) W_PRECALC_ROTATE .endif .endm .endm // W_PRECALC_AVX W_PRECALC_AVX .purgem xmm_mov .macro xmm_mov a, b vmovdqu \a,\b .endm /* AVX optimized implementation: * extern "C" void sha1_transform_avx(struct sha1_state *state, * const u8 *data, int blocks); */ SHA1_VECTOR_ASM sha1_transform_avx
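Editor's note: the RR macro fuses two SHA-1 rounds and hides the state rotation inside register renaming (SWAP_REG_NAMES and the \a..\e argument order), which makes the data flow hard to follow. A single round in plain C looks like the sketch below; f1/f2/f3 mirror what the F1/F2/F3 macros compute, wk is the precalculated W[t]+K[t] word from the circular WK() buffer, and the names are mine.

    #include <stdint.h>

    static uint32_t rol32(uint32_t x, int n)
    {
        return (x << n) | (x >> (32 - n));
    }

    /* Boolean functions as the F1/F2/F3 macros compute them */
    static uint32_t f1(uint32_t b, uint32_t c, uint32_t d)
    {
        return ((c ^ d) & b) ^ d;               /* = (b&c) | (~b&d) */
    }

    static uint32_t f2(uint32_t b, uint32_t c, uint32_t d)
    {
        return b ^ c ^ d;                       /* F4 (rounds 60-79) is the same */
    }

    static uint32_t f3(uint32_t b, uint32_t c, uint32_t d)
    {
        return (b & c) | ((b | c) & d);         /* majority */
    }

    /* one round t on state st[] = {a,b,c,d,e}; wk = W[t] + K[t] */
    static void sha1_round(uint32_t st[5], int t, uint32_t wk)
    {
        uint32_t b = st[1], c = st[2], d = st[3];
        uint32_t f = (t < 20) ? f1(b, c, d) :
                     (t < 40) ? f2(b, c, d) :
                     (t < 60) ? f3(b, c, d) : f2(b, c, d);
        uint32_t tmp = rol32(st[0], 5) + f + st[4] + wk;

        st[4] = st[3];
        st[3] = st[2];
        st[2] = rol32(st[1], 30);
        st[1] = st[0];
        st[0] = tmp;
    }

The assembly performs the same update but never moves the state words: the five-element shift at the end is done by renaming which register plays a, b, c, d and e in the next round.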
aixcc-public/challenge-001-exemplar-source
7,550
arch/x86/crypto/twofish-x86_64-asm_64.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /*************************************************************************** * Copyright (C) 2006 by Joachim Fritschi, <jfritschi@freenet.de> * * * ***************************************************************************/ .file "twofish-x86_64-asm.S" .text #include <linux/linkage.h> #include <asm/asm-offsets.h> #define a_offset 0 #define b_offset 4 #define c_offset 8 #define d_offset 12 /* Structure of the crypto context struct*/ #define s0 0 /* S0 Array 256 Words each */ #define s1 1024 /* S1 Array */ #define s2 2048 /* S2 Array */ #define s3 3072 /* S3 Array */ #define w 4096 /* 8 whitening keys (word) */ #define k 4128 /* key 1-32 ( word ) */ /* define a few register aliases to allow macro substitution */ #define R0 %rax #define R0D %eax #define R0B %al #define R0H %ah #define R1 %rbx #define R1D %ebx #define R1B %bl #define R1H %bh #define R2 %rcx #define R2D %ecx #define R2B %cl #define R2H %ch #define R3 %rdx #define R3D %edx #define R3B %dl #define R3H %dh /* performs input whitening */ #define input_whitening(src,context,offset)\ xor w+offset(context), src; /* performs input whitening */ #define output_whitening(src,context,offset)\ xor w+16+offset(context), src; /* * a input register containing a (rotated 16) * b input register containing b * c input register containing c * d input register containing d (already rol $1) * operations on a and b are interleaved to increase performance */ #define encrypt_round(a,b,c,d,round)\ movzx b ## B, %edi;\ mov s1(%r11,%rdi,4),%r8d;\ movzx a ## B, %edi;\ mov s2(%r11,%rdi,4),%r9d;\ movzx b ## H, %edi;\ ror $16, b ## D;\ xor s2(%r11,%rdi,4),%r8d;\ movzx a ## H, %edi;\ ror $16, a ## D;\ xor s3(%r11,%rdi,4),%r9d;\ movzx b ## B, %edi;\ xor s3(%r11,%rdi,4),%r8d;\ movzx a ## B, %edi;\ xor (%r11,%rdi,4), %r9d;\ movzx b ## H, %edi;\ ror $15, b ## D;\ xor (%r11,%rdi,4), %r8d;\ movzx a ## H, %edi;\ xor s1(%r11,%rdi,4),%r9d;\ add %r8d, %r9d;\ add %r9d, %r8d;\ add k+round(%r11), %r9d;\ xor %r9d, c ## D;\ rol $15, c ## D;\ add k+4+round(%r11),%r8d;\ xor %r8d, d ## D; /* * a input register containing a(rotated 16) * b input register containing b * c input register containing c * d input register containing d (already rol $1) * operations on a and b are interleaved to increase performance * during the round a and b are prepared for the output whitening */ #define encrypt_last_round(a,b,c,d,round)\ mov b ## D, %r10d;\ shl $32, %r10;\ movzx b ## B, %edi;\ mov s1(%r11,%rdi,4),%r8d;\ movzx a ## B, %edi;\ mov s2(%r11,%rdi,4),%r9d;\ movzx b ## H, %edi;\ ror $16, b ## D;\ xor s2(%r11,%rdi,4),%r8d;\ movzx a ## H, %edi;\ ror $16, a ## D;\ xor s3(%r11,%rdi,4),%r9d;\ movzx b ## B, %edi;\ xor s3(%r11,%rdi,4),%r8d;\ movzx a ## B, %edi;\ xor (%r11,%rdi,4), %r9d;\ xor a, %r10;\ movzx b ## H, %edi;\ xor (%r11,%rdi,4), %r8d;\ movzx a ## H, %edi;\ xor s1(%r11,%rdi,4),%r9d;\ add %r8d, %r9d;\ add %r9d, %r8d;\ add k+round(%r11), %r9d;\ xor %r9d, c ## D;\ ror $1, c ## D;\ add k+4+round(%r11),%r8d;\ xor %r8d, d ## D /* * a input register containing a * b input register containing b (rotated 16) * c input register containing c (already rol $1) * d input register containing d * operations on a and b are interleaved to increase performance */ #define decrypt_round(a,b,c,d,round)\ movzx a ## B, %edi;\ mov (%r11,%rdi,4), %r9d;\ movzx b ## B, %edi;\ mov s3(%r11,%rdi,4),%r8d;\ movzx a ## H, %edi;\ ror $16, a ## D;\ xor s1(%r11,%rdi,4),%r9d;\ movzx b ## H, %edi;\ ror $16, b ## D;\ xor (%r11,%rdi,4), %r8d;\ movzx a ## B, %edi;\ xor 
s2(%r11,%rdi,4),%r9d;\ movzx b ## B, %edi;\ xor s1(%r11,%rdi,4),%r8d;\ movzx a ## H, %edi;\ ror $15, a ## D;\ xor s3(%r11,%rdi,4),%r9d;\ movzx b ## H, %edi;\ xor s2(%r11,%rdi,4),%r8d;\ add %r8d, %r9d;\ add %r9d, %r8d;\ add k+round(%r11), %r9d;\ xor %r9d, c ## D;\ add k+4+round(%r11),%r8d;\ xor %r8d, d ## D;\ rol $15, d ## D; /* * a input register containing a * b input register containing b * c input register containing c (already rol $1) * d input register containing d * operations on a and b are interleaved to increase performance * during the round a and b are prepared for the output whitening */ #define decrypt_last_round(a,b,c,d,round)\ movzx a ## B, %edi;\ mov (%r11,%rdi,4), %r9d;\ movzx b ## B, %edi;\ mov s3(%r11,%rdi,4),%r8d;\ movzx b ## H, %edi;\ ror $16, b ## D;\ xor (%r11,%rdi,4), %r8d;\ movzx a ## H, %edi;\ mov b ## D, %r10d;\ shl $32, %r10;\ xor a, %r10;\ ror $16, a ## D;\ xor s1(%r11,%rdi,4),%r9d;\ movzx b ## B, %edi;\ xor s1(%r11,%rdi,4),%r8d;\ movzx a ## B, %edi;\ xor s2(%r11,%rdi,4),%r9d;\ movzx b ## H, %edi;\ xor s2(%r11,%rdi,4),%r8d;\ movzx a ## H, %edi;\ xor s3(%r11,%rdi,4),%r9d;\ add %r8d, %r9d;\ add %r9d, %r8d;\ add k+round(%r11), %r9d;\ xor %r9d, c ## D;\ add k+4+round(%r11),%r8d;\ xor %r8d, d ## D;\ ror $1, d ## D; SYM_FUNC_START(twofish_enc_blk) pushq R1 /* %rdi contains the ctx address */ /* %rsi contains the output address */ /* %rdx contains the input address */ /* ctx address is moved to free one non-rex register as target for the 8bit high operations */ mov %rdi, %r11 movq (R3), R1 movq 8(R3), R3 input_whitening(R1,%r11,a_offset) input_whitening(R3,%r11,c_offset) mov R1D, R0D rol $16, R0D shr $32, R1 mov R3D, R2D shr $32, R3 rol $1, R3D encrypt_round(R0,R1,R2,R3,0); encrypt_round(R2,R3,R0,R1,8); encrypt_round(R0,R1,R2,R3,2*8); encrypt_round(R2,R3,R0,R1,3*8); encrypt_round(R0,R1,R2,R3,4*8); encrypt_round(R2,R3,R0,R1,5*8); encrypt_round(R0,R1,R2,R3,6*8); encrypt_round(R2,R3,R0,R1,7*8); encrypt_round(R0,R1,R2,R3,8*8); encrypt_round(R2,R3,R0,R1,9*8); encrypt_round(R0,R1,R2,R3,10*8); encrypt_round(R2,R3,R0,R1,11*8); encrypt_round(R0,R1,R2,R3,12*8); encrypt_round(R2,R3,R0,R1,13*8); encrypt_round(R0,R1,R2,R3,14*8); encrypt_last_round(R2,R3,R0,R1,15*8); output_whitening(%r10,%r11,a_offset) movq %r10, (%rsi) shl $32, R1 xor R0, R1 output_whitening(R1,%r11,c_offset) movq R1, 8(%rsi) popq R1 movl $1,%eax RET SYM_FUNC_END(twofish_enc_blk) SYM_FUNC_START(twofish_dec_blk) pushq R1 /* %rdi contains the ctx address */ /* %rsi contains the output address */ /* %rdx contains the input address */ /* ctx address is moved to free one non-rex register as target for the 8bit high operations */ mov %rdi, %r11 movq (R3), R1 movq 8(R3), R3 output_whitening(R1,%r11,a_offset) output_whitening(R3,%r11,c_offset) mov R1D, R0D shr $32, R1 rol $16, R1D mov R3D, R2D shr $32, R3 rol $1, R2D decrypt_round(R0,R1,R2,R3,15*8); decrypt_round(R2,R3,R0,R1,14*8); decrypt_round(R0,R1,R2,R3,13*8); decrypt_round(R2,R3,R0,R1,12*8); decrypt_round(R0,R1,R2,R3,11*8); decrypt_round(R2,R3,R0,R1,10*8); decrypt_round(R0,R1,R2,R3,9*8); decrypt_round(R2,R3,R0,R1,8*8); decrypt_round(R0,R1,R2,R3,7*8); decrypt_round(R2,R3,R0,R1,6*8); decrypt_round(R0,R1,R2,R3,5*8); decrypt_round(R2,R3,R0,R1,4*8); decrypt_round(R0,R1,R2,R3,3*8); decrypt_round(R2,R3,R0,R1,2*8); decrypt_round(R0,R1,R2,R3,1*8); decrypt_last_round(R2,R3,R0,R1,0); input_whitening(%r10,%r11,a_offset) movq %r10, (%rsi) shl $32, R1 xor R0, R1 input_whitening(R1,%r11,c_offset) movq R1, 8(%rsi) popq R1 movl $1,%eax RET SYM_FUNC_END(twofish_dec_blk)
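Editor's note: stripped of the instruction interleaving and of the pre-rotated register convention described in the comments (a arrives rotated 16, c and d already rotated by 1), each encrypt_round computes the textbook Twofish round: two g() table lookups, a pseudo-Hadamard transform, round-subkey addition, and the 1-bit rotations. The sketch below is written under those assumptions; the struct layout and names are illustrative only, though the four tables correspond to the key-dependent s0..s3 arrays above and rk0/rk1 to the two k-array words the macro adds.

    #include <stdint.h>

    struct twofish_tabs {
        uint32_t s[4][256];     /* key-dependent S-boxes with MDS folded in */
    };

    static uint32_t rol32(uint32_t x, int n) { return (x << n) | (x >> (32 - n)); }
    static uint32_t ror32(uint32_t x, int n) { return (x >> n) | (x << (32 - n)); }

    static uint32_t g(const struct twofish_tabs *t, uint32_t x)
    {
        return t->s[0][x & 0xff] ^ t->s[1][(x >> 8) & 0xff] ^
               t->s[2][(x >> 16) & 0xff] ^ t->s[3][x >> 24];
    }

    /* one encryption round; rk0/rk1 are the two round subkeys */
    static void twofish_round(const struct twofish_tabs *tabs,
                              uint32_t rk0, uint32_t rk1,
                              uint32_t a, uint32_t b, uint32_t *c, uint32_t *d)
    {
        uint32_t t0 = g(tabs, a);
        uint32_t t1 = g(tabs, rol32(b, 8));

        *c = ror32(*c ^ (t0 + t1 + rk0), 1);        /* PHT + key, then >>> 1 */
        *d = rol32(*d, 1) ^ (t0 + 2 * t1 + rk1);    /* <<< 1, then PHT + key */
    }

The assembly reorders which byte of a and b is looked up first only because the inputs are kept pre-rotated; the net byte-to-table mapping is the one shown here.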
aixcc-public/challenge-001-exemplar-source
1,080
arch/x86/crypto/glue_helper-asm-avx.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Shared glue code for 128bit block ciphers, AVX assembler macros * * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi> */ #define load_8way(src, x0, x1, x2, x3, x4, x5, x6, x7) \ vmovdqu (0*16)(src), x0; \ vmovdqu (1*16)(src), x1; \ vmovdqu (2*16)(src), x2; \ vmovdqu (3*16)(src), x3; \ vmovdqu (4*16)(src), x4; \ vmovdqu (5*16)(src), x5; \ vmovdqu (6*16)(src), x6; \ vmovdqu (7*16)(src), x7; #define store_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7) \ vmovdqu x0, (0*16)(dst); \ vmovdqu x1, (1*16)(dst); \ vmovdqu x2, (2*16)(dst); \ vmovdqu x3, (3*16)(dst); \ vmovdqu x4, (4*16)(dst); \ vmovdqu x5, (5*16)(dst); \ vmovdqu x6, (6*16)(dst); \ vmovdqu x7, (7*16)(dst); #define store_cbc_8way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7) \ vpxor (0*16)(src), x1, x1; \ vpxor (1*16)(src), x2, x2; \ vpxor (2*16)(src), x3, x3; \ vpxor (3*16)(src), x4, x4; \ vpxor (4*16)(src), x5, x5; \ vpxor (5*16)(src), x6, x6; \ vpxor (6*16)(src), x7, x7; \ store_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7);
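Editor's note: store_cbc_8way is the one macro here whose purpose is not obvious from its name: it finishes CBC decryption for eight consecutive blocks by XORing each ECB-decrypted block except the first with the ciphertext block that preceded it in the source buffer; XORing the first block with the IV is left to the C glue caller. A byte-wise sketch of the same data flow, with function and parameter names of my own choosing:

    #include <stdint.h>

    /* blocks[] holds the 8 ECB-decrypted blocks; src still holds the
     * original ciphertext.  Block 0 is stored as-is (caller XORs the IV). */
    static void cbc_store_8way(const uint8_t *src, uint8_t *dst,
                               const uint8_t blocks[8][16])
    {
        int i, j;

        for (i = 0; i < 8; i++) {
            for (j = 0; j < 16; j++) {
                uint8_t byte = blocks[i][j];

                if (i > 0)
                    byte ^= src[(i - 1) * 16 + j];  /* previous ciphertext block */
                dst[i * 16 + j] = byte;
            }
        }
    }

Because every XOR input is read from the untouched source buffer, the macro works even when dst aliases src, which is why the CBC glue can decrypt in place.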
aixcc-public/challenge-001-exemplar-source
13,107
arch/x86/crypto/sha512-ssse3-asm.S
######################################################################## # Implement fast SHA-512 with SSSE3 instructions. (x86_64) # # Copyright (C) 2013 Intel Corporation. # # Authors: # James Guilford <james.guilford@intel.com> # Kirk Yap <kirk.s.yap@intel.com> # David Cote <david.m.cote@intel.com> # Tim Chen <tim.c.chen@linux.intel.com> # # This software is available to you under a choice of one of two # licenses. You may choose to be licensed under the terms of the GNU # General Public License (GPL) Version 2, available from the file # COPYING in the main directory of this source tree, or the # OpenIB.org BSD license below: # # Redistribution and use in source and binary forms, with or # without modification, are permitted provided that the following # conditions are met: # # - Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # - Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials # provided with the distribution. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # ######################################################################## # # This code is described in an Intel White-Paper: # "Fast SHA-512 Implementations on Intel Architecture Processors" # # To find it, surf to http://www.intel.com/p/en_US/embedded # and search for that title. 
# ######################################################################## #include <linux/linkage.h> #include <linux/cfi_types.h> .text # Virtual Registers # ARG1 digest = %rdi # ARG2 msg = %rsi # ARG3 msglen = %rdx T1 = %rcx T2 = %r8 a_64 = %r9 b_64 = %r10 c_64 = %r11 d_64 = %r12 e_64 = %r13 f_64 = %r14 g_64 = %r15 h_64 = %rbx tmp0 = %rax # Local variables (stack frame) W_SIZE = 80*8 WK_SIZE = 2*8 frame_W = 0 frame_WK = frame_W + W_SIZE frame_size = frame_WK + WK_SIZE # Useful QWORD "arrays" for simpler memory references # MSG, DIGEST, K_t, W_t are arrays # WK_2(t) points to 1 of 2 qwords at frame.WK depdending on t being odd/even # Input message (arg1) #define MSG(i) 8*i(msg) # Output Digest (arg2) #define DIGEST(i) 8*i(digest) # SHA Constants (static mem) #define K_t(i) 8*i+K512(%rip) # Message Schedule (stack frame) #define W_t(i) 8*i+frame_W(%rsp) # W[t]+K[t] (stack frame) #define WK_2(i) 8*((i%2))+frame_WK(%rsp) .macro RotateState # Rotate symbols a..h right TMP = h_64 h_64 = g_64 g_64 = f_64 f_64 = e_64 e_64 = d_64 d_64 = c_64 c_64 = b_64 b_64 = a_64 a_64 = TMP .endm .macro SHA512_Round rnd # Compute Round %%t mov f_64, T1 # T1 = f mov e_64, tmp0 # tmp = e xor g_64, T1 # T1 = f ^ g ror $23, tmp0 # 41 # tmp = e ror 23 and e_64, T1 # T1 = (f ^ g) & e xor e_64, tmp0 # tmp = (e ror 23) ^ e xor g_64, T1 # T1 = ((f ^ g) & e) ^ g = CH(e,f,g) idx = \rnd add WK_2(idx), T1 # W[t] + K[t] from message scheduler ror $4, tmp0 # 18 # tmp = ((e ror 23) ^ e) ror 4 xor e_64, tmp0 # tmp = (((e ror 23) ^ e) ror 4) ^ e mov a_64, T2 # T2 = a add h_64, T1 # T1 = CH(e,f,g) + W[t] + K[t] + h ror $14, tmp0 # 14 # tmp = ((((e ror23)^e)ror4)^e)ror14 = S1(e) add tmp0, T1 # T1 = CH(e,f,g) + W[t] + K[t] + S1(e) mov a_64, tmp0 # tmp = a xor c_64, T2 # T2 = a ^ c and c_64, tmp0 # tmp = a & c and b_64, T2 # T2 = (a ^ c) & b xor tmp0, T2 # T2 = ((a ^ c) & b) ^ (a & c) = Maj(a,b,c) mov a_64, tmp0 # tmp = a ror $5, tmp0 # 39 # tmp = a ror 5 xor a_64, tmp0 # tmp = (a ror 5) ^ a add T1, d_64 # e(next_state) = d + T1 ror $6, tmp0 # 34 # tmp = ((a ror 5) ^ a) ror 6 xor a_64, tmp0 # tmp = (((a ror 5) ^ a) ror 6) ^ a lea (T1, T2), h_64 # a(next_state) = T1 + Maj(a,b,c) ror $28, tmp0 # 28 # tmp = ((((a ror5)^a)ror6)^a)ror28 = S0(a) add tmp0, h_64 # a(next_state) = T1 + Maj(a,b,c) S0(a) RotateState .endm .macro SHA512_2Sched_2Round_sse rnd # Compute rounds t-2 and t-1 # Compute message schedule QWORDS t and t+1 # Two rounds are computed based on the values for K[t-2]+W[t-2] and # K[t-1]+W[t-1] which were previously stored at WK_2 by the message # scheduler. # The two new schedule QWORDS are stored at [W_t(%%t)] and [W_t(%%t+1)]. # They are then added to their respective SHA512 constants at # [K_t(%%t)] and [K_t(%%t+1)] and stored at dqword [WK_2(%%t)] # For brievity, the comments following vectored instructions only refer to # the first of a pair of QWORDS. # Eg. XMM2=W[t-2] really means XMM2={W[t-2]|W[t-1]} # The computation of the message schedule and the rounds are tightly # stitched to take advantage of instruction-level parallelism. # For clarity, integer instructions (for the rounds calculation) are indented # by one tab. Vectored instructions (for the message scheduler) are indented # by two tabs. 
mov f_64, T1 idx = \rnd -2 movdqa W_t(idx), %xmm2 # XMM2 = W[t-2] xor g_64, T1 and e_64, T1 movdqa %xmm2, %xmm0 # XMM0 = W[t-2] xor g_64, T1 idx = \rnd add WK_2(idx), T1 idx = \rnd - 15 movdqu W_t(idx), %xmm5 # XMM5 = W[t-15] mov e_64, tmp0 ror $23, tmp0 # 41 movdqa %xmm5, %xmm3 # XMM3 = W[t-15] xor e_64, tmp0 ror $4, tmp0 # 18 psrlq $61-19, %xmm0 # XMM0 = W[t-2] >> 42 xor e_64, tmp0 ror $14, tmp0 # 14 psrlq $(8-7), %xmm3 # XMM3 = W[t-15] >> 1 add tmp0, T1 add h_64, T1 pxor %xmm2, %xmm0 # XMM0 = (W[t-2] >> 42) ^ W[t-2] mov a_64, T2 xor c_64, T2 pxor %xmm5, %xmm3 # XMM3 = (W[t-15] >> 1) ^ W[t-15] and b_64, T2 mov a_64, tmp0 psrlq $(19-6), %xmm0 # XMM0 = ((W[t-2]>>42)^W[t-2])>>13 and c_64, tmp0 xor tmp0, T2 psrlq $(7-1), %xmm3 # XMM3 = ((W[t-15]>>1)^W[t-15])>>6 mov a_64, tmp0 ror $5, tmp0 # 39 pxor %xmm2, %xmm0 # XMM0 = (((W[t-2]>>42)^W[t-2])>>13)^W[t-2] xor a_64, tmp0 ror $6, tmp0 # 34 pxor %xmm5, %xmm3 # XMM3 = (((W[t-15]>>1)^W[t-15])>>6)^W[t-15] xor a_64, tmp0 ror $28, tmp0 # 28 psrlq $6, %xmm0 # XMM0 = ((((W[t-2]>>42)^W[t-2])>>13)^W[t-2])>>6 add tmp0, T2 add T1, d_64 psrlq $1, %xmm3 # XMM3 = (((W[t-15]>>1)^W[t-15])>>6)^W[t-15]>>1 lea (T1, T2), h_64 RotateState movdqa %xmm2, %xmm1 # XMM1 = W[t-2] mov f_64, T1 xor g_64, T1 movdqa %xmm5, %xmm4 # XMM4 = W[t-15] and e_64, T1 xor g_64, T1 psllq $(64-19)-(64-61) , %xmm1 # XMM1 = W[t-2] << 42 idx = \rnd + 1 add WK_2(idx), T1 mov e_64, tmp0 psllq $(64-1)-(64-8), %xmm4 # XMM4 = W[t-15] << 7 ror $23, tmp0 # 41 xor e_64, tmp0 pxor %xmm2, %xmm1 # XMM1 = (W[t-2] << 42)^W[t-2] ror $4, tmp0 # 18 xor e_64, tmp0 pxor %xmm5, %xmm4 # XMM4 = (W[t-15]<<7)^W[t-15] ror $14, tmp0 # 14 add tmp0, T1 psllq $(64-61), %xmm1 # XMM1 = ((W[t-2] << 42)^W[t-2])<<3 add h_64, T1 mov a_64, T2 psllq $(64-8), %xmm4 # XMM4 = ((W[t-15]<<7)^W[t-15])<<56 xor c_64, T2 and b_64, T2 pxor %xmm1, %xmm0 # XMM0 = s1(W[t-2]) mov a_64, tmp0 and c_64, tmp0 idx = \rnd - 7 movdqu W_t(idx), %xmm1 # XMM1 = W[t-7] xor tmp0, T2 pxor %xmm4, %xmm3 # XMM3 = s0(W[t-15]) mov a_64, tmp0 paddq %xmm3, %xmm0 # XMM0 = s1(W[t-2]) + s0(W[t-15]) ror $5, tmp0 # 39 idx =\rnd-16 paddq W_t(idx), %xmm0 # XMM0 = s1(W[t-2]) + s0(W[t-15]) + W[t-16] xor a_64, tmp0 paddq %xmm1, %xmm0 # XMM0 = s1(W[t-2]) + W[t-7] + s0(W[t-15]) + W[t-16] ror $6, tmp0 # 34 movdqa %xmm0, W_t(\rnd) # Store scheduled qwords xor a_64, tmp0 paddq K_t(\rnd), %xmm0 # Compute W[t]+K[t] ror $28, tmp0 # 28 idx = \rnd movdqa %xmm0, WK_2(idx) # Store W[t]+K[t] for next rounds add tmp0, T2 add T1, d_64 lea (T1, T2), h_64 RotateState .endm ######################################################################## ## void sha512_transform_ssse3(struct sha512_state *state, const u8 *data, ## int blocks); # (struct sha512_state is assumed to begin with u64 state[8]) # Purpose: Updates the SHA512 digest stored at "state" with the message # stored in "data". # The size of the message pointed to by "data" must be an integer multiple # of SHA512 message blocks. # "blocks" is the message length in SHA512 blocks. 
######################################################################## SYM_TYPED_FUNC_START(sha512_transform_ssse3) test msglen, msglen je nowork # Save GPRs push %rbx push %r12 push %r13 push %r14 push %r15 # Allocate Stack Space push %rbp mov %rsp, %rbp sub $frame_size, %rsp and $~(0x20 - 1), %rsp updateblock: # Load state variables mov DIGEST(0), a_64 mov DIGEST(1), b_64 mov DIGEST(2), c_64 mov DIGEST(3), d_64 mov DIGEST(4), e_64 mov DIGEST(5), f_64 mov DIGEST(6), g_64 mov DIGEST(7), h_64 t = 0 .rept 80/2 + 1 # (80 rounds) / (2 rounds/iteration) + (1 iteration) # +1 iteration because the scheduler leads hashing by 1 iteration .if t < 2 # BSWAP 2 QWORDS movdqa XMM_QWORD_BSWAP(%rip), %xmm1 movdqu MSG(t), %xmm0 pshufb %xmm1, %xmm0 # BSWAP movdqa %xmm0, W_t(t) # Store Scheduled Pair paddq K_t(t), %xmm0 # Compute W[t]+K[t] movdqa %xmm0, WK_2(t) # Store into WK for rounds .elseif t < 16 # BSWAP 2 QWORDS# Compute 2 Rounds movdqu MSG(t), %xmm0 pshufb %xmm1, %xmm0 # BSWAP SHA512_Round t-2 # Round t-2 movdqa %xmm0, W_t(t) # Store Scheduled Pair paddq K_t(t), %xmm0 # Compute W[t]+K[t] SHA512_Round t-1 # Round t-1 movdqa %xmm0, WK_2(t) # Store W[t]+K[t] into WK .elseif t < 79 # Schedule 2 QWORDS# Compute 2 Rounds SHA512_2Sched_2Round_sse t .else # Compute 2 Rounds SHA512_Round t-2 SHA512_Round t-1 .endif t = t+2 .endr # Update digest add a_64, DIGEST(0) add b_64, DIGEST(1) add c_64, DIGEST(2) add d_64, DIGEST(3) add e_64, DIGEST(4) add f_64, DIGEST(5) add g_64, DIGEST(6) add h_64, DIGEST(7) # Advance to next message block add $16*8, msg dec msglen jnz updateblock # Restore Stack Pointer mov %rbp, %rsp pop %rbp # Restore GPRs pop %r15 pop %r14 pop %r13 pop %r12 pop %rbx nowork: RET SYM_FUNC_END(sha512_transform_ssse3) ######################################################################## ### Binary Data .section .rodata.cst16.XMM_QWORD_BSWAP, "aM", @progbits, 16 .align 16 # Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb. XMM_QWORD_BSWAP: .octa 0x08090a0b0c0d0e0f0001020304050607 # Mergeable 640-byte rodata section. This allows linker to merge the table # with other, exactly the same 640-byte fragment of another rodata section # (if such section exists). 
.section .rodata.cst640.K512, "aM", @progbits, 640 .align 64 # K[t] used in SHA512 hashing K512: .quad 0x428a2f98d728ae22,0x7137449123ef65cd .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc .quad 0x3956c25bf348b538,0x59f111f1b605d019 .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 .quad 0xd807aa98a3030242,0x12835b0145706fbe .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 .quad 0x9bdc06a725c71235,0xc19bf174cf692694 .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 .quad 0x983e5152ee66dfab,0xa831c66d2db43210 .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 .quad 0x06ca6351e003826f,0x142929670a0e6e70 .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 .quad 0x81c2c92e47edaee6,0x92722c851482353b .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 .quad 0xd192e819d6ef5218,0xd69906245565a910 .quad 0xf40e35855771202a,0x106aa07032bbd1b8 .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec .quad 0x90befffa23631e28,0xa4506cebde82bde9 .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b .quad 0xca273eceea26619c,0xd186b8c721c0c207 .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 .quad 0x113f9804bef90dae,0x1b710b35131c471b .quad 0x28db77f523047d84,0x32caab7b40c72493 .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
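As a C-level illustration of the contract documented above: sha512_transform_ssse3() consumes whole 128-byte blocks and updates the eight 64-bit chaining words in place, starting from whatever values the caller loaded; padding and finalization are the caller's job. The sketch below is not kernel code -- the stub struct and helper name are hypothetical -- and it assumes the buffer length is already a multiple of 128 bytes and that SIMD use is safe (in-kernel callers would typically wrap the call in kernel_fpu_begin()/kernel_fpu_end()).

#include <stdint.h>
#include <stddef.h>

/* Stub with the only layout property the assembly relies on:
 * u64 state[8] at offset 0 (the real struct sha512_state has more fields). */
struct sha512_state_stub {
	uint64_t state[8];
};

/* Routine defined in the file above. */
void sha512_transform_ssse3(struct sha512_state_stub *state,
			    const uint8_t *data, int blocks);

/* Hypothetical helper: hash a buffer that is a whole number of 128-byte
 * blocks, starting from the standard SHA-512 initial hash values. */
static void sha512_blocks_demo(uint64_t digest[8],
			       const uint8_t *data, size_t len)
{
	static const uint64_t iv[8] = {
		0x6a09e667f3bcc908ULL, 0xbb67ae8584caa73bULL,
		0x3c6ef372fe94f82bULL, 0xa54ff53a5f1d36f1ULL,
		0x510e527fade682d1ULL, 0x9b05688c2b3e6c1fULL,
		0x1f83d9abfb41bd6bULL, 0x5be0cd19137e2179ULL,
	};
	struct sha512_state_stub s;

	for (int i = 0; i < 8; i++)
		s.state[i] = iv[i];
	sha512_transform_ssse3(&s, data, (int)(len / 128));
	for (int i = 0; i < 8; i++)
		digest[i] = s.state[i];
}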
aixcc-public/challenge-001-exemplar-source
25,009
arch/x86/crypto/chacha-avx2-x86_64.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * ChaCha 256-bit cipher algorithm, x64 AVX2 functions * * Copyright (C) 2015 Martin Willi */ #include <linux/linkage.h> .section .rodata.cst32.ROT8, "aM", @progbits, 32 .align 32 ROT8: .octa 0x0e0d0c0f0a09080b0605040702010003 .octa 0x0e0d0c0f0a09080b0605040702010003 .section .rodata.cst32.ROT16, "aM", @progbits, 32 .align 32 ROT16: .octa 0x0d0c0f0e09080b0a0504070601000302 .octa 0x0d0c0f0e09080b0a0504070601000302 .section .rodata.cst32.CTRINC, "aM", @progbits, 32 .align 32 CTRINC: .octa 0x00000003000000020000000100000000 .octa 0x00000007000000060000000500000004 .section .rodata.cst32.CTR2BL, "aM", @progbits, 32 .align 32 CTR2BL: .octa 0x00000000000000000000000000000000 .octa 0x00000000000000000000000000000001 .section .rodata.cst32.CTR4BL, "aM", @progbits, 32 .align 32 CTR4BL: .octa 0x00000000000000000000000000000002 .octa 0x00000000000000000000000000000003 .text SYM_FUNC_START(chacha_2block_xor_avx2) # %rdi: Input state matrix, s # %rsi: up to 2 data blocks output, o # %rdx: up to 2 data blocks input, i # %rcx: input/output length in bytes # %r8d: nrounds # This function encrypts two ChaCha blocks by loading the state # matrix twice across four AVX registers. It performs matrix operations # on four words in each matrix in parallel, but requires shuffling to # rearrange the words after each round. vzeroupper # x0..3[0-2] = s0..3 vbroadcasti128 0x00(%rdi),%ymm0 vbroadcasti128 0x10(%rdi),%ymm1 vbroadcasti128 0x20(%rdi),%ymm2 vbroadcasti128 0x30(%rdi),%ymm3 vpaddd CTR2BL(%rip),%ymm3,%ymm3 vmovdqa %ymm0,%ymm8 vmovdqa %ymm1,%ymm9 vmovdqa %ymm2,%ymm10 vmovdqa %ymm3,%ymm11 vmovdqa ROT8(%rip),%ymm4 vmovdqa ROT16(%rip),%ymm5 mov %rcx,%rax .Ldoubleround: # x0 += x1, x3 = rotl32(x3 ^ x0, 16) vpaddd %ymm1,%ymm0,%ymm0 vpxor %ymm0,%ymm3,%ymm3 vpshufb %ymm5,%ymm3,%ymm3 # x2 += x3, x1 = rotl32(x1 ^ x2, 12) vpaddd %ymm3,%ymm2,%ymm2 vpxor %ymm2,%ymm1,%ymm1 vmovdqa %ymm1,%ymm6 vpslld $12,%ymm6,%ymm6 vpsrld $20,%ymm1,%ymm1 vpor %ymm6,%ymm1,%ymm1 # x0 += x1, x3 = rotl32(x3 ^ x0, 8) vpaddd %ymm1,%ymm0,%ymm0 vpxor %ymm0,%ymm3,%ymm3 vpshufb %ymm4,%ymm3,%ymm3 # x2 += x3, x1 = rotl32(x1 ^ x2, 7) vpaddd %ymm3,%ymm2,%ymm2 vpxor %ymm2,%ymm1,%ymm1 vmovdqa %ymm1,%ymm7 vpslld $7,%ymm7,%ymm7 vpsrld $25,%ymm1,%ymm1 vpor %ymm7,%ymm1,%ymm1 # x1 = shuffle32(x1, MASK(0, 3, 2, 1)) vpshufd $0x39,%ymm1,%ymm1 # x2 = shuffle32(x2, MASK(1, 0, 3, 2)) vpshufd $0x4e,%ymm2,%ymm2 # x3 = shuffle32(x3, MASK(2, 1, 0, 3)) vpshufd $0x93,%ymm3,%ymm3 # x0 += x1, x3 = rotl32(x3 ^ x0, 16) vpaddd %ymm1,%ymm0,%ymm0 vpxor %ymm0,%ymm3,%ymm3 vpshufb %ymm5,%ymm3,%ymm3 # x2 += x3, x1 = rotl32(x1 ^ x2, 12) vpaddd %ymm3,%ymm2,%ymm2 vpxor %ymm2,%ymm1,%ymm1 vmovdqa %ymm1,%ymm6 vpslld $12,%ymm6,%ymm6 vpsrld $20,%ymm1,%ymm1 vpor %ymm6,%ymm1,%ymm1 # x0 += x1, x3 = rotl32(x3 ^ x0, 8) vpaddd %ymm1,%ymm0,%ymm0 vpxor %ymm0,%ymm3,%ymm3 vpshufb %ymm4,%ymm3,%ymm3 # x2 += x3, x1 = rotl32(x1 ^ x2, 7) vpaddd %ymm3,%ymm2,%ymm2 vpxor %ymm2,%ymm1,%ymm1 vmovdqa %ymm1,%ymm7 vpslld $7,%ymm7,%ymm7 vpsrld $25,%ymm1,%ymm1 vpor %ymm7,%ymm1,%ymm1 # x1 = shuffle32(x1, MASK(2, 1, 0, 3)) vpshufd $0x93,%ymm1,%ymm1 # x2 = shuffle32(x2, MASK(1, 0, 3, 2)) vpshufd $0x4e,%ymm2,%ymm2 # x3 = shuffle32(x3, MASK(0, 3, 2, 1)) vpshufd $0x39,%ymm3,%ymm3 sub $2,%r8d jnz .Ldoubleround # o0 = i0 ^ (x0 + s0) vpaddd %ymm8,%ymm0,%ymm7 cmp $0x10,%rax jl .Lxorpart2 vpxor 0x00(%rdx),%xmm7,%xmm6 vmovdqu %xmm6,0x00(%rsi) vextracti128 $1,%ymm7,%xmm0 # o1 = i1 ^ (x1 + s1) vpaddd %ymm9,%ymm1,%ymm7 cmp $0x20,%rax jl .Lxorpart2 vpxor 0x10(%rdx),%xmm7,%xmm6 
vmovdqu %xmm6,0x10(%rsi) vextracti128 $1,%ymm7,%xmm1 # o2 = i2 ^ (x2 + s2) vpaddd %ymm10,%ymm2,%ymm7 cmp $0x30,%rax jl .Lxorpart2 vpxor 0x20(%rdx),%xmm7,%xmm6 vmovdqu %xmm6,0x20(%rsi) vextracti128 $1,%ymm7,%xmm2 # o3 = i3 ^ (x3 + s3) vpaddd %ymm11,%ymm3,%ymm7 cmp $0x40,%rax jl .Lxorpart2 vpxor 0x30(%rdx),%xmm7,%xmm6 vmovdqu %xmm6,0x30(%rsi) vextracti128 $1,%ymm7,%xmm3 # xor and write second block vmovdqa %xmm0,%xmm7 cmp $0x50,%rax jl .Lxorpart2 vpxor 0x40(%rdx),%xmm7,%xmm6 vmovdqu %xmm6,0x40(%rsi) vmovdqa %xmm1,%xmm7 cmp $0x60,%rax jl .Lxorpart2 vpxor 0x50(%rdx),%xmm7,%xmm6 vmovdqu %xmm6,0x50(%rsi) vmovdqa %xmm2,%xmm7 cmp $0x70,%rax jl .Lxorpart2 vpxor 0x60(%rdx),%xmm7,%xmm6 vmovdqu %xmm6,0x60(%rsi) vmovdqa %xmm3,%xmm7 cmp $0x80,%rax jl .Lxorpart2 vpxor 0x70(%rdx),%xmm7,%xmm6 vmovdqu %xmm6,0x70(%rsi) .Ldone2: vzeroupper RET .Lxorpart2: # xor remaining bytes from partial register into output mov %rax,%r9 and $0x0f,%r9 jz .Ldone2 and $~0x0f,%rax mov %rsi,%r11 lea 8(%rsp),%r10 sub $0x10,%rsp and $~31,%rsp lea (%rdx,%rax),%rsi mov %rsp,%rdi mov %r9,%rcx rep movsb vpxor 0x00(%rsp),%xmm7,%xmm7 vmovdqa %xmm7,0x00(%rsp) mov %rsp,%rsi lea (%r11,%rax),%rdi mov %r9,%rcx rep movsb lea -8(%r10),%rsp jmp .Ldone2 SYM_FUNC_END(chacha_2block_xor_avx2) SYM_FUNC_START(chacha_4block_xor_avx2) # %rdi: Input state matrix, s # %rsi: up to 4 data blocks output, o # %rdx: up to 4 data blocks input, i # %rcx: input/output length in bytes # %r8d: nrounds # This function encrypts four ChaCha blocks by loading the state # matrix four times across eight AVX registers. It performs matrix # operations on four words in two matrices in parallel, sequentially # to the operations on the four words of the other two matrices. The # required word shuffling has a rather high latency, we can do the # arithmetic on two matrix-pairs without much slowdown. 
vzeroupper # x0..3[0-4] = s0..3 vbroadcasti128 0x00(%rdi),%ymm0 vbroadcasti128 0x10(%rdi),%ymm1 vbroadcasti128 0x20(%rdi),%ymm2 vbroadcasti128 0x30(%rdi),%ymm3 vmovdqa %ymm0,%ymm4 vmovdqa %ymm1,%ymm5 vmovdqa %ymm2,%ymm6 vmovdqa %ymm3,%ymm7 vpaddd CTR2BL(%rip),%ymm3,%ymm3 vpaddd CTR4BL(%rip),%ymm7,%ymm7 vmovdqa %ymm0,%ymm11 vmovdqa %ymm1,%ymm12 vmovdqa %ymm2,%ymm13 vmovdqa %ymm3,%ymm14 vmovdqa %ymm7,%ymm15 vmovdqa ROT8(%rip),%ymm8 vmovdqa ROT16(%rip),%ymm9 mov %rcx,%rax .Ldoubleround4: # x0 += x1, x3 = rotl32(x3 ^ x0, 16) vpaddd %ymm1,%ymm0,%ymm0 vpxor %ymm0,%ymm3,%ymm3 vpshufb %ymm9,%ymm3,%ymm3 vpaddd %ymm5,%ymm4,%ymm4 vpxor %ymm4,%ymm7,%ymm7 vpshufb %ymm9,%ymm7,%ymm7 # x2 += x3, x1 = rotl32(x1 ^ x2, 12) vpaddd %ymm3,%ymm2,%ymm2 vpxor %ymm2,%ymm1,%ymm1 vmovdqa %ymm1,%ymm10 vpslld $12,%ymm10,%ymm10 vpsrld $20,%ymm1,%ymm1 vpor %ymm10,%ymm1,%ymm1 vpaddd %ymm7,%ymm6,%ymm6 vpxor %ymm6,%ymm5,%ymm5 vmovdqa %ymm5,%ymm10 vpslld $12,%ymm10,%ymm10 vpsrld $20,%ymm5,%ymm5 vpor %ymm10,%ymm5,%ymm5 # x0 += x1, x3 = rotl32(x3 ^ x0, 8) vpaddd %ymm1,%ymm0,%ymm0 vpxor %ymm0,%ymm3,%ymm3 vpshufb %ymm8,%ymm3,%ymm3 vpaddd %ymm5,%ymm4,%ymm4 vpxor %ymm4,%ymm7,%ymm7 vpshufb %ymm8,%ymm7,%ymm7 # x2 += x3, x1 = rotl32(x1 ^ x2, 7) vpaddd %ymm3,%ymm2,%ymm2 vpxor %ymm2,%ymm1,%ymm1 vmovdqa %ymm1,%ymm10 vpslld $7,%ymm10,%ymm10 vpsrld $25,%ymm1,%ymm1 vpor %ymm10,%ymm1,%ymm1 vpaddd %ymm7,%ymm6,%ymm6 vpxor %ymm6,%ymm5,%ymm5 vmovdqa %ymm5,%ymm10 vpslld $7,%ymm10,%ymm10 vpsrld $25,%ymm5,%ymm5 vpor %ymm10,%ymm5,%ymm5 # x1 = shuffle32(x1, MASK(0, 3, 2, 1)) vpshufd $0x39,%ymm1,%ymm1 vpshufd $0x39,%ymm5,%ymm5 # x2 = shuffle32(x2, MASK(1, 0, 3, 2)) vpshufd $0x4e,%ymm2,%ymm2 vpshufd $0x4e,%ymm6,%ymm6 # x3 = shuffle32(x3, MASK(2, 1, 0, 3)) vpshufd $0x93,%ymm3,%ymm3 vpshufd $0x93,%ymm7,%ymm7 # x0 += x1, x3 = rotl32(x3 ^ x0, 16) vpaddd %ymm1,%ymm0,%ymm0 vpxor %ymm0,%ymm3,%ymm3 vpshufb %ymm9,%ymm3,%ymm3 vpaddd %ymm5,%ymm4,%ymm4 vpxor %ymm4,%ymm7,%ymm7 vpshufb %ymm9,%ymm7,%ymm7 # x2 += x3, x1 = rotl32(x1 ^ x2, 12) vpaddd %ymm3,%ymm2,%ymm2 vpxor %ymm2,%ymm1,%ymm1 vmovdqa %ymm1,%ymm10 vpslld $12,%ymm10,%ymm10 vpsrld $20,%ymm1,%ymm1 vpor %ymm10,%ymm1,%ymm1 vpaddd %ymm7,%ymm6,%ymm6 vpxor %ymm6,%ymm5,%ymm5 vmovdqa %ymm5,%ymm10 vpslld $12,%ymm10,%ymm10 vpsrld $20,%ymm5,%ymm5 vpor %ymm10,%ymm5,%ymm5 # x0 += x1, x3 = rotl32(x3 ^ x0, 8) vpaddd %ymm1,%ymm0,%ymm0 vpxor %ymm0,%ymm3,%ymm3 vpshufb %ymm8,%ymm3,%ymm3 vpaddd %ymm5,%ymm4,%ymm4 vpxor %ymm4,%ymm7,%ymm7 vpshufb %ymm8,%ymm7,%ymm7 # x2 += x3, x1 = rotl32(x1 ^ x2, 7) vpaddd %ymm3,%ymm2,%ymm2 vpxor %ymm2,%ymm1,%ymm1 vmovdqa %ymm1,%ymm10 vpslld $7,%ymm10,%ymm10 vpsrld $25,%ymm1,%ymm1 vpor %ymm10,%ymm1,%ymm1 vpaddd %ymm7,%ymm6,%ymm6 vpxor %ymm6,%ymm5,%ymm5 vmovdqa %ymm5,%ymm10 vpslld $7,%ymm10,%ymm10 vpsrld $25,%ymm5,%ymm5 vpor %ymm10,%ymm5,%ymm5 # x1 = shuffle32(x1, MASK(2, 1, 0, 3)) vpshufd $0x93,%ymm1,%ymm1 vpshufd $0x93,%ymm5,%ymm5 # x2 = shuffle32(x2, MASK(1, 0, 3, 2)) vpshufd $0x4e,%ymm2,%ymm2 vpshufd $0x4e,%ymm6,%ymm6 # x3 = shuffle32(x3, MASK(0, 3, 2, 1)) vpshufd $0x39,%ymm3,%ymm3 vpshufd $0x39,%ymm7,%ymm7 sub $2,%r8d jnz .Ldoubleround4 # o0 = i0 ^ (x0 + s0), first block vpaddd %ymm11,%ymm0,%ymm10 cmp $0x10,%rax jl .Lxorpart4 vpxor 0x00(%rdx),%xmm10,%xmm9 vmovdqu %xmm9,0x00(%rsi) vextracti128 $1,%ymm10,%xmm0 # o1 = i1 ^ (x1 + s1), first block vpaddd %ymm12,%ymm1,%ymm10 cmp $0x20,%rax jl .Lxorpart4 vpxor 0x10(%rdx),%xmm10,%xmm9 vmovdqu %xmm9,0x10(%rsi) vextracti128 $1,%ymm10,%xmm1 # o2 = i2 ^ (x2 + s2), first block vpaddd %ymm13,%ymm2,%ymm10 cmp $0x30,%rax jl .Lxorpart4 vpxor 
0x20(%rdx),%xmm10,%xmm9 vmovdqu %xmm9,0x20(%rsi) vextracti128 $1,%ymm10,%xmm2 # o3 = i3 ^ (x3 + s3), first block vpaddd %ymm14,%ymm3,%ymm10 cmp $0x40,%rax jl .Lxorpart4 vpxor 0x30(%rdx),%xmm10,%xmm9 vmovdqu %xmm9,0x30(%rsi) vextracti128 $1,%ymm10,%xmm3 # xor and write second block vmovdqa %xmm0,%xmm10 cmp $0x50,%rax jl .Lxorpart4 vpxor 0x40(%rdx),%xmm10,%xmm9 vmovdqu %xmm9,0x40(%rsi) vmovdqa %xmm1,%xmm10 cmp $0x60,%rax jl .Lxorpart4 vpxor 0x50(%rdx),%xmm10,%xmm9 vmovdqu %xmm9,0x50(%rsi) vmovdqa %xmm2,%xmm10 cmp $0x70,%rax jl .Lxorpart4 vpxor 0x60(%rdx),%xmm10,%xmm9 vmovdqu %xmm9,0x60(%rsi) vmovdqa %xmm3,%xmm10 cmp $0x80,%rax jl .Lxorpart4 vpxor 0x70(%rdx),%xmm10,%xmm9 vmovdqu %xmm9,0x70(%rsi) # o0 = i0 ^ (x0 + s0), third block vpaddd %ymm11,%ymm4,%ymm10 cmp $0x90,%rax jl .Lxorpart4 vpxor 0x80(%rdx),%xmm10,%xmm9 vmovdqu %xmm9,0x80(%rsi) vextracti128 $1,%ymm10,%xmm4 # o1 = i1 ^ (x1 + s1), third block vpaddd %ymm12,%ymm5,%ymm10 cmp $0xa0,%rax jl .Lxorpart4 vpxor 0x90(%rdx),%xmm10,%xmm9 vmovdqu %xmm9,0x90(%rsi) vextracti128 $1,%ymm10,%xmm5 # o2 = i2 ^ (x2 + s2), third block vpaddd %ymm13,%ymm6,%ymm10 cmp $0xb0,%rax jl .Lxorpart4 vpxor 0xa0(%rdx),%xmm10,%xmm9 vmovdqu %xmm9,0xa0(%rsi) vextracti128 $1,%ymm10,%xmm6 # o3 = i3 ^ (x3 + s3), third block vpaddd %ymm15,%ymm7,%ymm10 cmp $0xc0,%rax jl .Lxorpart4 vpxor 0xb0(%rdx),%xmm10,%xmm9 vmovdqu %xmm9,0xb0(%rsi) vextracti128 $1,%ymm10,%xmm7 # xor and write fourth block vmovdqa %xmm4,%xmm10 cmp $0xd0,%rax jl .Lxorpart4 vpxor 0xc0(%rdx),%xmm10,%xmm9 vmovdqu %xmm9,0xc0(%rsi) vmovdqa %xmm5,%xmm10 cmp $0xe0,%rax jl .Lxorpart4 vpxor 0xd0(%rdx),%xmm10,%xmm9 vmovdqu %xmm9,0xd0(%rsi) vmovdqa %xmm6,%xmm10 cmp $0xf0,%rax jl .Lxorpart4 vpxor 0xe0(%rdx),%xmm10,%xmm9 vmovdqu %xmm9,0xe0(%rsi) vmovdqa %xmm7,%xmm10 cmp $0x100,%rax jl .Lxorpart4 vpxor 0xf0(%rdx),%xmm10,%xmm9 vmovdqu %xmm9,0xf0(%rsi) .Ldone4: vzeroupper RET .Lxorpart4: # xor remaining bytes from partial register into output mov %rax,%r9 and $0x0f,%r9 jz .Ldone4 and $~0x0f,%rax mov %rsi,%r11 lea 8(%rsp),%r10 sub $0x10,%rsp and $~31,%rsp lea (%rdx,%rax),%rsi mov %rsp,%rdi mov %r9,%rcx rep movsb vpxor 0x00(%rsp),%xmm10,%xmm10 vmovdqa %xmm10,0x00(%rsp) mov %rsp,%rsi lea (%r11,%rax),%rdi mov %r9,%rcx rep movsb lea -8(%r10),%rsp jmp .Ldone4 SYM_FUNC_END(chacha_4block_xor_avx2) SYM_FUNC_START(chacha_8block_xor_avx2) # %rdi: Input state matrix, s # %rsi: up to 8 data blocks output, o # %rdx: up to 8 data blocks input, i # %rcx: input/output length in bytes # %r8d: nrounds # This function encrypts eight consecutive ChaCha blocks by loading # the state matrix in AVX registers eight times. As we need some # scratch registers, we save the first four registers on the stack. The # algorithm performs each operation on the corresponding word of each # state matrix, hence requires no word shuffling. For final XORing step # we transpose the matrix by interleaving 32-, 64- and then 128-bit # words, which allows us to do XOR in AVX registers. 8/16-bit word # rotation is done with the slightly better performing byte shuffling, # 7/12-bit word rotation uses traditional shift+OR. 
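	# (The vpbroadcastd loads below replicate each of the sixteen 32-bit
	# state words across all eight lanes of a ymm register, so after the
	# rounds the results are word-sliced -- one state word for all eight
	# blocks per register -- rather than block-sliced. The 32-, 64- and
	# 128-bit interleave steps at the end rebuild contiguous 64-byte
	# keystream blocks before the XOR with the input.)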
vzeroupper # 4 * 32 byte stack, 32-byte aligned lea 8(%rsp),%r10 and $~31, %rsp sub $0x80, %rsp mov %rcx,%rax # x0..15[0-7] = s[0..15] vpbroadcastd 0x00(%rdi),%ymm0 vpbroadcastd 0x04(%rdi),%ymm1 vpbroadcastd 0x08(%rdi),%ymm2 vpbroadcastd 0x0c(%rdi),%ymm3 vpbroadcastd 0x10(%rdi),%ymm4 vpbroadcastd 0x14(%rdi),%ymm5 vpbroadcastd 0x18(%rdi),%ymm6 vpbroadcastd 0x1c(%rdi),%ymm7 vpbroadcastd 0x20(%rdi),%ymm8 vpbroadcastd 0x24(%rdi),%ymm9 vpbroadcastd 0x28(%rdi),%ymm10 vpbroadcastd 0x2c(%rdi),%ymm11 vpbroadcastd 0x30(%rdi),%ymm12 vpbroadcastd 0x34(%rdi),%ymm13 vpbroadcastd 0x38(%rdi),%ymm14 vpbroadcastd 0x3c(%rdi),%ymm15 # x0..3 on stack vmovdqa %ymm0,0x00(%rsp) vmovdqa %ymm1,0x20(%rsp) vmovdqa %ymm2,0x40(%rsp) vmovdqa %ymm3,0x60(%rsp) vmovdqa CTRINC(%rip),%ymm1 vmovdqa ROT8(%rip),%ymm2 vmovdqa ROT16(%rip),%ymm3 # x12 += counter values 0-3 vpaddd %ymm1,%ymm12,%ymm12 .Ldoubleround8: # x0 += x4, x12 = rotl32(x12 ^ x0, 16) vpaddd 0x00(%rsp),%ymm4,%ymm0 vmovdqa %ymm0,0x00(%rsp) vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm3,%ymm12,%ymm12 # x1 += x5, x13 = rotl32(x13 ^ x1, 16) vpaddd 0x20(%rsp),%ymm5,%ymm0 vmovdqa %ymm0,0x20(%rsp) vpxor %ymm0,%ymm13,%ymm13 vpshufb %ymm3,%ymm13,%ymm13 # x2 += x6, x14 = rotl32(x14 ^ x2, 16) vpaddd 0x40(%rsp),%ymm6,%ymm0 vmovdqa %ymm0,0x40(%rsp) vpxor %ymm0,%ymm14,%ymm14 vpshufb %ymm3,%ymm14,%ymm14 # x3 += x7, x15 = rotl32(x15 ^ x3, 16) vpaddd 0x60(%rsp),%ymm7,%ymm0 vmovdqa %ymm0,0x60(%rsp) vpxor %ymm0,%ymm15,%ymm15 vpshufb %ymm3,%ymm15,%ymm15 # x8 += x12, x4 = rotl32(x4 ^ x8, 12) vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $12,%ymm4,%ymm0 vpsrld $20,%ymm4,%ymm4 vpor %ymm0,%ymm4,%ymm4 # x9 += x13, x5 = rotl32(x5 ^ x9, 12) vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $12,%ymm5,%ymm0 vpsrld $20,%ymm5,%ymm5 vpor %ymm0,%ymm5,%ymm5 # x10 += x14, x6 = rotl32(x6 ^ x10, 12) vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $12,%ymm6,%ymm0 vpsrld $20,%ymm6,%ymm6 vpor %ymm0,%ymm6,%ymm6 # x11 += x15, x7 = rotl32(x7 ^ x11, 12) vpaddd %ymm15,%ymm11,%ymm11 vpxor %ymm11,%ymm7,%ymm7 vpslld $12,%ymm7,%ymm0 vpsrld $20,%ymm7,%ymm7 vpor %ymm0,%ymm7,%ymm7 # x0 += x4, x12 = rotl32(x12 ^ x0, 8) vpaddd 0x00(%rsp),%ymm4,%ymm0 vmovdqa %ymm0,0x00(%rsp) vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm2,%ymm12,%ymm12 # x1 += x5, x13 = rotl32(x13 ^ x1, 8) vpaddd 0x20(%rsp),%ymm5,%ymm0 vmovdqa %ymm0,0x20(%rsp) vpxor %ymm0,%ymm13,%ymm13 vpshufb %ymm2,%ymm13,%ymm13 # x2 += x6, x14 = rotl32(x14 ^ x2, 8) vpaddd 0x40(%rsp),%ymm6,%ymm0 vmovdqa %ymm0,0x40(%rsp) vpxor %ymm0,%ymm14,%ymm14 vpshufb %ymm2,%ymm14,%ymm14 # x3 += x7, x15 = rotl32(x15 ^ x3, 8) vpaddd 0x60(%rsp),%ymm7,%ymm0 vmovdqa %ymm0,0x60(%rsp) vpxor %ymm0,%ymm15,%ymm15 vpshufb %ymm2,%ymm15,%ymm15 # x8 += x12, x4 = rotl32(x4 ^ x8, 7) vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm0 vpsrld $25,%ymm4,%ymm4 vpor %ymm0,%ymm4,%ymm4 # x9 += x13, x5 = rotl32(x5 ^ x9, 7) vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm0 vpsrld $25,%ymm5,%ymm5 vpor %ymm0,%ymm5,%ymm5 # x10 += x14, x6 = rotl32(x6 ^ x10, 7) vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm0 vpsrld $25,%ymm6,%ymm6 vpor %ymm0,%ymm6,%ymm6 # x11 += x15, x7 = rotl32(x7 ^ x11, 7) vpaddd %ymm15,%ymm11,%ymm11 vpxor %ymm11,%ymm7,%ymm7 vpslld $7,%ymm7,%ymm0 vpsrld $25,%ymm7,%ymm7 vpor %ymm0,%ymm7,%ymm7 # x0 += x5, x15 = rotl32(x15 ^ x0, 16) vpaddd 0x00(%rsp),%ymm5,%ymm0 vmovdqa %ymm0,0x00(%rsp) vpxor %ymm0,%ymm15,%ymm15 vpshufb %ymm3,%ymm15,%ymm15 # x1 += x6, x12 = rotl32(x12 ^ x1, 16)%ymm0 vpaddd 
0x20(%rsp),%ymm6,%ymm0 vmovdqa %ymm0,0x20(%rsp) vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm3,%ymm12,%ymm12 # x2 += x7, x13 = rotl32(x13 ^ x2, 16) vpaddd 0x40(%rsp),%ymm7,%ymm0 vmovdqa %ymm0,0x40(%rsp) vpxor %ymm0,%ymm13,%ymm13 vpshufb %ymm3,%ymm13,%ymm13 # x3 += x4, x14 = rotl32(x14 ^ x3, 16) vpaddd 0x60(%rsp),%ymm4,%ymm0 vmovdqa %ymm0,0x60(%rsp) vpxor %ymm0,%ymm14,%ymm14 vpshufb %ymm3,%ymm14,%ymm14 # x10 += x15, x5 = rotl32(x5 ^ x10, 12) vpaddd %ymm15,%ymm10,%ymm10 vpxor %ymm10,%ymm5,%ymm5 vpslld $12,%ymm5,%ymm0 vpsrld $20,%ymm5,%ymm5 vpor %ymm0,%ymm5,%ymm5 # x11 += x12, x6 = rotl32(x6 ^ x11, 12) vpaddd %ymm12,%ymm11,%ymm11 vpxor %ymm11,%ymm6,%ymm6 vpslld $12,%ymm6,%ymm0 vpsrld $20,%ymm6,%ymm6 vpor %ymm0,%ymm6,%ymm6 # x8 += x13, x7 = rotl32(x7 ^ x8, 12) vpaddd %ymm13,%ymm8,%ymm8 vpxor %ymm8,%ymm7,%ymm7 vpslld $12,%ymm7,%ymm0 vpsrld $20,%ymm7,%ymm7 vpor %ymm0,%ymm7,%ymm7 # x9 += x14, x4 = rotl32(x4 ^ x9, 12) vpaddd %ymm14,%ymm9,%ymm9 vpxor %ymm9,%ymm4,%ymm4 vpslld $12,%ymm4,%ymm0 vpsrld $20,%ymm4,%ymm4 vpor %ymm0,%ymm4,%ymm4 # x0 += x5, x15 = rotl32(x15 ^ x0, 8) vpaddd 0x00(%rsp),%ymm5,%ymm0 vmovdqa %ymm0,0x00(%rsp) vpxor %ymm0,%ymm15,%ymm15 vpshufb %ymm2,%ymm15,%ymm15 # x1 += x6, x12 = rotl32(x12 ^ x1, 8) vpaddd 0x20(%rsp),%ymm6,%ymm0 vmovdqa %ymm0,0x20(%rsp) vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm2,%ymm12,%ymm12 # x2 += x7, x13 = rotl32(x13 ^ x2, 8) vpaddd 0x40(%rsp),%ymm7,%ymm0 vmovdqa %ymm0,0x40(%rsp) vpxor %ymm0,%ymm13,%ymm13 vpshufb %ymm2,%ymm13,%ymm13 # x3 += x4, x14 = rotl32(x14 ^ x3, 8) vpaddd 0x60(%rsp),%ymm4,%ymm0 vmovdqa %ymm0,0x60(%rsp) vpxor %ymm0,%ymm14,%ymm14 vpshufb %ymm2,%ymm14,%ymm14 # x10 += x15, x5 = rotl32(x5 ^ x10, 7) vpaddd %ymm15,%ymm10,%ymm10 vpxor %ymm10,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm0 vpsrld $25,%ymm5,%ymm5 vpor %ymm0,%ymm5,%ymm5 # x11 += x12, x6 = rotl32(x6 ^ x11, 7) vpaddd %ymm12,%ymm11,%ymm11 vpxor %ymm11,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm0 vpsrld $25,%ymm6,%ymm6 vpor %ymm0,%ymm6,%ymm6 # x8 += x13, x7 = rotl32(x7 ^ x8, 7) vpaddd %ymm13,%ymm8,%ymm8 vpxor %ymm8,%ymm7,%ymm7 vpslld $7,%ymm7,%ymm0 vpsrld $25,%ymm7,%ymm7 vpor %ymm0,%ymm7,%ymm7 # x9 += x14, x4 = rotl32(x4 ^ x9, 7) vpaddd %ymm14,%ymm9,%ymm9 vpxor %ymm9,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm0 vpsrld $25,%ymm4,%ymm4 vpor %ymm0,%ymm4,%ymm4 sub $2,%r8d jnz .Ldoubleround8 # x0..15[0-3] += s[0..15] vpbroadcastd 0x00(%rdi),%ymm0 vpaddd 0x00(%rsp),%ymm0,%ymm0 vmovdqa %ymm0,0x00(%rsp) vpbroadcastd 0x04(%rdi),%ymm0 vpaddd 0x20(%rsp),%ymm0,%ymm0 vmovdqa %ymm0,0x20(%rsp) vpbroadcastd 0x08(%rdi),%ymm0 vpaddd 0x40(%rsp),%ymm0,%ymm0 vmovdqa %ymm0,0x40(%rsp) vpbroadcastd 0x0c(%rdi),%ymm0 vpaddd 0x60(%rsp),%ymm0,%ymm0 vmovdqa %ymm0,0x60(%rsp) vpbroadcastd 0x10(%rdi),%ymm0 vpaddd %ymm0,%ymm4,%ymm4 vpbroadcastd 0x14(%rdi),%ymm0 vpaddd %ymm0,%ymm5,%ymm5 vpbroadcastd 0x18(%rdi),%ymm0 vpaddd %ymm0,%ymm6,%ymm6 vpbroadcastd 0x1c(%rdi),%ymm0 vpaddd %ymm0,%ymm7,%ymm7 vpbroadcastd 0x20(%rdi),%ymm0 vpaddd %ymm0,%ymm8,%ymm8 vpbroadcastd 0x24(%rdi),%ymm0 vpaddd %ymm0,%ymm9,%ymm9 vpbroadcastd 0x28(%rdi),%ymm0 vpaddd %ymm0,%ymm10,%ymm10 vpbroadcastd 0x2c(%rdi),%ymm0 vpaddd %ymm0,%ymm11,%ymm11 vpbroadcastd 0x30(%rdi),%ymm0 vpaddd %ymm0,%ymm12,%ymm12 vpbroadcastd 0x34(%rdi),%ymm0 vpaddd %ymm0,%ymm13,%ymm13 vpbroadcastd 0x38(%rdi),%ymm0 vpaddd %ymm0,%ymm14,%ymm14 vpbroadcastd 0x3c(%rdi),%ymm0 vpaddd %ymm0,%ymm15,%ymm15 # x12 += counter values 0-3 vpaddd %ymm1,%ymm12,%ymm12 # interleave 32-bit words in state n, n+1 vmovdqa 0x00(%rsp),%ymm0 vmovdqa 0x20(%rsp),%ymm1 vpunpckldq %ymm1,%ymm0,%ymm2 vpunpckhdq %ymm1,%ymm0,%ymm1 vmovdqa %ymm2,0x00(%rsp) 
vmovdqa %ymm1,0x20(%rsp) vmovdqa 0x40(%rsp),%ymm0 vmovdqa 0x60(%rsp),%ymm1 vpunpckldq %ymm1,%ymm0,%ymm2 vpunpckhdq %ymm1,%ymm0,%ymm1 vmovdqa %ymm2,0x40(%rsp) vmovdqa %ymm1,0x60(%rsp) vmovdqa %ymm4,%ymm0 vpunpckldq %ymm5,%ymm0,%ymm4 vpunpckhdq %ymm5,%ymm0,%ymm5 vmovdqa %ymm6,%ymm0 vpunpckldq %ymm7,%ymm0,%ymm6 vpunpckhdq %ymm7,%ymm0,%ymm7 vmovdqa %ymm8,%ymm0 vpunpckldq %ymm9,%ymm0,%ymm8 vpunpckhdq %ymm9,%ymm0,%ymm9 vmovdqa %ymm10,%ymm0 vpunpckldq %ymm11,%ymm0,%ymm10 vpunpckhdq %ymm11,%ymm0,%ymm11 vmovdqa %ymm12,%ymm0 vpunpckldq %ymm13,%ymm0,%ymm12 vpunpckhdq %ymm13,%ymm0,%ymm13 vmovdqa %ymm14,%ymm0 vpunpckldq %ymm15,%ymm0,%ymm14 vpunpckhdq %ymm15,%ymm0,%ymm15 # interleave 64-bit words in state n, n+2 vmovdqa 0x00(%rsp),%ymm0 vmovdqa 0x40(%rsp),%ymm2 vpunpcklqdq %ymm2,%ymm0,%ymm1 vpunpckhqdq %ymm2,%ymm0,%ymm2 vmovdqa %ymm1,0x00(%rsp) vmovdqa %ymm2,0x40(%rsp) vmovdqa 0x20(%rsp),%ymm0 vmovdqa 0x60(%rsp),%ymm2 vpunpcklqdq %ymm2,%ymm0,%ymm1 vpunpckhqdq %ymm2,%ymm0,%ymm2 vmovdqa %ymm1,0x20(%rsp) vmovdqa %ymm2,0x60(%rsp) vmovdqa %ymm4,%ymm0 vpunpcklqdq %ymm6,%ymm0,%ymm4 vpunpckhqdq %ymm6,%ymm0,%ymm6 vmovdqa %ymm5,%ymm0 vpunpcklqdq %ymm7,%ymm0,%ymm5 vpunpckhqdq %ymm7,%ymm0,%ymm7 vmovdqa %ymm8,%ymm0 vpunpcklqdq %ymm10,%ymm0,%ymm8 vpunpckhqdq %ymm10,%ymm0,%ymm10 vmovdqa %ymm9,%ymm0 vpunpcklqdq %ymm11,%ymm0,%ymm9 vpunpckhqdq %ymm11,%ymm0,%ymm11 vmovdqa %ymm12,%ymm0 vpunpcklqdq %ymm14,%ymm0,%ymm12 vpunpckhqdq %ymm14,%ymm0,%ymm14 vmovdqa %ymm13,%ymm0 vpunpcklqdq %ymm15,%ymm0,%ymm13 vpunpckhqdq %ymm15,%ymm0,%ymm15 # interleave 128-bit words in state n, n+4 # xor/write first four blocks vmovdqa 0x00(%rsp),%ymm1 vperm2i128 $0x20,%ymm4,%ymm1,%ymm0 cmp $0x0020,%rax jl .Lxorpart8 vpxor 0x0000(%rdx),%ymm0,%ymm0 vmovdqu %ymm0,0x0000(%rsi) vperm2i128 $0x31,%ymm4,%ymm1,%ymm4 vperm2i128 $0x20,%ymm12,%ymm8,%ymm0 cmp $0x0040,%rax jl .Lxorpart8 vpxor 0x0020(%rdx),%ymm0,%ymm0 vmovdqu %ymm0,0x0020(%rsi) vperm2i128 $0x31,%ymm12,%ymm8,%ymm12 vmovdqa 0x40(%rsp),%ymm1 vperm2i128 $0x20,%ymm6,%ymm1,%ymm0 cmp $0x0060,%rax jl .Lxorpart8 vpxor 0x0040(%rdx),%ymm0,%ymm0 vmovdqu %ymm0,0x0040(%rsi) vperm2i128 $0x31,%ymm6,%ymm1,%ymm6 vperm2i128 $0x20,%ymm14,%ymm10,%ymm0 cmp $0x0080,%rax jl .Lxorpart8 vpxor 0x0060(%rdx),%ymm0,%ymm0 vmovdqu %ymm0,0x0060(%rsi) vperm2i128 $0x31,%ymm14,%ymm10,%ymm14 vmovdqa 0x20(%rsp),%ymm1 vperm2i128 $0x20,%ymm5,%ymm1,%ymm0 cmp $0x00a0,%rax jl .Lxorpart8 vpxor 0x0080(%rdx),%ymm0,%ymm0 vmovdqu %ymm0,0x0080(%rsi) vperm2i128 $0x31,%ymm5,%ymm1,%ymm5 vperm2i128 $0x20,%ymm13,%ymm9,%ymm0 cmp $0x00c0,%rax jl .Lxorpart8 vpxor 0x00a0(%rdx),%ymm0,%ymm0 vmovdqu %ymm0,0x00a0(%rsi) vperm2i128 $0x31,%ymm13,%ymm9,%ymm13 vmovdqa 0x60(%rsp),%ymm1 vperm2i128 $0x20,%ymm7,%ymm1,%ymm0 cmp $0x00e0,%rax jl .Lxorpart8 vpxor 0x00c0(%rdx),%ymm0,%ymm0 vmovdqu %ymm0,0x00c0(%rsi) vperm2i128 $0x31,%ymm7,%ymm1,%ymm7 vperm2i128 $0x20,%ymm15,%ymm11,%ymm0 cmp $0x0100,%rax jl .Lxorpart8 vpxor 0x00e0(%rdx),%ymm0,%ymm0 vmovdqu %ymm0,0x00e0(%rsi) vperm2i128 $0x31,%ymm15,%ymm11,%ymm15 # xor remaining blocks, write to output vmovdqa %ymm4,%ymm0 cmp $0x0120,%rax jl .Lxorpart8 vpxor 0x0100(%rdx),%ymm0,%ymm0 vmovdqu %ymm0,0x0100(%rsi) vmovdqa %ymm12,%ymm0 cmp $0x0140,%rax jl .Lxorpart8 vpxor 0x0120(%rdx),%ymm0,%ymm0 vmovdqu %ymm0,0x0120(%rsi) vmovdqa %ymm6,%ymm0 cmp $0x0160,%rax jl .Lxorpart8 vpxor 0x0140(%rdx),%ymm0,%ymm0 vmovdqu %ymm0,0x0140(%rsi) vmovdqa %ymm14,%ymm0 cmp $0x0180,%rax jl .Lxorpart8 vpxor 0x0160(%rdx),%ymm0,%ymm0 vmovdqu %ymm0,0x0160(%rsi) vmovdqa %ymm5,%ymm0 cmp $0x01a0,%rax jl .Lxorpart8 vpxor 0x0180(%rdx),%ymm0,%ymm0 vmovdqu 
%ymm0,0x0180(%rsi) vmovdqa %ymm13,%ymm0 cmp $0x01c0,%rax jl .Lxorpart8 vpxor 0x01a0(%rdx),%ymm0,%ymm0 vmovdqu %ymm0,0x01a0(%rsi) vmovdqa %ymm7,%ymm0 cmp $0x01e0,%rax jl .Lxorpart8 vpxor 0x01c0(%rdx),%ymm0,%ymm0 vmovdqu %ymm0,0x01c0(%rsi) vmovdqa %ymm15,%ymm0 cmp $0x0200,%rax jl .Lxorpart8 vpxor 0x01e0(%rdx),%ymm0,%ymm0 vmovdqu %ymm0,0x01e0(%rsi) .Ldone8: vzeroupper lea -8(%r10),%rsp RET .Lxorpart8: # xor remaining bytes from partial register into output mov %rax,%r9 and $0x1f,%r9 jz .Ldone8 and $~0x1f,%rax mov %rsi,%r11 lea (%rdx,%rax),%rsi mov %rsp,%rdi mov %r9,%rcx rep movsb vpxor 0x00(%rsp),%ymm0,%ymm0 vmovdqa %ymm0,0x00(%rsp) mov %rsp,%rsi lea (%r11,%rax),%rdi mov %r9,%rcx rep movsb jmp .Ldone8 SYM_FUNC_END(chacha_8block_xor_avx2)
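All three routines above expand the same 16-word ChaCha state; a scalar C sketch of the quarter-round they vectorize (with the 16/12/8/7 rotation schedule that shows up in the vpshufb and vpslld/vpsrld sequences) may help when reading the interleaved vector code. This is a generic reference, not kernel code, and the function names are illustrative only.

#include <stdint.h>

static inline uint32_t rotl32(uint32_t v, int n)
{
	return (v << n) | (v >> (32 - n));
}

/* One ChaCha quarter-round. The AVX2 functions above apply this pattern to
 * whole vectors of state words: the 16- and 8-bit rotates use a byte shuffle
 * (vpshufb with ROT16/ROT8), the 12- and 7-bit rotates use shift + or. */
static void chacha_quarterround(uint32_t *a, uint32_t *b,
				uint32_t *c, uint32_t *d)
{
	*a += *b; *d ^= *a; *d = rotl32(*d, 16);
	*c += *d; *b ^= *c; *b = rotl32(*b, 12);
	*a += *b; *d ^= *a; *d = rotl32(*d, 8);
	*c += *d; *b ^= *c; *b = rotl32(*b, 7);
}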
aixcc-public/challenge-001-exemplar-source
9,043
arch/x86/crypto/cast6-avx-x86_64-asm_64.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Cast6 Cipher 8-way parallel algorithm (AVX/x86_64) * * Copyright (C) 2012 Johannes Goetzfried * <Johannes.Goetzfried@informatik.stud.uni-erlangen.de> * * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi> */ #include <linux/linkage.h> #include <asm/frame.h> #include "glue_helper-asm-avx.S" .file "cast6-avx-x86_64-asm_64.S" .extern cast_s1 .extern cast_s2 .extern cast_s3 .extern cast_s4 /* structure of crypto context */ #define km 0 #define kr (12*4*4) /* s-boxes */ #define s1 cast_s1 #define s2 cast_s2 #define s3 cast_s3 #define s4 cast_s4 /********************************************************************** 8-way AVX cast6 **********************************************************************/ #define CTX %r15 #define RA1 %xmm0 #define RB1 %xmm1 #define RC1 %xmm2 #define RD1 %xmm3 #define RA2 %xmm4 #define RB2 %xmm5 #define RC2 %xmm6 #define RD2 %xmm7 #define RX %xmm8 #define RKM %xmm9 #define RKR %xmm10 #define RKRF %xmm11 #define RKRR %xmm12 #define R32 %xmm13 #define R1ST %xmm14 #define RTMP %xmm15 #define RID1 %rdi #define RID1d %edi #define RID2 %rsi #define RID2d %esi #define RGI1 %rdx #define RGI1bl %dl #define RGI1bh %dh #define RGI2 %rcx #define RGI2bl %cl #define RGI2bh %ch #define RGI3 %rax #define RGI3bl %al #define RGI3bh %ah #define RGI4 %rbx #define RGI4bl %bl #define RGI4bh %bh #define RFS1 %r8 #define RFS1d %r8d #define RFS2 %r9 #define RFS2d %r9d #define RFS3 %r10 #define RFS3d %r10d #define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \ movzbl src ## bh, RID1d; \ movzbl src ## bl, RID2d; \ shrq $16, src; \ movl s1(, RID1, 4), dst ## d; \ op1 s2(, RID2, 4), dst ## d; \ movzbl src ## bh, RID1d; \ movzbl src ## bl, RID2d; \ interleave_op(il_reg); \ op2 s3(, RID1, 4), dst ## d; \ op3 s4(, RID2, 4), dst ## d; #define dummy(d) /* do nothing */ #define shr_next(reg) \ shrq $16, reg; #define F_head(a, x, gi1, gi2, op0) \ op0 a, RKM, x; \ vpslld RKRF, x, RTMP; \ vpsrld RKRR, x, x; \ vpor RTMP, x, x; \ \ vmovq x, gi1; \ vpextrq $1, x, gi2; #define F_tail(a, x, gi1, gi2, op1, op2, op3) \ lookup_32bit(##gi1, RFS1, op1, op2, op3, shr_next, ##gi1); \ lookup_32bit(##gi2, RFS3, op1, op2, op3, shr_next, ##gi2); \ \ lookup_32bit(##gi1, RFS2, op1, op2, op3, dummy, none); \ shlq $32, RFS2; \ orq RFS1, RFS2; \ lookup_32bit(##gi2, RFS1, op1, op2, op3, dummy, none); \ shlq $32, RFS1; \ orq RFS1, RFS3; \ \ vmovq RFS2, x; \ vpinsrq $1, RFS3, x, x; #define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \ F_head(b1, RX, RGI1, RGI2, op0); \ F_head(b2, RX, RGI3, RGI4, op0); \ \ F_tail(b1, RX, RGI1, RGI2, op1, op2, op3); \ F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3); \ \ vpxor a1, RX, a1; \ vpxor a2, RTMP, a2; #define F1_2(a1, b1, a2, b2) \ F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl) #define F2_2(a1, b1, a2, b2) \ F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl) #define F3_2(a1, b1, a2, b2) \ F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl) #define qop(in, out, f) \ F ## f ## _2(out ## 1, in ## 1, out ## 2, in ## 2); #define get_round_keys(nn) \ vbroadcastss (km+(4*(nn)))(CTX), RKM; \ vpand R1ST, RKR, RKRF; \ vpsubq RKRF, R32, RKRR; \ vpsrldq $1, RKR, RKR; #define Q(n) \ get_round_keys(4*n+0); \ qop(RD, RC, 1); \ \ get_round_keys(4*n+1); \ qop(RC, RB, 2); \ \ get_round_keys(4*n+2); \ qop(RB, RA, 3); \ \ get_round_keys(4*n+3); \ qop(RA, RD, 1); #define QBAR(n) \ get_round_keys(4*n+3); \ qop(RA, RD, 1); \ \ get_round_keys(4*n+2); \ qop(RB, RA, 3); \ \ get_round_keys(4*n+1); \ qop(RC, RB, 2); \ \ get_round_keys(4*n+0); \ qop(RD, RC, 
1); #define shuffle(mask) \ vpshufb mask, RKR, RKR; #define preload_rkr(n, do_mask, mask) \ vbroadcastss .L16_mask, RKR; \ /* add 16-bit rotation to key rotations (mod 32) */ \ vpxor (kr+n*16)(CTX), RKR, RKR; \ do_mask(mask); #define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \ vpunpckldq x1, x0, t0; \ vpunpckhdq x1, x0, t2; \ vpunpckldq x3, x2, t1; \ vpunpckhdq x3, x2, x3; \ \ vpunpcklqdq t1, t0, x0; \ vpunpckhqdq t1, t0, x1; \ vpunpcklqdq x3, t2, x2; \ vpunpckhqdq x3, t2, x3; #define inpack_blocks(x0, x1, x2, x3, t0, t1, t2, rmask) \ vpshufb rmask, x0, x0; \ vpshufb rmask, x1, x1; \ vpshufb rmask, x2, x2; \ vpshufb rmask, x3, x3; \ \ transpose_4x4(x0, x1, x2, x3, t0, t1, t2) #define outunpack_blocks(x0, x1, x2, x3, t0, t1, t2, rmask) \ transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \ \ vpshufb rmask, x0, x0; \ vpshufb rmask, x1, x1; \ vpshufb rmask, x2, x2; \ vpshufb rmask, x3, x3; .section .rodata.cst16, "aM", @progbits, 16 .align 16 .Lbswap_mask: .byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 .Lbswap128_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 .Lrkr_enc_Q_Q_QBAR_QBAR: .byte 0, 1, 2, 3, 4, 5, 6, 7, 11, 10, 9, 8, 15, 14, 13, 12 .Lrkr_enc_QBAR_QBAR_QBAR_QBAR: .byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 .Lrkr_dec_Q_Q_Q_Q: .byte 12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3 .Lrkr_dec_Q_Q_QBAR_QBAR: .byte 12, 13, 14, 15, 8, 9, 10, 11, 7, 6, 5, 4, 3, 2, 1, 0 .Lrkr_dec_QBAR_QBAR_QBAR_QBAR: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 .section .rodata.cst4.L16_mask, "aM", @progbits, 4 .align 4 .L16_mask: .byte 16, 16, 16, 16 .section .rodata.cst4.L32_mask, "aM", @progbits, 4 .align 4 .L32_mask: .byte 32, 0, 0, 0 .section .rodata.cst4.first_mask, "aM", @progbits, 4 .align 4 .Lfirst_mask: .byte 0x1f, 0, 0, 0 .text .align 8 SYM_FUNC_START_LOCAL(__cast6_enc_blk8) /* input: * %rdi: ctx * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: blocks * output: * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks */ pushq %r15; pushq %rbx; movq %rdi, CTX; vmovdqa .Lbswap_mask, RKM; vmovd .Lfirst_mask, R1ST; vmovd .L32_mask, R32; inpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM); inpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM); preload_rkr(0, dummy, none); Q(0); Q(1); Q(2); Q(3); preload_rkr(1, shuffle, .Lrkr_enc_Q_Q_QBAR_QBAR); Q(4); Q(5); QBAR(6); QBAR(7); preload_rkr(2, shuffle, .Lrkr_enc_QBAR_QBAR_QBAR_QBAR); QBAR(8); QBAR(9); QBAR(10); QBAR(11); popq %rbx; popq %r15; vmovdqa .Lbswap_mask, RKM; outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM); outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM); RET; SYM_FUNC_END(__cast6_enc_blk8) .align 8 SYM_FUNC_START_LOCAL(__cast6_dec_blk8) /* input: * %rdi: ctx * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks * output: * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: decrypted blocks */ pushq %r15; pushq %rbx; movq %rdi, CTX; vmovdqa .Lbswap_mask, RKM; vmovd .Lfirst_mask, R1ST; vmovd .L32_mask, R32; inpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM); inpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM); preload_rkr(2, shuffle, .Lrkr_dec_Q_Q_Q_Q); Q(11); Q(10); Q(9); Q(8); preload_rkr(1, shuffle, .Lrkr_dec_Q_Q_QBAR_QBAR); Q(7); Q(6); QBAR(5); QBAR(4); preload_rkr(0, shuffle, .Lrkr_dec_QBAR_QBAR_QBAR_QBAR); QBAR(3); QBAR(2); QBAR(1); QBAR(0); popq %rbx; popq %r15; vmovdqa .Lbswap_mask, RKM; outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM); outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM); RET; SYM_FUNC_END(__cast6_dec_blk8) 
SYM_FUNC_START(cast6_ecb_enc_8way) /* input: * %rdi: ctx * %rsi: dst * %rdx: src */ FRAME_BEGIN pushq %r15; movq %rdi, CTX; movq %rsi, %r11; load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); call __cast6_enc_blk8; store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); popq %r15; FRAME_END RET; SYM_FUNC_END(cast6_ecb_enc_8way) SYM_FUNC_START(cast6_ecb_dec_8way) /* input: * %rdi: ctx * %rsi: dst * %rdx: src */ FRAME_BEGIN pushq %r15; movq %rdi, CTX; movq %rsi, %r11; load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); call __cast6_dec_blk8; store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); popq %r15; FRAME_END RET; SYM_FUNC_END(cast6_ecb_dec_8way) SYM_FUNC_START(cast6_cbc_dec_8way) /* input: * %rdi: ctx * %rsi: dst * %rdx: src */ FRAME_BEGIN pushq %r12; pushq %r15; movq %rdi, CTX; movq %rsi, %r11; movq %rdx, %r12; load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); call __cast6_dec_blk8; store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); popq %r15; popq %r12; FRAME_END RET; SYM_FUNC_END(cast6_cbc_dec_8way)
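One piece of the file above that is easy to gloss over is transpose_4x4, which converts four 128-bit blocks loaded row-wise into the word-sliced layout the round macros operate on (and back again on output). A scalar C equivalent, for illustration only -- the vector version builds the same result out of vpunpckldq/vpunpckhdq followed by vpunpcklqdq/vpunpckhqdq:

#include <stdint.h>

/* Transpose a 4x4 matrix of 32-bit words in place: row i of the input
 * (one 16-byte block) becomes column i of the output, so each "row" of the
 * result holds the same word position from all four blocks. */
static void transpose_4x4_u32(uint32_t m[4][4])
{
	for (int i = 0; i < 4; i++)
		for (int j = i + 1; j < 4; j++) {
			uint32_t t = m[i][j];
			m[i][j] = m[j][i];
			m[j][i] = t;
		}
}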
aixcc-public/challenge-001-exemplar-source
3,737
arch/x86/crypto/nh-avx2-x86_64.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * NH - ε-almost-universal hash function, x86_64 AVX2 accelerated * * Copyright 2018 Google LLC * * Author: Eric Biggers <ebiggers@google.com> */ #include <linux/linkage.h> #define PASS0_SUMS %ymm0 #define PASS1_SUMS %ymm1 #define PASS2_SUMS %ymm2 #define PASS3_SUMS %ymm3 #define K0 %ymm4 #define K0_XMM %xmm4 #define K1 %ymm5 #define K1_XMM %xmm5 #define K2 %ymm6 #define K2_XMM %xmm6 #define K3 %ymm7 #define K3_XMM %xmm7 #define T0 %ymm8 #define T1 %ymm9 #define T2 %ymm10 #define T2_XMM %xmm10 #define T3 %ymm11 #define T3_XMM %xmm11 #define T4 %ymm12 #define T5 %ymm13 #define T6 %ymm14 #define T7 %ymm15 #define KEY %rdi #define MESSAGE %rsi #define MESSAGE_LEN %rdx #define HASH %rcx .macro _nh_2xstride k0, k1, k2, k3 // Add message words to key words vpaddd \k0, T3, T0 vpaddd \k1, T3, T1 vpaddd \k2, T3, T2 vpaddd \k3, T3, T3 // Multiply 32x32 => 64 and accumulate vpshufd $0x10, T0, T4 vpshufd $0x32, T0, T0 vpshufd $0x10, T1, T5 vpshufd $0x32, T1, T1 vpshufd $0x10, T2, T6 vpshufd $0x32, T2, T2 vpshufd $0x10, T3, T7 vpshufd $0x32, T3, T3 vpmuludq T4, T0, T0 vpmuludq T5, T1, T1 vpmuludq T6, T2, T2 vpmuludq T7, T3, T3 vpaddq T0, PASS0_SUMS, PASS0_SUMS vpaddq T1, PASS1_SUMS, PASS1_SUMS vpaddq T2, PASS2_SUMS, PASS2_SUMS vpaddq T3, PASS3_SUMS, PASS3_SUMS .endm /* * void nh_avx2(const u32 *key, const u8 *message, size_t message_len, * u8 hash[NH_HASH_BYTES]) * * It's guaranteed that message_len % 16 == 0. */ SYM_FUNC_START(nh_avx2) vmovdqu 0x00(KEY), K0 vmovdqu 0x10(KEY), K1 add $0x20, KEY vpxor PASS0_SUMS, PASS0_SUMS, PASS0_SUMS vpxor PASS1_SUMS, PASS1_SUMS, PASS1_SUMS vpxor PASS2_SUMS, PASS2_SUMS, PASS2_SUMS vpxor PASS3_SUMS, PASS3_SUMS, PASS3_SUMS sub $0x40, MESSAGE_LEN jl .Lloop4_done .Lloop4: vmovdqu (MESSAGE), T3 vmovdqu 0x00(KEY), K2 vmovdqu 0x10(KEY), K3 _nh_2xstride K0, K1, K2, K3 vmovdqu 0x20(MESSAGE), T3 vmovdqu 0x20(KEY), K0 vmovdqu 0x30(KEY), K1 _nh_2xstride K2, K3, K0, K1 add $0x40, MESSAGE add $0x40, KEY sub $0x40, MESSAGE_LEN jge .Lloop4 .Lloop4_done: and $0x3f, MESSAGE_LEN jz .Ldone cmp $0x20, MESSAGE_LEN jl .Llast // 2 or 3 strides remain; do 2 more. vmovdqu (MESSAGE), T3 vmovdqu 0x00(KEY), K2 vmovdqu 0x10(KEY), K3 _nh_2xstride K0, K1, K2, K3 add $0x20, MESSAGE add $0x20, KEY sub $0x20, MESSAGE_LEN jz .Ldone vmovdqa K2, K0 vmovdqa K3, K1 .Llast: // Last stride. Zero the high 128 bits of the message and keys so they // don't affect the result when processing them like 2 strides. vmovdqu (MESSAGE), T3_XMM vmovdqa K0_XMM, K0_XMM vmovdqa K1_XMM, K1_XMM vmovdqu 0x00(KEY), K2_XMM vmovdqu 0x10(KEY), K3_XMM _nh_2xstride K0, K1, K2, K3 .Ldone: // Sum the accumulators for each pass, then store the sums to 'hash' // PASS0_SUMS is (0A 0B 0C 0D) // PASS1_SUMS is (1A 1B 1C 1D) // PASS2_SUMS is (2A 2B 2C 2D) // PASS3_SUMS is (3A 3B 3C 3D) // We need the horizontal sums: // (0A + 0B + 0C + 0D, // 1A + 1B + 1C + 1D, // 2A + 2B + 2C + 2D, // 3A + 3B + 3C + 3D) // vpunpcklqdq PASS1_SUMS, PASS0_SUMS, T0 // T0 = (0A 1A 0C 1C) vpunpckhqdq PASS1_SUMS, PASS0_SUMS, T1 // T1 = (0B 1B 0D 1D) vpunpcklqdq PASS3_SUMS, PASS2_SUMS, T2 // T2 = (2A 3A 2C 3C) vpunpckhqdq PASS3_SUMS, PASS2_SUMS, T3 // T3 = (2B 3B 2D 3D) vinserti128 $0x1, T2_XMM, T0, T4 // T4 = (0A 1A 2A 3A) vinserti128 $0x1, T3_XMM, T1, T5 // T5 = (0B 1B 2B 3B) vperm2i128 $0x31, T2, T0, T0 // T0 = (0C 1C 2C 3C) vperm2i128 $0x31, T3, T1, T1 // T1 = (0D 1D 2D 3D) vpaddq T5, T4, T4 vpaddq T1, T0, T0 vpaddq T4, T0, T0 vmovdqu T0, (HASH) RET SYM_FUNC_END(nh_avx2)
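The core of nh_avx2 is the 32x32->64 multiply-accumulate performed by the vpshufd/vpmuludq/vpaddq sequence in _nh_2xstride. The sketch below shows that operation for a single pass in scalar C; it is illustrative only. The real function keeps four pass sums (PASS0_SUMS..PASS3_SUMS) over overlapping key offsets, and the exact multi-pass key striding is defined by the NH construction itself and is not reproduced here.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Single-pass NH-style accumulation over 16-byte message units. Core
 * operation: add 32-bit message words to 32-bit key words, multiply the
 * paired results as 32x32->64, and accumulate into a 64-bit sum.
 * message_len is assumed to be a multiple of 16, as the real code requires. */
static uint64_t nh_pass_demo(const uint32_t *key,
			     const uint8_t *message, size_t message_len)
{
	uint64_t sum = 0;
	uint32_t m[4];

	for (size_t i = 0; i < message_len; i += 16, key += 4) {
		memcpy(m, message + i, sizeof(m));	/* little-endian words */
		sum += (uint64_t)(uint32_t)(m[0] + key[0]) *
		       (uint32_t)(m[2] + key[2]);
		sum += (uint64_t)(uint32_t)(m[1] + key[1]) *
		       (uint32_t)(m[3] + key[3]);
	}
	return sum;
}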
aixcc-public/challenge-001-exemplar-source
5,291
arch/x86/crypto/crc32-pclmul_asm.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright 2012 Xyratex Technology Limited * * Using hardware provided PCLMULQDQ instruction to accelerate the CRC32 * calculation. * CRC32 polynomial:0x04c11db7(BE)/0xEDB88320(LE) * PCLMULQDQ is a new instruction in Intel SSE4.2, the reference can be found * at: * http://www.intel.com/products/processor/manuals/ * Intel(R) 64 and IA-32 Architectures Software Developer's Manual * Volume 2B: Instruction Set Reference, N-Z * * Authors: Gregory Prestas <Gregory_Prestas@us.xyratex.com> * Alexander Boyko <Alexander_Boyko@xyratex.com> */ #include <linux/linkage.h> .section .rodata .align 16 /* * [x4*128+32 mod P(x) << 32)]' << 1 = 0x154442bd4 * #define CONSTANT_R1 0x154442bd4LL * * [(x4*128-32 mod P(x) << 32)]' << 1 = 0x1c6e41596 * #define CONSTANT_R2 0x1c6e41596LL */ .Lconstant_R2R1: .octa 0x00000001c6e415960000000154442bd4 /* * [(x128+32 mod P(x) << 32)]' << 1 = 0x1751997d0 * #define CONSTANT_R3 0x1751997d0LL * * [(x128-32 mod P(x) << 32)]' << 1 = 0x0ccaa009e * #define CONSTANT_R4 0x0ccaa009eLL */ .Lconstant_R4R3: .octa 0x00000000ccaa009e00000001751997d0 /* * [(x64 mod P(x) << 32)]' << 1 = 0x163cd6124 * #define CONSTANT_R5 0x163cd6124LL */ .Lconstant_R5: .octa 0x00000000000000000000000163cd6124 .Lconstant_mask32: .octa 0x000000000000000000000000FFFFFFFF /* * #define CRCPOLY_TRUE_LE_FULL 0x1DB710641LL * * Barrett Reduction constant (u64`) = u` = (x**64 / P(x))` = 0x1F7011641LL * #define CONSTANT_RU 0x1F7011641LL */ .Lconstant_RUpoly: .octa 0x00000001F701164100000001DB710641 #define CONSTANT %xmm0 #ifdef __x86_64__ #define BUF %rdi #define LEN %rsi #define CRC %edx #else #define BUF %eax #define LEN %edx #define CRC %ecx #endif .text /** * Calculate crc32 * BUF - buffer (16 bytes aligned) * LEN - sizeof buffer (16 bytes aligned), LEN should be grater than 63 * CRC - initial crc32 * return %eax crc32 * uint crc32_pclmul_le_16(unsigned char const *buffer, * size_t len, uint crc32) */ SYM_FUNC_START(crc32_pclmul_le_16) /* buffer and buffer size are 16 bytes aligned */ movdqa (BUF), %xmm1 movdqa 0x10(BUF), %xmm2 movdqa 0x20(BUF), %xmm3 movdqa 0x30(BUF), %xmm4 movd CRC, CONSTANT pxor CONSTANT, %xmm1 sub $0x40, LEN add $0x40, BUF cmp $0x40, LEN jb less_64 #ifdef __x86_64__ movdqa .Lconstant_R2R1(%rip), CONSTANT #else movdqa .Lconstant_R2R1, CONSTANT #endif loop_64:/* 64 bytes Full cache line folding */ prefetchnta 0x40(BUF) movdqa %xmm1, %xmm5 movdqa %xmm2, %xmm6 movdqa %xmm3, %xmm7 #ifdef __x86_64__ movdqa %xmm4, %xmm8 #endif pclmulqdq $0x00, CONSTANT, %xmm1 pclmulqdq $0x00, CONSTANT, %xmm2 pclmulqdq $0x00, CONSTANT, %xmm3 #ifdef __x86_64__ pclmulqdq $0x00, CONSTANT, %xmm4 #endif pclmulqdq $0x11, CONSTANT, %xmm5 pclmulqdq $0x11, CONSTANT, %xmm6 pclmulqdq $0x11, CONSTANT, %xmm7 #ifdef __x86_64__ pclmulqdq $0x11, CONSTANT, %xmm8 #endif pxor %xmm5, %xmm1 pxor %xmm6, %xmm2 pxor %xmm7, %xmm3 #ifdef __x86_64__ pxor %xmm8, %xmm4 #else /* xmm8 unsupported for x32 */ movdqa %xmm4, %xmm5 pclmulqdq $0x00, CONSTANT, %xmm4 pclmulqdq $0x11, CONSTANT, %xmm5 pxor %xmm5, %xmm4 #endif pxor (BUF), %xmm1 pxor 0x10(BUF), %xmm2 pxor 0x20(BUF), %xmm3 pxor 0x30(BUF), %xmm4 sub $0x40, LEN add $0x40, BUF cmp $0x40, LEN jge loop_64 less_64:/* Folding cache line into 128bit */ #ifdef __x86_64__ movdqa .Lconstant_R4R3(%rip), CONSTANT #else movdqa .Lconstant_R4R3, CONSTANT #endif prefetchnta (BUF) movdqa %xmm1, %xmm5 pclmulqdq $0x00, CONSTANT, %xmm1 pclmulqdq $0x11, CONSTANT, %xmm5 pxor %xmm5, %xmm1 pxor %xmm2, %xmm1 movdqa %xmm1, %xmm5 pclmulqdq $0x00, CONSTANT, %xmm1 pclmulqdq 
$0x11, CONSTANT, %xmm5 pxor %xmm5, %xmm1 pxor %xmm3, %xmm1 movdqa %xmm1, %xmm5 pclmulqdq $0x00, CONSTANT, %xmm1 pclmulqdq $0x11, CONSTANT, %xmm5 pxor %xmm5, %xmm1 pxor %xmm4, %xmm1 cmp $0x10, LEN jb fold_64 loop_16:/* Folding rest buffer into 128bit */ movdqa %xmm1, %xmm5 pclmulqdq $0x00, CONSTANT, %xmm1 pclmulqdq $0x11, CONSTANT, %xmm5 pxor %xmm5, %xmm1 pxor (BUF), %xmm1 sub $0x10, LEN add $0x10, BUF cmp $0x10, LEN jge loop_16 fold_64: /* perform the last 64 bit fold, also adds 32 zeroes * to the input stream */ pclmulqdq $0x01, %xmm1, CONSTANT /* R4 * xmm1.low */ psrldq $0x08, %xmm1 pxor CONSTANT, %xmm1 /* final 32-bit fold */ movdqa %xmm1, %xmm2 #ifdef __x86_64__ movdqa .Lconstant_R5(%rip), CONSTANT movdqa .Lconstant_mask32(%rip), %xmm3 #else movdqa .Lconstant_R5, CONSTANT movdqa .Lconstant_mask32, %xmm3 #endif psrldq $0x04, %xmm2 pand %xmm3, %xmm1 pclmulqdq $0x00, CONSTANT, %xmm1 pxor %xmm2, %xmm1 /* Finish up with the bit-reversed barrett reduction 64 ==> 32 bits */ #ifdef __x86_64__ movdqa .Lconstant_RUpoly(%rip), CONSTANT #else movdqa .Lconstant_RUpoly, CONSTANT #endif movdqa %xmm1, %xmm2 pand %xmm3, %xmm1 pclmulqdq $0x10, CONSTANT, %xmm1 pand %xmm3, %xmm1 pclmulqdq $0x00, CONSTANT, %xmm1 pxor %xmm2, %xmm1 pextrd $0x01, %xmm1, %eax RET SYM_FUNC_END(crc32_pclmul_le_16)
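For reference, the value the routine above is meant to compute is the reflected ("LE") CRC-32 over the buffer, continued from the crc argument; only the folding strategy differs. A bit-at-a-time scalar equivalent is sketched below -- illustrative only, and initial/final XOR conventions are left to the caller in both versions, as are the alignment and minimum-length requirements noted above.

#include <stdint.h>
#include <stddef.h>

/* Bitwise reflected CRC-32, polynomial 0xEDB88320 (the "LE" form named in
 * the header above). The PCLMULQDQ routine folds 64 bytes per iteration and
 * then reduces, rather than processing one bit at a time. */
static uint32_t crc32_le_ref(uint32_t crc, const uint8_t *buf, size_t len)
{
	for (size_t i = 0; i < len; i++) {
		crc ^= buf[i];
		for (int b = 0; b < 8; b++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0);
	}
	return crc;
}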
aixcc-public/challenge-001-exemplar-source
100,008
arch/x86/crypto/aesni-intel_avx-x86_64.S
######################################################################## # Copyright (c) 2013, Intel Corporation # # This software is available to you under a choice of one of two # licenses. You may choose to be licensed under the terms of the GNU # General Public License (GPL) Version 2, available from the file # COPYING in the main directory of this source tree, or the # OpenIB.org BSD license below: # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the # distribution. # # * Neither the name of the Intel Corporation nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # # THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION ""AS IS"" AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES# LOSS OF USE, DATA, OR # PROFITS# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ######################################################################## ## ## Authors: ## Erdinc Ozturk <erdinc.ozturk@intel.com> ## Vinodh Gopal <vinodh.gopal@intel.com> ## James Guilford <james.guilford@intel.com> ## Tim Chen <tim.c.chen@linux.intel.com> ## ## References: ## This code was derived and highly optimized from the code described in paper: ## Vinodh Gopal et. al. Optimized Galois-Counter-Mode Implementation ## on Intel Architecture Processors. August, 2010 ## The details of the implementation is explained in: ## Erdinc Ozturk et. al. Enabling High-Performance Galois-Counter-Mode ## on Intel Architecture Processors. October, 2012. 
## ## Assumptions: ## ## ## ## iv: ## 0 1 2 3 ## 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 ## +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ## | Salt (From the SA) | ## +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ## | Initialization Vector | ## | (This is the sequence number from IPSec header) | ## +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ## | 0x1 | ## +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ## ## ## ## AAD: ## AAD padded to 128 bits with 0 ## for example, assume AAD is a u32 vector ## ## if AAD is 8 bytes: ## AAD[3] = {A0, A1}# ## padded AAD in xmm register = {A1 A0 0 0} ## ## 0 1 2 3 ## 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 ## +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ## | SPI (A1) | ## +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ## | 32-bit Sequence Number (A0) | ## +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ## | 0x0 | ## +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ## ## AAD Format with 32-bit Sequence Number ## ## if AAD is 12 bytes: ## AAD[3] = {A0, A1, A2}# ## padded AAD in xmm register = {A2 A1 A0 0} ## ## 0 1 2 3 ## 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 ## +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ## | SPI (A2) | ## +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ## | 64-bit Extended Sequence Number {A1,A0} | ## | | ## +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ## | 0x0 | ## +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ## ## AAD Format with 64-bit Extended Sequence Number ## ## ## aadLen: ## from the definition of the spec, aadLen can only be 8 or 12 bytes. ## The code additionally supports aadLen of length 16 bytes. ## ## TLen: ## from the definition of the spec, TLen can only be 8, 12 or 16 bytes. ## ## poly = x^128 + x^127 + x^126 + x^121 + 1 ## throughout the code, one tab and two tab indentations are used. one tab is ## for GHASH part, two tabs is for AES part. ## #include <linux/linkage.h> # constants in mergeable sections, linker can reorder and merge .section .rodata.cst16.POLY, "aM", @progbits, 16 .align 16 POLY: .octa 0xC2000000000000000000000000000001 .section .rodata.cst16.POLY2, "aM", @progbits, 16 .align 16 POLY2: .octa 0xC20000000000000000000001C2000000 .section .rodata.cst16.TWOONE, "aM", @progbits, 16 .align 16 TWOONE: .octa 0x00000001000000000000000000000001 .section .rodata.cst16.SHUF_MASK, "aM", @progbits, 16 .align 16 SHUF_MASK: .octa 0x000102030405060708090A0B0C0D0E0F .section .rodata.cst16.ONE, "aM", @progbits, 16 .align 16 ONE: .octa 0x00000000000000000000000000000001 .section .rodata.cst16.ONEf, "aM", @progbits, 16 .align 16 ONEf: .octa 0x01000000000000000000000000000000 # order of these constants should not change. 
# more specifically, ALL_F should follow SHIFT_MASK, and zero should follow ALL_F .section .rodata, "a", @progbits .align 16 SHIFT_MASK: .octa 0x0f0e0d0c0b0a09080706050403020100 ALL_F: .octa 0xffffffffffffffffffffffffffffffff .octa 0x00000000000000000000000000000000 .section .rodata .align 16 .type aad_shift_arr, @object .size aad_shift_arr, 272 aad_shift_arr: .octa 0xffffffffffffffffffffffffffffffff .octa 0xffffffffffffffffffffffffffffff0C .octa 0xffffffffffffffffffffffffffff0D0C .octa 0xffffffffffffffffffffffffff0E0D0C .octa 0xffffffffffffffffffffffff0F0E0D0C .octa 0xffffffffffffffffffffff0C0B0A0908 .octa 0xffffffffffffffffffff0D0C0B0A0908 .octa 0xffffffffffffffffff0E0D0C0B0A0908 .octa 0xffffffffffffffff0F0E0D0C0B0A0908 .octa 0xffffffffffffff0C0B0A090807060504 .octa 0xffffffffffff0D0C0B0A090807060504 .octa 0xffffffffff0E0D0C0B0A090807060504 .octa 0xffffffff0F0E0D0C0B0A090807060504 .octa 0xffffff0C0B0A09080706050403020100 .octa 0xffff0D0C0B0A09080706050403020100 .octa 0xff0E0D0C0B0A09080706050403020100 .octa 0x0F0E0D0C0B0A09080706050403020100 .text #define AadHash 16*0 #define AadLen 16*1 #define InLen (16*1)+8 #define PBlockEncKey 16*2 #define OrigIV 16*3 #define CurCount 16*4 #define PBlockLen 16*5 HashKey = 16*6 # store HashKey <<1 mod poly here HashKey_2 = 16*7 # store HashKey^2 <<1 mod poly here HashKey_3 = 16*8 # store HashKey^3 <<1 mod poly here HashKey_4 = 16*9 # store HashKey^4 <<1 mod poly here HashKey_5 = 16*10 # store HashKey^5 <<1 mod poly here HashKey_6 = 16*11 # store HashKey^6 <<1 mod poly here HashKey_7 = 16*12 # store HashKey^7 <<1 mod poly here HashKey_8 = 16*13 # store HashKey^8 <<1 mod poly here HashKey_k = 16*14 # store XOR of HashKey <<1 mod poly here (for Karatsuba purposes) HashKey_2_k = 16*15 # store XOR of HashKey^2 <<1 mod poly here (for Karatsuba purposes) HashKey_3_k = 16*16 # store XOR of HashKey^3 <<1 mod poly here (for Karatsuba purposes) HashKey_4_k = 16*17 # store XOR of HashKey^4 <<1 mod poly here (for Karatsuba purposes) HashKey_5_k = 16*18 # store XOR of HashKey^5 <<1 mod poly here (for Karatsuba purposes) HashKey_6_k = 16*19 # store XOR of HashKey^6 <<1 mod poly here (for Karatsuba purposes) HashKey_7_k = 16*20 # store XOR of HashKey^7 <<1 mod poly here (for Karatsuba purposes) HashKey_8_k = 16*21 # store XOR of HashKey^8 <<1 mod poly here (for Karatsuba purposes) #define arg1 %rdi #define arg2 %rsi #define arg3 %rdx #define arg4 %rcx #define arg5 %r8 #define arg6 %r9 #define keysize 2*15*16(arg1) i = 0 j = 0 out_order = 0 in_order = 1 DEC = 0 ENC = 1 .macro define_reg r n reg_\r = %xmm\n .endm .macro setreg .altmacro define_reg i %i define_reg j %j .noaltmacro .endm TMP1 = 16*0 # Temporary storage for AAD TMP2 = 16*1 # Temporary storage for AES State 2 (State 1 is stored in an XMM register) TMP3 = 16*2 # Temporary storage for AES State 3 TMP4 = 16*3 # Temporary storage for AES State 4 TMP5 = 16*4 # Temporary storage for AES State 5 TMP6 = 16*5 # Temporary storage for AES State 6 TMP7 = 16*6 # Temporary storage for AES State 7 TMP8 = 16*7 # Temporary storage for AES State 8 VARIABLE_OFFSET = 16*8 ################################ # Utility Macros ################################ .macro FUNC_SAVE push %r12 push %r13 push %r15 push %rbp mov %rsp, %rbp sub $VARIABLE_OFFSET, %rsp and $~63, %rsp # align rsp to 64 bytes .endm .macro FUNC_RESTORE mov %rbp, %rsp pop %rbp pop %r15 pop %r13 pop %r12 .endm # Encryption of a single block .macro ENCRYPT_SINGLE_BLOCK REP XMM0 vpxor (arg1), \XMM0, \XMM0 i = 1 setreg .rep \REP vaesenc 16*i(arg1), \XMM0, \XMM0 i = 
(i+1) setreg .endr vaesenclast 16*i(arg1), \XMM0, \XMM0 .endm # combined for GCM encrypt and decrypt functions # clobbering all xmm registers # clobbering r10, r11, r12, r13, r15, rax .macro GCM_ENC_DEC INITIAL_BLOCKS GHASH_8_ENCRYPT_8_PARALLEL GHASH_LAST_8 GHASH_MUL ENC_DEC REP vmovdqu AadHash(arg2), %xmm8 vmovdqu HashKey(arg2), %xmm13 # xmm13 = HashKey add arg5, InLen(arg2) # initialize the data pointer offset as zero xor %r11d, %r11d PARTIAL_BLOCK \GHASH_MUL, arg3, arg4, arg5, %r11, %xmm8, \ENC_DEC sub %r11, arg5 mov arg5, %r13 # save the number of bytes of plaintext/ciphertext and $-16, %r13 # r13 = r13 - (r13 mod 16) mov %r13, %r12 shr $4, %r12 and $7, %r12 jz _initial_num_blocks_is_0\@ cmp $7, %r12 je _initial_num_blocks_is_7\@ cmp $6, %r12 je _initial_num_blocks_is_6\@ cmp $5, %r12 je _initial_num_blocks_is_5\@ cmp $4, %r12 je _initial_num_blocks_is_4\@ cmp $3, %r12 je _initial_num_blocks_is_3\@ cmp $2, %r12 je _initial_num_blocks_is_2\@ jmp _initial_num_blocks_is_1\@ _initial_num_blocks_is_7\@: \INITIAL_BLOCKS \REP, 7, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC sub $16*7, %r13 jmp _initial_blocks_encrypted\@ _initial_num_blocks_is_6\@: \INITIAL_BLOCKS \REP, 6, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC sub $16*6, %r13 jmp _initial_blocks_encrypted\@ _initial_num_blocks_is_5\@: \INITIAL_BLOCKS \REP, 5, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC sub $16*5, %r13 jmp _initial_blocks_encrypted\@ _initial_num_blocks_is_4\@: \INITIAL_BLOCKS \REP, 4, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC sub $16*4, %r13 jmp _initial_blocks_encrypted\@ _initial_num_blocks_is_3\@: \INITIAL_BLOCKS \REP, 3, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC sub $16*3, %r13 jmp _initial_blocks_encrypted\@ _initial_num_blocks_is_2\@: \INITIAL_BLOCKS \REP, 2, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC sub $16*2, %r13 jmp _initial_blocks_encrypted\@ _initial_num_blocks_is_1\@: \INITIAL_BLOCKS \REP, 1, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC sub $16*1, %r13 jmp _initial_blocks_encrypted\@ _initial_num_blocks_is_0\@: \INITIAL_BLOCKS \REP, 0, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC _initial_blocks_encrypted\@: test %r13, %r13 je _zero_cipher_left\@ sub $128, %r13 je _eight_cipher_left\@ vmovd %xmm9, %r15d and $255, %r15d vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 _encrypt_by_8_new\@: cmp $(255-8), %r15d jg _encrypt_by_8\@ add $8, %r15b \GHASH_8_ENCRYPT_8_PARALLEL \REP, %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm15, out_order, \ENC_DEC add $128, %r11 sub $128, %r13 jne _encrypt_by_8_new\@ vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 jmp _eight_cipher_left\@ _encrypt_by_8\@: vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 add $8, %r15b \GHASH_8_ENCRYPT_8_PARALLEL \REP, %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm15, in_order, \ENC_DEC 
vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 add $128, %r11 sub $128, %r13 jne _encrypt_by_8_new\@ vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 _eight_cipher_left\@: \GHASH_LAST_8 %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8 _zero_cipher_left\@: vmovdqu %xmm14, AadHash(arg2) vmovdqu %xmm9, CurCount(arg2) # check for 0 length mov arg5, %r13 and $15, %r13 # r13 = (arg5 mod 16) je _multiple_of_16_bytes\@ # handle the last <16 Byte block separately mov %r13, PBlockLen(arg2) vpaddd ONE(%rip), %xmm9, %xmm9 # INCR CNT to get Yn vmovdqu %xmm9, CurCount(arg2) vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 ENCRYPT_SINGLE_BLOCK \REP, %xmm9 # E(K, Yn) vmovdqu %xmm9, PBlockEncKey(arg2) cmp $16, arg5 jge _large_enough_update\@ lea (arg4,%r11,1), %r10 mov %r13, %r12 READ_PARTIAL_BLOCK %r10 %r12 %xmm1 lea SHIFT_MASK+16(%rip), %r12 sub %r13, %r12 # adjust the shuffle mask pointer to be # able to shift 16-r13 bytes (r13 is the # number of bytes in plaintext mod 16) jmp _final_ghash_mul\@ _large_enough_update\@: sub $16, %r11 add %r13, %r11 # receive the last <16 Byte block vmovdqu (arg4, %r11, 1), %xmm1 sub %r13, %r11 add $16, %r11 lea SHIFT_MASK+16(%rip), %r12 # adjust the shuffle mask pointer to be able to shift 16-r13 bytes # (r13 is the number of bytes in plaintext mod 16) sub %r13, %r12 # get the appropriate shuffle mask vmovdqu (%r12), %xmm2 # shift right 16-r13 bytes vpshufb %xmm2, %xmm1, %xmm1 _final_ghash_mul\@: .if \ENC_DEC == DEC vmovdqa %xmm1, %xmm2 vpxor %xmm1, %xmm9, %xmm9 # Plaintext XOR E(K, Yn) vmovdqu ALL_F-SHIFT_MASK(%r12), %xmm1 # get the appropriate mask to # mask out top 16-r13 bytes of xmm9 vpand %xmm1, %xmm9, %xmm9 # mask out top 16-r13 bytes of xmm9 vpand %xmm1, %xmm2, %xmm2 vpshufb SHUF_MASK(%rip), %xmm2, %xmm2 vpxor %xmm2, %xmm14, %xmm14 vmovdqu %xmm14, AadHash(arg2) .else vpxor %xmm1, %xmm9, %xmm9 # Plaintext XOR E(K, Yn) vmovdqu ALL_F-SHIFT_MASK(%r12), %xmm1 # get the appropriate mask to # mask out top 16-r13 bytes of xmm9 vpand %xmm1, %xmm9, %xmm9 # mask out top 16-r13 bytes of xmm9 vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 vpxor %xmm9, %xmm14, %xmm14 vmovdqu %xmm14, AadHash(arg2) vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 # shuffle xmm9 back to output as ciphertext .endif ############################# # output r13 Bytes vmovq %xmm9, %rax cmp $8, %r13 jle _less_than_8_bytes_left\@ mov %rax, (arg3 , %r11) add $8, %r11 vpsrldq $8, %xmm9, %xmm9 vmovq %xmm9, %rax sub $8, %r13 _less_than_8_bytes_left\@: movb %al, (arg3 , %r11) add $1, %r11 shr $8, %rax sub $1, %r13 jne _less_than_8_bytes_left\@ ############################# _multiple_of_16_bytes\@: .endm # GCM_COMPLETE Finishes update of tag of last partial block # Output: Authorization Tag (AUTH_TAG) # Clobbers rax, r10-r12, and xmm0, xmm1, xmm5-xmm15 .macro GCM_COMPLETE GHASH_MUL REP AUTH_TAG AUTH_TAG_LEN vmovdqu AadHash(arg2), %xmm14 vmovdqu HashKey(arg2), %xmm13 mov PBlockLen(arg2), %r12 test %r12, %r12 je _partial_done\@ #GHASH computation for the last <16 Byte block \GHASH_MUL %xmm14, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6 _partial_done\@: mov AadLen(arg2), %r12 # r12 = aadLen (number of bytes) shl $3, %r12 # convert into number of bits vmovd %r12d, %xmm15 # len(A) in xmm15 mov InLen(arg2), %r12 shl $3, %r12 # len(C) in bits (*128) vmovq %r12, %xmm1 vpslldq $8, %xmm15, %xmm15 # xmm15 = len(A)|| 0x0000000000000000 vpxor %xmm1, %xmm15, %xmm15 # xmm15 = len(A)||len(C) vpxor %xmm15, %xmm14, %xmm14 \GHASH_MUL %xmm14, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6 # final GHASH computation vpshufb 
SHUF_MASK(%rip), %xmm14, %xmm14 # perform a 16Byte swap vmovdqu OrigIV(arg2), %xmm9 ENCRYPT_SINGLE_BLOCK \REP, %xmm9 # E(K, Y0) vpxor %xmm14, %xmm9, %xmm9 _return_T\@: mov \AUTH_TAG, %r10 # r10 = authTag mov \AUTH_TAG_LEN, %r11 # r11 = auth_tag_len cmp $16, %r11 je _T_16\@ cmp $8, %r11 jl _T_4\@ _T_8\@: vmovq %xmm9, %rax mov %rax, (%r10) add $8, %r10 sub $8, %r11 vpsrldq $8, %xmm9, %xmm9 test %r11, %r11 je _return_T_done\@ _T_4\@: vmovd %xmm9, %eax mov %eax, (%r10) add $4, %r10 sub $4, %r11 vpsrldq $4, %xmm9, %xmm9 test %r11, %r11 je _return_T_done\@ _T_123\@: vmovd %xmm9, %eax cmp $2, %r11 jl _T_1\@ mov %ax, (%r10) cmp $2, %r11 je _return_T_done\@ add $2, %r10 sar $16, %eax _T_1\@: mov %al, (%r10) jmp _return_T_done\@ _T_16\@: vmovdqu %xmm9, (%r10) _return_T_done\@: .endm .macro CALC_AAD_HASH GHASH_MUL AAD AADLEN T1 T2 T3 T4 T5 T6 T7 T8 mov \AAD, %r10 # r10 = AAD mov \AADLEN, %r12 # r12 = aadLen mov %r12, %r11 vpxor \T8, \T8, \T8 vpxor \T7, \T7, \T7 cmp $16, %r11 jl _get_AAD_rest8\@ _get_AAD_blocks\@: vmovdqu (%r10), \T7 vpshufb SHUF_MASK(%rip), \T7, \T7 vpxor \T7, \T8, \T8 \GHASH_MUL \T8, \T2, \T1, \T3, \T4, \T5, \T6 add $16, %r10 sub $16, %r12 sub $16, %r11 cmp $16, %r11 jge _get_AAD_blocks\@ vmovdqu \T8, \T7 test %r11, %r11 je _get_AAD_done\@ vpxor \T7, \T7, \T7 /* read the last <16B of AAD. since we have at least 4B of data right after the AAD (the ICV, and maybe some CT), we can read 4B/8B blocks safely, and then get rid of the extra stuff */ _get_AAD_rest8\@: cmp $4, %r11 jle _get_AAD_rest4\@ movq (%r10), \T1 add $8, %r10 sub $8, %r11 vpslldq $8, \T1, \T1 vpsrldq $8, \T7, \T7 vpxor \T1, \T7, \T7 jmp _get_AAD_rest8\@ _get_AAD_rest4\@: test %r11, %r11 jle _get_AAD_rest0\@ mov (%r10), %eax movq %rax, \T1 add $4, %r10 sub $4, %r11 vpslldq $12, \T1, \T1 vpsrldq $4, \T7, \T7 vpxor \T1, \T7, \T7 _get_AAD_rest0\@: /* finalize: shift out the extra bytes we read, and align left. 
since pslldq can only shift by an immediate, we use vpshufb and an array of shuffle masks */ movq %r12, %r11 salq $4, %r11 vmovdqu aad_shift_arr(%r11), \T1 vpshufb \T1, \T7, \T7 _get_AAD_rest_final\@: vpshufb SHUF_MASK(%rip), \T7, \T7 vpxor \T8, \T7, \T7 \GHASH_MUL \T7, \T2, \T1, \T3, \T4, \T5, \T6 _get_AAD_done\@: vmovdqu \T7, AadHash(arg2) .endm .macro INIT GHASH_MUL PRECOMPUTE mov arg6, %r11 mov %r11, AadLen(arg2) # ctx_data.aad_length = aad_length xor %r11d, %r11d mov %r11, InLen(arg2) # ctx_data.in_length = 0 mov %r11, PBlockLen(arg2) # ctx_data.partial_block_length = 0 mov %r11, PBlockEncKey(arg2) # ctx_data.partial_block_enc_key = 0 mov arg3, %rax movdqu (%rax), %xmm0 movdqu %xmm0, OrigIV(arg2) # ctx_data.orig_IV = iv vpshufb SHUF_MASK(%rip), %xmm0, %xmm0 movdqu %xmm0, CurCount(arg2) # ctx_data.current_counter = iv vmovdqu (arg4), %xmm6 # xmm6 = HashKey vpshufb SHUF_MASK(%rip), %xmm6, %xmm6 ############### PRECOMPUTATION of HashKey<<1 mod poly from the HashKey vmovdqa %xmm6, %xmm2 vpsllq $1, %xmm6, %xmm6 vpsrlq $63, %xmm2, %xmm2 vmovdqa %xmm2, %xmm1 vpslldq $8, %xmm2, %xmm2 vpsrldq $8, %xmm1, %xmm1 vpor %xmm2, %xmm6, %xmm6 #reduction vpshufd $0b00100100, %xmm1, %xmm2 vpcmpeqd TWOONE(%rip), %xmm2, %xmm2 vpand POLY(%rip), %xmm2, %xmm2 vpxor %xmm2, %xmm6, %xmm6 # xmm6 holds the HashKey<<1 mod poly ####################################################################### vmovdqu %xmm6, HashKey(arg2) # store HashKey<<1 mod poly CALC_AAD_HASH \GHASH_MUL, arg5, arg6, %xmm2, %xmm6, %xmm3, %xmm4, %xmm5, %xmm7, %xmm1, %xmm0 \PRECOMPUTE %xmm6, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5 .endm # Reads DLEN bytes starting at DPTR and stores in XMMDst # where 0 < DLEN < 16 # Clobbers %rax, DLEN .macro READ_PARTIAL_BLOCK DPTR DLEN XMMDst vpxor \XMMDst, \XMMDst, \XMMDst cmp $8, \DLEN jl _read_lt8_\@ mov (\DPTR), %rax vpinsrq $0, %rax, \XMMDst, \XMMDst sub $8, \DLEN jz _done_read_partial_block_\@ xor %eax, %eax _read_next_byte_\@: shl $8, %rax mov 7(\DPTR, \DLEN, 1), %al dec \DLEN jnz _read_next_byte_\@ vpinsrq $1, %rax, \XMMDst, \XMMDst jmp _done_read_partial_block_\@ _read_lt8_\@: xor %eax, %eax _read_next_byte_lt8_\@: shl $8, %rax mov -1(\DPTR, \DLEN, 1), %al dec \DLEN jnz _read_next_byte_lt8_\@ vpinsrq $0, %rax, \XMMDst, \XMMDst _done_read_partial_block_\@: .endm # PARTIAL_BLOCK: Handles encryption/decryption and the tag partial blocks # between update calls. 
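# Roughly, across update calls this works as follows (illustrative numbers,
# not taken from the code): an update of 20 bytes leaves PBlockLen = 4; a
# following update of 30 bytes first consumes 12 bytes here to complete that
# block, the bulk loop then handles 16 bytes, and the trailing 2 bytes become
# the new partial block (PBlockLen = 2).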
# Requires the input data be at least 1 byte long due to READ_PARTIAL_BLOCK # Outputs encrypted bytes, and updates hash and partial info in gcm_data_context # Clobbers rax, r10, r12, r13, xmm0-6, xmm9-13 .macro PARTIAL_BLOCK GHASH_MUL CYPH_PLAIN_OUT PLAIN_CYPH_IN PLAIN_CYPH_LEN DATA_OFFSET \ AAD_HASH ENC_DEC mov PBlockLen(arg2), %r13 test %r13, %r13 je _partial_block_done_\@ # Leave Macro if no partial blocks # Read in input data without over reading cmp $16, \PLAIN_CYPH_LEN jl _fewer_than_16_bytes_\@ vmovdqu (\PLAIN_CYPH_IN), %xmm1 # If more than 16 bytes, just fill xmm jmp _data_read_\@ _fewer_than_16_bytes_\@: lea (\PLAIN_CYPH_IN, \DATA_OFFSET, 1), %r10 mov \PLAIN_CYPH_LEN, %r12 READ_PARTIAL_BLOCK %r10 %r12 %xmm1 mov PBlockLen(arg2), %r13 _data_read_\@: # Finished reading in data vmovdqu PBlockEncKey(arg2), %xmm9 vmovdqu HashKey(arg2), %xmm13 lea SHIFT_MASK(%rip), %r12 # adjust the shuffle mask pointer to be able to shift r13 bytes # r16-r13 is the number of bytes in plaintext mod 16) add %r13, %r12 vmovdqu (%r12), %xmm2 # get the appropriate shuffle mask vpshufb %xmm2, %xmm9, %xmm9 # shift right r13 bytes .if \ENC_DEC == DEC vmovdqa %xmm1, %xmm3 pxor %xmm1, %xmm9 # Cyphertext XOR E(K, Yn) mov \PLAIN_CYPH_LEN, %r10 add %r13, %r10 # Set r10 to be the amount of data left in CYPH_PLAIN_IN after filling sub $16, %r10 # Determine if if partial block is not being filled and # shift mask accordingly jge _no_extra_mask_1_\@ sub %r10, %r12 _no_extra_mask_1_\@: vmovdqu ALL_F-SHIFT_MASK(%r12), %xmm1 # get the appropriate mask to mask out bottom r13 bytes of xmm9 vpand %xmm1, %xmm9, %xmm9 # mask out bottom r13 bytes of xmm9 vpand %xmm1, %xmm3, %xmm3 vmovdqa SHUF_MASK(%rip), %xmm10 vpshufb %xmm10, %xmm3, %xmm3 vpshufb %xmm2, %xmm3, %xmm3 vpxor %xmm3, \AAD_HASH, \AAD_HASH test %r10, %r10 jl _partial_incomplete_1_\@ # GHASH computation for the last <16 Byte block \GHASH_MUL \AAD_HASH, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6 xor %eax,%eax mov %rax, PBlockLen(arg2) jmp _dec_done_\@ _partial_incomplete_1_\@: add \PLAIN_CYPH_LEN, PBlockLen(arg2) _dec_done_\@: vmovdqu \AAD_HASH, AadHash(arg2) .else vpxor %xmm1, %xmm9, %xmm9 # Plaintext XOR E(K, Yn) mov \PLAIN_CYPH_LEN, %r10 add %r13, %r10 # Set r10 to be the amount of data left in CYPH_PLAIN_IN after filling sub $16, %r10 # Determine if if partial block is not being filled and # shift mask accordingly jge _no_extra_mask_2_\@ sub %r10, %r12 _no_extra_mask_2_\@: vmovdqu ALL_F-SHIFT_MASK(%r12), %xmm1 # get the appropriate mask to mask out bottom r13 bytes of xmm9 vpand %xmm1, %xmm9, %xmm9 vmovdqa SHUF_MASK(%rip), %xmm1 vpshufb %xmm1, %xmm9, %xmm9 vpshufb %xmm2, %xmm9, %xmm9 vpxor %xmm9, \AAD_HASH, \AAD_HASH test %r10, %r10 jl _partial_incomplete_2_\@ # GHASH computation for the last <16 Byte block \GHASH_MUL \AAD_HASH, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6 xor %eax,%eax mov %rax, PBlockLen(arg2) jmp _encode_done_\@ _partial_incomplete_2_\@: add \PLAIN_CYPH_LEN, PBlockLen(arg2) _encode_done_\@: vmovdqu \AAD_HASH, AadHash(arg2) vmovdqa SHUF_MASK(%rip), %xmm10 # shuffle xmm9 back to output as ciphertext vpshufb %xmm10, %xmm9, %xmm9 vpshufb %xmm2, %xmm9, %xmm9 .endif # output encrypted Bytes test %r10, %r10 jl _partial_fill_\@ mov %r13, %r12 mov $16, %r13 # Set r13 to be the number of bytes to write out sub %r12, %r13 jmp _count_set_\@ _partial_fill_\@: mov \PLAIN_CYPH_LEN, %r13 _count_set_\@: vmovdqa %xmm9, %xmm0 vmovq %xmm0, %rax cmp $8, %r13 jle _less_than_8_bytes_left_\@ mov %rax, (\CYPH_PLAIN_OUT, \DATA_OFFSET, 1) add $8, \DATA_OFFSET psrldq $8, 
%xmm0 vmovq %xmm0, %rax sub $8, %r13 _less_than_8_bytes_left_\@: movb %al, (\CYPH_PLAIN_OUT, \DATA_OFFSET, 1) add $1, \DATA_OFFSET shr $8, %rax sub $1, %r13 jne _less_than_8_bytes_left_\@ _partial_block_done_\@: .endm # PARTIAL_BLOCK ############################################################################### # GHASH_MUL MACRO to implement: Data*HashKey mod (128,127,126,121,0) # Input: A and B (128-bits each, bit-reflected) # Output: C = A*B*x mod poly, (i.e. >>1 ) # To compute GH = GH*HashKey mod poly, give HK = HashKey<<1 mod poly as input # GH = GH * HK * x mod poly which is equivalent to GH*HashKey mod poly. ############################################################################### .macro GHASH_MUL_AVX GH HK T1 T2 T3 T4 T5 vpshufd $0b01001110, \GH, \T2 vpshufd $0b01001110, \HK, \T3 vpxor \GH , \T2, \T2 # T2 = (a1+a0) vpxor \HK , \T3, \T3 # T3 = (b1+b0) vpclmulqdq $0x11, \HK, \GH, \T1 # T1 = a1*b1 vpclmulqdq $0x00, \HK, \GH, \GH # GH = a0*b0 vpclmulqdq $0x00, \T3, \T2, \T2 # T2 = (a1+a0)*(b1+b0) vpxor \GH, \T2,\T2 vpxor \T1, \T2,\T2 # T2 = a0*b1+a1*b0 vpslldq $8, \T2,\T3 # shift-L T3 2 DWs vpsrldq $8, \T2,\T2 # shift-R T2 2 DWs vpxor \T3, \GH, \GH vpxor \T2, \T1, \T1 # <T1:GH> = GH x HK #first phase of the reduction vpslld $31, \GH, \T2 # packed right shifting << 31 vpslld $30, \GH, \T3 # packed right shifting shift << 30 vpslld $25, \GH, \T4 # packed right shifting shift << 25 vpxor \T3, \T2, \T2 # xor the shifted versions vpxor \T4, \T2, \T2 vpsrldq $4, \T2, \T5 # shift-R T5 1 DW vpslldq $12, \T2, \T2 # shift-L T2 3 DWs vpxor \T2, \GH, \GH # first phase of the reduction complete #second phase of the reduction vpsrld $1,\GH, \T2 # packed left shifting >> 1 vpsrld $2,\GH, \T3 # packed left shifting >> 2 vpsrld $7,\GH, \T4 # packed left shifting >> 7 vpxor \T3, \T2, \T2 # xor the shifted versions vpxor \T4, \T2, \T2 vpxor \T5, \T2, \T2 vpxor \T2, \GH, \GH vpxor \T1, \GH, \GH # the result is in GH .endm .macro PRECOMPUTE_AVX HK T1 T2 T3 T4 T5 T6 # Haskey_i_k holds XORed values of the low and high parts of the Haskey_i vmovdqa \HK, \T5 vpshufd $0b01001110, \T5, \T1 vpxor \T5, \T1, \T1 vmovdqu \T1, HashKey_k(arg2) GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^2<<1 mod poly vmovdqu \T5, HashKey_2(arg2) # [HashKey_2] = HashKey^2<<1 mod poly vpshufd $0b01001110, \T5, \T1 vpxor \T5, \T1, \T1 vmovdqu \T1, HashKey_2_k(arg2) GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^3<<1 mod poly vmovdqu \T5, HashKey_3(arg2) vpshufd $0b01001110, \T5, \T1 vpxor \T5, \T1, \T1 vmovdqu \T1, HashKey_3_k(arg2) GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^4<<1 mod poly vmovdqu \T5, HashKey_4(arg2) vpshufd $0b01001110, \T5, \T1 vpxor \T5, \T1, \T1 vmovdqu \T1, HashKey_4_k(arg2) GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^5<<1 mod poly vmovdqu \T5, HashKey_5(arg2) vpshufd $0b01001110, \T5, \T1 vpxor \T5, \T1, \T1 vmovdqu \T1, HashKey_5_k(arg2) GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^6<<1 mod poly vmovdqu \T5, HashKey_6(arg2) vpshufd $0b01001110, \T5, \T1 vpxor \T5, \T1, \T1 vmovdqu \T1, HashKey_6_k(arg2) GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^7<<1 mod poly vmovdqu \T5, HashKey_7(arg2) vpshufd $0b01001110, \T5, \T1 vpxor \T5, \T1, \T1 vmovdqu \T1, HashKey_7_k(arg2) GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^8<<1 mod poly vmovdqu \T5, HashKey_8(arg2) vpshufd $0b01001110, \T5, \T1 vpxor \T5, \T1, \T1 vmovdqu \T1, HashKey_8_k(arg2) .endm ## if a = number of total plaintext bytes 
## b = floor(a/16) ## num_initial_blocks = b mod 4# ## encrypt the initial num_initial_blocks blocks and apply ghash on the ciphertext ## r10, r11, r12, rax are clobbered ## arg1, arg2, arg3, arg4 are used as pointers only, not modified .macro INITIAL_BLOCKS_AVX REP num_initial_blocks T1 T2 T3 T4 T5 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T6 T_key ENC_DEC i = (8-\num_initial_blocks) setreg vmovdqu AadHash(arg2), reg_i # start AES for num_initial_blocks blocks vmovdqu CurCount(arg2), \CTR i = (9-\num_initial_blocks) setreg .rep \num_initial_blocks vpaddd ONE(%rip), \CTR, \CTR # INCR Y0 vmovdqa \CTR, reg_i vpshufb SHUF_MASK(%rip), reg_i, reg_i # perform a 16Byte swap i = (i+1) setreg .endr vmovdqa (arg1), \T_key i = (9-\num_initial_blocks) setreg .rep \num_initial_blocks vpxor \T_key, reg_i, reg_i i = (i+1) setreg .endr j = 1 setreg .rep \REP vmovdqa 16*j(arg1), \T_key i = (9-\num_initial_blocks) setreg .rep \num_initial_blocks vaesenc \T_key, reg_i, reg_i i = (i+1) setreg .endr j = (j+1) setreg .endr vmovdqa 16*j(arg1), \T_key i = (9-\num_initial_blocks) setreg .rep \num_initial_blocks vaesenclast \T_key, reg_i, reg_i i = (i+1) setreg .endr i = (9-\num_initial_blocks) setreg .rep \num_initial_blocks vmovdqu (arg4, %r11), \T1 vpxor \T1, reg_i, reg_i vmovdqu reg_i, (arg3 , %r11) # write back ciphertext for num_initial_blocks blocks add $16, %r11 .if \ENC_DEC == DEC vmovdqa \T1, reg_i .endif vpshufb SHUF_MASK(%rip), reg_i, reg_i # prepare ciphertext for GHASH computations i = (i+1) setreg .endr i = (8-\num_initial_blocks) j = (9-\num_initial_blocks) setreg .rep \num_initial_blocks vpxor reg_i, reg_j, reg_j GHASH_MUL_AVX reg_j, \T2, \T1, \T3, \T4, \T5, \T6 # apply GHASH on num_initial_blocks blocks i = (i+1) j = (j+1) setreg .endr # XMM8 has the combined result here vmovdqa \XMM8, TMP1(%rsp) vmovdqa \XMM8, \T3 cmp $128, %r13 jl _initial_blocks_done\@ # no need for precomputed constants ############################################################################### # Haskey_i_k holds XORed values of the low and high parts of the Haskey_i vpaddd ONE(%rip), \CTR, \CTR # INCR Y0 vmovdqa \CTR, \XMM1 vpshufb SHUF_MASK(%rip), \XMM1, \XMM1 # perform a 16Byte swap vpaddd ONE(%rip), \CTR, \CTR # INCR Y0 vmovdqa \CTR, \XMM2 vpshufb SHUF_MASK(%rip), \XMM2, \XMM2 # perform a 16Byte swap vpaddd ONE(%rip), \CTR, \CTR # INCR Y0 vmovdqa \CTR, \XMM3 vpshufb SHUF_MASK(%rip), \XMM3, \XMM3 # perform a 16Byte swap vpaddd ONE(%rip), \CTR, \CTR # INCR Y0 vmovdqa \CTR, \XMM4 vpshufb SHUF_MASK(%rip), \XMM4, \XMM4 # perform a 16Byte swap vpaddd ONE(%rip), \CTR, \CTR # INCR Y0 vmovdqa \CTR, \XMM5 vpshufb SHUF_MASK(%rip), \XMM5, \XMM5 # perform a 16Byte swap vpaddd ONE(%rip), \CTR, \CTR # INCR Y0 vmovdqa \CTR, \XMM6 vpshufb SHUF_MASK(%rip), \XMM6, \XMM6 # perform a 16Byte swap vpaddd ONE(%rip), \CTR, \CTR # INCR Y0 vmovdqa \CTR, \XMM7 vpshufb SHUF_MASK(%rip), \XMM7, \XMM7 # perform a 16Byte swap vpaddd ONE(%rip), \CTR, \CTR # INCR Y0 vmovdqa \CTR, \XMM8 vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap vmovdqa (arg1), \T_key vpxor \T_key, \XMM1, \XMM1 vpxor \T_key, \XMM2, \XMM2 vpxor \T_key, \XMM3, \XMM3 vpxor \T_key, \XMM4, \XMM4 vpxor \T_key, \XMM5, \XMM5 vpxor \T_key, \XMM6, \XMM6 vpxor \T_key, \XMM7, \XMM7 vpxor \T_key, \XMM8, \XMM8 i = 1 setreg .rep \REP # do REP rounds vmovdqa 16*i(arg1), \T_key vaesenc \T_key, \XMM1, \XMM1 vaesenc \T_key, \XMM2, \XMM2 vaesenc \T_key, \XMM3, \XMM3 vaesenc \T_key, \XMM4, \XMM4 vaesenc \T_key, \XMM5, \XMM5 vaesenc \T_key, \XMM6, \XMM6 vaesenc \T_key, \XMM7, \XMM7 
vaesenc \T_key, \XMM8, \XMM8 i = (i+1) setreg .endr vmovdqa 16*i(arg1), \T_key vaesenclast \T_key, \XMM1, \XMM1 vaesenclast \T_key, \XMM2, \XMM2 vaesenclast \T_key, \XMM3, \XMM3 vaesenclast \T_key, \XMM4, \XMM4 vaesenclast \T_key, \XMM5, \XMM5 vaesenclast \T_key, \XMM6, \XMM6 vaesenclast \T_key, \XMM7, \XMM7 vaesenclast \T_key, \XMM8, \XMM8 vmovdqu (arg4, %r11), \T1 vpxor \T1, \XMM1, \XMM1 vmovdqu \XMM1, (arg3 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM1 .endif vmovdqu 16*1(arg4, %r11), \T1 vpxor \T1, \XMM2, \XMM2 vmovdqu \XMM2, 16*1(arg3 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM2 .endif vmovdqu 16*2(arg4, %r11), \T1 vpxor \T1, \XMM3, \XMM3 vmovdqu \XMM3, 16*2(arg3 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM3 .endif vmovdqu 16*3(arg4, %r11), \T1 vpxor \T1, \XMM4, \XMM4 vmovdqu \XMM4, 16*3(arg3 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM4 .endif vmovdqu 16*4(arg4, %r11), \T1 vpxor \T1, \XMM5, \XMM5 vmovdqu \XMM5, 16*4(arg3 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM5 .endif vmovdqu 16*5(arg4, %r11), \T1 vpxor \T1, \XMM6, \XMM6 vmovdqu \XMM6, 16*5(arg3 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM6 .endif vmovdqu 16*6(arg4, %r11), \T1 vpxor \T1, \XMM7, \XMM7 vmovdqu \XMM7, 16*6(arg3 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM7 .endif vmovdqu 16*7(arg4, %r11), \T1 vpxor \T1, \XMM8, \XMM8 vmovdqu \XMM8, 16*7(arg3 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM8 .endif add $128, %r11 vpshufb SHUF_MASK(%rip), \XMM1, \XMM1 # perform a 16Byte swap vpxor TMP1(%rsp), \XMM1, \XMM1 # combine GHASHed value with the corresponding ciphertext vpshufb SHUF_MASK(%rip), \XMM2, \XMM2 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM3, \XMM3 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM4, \XMM4 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM5, \XMM5 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM6, \XMM6 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM7, \XMM7 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap ############################################################################### _initial_blocks_done\@: .endm # encrypt 8 blocks at a time # ghash the 8 previously encrypted ciphertext blocks # arg1, arg2, arg3, arg4 are used as pointers only, not modified # r11 is the data offset value .macro GHASH_8_ENCRYPT_8_PARALLEL_AVX REP T1 T2 T3 T4 T5 T6 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T7 loop_idx ENC_DEC vmovdqa \XMM1, \T2 vmovdqa \XMM2, TMP2(%rsp) vmovdqa \XMM3, TMP3(%rsp) vmovdqa \XMM4, TMP4(%rsp) vmovdqa \XMM5, TMP5(%rsp) vmovdqa \XMM6, TMP6(%rsp) vmovdqa \XMM7, TMP7(%rsp) vmovdqa \XMM8, TMP8(%rsp) .if \loop_idx == in_order vpaddd ONE(%rip), \CTR, \XMM1 # INCR CNT vpaddd ONE(%rip), \XMM1, \XMM2 vpaddd ONE(%rip), \XMM2, \XMM3 vpaddd ONE(%rip), \XMM3, \XMM4 vpaddd ONE(%rip), \XMM4, \XMM5 vpaddd ONE(%rip), \XMM5, \XMM6 vpaddd ONE(%rip), \XMM6, \XMM7 vpaddd ONE(%rip), \XMM7, \XMM8 vmovdqa \XMM8, \CTR vpshufb SHUF_MASK(%rip), \XMM1, \XMM1 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM2, \XMM2 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM3, \XMM3 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM4, \XMM4 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM5, \XMM5 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM6, \XMM6 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM7, \XMM7 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap .else vpaddd ONEf(%rip), \CTR, \XMM1 # INCR CNT vpaddd ONEf(%rip), \XMM1, \XMM2 vpaddd ONEf(%rip), \XMM2, \XMM3 vpaddd ONEf(%rip), \XMM3, 
\XMM4 vpaddd ONEf(%rip), \XMM4, \XMM5 vpaddd ONEf(%rip), \XMM5, \XMM6 vpaddd ONEf(%rip), \XMM6, \XMM7 vpaddd ONEf(%rip), \XMM7, \XMM8 vmovdqa \XMM8, \CTR .endif ####################################################################### vmovdqu (arg1), \T1 vpxor \T1, \XMM1, \XMM1 vpxor \T1, \XMM2, \XMM2 vpxor \T1, \XMM3, \XMM3 vpxor \T1, \XMM4, \XMM4 vpxor \T1, \XMM5, \XMM5 vpxor \T1, \XMM6, \XMM6 vpxor \T1, \XMM7, \XMM7 vpxor \T1, \XMM8, \XMM8 ####################################################################### vmovdqu 16*1(arg1), \T1 vaesenc \T1, \XMM1, \XMM1 vaesenc \T1, \XMM2, \XMM2 vaesenc \T1, \XMM3, \XMM3 vaesenc \T1, \XMM4, \XMM4 vaesenc \T1, \XMM5, \XMM5 vaesenc \T1, \XMM6, \XMM6 vaesenc \T1, \XMM7, \XMM7 vaesenc \T1, \XMM8, \XMM8 vmovdqu 16*2(arg1), \T1 vaesenc \T1, \XMM1, \XMM1 vaesenc \T1, \XMM2, \XMM2 vaesenc \T1, \XMM3, \XMM3 vaesenc \T1, \XMM4, \XMM4 vaesenc \T1, \XMM5, \XMM5 vaesenc \T1, \XMM6, \XMM6 vaesenc \T1, \XMM7, \XMM7 vaesenc \T1, \XMM8, \XMM8 ####################################################################### vmovdqu HashKey_8(arg2), \T5 vpclmulqdq $0x11, \T5, \T2, \T4 # T4 = a1*b1 vpclmulqdq $0x00, \T5, \T2, \T7 # T7 = a0*b0 vpshufd $0b01001110, \T2, \T6 vpxor \T2, \T6, \T6 vmovdqu HashKey_8_k(arg2), \T5 vpclmulqdq $0x00, \T5, \T6, \T6 vmovdqu 16*3(arg1), \T1 vaesenc \T1, \XMM1, \XMM1 vaesenc \T1, \XMM2, \XMM2 vaesenc \T1, \XMM3, \XMM3 vaesenc \T1, \XMM4, \XMM4 vaesenc \T1, \XMM5, \XMM5 vaesenc \T1, \XMM6, \XMM6 vaesenc \T1, \XMM7, \XMM7 vaesenc \T1, \XMM8, \XMM8 vmovdqa TMP2(%rsp), \T1 vmovdqu HashKey_7(arg2), \T5 vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T4 vpclmulqdq $0x00, \T5, \T1, \T3 vpxor \T3, \T7, \T7 vpshufd $0b01001110, \T1, \T3 vpxor \T1, \T3, \T3 vmovdqu HashKey_7_k(arg2), \T5 vpclmulqdq $0x10, \T5, \T3, \T3 vpxor \T3, \T6, \T6 vmovdqu 16*4(arg1), \T1 vaesenc \T1, \XMM1, \XMM1 vaesenc \T1, \XMM2, \XMM2 vaesenc \T1, \XMM3, \XMM3 vaesenc \T1, \XMM4, \XMM4 vaesenc \T1, \XMM5, \XMM5 vaesenc \T1, \XMM6, \XMM6 vaesenc \T1, \XMM7, \XMM7 vaesenc \T1, \XMM8, \XMM8 ####################################################################### vmovdqa TMP3(%rsp), \T1 vmovdqu HashKey_6(arg2), \T5 vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T4 vpclmulqdq $0x00, \T5, \T1, \T3 vpxor \T3, \T7, \T7 vpshufd $0b01001110, \T1, \T3 vpxor \T1, \T3, \T3 vmovdqu HashKey_6_k(arg2), \T5 vpclmulqdq $0x10, \T5, \T3, \T3 vpxor \T3, \T6, \T6 vmovdqu 16*5(arg1), \T1 vaesenc \T1, \XMM1, \XMM1 vaesenc \T1, \XMM2, \XMM2 vaesenc \T1, \XMM3, \XMM3 vaesenc \T1, \XMM4, \XMM4 vaesenc \T1, \XMM5, \XMM5 vaesenc \T1, \XMM6, \XMM6 vaesenc \T1, \XMM7, \XMM7 vaesenc \T1, \XMM8, \XMM8 vmovdqa TMP4(%rsp), \T1 vmovdqu HashKey_5(arg2), \T5 vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T4 vpclmulqdq $0x00, \T5, \T1, \T3 vpxor \T3, \T7, \T7 vpshufd $0b01001110, \T1, \T3 vpxor \T1, \T3, \T3 vmovdqu HashKey_5_k(arg2), \T5 vpclmulqdq $0x10, \T5, \T3, \T3 vpxor \T3, \T6, \T6 vmovdqu 16*6(arg1), \T1 vaesenc \T1, \XMM1, \XMM1 vaesenc \T1, \XMM2, \XMM2 vaesenc \T1, \XMM3, \XMM3 vaesenc \T1, \XMM4, \XMM4 vaesenc \T1, \XMM5, \XMM5 vaesenc \T1, \XMM6, \XMM6 vaesenc \T1, \XMM7, \XMM7 vaesenc \T1, \XMM8, \XMM8 vmovdqa TMP5(%rsp), \T1 vmovdqu HashKey_4(arg2), \T5 vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T4 vpclmulqdq $0x00, \T5, \T1, \T3 vpxor \T3, \T7, \T7 vpshufd $0b01001110, \T1, \T3 vpxor \T1, \T3, \T3 vmovdqu HashKey_4_k(arg2), \T5 vpclmulqdq $0x10, \T5, \T3, \T3 vpxor \T3, \T6, \T6 vmovdqu 16*7(arg1), \T1 vaesenc \T1, \XMM1, \XMM1 vaesenc \T1, \XMM2, \XMM2 vaesenc \T1, \XMM3, \XMM3 vaesenc 
\T1, \XMM4, \XMM4 vaesenc \T1, \XMM5, \XMM5 vaesenc \T1, \XMM6, \XMM6 vaesenc \T1, \XMM7, \XMM7 vaesenc \T1, \XMM8, \XMM8 vmovdqa TMP6(%rsp), \T1 vmovdqu HashKey_3(arg2), \T5 vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T4 vpclmulqdq $0x00, \T5, \T1, \T3 vpxor \T3, \T7, \T7 vpshufd $0b01001110, \T1, \T3 vpxor \T1, \T3, \T3 vmovdqu HashKey_3_k(arg2), \T5 vpclmulqdq $0x10, \T5, \T3, \T3 vpxor \T3, \T6, \T6 vmovdqu 16*8(arg1), \T1 vaesenc \T1, \XMM1, \XMM1 vaesenc \T1, \XMM2, \XMM2 vaesenc \T1, \XMM3, \XMM3 vaesenc \T1, \XMM4, \XMM4 vaesenc \T1, \XMM5, \XMM5 vaesenc \T1, \XMM6, \XMM6 vaesenc \T1, \XMM7, \XMM7 vaesenc \T1, \XMM8, \XMM8 vmovdqa TMP7(%rsp), \T1 vmovdqu HashKey_2(arg2), \T5 vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T4 vpclmulqdq $0x00, \T5, \T1, \T3 vpxor \T3, \T7, \T7 vpshufd $0b01001110, \T1, \T3 vpxor \T1, \T3, \T3 vmovdqu HashKey_2_k(arg2), \T5 vpclmulqdq $0x10, \T5, \T3, \T3 vpxor \T3, \T6, \T6 ####################################################################### vmovdqu 16*9(arg1), \T5 vaesenc \T5, \XMM1, \XMM1 vaesenc \T5, \XMM2, \XMM2 vaesenc \T5, \XMM3, \XMM3 vaesenc \T5, \XMM4, \XMM4 vaesenc \T5, \XMM5, \XMM5 vaesenc \T5, \XMM6, \XMM6 vaesenc \T5, \XMM7, \XMM7 vaesenc \T5, \XMM8, \XMM8 vmovdqa TMP8(%rsp), \T1 vmovdqu HashKey(arg2), \T5 vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T4 vpclmulqdq $0x00, \T5, \T1, \T3 vpxor \T3, \T7, \T7 vpshufd $0b01001110, \T1, \T3 vpxor \T1, \T3, \T3 vmovdqu HashKey_k(arg2), \T5 vpclmulqdq $0x10, \T5, \T3, \T3 vpxor \T3, \T6, \T6 vpxor \T4, \T6, \T6 vpxor \T7, \T6, \T6 vmovdqu 16*10(arg1), \T5 i = 11 setreg .rep (\REP-9) vaesenc \T5, \XMM1, \XMM1 vaesenc \T5, \XMM2, \XMM2 vaesenc \T5, \XMM3, \XMM3 vaesenc \T5, \XMM4, \XMM4 vaesenc \T5, \XMM5, \XMM5 vaesenc \T5, \XMM6, \XMM6 vaesenc \T5, \XMM7, \XMM7 vaesenc \T5, \XMM8, \XMM8 vmovdqu 16*i(arg1), \T5 i = i + 1 setreg .endr i = 0 j = 1 setreg .rep 8 vpxor 16*i(arg4, %r11), \T5, \T2 .if \ENC_DEC == ENC vaesenclast \T2, reg_j, reg_j .else vaesenclast \T2, reg_j, \T3 vmovdqu 16*i(arg4, %r11), reg_j vmovdqu \T3, 16*i(arg3, %r11) .endif i = (i+1) j = (j+1) setreg .endr ####################################################################### vpslldq $8, \T6, \T3 # shift-L T3 2 DWs vpsrldq $8, \T6, \T6 # shift-R T2 2 DWs vpxor \T3, \T7, \T7 vpxor \T4, \T6, \T6 # accumulate the results in T6:T7 ####################################################################### #first phase of the reduction ####################################################################### vpslld $31, \T7, \T2 # packed right shifting << 31 vpslld $30, \T7, \T3 # packed right shifting shift << 30 vpslld $25, \T7, \T4 # packed right shifting shift << 25 vpxor \T3, \T2, \T2 # xor the shifted versions vpxor \T4, \T2, \T2 vpsrldq $4, \T2, \T1 # shift-R T1 1 DW vpslldq $12, \T2, \T2 # shift-L T2 3 DWs vpxor \T2, \T7, \T7 # first phase of the reduction complete ####################################################################### .if \ENC_DEC == ENC vmovdqu \XMM1, 16*0(arg3,%r11) # Write to the Ciphertext buffer vmovdqu \XMM2, 16*1(arg3,%r11) # Write to the Ciphertext buffer vmovdqu \XMM3, 16*2(arg3,%r11) # Write to the Ciphertext buffer vmovdqu \XMM4, 16*3(arg3,%r11) # Write to the Ciphertext buffer vmovdqu \XMM5, 16*4(arg3,%r11) # Write to the Ciphertext buffer vmovdqu \XMM6, 16*5(arg3,%r11) # Write to the Ciphertext buffer vmovdqu \XMM7, 16*6(arg3,%r11) # Write to the Ciphertext buffer vmovdqu \XMM8, 16*7(arg3,%r11) # Write to the Ciphertext buffer .endif 
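	# Roughly speaking, the "first phase" above and the "second phase" below
	# together fold the upper half of the 256-bit carry-less product back
	# into 128 bits modulo the bit-reflected GCM polynomial -- the
	# "mod (128,127,126,121,0)" mentioned in the GHASH_MUL header.  The dword
	# shift counts 31, 30 and 25 are 32-1, 32-2 and 32-7, the lane-local
	# counterparts of the >>1, >>2 and >>7 shifts of the second phase; both
	# sets come from the 127, 126 and 121 terms of the polynomial
	# (128-127=1, 128-126=2, 128-121=7).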
####################################################################### #second phase of the reduction vpsrld $1, \T7, \T2 # packed left shifting >> 1 vpsrld $2, \T7, \T3 # packed left shifting >> 2 vpsrld $7, \T7, \T4 # packed left shifting >> 7 vpxor \T3, \T2, \T2 # xor the shifted versions vpxor \T4, \T2, \T2 vpxor \T1, \T2, \T2 vpxor \T2, \T7, \T7 vpxor \T7, \T6, \T6 # the result is in T6 ####################################################################### vpshufb SHUF_MASK(%rip), \XMM1, \XMM1 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM2, \XMM2 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM3, \XMM3 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM4, \XMM4 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM5, \XMM5 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM6, \XMM6 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM7, \XMM7 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap vpxor \T6, \XMM1, \XMM1 .endm # GHASH the last 4 ciphertext blocks. .macro GHASH_LAST_8_AVX T1 T2 T3 T4 T5 T6 T7 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 ## Karatsuba Method vpshufd $0b01001110, \XMM1, \T2 vpxor \XMM1, \T2, \T2 vmovdqu HashKey_8(arg2), \T5 vpclmulqdq $0x11, \T5, \XMM1, \T6 vpclmulqdq $0x00, \T5, \XMM1, \T7 vmovdqu HashKey_8_k(arg2), \T3 vpclmulqdq $0x00, \T3, \T2, \XMM1 ###################### vpshufd $0b01001110, \XMM2, \T2 vpxor \XMM2, \T2, \T2 vmovdqu HashKey_7(arg2), \T5 vpclmulqdq $0x11, \T5, \XMM2, \T4 vpxor \T4, \T6, \T6 vpclmulqdq $0x00, \T5, \XMM2, \T4 vpxor \T4, \T7, \T7 vmovdqu HashKey_7_k(arg2), \T3 vpclmulqdq $0x00, \T3, \T2, \T2 vpxor \T2, \XMM1, \XMM1 ###################### vpshufd $0b01001110, \XMM3, \T2 vpxor \XMM3, \T2, \T2 vmovdqu HashKey_6(arg2), \T5 vpclmulqdq $0x11, \T5, \XMM3, \T4 vpxor \T4, \T6, \T6 vpclmulqdq $0x00, \T5, \XMM3, \T4 vpxor \T4, \T7, \T7 vmovdqu HashKey_6_k(arg2), \T3 vpclmulqdq $0x00, \T3, \T2, \T2 vpxor \T2, \XMM1, \XMM1 ###################### vpshufd $0b01001110, \XMM4, \T2 vpxor \XMM4, \T2, \T2 vmovdqu HashKey_5(arg2), \T5 vpclmulqdq $0x11, \T5, \XMM4, \T4 vpxor \T4, \T6, \T6 vpclmulqdq $0x00, \T5, \XMM4, \T4 vpxor \T4, \T7, \T7 vmovdqu HashKey_5_k(arg2), \T3 vpclmulqdq $0x00, \T3, \T2, \T2 vpxor \T2, \XMM1, \XMM1 ###################### vpshufd $0b01001110, \XMM5, \T2 vpxor \XMM5, \T2, \T2 vmovdqu HashKey_4(arg2), \T5 vpclmulqdq $0x11, \T5, \XMM5, \T4 vpxor \T4, \T6, \T6 vpclmulqdq $0x00, \T5, \XMM5, \T4 vpxor \T4, \T7, \T7 vmovdqu HashKey_4_k(arg2), \T3 vpclmulqdq $0x00, \T3, \T2, \T2 vpxor \T2, \XMM1, \XMM1 ###################### vpshufd $0b01001110, \XMM6, \T2 vpxor \XMM6, \T2, \T2 vmovdqu HashKey_3(arg2), \T5 vpclmulqdq $0x11, \T5, \XMM6, \T4 vpxor \T4, \T6, \T6 vpclmulqdq $0x00, \T5, \XMM6, \T4 vpxor \T4, \T7, \T7 vmovdqu HashKey_3_k(arg2), \T3 vpclmulqdq $0x00, \T3, \T2, \T2 vpxor \T2, \XMM1, \XMM1 ###################### vpshufd $0b01001110, \XMM7, \T2 vpxor \XMM7, \T2, \T2 vmovdqu HashKey_2(arg2), \T5 vpclmulqdq $0x11, \T5, \XMM7, \T4 vpxor \T4, \T6, \T6 vpclmulqdq $0x00, \T5, \XMM7, \T4 vpxor \T4, \T7, \T7 vmovdqu HashKey_2_k(arg2), \T3 vpclmulqdq $0x00, \T3, \T2, \T2 vpxor \T2, \XMM1, \XMM1 ###################### vpshufd $0b01001110, \XMM8, \T2 vpxor \XMM8, \T2, \T2 vmovdqu HashKey(arg2), \T5 vpclmulqdq $0x11, \T5, \XMM8, \T4 vpxor \T4, \T6, \T6 vpclmulqdq $0x00, \T5, \XMM8, \T4 vpxor \T4, \T7, \T7 vmovdqu HashKey_k(arg2), \T3 vpclmulqdq $0x00, \T3, \T2, \T2 vpxor \T2, \XMM1, \XMM1 vpxor \T6, \XMM1, \XMM1 vpxor \T7, \XMM1, \T2 vpslldq $8, \T2, \T4 vpsrldq $8, \T2, \T2 vpxor \T4, 
\T7, \T7 vpxor \T2, \T6, \T6 # <T6:T7> holds the result of # the accumulated carry-less multiplications ####################################################################### #first phase of the reduction vpslld $31, \T7, \T2 # packed right shifting << 31 vpslld $30, \T7, \T3 # packed right shifting shift << 30 vpslld $25, \T7, \T4 # packed right shifting shift << 25 vpxor \T3, \T2, \T2 # xor the shifted versions vpxor \T4, \T2, \T2 vpsrldq $4, \T2, \T1 # shift-R T1 1 DW vpslldq $12, \T2, \T2 # shift-L T2 3 DWs vpxor \T2, \T7, \T7 # first phase of the reduction complete ####################################################################### #second phase of the reduction vpsrld $1, \T7, \T2 # packed left shifting >> 1 vpsrld $2, \T7, \T3 # packed left shifting >> 2 vpsrld $7, \T7, \T4 # packed left shifting >> 7 vpxor \T3, \T2, \T2 # xor the shifted versions vpxor \T4, \T2, \T2 vpxor \T1, \T2, \T2 vpxor \T2, \T7, \T7 vpxor \T7, \T6, \T6 # the result is in T6 .endm ############################################################# #void aesni_gcm_precomp_avx_gen2 # (gcm_data *my_ctx_data, # gcm_context_data *data, # u8 *hash_subkey# /* H, the Hash sub key input. Data starts on a 16-byte boundary. */ # u8 *iv, /* Pre-counter block j0: 4 byte salt # (from Security Association) concatenated with 8 byte # Initialisation Vector (from IPSec ESP Payload) # concatenated with 0x00000001. 16-byte aligned pointer. */ # const u8 *aad, /* Additional Authentication Data (AAD)*/ # u64 aad_len) /* Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 Bytes */ ############################################################# SYM_FUNC_START(aesni_gcm_init_avx_gen2) FUNC_SAVE INIT GHASH_MUL_AVX, PRECOMPUTE_AVX FUNC_RESTORE RET SYM_FUNC_END(aesni_gcm_init_avx_gen2) ############################################################################### #void aesni_gcm_enc_update_avx_gen2( # gcm_data *my_ctx_data, /* aligned to 16 Bytes */ # gcm_context_data *data, # u8 *out, /* Ciphertext output. Encrypt in-place is allowed. */ # const u8 *in, /* Plaintext input */ # u64 plaintext_len) /* Length of data in Bytes for encryption. */ ############################################################################### SYM_FUNC_START(aesni_gcm_enc_update_avx_gen2) FUNC_SAVE mov keysize, %eax cmp $32, %eax je key_256_enc_update cmp $16, %eax je key_128_enc_update # must be 192 GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, ENC, 11 FUNC_RESTORE RET key_128_enc_update: GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, ENC, 9 FUNC_RESTORE RET key_256_enc_update: GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, ENC, 13 FUNC_RESTORE RET SYM_FUNC_END(aesni_gcm_enc_update_avx_gen2) ############################################################################### #void aesni_gcm_dec_update_avx_gen2( # gcm_data *my_ctx_data, /* aligned to 16 Bytes */ # gcm_context_data *data, # u8 *out, /* Plaintext output. Decrypt in-place is allowed. */ # const u8 *in, /* Ciphertext input */ # u64 plaintext_len) /* Length of data in Bytes for encryption. 
*/ ############################################################################### SYM_FUNC_START(aesni_gcm_dec_update_avx_gen2) FUNC_SAVE mov keysize,%eax cmp $32, %eax je key_256_dec_update cmp $16, %eax je key_128_dec_update # must be 192 GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, DEC, 11 FUNC_RESTORE RET key_128_dec_update: GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, DEC, 9 FUNC_RESTORE RET key_256_dec_update: GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, DEC, 13 FUNC_RESTORE RET SYM_FUNC_END(aesni_gcm_dec_update_avx_gen2) ############################################################################### #void aesni_gcm_finalize_avx_gen2( # gcm_data *my_ctx_data, /* aligned to 16 Bytes */ # gcm_context_data *data, # u8 *auth_tag, /* Authenticated Tag output. */ # u64 auth_tag_len)# /* Authenticated Tag Length in bytes. # Valid values are 16 (most likely), 12 or 8. */ ############################################################################### SYM_FUNC_START(aesni_gcm_finalize_avx_gen2) FUNC_SAVE mov keysize,%eax cmp $32, %eax je key_256_finalize cmp $16, %eax je key_128_finalize # must be 192 GCM_COMPLETE GHASH_MUL_AVX, 11, arg3, arg4 FUNC_RESTORE RET key_128_finalize: GCM_COMPLETE GHASH_MUL_AVX, 9, arg3, arg4 FUNC_RESTORE RET key_256_finalize: GCM_COMPLETE GHASH_MUL_AVX, 13, arg3, arg4 FUNC_RESTORE RET SYM_FUNC_END(aesni_gcm_finalize_avx_gen2) ############################################################################### # GHASH_MUL MACRO to implement: Data*HashKey mod (128,127,126,121,0) # Input: A and B (128-bits each, bit-reflected) # Output: C = A*B*x mod poly, (i.e. >>1 ) # To compute GH = GH*HashKey mod poly, give HK = HashKey<<1 mod poly as input # GH = GH * HK * x mod poly which is equivalent to GH*HashKey mod poly. 
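#
# For reference, a minimal C sketch of the same GF(2^128) multiply in plain
# NIST SP 800-38D bit ordering -- not the bit-reflected, HashKey<<1
# representation these macros use internally; the function and variable names
# below are illustrative only, not kernel APIs:
#
#	#include <stdint.h>
#	#include <string.h>
#
#	static void ghash_mul_ref(const uint8_t x[16], const uint8_t y[16],
#				  uint8_t z[16])
#	{
#		uint8_t v[16], acc[16] = {0};
#
#		memcpy(v, y, 16);
#		for (int i = 0; i < 128; i++) {
#			if ((x[i / 8] >> (7 - i % 8)) & 1)	/* bit i of x, MSB first */
#				for (int j = 0; j < 16; j++)
#					acc[j] ^= v[j];
#			int lsb = v[15] & 1;
#			for (int j = 15; j > 0; j--)		/* v >>= 1, bitwise */
#				v[j] = (v[j] >> 1) | (v[j - 1] << 7);
#			v[0] >>= 1;
#			if (lsb)
#				v[0] ^= 0xe1;	/* reduce by x^128 + x^7 + x^2 + x + 1 */
#		}
#		memcpy(z, acc, 16);
#	}
#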
############################################################################### .macro GHASH_MUL_AVX2 GH HK T1 T2 T3 T4 T5 vpclmulqdq $0x11,\HK,\GH,\T1 # T1 = a1*b1 vpclmulqdq $0x00,\HK,\GH,\T2 # T2 = a0*b0 vpclmulqdq $0x01,\HK,\GH,\T3 # T3 = a1*b0 vpclmulqdq $0x10,\HK,\GH,\GH # GH = a0*b1 vpxor \T3, \GH, \GH vpsrldq $8 , \GH, \T3 # shift-R GH 2 DWs vpslldq $8 , \GH, \GH # shift-L GH 2 DWs vpxor \T3, \T1, \T1 vpxor \T2, \GH, \GH ####################################################################### #first phase of the reduction vmovdqa POLY2(%rip), \T3 vpclmulqdq $0x01, \GH, \T3, \T2 vpslldq $8, \T2, \T2 # shift-L T2 2 DWs vpxor \T2, \GH, \GH # first phase of the reduction complete ####################################################################### #second phase of the reduction vpclmulqdq $0x00, \GH, \T3, \T2 vpsrldq $4, \T2, \T2 # shift-R T2 1 DW (Shift-R only 1-DW to obtain 2-DWs shift-R) vpclmulqdq $0x10, \GH, \T3, \GH vpslldq $4, \GH, \GH # shift-L GH 1 DW (Shift-L 1-DW to obtain result with no shifts) vpxor \T2, \GH, \GH # second phase of the reduction complete ####################################################################### vpxor \T1, \GH, \GH # the result is in GH .endm .macro PRECOMPUTE_AVX2 HK T1 T2 T3 T4 T5 T6 # Haskey_i_k holds XORed values of the low and high parts of the Haskey_i vmovdqa \HK, \T5 GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^2<<1 mod poly vmovdqu \T5, HashKey_2(arg2) # [HashKey_2] = HashKey^2<<1 mod poly GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^3<<1 mod poly vmovdqu \T5, HashKey_3(arg2) GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^4<<1 mod poly vmovdqu \T5, HashKey_4(arg2) GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^5<<1 mod poly vmovdqu \T5, HashKey_5(arg2) GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^6<<1 mod poly vmovdqu \T5, HashKey_6(arg2) GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^7<<1 mod poly vmovdqu \T5, HashKey_7(arg2) GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^8<<1 mod poly vmovdqu \T5, HashKey_8(arg2) .endm ## if a = number of total plaintext bytes ## b = floor(a/16) ## num_initial_blocks = b mod 4# ## encrypt the initial num_initial_blocks blocks and apply ghash on the ciphertext ## r10, r11, r12, rax are clobbered ## arg1, arg2, arg3, arg4 are used as pointers only, not modified .macro INITIAL_BLOCKS_AVX2 REP num_initial_blocks T1 T2 T3 T4 T5 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T6 T_key ENC_DEC VER i = (8-\num_initial_blocks) setreg vmovdqu AadHash(arg2), reg_i # start AES for num_initial_blocks blocks vmovdqu CurCount(arg2), \CTR i = (9-\num_initial_blocks) setreg .rep \num_initial_blocks vpaddd ONE(%rip), \CTR, \CTR # INCR Y0 vmovdqa \CTR, reg_i vpshufb SHUF_MASK(%rip), reg_i, reg_i # perform a 16Byte swap i = (i+1) setreg .endr vmovdqa (arg1), \T_key i = (9-\num_initial_blocks) setreg .rep \num_initial_blocks vpxor \T_key, reg_i, reg_i i = (i+1) setreg .endr j = 1 setreg .rep \REP vmovdqa 16*j(arg1), \T_key i = (9-\num_initial_blocks) setreg .rep \num_initial_blocks vaesenc \T_key, reg_i, reg_i i = (i+1) setreg .endr j = (j+1) setreg .endr vmovdqa 16*j(arg1), \T_key i = (9-\num_initial_blocks) setreg .rep \num_initial_blocks vaesenclast \T_key, reg_i, reg_i i = (i+1) setreg .endr i = (9-\num_initial_blocks) setreg .rep \num_initial_blocks vmovdqu (arg4, %r11), \T1 vpxor \T1, reg_i, reg_i vmovdqu reg_i, (arg3 , %r11) # write back ciphertext for # num_initial_blocks blocks add $16, %r11 
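	# for decryption, restore the original ciphertext block (kept in the T1
	# temporary) into reg_i so the GHASH below runs over the ciphertext
	# rather than the plaintext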
.if \ENC_DEC == DEC vmovdqa \T1, reg_i .endif vpshufb SHUF_MASK(%rip), reg_i, reg_i # prepare ciphertext for GHASH computations i = (i+1) setreg .endr i = (8-\num_initial_blocks) j = (9-\num_initial_blocks) setreg .rep \num_initial_blocks vpxor reg_i, reg_j, reg_j GHASH_MUL_AVX2 reg_j, \T2, \T1, \T3, \T4, \T5, \T6 # apply GHASH on num_initial_blocks blocks i = (i+1) j = (j+1) setreg .endr # XMM8 has the combined result here vmovdqa \XMM8, TMP1(%rsp) vmovdqa \XMM8, \T3 cmp $128, %r13 jl _initial_blocks_done\@ # no need for precomputed constants ############################################################################### # Haskey_i_k holds XORed values of the low and high parts of the Haskey_i vpaddd ONE(%rip), \CTR, \CTR # INCR Y0 vmovdqa \CTR, \XMM1 vpshufb SHUF_MASK(%rip), \XMM1, \XMM1 # perform a 16Byte swap vpaddd ONE(%rip), \CTR, \CTR # INCR Y0 vmovdqa \CTR, \XMM2 vpshufb SHUF_MASK(%rip), \XMM2, \XMM2 # perform a 16Byte swap vpaddd ONE(%rip), \CTR, \CTR # INCR Y0 vmovdqa \CTR, \XMM3 vpshufb SHUF_MASK(%rip), \XMM3, \XMM3 # perform a 16Byte swap vpaddd ONE(%rip), \CTR, \CTR # INCR Y0 vmovdqa \CTR, \XMM4 vpshufb SHUF_MASK(%rip), \XMM4, \XMM4 # perform a 16Byte swap vpaddd ONE(%rip), \CTR, \CTR # INCR Y0 vmovdqa \CTR, \XMM5 vpshufb SHUF_MASK(%rip), \XMM5, \XMM5 # perform a 16Byte swap vpaddd ONE(%rip), \CTR, \CTR # INCR Y0 vmovdqa \CTR, \XMM6 vpshufb SHUF_MASK(%rip), \XMM6, \XMM6 # perform a 16Byte swap vpaddd ONE(%rip), \CTR, \CTR # INCR Y0 vmovdqa \CTR, \XMM7 vpshufb SHUF_MASK(%rip), \XMM7, \XMM7 # perform a 16Byte swap vpaddd ONE(%rip), \CTR, \CTR # INCR Y0 vmovdqa \CTR, \XMM8 vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap vmovdqa (arg1), \T_key vpxor \T_key, \XMM1, \XMM1 vpxor \T_key, \XMM2, \XMM2 vpxor \T_key, \XMM3, \XMM3 vpxor \T_key, \XMM4, \XMM4 vpxor \T_key, \XMM5, \XMM5 vpxor \T_key, \XMM6, \XMM6 vpxor \T_key, \XMM7, \XMM7 vpxor \T_key, \XMM8, \XMM8 i = 1 setreg .rep \REP # do REP rounds vmovdqa 16*i(arg1), \T_key vaesenc \T_key, \XMM1, \XMM1 vaesenc \T_key, \XMM2, \XMM2 vaesenc \T_key, \XMM3, \XMM3 vaesenc \T_key, \XMM4, \XMM4 vaesenc \T_key, \XMM5, \XMM5 vaesenc \T_key, \XMM6, \XMM6 vaesenc \T_key, \XMM7, \XMM7 vaesenc \T_key, \XMM8, \XMM8 i = (i+1) setreg .endr vmovdqa 16*i(arg1), \T_key vaesenclast \T_key, \XMM1, \XMM1 vaesenclast \T_key, \XMM2, \XMM2 vaesenclast \T_key, \XMM3, \XMM3 vaesenclast \T_key, \XMM4, \XMM4 vaesenclast \T_key, \XMM5, \XMM5 vaesenclast \T_key, \XMM6, \XMM6 vaesenclast \T_key, \XMM7, \XMM7 vaesenclast \T_key, \XMM8, \XMM8 vmovdqu (arg4, %r11), \T1 vpxor \T1, \XMM1, \XMM1 vmovdqu \XMM1, (arg3 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM1 .endif vmovdqu 16*1(arg4, %r11), \T1 vpxor \T1, \XMM2, \XMM2 vmovdqu \XMM2, 16*1(arg3 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM2 .endif vmovdqu 16*2(arg4, %r11), \T1 vpxor \T1, \XMM3, \XMM3 vmovdqu \XMM3, 16*2(arg3 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM3 .endif vmovdqu 16*3(arg4, %r11), \T1 vpxor \T1, \XMM4, \XMM4 vmovdqu \XMM4, 16*3(arg3 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM4 .endif vmovdqu 16*4(arg4, %r11), \T1 vpxor \T1, \XMM5, \XMM5 vmovdqu \XMM5, 16*4(arg3 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM5 .endif vmovdqu 16*5(arg4, %r11), \T1 vpxor \T1, \XMM6, \XMM6 vmovdqu \XMM6, 16*5(arg3 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM6 .endif vmovdqu 16*6(arg4, %r11), \T1 vpxor \T1, \XMM7, \XMM7 vmovdqu \XMM7, 16*6(arg3 , %r11) .if \ENC_DEC == DEC vmovdqa \T1, \XMM7 .endif vmovdqu 16*7(arg4, %r11), \T1 vpxor \T1, \XMM8, \XMM8 vmovdqu \XMM8, 16*7(arg3 , %r11) .if \ENC_DEC == DEC 
vmovdqa \T1, \XMM8 .endif add $128, %r11 vpshufb SHUF_MASK(%rip), \XMM1, \XMM1 # perform a 16Byte swap vpxor TMP1(%rsp), \XMM1, \XMM1 # combine GHASHed value with # the corresponding ciphertext vpshufb SHUF_MASK(%rip), \XMM2, \XMM2 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM3, \XMM3 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM4, \XMM4 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM5, \XMM5 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM6, \XMM6 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM7, \XMM7 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap ############################################################################### _initial_blocks_done\@: .endm # encrypt 8 blocks at a time # ghash the 8 previously encrypted ciphertext blocks # arg1, arg2, arg3, arg4 are used as pointers only, not modified # r11 is the data offset value .macro GHASH_8_ENCRYPT_8_PARALLEL_AVX2 REP T1 T2 T3 T4 T5 T6 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T7 loop_idx ENC_DEC vmovdqa \XMM1, \T2 vmovdqa \XMM2, TMP2(%rsp) vmovdqa \XMM3, TMP3(%rsp) vmovdqa \XMM4, TMP4(%rsp) vmovdqa \XMM5, TMP5(%rsp) vmovdqa \XMM6, TMP6(%rsp) vmovdqa \XMM7, TMP7(%rsp) vmovdqa \XMM8, TMP8(%rsp) .if \loop_idx == in_order vpaddd ONE(%rip), \CTR, \XMM1 # INCR CNT vpaddd ONE(%rip), \XMM1, \XMM2 vpaddd ONE(%rip), \XMM2, \XMM3 vpaddd ONE(%rip), \XMM3, \XMM4 vpaddd ONE(%rip), \XMM4, \XMM5 vpaddd ONE(%rip), \XMM5, \XMM6 vpaddd ONE(%rip), \XMM6, \XMM7 vpaddd ONE(%rip), \XMM7, \XMM8 vmovdqa \XMM8, \CTR vpshufb SHUF_MASK(%rip), \XMM1, \XMM1 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM2, \XMM2 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM3, \XMM3 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM4, \XMM4 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM5, \XMM5 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM6, \XMM6 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM7, \XMM7 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap .else vpaddd ONEf(%rip), \CTR, \XMM1 # INCR CNT vpaddd ONEf(%rip), \XMM1, \XMM2 vpaddd ONEf(%rip), \XMM2, \XMM3 vpaddd ONEf(%rip), \XMM3, \XMM4 vpaddd ONEf(%rip), \XMM4, \XMM5 vpaddd ONEf(%rip), \XMM5, \XMM6 vpaddd ONEf(%rip), \XMM6, \XMM7 vpaddd ONEf(%rip), \XMM7, \XMM8 vmovdqa \XMM8, \CTR .endif ####################################################################### vmovdqu (arg1), \T1 vpxor \T1, \XMM1, \XMM1 vpxor \T1, \XMM2, \XMM2 vpxor \T1, \XMM3, \XMM3 vpxor \T1, \XMM4, \XMM4 vpxor \T1, \XMM5, \XMM5 vpxor \T1, \XMM6, \XMM6 vpxor \T1, \XMM7, \XMM7 vpxor \T1, \XMM8, \XMM8 ####################################################################### vmovdqu 16*1(arg1), \T1 vaesenc \T1, \XMM1, \XMM1 vaesenc \T1, \XMM2, \XMM2 vaesenc \T1, \XMM3, \XMM3 vaesenc \T1, \XMM4, \XMM4 vaesenc \T1, \XMM5, \XMM5 vaesenc \T1, \XMM6, \XMM6 vaesenc \T1, \XMM7, \XMM7 vaesenc \T1, \XMM8, \XMM8 vmovdqu 16*2(arg1), \T1 vaesenc \T1, \XMM1, \XMM1 vaesenc \T1, \XMM2, \XMM2 vaesenc \T1, \XMM3, \XMM3 vaesenc \T1, \XMM4, \XMM4 vaesenc \T1, \XMM5, \XMM5 vaesenc \T1, \XMM6, \XMM6 vaesenc \T1, \XMM7, \XMM7 vaesenc \T1, \XMM8, \XMM8 ####################################################################### vmovdqu HashKey_8(arg2), \T5 vpclmulqdq $0x11, \T5, \T2, \T4 # T4 = a1*b1 vpclmulqdq $0x00, \T5, \T2, \T7 # T7 = a0*b0 vpclmulqdq $0x01, \T5, \T2, \T6 # T6 = a1*b0 vpclmulqdq $0x10, \T5, \T2, \T5 # T5 = a0*b1 vpxor \T5, \T6, \T6 vmovdqu 16*3(arg1), \T1 vaesenc \T1, \XMM1, \XMM1 vaesenc \T1, \XMM2, \XMM2 vaesenc \T1, 
\XMM3, \XMM3 vaesenc \T1, \XMM4, \XMM4 vaesenc \T1, \XMM5, \XMM5 vaesenc \T1, \XMM6, \XMM6 vaesenc \T1, \XMM7, \XMM7 vaesenc \T1, \XMM8, \XMM8 vmovdqa TMP2(%rsp), \T1 vmovdqu HashKey_7(arg2), \T5 vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T4 vpclmulqdq $0x00, \T5, \T1, \T3 vpxor \T3, \T7, \T7 vpclmulqdq $0x01, \T5, \T1, \T3 vpxor \T3, \T6, \T6 vpclmulqdq $0x10, \T5, \T1, \T3 vpxor \T3, \T6, \T6 vmovdqu 16*4(arg1), \T1 vaesenc \T1, \XMM1, \XMM1 vaesenc \T1, \XMM2, \XMM2 vaesenc \T1, \XMM3, \XMM3 vaesenc \T1, \XMM4, \XMM4 vaesenc \T1, \XMM5, \XMM5 vaesenc \T1, \XMM6, \XMM6 vaesenc \T1, \XMM7, \XMM7 vaesenc \T1, \XMM8, \XMM8 ####################################################################### vmovdqa TMP3(%rsp), \T1 vmovdqu HashKey_6(arg2), \T5 vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T4 vpclmulqdq $0x00, \T5, \T1, \T3 vpxor \T3, \T7, \T7 vpclmulqdq $0x01, \T5, \T1, \T3 vpxor \T3, \T6, \T6 vpclmulqdq $0x10, \T5, \T1, \T3 vpxor \T3, \T6, \T6 vmovdqu 16*5(arg1), \T1 vaesenc \T1, \XMM1, \XMM1 vaesenc \T1, \XMM2, \XMM2 vaesenc \T1, \XMM3, \XMM3 vaesenc \T1, \XMM4, \XMM4 vaesenc \T1, \XMM5, \XMM5 vaesenc \T1, \XMM6, \XMM6 vaesenc \T1, \XMM7, \XMM7 vaesenc \T1, \XMM8, \XMM8 vmovdqa TMP4(%rsp), \T1 vmovdqu HashKey_5(arg2), \T5 vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T4 vpclmulqdq $0x00, \T5, \T1, \T3 vpxor \T3, \T7, \T7 vpclmulqdq $0x01, \T5, \T1, \T3 vpxor \T3, \T6, \T6 vpclmulqdq $0x10, \T5, \T1, \T3 vpxor \T3, \T6, \T6 vmovdqu 16*6(arg1), \T1 vaesenc \T1, \XMM1, \XMM1 vaesenc \T1, \XMM2, \XMM2 vaesenc \T1, \XMM3, \XMM3 vaesenc \T1, \XMM4, \XMM4 vaesenc \T1, \XMM5, \XMM5 vaesenc \T1, \XMM6, \XMM6 vaesenc \T1, \XMM7, \XMM7 vaesenc \T1, \XMM8, \XMM8 vmovdqa TMP5(%rsp), \T1 vmovdqu HashKey_4(arg2), \T5 vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T4 vpclmulqdq $0x00, \T5, \T1, \T3 vpxor \T3, \T7, \T7 vpclmulqdq $0x01, \T5, \T1, \T3 vpxor \T3, \T6, \T6 vpclmulqdq $0x10, \T5, \T1, \T3 vpxor \T3, \T6, \T6 vmovdqu 16*7(arg1), \T1 vaesenc \T1, \XMM1, \XMM1 vaesenc \T1, \XMM2, \XMM2 vaesenc \T1, \XMM3, \XMM3 vaesenc \T1, \XMM4, \XMM4 vaesenc \T1, \XMM5, \XMM5 vaesenc \T1, \XMM6, \XMM6 vaesenc \T1, \XMM7, \XMM7 vaesenc \T1, \XMM8, \XMM8 vmovdqa TMP6(%rsp), \T1 vmovdqu HashKey_3(arg2), \T5 vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T4 vpclmulqdq $0x00, \T5, \T1, \T3 vpxor \T3, \T7, \T7 vpclmulqdq $0x01, \T5, \T1, \T3 vpxor \T3, \T6, \T6 vpclmulqdq $0x10, \T5, \T1, \T3 vpxor \T3, \T6, \T6 vmovdqu 16*8(arg1), \T1 vaesenc \T1, \XMM1, \XMM1 vaesenc \T1, \XMM2, \XMM2 vaesenc \T1, \XMM3, \XMM3 vaesenc \T1, \XMM4, \XMM4 vaesenc \T1, \XMM5, \XMM5 vaesenc \T1, \XMM6, \XMM6 vaesenc \T1, \XMM7, \XMM7 vaesenc \T1, \XMM8, \XMM8 vmovdqa TMP7(%rsp), \T1 vmovdqu HashKey_2(arg2), \T5 vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T4 vpclmulqdq $0x00, \T5, \T1, \T3 vpxor \T3, \T7, \T7 vpclmulqdq $0x01, \T5, \T1, \T3 vpxor \T3, \T6, \T6 vpclmulqdq $0x10, \T5, \T1, \T3 vpxor \T3, \T6, \T6 ####################################################################### vmovdqu 16*9(arg1), \T5 vaesenc \T5, \XMM1, \XMM1 vaesenc \T5, \XMM2, \XMM2 vaesenc \T5, \XMM3, \XMM3 vaesenc \T5, \XMM4, \XMM4 vaesenc \T5, \XMM5, \XMM5 vaesenc \T5, \XMM6, \XMM6 vaesenc \T5, \XMM7, \XMM7 vaesenc \T5, \XMM8, \XMM8 vmovdqa TMP8(%rsp), \T1 vmovdqu HashKey(arg2), \T5 vpclmulqdq $0x00, \T5, \T1, \T3 vpxor \T3, \T7, \T7 vpclmulqdq $0x01, \T5, \T1, \T3 vpxor \T3, \T6, \T6 vpclmulqdq $0x10, \T5, \T1, \T3 vpxor \T3, \T6, \T6 vpclmulqdq $0x11, \T5, \T1, \T3 vpxor \T3, \T4, \T1 vmovdqu 16*10(arg1), \T5 i = 11 setreg .rep 
(\REP-9) vaesenc \T5, \XMM1, \XMM1 vaesenc \T5, \XMM2, \XMM2 vaesenc \T5, \XMM3, \XMM3 vaesenc \T5, \XMM4, \XMM4 vaesenc \T5, \XMM5, \XMM5 vaesenc \T5, \XMM6, \XMM6 vaesenc \T5, \XMM7, \XMM7 vaesenc \T5, \XMM8, \XMM8 vmovdqu 16*i(arg1), \T5 i = i + 1 setreg .endr i = 0 j = 1 setreg .rep 8 vpxor 16*i(arg4, %r11), \T5, \T2 .if \ENC_DEC == ENC vaesenclast \T2, reg_j, reg_j .else vaesenclast \T2, reg_j, \T3 vmovdqu 16*i(arg4, %r11), reg_j vmovdqu \T3, 16*i(arg3, %r11) .endif i = (i+1) j = (j+1) setreg .endr ####################################################################### vpslldq $8, \T6, \T3 # shift-L T3 2 DWs vpsrldq $8, \T6, \T6 # shift-R T2 2 DWs vpxor \T3, \T7, \T7 vpxor \T6, \T1, \T1 # accumulate the results in T1:T7 ####################################################################### #first phase of the reduction vmovdqa POLY2(%rip), \T3 vpclmulqdq $0x01, \T7, \T3, \T2 vpslldq $8, \T2, \T2 # shift-L xmm2 2 DWs vpxor \T2, \T7, \T7 # first phase of the reduction complete ####################################################################### .if \ENC_DEC == ENC vmovdqu \XMM1, 16*0(arg3,%r11) # Write to the Ciphertext buffer vmovdqu \XMM2, 16*1(arg3,%r11) # Write to the Ciphertext buffer vmovdqu \XMM3, 16*2(arg3,%r11) # Write to the Ciphertext buffer vmovdqu \XMM4, 16*3(arg3,%r11) # Write to the Ciphertext buffer vmovdqu \XMM5, 16*4(arg3,%r11) # Write to the Ciphertext buffer vmovdqu \XMM6, 16*5(arg3,%r11) # Write to the Ciphertext buffer vmovdqu \XMM7, 16*6(arg3,%r11) # Write to the Ciphertext buffer vmovdqu \XMM8, 16*7(arg3,%r11) # Write to the Ciphertext buffer .endif ####################################################################### #second phase of the reduction vpclmulqdq $0x00, \T7, \T3, \T2 vpsrldq $4, \T2, \T2 # shift-R xmm2 1 DW (Shift-R only 1-DW to obtain 2-DWs shift-R) vpclmulqdq $0x10, \T7, \T3, \T4 vpslldq $4, \T4, \T4 # shift-L xmm0 1 DW (Shift-L 1-DW to obtain result with no shifts) vpxor \T2, \T4, \T4 # second phase of the reduction complete ####################################################################### vpxor \T4, \T1, \T1 # the result is in T1 vpshufb SHUF_MASK(%rip), \XMM1, \XMM1 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM2, \XMM2 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM3, \XMM3 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM4, \XMM4 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM5, \XMM5 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM6, \XMM6 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM7, \XMM7 # perform a 16Byte swap vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap vpxor \T1, \XMM1, \XMM1 .endm # GHASH the last 4 ciphertext blocks. 
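# Roughly, the macro below folds all eight outstanding blocks into the hash in
# a single pass using the key powers precomputed above:
#
#	XMM1*HashKey_8 + XMM2*HashKey_7 + ... + XMM8*HashKey
#
# with every multiply and add performed carry-less in GF(2^128) (Karatsuba
# form) and the accumulated 256-bit result reduced once at the end.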
.macro GHASH_LAST_8_AVX2 T1 T2 T3 T4 T5 T6 T7 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 ## Karatsuba Method vmovdqu HashKey_8(arg2), \T5 vpshufd $0b01001110, \XMM1, \T2 vpshufd $0b01001110, \T5, \T3 vpxor \XMM1, \T2, \T2 vpxor \T5, \T3, \T3 vpclmulqdq $0x11, \T5, \XMM1, \T6 vpclmulqdq $0x00, \T5, \XMM1, \T7 vpclmulqdq $0x00, \T3, \T2, \XMM1 ###################### vmovdqu HashKey_7(arg2), \T5 vpshufd $0b01001110, \XMM2, \T2 vpshufd $0b01001110, \T5, \T3 vpxor \XMM2, \T2, \T2 vpxor \T5, \T3, \T3 vpclmulqdq $0x11, \T5, \XMM2, \T4 vpxor \T4, \T6, \T6 vpclmulqdq $0x00, \T5, \XMM2, \T4 vpxor \T4, \T7, \T7 vpclmulqdq $0x00, \T3, \T2, \T2 vpxor \T2, \XMM1, \XMM1 ###################### vmovdqu HashKey_6(arg2), \T5 vpshufd $0b01001110, \XMM3, \T2 vpshufd $0b01001110, \T5, \T3 vpxor \XMM3, \T2, \T2 vpxor \T5, \T3, \T3 vpclmulqdq $0x11, \T5, \XMM3, \T4 vpxor \T4, \T6, \T6 vpclmulqdq $0x00, \T5, \XMM3, \T4 vpxor \T4, \T7, \T7 vpclmulqdq $0x00, \T3, \T2, \T2 vpxor \T2, \XMM1, \XMM1 ###################### vmovdqu HashKey_5(arg2), \T5 vpshufd $0b01001110, \XMM4, \T2 vpshufd $0b01001110, \T5, \T3 vpxor \XMM4, \T2, \T2 vpxor \T5, \T3, \T3 vpclmulqdq $0x11, \T5, \XMM4, \T4 vpxor \T4, \T6, \T6 vpclmulqdq $0x00, \T5, \XMM4, \T4 vpxor \T4, \T7, \T7 vpclmulqdq $0x00, \T3, \T2, \T2 vpxor \T2, \XMM1, \XMM1 ###################### vmovdqu HashKey_4(arg2), \T5 vpshufd $0b01001110, \XMM5, \T2 vpshufd $0b01001110, \T5, \T3 vpxor \XMM5, \T2, \T2 vpxor \T5, \T3, \T3 vpclmulqdq $0x11, \T5, \XMM5, \T4 vpxor \T4, \T6, \T6 vpclmulqdq $0x00, \T5, \XMM5, \T4 vpxor \T4, \T7, \T7 vpclmulqdq $0x00, \T3, \T2, \T2 vpxor \T2, \XMM1, \XMM1 ###################### vmovdqu HashKey_3(arg2), \T5 vpshufd $0b01001110, \XMM6, \T2 vpshufd $0b01001110, \T5, \T3 vpxor \XMM6, \T2, \T2 vpxor \T5, \T3, \T3 vpclmulqdq $0x11, \T5, \XMM6, \T4 vpxor \T4, \T6, \T6 vpclmulqdq $0x00, \T5, \XMM6, \T4 vpxor \T4, \T7, \T7 vpclmulqdq $0x00, \T3, \T2, \T2 vpxor \T2, \XMM1, \XMM1 ###################### vmovdqu HashKey_2(arg2), \T5 vpshufd $0b01001110, \XMM7, \T2 vpshufd $0b01001110, \T5, \T3 vpxor \XMM7, \T2, \T2 vpxor \T5, \T3, \T3 vpclmulqdq $0x11, \T5, \XMM7, \T4 vpxor \T4, \T6, \T6 vpclmulqdq $0x00, \T5, \XMM7, \T4 vpxor \T4, \T7, \T7 vpclmulqdq $0x00, \T3, \T2, \T2 vpxor \T2, \XMM1, \XMM1 ###################### vmovdqu HashKey(arg2), \T5 vpshufd $0b01001110, \XMM8, \T2 vpshufd $0b01001110, \T5, \T3 vpxor \XMM8, \T2, \T2 vpxor \T5, \T3, \T3 vpclmulqdq $0x11, \T5, \XMM8, \T4 vpxor \T4, \T6, \T6 vpclmulqdq $0x00, \T5, \XMM8, \T4 vpxor \T4, \T7, \T7 vpclmulqdq $0x00, \T3, \T2, \T2 vpxor \T2, \XMM1, \XMM1 vpxor \T6, \XMM1, \XMM1 vpxor \T7, \XMM1, \T2 vpslldq $8, \T2, \T4 vpsrldq $8, \T2, \T2 vpxor \T4, \T7, \T7 vpxor \T2, \T6, \T6 # <T6:T7> holds the result of the # accumulated carry-less multiplications ####################################################################### #first phase of the reduction vmovdqa POLY2(%rip), \T3 vpclmulqdq $0x01, \T7, \T3, \T2 vpslldq $8, \T2, \T2 # shift-L xmm2 2 DWs vpxor \T2, \T7, \T7 # first phase of the reduction complete ####################################################################### #second phase of the reduction vpclmulqdq $0x00, \T7, \T3, \T2 vpsrldq $4, \T2, \T2 # shift-R T2 1 DW (Shift-R only 1-DW to obtain 2-DWs shift-R) vpclmulqdq $0x10, \T7, \T3, \T4 vpslldq $4, \T4, \T4 # shift-L T4 1 DW (Shift-L 1-DW to obtain result with no shifts) vpxor \T2, \T4, \T4 # second phase of the reduction complete ####################################################################### vpxor \T4, \T6, \T6 # the result is in T6 
.endm ############################################################# #void aesni_gcm_init_avx_gen4 # (gcm_data *my_ctx_data, # gcm_context_data *data, # u8 *iv, /* Pre-counter block j0: 4 byte salt # (from Security Association) concatenated with 8 byte # Initialisation Vector (from IPSec ESP Payload) # concatenated with 0x00000001. 16-byte aligned pointer. */ # u8 *hash_subkey# /* H, the Hash sub key input. Data starts on a 16-byte boundary. */ # const u8 *aad, /* Additional Authentication Data (AAD)*/ # u64 aad_len) /* Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 Bytes */ ############################################################# SYM_FUNC_START(aesni_gcm_init_avx_gen4) FUNC_SAVE INIT GHASH_MUL_AVX2, PRECOMPUTE_AVX2 FUNC_RESTORE RET SYM_FUNC_END(aesni_gcm_init_avx_gen4) ############################################################################### #void aesni_gcm_enc_avx_gen4( # gcm_data *my_ctx_data, /* aligned to 16 Bytes */ # gcm_context_data *data, # u8 *out, /* Ciphertext output. Encrypt in-place is allowed. */ # const u8 *in, /* Plaintext input */ # u64 plaintext_len) /* Length of data in Bytes for encryption. */ ############################################################################### SYM_FUNC_START(aesni_gcm_enc_update_avx_gen4) FUNC_SAVE mov keysize,%eax cmp $32, %eax je key_256_enc_update4 cmp $16, %eax je key_128_enc_update4 # must be 192 GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, ENC, 11 FUNC_RESTORE RET key_128_enc_update4: GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, ENC, 9 FUNC_RESTORE RET key_256_enc_update4: GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, ENC, 13 FUNC_RESTORE RET SYM_FUNC_END(aesni_gcm_enc_update_avx_gen4) ############################################################################### #void aesni_gcm_dec_update_avx_gen4( # gcm_data *my_ctx_data, /* aligned to 16 Bytes */ # gcm_context_data *data, # u8 *out, /* Plaintext output. Decrypt in-place is allowed. */ # const u8 *in, /* Ciphertext input */ # u64 plaintext_len) /* Length of data in Bytes for encryption. */ ############################################################################### SYM_FUNC_START(aesni_gcm_dec_update_avx_gen4) FUNC_SAVE mov keysize,%eax cmp $32, %eax je key_256_dec_update4 cmp $16, %eax je key_128_dec_update4 # must be 192 GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, DEC, 11 FUNC_RESTORE RET key_128_dec_update4: GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, DEC, 9 FUNC_RESTORE RET key_256_dec_update4: GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, DEC, 13 FUNC_RESTORE RET SYM_FUNC_END(aesni_gcm_dec_update_avx_gen4) ############################################################################### #void aesni_gcm_finalize_avx_gen4( # gcm_data *my_ctx_data, /* aligned to 16 Bytes */ # gcm_context_data *data, # u8 *auth_tag, /* Authenticated Tag output. */ # u64 auth_tag_len)# /* Authenticated Tag Length in bytes. # Valid values are 16 (most likely), 12 or 8. 
*/ ############################################################################### SYM_FUNC_START(aesni_gcm_finalize_avx_gen4) FUNC_SAVE mov keysize,%eax cmp $32, %eax je key_256_finalize4 cmp $16, %eax je key_128_finalize4 # must be 192 GCM_COMPLETE GHASH_MUL_AVX2, 11, arg3, arg4 FUNC_RESTORE RET key_128_finalize4: GCM_COMPLETE GHASH_MUL_AVX2, 9, arg3, arg4 FUNC_RESTORE RET key_256_finalize4: GCM_COMPLETE GHASH_MUL_AVX2, 13, arg3, arg4 FUNC_RESTORE RET SYM_FUNC_END(aesni_gcm_finalize_avx_gen4)
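
The GHASH_LAST_8_AVX2 macro above folds the eight accumulated Karatsuba products and then reduces the 256-bit result modulo the GCM polynomial in the two PCLMULQDQ phases marked "first/second phase of the reduction". As a plain-C reference for what that arithmetic computes, here is a minimal bit-serial GF(2^128) multiply in GCM bit order; it is only an illustrative sketch (the helper name ghash_mul and the tiny demo in main are invented for this note), not the kernel's implementation.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Bit-serial GHASH block multiply: y = (y ^ x) * h in GF(2^128) with the
 * GCM polynomial x^128 + x^7 + x^2 + x + 1, using GCM's big-endian bit
 * order.  Slow, but it spells out the math the PCLMULQDQ/Karatsuba code
 * above computes eight blocks at a time.
 */
static void ghash_mul(uint8_t y[16], const uint8_t x[16], const uint8_t h[16])
{
        uint8_t v[16], z[16] = { 0 };
        int i, j;

        for (i = 0; i < 16; i++)
                v[i] = y[i] ^ x[i];             /* absorb the new block */

        for (i = 0; i < 128; i++) {
                int bit = (h[i / 8] >> (7 - (i % 8))) & 1;
                int carry;

                if (bit)                        /* z ^= v when bit i of H is set */
                        for (j = 0; j < 16; j++)
                                z[j] ^= v[j];

                carry = v[15] & 1;              /* v = v * x: shift right in GCM bit order */
                for (j = 15; j > 0; j--)
                        v[j] = (uint8_t)((v[j] >> 1) | (v[j - 1] << 7));
                v[0] >>= 1;
                if (carry)
                        v[0] ^= 0xe1;           /* reduce by the GCM polynomial */
        }
        memcpy(y, z, 16);
}

int main(void)
{
        uint8_t h[16] = { 0x80 };               /* H = 1 in GCM bit order */
        uint8_t y[16] = { 0 };
        uint8_t x[16] = { 0xde, 0xad, 0xbe, 0xef };
        int i;

        ghash_mul(y, x, h);                     /* multiplying by 1 leaves y == x */
        for (i = 0; i < 16; i++)
                printf("%02x", y[i]);
        printf("\n");
        return 0;
}

The vector code produces the same per-block result; it merely processes eight blocks per pass and defers the polynomial reduction until the products have been accumulated.
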
aixcc-public/challenge-001-exemplar-source
18,119
arch/x86/crypto/sm4-aesni-avx-asm_64.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * SM4 Cipher Algorithm, AES-NI/AVX optimized. * as specified in * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html * * Copyright (C) 2018 Markku-Juhani O. Saarinen <mjos@iki.fi> * Copyright (C) 2020 Jussi Kivilinna <jussi.kivilinna@iki.fi> * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com> */ /* Based on SM4 AES-NI work by libgcrypt and Markku-Juhani O. Saarinen at: * https://github.com/mjosaarinen/sm4ni */ #include <linux/linkage.h> #include <linux/cfi_types.h> #include <asm/frame.h> #define rRIP (%rip) #define RX0 %xmm0 #define RX1 %xmm1 #define MASK_4BIT %xmm2 #define RTMP0 %xmm3 #define RTMP1 %xmm4 #define RTMP2 %xmm5 #define RTMP3 %xmm6 #define RTMP4 %xmm7 #define RA0 %xmm8 #define RA1 %xmm9 #define RA2 %xmm10 #define RA3 %xmm11 #define RB0 %xmm12 #define RB1 %xmm13 #define RB2 %xmm14 #define RB3 %xmm15 #define RNOT %xmm0 #define RBSWAP %xmm1 /* Transpose four 32-bit words between 128-bit vectors. */ #define transpose_4x4(x0, x1, x2, x3, t1, t2) \ vpunpckhdq x1, x0, t2; \ vpunpckldq x1, x0, x0; \ \ vpunpckldq x3, x2, t1; \ vpunpckhdq x3, x2, x2; \ \ vpunpckhqdq t1, x0, x1; \ vpunpcklqdq t1, x0, x0; \ \ vpunpckhqdq x2, t2, x3; \ vpunpcklqdq x2, t2, x2; /* pre-SubByte transform. */ #define transform_pre(x, lo_t, hi_t, mask4bit, tmp0) \ vpand x, mask4bit, tmp0; \ vpandn x, mask4bit, x; \ vpsrld $4, x, x; \ \ vpshufb tmp0, lo_t, tmp0; \ vpshufb x, hi_t, x; \ vpxor tmp0, x, x; /* post-SubByte transform. Note: x has been XOR'ed with mask4bit by * 'vaeslastenc' instruction. */ #define transform_post(x, lo_t, hi_t, mask4bit, tmp0) \ vpandn mask4bit, x, tmp0; \ vpsrld $4, x, x; \ vpand x, mask4bit, x; \ \ vpshufb tmp0, lo_t, tmp0; \ vpshufb x, hi_t, x; \ vpxor tmp0, x, x; .section .rodata.cst16, "aM", @progbits, 16 .align 16 /* * Following four affine transform look-up tables are from work by * Markku-Juhani O. Saarinen, at https://github.com/mjosaarinen/sm4ni * * These allow exposing SM4 S-Box from AES SubByte. */ /* pre-SubByte affine transform, from SM4 field to AES field. */ .Lpre_tf_lo_s: .quad 0x9197E2E474720701, 0xC7C1B4B222245157 .Lpre_tf_hi_s: .quad 0xE240AB09EB49A200, 0xF052B91BF95BB012 /* post-SubByte affine transform, from AES field to SM4 field. 
*/ .Lpost_tf_lo_s: .quad 0x5B67F2CEA19D0834, 0xEDD14478172BBE82 .Lpost_tf_hi_s: .quad 0xAE7201DD73AFDC00, 0x11CDBE62CC1063BF /* For isolating SubBytes from AESENCLAST, inverse shift row */ .Linv_shift_row: .byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b .byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03 /* Inverse shift row + Rotate left by 8 bits on 32-bit words with vpshufb */ .Linv_shift_row_rol_8: .byte 0x07, 0x00, 0x0d, 0x0a, 0x0b, 0x04, 0x01, 0x0e .byte 0x0f, 0x08, 0x05, 0x02, 0x03, 0x0c, 0x09, 0x06 /* Inverse shift row + Rotate left by 16 bits on 32-bit words with vpshufb */ .Linv_shift_row_rol_16: .byte 0x0a, 0x07, 0x00, 0x0d, 0x0e, 0x0b, 0x04, 0x01 .byte 0x02, 0x0f, 0x08, 0x05, 0x06, 0x03, 0x0c, 0x09 /* Inverse shift row + Rotate left by 24 bits on 32-bit words with vpshufb */ .Linv_shift_row_rol_24: .byte 0x0d, 0x0a, 0x07, 0x00, 0x01, 0x0e, 0x0b, 0x04 .byte 0x05, 0x02, 0x0f, 0x08, 0x09, 0x06, 0x03, 0x0c /* For CTR-mode IV byteswap */ .Lbswap128_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 /* For input word byte-swap */ .Lbswap32_mask: .byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 .align 4 /* 4-bit mask */ .L0f0f0f0f: .long 0x0f0f0f0f /* 12 bytes, only for padding */ .Lpadding_deadbeef: .long 0xdeadbeef, 0xdeadbeef, 0xdeadbeef .text .align 16 /* * void sm4_aesni_avx_crypt4(const u32 *rk, u8 *dst, * const u8 *src, int nblocks) */ .align 8 SYM_FUNC_START(sm4_aesni_avx_crypt4) /* input: * %rdi: round key array, CTX * %rsi: dst (1..4 blocks) * %rdx: src (1..4 blocks) * %rcx: num blocks (1..4) */ FRAME_BEGIN vmovdqu 0*16(%rdx), RA0; vmovdqa RA0, RA1; vmovdqa RA0, RA2; vmovdqa RA0, RA3; cmpq $2, %rcx; jb .Lblk4_load_input_done; vmovdqu 1*16(%rdx), RA1; je .Lblk4_load_input_done; vmovdqu 2*16(%rdx), RA2; cmpq $3, %rcx; je .Lblk4_load_input_done; vmovdqu 3*16(%rdx), RA3; .Lblk4_load_input_done: vmovdqa .Lbswap32_mask rRIP, RTMP2; vpshufb RTMP2, RA0, RA0; vpshufb RTMP2, RA1, RA1; vpshufb RTMP2, RA2, RA2; vpshufb RTMP2, RA3, RA3; vbroadcastss .L0f0f0f0f rRIP, MASK_4BIT; vmovdqa .Lpre_tf_lo_s rRIP, RTMP4; vmovdqa .Lpre_tf_hi_s rRIP, RB0; vmovdqa .Lpost_tf_lo_s rRIP, RB1; vmovdqa .Lpost_tf_hi_s rRIP, RB2; vmovdqa .Linv_shift_row rRIP, RB3; vmovdqa .Linv_shift_row_rol_8 rRIP, RTMP2; vmovdqa .Linv_shift_row_rol_16 rRIP, RTMP3; transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1); #define ROUND(round, s0, s1, s2, s3) \ vbroadcastss (4*(round))(%rdi), RX0; \ vpxor s1, RX0, RX0; \ vpxor s2, RX0, RX0; \ vpxor s3, RX0, RX0; /* s1 ^ s2 ^ s3 ^ rk */ \ \ /* sbox, non-linear part */ \ transform_pre(RX0, RTMP4, RB0, MASK_4BIT, RTMP0); \ vaesenclast MASK_4BIT, RX0, RX0; \ transform_post(RX0, RB1, RB2, MASK_4BIT, RTMP0); \ \ /* linear part */ \ vpshufb RB3, RX0, RTMP0; \ vpxor RTMP0, s0, s0; /* s0 ^ x */ \ vpshufb RTMP2, RX0, RTMP1; \ vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) */ \ vpshufb RTMP3, RX0, RTMP1; \ vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) ^ rol(x,16) */ \ vpshufb .Linv_shift_row_rol_24 rRIP, RX0, RTMP1; \ vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,24) */ \ vpslld $2, RTMP0, RTMP1; \ vpsrld $30, RTMP0, RTMP0; \ vpxor RTMP0, s0, s0; \ /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \ vpxor RTMP1, s0, s0; leaq (32*4)(%rdi), %rax; .align 16 .Lroundloop_blk4: ROUND(0, RA0, RA1, RA2, RA3); ROUND(1, RA1, RA2, RA3, RA0); ROUND(2, RA2, RA3, RA0, RA1); ROUND(3, RA3, RA0, RA1, RA2); leaq (4*4)(%rdi), %rdi; cmpq %rax, %rdi; jne .Lroundloop_blk4; #undef ROUND vmovdqa .Lbswap128_mask rRIP, RTMP2; transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1); vpshufb RTMP2, RA0, 
RA0; vpshufb RTMP2, RA1, RA1; vpshufb RTMP2, RA2, RA2; vpshufb RTMP2, RA3, RA3; vmovdqu RA0, 0*16(%rsi); cmpq $2, %rcx; jb .Lblk4_store_output_done; vmovdqu RA1, 1*16(%rsi); je .Lblk4_store_output_done; vmovdqu RA2, 2*16(%rsi); cmpq $3, %rcx; je .Lblk4_store_output_done; vmovdqu RA3, 3*16(%rsi); .Lblk4_store_output_done: vzeroall; FRAME_END RET; SYM_FUNC_END(sm4_aesni_avx_crypt4) .align 8 SYM_FUNC_START_LOCAL(__sm4_crypt_blk8) /* input: * %rdi: round key array, CTX * RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: eight parallel * plaintext blocks * output: * RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: eight parallel * ciphertext blocks */ FRAME_BEGIN vmovdqa .Lbswap32_mask rRIP, RTMP2; vpshufb RTMP2, RA0, RA0; vpshufb RTMP2, RA1, RA1; vpshufb RTMP2, RA2, RA2; vpshufb RTMP2, RA3, RA3; vpshufb RTMP2, RB0, RB0; vpshufb RTMP2, RB1, RB1; vpshufb RTMP2, RB2, RB2; vpshufb RTMP2, RB3, RB3; vbroadcastss .L0f0f0f0f rRIP, MASK_4BIT; transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1); transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1); #define ROUND(round, s0, s1, s2, s3, r0, r1, r2, r3) \ vbroadcastss (4*(round))(%rdi), RX0; \ vmovdqa .Lpre_tf_lo_s rRIP, RTMP4; \ vmovdqa .Lpre_tf_hi_s rRIP, RTMP1; \ vmovdqa RX0, RX1; \ vpxor s1, RX0, RX0; \ vpxor s2, RX0, RX0; \ vpxor s3, RX0, RX0; /* s1 ^ s2 ^ s3 ^ rk */ \ vmovdqa .Lpost_tf_lo_s rRIP, RTMP2; \ vmovdqa .Lpost_tf_hi_s rRIP, RTMP3; \ vpxor r1, RX1, RX1; \ vpxor r2, RX1, RX1; \ vpxor r3, RX1, RX1; /* r1 ^ r2 ^ r3 ^ rk */ \ \ /* sbox, non-linear part */ \ transform_pre(RX0, RTMP4, RTMP1, MASK_4BIT, RTMP0); \ transform_pre(RX1, RTMP4, RTMP1, MASK_4BIT, RTMP0); \ vmovdqa .Linv_shift_row rRIP, RTMP4; \ vaesenclast MASK_4BIT, RX0, RX0; \ vaesenclast MASK_4BIT, RX1, RX1; \ transform_post(RX0, RTMP2, RTMP3, MASK_4BIT, RTMP0); \ transform_post(RX1, RTMP2, RTMP3, MASK_4BIT, RTMP0); \ \ /* linear part */ \ vpshufb RTMP4, RX0, RTMP0; \ vpxor RTMP0, s0, s0; /* s0 ^ x */ \ vpshufb RTMP4, RX1, RTMP2; \ vmovdqa .Linv_shift_row_rol_8 rRIP, RTMP4; \ vpxor RTMP2, r0, r0; /* r0 ^ x */ \ vpshufb RTMP4, RX0, RTMP1; \ vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) */ \ vpshufb RTMP4, RX1, RTMP3; \ vmovdqa .Linv_shift_row_rol_16 rRIP, RTMP4; \ vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) */ \ vpshufb RTMP4, RX0, RTMP1; \ vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) ^ rol(x,16) */ \ vpshufb RTMP4, RX1, RTMP3; \ vmovdqa .Linv_shift_row_rol_24 rRIP, RTMP4; \ vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) ^ rol(x,16) */ \ vpshufb RTMP4, RX0, RTMP1; \ vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,24) */ \ /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \ vpslld $2, RTMP0, RTMP1; \ vpsrld $30, RTMP0, RTMP0; \ vpxor RTMP0, s0, s0; \ vpxor RTMP1, s0, s0; \ vpshufb RTMP4, RX1, RTMP3; \ vpxor RTMP3, r0, r0; /* r0 ^ x ^ rol(x,24) */ \ /* r0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \ vpslld $2, RTMP2, RTMP3; \ vpsrld $30, RTMP2, RTMP2; \ vpxor RTMP2, r0, r0; \ vpxor RTMP3, r0, r0; leaq (32*4)(%rdi), %rax; .align 16 .Lroundloop_blk8: ROUND(0, RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3); ROUND(1, RA1, RA2, RA3, RA0, RB1, RB2, RB3, RB0); ROUND(2, RA2, RA3, RA0, RA1, RB2, RB3, RB0, RB1); ROUND(3, RA3, RA0, RA1, RA2, RB3, RB0, RB1, RB2); leaq (4*4)(%rdi), %rdi; cmpq %rax, %rdi; jne .Lroundloop_blk8; #undef ROUND vmovdqa .Lbswap128_mask rRIP, RTMP2; transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1); transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1); vpshufb RTMP2, RA0, RA0; vpshufb RTMP2, RA1, RA1; vpshufb RTMP2, RA2, RA2; vpshufb RTMP2, RA3, RA3; vpshufb RTMP2, RB0, RB0; vpshufb RTMP2, RB1, RB1; vpshufb RTMP2, 
RB2, RB2; vpshufb RTMP2, RB3, RB3; FRAME_END RET; SYM_FUNC_END(__sm4_crypt_blk8) /* * void sm4_aesni_avx_crypt8(const u32 *rk, u8 *dst, * const u8 *src, int nblocks) */ .align 8 SYM_FUNC_START(sm4_aesni_avx_crypt8) /* input: * %rdi: round key array, CTX * %rsi: dst (1..8 blocks) * %rdx: src (1..8 blocks) * %rcx: num blocks (1..8) */ cmpq $5, %rcx; jb sm4_aesni_avx_crypt4; FRAME_BEGIN vmovdqu (0 * 16)(%rdx), RA0; vmovdqu (1 * 16)(%rdx), RA1; vmovdqu (2 * 16)(%rdx), RA2; vmovdqu (3 * 16)(%rdx), RA3; vmovdqu (4 * 16)(%rdx), RB0; vmovdqa RB0, RB1; vmovdqa RB0, RB2; vmovdqa RB0, RB3; je .Lblk8_load_input_done; vmovdqu (5 * 16)(%rdx), RB1; cmpq $7, %rcx; jb .Lblk8_load_input_done; vmovdqu (6 * 16)(%rdx), RB2; je .Lblk8_load_input_done; vmovdqu (7 * 16)(%rdx), RB3; .Lblk8_load_input_done: call __sm4_crypt_blk8; cmpq $6, %rcx; vmovdqu RA0, (0 * 16)(%rsi); vmovdqu RA1, (1 * 16)(%rsi); vmovdqu RA2, (2 * 16)(%rsi); vmovdqu RA3, (3 * 16)(%rsi); vmovdqu RB0, (4 * 16)(%rsi); jb .Lblk8_store_output_done; vmovdqu RB1, (5 * 16)(%rsi); je .Lblk8_store_output_done; vmovdqu RB2, (6 * 16)(%rsi); cmpq $7, %rcx; je .Lblk8_store_output_done; vmovdqu RB3, (7 * 16)(%rsi); .Lblk8_store_output_done: vzeroall; FRAME_END RET; SYM_FUNC_END(sm4_aesni_avx_crypt8) /* * void sm4_aesni_avx_ctr_enc_blk8(const u32 *rk, u8 *dst, * const u8 *src, u8 *iv) */ .align 8 SYM_TYPED_FUNC_START(sm4_aesni_avx_ctr_enc_blk8) /* input: * %rdi: round key array, CTX * %rsi: dst (8 blocks) * %rdx: src (8 blocks) * %rcx: iv (big endian, 128bit) */ FRAME_BEGIN /* load IV and byteswap */ vmovdqu (%rcx), RA0; vmovdqa .Lbswap128_mask rRIP, RBSWAP; vpshufb RBSWAP, RA0, RTMP0; /* be => le */ vpcmpeqd RNOT, RNOT, RNOT; vpsrldq $8, RNOT, RNOT; /* low: -1, high: 0 */ #define inc_le128(x, minus_one, tmp) \ vpcmpeqq minus_one, x, tmp; \ vpsubq minus_one, x, x; \ vpslldq $8, tmp, tmp; \ vpsubq tmp, x, x; /* construct IVs */ inc_le128(RTMP0, RNOT, RTMP2); /* +1 */ vpshufb RBSWAP, RTMP0, RA1; inc_le128(RTMP0, RNOT, RTMP2); /* +2 */ vpshufb RBSWAP, RTMP0, RA2; inc_le128(RTMP0, RNOT, RTMP2); /* +3 */ vpshufb RBSWAP, RTMP0, RA3; inc_le128(RTMP0, RNOT, RTMP2); /* +4 */ vpshufb RBSWAP, RTMP0, RB0; inc_le128(RTMP0, RNOT, RTMP2); /* +5 */ vpshufb RBSWAP, RTMP0, RB1; inc_le128(RTMP0, RNOT, RTMP2); /* +6 */ vpshufb RBSWAP, RTMP0, RB2; inc_le128(RTMP0, RNOT, RTMP2); /* +7 */ vpshufb RBSWAP, RTMP0, RB3; inc_le128(RTMP0, RNOT, RTMP2); /* +8 */ vpshufb RBSWAP, RTMP0, RTMP1; /* store new IV */ vmovdqu RTMP1, (%rcx); call __sm4_crypt_blk8; vpxor (0 * 16)(%rdx), RA0, RA0; vpxor (1 * 16)(%rdx), RA1, RA1; vpxor (2 * 16)(%rdx), RA2, RA2; vpxor (3 * 16)(%rdx), RA3, RA3; vpxor (4 * 16)(%rdx), RB0, RB0; vpxor (5 * 16)(%rdx), RB1, RB1; vpxor (6 * 16)(%rdx), RB2, RB2; vpxor (7 * 16)(%rdx), RB3, RB3; vmovdqu RA0, (0 * 16)(%rsi); vmovdqu RA1, (1 * 16)(%rsi); vmovdqu RA2, (2 * 16)(%rsi); vmovdqu RA3, (3 * 16)(%rsi); vmovdqu RB0, (4 * 16)(%rsi); vmovdqu RB1, (5 * 16)(%rsi); vmovdqu RB2, (6 * 16)(%rsi); vmovdqu RB3, (7 * 16)(%rsi); vzeroall; FRAME_END RET; SYM_FUNC_END(sm4_aesni_avx_ctr_enc_blk8) /* * void sm4_aesni_avx_cbc_dec_blk8(const u32 *rk, u8 *dst, * const u8 *src, u8 *iv) */ .align 8 SYM_TYPED_FUNC_START(sm4_aesni_avx_cbc_dec_blk8) /* input: * %rdi: round key array, CTX * %rsi: dst (8 blocks) * %rdx: src (8 blocks) * %rcx: iv */ FRAME_BEGIN vmovdqu (0 * 16)(%rdx), RA0; vmovdqu (1 * 16)(%rdx), RA1; vmovdqu (2 * 16)(%rdx), RA2; vmovdqu (3 * 16)(%rdx), RA3; vmovdqu (4 * 16)(%rdx), RB0; vmovdqu (5 * 16)(%rdx), RB1; vmovdqu (6 * 16)(%rdx), RB2; vmovdqu (7 * 16)(%rdx), RB3; call 
__sm4_crypt_blk8; vmovdqu (7 * 16)(%rdx), RNOT; vpxor (%rcx), RA0, RA0; vpxor (0 * 16)(%rdx), RA1, RA1; vpxor (1 * 16)(%rdx), RA2, RA2; vpxor (2 * 16)(%rdx), RA3, RA3; vpxor (3 * 16)(%rdx), RB0, RB0; vpxor (4 * 16)(%rdx), RB1, RB1; vpxor (5 * 16)(%rdx), RB2, RB2; vpxor (6 * 16)(%rdx), RB3, RB3; vmovdqu RNOT, (%rcx); /* store new IV */ vmovdqu RA0, (0 * 16)(%rsi); vmovdqu RA1, (1 * 16)(%rsi); vmovdqu RA2, (2 * 16)(%rsi); vmovdqu RA3, (3 * 16)(%rsi); vmovdqu RB0, (4 * 16)(%rsi); vmovdqu RB1, (5 * 16)(%rsi); vmovdqu RB2, (6 * 16)(%rsi); vmovdqu RB3, (7 * 16)(%rsi); vzeroall; FRAME_END RET; SYM_FUNC_END(sm4_aesni_avx_cbc_dec_blk8) /* * void sm4_aesni_avx_cfb_dec_blk8(const u32 *rk, u8 *dst, * const u8 *src, u8 *iv) */ .align 8 SYM_TYPED_FUNC_START(sm4_aesni_avx_cfb_dec_blk8) /* input: * %rdi: round key array, CTX * %rsi: dst (8 blocks) * %rdx: src (8 blocks) * %rcx: iv */ FRAME_BEGIN /* Load input */ vmovdqu (%rcx), RA0; vmovdqu 0 * 16(%rdx), RA1; vmovdqu 1 * 16(%rdx), RA2; vmovdqu 2 * 16(%rdx), RA3; vmovdqu 3 * 16(%rdx), RB0; vmovdqu 4 * 16(%rdx), RB1; vmovdqu 5 * 16(%rdx), RB2; vmovdqu 6 * 16(%rdx), RB3; /* Update IV */ vmovdqu 7 * 16(%rdx), RNOT; vmovdqu RNOT, (%rcx); call __sm4_crypt_blk8; vpxor (0 * 16)(%rdx), RA0, RA0; vpxor (1 * 16)(%rdx), RA1, RA1; vpxor (2 * 16)(%rdx), RA2, RA2; vpxor (3 * 16)(%rdx), RA3, RA3; vpxor (4 * 16)(%rdx), RB0, RB0; vpxor (5 * 16)(%rdx), RB1, RB1; vpxor (6 * 16)(%rdx), RB2, RB2; vpxor (7 * 16)(%rdx), RB3, RB3; vmovdqu RA0, (0 * 16)(%rsi); vmovdqu RA1, (1 * 16)(%rsi); vmovdqu RA2, (2 * 16)(%rsi); vmovdqu RA3, (3 * 16)(%rsi); vmovdqu RB0, (4 * 16)(%rsi); vmovdqu RB1, (5 * 16)(%rsi); vmovdqu RB2, (6 * 16)(%rsi); vmovdqu RB3, (7 * 16)(%rsi); vzeroall; FRAME_END RET; SYM_FUNC_END(sm4_aesni_avx_cfb_dec_blk8)
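
The ROUND macros in this file compute x = s1 ^ s2 ^ s3 ^ rk, push x through the SM4 S-box (via the pre/post affine transforms wrapped around vaesenclast), and then apply the linear diffusion using the byte rotations from the .Linv_shift_row* tables plus a single shift-by-2. A small C sketch of just that linear step, with rol32/sm4_L/sm4_L_factored as throwaway names invented for this note, shows why the factored form used by the assembly matches the textbook definition:

#include <stdint.h>
#include <stdio.h>

static uint32_t rol32(uint32_t v, int n)
{
        return (v << n) | (v >> (32 - n));
}

/* Textbook SM4 linear diffusion L(b). */
static uint32_t sm4_L(uint32_t b)
{
        return b ^ rol32(b, 2) ^ rol32(b, 10) ^ rol32(b, 18) ^ rol32(b, 24);
}

/*
 * Factored form used by the vector code: rotations by 8, 16 and 24 are byte
 * permutations and come from vpshufb tables, and the remaining rotate-by-2
 * is applied once to the folded value (vpslld $2 / vpsrld $30).
 */
static uint32_t sm4_L_factored(uint32_t b)
{
        uint32_t t = b ^ rol32(b, 8) ^ rol32(b, 16);

        return b ^ rol32(b, 24) ^ rol32(t, 2);
}

int main(void)
{
        uint32_t v = 0x01234567;
        int i, ok = 1;

        for (i = 0; i < 64; i++) {
                if (sm4_L(v) != sm4_L_factored(v))
                        ok = 0;
                v = v * 2654435761u + 1;        /* cheap test-value generator */
        }
        printf("factored form matches textbook L: %s\n", ok ? "yes" : "no");
        return 0;
}

Because rotation is linear over XOR, rol(x ^ rol(x,8) ^ rol(x,16), 2) equals rol(x,2) ^ rol(x,10) ^ rol(x,18), which is exactly the intermediate the per-line comments above track step by step.
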
aixcc-public/challenge-001-exemplar-source
10,654
arch/x86/crypto/sha256_ni_asm.S
/* * Intel SHA Extensions optimized implementation of a SHA-256 update function * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * Contact Information: * Sean Gulley <sean.m.gulley@intel.com> * Tim Chen <tim.c.chen@linux.intel.com> * * BSD LICENSE * * Copyright(c) 2015 Intel Corporation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #include <linux/linkage.h> #include <linux/cfi_types.h> #define DIGEST_PTR %rdi /* 1st arg */ #define DATA_PTR %rsi /* 2nd arg */ #define NUM_BLKS %rdx /* 3rd arg */ #define SHA256CONSTANTS %rax #define MSG %xmm0 #define STATE0 %xmm1 #define STATE1 %xmm2 #define MSGTMP0 %xmm3 #define MSGTMP1 %xmm4 #define MSGTMP2 %xmm5 #define MSGTMP3 %xmm6 #define MSGTMP4 %xmm7 #define SHUF_MASK %xmm8 #define ABEF_SAVE %xmm9 #define CDGH_SAVE %xmm10 /* * Intel SHA Extensions optimized implementation of a SHA-256 update function * * The function takes a pointer to the current hash values, a pointer to the * input data, and a number of 64 byte blocks to process. Once all blocks have * been processed, the digest pointer is updated with the resulting hash value. * The function only processes complete blocks, there is no functionality to * store partial blocks. All message padding and hash value initialization must * be done outside the update function. * * The indented lines in the loop are instructions related to rounds processing. * The non-indented lines are instructions related to the message schedule. 
* * void sha256_ni_transform(uint32_t *digest, const void *data, uint32_t numBlocks); * digest : pointer to digest * data: pointer to input data * numBlocks: Number of blocks to process */ .text .align 32 SYM_TYPED_FUNC_START(sha256_ni_transform) shl $6, NUM_BLKS /* convert to bytes */ jz .Ldone_hash add DATA_PTR, NUM_BLKS /* pointer to end of data */ /* * load initial hash values * Need to reorder these appropriately * DCBA, HGFE -> ABEF, CDGH */ movdqu 0*16(DIGEST_PTR), STATE0 movdqu 1*16(DIGEST_PTR), STATE1 pshufd $0xB1, STATE0, STATE0 /* CDAB */ pshufd $0x1B, STATE1, STATE1 /* EFGH */ movdqa STATE0, MSGTMP4 palignr $8, STATE1, STATE0 /* ABEF */ pblendw $0xF0, MSGTMP4, STATE1 /* CDGH */ movdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), SHUF_MASK lea K256(%rip), SHA256CONSTANTS .Lloop0: /* Save hash values for addition after rounds */ movdqa STATE0, ABEF_SAVE movdqa STATE1, CDGH_SAVE /* Rounds 0-3 */ movdqu 0*16(DATA_PTR), MSG pshufb SHUF_MASK, MSG movdqa MSG, MSGTMP0 paddd 0*16(SHA256CONSTANTS), MSG sha256rnds2 STATE0, STATE1 pshufd $0x0E, MSG, MSG sha256rnds2 STATE1, STATE0 /* Rounds 4-7 */ movdqu 1*16(DATA_PTR), MSG pshufb SHUF_MASK, MSG movdqa MSG, MSGTMP1 paddd 1*16(SHA256CONSTANTS), MSG sha256rnds2 STATE0, STATE1 pshufd $0x0E, MSG, MSG sha256rnds2 STATE1, STATE0 sha256msg1 MSGTMP1, MSGTMP0 /* Rounds 8-11 */ movdqu 2*16(DATA_PTR), MSG pshufb SHUF_MASK, MSG movdqa MSG, MSGTMP2 paddd 2*16(SHA256CONSTANTS), MSG sha256rnds2 STATE0, STATE1 pshufd $0x0E, MSG, MSG sha256rnds2 STATE1, STATE0 sha256msg1 MSGTMP2, MSGTMP1 /* Rounds 12-15 */ movdqu 3*16(DATA_PTR), MSG pshufb SHUF_MASK, MSG movdqa MSG, MSGTMP3 paddd 3*16(SHA256CONSTANTS), MSG sha256rnds2 STATE0, STATE1 movdqa MSGTMP3, MSGTMP4 palignr $4, MSGTMP2, MSGTMP4 paddd MSGTMP4, MSGTMP0 sha256msg2 MSGTMP3, MSGTMP0 pshufd $0x0E, MSG, MSG sha256rnds2 STATE1, STATE0 sha256msg1 MSGTMP3, MSGTMP2 /* Rounds 16-19 */ movdqa MSGTMP0, MSG paddd 4*16(SHA256CONSTANTS), MSG sha256rnds2 STATE0, STATE1 movdqa MSGTMP0, MSGTMP4 palignr $4, MSGTMP3, MSGTMP4 paddd MSGTMP4, MSGTMP1 sha256msg2 MSGTMP0, MSGTMP1 pshufd $0x0E, MSG, MSG sha256rnds2 STATE1, STATE0 sha256msg1 MSGTMP0, MSGTMP3 /* Rounds 20-23 */ movdqa MSGTMP1, MSG paddd 5*16(SHA256CONSTANTS), MSG sha256rnds2 STATE0, STATE1 movdqa MSGTMP1, MSGTMP4 palignr $4, MSGTMP0, MSGTMP4 paddd MSGTMP4, MSGTMP2 sha256msg2 MSGTMP1, MSGTMP2 pshufd $0x0E, MSG, MSG sha256rnds2 STATE1, STATE0 sha256msg1 MSGTMP1, MSGTMP0 /* Rounds 24-27 */ movdqa MSGTMP2, MSG paddd 6*16(SHA256CONSTANTS), MSG sha256rnds2 STATE0, STATE1 movdqa MSGTMP2, MSGTMP4 palignr $4, MSGTMP1, MSGTMP4 paddd MSGTMP4, MSGTMP3 sha256msg2 MSGTMP2, MSGTMP3 pshufd $0x0E, MSG, MSG sha256rnds2 STATE1, STATE0 sha256msg1 MSGTMP2, MSGTMP1 /* Rounds 28-31 */ movdqa MSGTMP3, MSG paddd 7*16(SHA256CONSTANTS), MSG sha256rnds2 STATE0, STATE1 movdqa MSGTMP3, MSGTMP4 palignr $4, MSGTMP2, MSGTMP4 paddd MSGTMP4, MSGTMP0 sha256msg2 MSGTMP3, MSGTMP0 pshufd $0x0E, MSG, MSG sha256rnds2 STATE1, STATE0 sha256msg1 MSGTMP3, MSGTMP2 /* Rounds 32-35 */ movdqa MSGTMP0, MSG paddd 8*16(SHA256CONSTANTS), MSG sha256rnds2 STATE0, STATE1 movdqa MSGTMP0, MSGTMP4 palignr $4, MSGTMP3, MSGTMP4 paddd MSGTMP4, MSGTMP1 sha256msg2 MSGTMP0, MSGTMP1 pshufd $0x0E, MSG, MSG sha256rnds2 STATE1, STATE0 sha256msg1 MSGTMP0, MSGTMP3 /* Rounds 36-39 */ movdqa MSGTMP1, MSG paddd 9*16(SHA256CONSTANTS), MSG sha256rnds2 STATE0, STATE1 movdqa MSGTMP1, MSGTMP4 palignr $4, MSGTMP0, MSGTMP4 paddd MSGTMP4, MSGTMP2 sha256msg2 MSGTMP1, MSGTMP2 pshufd $0x0E, MSG, MSG sha256rnds2 STATE1, STATE0 sha256msg1 MSGTMP1, MSGTMP0 /* 
Rounds 40-43 */ movdqa MSGTMP2, MSG paddd 10*16(SHA256CONSTANTS), MSG sha256rnds2 STATE0, STATE1 movdqa MSGTMP2, MSGTMP4 palignr $4, MSGTMP1, MSGTMP4 paddd MSGTMP4, MSGTMP3 sha256msg2 MSGTMP2, MSGTMP3 pshufd $0x0E, MSG, MSG sha256rnds2 STATE1, STATE0 sha256msg1 MSGTMP2, MSGTMP1 /* Rounds 44-47 */ movdqa MSGTMP3, MSG paddd 11*16(SHA256CONSTANTS), MSG sha256rnds2 STATE0, STATE1 movdqa MSGTMP3, MSGTMP4 palignr $4, MSGTMP2, MSGTMP4 paddd MSGTMP4, MSGTMP0 sha256msg2 MSGTMP3, MSGTMP0 pshufd $0x0E, MSG, MSG sha256rnds2 STATE1, STATE0 sha256msg1 MSGTMP3, MSGTMP2 /* Rounds 48-51 */ movdqa MSGTMP0, MSG paddd 12*16(SHA256CONSTANTS), MSG sha256rnds2 STATE0, STATE1 movdqa MSGTMP0, MSGTMP4 palignr $4, MSGTMP3, MSGTMP4 paddd MSGTMP4, MSGTMP1 sha256msg2 MSGTMP0, MSGTMP1 pshufd $0x0E, MSG, MSG sha256rnds2 STATE1, STATE0 sha256msg1 MSGTMP0, MSGTMP3 /* Rounds 52-55 */ movdqa MSGTMP1, MSG paddd 13*16(SHA256CONSTANTS), MSG sha256rnds2 STATE0, STATE1 movdqa MSGTMP1, MSGTMP4 palignr $4, MSGTMP0, MSGTMP4 paddd MSGTMP4, MSGTMP2 sha256msg2 MSGTMP1, MSGTMP2 pshufd $0x0E, MSG, MSG sha256rnds2 STATE1, STATE0 /* Rounds 56-59 */ movdqa MSGTMP2, MSG paddd 14*16(SHA256CONSTANTS), MSG sha256rnds2 STATE0, STATE1 movdqa MSGTMP2, MSGTMP4 palignr $4, MSGTMP1, MSGTMP4 paddd MSGTMP4, MSGTMP3 sha256msg2 MSGTMP2, MSGTMP3 pshufd $0x0E, MSG, MSG sha256rnds2 STATE1, STATE0 /* Rounds 60-63 */ movdqa MSGTMP3, MSG paddd 15*16(SHA256CONSTANTS), MSG sha256rnds2 STATE0, STATE1 pshufd $0x0E, MSG, MSG sha256rnds2 STATE1, STATE0 /* Add current hash values with previously saved */ paddd ABEF_SAVE, STATE0 paddd CDGH_SAVE, STATE1 /* Increment data pointer and loop if more to process */ add $64, DATA_PTR cmp NUM_BLKS, DATA_PTR jne .Lloop0 /* Write hash values back in the correct order */ pshufd $0x1B, STATE0, STATE0 /* FEBA */ pshufd $0xB1, STATE1, STATE1 /* DCHG */ movdqa STATE0, MSGTMP4 pblendw $0xF0, STATE1, STATE0 /* DCBA */ palignr $8, MSGTMP4, STATE1 /* HGFE */ movdqu STATE0, 0*16(DIGEST_PTR) movdqu STATE1, 1*16(DIGEST_PTR) .Ldone_hash: RET SYM_FUNC_END(sha256_ni_transform) .section .rodata.cst256.K256, "aM", @progbits, 256 .align 64 K256: .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .section .rodata.cst16.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 16 .align 16 PSHUFFLE_BYTE_FLIP_MASK: .octa 0x0c0d0e0f08090a0b0405060700010203
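
sha256_ni_transform interleaves the round computation (sha256rnds2) with the message schedule (sha256msg1/sha256msg2 plus the palignr/paddd glue). Roughly speaking, those instructions evaluate the FIPS 180-4 expansion recurrence four words at a time; a scalar sketch of that recurrence is shown below, where sigma0/sigma1/sha256_schedule are illustrative helper names for this note, not kernel API.

#include <stdint.h>
#include <stdio.h>

static uint32_t ror32(uint32_t v, int n)
{
        return (v >> n) | (v << (32 - n));
}

/* Small sigma functions from FIPS 180-4. */
static uint32_t sigma0(uint32_t x) { return ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3); }
static uint32_t sigma1(uint32_t x) { return ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10); }

/*
 * Expand one 64-byte block (given here as 16 words already byte-swapped to
 * host order, as pshufb SHUF_MASK does above) into the 64-entry schedule.
 * sha256msg1/sha256msg2 compute partial sums of this recurrence four words
 * at a time; the palignr/paddd glue supplies the W[t-7] addend.
 */
static void sha256_schedule(uint32_t w[64])
{
        int t;

        for (t = 16; t < 64; t++)
                w[t] = sigma1(w[t - 2]) + w[t - 7] + sigma0(w[t - 15]) + w[t - 16];
}

int main(void)
{
        uint32_t w[64] = { 0x61626380 };        /* padded single-block message "abc" */

        w[15] = 24;                             /* bit length of "abc" */
        sha256_schedule(w);
        printf("W[16] = %08x, W[63] = %08x\n", w[16], w[63]);
        return 0;
}

The point of the SHA extensions is that this expansion and the 64 rounds run four at a time in hardware, which is why the loop body above contains almost no scalar arithmetic.
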
aixcc-public/challenge-001-exemplar-source
15,737
arch/x86/crypto/sm4-aesni-avx2-asm_64.S
// SPDX-License-Identifier: GPL-2.0-or-later /* * SM4 Cipher Algorithm, AES-NI/AVX2 optimized. * as specified in * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html * * Copyright (C) 2018 Markku-Juhani O. Saarinen <mjos@iki.fi> * Copyright (C) 2020 Jussi Kivilinna <jussi.kivilinna@iki.fi> * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com> */ /* Based on SM4 AES-NI work by libgcrypt and Markku-Juhani O. Saarinen at: * https://github.com/mjosaarinen/sm4ni */ #include <linux/linkage.h> #include <linux/cfi_types.h> #include <asm/frame.h> #define rRIP (%rip) /* vector registers */ #define RX0 %ymm0 #define RX1 %ymm1 #define MASK_4BIT %ymm2 #define RTMP0 %ymm3 #define RTMP1 %ymm4 #define RTMP2 %ymm5 #define RTMP3 %ymm6 #define RTMP4 %ymm7 #define RA0 %ymm8 #define RA1 %ymm9 #define RA2 %ymm10 #define RA3 %ymm11 #define RB0 %ymm12 #define RB1 %ymm13 #define RB2 %ymm14 #define RB3 %ymm15 #define RNOT %ymm0 #define RBSWAP %ymm1 #define RX0x %xmm0 #define RX1x %xmm1 #define MASK_4BITx %xmm2 #define RNOTx %xmm0 #define RBSWAPx %xmm1 #define RTMP0x %xmm3 #define RTMP1x %xmm4 #define RTMP2x %xmm5 #define RTMP3x %xmm6 #define RTMP4x %xmm7 /* helper macros */ /* Transpose four 32-bit words between 128-bit vector lanes. */ #define transpose_4x4(x0, x1, x2, x3, t1, t2) \ vpunpckhdq x1, x0, t2; \ vpunpckldq x1, x0, x0; \ \ vpunpckldq x3, x2, t1; \ vpunpckhdq x3, x2, x2; \ \ vpunpckhqdq t1, x0, x1; \ vpunpcklqdq t1, x0, x0; \ \ vpunpckhqdq x2, t2, x3; \ vpunpcklqdq x2, t2, x2; /* post-SubByte transform. */ #define transform_pre(x, lo_t, hi_t, mask4bit, tmp0) \ vpand x, mask4bit, tmp0; \ vpandn x, mask4bit, x; \ vpsrld $4, x, x; \ \ vpshufb tmp0, lo_t, tmp0; \ vpshufb x, hi_t, x; \ vpxor tmp0, x, x; /* post-SubByte transform. Note: x has been XOR'ed with mask4bit by * 'vaeslastenc' instruction. */ #define transform_post(x, lo_t, hi_t, mask4bit, tmp0) \ vpandn mask4bit, x, tmp0; \ vpsrld $4, x, x; \ vpand x, mask4bit, x; \ \ vpshufb tmp0, lo_t, tmp0; \ vpshufb x, hi_t, x; \ vpxor tmp0, x, x; .section .rodata.cst16, "aM", @progbits, 16 .align 16 /* * Following four affine transform look-up tables are from work by * Markku-Juhani O. Saarinen, at https://github.com/mjosaarinen/sm4ni * * These allow exposing SM4 S-Box from AES SubByte. */ /* pre-SubByte affine transform, from SM4 field to AES field. */ .Lpre_tf_lo_s: .quad 0x9197E2E474720701, 0xC7C1B4B222245157 .Lpre_tf_hi_s: .quad 0xE240AB09EB49A200, 0xF052B91BF95BB012 /* post-SubByte affine transform, from AES field to SM4 field. 
*/ .Lpost_tf_lo_s: .quad 0x5B67F2CEA19D0834, 0xEDD14478172BBE82 .Lpost_tf_hi_s: .quad 0xAE7201DD73AFDC00, 0x11CDBE62CC1063BF /* For isolating SubBytes from AESENCLAST, inverse shift row */ .Linv_shift_row: .byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b .byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03 /* Inverse shift row + Rotate left by 8 bits on 32-bit words with vpshufb */ .Linv_shift_row_rol_8: .byte 0x07, 0x00, 0x0d, 0x0a, 0x0b, 0x04, 0x01, 0x0e .byte 0x0f, 0x08, 0x05, 0x02, 0x03, 0x0c, 0x09, 0x06 /* Inverse shift row + Rotate left by 16 bits on 32-bit words with vpshufb */ .Linv_shift_row_rol_16: .byte 0x0a, 0x07, 0x00, 0x0d, 0x0e, 0x0b, 0x04, 0x01 .byte 0x02, 0x0f, 0x08, 0x05, 0x06, 0x03, 0x0c, 0x09 /* Inverse shift row + Rotate left by 24 bits on 32-bit words with vpshufb */ .Linv_shift_row_rol_24: .byte 0x0d, 0x0a, 0x07, 0x00, 0x01, 0x0e, 0x0b, 0x04 .byte 0x05, 0x02, 0x0f, 0x08, 0x09, 0x06, 0x03, 0x0c /* For CTR-mode IV byteswap */ .Lbswap128_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 /* For input word byte-swap */ .Lbswap32_mask: .byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 .align 4 /* 4-bit mask */ .L0f0f0f0f: .long 0x0f0f0f0f /* 12 bytes, only for padding */ .Lpadding_deadbeef: .long 0xdeadbeef, 0xdeadbeef, 0xdeadbeef .text .align 16 .align 8 SYM_FUNC_START_LOCAL(__sm4_crypt_blk16) /* input: * %rdi: round key array, CTX * RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel * plaintext blocks * output: * RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel * ciphertext blocks */ FRAME_BEGIN vbroadcasti128 .Lbswap32_mask rRIP, RTMP2; vpshufb RTMP2, RA0, RA0; vpshufb RTMP2, RA1, RA1; vpshufb RTMP2, RA2, RA2; vpshufb RTMP2, RA3, RA3; vpshufb RTMP2, RB0, RB0; vpshufb RTMP2, RB1, RB1; vpshufb RTMP2, RB2, RB2; vpshufb RTMP2, RB3, RB3; vpbroadcastd .L0f0f0f0f rRIP, MASK_4BIT; transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1); transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1); #define ROUND(round, s0, s1, s2, s3, r0, r1, r2, r3) \ vpbroadcastd (4*(round))(%rdi), RX0; \ vbroadcasti128 .Lpre_tf_lo_s rRIP, RTMP4; \ vbroadcasti128 .Lpre_tf_hi_s rRIP, RTMP1; \ vmovdqa RX0, RX1; \ vpxor s1, RX0, RX0; \ vpxor s2, RX0, RX0; \ vpxor s3, RX0, RX0; /* s1 ^ s2 ^ s3 ^ rk */ \ vbroadcasti128 .Lpost_tf_lo_s rRIP, RTMP2; \ vbroadcasti128 .Lpost_tf_hi_s rRIP, RTMP3; \ vpxor r1, RX1, RX1; \ vpxor r2, RX1, RX1; \ vpxor r3, RX1, RX1; /* r1 ^ r2 ^ r3 ^ rk */ \ \ /* sbox, non-linear part */ \ transform_pre(RX0, RTMP4, RTMP1, MASK_4BIT, RTMP0); \ transform_pre(RX1, RTMP4, RTMP1, MASK_4BIT, RTMP0); \ vextracti128 $1, RX0, RTMP4x; \ vextracti128 $1, RX1, RTMP0x; \ vaesenclast MASK_4BITx, RX0x, RX0x; \ vaesenclast MASK_4BITx, RTMP4x, RTMP4x; \ vaesenclast MASK_4BITx, RX1x, RX1x; \ vaesenclast MASK_4BITx, RTMP0x, RTMP0x; \ vinserti128 $1, RTMP4x, RX0, RX0; \ vbroadcasti128 .Linv_shift_row rRIP, RTMP4; \ vinserti128 $1, RTMP0x, RX1, RX1; \ transform_post(RX0, RTMP2, RTMP3, MASK_4BIT, RTMP0); \ transform_post(RX1, RTMP2, RTMP3, MASK_4BIT, RTMP0); \ \ /* linear part */ \ vpshufb RTMP4, RX0, RTMP0; \ vpxor RTMP0, s0, s0; /* s0 ^ x */ \ vpshufb RTMP4, RX1, RTMP2; \ vbroadcasti128 .Linv_shift_row_rol_8 rRIP, RTMP4; \ vpxor RTMP2, r0, r0; /* r0 ^ x */ \ vpshufb RTMP4, RX0, RTMP1; \ vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) */ \ vpshufb RTMP4, RX1, RTMP3; \ vbroadcasti128 .Linv_shift_row_rol_16 rRIP, RTMP4; \ vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) */ \ vpshufb RTMP4, RX0, RTMP1; \ vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) ^ rol(x,16) */ \ vpshufb RTMP4, RX1, RTMP3; 
\ vbroadcasti128 .Linv_shift_row_rol_24 rRIP, RTMP4; \ vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) ^ rol(x,16) */ \ vpshufb RTMP4, RX0, RTMP1; \ vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,24) */ \ vpslld $2, RTMP0, RTMP1; \ vpsrld $30, RTMP0, RTMP0; \ vpxor RTMP0, s0, s0; \ /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \ vpxor RTMP1, s0, s0; \ vpshufb RTMP4, RX1, RTMP3; \ vpxor RTMP3, r0, r0; /* r0 ^ x ^ rol(x,24) */ \ vpslld $2, RTMP2, RTMP3; \ vpsrld $30, RTMP2, RTMP2; \ vpxor RTMP2, r0, r0; \ /* r0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \ vpxor RTMP3, r0, r0; leaq (32*4)(%rdi), %rax; .align 16 .Lroundloop_blk8: ROUND(0, RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3); ROUND(1, RA1, RA2, RA3, RA0, RB1, RB2, RB3, RB0); ROUND(2, RA2, RA3, RA0, RA1, RB2, RB3, RB0, RB1); ROUND(3, RA3, RA0, RA1, RA2, RB3, RB0, RB1, RB2); leaq (4*4)(%rdi), %rdi; cmpq %rax, %rdi; jne .Lroundloop_blk8; #undef ROUND vbroadcasti128 .Lbswap128_mask rRIP, RTMP2; transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1); transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1); vpshufb RTMP2, RA0, RA0; vpshufb RTMP2, RA1, RA1; vpshufb RTMP2, RA2, RA2; vpshufb RTMP2, RA3, RA3; vpshufb RTMP2, RB0, RB0; vpshufb RTMP2, RB1, RB1; vpshufb RTMP2, RB2, RB2; vpshufb RTMP2, RB3, RB3; FRAME_END RET; SYM_FUNC_END(__sm4_crypt_blk16) #define inc_le128(x, minus_one, tmp) \ vpcmpeqq minus_one, x, tmp; \ vpsubq minus_one, x, x; \ vpslldq $8, tmp, tmp; \ vpsubq tmp, x, x; /* * void sm4_aesni_avx2_ctr_enc_blk16(const u32 *rk, u8 *dst, * const u8 *src, u8 *iv) */ .align 8 SYM_TYPED_FUNC_START(sm4_aesni_avx2_ctr_enc_blk16) /* input: * %rdi: round key array, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: iv (big endian, 128bit) */ FRAME_BEGIN movq 8(%rcx), %rax; bswapq %rax; vzeroupper; vbroadcasti128 .Lbswap128_mask rRIP, RTMP3; vpcmpeqd RNOT, RNOT, RNOT; vpsrldq $8, RNOT, RNOT; /* ab: -1:0 ; cd: -1:0 */ vpaddq RNOT, RNOT, RTMP2; /* ab: -2:0 ; cd: -2:0 */ /* load IV and byteswap */ vmovdqu (%rcx), RTMP4x; vpshufb RTMP3x, RTMP4x, RTMP4x; vmovdqa RTMP4x, RTMP0x; inc_le128(RTMP4x, RNOTx, RTMP1x); vinserti128 $1, RTMP4x, RTMP0, RTMP0; vpshufb RTMP3, RTMP0, RA0; /* +1 ; +0 */ /* check need for handling 64-bit overflow and carry */ cmpq $(0xffffffffffffffff - 16), %rax; ja .Lhandle_ctr_carry; /* construct IVs */ vpsubq RTMP2, RTMP0, RTMP0; /* +3 ; +2 */ vpshufb RTMP3, RTMP0, RA1; vpsubq RTMP2, RTMP0, RTMP0; /* +5 ; +4 */ vpshufb RTMP3, RTMP0, RA2; vpsubq RTMP2, RTMP0, RTMP0; /* +7 ; +6 */ vpshufb RTMP3, RTMP0, RA3; vpsubq RTMP2, RTMP0, RTMP0; /* +9 ; +8 */ vpshufb RTMP3, RTMP0, RB0; vpsubq RTMP2, RTMP0, RTMP0; /* +11 ; +10 */ vpshufb RTMP3, RTMP0, RB1; vpsubq RTMP2, RTMP0, RTMP0; /* +13 ; +12 */ vpshufb RTMP3, RTMP0, RB2; vpsubq RTMP2, RTMP0, RTMP0; /* +15 ; +14 */ vpshufb RTMP3, RTMP0, RB3; vpsubq RTMP2, RTMP0, RTMP0; /* +16 */ vpshufb RTMP3x, RTMP0x, RTMP0x; jmp .Lctr_carry_done; .Lhandle_ctr_carry: /* construct IVs */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RA1; /* +3 ; +2 */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RA2; /* +5 ; +4 */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RA3; /* +7 ; +6 */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RB0; /* +9 ; +8 */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RB1; /* +11 ; +10 */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RB2; /* +13 ; +12 */ 
inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RB3; /* +15 ; +14 */ inc_le128(RTMP0, RNOT, RTMP1); vextracti128 $1, RTMP0, RTMP0x; vpshufb RTMP3x, RTMP0x, RTMP0x; /* +16 */ .align 4 .Lctr_carry_done: /* store new IV */ vmovdqu RTMP0x, (%rcx); call __sm4_crypt_blk16; vpxor (0 * 32)(%rdx), RA0, RA0; vpxor (1 * 32)(%rdx), RA1, RA1; vpxor (2 * 32)(%rdx), RA2, RA2; vpxor (3 * 32)(%rdx), RA3, RA3; vpxor (4 * 32)(%rdx), RB0, RB0; vpxor (5 * 32)(%rdx), RB1, RB1; vpxor (6 * 32)(%rdx), RB2, RB2; vpxor (7 * 32)(%rdx), RB3, RB3; vmovdqu RA0, (0 * 32)(%rsi); vmovdqu RA1, (1 * 32)(%rsi); vmovdqu RA2, (2 * 32)(%rsi); vmovdqu RA3, (3 * 32)(%rsi); vmovdqu RB0, (4 * 32)(%rsi); vmovdqu RB1, (5 * 32)(%rsi); vmovdqu RB2, (6 * 32)(%rsi); vmovdqu RB3, (7 * 32)(%rsi); vzeroall; FRAME_END RET; SYM_FUNC_END(sm4_aesni_avx2_ctr_enc_blk16) /* * void sm4_aesni_avx2_cbc_dec_blk16(const u32 *rk, u8 *dst, * const u8 *src, u8 *iv) */ .align 8 SYM_TYPED_FUNC_START(sm4_aesni_avx2_cbc_dec_blk16) /* input: * %rdi: round key array, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: iv */ FRAME_BEGIN vzeroupper; vmovdqu (0 * 32)(%rdx), RA0; vmovdqu (1 * 32)(%rdx), RA1; vmovdqu (2 * 32)(%rdx), RA2; vmovdqu (3 * 32)(%rdx), RA3; vmovdqu (4 * 32)(%rdx), RB0; vmovdqu (5 * 32)(%rdx), RB1; vmovdqu (6 * 32)(%rdx), RB2; vmovdqu (7 * 32)(%rdx), RB3; call __sm4_crypt_blk16; vmovdqu (%rcx), RNOTx; vinserti128 $1, (%rdx), RNOT, RNOT; vpxor RNOT, RA0, RA0; vpxor (0 * 32 + 16)(%rdx), RA1, RA1; vpxor (1 * 32 + 16)(%rdx), RA2, RA2; vpxor (2 * 32 + 16)(%rdx), RA3, RA3; vpxor (3 * 32 + 16)(%rdx), RB0, RB0; vpxor (4 * 32 + 16)(%rdx), RB1, RB1; vpxor (5 * 32 + 16)(%rdx), RB2, RB2; vpxor (6 * 32 + 16)(%rdx), RB3, RB3; vmovdqu (7 * 32 + 16)(%rdx), RNOTx; vmovdqu RNOTx, (%rcx); /* store new IV */ vmovdqu RA0, (0 * 32)(%rsi); vmovdqu RA1, (1 * 32)(%rsi); vmovdqu RA2, (2 * 32)(%rsi); vmovdqu RA3, (3 * 32)(%rsi); vmovdqu RB0, (4 * 32)(%rsi); vmovdqu RB1, (5 * 32)(%rsi); vmovdqu RB2, (6 * 32)(%rsi); vmovdqu RB3, (7 * 32)(%rsi); vzeroall; FRAME_END RET; SYM_FUNC_END(sm4_aesni_avx2_cbc_dec_blk16) /* * void sm4_aesni_avx2_cfb_dec_blk16(const u32 *rk, u8 *dst, * const u8 *src, u8 *iv) */ .align 8 SYM_TYPED_FUNC_START(sm4_aesni_avx2_cfb_dec_blk16) /* input: * %rdi: round key array, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: iv */ FRAME_BEGIN vzeroupper; /* Load input */ vmovdqu (%rcx), RNOTx; vinserti128 $1, (%rdx), RNOT, RA0; vmovdqu (0 * 32 + 16)(%rdx), RA1; vmovdqu (1 * 32 + 16)(%rdx), RA2; vmovdqu (2 * 32 + 16)(%rdx), RA3; vmovdqu (3 * 32 + 16)(%rdx), RB0; vmovdqu (4 * 32 + 16)(%rdx), RB1; vmovdqu (5 * 32 + 16)(%rdx), RB2; vmovdqu (6 * 32 + 16)(%rdx), RB3; /* Update IV */ vmovdqu (7 * 32 + 16)(%rdx), RNOTx; vmovdqu RNOTx, (%rcx); call __sm4_crypt_blk16; vpxor (0 * 32)(%rdx), RA0, RA0; vpxor (1 * 32)(%rdx), RA1, RA1; vpxor (2 * 32)(%rdx), RA2, RA2; vpxor (3 * 32)(%rdx), RA3, RA3; vpxor (4 * 32)(%rdx), RB0, RB0; vpxor (5 * 32)(%rdx), RB1, RB1; vpxor (6 * 32)(%rdx), RB2, RB2; vpxor (7 * 32)(%rdx), RB3, RB3; vmovdqu RA0, (0 * 32)(%rsi); vmovdqu RA1, (1 * 32)(%rsi); vmovdqu RA2, (2 * 32)(%rsi); vmovdqu RA3, (3 * 32)(%rsi); vmovdqu RB0, (4 * 32)(%rsi); vmovdqu RB1, (5 * 32)(%rsi); vmovdqu RB2, (6 * 32)(%rsi); vmovdqu RB3, (7 * 32)(%rsi); vzeroall; FRAME_END RET; SYM_FUNC_END(sm4_aesni_avx2_cfb_dec_blk16)
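
sm4_aesni_avx2_ctr_enc_blk16 builds sixteen counter blocks per call and keeps a fast path that only adjusts the low 64 bits of the big-endian IV; the full 128-bit increment path (.Lhandle_ctr_carry) is taken only when the cmpq against 0xffffffffffffffff - 16 indicates the low half would wrap. A scalar model of that decision follows, assuming a throwaway struct ctr128 with explicit hi/lo halves (invented for this sketch, not the kernel's be128 type).

#include <stdint.h>
#include <stdio.h>

struct ctr128 {
        uint64_t hi;    /* most-significant 64 bits of the big-endian IV */
        uint64_t lo;    /* least-significant 64 bits */
};

static void ctr_inc(struct ctr128 *ctr)
{
        if (++ctr->lo == 0)     /* carry into the high half on wrap */
                ctr->hi++;
}

/*
 * The fast path adds to the low qword only (vpsubq by a negative constant),
 * so it is valid only while all nblks increments fit without wrapping the
 * low 64 bits -- the same condition as the
 * "cmpq $(0xffffffffffffffff - 16), %rax; ja .Lhandle_ctr_carry" test.
 */
static int ctr_needs_carry_path(const struct ctr128 *ctr, unsigned int nblks)
{
        return ctr->lo > UINT64_MAX - nblks;
}

int main(void)
{
        struct ctr128 ctr = { .hi = 0, .lo = UINT64_MAX - 3 };
        int i;

        printf("carry path for 16 blocks: %d\n", ctr_needs_carry_path(&ctr, 16));
        for (i = 0; i < 16; i++)
                ctr_inc(&ctr);
        printf("after 16 increments: hi=%llu lo=%llu\n",
               (unsigned long long)ctr.hi, (unsigned long long)ctr.lo);
        return 0;
}

The slow path exists because vpsubq on the packed low quadwords cannot propagate a carry into the high quadwords, so a wrap has to fall back to per-IV inc_le128 steps.
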
aixcc-public/challenge-001-exemplar-source
20,441
arch/x86/crypto/chacha-avx512vl-x86_64.S
/* SPDX-License-Identifier: GPL-2.0+ */ /* * ChaCha 256-bit cipher algorithm, x64 AVX-512VL functions * * Copyright (C) 2018 Martin Willi */ #include <linux/linkage.h> .section .rodata.cst32.CTR2BL, "aM", @progbits, 32 .align 32 CTR2BL: .octa 0x00000000000000000000000000000000 .octa 0x00000000000000000000000000000001 .section .rodata.cst32.CTR4BL, "aM", @progbits, 32 .align 32 CTR4BL: .octa 0x00000000000000000000000000000002 .octa 0x00000000000000000000000000000003 .section .rodata.cst32.CTR8BL, "aM", @progbits, 32 .align 32 CTR8BL: .octa 0x00000003000000020000000100000000 .octa 0x00000007000000060000000500000004 .text SYM_FUNC_START(chacha_2block_xor_avx512vl) # %rdi: Input state matrix, s # %rsi: up to 2 data blocks output, o # %rdx: up to 2 data blocks input, i # %rcx: input/output length in bytes # %r8d: nrounds # This function encrypts two ChaCha blocks by loading the state # matrix twice across four AVX registers. It performs matrix operations # on four words in each matrix in parallel, but requires shuffling to # rearrange the words after each round. vzeroupper # x0..3[0-2] = s0..3 vbroadcasti128 0x00(%rdi),%ymm0 vbroadcasti128 0x10(%rdi),%ymm1 vbroadcasti128 0x20(%rdi),%ymm2 vbroadcasti128 0x30(%rdi),%ymm3 vpaddd CTR2BL(%rip),%ymm3,%ymm3 vmovdqa %ymm0,%ymm8 vmovdqa %ymm1,%ymm9 vmovdqa %ymm2,%ymm10 vmovdqa %ymm3,%ymm11 .Ldoubleround: # x0 += x1, x3 = rotl32(x3 ^ x0, 16) vpaddd %ymm1,%ymm0,%ymm0 vpxord %ymm0,%ymm3,%ymm3 vprold $16,%ymm3,%ymm3 # x2 += x3, x1 = rotl32(x1 ^ x2, 12) vpaddd %ymm3,%ymm2,%ymm2 vpxord %ymm2,%ymm1,%ymm1 vprold $12,%ymm1,%ymm1 # x0 += x1, x3 = rotl32(x3 ^ x0, 8) vpaddd %ymm1,%ymm0,%ymm0 vpxord %ymm0,%ymm3,%ymm3 vprold $8,%ymm3,%ymm3 # x2 += x3, x1 = rotl32(x1 ^ x2, 7) vpaddd %ymm3,%ymm2,%ymm2 vpxord %ymm2,%ymm1,%ymm1 vprold $7,%ymm1,%ymm1 # x1 = shuffle32(x1, MASK(0, 3, 2, 1)) vpshufd $0x39,%ymm1,%ymm1 # x2 = shuffle32(x2, MASK(1, 0, 3, 2)) vpshufd $0x4e,%ymm2,%ymm2 # x3 = shuffle32(x3, MASK(2, 1, 0, 3)) vpshufd $0x93,%ymm3,%ymm3 # x0 += x1, x3 = rotl32(x3 ^ x0, 16) vpaddd %ymm1,%ymm0,%ymm0 vpxord %ymm0,%ymm3,%ymm3 vprold $16,%ymm3,%ymm3 # x2 += x3, x1 = rotl32(x1 ^ x2, 12) vpaddd %ymm3,%ymm2,%ymm2 vpxord %ymm2,%ymm1,%ymm1 vprold $12,%ymm1,%ymm1 # x0 += x1, x3 = rotl32(x3 ^ x0, 8) vpaddd %ymm1,%ymm0,%ymm0 vpxord %ymm0,%ymm3,%ymm3 vprold $8,%ymm3,%ymm3 # x2 += x3, x1 = rotl32(x1 ^ x2, 7) vpaddd %ymm3,%ymm2,%ymm2 vpxord %ymm2,%ymm1,%ymm1 vprold $7,%ymm1,%ymm1 # x1 = shuffle32(x1, MASK(2, 1, 0, 3)) vpshufd $0x93,%ymm1,%ymm1 # x2 = shuffle32(x2, MASK(1, 0, 3, 2)) vpshufd $0x4e,%ymm2,%ymm2 # x3 = shuffle32(x3, MASK(0, 3, 2, 1)) vpshufd $0x39,%ymm3,%ymm3 sub $2,%r8d jnz .Ldoubleround # o0 = i0 ^ (x0 + s0) vpaddd %ymm8,%ymm0,%ymm7 cmp $0x10,%rcx jl .Lxorpart2 vpxord 0x00(%rdx),%xmm7,%xmm6 vmovdqu %xmm6,0x00(%rsi) vextracti128 $1,%ymm7,%xmm0 # o1 = i1 ^ (x1 + s1) vpaddd %ymm9,%ymm1,%ymm7 cmp $0x20,%rcx jl .Lxorpart2 vpxord 0x10(%rdx),%xmm7,%xmm6 vmovdqu %xmm6,0x10(%rsi) vextracti128 $1,%ymm7,%xmm1 # o2 = i2 ^ (x2 + s2) vpaddd %ymm10,%ymm2,%ymm7 cmp $0x30,%rcx jl .Lxorpart2 vpxord 0x20(%rdx),%xmm7,%xmm6 vmovdqu %xmm6,0x20(%rsi) vextracti128 $1,%ymm7,%xmm2 # o3 = i3 ^ (x3 + s3) vpaddd %ymm11,%ymm3,%ymm7 cmp $0x40,%rcx jl .Lxorpart2 vpxord 0x30(%rdx),%xmm7,%xmm6 vmovdqu %xmm6,0x30(%rsi) vextracti128 $1,%ymm7,%xmm3 # xor and write second block vmovdqa %xmm0,%xmm7 cmp $0x50,%rcx jl .Lxorpart2 vpxord 0x40(%rdx),%xmm7,%xmm6 vmovdqu %xmm6,0x40(%rsi) vmovdqa %xmm1,%xmm7 cmp $0x60,%rcx jl .Lxorpart2 vpxord 0x50(%rdx),%xmm7,%xmm6 vmovdqu %xmm6,0x50(%rsi) vmovdqa %xmm2,%xmm7 cmp 
$0x70,%rcx jl .Lxorpart2 vpxord 0x60(%rdx),%xmm7,%xmm6 vmovdqu %xmm6,0x60(%rsi) vmovdqa %xmm3,%xmm7 cmp $0x80,%rcx jl .Lxorpart2 vpxord 0x70(%rdx),%xmm7,%xmm6 vmovdqu %xmm6,0x70(%rsi) .Ldone2: vzeroupper RET .Lxorpart2: # xor remaining bytes from partial register into output mov %rcx,%rax and $0xf,%rcx jz .Ldone2 mov %rax,%r9 and $~0xf,%r9 mov $1,%rax shld %cl,%rax,%rax sub $1,%rax kmovq %rax,%k1 vmovdqu8 (%rdx,%r9),%xmm1{%k1}{z} vpxord %xmm7,%xmm1,%xmm1 vmovdqu8 %xmm1,(%rsi,%r9){%k1} jmp .Ldone2 SYM_FUNC_END(chacha_2block_xor_avx512vl) SYM_FUNC_START(chacha_4block_xor_avx512vl) # %rdi: Input state matrix, s # %rsi: up to 4 data blocks output, o # %rdx: up to 4 data blocks input, i # %rcx: input/output length in bytes # %r8d: nrounds # This function encrypts four ChaCha blocks by loading the state # matrix four times across eight AVX registers. It performs matrix # operations on four words in two matrices in parallel, sequentially # to the operations on the four words of the other two matrices. The # required word shuffling has a rather high latency, we can do the # arithmetic on two matrix-pairs without much slowdown. vzeroupper # x0..3[0-4] = s0..3 vbroadcasti128 0x00(%rdi),%ymm0 vbroadcasti128 0x10(%rdi),%ymm1 vbroadcasti128 0x20(%rdi),%ymm2 vbroadcasti128 0x30(%rdi),%ymm3 vmovdqa %ymm0,%ymm4 vmovdqa %ymm1,%ymm5 vmovdqa %ymm2,%ymm6 vmovdqa %ymm3,%ymm7 vpaddd CTR2BL(%rip),%ymm3,%ymm3 vpaddd CTR4BL(%rip),%ymm7,%ymm7 vmovdqa %ymm0,%ymm11 vmovdqa %ymm1,%ymm12 vmovdqa %ymm2,%ymm13 vmovdqa %ymm3,%ymm14 vmovdqa %ymm7,%ymm15 .Ldoubleround4: # x0 += x1, x3 = rotl32(x3 ^ x0, 16) vpaddd %ymm1,%ymm0,%ymm0 vpxord %ymm0,%ymm3,%ymm3 vprold $16,%ymm3,%ymm3 vpaddd %ymm5,%ymm4,%ymm4 vpxord %ymm4,%ymm7,%ymm7 vprold $16,%ymm7,%ymm7 # x2 += x3, x1 = rotl32(x1 ^ x2, 12) vpaddd %ymm3,%ymm2,%ymm2 vpxord %ymm2,%ymm1,%ymm1 vprold $12,%ymm1,%ymm1 vpaddd %ymm7,%ymm6,%ymm6 vpxord %ymm6,%ymm5,%ymm5 vprold $12,%ymm5,%ymm5 # x0 += x1, x3 = rotl32(x3 ^ x0, 8) vpaddd %ymm1,%ymm0,%ymm0 vpxord %ymm0,%ymm3,%ymm3 vprold $8,%ymm3,%ymm3 vpaddd %ymm5,%ymm4,%ymm4 vpxord %ymm4,%ymm7,%ymm7 vprold $8,%ymm7,%ymm7 # x2 += x3, x1 = rotl32(x1 ^ x2, 7) vpaddd %ymm3,%ymm2,%ymm2 vpxord %ymm2,%ymm1,%ymm1 vprold $7,%ymm1,%ymm1 vpaddd %ymm7,%ymm6,%ymm6 vpxord %ymm6,%ymm5,%ymm5 vprold $7,%ymm5,%ymm5 # x1 = shuffle32(x1, MASK(0, 3, 2, 1)) vpshufd $0x39,%ymm1,%ymm1 vpshufd $0x39,%ymm5,%ymm5 # x2 = shuffle32(x2, MASK(1, 0, 3, 2)) vpshufd $0x4e,%ymm2,%ymm2 vpshufd $0x4e,%ymm6,%ymm6 # x3 = shuffle32(x3, MASK(2, 1, 0, 3)) vpshufd $0x93,%ymm3,%ymm3 vpshufd $0x93,%ymm7,%ymm7 # x0 += x1, x3 = rotl32(x3 ^ x0, 16) vpaddd %ymm1,%ymm0,%ymm0 vpxord %ymm0,%ymm3,%ymm3 vprold $16,%ymm3,%ymm3 vpaddd %ymm5,%ymm4,%ymm4 vpxord %ymm4,%ymm7,%ymm7 vprold $16,%ymm7,%ymm7 # x2 += x3, x1 = rotl32(x1 ^ x2, 12) vpaddd %ymm3,%ymm2,%ymm2 vpxord %ymm2,%ymm1,%ymm1 vprold $12,%ymm1,%ymm1 vpaddd %ymm7,%ymm6,%ymm6 vpxord %ymm6,%ymm5,%ymm5 vprold $12,%ymm5,%ymm5 # x0 += x1, x3 = rotl32(x3 ^ x0, 8) vpaddd %ymm1,%ymm0,%ymm0 vpxord %ymm0,%ymm3,%ymm3 vprold $8,%ymm3,%ymm3 vpaddd %ymm5,%ymm4,%ymm4 vpxord %ymm4,%ymm7,%ymm7 vprold $8,%ymm7,%ymm7 # x2 += x3, x1 = rotl32(x1 ^ x2, 7) vpaddd %ymm3,%ymm2,%ymm2 vpxord %ymm2,%ymm1,%ymm1 vprold $7,%ymm1,%ymm1 vpaddd %ymm7,%ymm6,%ymm6 vpxord %ymm6,%ymm5,%ymm5 vprold $7,%ymm5,%ymm5 # x1 = shuffle32(x1, MASK(2, 1, 0, 3)) vpshufd $0x93,%ymm1,%ymm1 vpshufd $0x93,%ymm5,%ymm5 # x2 = shuffle32(x2, MASK(1, 0, 3, 2)) vpshufd $0x4e,%ymm2,%ymm2 vpshufd $0x4e,%ymm6,%ymm6 # x3 = shuffle32(x3, MASK(0, 3, 2, 1)) vpshufd $0x39,%ymm3,%ymm3 vpshufd 
$0x39,%ymm7,%ymm7 sub $2,%r8d jnz .Ldoubleround4 # o0 = i0 ^ (x0 + s0), first block vpaddd %ymm11,%ymm0,%ymm10 cmp $0x10,%rcx jl .Lxorpart4 vpxord 0x00(%rdx),%xmm10,%xmm9 vmovdqu %xmm9,0x00(%rsi) vextracti128 $1,%ymm10,%xmm0 # o1 = i1 ^ (x1 + s1), first block vpaddd %ymm12,%ymm1,%ymm10 cmp $0x20,%rcx jl .Lxorpart4 vpxord 0x10(%rdx),%xmm10,%xmm9 vmovdqu %xmm9,0x10(%rsi) vextracti128 $1,%ymm10,%xmm1 # o2 = i2 ^ (x2 + s2), first block vpaddd %ymm13,%ymm2,%ymm10 cmp $0x30,%rcx jl .Lxorpart4 vpxord 0x20(%rdx),%xmm10,%xmm9 vmovdqu %xmm9,0x20(%rsi) vextracti128 $1,%ymm10,%xmm2 # o3 = i3 ^ (x3 + s3), first block vpaddd %ymm14,%ymm3,%ymm10 cmp $0x40,%rcx jl .Lxorpart4 vpxord 0x30(%rdx),%xmm10,%xmm9 vmovdqu %xmm9,0x30(%rsi) vextracti128 $1,%ymm10,%xmm3 # xor and write second block vmovdqa %xmm0,%xmm10 cmp $0x50,%rcx jl .Lxorpart4 vpxord 0x40(%rdx),%xmm10,%xmm9 vmovdqu %xmm9,0x40(%rsi) vmovdqa %xmm1,%xmm10 cmp $0x60,%rcx jl .Lxorpart4 vpxord 0x50(%rdx),%xmm10,%xmm9 vmovdqu %xmm9,0x50(%rsi) vmovdqa %xmm2,%xmm10 cmp $0x70,%rcx jl .Lxorpart4 vpxord 0x60(%rdx),%xmm10,%xmm9 vmovdqu %xmm9,0x60(%rsi) vmovdqa %xmm3,%xmm10 cmp $0x80,%rcx jl .Lxorpart4 vpxord 0x70(%rdx),%xmm10,%xmm9 vmovdqu %xmm9,0x70(%rsi) # o0 = i0 ^ (x0 + s0), third block vpaddd %ymm11,%ymm4,%ymm10 cmp $0x90,%rcx jl .Lxorpart4 vpxord 0x80(%rdx),%xmm10,%xmm9 vmovdqu %xmm9,0x80(%rsi) vextracti128 $1,%ymm10,%xmm4 # o1 = i1 ^ (x1 + s1), third block vpaddd %ymm12,%ymm5,%ymm10 cmp $0xa0,%rcx jl .Lxorpart4 vpxord 0x90(%rdx),%xmm10,%xmm9 vmovdqu %xmm9,0x90(%rsi) vextracti128 $1,%ymm10,%xmm5 # o2 = i2 ^ (x2 + s2), third block vpaddd %ymm13,%ymm6,%ymm10 cmp $0xb0,%rcx jl .Lxorpart4 vpxord 0xa0(%rdx),%xmm10,%xmm9 vmovdqu %xmm9,0xa0(%rsi) vextracti128 $1,%ymm10,%xmm6 # o3 = i3 ^ (x3 + s3), third block vpaddd %ymm15,%ymm7,%ymm10 cmp $0xc0,%rcx jl .Lxorpart4 vpxord 0xb0(%rdx),%xmm10,%xmm9 vmovdqu %xmm9,0xb0(%rsi) vextracti128 $1,%ymm10,%xmm7 # xor and write fourth block vmovdqa %xmm4,%xmm10 cmp $0xd0,%rcx jl .Lxorpart4 vpxord 0xc0(%rdx),%xmm10,%xmm9 vmovdqu %xmm9,0xc0(%rsi) vmovdqa %xmm5,%xmm10 cmp $0xe0,%rcx jl .Lxorpart4 vpxord 0xd0(%rdx),%xmm10,%xmm9 vmovdqu %xmm9,0xd0(%rsi) vmovdqa %xmm6,%xmm10 cmp $0xf0,%rcx jl .Lxorpart4 vpxord 0xe0(%rdx),%xmm10,%xmm9 vmovdqu %xmm9,0xe0(%rsi) vmovdqa %xmm7,%xmm10 cmp $0x100,%rcx jl .Lxorpart4 vpxord 0xf0(%rdx),%xmm10,%xmm9 vmovdqu %xmm9,0xf0(%rsi) .Ldone4: vzeroupper RET .Lxorpart4: # xor remaining bytes from partial register into output mov %rcx,%rax and $0xf,%rcx jz .Ldone4 mov %rax,%r9 and $~0xf,%r9 mov $1,%rax shld %cl,%rax,%rax sub $1,%rax kmovq %rax,%k1 vmovdqu8 (%rdx,%r9),%xmm1{%k1}{z} vpxord %xmm10,%xmm1,%xmm1 vmovdqu8 %xmm1,(%rsi,%r9){%k1} jmp .Ldone4 SYM_FUNC_END(chacha_4block_xor_avx512vl) SYM_FUNC_START(chacha_8block_xor_avx512vl) # %rdi: Input state matrix, s # %rsi: up to 8 data blocks output, o # %rdx: up to 8 data blocks input, i # %rcx: input/output length in bytes # %r8d: nrounds # This function encrypts eight consecutive ChaCha blocks by loading # the state matrix in AVX registers eight times. Compared to AVX2, this # mostly benefits from the new rotate instructions in VL and the # additional registers. 
vzeroupper # x0..15[0-7] = s[0..15] vpbroadcastd 0x00(%rdi),%ymm0 vpbroadcastd 0x04(%rdi),%ymm1 vpbroadcastd 0x08(%rdi),%ymm2 vpbroadcastd 0x0c(%rdi),%ymm3 vpbroadcastd 0x10(%rdi),%ymm4 vpbroadcastd 0x14(%rdi),%ymm5 vpbroadcastd 0x18(%rdi),%ymm6 vpbroadcastd 0x1c(%rdi),%ymm7 vpbroadcastd 0x20(%rdi),%ymm8 vpbroadcastd 0x24(%rdi),%ymm9 vpbroadcastd 0x28(%rdi),%ymm10 vpbroadcastd 0x2c(%rdi),%ymm11 vpbroadcastd 0x30(%rdi),%ymm12 vpbroadcastd 0x34(%rdi),%ymm13 vpbroadcastd 0x38(%rdi),%ymm14 vpbroadcastd 0x3c(%rdi),%ymm15 # x12 += counter values 0-3 vpaddd CTR8BL(%rip),%ymm12,%ymm12 vmovdqa64 %ymm0,%ymm16 vmovdqa64 %ymm1,%ymm17 vmovdqa64 %ymm2,%ymm18 vmovdqa64 %ymm3,%ymm19 vmovdqa64 %ymm4,%ymm20 vmovdqa64 %ymm5,%ymm21 vmovdqa64 %ymm6,%ymm22 vmovdqa64 %ymm7,%ymm23 vmovdqa64 %ymm8,%ymm24 vmovdqa64 %ymm9,%ymm25 vmovdqa64 %ymm10,%ymm26 vmovdqa64 %ymm11,%ymm27 vmovdqa64 %ymm12,%ymm28 vmovdqa64 %ymm13,%ymm29 vmovdqa64 %ymm14,%ymm30 vmovdqa64 %ymm15,%ymm31 .Ldoubleround8: # x0 += x4, x12 = rotl32(x12 ^ x0, 16) vpaddd %ymm0,%ymm4,%ymm0 vpxord %ymm0,%ymm12,%ymm12 vprold $16,%ymm12,%ymm12 # x1 += x5, x13 = rotl32(x13 ^ x1, 16) vpaddd %ymm1,%ymm5,%ymm1 vpxord %ymm1,%ymm13,%ymm13 vprold $16,%ymm13,%ymm13 # x2 += x6, x14 = rotl32(x14 ^ x2, 16) vpaddd %ymm2,%ymm6,%ymm2 vpxord %ymm2,%ymm14,%ymm14 vprold $16,%ymm14,%ymm14 # x3 += x7, x15 = rotl32(x15 ^ x3, 16) vpaddd %ymm3,%ymm7,%ymm3 vpxord %ymm3,%ymm15,%ymm15 vprold $16,%ymm15,%ymm15 # x8 += x12, x4 = rotl32(x4 ^ x8, 12) vpaddd %ymm12,%ymm8,%ymm8 vpxord %ymm8,%ymm4,%ymm4 vprold $12,%ymm4,%ymm4 # x9 += x13, x5 = rotl32(x5 ^ x9, 12) vpaddd %ymm13,%ymm9,%ymm9 vpxord %ymm9,%ymm5,%ymm5 vprold $12,%ymm5,%ymm5 # x10 += x14, x6 = rotl32(x6 ^ x10, 12) vpaddd %ymm14,%ymm10,%ymm10 vpxord %ymm10,%ymm6,%ymm6 vprold $12,%ymm6,%ymm6 # x11 += x15, x7 = rotl32(x7 ^ x11, 12) vpaddd %ymm15,%ymm11,%ymm11 vpxord %ymm11,%ymm7,%ymm7 vprold $12,%ymm7,%ymm7 # x0 += x4, x12 = rotl32(x12 ^ x0, 8) vpaddd %ymm0,%ymm4,%ymm0 vpxord %ymm0,%ymm12,%ymm12 vprold $8,%ymm12,%ymm12 # x1 += x5, x13 = rotl32(x13 ^ x1, 8) vpaddd %ymm1,%ymm5,%ymm1 vpxord %ymm1,%ymm13,%ymm13 vprold $8,%ymm13,%ymm13 # x2 += x6, x14 = rotl32(x14 ^ x2, 8) vpaddd %ymm2,%ymm6,%ymm2 vpxord %ymm2,%ymm14,%ymm14 vprold $8,%ymm14,%ymm14 # x3 += x7, x15 = rotl32(x15 ^ x3, 8) vpaddd %ymm3,%ymm7,%ymm3 vpxord %ymm3,%ymm15,%ymm15 vprold $8,%ymm15,%ymm15 # x8 += x12, x4 = rotl32(x4 ^ x8, 7) vpaddd %ymm12,%ymm8,%ymm8 vpxord %ymm8,%ymm4,%ymm4 vprold $7,%ymm4,%ymm4 # x9 += x13, x5 = rotl32(x5 ^ x9, 7) vpaddd %ymm13,%ymm9,%ymm9 vpxord %ymm9,%ymm5,%ymm5 vprold $7,%ymm5,%ymm5 # x10 += x14, x6 = rotl32(x6 ^ x10, 7) vpaddd %ymm14,%ymm10,%ymm10 vpxord %ymm10,%ymm6,%ymm6 vprold $7,%ymm6,%ymm6 # x11 += x15, x7 = rotl32(x7 ^ x11, 7) vpaddd %ymm15,%ymm11,%ymm11 vpxord %ymm11,%ymm7,%ymm7 vprold $7,%ymm7,%ymm7 # x0 += x5, x15 = rotl32(x15 ^ x0, 16) vpaddd %ymm0,%ymm5,%ymm0 vpxord %ymm0,%ymm15,%ymm15 vprold $16,%ymm15,%ymm15 # x1 += x6, x12 = rotl32(x12 ^ x1, 16) vpaddd %ymm1,%ymm6,%ymm1 vpxord %ymm1,%ymm12,%ymm12 vprold $16,%ymm12,%ymm12 # x2 += x7, x13 = rotl32(x13 ^ x2, 16) vpaddd %ymm2,%ymm7,%ymm2 vpxord %ymm2,%ymm13,%ymm13 vprold $16,%ymm13,%ymm13 # x3 += x4, x14 = rotl32(x14 ^ x3, 16) vpaddd %ymm3,%ymm4,%ymm3 vpxord %ymm3,%ymm14,%ymm14 vprold $16,%ymm14,%ymm14 # x10 += x15, x5 = rotl32(x5 ^ x10, 12) vpaddd %ymm15,%ymm10,%ymm10 vpxord %ymm10,%ymm5,%ymm5 vprold $12,%ymm5,%ymm5 # x11 += x12, x6 = rotl32(x6 ^ x11, 12) vpaddd %ymm12,%ymm11,%ymm11 vpxord %ymm11,%ymm6,%ymm6 vprold $12,%ymm6,%ymm6 # x8 += x13, x7 = rotl32(x7 ^ x8, 12) vpaddd 
%ymm13,%ymm8,%ymm8 vpxord %ymm8,%ymm7,%ymm7 vprold $12,%ymm7,%ymm7 # x9 += x14, x4 = rotl32(x4 ^ x9, 12) vpaddd %ymm14,%ymm9,%ymm9 vpxord %ymm9,%ymm4,%ymm4 vprold $12,%ymm4,%ymm4 # x0 += x5, x15 = rotl32(x15 ^ x0, 8) vpaddd %ymm0,%ymm5,%ymm0 vpxord %ymm0,%ymm15,%ymm15 vprold $8,%ymm15,%ymm15 # x1 += x6, x12 = rotl32(x12 ^ x1, 8) vpaddd %ymm1,%ymm6,%ymm1 vpxord %ymm1,%ymm12,%ymm12 vprold $8,%ymm12,%ymm12 # x2 += x7, x13 = rotl32(x13 ^ x2, 8) vpaddd %ymm2,%ymm7,%ymm2 vpxord %ymm2,%ymm13,%ymm13 vprold $8,%ymm13,%ymm13 # x3 += x4, x14 = rotl32(x14 ^ x3, 8) vpaddd %ymm3,%ymm4,%ymm3 vpxord %ymm3,%ymm14,%ymm14 vprold $8,%ymm14,%ymm14 # x10 += x15, x5 = rotl32(x5 ^ x10, 7) vpaddd %ymm15,%ymm10,%ymm10 vpxord %ymm10,%ymm5,%ymm5 vprold $7,%ymm5,%ymm5 # x11 += x12, x6 = rotl32(x6 ^ x11, 7) vpaddd %ymm12,%ymm11,%ymm11 vpxord %ymm11,%ymm6,%ymm6 vprold $7,%ymm6,%ymm6 # x8 += x13, x7 = rotl32(x7 ^ x8, 7) vpaddd %ymm13,%ymm8,%ymm8 vpxord %ymm8,%ymm7,%ymm7 vprold $7,%ymm7,%ymm7 # x9 += x14, x4 = rotl32(x4 ^ x9, 7) vpaddd %ymm14,%ymm9,%ymm9 vpxord %ymm9,%ymm4,%ymm4 vprold $7,%ymm4,%ymm4 sub $2,%r8d jnz .Ldoubleround8 # x0..15[0-3] += s[0..15] vpaddd %ymm16,%ymm0,%ymm0 vpaddd %ymm17,%ymm1,%ymm1 vpaddd %ymm18,%ymm2,%ymm2 vpaddd %ymm19,%ymm3,%ymm3 vpaddd %ymm20,%ymm4,%ymm4 vpaddd %ymm21,%ymm5,%ymm5 vpaddd %ymm22,%ymm6,%ymm6 vpaddd %ymm23,%ymm7,%ymm7 vpaddd %ymm24,%ymm8,%ymm8 vpaddd %ymm25,%ymm9,%ymm9 vpaddd %ymm26,%ymm10,%ymm10 vpaddd %ymm27,%ymm11,%ymm11 vpaddd %ymm28,%ymm12,%ymm12 vpaddd %ymm29,%ymm13,%ymm13 vpaddd %ymm30,%ymm14,%ymm14 vpaddd %ymm31,%ymm15,%ymm15 # interleave 32-bit words in state n, n+1 vpunpckldq %ymm1,%ymm0,%ymm16 vpunpckhdq %ymm1,%ymm0,%ymm17 vpunpckldq %ymm3,%ymm2,%ymm18 vpunpckhdq %ymm3,%ymm2,%ymm19 vpunpckldq %ymm5,%ymm4,%ymm20 vpunpckhdq %ymm5,%ymm4,%ymm21 vpunpckldq %ymm7,%ymm6,%ymm22 vpunpckhdq %ymm7,%ymm6,%ymm23 vpunpckldq %ymm9,%ymm8,%ymm24 vpunpckhdq %ymm9,%ymm8,%ymm25 vpunpckldq %ymm11,%ymm10,%ymm26 vpunpckhdq %ymm11,%ymm10,%ymm27 vpunpckldq %ymm13,%ymm12,%ymm28 vpunpckhdq %ymm13,%ymm12,%ymm29 vpunpckldq %ymm15,%ymm14,%ymm30 vpunpckhdq %ymm15,%ymm14,%ymm31 # interleave 64-bit words in state n, n+2 vpunpcklqdq %ymm18,%ymm16,%ymm0 vpunpcklqdq %ymm19,%ymm17,%ymm1 vpunpckhqdq %ymm18,%ymm16,%ymm2 vpunpckhqdq %ymm19,%ymm17,%ymm3 vpunpcklqdq %ymm22,%ymm20,%ymm4 vpunpcklqdq %ymm23,%ymm21,%ymm5 vpunpckhqdq %ymm22,%ymm20,%ymm6 vpunpckhqdq %ymm23,%ymm21,%ymm7 vpunpcklqdq %ymm26,%ymm24,%ymm8 vpunpcklqdq %ymm27,%ymm25,%ymm9 vpunpckhqdq %ymm26,%ymm24,%ymm10 vpunpckhqdq %ymm27,%ymm25,%ymm11 vpunpcklqdq %ymm30,%ymm28,%ymm12 vpunpcklqdq %ymm31,%ymm29,%ymm13 vpunpckhqdq %ymm30,%ymm28,%ymm14 vpunpckhqdq %ymm31,%ymm29,%ymm15 # interleave 128-bit words in state n, n+4 # xor/write first four blocks vmovdqa64 %ymm0,%ymm16 vperm2i128 $0x20,%ymm4,%ymm0,%ymm0 cmp $0x0020,%rcx jl .Lxorpart8 vpxord 0x0000(%rdx),%ymm0,%ymm0 vmovdqu64 %ymm0,0x0000(%rsi) vmovdqa64 %ymm16,%ymm0 vperm2i128 $0x31,%ymm4,%ymm0,%ymm4 vperm2i128 $0x20,%ymm12,%ymm8,%ymm0 cmp $0x0040,%rcx jl .Lxorpart8 vpxord 0x0020(%rdx),%ymm0,%ymm0 vmovdqu64 %ymm0,0x0020(%rsi) vperm2i128 $0x31,%ymm12,%ymm8,%ymm12 vperm2i128 $0x20,%ymm6,%ymm2,%ymm0 cmp $0x0060,%rcx jl .Lxorpart8 vpxord 0x0040(%rdx),%ymm0,%ymm0 vmovdqu64 %ymm0,0x0040(%rsi) vperm2i128 $0x31,%ymm6,%ymm2,%ymm6 vperm2i128 $0x20,%ymm14,%ymm10,%ymm0 cmp $0x0080,%rcx jl .Lxorpart8 vpxord 0x0060(%rdx),%ymm0,%ymm0 vmovdqu64 %ymm0,0x0060(%rsi) vperm2i128 $0x31,%ymm14,%ymm10,%ymm14 vperm2i128 $0x20,%ymm5,%ymm1,%ymm0 cmp $0x00a0,%rcx jl .Lxorpart8 vpxord 0x0080(%rdx),%ymm0,%ymm0 vmovdqu64 
%ymm0,0x0080(%rsi) vperm2i128 $0x31,%ymm5,%ymm1,%ymm5 vperm2i128 $0x20,%ymm13,%ymm9,%ymm0 cmp $0x00c0,%rcx jl .Lxorpart8 vpxord 0x00a0(%rdx),%ymm0,%ymm0 vmovdqu64 %ymm0,0x00a0(%rsi) vperm2i128 $0x31,%ymm13,%ymm9,%ymm13 vperm2i128 $0x20,%ymm7,%ymm3,%ymm0 cmp $0x00e0,%rcx jl .Lxorpart8 vpxord 0x00c0(%rdx),%ymm0,%ymm0 vmovdqu64 %ymm0,0x00c0(%rsi) vperm2i128 $0x31,%ymm7,%ymm3,%ymm7 vperm2i128 $0x20,%ymm15,%ymm11,%ymm0 cmp $0x0100,%rcx jl .Lxorpart8 vpxord 0x00e0(%rdx),%ymm0,%ymm0 vmovdqu64 %ymm0,0x00e0(%rsi) vperm2i128 $0x31,%ymm15,%ymm11,%ymm15 # xor remaining blocks, write to output vmovdqa64 %ymm4,%ymm0 cmp $0x0120,%rcx jl .Lxorpart8 vpxord 0x0100(%rdx),%ymm0,%ymm0 vmovdqu64 %ymm0,0x0100(%rsi) vmovdqa64 %ymm12,%ymm0 cmp $0x0140,%rcx jl .Lxorpart8 vpxord 0x0120(%rdx),%ymm0,%ymm0 vmovdqu64 %ymm0,0x0120(%rsi) vmovdqa64 %ymm6,%ymm0 cmp $0x0160,%rcx jl .Lxorpart8 vpxord 0x0140(%rdx),%ymm0,%ymm0 vmovdqu64 %ymm0,0x0140(%rsi) vmovdqa64 %ymm14,%ymm0 cmp $0x0180,%rcx jl .Lxorpart8 vpxord 0x0160(%rdx),%ymm0,%ymm0 vmovdqu64 %ymm0,0x0160(%rsi) vmovdqa64 %ymm5,%ymm0 cmp $0x01a0,%rcx jl .Lxorpart8 vpxord 0x0180(%rdx),%ymm0,%ymm0 vmovdqu64 %ymm0,0x0180(%rsi) vmovdqa64 %ymm13,%ymm0 cmp $0x01c0,%rcx jl .Lxorpart8 vpxord 0x01a0(%rdx),%ymm0,%ymm0 vmovdqu64 %ymm0,0x01a0(%rsi) vmovdqa64 %ymm7,%ymm0 cmp $0x01e0,%rcx jl .Lxorpart8 vpxord 0x01c0(%rdx),%ymm0,%ymm0 vmovdqu64 %ymm0,0x01c0(%rsi) vmovdqa64 %ymm15,%ymm0 cmp $0x0200,%rcx jl .Lxorpart8 vpxord 0x01e0(%rdx),%ymm0,%ymm0 vmovdqu64 %ymm0,0x01e0(%rsi) .Ldone8: vzeroupper RET .Lxorpart8: # xor remaining bytes from partial register into output mov %rcx,%rax and $0x1f,%rcx jz .Ldone8 mov %rax,%r9 and $~0x1f,%r9 mov $1,%rax shld %cl,%rax,%rax sub $1,%rax kmovq %rax,%k1 vmovdqu8 (%rdx,%r9),%ymm1{%k1}{z} vpxord %ymm0,%ymm1,%ymm1 vmovdqu8 %ymm1,(%rsi,%r9){%k1} jmp .Ldone8 SYM_FUNC_END(chacha_8block_xor_avx512vl)
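The block above keeps eight ChaCha states in %ymm0..%ymm15 (one 32-bit lane per block) and uses vprold for every rotate, so each "x0 += x4, x12 = rotl32(x12 ^ x0, 16)" comment is one vectorised quarter-round step. A minimal scalar sketch in C, written here only as a reading aid and not taken from these sources, shows the quarter-round and the column/diagonal double-round those comments describe:

#include <stdint.h>

static inline uint32_t rotl32(uint32_t v, int n)
{
	return (v << n) | (v >> (32 - n));
}

/* one ChaCha quarter-round on state words a, b, c, d */
static void chacha_quarterround(uint32_t x[16], int a, int b, int c, int d)
{
	x[a] += x[b]; x[d] = rotl32(x[d] ^ x[a], 16);
	x[c] += x[d]; x[b] = rotl32(x[b] ^ x[c], 12);
	x[a] += x[b]; x[d] = rotl32(x[d] ^ x[a], 8);
	x[c] += x[d]; x[b] = rotl32(x[b] ^ x[c], 7);
}

/* the column + diagonal pair that .Ldoubleround8 runs on 8 blocks at once */
static void chacha_doubleround(uint32_t x[16])
{
	chacha_quarterround(x, 0, 4,  8, 12);
	chacha_quarterround(x, 1, 5,  9, 13);
	chacha_quarterround(x, 2, 6, 10, 14);
	chacha_quarterround(x, 3, 7, 11, 15);
	chacha_quarterround(x, 0, 5, 10, 15);
	chacha_quarterround(x, 1, 6, 11, 12);
	chacha_quarterround(x, 2, 7,  8, 13);
	chacha_quarterround(x, 3, 4,  9, 14);
}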
aixcc-public/challenge-001-exemplar-source
17,216
arch/x86/crypto/chacha-ssse3-x86_64.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * ChaCha 256-bit cipher algorithm, x64 SSSE3 functions * * Copyright (C) 2015 Martin Willi */ #include <linux/linkage.h> #include <asm/frame.h> .section .rodata.cst16.ROT8, "aM", @progbits, 16 .align 16 ROT8: .octa 0x0e0d0c0f0a09080b0605040702010003 .section .rodata.cst16.ROT16, "aM", @progbits, 16 .align 16 ROT16: .octa 0x0d0c0f0e09080b0a0504070601000302 .section .rodata.cst16.CTRINC, "aM", @progbits, 16 .align 16 CTRINC: .octa 0x00000003000000020000000100000000 .text /* * chacha_permute - permute one block * * Permute one 64-byte block where the state matrix is in %xmm0-%xmm3. This * function performs matrix operations on four words in parallel, but requires * shuffling to rearrange the words after each round. 8/16-bit word rotation is * done with the slightly better performing SSSE3 byte shuffling, 7/12-bit word * rotation uses traditional shift+OR. * * The round count is given in %r8d. * * Clobbers: %r8d, %xmm4-%xmm7 */ SYM_FUNC_START_LOCAL(chacha_permute) movdqa ROT8(%rip),%xmm4 movdqa ROT16(%rip),%xmm5 .Ldoubleround: # x0 += x1, x3 = rotl32(x3 ^ x0, 16) paddd %xmm1,%xmm0 pxor %xmm0,%xmm3 pshufb %xmm5,%xmm3 # x2 += x3, x1 = rotl32(x1 ^ x2, 12) paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm6 pslld $12,%xmm6 psrld $20,%xmm1 por %xmm6,%xmm1 # x0 += x1, x3 = rotl32(x3 ^ x0, 8) paddd %xmm1,%xmm0 pxor %xmm0,%xmm3 pshufb %xmm4,%xmm3 # x2 += x3, x1 = rotl32(x1 ^ x2, 7) paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm7 pslld $7,%xmm7 psrld $25,%xmm1 por %xmm7,%xmm1 # x1 = shuffle32(x1, MASK(0, 3, 2, 1)) pshufd $0x39,%xmm1,%xmm1 # x2 = shuffle32(x2, MASK(1, 0, 3, 2)) pshufd $0x4e,%xmm2,%xmm2 # x3 = shuffle32(x3, MASK(2, 1, 0, 3)) pshufd $0x93,%xmm3,%xmm3 # x0 += x1, x3 = rotl32(x3 ^ x0, 16) paddd %xmm1,%xmm0 pxor %xmm0,%xmm3 pshufb %xmm5,%xmm3 # x2 += x3, x1 = rotl32(x1 ^ x2, 12) paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm6 pslld $12,%xmm6 psrld $20,%xmm1 por %xmm6,%xmm1 # x0 += x1, x3 = rotl32(x3 ^ x0, 8) paddd %xmm1,%xmm0 pxor %xmm0,%xmm3 pshufb %xmm4,%xmm3 # x2 += x3, x1 = rotl32(x1 ^ x2, 7) paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm7 pslld $7,%xmm7 psrld $25,%xmm1 por %xmm7,%xmm1 # x1 = shuffle32(x1, MASK(2, 1, 0, 3)) pshufd $0x93,%xmm1,%xmm1 # x2 = shuffle32(x2, MASK(1, 0, 3, 2)) pshufd $0x4e,%xmm2,%xmm2 # x3 = shuffle32(x3, MASK(0, 3, 2, 1)) pshufd $0x39,%xmm3,%xmm3 sub $2,%r8d jnz .Ldoubleround RET SYM_FUNC_END(chacha_permute) SYM_FUNC_START(chacha_block_xor_ssse3) # %rdi: Input state matrix, s # %rsi: up to 1 data block output, o # %rdx: up to 1 data block input, i # %rcx: input/output length in bytes # %r8d: nrounds FRAME_BEGIN # x0..3 = s0..3 movdqu 0x00(%rdi),%xmm0 movdqu 0x10(%rdi),%xmm1 movdqu 0x20(%rdi),%xmm2 movdqu 0x30(%rdi),%xmm3 movdqa %xmm0,%xmm8 movdqa %xmm1,%xmm9 movdqa %xmm2,%xmm10 movdqa %xmm3,%xmm11 mov %rcx,%rax call chacha_permute # o0 = i0 ^ (x0 + s0) paddd %xmm8,%xmm0 cmp $0x10,%rax jl .Lxorpart movdqu 0x00(%rdx),%xmm4 pxor %xmm4,%xmm0 movdqu %xmm0,0x00(%rsi) # o1 = i1 ^ (x1 + s1) paddd %xmm9,%xmm1 movdqa %xmm1,%xmm0 cmp $0x20,%rax jl .Lxorpart movdqu 0x10(%rdx),%xmm0 pxor %xmm1,%xmm0 movdqu %xmm0,0x10(%rsi) # o2 = i2 ^ (x2 + s2) paddd %xmm10,%xmm2 movdqa %xmm2,%xmm0 cmp $0x30,%rax jl .Lxorpart movdqu 0x20(%rdx),%xmm0 pxor %xmm2,%xmm0 movdqu %xmm0,0x20(%rsi) # o3 = i3 ^ (x3 + s3) paddd %xmm11,%xmm3 movdqa %xmm3,%xmm0 cmp $0x40,%rax jl .Lxorpart movdqu 0x30(%rdx),%xmm0 pxor %xmm3,%xmm0 movdqu %xmm0,0x30(%rsi) .Ldone: FRAME_END RET .Lxorpart: # xor remaining bytes from partial register into 
output mov %rax,%r9 and $0x0f,%r9 jz .Ldone and $~0x0f,%rax mov %rsi,%r11 lea 8(%rsp),%r10 sub $0x10,%rsp and $~31,%rsp lea (%rdx,%rax),%rsi mov %rsp,%rdi mov %r9,%rcx rep movsb pxor 0x00(%rsp),%xmm0 movdqa %xmm0,0x00(%rsp) mov %rsp,%rsi lea (%r11,%rax),%rdi mov %r9,%rcx rep movsb lea -8(%r10),%rsp jmp .Ldone SYM_FUNC_END(chacha_block_xor_ssse3) SYM_FUNC_START(hchacha_block_ssse3) # %rdi: Input state matrix, s # %rsi: output (8 32-bit words) # %edx: nrounds FRAME_BEGIN movdqu 0x00(%rdi),%xmm0 movdqu 0x10(%rdi),%xmm1 movdqu 0x20(%rdi),%xmm2 movdqu 0x30(%rdi),%xmm3 mov %edx,%r8d call chacha_permute movdqu %xmm0,0x00(%rsi) movdqu %xmm3,0x10(%rsi) FRAME_END RET SYM_FUNC_END(hchacha_block_ssse3) SYM_FUNC_START(chacha_4block_xor_ssse3) # %rdi: Input state matrix, s # %rsi: up to 4 data blocks output, o # %rdx: up to 4 data blocks input, i # %rcx: input/output length in bytes # %r8d: nrounds # This function encrypts four consecutive ChaCha blocks by loading the # the state matrix in SSE registers four times. As we need some scratch # registers, we save the first four registers on the stack. The # algorithm performs each operation on the corresponding word of each # state matrix, hence requires no word shuffling. For final XORing step # we transpose the matrix by interleaving 32- and then 64-bit words, # which allows us to do XOR in SSE registers. 8/16-bit word rotation is # done with the slightly better performing SSSE3 byte shuffling, # 7/12-bit word rotation uses traditional shift+OR. lea 8(%rsp),%r10 sub $0x80,%rsp and $~63,%rsp mov %rcx,%rax # x0..15[0-3] = s0..3[0..3] movq 0x00(%rdi),%xmm1 pshufd $0x00,%xmm1,%xmm0 pshufd $0x55,%xmm1,%xmm1 movq 0x08(%rdi),%xmm3 pshufd $0x00,%xmm3,%xmm2 pshufd $0x55,%xmm3,%xmm3 movq 0x10(%rdi),%xmm5 pshufd $0x00,%xmm5,%xmm4 pshufd $0x55,%xmm5,%xmm5 movq 0x18(%rdi),%xmm7 pshufd $0x00,%xmm7,%xmm6 pshufd $0x55,%xmm7,%xmm7 movq 0x20(%rdi),%xmm9 pshufd $0x00,%xmm9,%xmm8 pshufd $0x55,%xmm9,%xmm9 movq 0x28(%rdi),%xmm11 pshufd $0x00,%xmm11,%xmm10 pshufd $0x55,%xmm11,%xmm11 movq 0x30(%rdi),%xmm13 pshufd $0x00,%xmm13,%xmm12 pshufd $0x55,%xmm13,%xmm13 movq 0x38(%rdi),%xmm15 pshufd $0x00,%xmm15,%xmm14 pshufd $0x55,%xmm15,%xmm15 # x0..3 on stack movdqa %xmm0,0x00(%rsp) movdqa %xmm1,0x10(%rsp) movdqa %xmm2,0x20(%rsp) movdqa %xmm3,0x30(%rsp) movdqa CTRINC(%rip),%xmm1 movdqa ROT8(%rip),%xmm2 movdqa ROT16(%rip),%xmm3 # x12 += counter values 0-3 paddd %xmm1,%xmm12 .Ldoubleround4: # x0 += x4, x12 = rotl32(x12 ^ x0, 16) movdqa 0x00(%rsp),%xmm0 paddd %xmm4,%xmm0 movdqa %xmm0,0x00(%rsp) pxor %xmm0,%xmm12 pshufb %xmm3,%xmm12 # x1 += x5, x13 = rotl32(x13 ^ x1, 16) movdqa 0x10(%rsp),%xmm0 paddd %xmm5,%xmm0 movdqa %xmm0,0x10(%rsp) pxor %xmm0,%xmm13 pshufb %xmm3,%xmm13 # x2 += x6, x14 = rotl32(x14 ^ x2, 16) movdqa 0x20(%rsp),%xmm0 paddd %xmm6,%xmm0 movdqa %xmm0,0x20(%rsp) pxor %xmm0,%xmm14 pshufb %xmm3,%xmm14 # x3 += x7, x15 = rotl32(x15 ^ x3, 16) movdqa 0x30(%rsp),%xmm0 paddd %xmm7,%xmm0 movdqa %xmm0,0x30(%rsp) pxor %xmm0,%xmm15 pshufb %xmm3,%xmm15 # x8 += x12, x4 = rotl32(x4 ^ x8, 12) paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm0 pslld $12,%xmm0 psrld $20,%xmm4 por %xmm0,%xmm4 # x9 += x13, x5 = rotl32(x5 ^ x9, 12) paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm0 pslld $12,%xmm0 psrld $20,%xmm5 por %xmm0,%xmm5 # x10 += x14, x6 = rotl32(x6 ^ x10, 12) paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm0 pslld $12,%xmm0 psrld $20,%xmm6 por %xmm0,%xmm6 # x11 += x15, x7 = rotl32(x7 ^ x11, 12) paddd %xmm15,%xmm11 pxor %xmm11,%xmm7 movdqa %xmm7,%xmm0 pslld $12,%xmm0 psrld 
$20,%xmm7 por %xmm0,%xmm7 # x0 += x4, x12 = rotl32(x12 ^ x0, 8) movdqa 0x00(%rsp),%xmm0 paddd %xmm4,%xmm0 movdqa %xmm0,0x00(%rsp) pxor %xmm0,%xmm12 pshufb %xmm2,%xmm12 # x1 += x5, x13 = rotl32(x13 ^ x1, 8) movdqa 0x10(%rsp),%xmm0 paddd %xmm5,%xmm0 movdqa %xmm0,0x10(%rsp) pxor %xmm0,%xmm13 pshufb %xmm2,%xmm13 # x2 += x6, x14 = rotl32(x14 ^ x2, 8) movdqa 0x20(%rsp),%xmm0 paddd %xmm6,%xmm0 movdqa %xmm0,0x20(%rsp) pxor %xmm0,%xmm14 pshufb %xmm2,%xmm14 # x3 += x7, x15 = rotl32(x15 ^ x3, 8) movdqa 0x30(%rsp),%xmm0 paddd %xmm7,%xmm0 movdqa %xmm0,0x30(%rsp) pxor %xmm0,%xmm15 pshufb %xmm2,%xmm15 # x8 += x12, x4 = rotl32(x4 ^ x8, 7) paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm0 pslld $7,%xmm0 psrld $25,%xmm4 por %xmm0,%xmm4 # x9 += x13, x5 = rotl32(x5 ^ x9, 7) paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm0 pslld $7,%xmm0 psrld $25,%xmm5 por %xmm0,%xmm5 # x10 += x14, x6 = rotl32(x6 ^ x10, 7) paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm0 pslld $7,%xmm0 psrld $25,%xmm6 por %xmm0,%xmm6 # x11 += x15, x7 = rotl32(x7 ^ x11, 7) paddd %xmm15,%xmm11 pxor %xmm11,%xmm7 movdqa %xmm7,%xmm0 pslld $7,%xmm0 psrld $25,%xmm7 por %xmm0,%xmm7 # x0 += x5, x15 = rotl32(x15 ^ x0, 16) movdqa 0x00(%rsp),%xmm0 paddd %xmm5,%xmm0 movdqa %xmm0,0x00(%rsp) pxor %xmm0,%xmm15 pshufb %xmm3,%xmm15 # x1 += x6, x12 = rotl32(x12 ^ x1, 16) movdqa 0x10(%rsp),%xmm0 paddd %xmm6,%xmm0 movdqa %xmm0,0x10(%rsp) pxor %xmm0,%xmm12 pshufb %xmm3,%xmm12 # x2 += x7, x13 = rotl32(x13 ^ x2, 16) movdqa 0x20(%rsp),%xmm0 paddd %xmm7,%xmm0 movdqa %xmm0,0x20(%rsp) pxor %xmm0,%xmm13 pshufb %xmm3,%xmm13 # x3 += x4, x14 = rotl32(x14 ^ x3, 16) movdqa 0x30(%rsp),%xmm0 paddd %xmm4,%xmm0 movdqa %xmm0,0x30(%rsp) pxor %xmm0,%xmm14 pshufb %xmm3,%xmm14 # x10 += x15, x5 = rotl32(x5 ^ x10, 12) paddd %xmm15,%xmm10 pxor %xmm10,%xmm5 movdqa %xmm5,%xmm0 pslld $12,%xmm0 psrld $20,%xmm5 por %xmm0,%xmm5 # x11 += x12, x6 = rotl32(x6 ^ x11, 12) paddd %xmm12,%xmm11 pxor %xmm11,%xmm6 movdqa %xmm6,%xmm0 pslld $12,%xmm0 psrld $20,%xmm6 por %xmm0,%xmm6 # x8 += x13, x7 = rotl32(x7 ^ x8, 12) paddd %xmm13,%xmm8 pxor %xmm8,%xmm7 movdqa %xmm7,%xmm0 pslld $12,%xmm0 psrld $20,%xmm7 por %xmm0,%xmm7 # x9 += x14, x4 = rotl32(x4 ^ x9, 12) paddd %xmm14,%xmm9 pxor %xmm9,%xmm4 movdqa %xmm4,%xmm0 pslld $12,%xmm0 psrld $20,%xmm4 por %xmm0,%xmm4 # x0 += x5, x15 = rotl32(x15 ^ x0, 8) movdqa 0x00(%rsp),%xmm0 paddd %xmm5,%xmm0 movdqa %xmm0,0x00(%rsp) pxor %xmm0,%xmm15 pshufb %xmm2,%xmm15 # x1 += x6, x12 = rotl32(x12 ^ x1, 8) movdqa 0x10(%rsp),%xmm0 paddd %xmm6,%xmm0 movdqa %xmm0,0x10(%rsp) pxor %xmm0,%xmm12 pshufb %xmm2,%xmm12 # x2 += x7, x13 = rotl32(x13 ^ x2, 8) movdqa 0x20(%rsp),%xmm0 paddd %xmm7,%xmm0 movdqa %xmm0,0x20(%rsp) pxor %xmm0,%xmm13 pshufb %xmm2,%xmm13 # x3 += x4, x14 = rotl32(x14 ^ x3, 8) movdqa 0x30(%rsp),%xmm0 paddd %xmm4,%xmm0 movdqa %xmm0,0x30(%rsp) pxor %xmm0,%xmm14 pshufb %xmm2,%xmm14 # x10 += x15, x5 = rotl32(x5 ^ x10, 7) paddd %xmm15,%xmm10 pxor %xmm10,%xmm5 movdqa %xmm5,%xmm0 pslld $7,%xmm0 psrld $25,%xmm5 por %xmm0,%xmm5 # x11 += x12, x6 = rotl32(x6 ^ x11, 7) paddd %xmm12,%xmm11 pxor %xmm11,%xmm6 movdqa %xmm6,%xmm0 pslld $7,%xmm0 psrld $25,%xmm6 por %xmm0,%xmm6 # x8 += x13, x7 = rotl32(x7 ^ x8, 7) paddd %xmm13,%xmm8 pxor %xmm8,%xmm7 movdqa %xmm7,%xmm0 pslld $7,%xmm0 psrld $25,%xmm7 por %xmm0,%xmm7 # x9 += x14, x4 = rotl32(x4 ^ x9, 7) paddd %xmm14,%xmm9 pxor %xmm9,%xmm4 movdqa %xmm4,%xmm0 pslld $7,%xmm0 psrld $25,%xmm4 por %xmm0,%xmm4 sub $2,%r8d jnz .Ldoubleround4 # x0[0-3] += s0[0] # x1[0-3] += s0[1] movq 0x00(%rdi),%xmm3 pshufd $0x00,%xmm3,%xmm2 
pshufd $0x55,%xmm3,%xmm3 paddd 0x00(%rsp),%xmm2 movdqa %xmm2,0x00(%rsp) paddd 0x10(%rsp),%xmm3 movdqa %xmm3,0x10(%rsp) # x2[0-3] += s0[2] # x3[0-3] += s0[3] movq 0x08(%rdi),%xmm3 pshufd $0x00,%xmm3,%xmm2 pshufd $0x55,%xmm3,%xmm3 paddd 0x20(%rsp),%xmm2 movdqa %xmm2,0x20(%rsp) paddd 0x30(%rsp),%xmm3 movdqa %xmm3,0x30(%rsp) # x4[0-3] += s1[0] # x5[0-3] += s1[1] movq 0x10(%rdi),%xmm3 pshufd $0x00,%xmm3,%xmm2 pshufd $0x55,%xmm3,%xmm3 paddd %xmm2,%xmm4 paddd %xmm3,%xmm5 # x6[0-3] += s1[2] # x7[0-3] += s1[3] movq 0x18(%rdi),%xmm3 pshufd $0x00,%xmm3,%xmm2 pshufd $0x55,%xmm3,%xmm3 paddd %xmm2,%xmm6 paddd %xmm3,%xmm7 # x8[0-3] += s2[0] # x9[0-3] += s2[1] movq 0x20(%rdi),%xmm3 pshufd $0x00,%xmm3,%xmm2 pshufd $0x55,%xmm3,%xmm3 paddd %xmm2,%xmm8 paddd %xmm3,%xmm9 # x10[0-3] += s2[2] # x11[0-3] += s2[3] movq 0x28(%rdi),%xmm3 pshufd $0x00,%xmm3,%xmm2 pshufd $0x55,%xmm3,%xmm3 paddd %xmm2,%xmm10 paddd %xmm3,%xmm11 # x12[0-3] += s3[0] # x13[0-3] += s3[1] movq 0x30(%rdi),%xmm3 pshufd $0x00,%xmm3,%xmm2 pshufd $0x55,%xmm3,%xmm3 paddd %xmm2,%xmm12 paddd %xmm3,%xmm13 # x14[0-3] += s3[2] # x15[0-3] += s3[3] movq 0x38(%rdi),%xmm3 pshufd $0x00,%xmm3,%xmm2 pshufd $0x55,%xmm3,%xmm3 paddd %xmm2,%xmm14 paddd %xmm3,%xmm15 # x12 += counter values 0-3 paddd %xmm1,%xmm12 # interleave 32-bit words in state n, n+1 movdqa 0x00(%rsp),%xmm0 movdqa 0x10(%rsp),%xmm1 movdqa %xmm0,%xmm2 punpckldq %xmm1,%xmm2 punpckhdq %xmm1,%xmm0 movdqa %xmm2,0x00(%rsp) movdqa %xmm0,0x10(%rsp) movdqa 0x20(%rsp),%xmm0 movdqa 0x30(%rsp),%xmm1 movdqa %xmm0,%xmm2 punpckldq %xmm1,%xmm2 punpckhdq %xmm1,%xmm0 movdqa %xmm2,0x20(%rsp) movdqa %xmm0,0x30(%rsp) movdqa %xmm4,%xmm0 punpckldq %xmm5,%xmm4 punpckhdq %xmm5,%xmm0 movdqa %xmm0,%xmm5 movdqa %xmm6,%xmm0 punpckldq %xmm7,%xmm6 punpckhdq %xmm7,%xmm0 movdqa %xmm0,%xmm7 movdqa %xmm8,%xmm0 punpckldq %xmm9,%xmm8 punpckhdq %xmm9,%xmm0 movdqa %xmm0,%xmm9 movdqa %xmm10,%xmm0 punpckldq %xmm11,%xmm10 punpckhdq %xmm11,%xmm0 movdqa %xmm0,%xmm11 movdqa %xmm12,%xmm0 punpckldq %xmm13,%xmm12 punpckhdq %xmm13,%xmm0 movdqa %xmm0,%xmm13 movdqa %xmm14,%xmm0 punpckldq %xmm15,%xmm14 punpckhdq %xmm15,%xmm0 movdqa %xmm0,%xmm15 # interleave 64-bit words in state n, n+2 movdqa 0x00(%rsp),%xmm0 movdqa 0x20(%rsp),%xmm1 movdqa %xmm0,%xmm2 punpcklqdq %xmm1,%xmm2 punpckhqdq %xmm1,%xmm0 movdqa %xmm2,0x00(%rsp) movdqa %xmm0,0x20(%rsp) movdqa 0x10(%rsp),%xmm0 movdqa 0x30(%rsp),%xmm1 movdqa %xmm0,%xmm2 punpcklqdq %xmm1,%xmm2 punpckhqdq %xmm1,%xmm0 movdqa %xmm2,0x10(%rsp) movdqa %xmm0,0x30(%rsp) movdqa %xmm4,%xmm0 punpcklqdq %xmm6,%xmm4 punpckhqdq %xmm6,%xmm0 movdqa %xmm0,%xmm6 movdqa %xmm5,%xmm0 punpcklqdq %xmm7,%xmm5 punpckhqdq %xmm7,%xmm0 movdqa %xmm0,%xmm7 movdqa %xmm8,%xmm0 punpcklqdq %xmm10,%xmm8 punpckhqdq %xmm10,%xmm0 movdqa %xmm0,%xmm10 movdqa %xmm9,%xmm0 punpcklqdq %xmm11,%xmm9 punpckhqdq %xmm11,%xmm0 movdqa %xmm0,%xmm11 movdqa %xmm12,%xmm0 punpcklqdq %xmm14,%xmm12 punpckhqdq %xmm14,%xmm0 movdqa %xmm0,%xmm14 movdqa %xmm13,%xmm0 punpcklqdq %xmm15,%xmm13 punpckhqdq %xmm15,%xmm0 movdqa %xmm0,%xmm15 # xor with corresponding input, write to output movdqa 0x00(%rsp),%xmm0 cmp $0x10,%rax jl .Lxorpart4 movdqu 0x00(%rdx),%xmm1 pxor %xmm1,%xmm0 movdqu %xmm0,0x00(%rsi) movdqu %xmm4,%xmm0 cmp $0x20,%rax jl .Lxorpart4 movdqu 0x10(%rdx),%xmm1 pxor %xmm1,%xmm0 movdqu %xmm0,0x10(%rsi) movdqu %xmm8,%xmm0 cmp $0x30,%rax jl .Lxorpart4 movdqu 0x20(%rdx),%xmm1 pxor %xmm1,%xmm0 movdqu %xmm0,0x20(%rsi) movdqu %xmm12,%xmm0 cmp $0x40,%rax jl .Lxorpart4 movdqu 0x30(%rdx),%xmm1 pxor %xmm1,%xmm0 movdqu %xmm0,0x30(%rsi) movdqa 0x20(%rsp),%xmm0 cmp $0x50,%rax 
jl .Lxorpart4 movdqu 0x40(%rdx),%xmm1 pxor %xmm1,%xmm0 movdqu %xmm0,0x40(%rsi) movdqu %xmm6,%xmm0 cmp $0x60,%rax jl .Lxorpart4 movdqu 0x50(%rdx),%xmm1 pxor %xmm1,%xmm0 movdqu %xmm0,0x50(%rsi) movdqu %xmm10,%xmm0 cmp $0x70,%rax jl .Lxorpart4 movdqu 0x60(%rdx),%xmm1 pxor %xmm1,%xmm0 movdqu %xmm0,0x60(%rsi) movdqu %xmm14,%xmm0 cmp $0x80,%rax jl .Lxorpart4 movdqu 0x70(%rdx),%xmm1 pxor %xmm1,%xmm0 movdqu %xmm0,0x70(%rsi) movdqa 0x10(%rsp),%xmm0 cmp $0x90,%rax jl .Lxorpart4 movdqu 0x80(%rdx),%xmm1 pxor %xmm1,%xmm0 movdqu %xmm0,0x80(%rsi) movdqu %xmm5,%xmm0 cmp $0xa0,%rax jl .Lxorpart4 movdqu 0x90(%rdx),%xmm1 pxor %xmm1,%xmm0 movdqu %xmm0,0x90(%rsi) movdqu %xmm9,%xmm0 cmp $0xb0,%rax jl .Lxorpart4 movdqu 0xa0(%rdx),%xmm1 pxor %xmm1,%xmm0 movdqu %xmm0,0xa0(%rsi) movdqu %xmm13,%xmm0 cmp $0xc0,%rax jl .Lxorpart4 movdqu 0xb0(%rdx),%xmm1 pxor %xmm1,%xmm0 movdqu %xmm0,0xb0(%rsi) movdqa 0x30(%rsp),%xmm0 cmp $0xd0,%rax jl .Lxorpart4 movdqu 0xc0(%rdx),%xmm1 pxor %xmm1,%xmm0 movdqu %xmm0,0xc0(%rsi) movdqu %xmm7,%xmm0 cmp $0xe0,%rax jl .Lxorpart4 movdqu 0xd0(%rdx),%xmm1 pxor %xmm1,%xmm0 movdqu %xmm0,0xd0(%rsi) movdqu %xmm11,%xmm0 cmp $0xf0,%rax jl .Lxorpart4 movdqu 0xe0(%rdx),%xmm1 pxor %xmm1,%xmm0 movdqu %xmm0,0xe0(%rsi) movdqu %xmm15,%xmm0 cmp $0x100,%rax jl .Lxorpart4 movdqu 0xf0(%rdx),%xmm1 pxor %xmm1,%xmm0 movdqu %xmm0,0xf0(%rsi) .Ldone4: lea -8(%r10),%rsp RET .Lxorpart4: # xor remaining bytes from partial register into output mov %rax,%r9 and $0x0f,%r9 jz .Ldone4 and $~0x0f,%rax mov %rsi,%r11 lea (%rdx,%rax),%rsi mov %rsp,%rdi mov %r9,%rcx rep movsb pxor 0x00(%rsp),%xmm0 movdqa %xmm0,0x00(%rsp) mov %rsp,%rsi lea (%r11,%rax),%rdi mov %r9,%rcx rep movsb jmp .Ldone4 SYM_FUNC_END(chacha_4block_xor_ssse3)
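The chacha_permute comment above notes that the 8- and 16-bit rotations are done with SSSE3 byte shuffles (the ROT8/ROT16 masks) while the 7- and 12-bit rotations fall back to shift+OR. The reason is that rotating a 32-bit lane by a multiple of 8 only permutes whole bytes. A small stand-alone C check, assuming a little-endian host like x86 (illustration only, not kernel code), makes that equivalence concrete:

#include <assert.h>
#include <stdint.h>
#include <string.h>

static uint32_t rotl32(uint32_t v, int n)
{
	return (v << n) | (v >> (32 - n));
}

/* n must be 8, 16 or 24: the rotate becomes a pure byte permutation,
 * which is exactly what the pshufb masks implement per 32-bit lane. */
static uint32_t rotl32_by_bytes(uint32_t v, int n)
{
	uint8_t in[4], out[4];
	int i;

	memcpy(in, &v, 4);			/* little-endian lane bytes */
	for (i = 0; i < 4; i++)
		out[(i + n / 8) % 4] = in[i];	/* move whole bytes, no shifting */
	memcpy(&v, out, 4);
	return v;
}

int main(void)
{
	assert(rotl32_by_bytes(0x01020304, 16) == rotl32(0x01020304, 16));
	assert(rotl32_by_bytes(0x01020304,  8) == rotl32(0x01020304,  8));
	return 0;
}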
aixcc-public/challenge-001-exemplar-source
9,013
arch/x86/crypto/twofish-avx-x86_64-asm_64.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Twofish Cipher 8-way parallel algorithm (AVX/x86_64) * * Copyright (C) 2012 Johannes Goetzfried * <Johannes.Goetzfried@informatik.stud.uni-erlangen.de> * * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi> */ #include <linux/linkage.h> #include <asm/frame.h> #include "glue_helper-asm-avx.S" .file "twofish-avx-x86_64-asm_64.S" .section .rodata.cst16.bswap128_mask, "aM", @progbits, 16 .align 16 .Lbswap128_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 .text /* structure of crypto context */ #define s0 0 #define s1 1024 #define s2 2048 #define s3 3072 #define w 4096 #define k 4128 /********************************************************************** 8-way AVX twofish **********************************************************************/ #define CTX %rdi #define RA1 %xmm0 #define RB1 %xmm1 #define RC1 %xmm2 #define RD1 %xmm3 #define RA2 %xmm4 #define RB2 %xmm5 #define RC2 %xmm6 #define RD2 %xmm7 #define RX0 %xmm8 #define RY0 %xmm9 #define RX1 %xmm10 #define RY1 %xmm11 #define RK1 %xmm12 #define RK2 %xmm13 #define RT %xmm14 #define RR %xmm15 #define RID1 %r13 #define RID1d %r13d #define RID2 %rsi #define RID2d %esi #define RGI1 %rdx #define RGI1bl %dl #define RGI1bh %dh #define RGI2 %rcx #define RGI2bl %cl #define RGI2bh %ch #define RGI3 %rax #define RGI3bl %al #define RGI3bh %ah #define RGI4 %rbx #define RGI4bl %bl #define RGI4bh %bh #define RGS1 %r8 #define RGS1d %r8d #define RGS2 %r9 #define RGS2d %r9d #define RGS3 %r10 #define RGS3d %r10d #define lookup_32bit(t0, t1, t2, t3, src, dst, interleave_op, il_reg) \ movzbl src ## bl, RID1d; \ movzbl src ## bh, RID2d; \ shrq $16, src; \ movl t0(CTX, RID1, 4), dst ## d; \ movl t1(CTX, RID2, 4), RID2d; \ movzbl src ## bl, RID1d; \ xorl RID2d, dst ## d; \ movzbl src ## bh, RID2d; \ interleave_op(il_reg); \ xorl t2(CTX, RID1, 4), dst ## d; \ xorl t3(CTX, RID2, 4), dst ## d; #define dummy(d) /* do nothing */ #define shr_next(reg) \ shrq $16, reg; #define G(gi1, gi2, x, t0, t1, t2, t3) \ lookup_32bit(t0, t1, t2, t3, ##gi1, RGS1, shr_next, ##gi1); \ lookup_32bit(t0, t1, t2, t3, ##gi2, RGS3, shr_next, ##gi2); \ \ lookup_32bit(t0, t1, t2, t3, ##gi1, RGS2, dummy, none); \ shlq $32, RGS2; \ orq RGS1, RGS2; \ lookup_32bit(t0, t1, t2, t3, ##gi2, RGS1, dummy, none); \ shlq $32, RGS1; \ orq RGS1, RGS3; #define round_head_2(a, b, x1, y1, x2, y2) \ vmovq b ## 1, RGI3; \ vpextrq $1, b ## 1, RGI4; \ \ G(RGI1, RGI2, x1, s0, s1, s2, s3); \ vmovq a ## 2, RGI1; \ vpextrq $1, a ## 2, RGI2; \ vmovq RGS2, x1; \ vpinsrq $1, RGS3, x1, x1; \ \ G(RGI3, RGI4, y1, s1, s2, s3, s0); \ vmovq b ## 2, RGI3; \ vpextrq $1, b ## 2, RGI4; \ vmovq RGS2, y1; \ vpinsrq $1, RGS3, y1, y1; \ \ G(RGI1, RGI2, x2, s0, s1, s2, s3); \ vmovq RGS2, x2; \ vpinsrq $1, RGS3, x2, x2; \ \ G(RGI3, RGI4, y2, s1, s2, s3, s0); \ vmovq RGS2, y2; \ vpinsrq $1, RGS3, y2, y2; #define encround_tail(a, b, c, d, x, y, prerotate) \ vpaddd x, y, x; \ vpaddd x, RK1, RT;\ prerotate(b); \ vpxor RT, c, c; \ vpaddd y, x, y; \ vpaddd y, RK2, y; \ vpsrld $1, c, RT; \ vpslld $(32 - 1), c, c; \ vpor c, RT, c; \ vpxor d, y, d; \ #define decround_tail(a, b, c, d, x, y, prerotate) \ vpaddd x, y, x; \ vpaddd x, RK1, RT;\ prerotate(a); \ vpxor RT, c, c; \ vpaddd y, x, y; \ vpaddd y, RK2, y; \ vpxor d, y, d; \ vpsrld $1, d, y; \ vpslld $(32 - 1), d, d; \ vpor d, y, d; \ #define rotate_1l(x) \ vpslld $1, x, RR; \ vpsrld $(32 - 1), x, x; \ vpor x, RR, x; #define preload_rgi(c) \ vmovq c, RGI1; \ vpextrq $1, c, RGI2; #define encrypt_round(n, a, b, c, 
d, preload, prerotate) \ vbroadcastss (k+4*(2*(n)))(CTX), RK1; \ vbroadcastss (k+4*(2*(n)+1))(CTX), RK2; \ round_head_2(a, b, RX0, RY0, RX1, RY1); \ encround_tail(a ## 1, b ## 1, c ## 1, d ## 1, RX0, RY0, prerotate); \ preload(c ## 1); \ encround_tail(a ## 2, b ## 2, c ## 2, d ## 2, RX1, RY1, prerotate); #define decrypt_round(n, a, b, c, d, preload, prerotate) \ vbroadcastss (k+4*(2*(n)))(CTX), RK1; \ vbroadcastss (k+4*(2*(n)+1))(CTX), RK2; \ round_head_2(a, b, RX0, RY0, RX1, RY1); \ decround_tail(a ## 1, b ## 1, c ## 1, d ## 1, RX0, RY0, prerotate); \ preload(c ## 1); \ decround_tail(a ## 2, b ## 2, c ## 2, d ## 2, RX1, RY1, prerotate); #define encrypt_cycle(n) \ encrypt_round((2*n), RA, RB, RC, RD, preload_rgi, rotate_1l); \ encrypt_round(((2*n) + 1), RC, RD, RA, RB, preload_rgi, rotate_1l); #define encrypt_cycle_last(n) \ encrypt_round((2*n), RA, RB, RC, RD, preload_rgi, rotate_1l); \ encrypt_round(((2*n) + 1), RC, RD, RA, RB, dummy, dummy); #define decrypt_cycle(n) \ decrypt_round(((2*n) + 1), RC, RD, RA, RB, preload_rgi, rotate_1l); \ decrypt_round((2*n), RA, RB, RC, RD, preload_rgi, rotate_1l); #define decrypt_cycle_last(n) \ decrypt_round(((2*n) + 1), RC, RD, RA, RB, preload_rgi, rotate_1l); \ decrypt_round((2*n), RA, RB, RC, RD, dummy, dummy); #define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \ vpunpckldq x1, x0, t0; \ vpunpckhdq x1, x0, t2; \ vpunpckldq x3, x2, t1; \ vpunpckhdq x3, x2, x3; \ \ vpunpcklqdq t1, t0, x0; \ vpunpckhqdq t1, t0, x1; \ vpunpcklqdq x3, t2, x2; \ vpunpckhqdq x3, t2, x3; #define inpack_blocks(x0, x1, x2, x3, wkey, t0, t1, t2) \ vpxor x0, wkey, x0; \ vpxor x1, wkey, x1; \ vpxor x2, wkey, x2; \ vpxor x3, wkey, x3; \ \ transpose_4x4(x0, x1, x2, x3, t0, t1, t2) #define outunpack_blocks(x0, x1, x2, x3, wkey, t0, t1, t2) \ transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \ \ vpxor x0, wkey, x0; \ vpxor x1, wkey, x1; \ vpxor x2, wkey, x2; \ vpxor x3, wkey, x3; .align 8 SYM_FUNC_START_LOCAL(__twofish_enc_blk8) /* input: * %rdi: ctx, CTX * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: blocks * output: * RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2: encrypted blocks */ vmovdqu w(CTX), RK1; pushq %r13; pushq %rbx; pushq %rcx; inpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2); preload_rgi(RA1); rotate_1l(RD1); inpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2); rotate_1l(RD2); encrypt_cycle(0); encrypt_cycle(1); encrypt_cycle(2); encrypt_cycle(3); encrypt_cycle(4); encrypt_cycle(5); encrypt_cycle(6); encrypt_cycle_last(7); vmovdqu (w+4*4)(CTX), RK1; popq %rcx; popq %rbx; popq %r13; outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2); outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2); RET; SYM_FUNC_END(__twofish_enc_blk8) .align 8 SYM_FUNC_START_LOCAL(__twofish_dec_blk8) /* input: * %rdi: ctx, CTX * RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2: encrypted blocks * output: * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: decrypted blocks */ vmovdqu (w+4*4)(CTX), RK1; pushq %r13; pushq %rbx; inpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2); preload_rgi(RC1); rotate_1l(RA1); inpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2); rotate_1l(RA2); decrypt_cycle(7); decrypt_cycle(6); decrypt_cycle(5); decrypt_cycle(4); decrypt_cycle(3); decrypt_cycle(2); decrypt_cycle(1); decrypt_cycle_last(0); vmovdqu (w)(CTX), RK1; popq %rbx; popq %r13; outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2); outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2); RET; SYM_FUNC_END(__twofish_dec_blk8) SYM_FUNC_START(twofish_ecb_enc_8way) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src */ 
FRAME_BEGIN movq %rsi, %r11; load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); call __twofish_enc_blk8; store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2); FRAME_END RET; SYM_FUNC_END(twofish_ecb_enc_8way) SYM_FUNC_START(twofish_ecb_dec_8way) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src */ FRAME_BEGIN movq %rsi, %r11; load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2); call __twofish_dec_blk8; store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); FRAME_END RET; SYM_FUNC_END(twofish_ecb_dec_8way) SYM_FUNC_START(twofish_cbc_dec_8way) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src */ FRAME_BEGIN pushq %r12; movq %rsi, %r11; movq %rdx, %r12; load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2); call __twofish_dec_blk8; store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); popq %r12; FRAME_END RET; SYM_FUNC_END(twofish_cbc_dec_8way)
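The lookup_32bit() macro in the file above splits one 32-bit word into its four bytes, looks each byte up in a separate 256-entry table of 32-bit words (the key-dependent tables at offsets s0..s3 in the context) and XORs the four results, which is the core of Twofish's g/h function. A hedged C equivalent, with illustrative parameter names rather than the real context layout, reads:

#include <stdint.h>

/* dst = t0[byte0] ^ t1[byte1] ^ t2[byte2] ^ t3[byte3], as in lookup_32bit() */
static uint32_t lookup_32bit(const uint32_t t0[256], const uint32_t t1[256],
			     const uint32_t t2[256], const uint32_t t3[256],
			     uint32_t src)
{
	return t0[(src >>  0) & 0xff] ^
	       t1[(src >>  8) & 0xff] ^
	       t2[(src >> 16) & 0xff] ^
	       t3[(src >> 24) & 0xff];
}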
aixcc-public/challenge-001-exemplar-source
28,735
arch/x86/crypto/camellia-aesni-avx2-asm_64.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * x86_64/AVX2/AES-NI assembler implementation of Camellia * * Copyright © 2013 Jussi Kivilinna <jussi.kivilinna@iki.fi> */ #include <linux/linkage.h> #include <asm/frame.h> #define CAMELLIA_TABLE_BYTE_LEN 272 /* struct camellia_ctx: */ #define key_table 0 #define key_length CAMELLIA_TABLE_BYTE_LEN /* register macros */ #define CTX %rdi #define RIO %r8 /********************************************************************** helper macros **********************************************************************/ #define filter_8bit(x, lo_t, hi_t, mask4bit, tmp0) \ vpand x, mask4bit, tmp0; \ vpandn x, mask4bit, x; \ vpsrld $4, x, x; \ \ vpshufb tmp0, lo_t, tmp0; \ vpshufb x, hi_t, x; \ vpxor tmp0, x, x; #define ymm0_x xmm0 #define ymm1_x xmm1 #define ymm2_x xmm2 #define ymm3_x xmm3 #define ymm4_x xmm4 #define ymm5_x xmm5 #define ymm6_x xmm6 #define ymm7_x xmm7 #define ymm8_x xmm8 #define ymm9_x xmm9 #define ymm10_x xmm10 #define ymm11_x xmm11 #define ymm12_x xmm12 #define ymm13_x xmm13 #define ymm14_x xmm14 #define ymm15_x xmm15 /********************************************************************** 32-way camellia **********************************************************************/ /* * IN: * x0..x7: byte-sliced AB state * mem_cd: register pointer storing CD state * key: index for key material * OUT: * x0..x7: new byte-sliced CD state */ #define roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \ t7, mem_cd, key) \ /* \ * S-function with AES subbytes \ */ \ vbroadcasti128 .Linv_shift_row, t4; \ vpbroadcastd .L0f0f0f0f, t7; \ vbroadcasti128 .Lpre_tf_lo_s1, t5; \ vbroadcasti128 .Lpre_tf_hi_s1, t6; \ vbroadcasti128 .Lpre_tf_lo_s4, t2; \ vbroadcasti128 .Lpre_tf_hi_s4, t3; \ \ /* AES inverse shift rows */ \ vpshufb t4, x0, x0; \ vpshufb t4, x7, x7; \ vpshufb t4, x3, x3; \ vpshufb t4, x6, x6; \ vpshufb t4, x2, x2; \ vpshufb t4, x5, x5; \ vpshufb t4, x1, x1; \ vpshufb t4, x4, x4; \ \ /* prefilter sboxes 1, 2 and 3 */ \ /* prefilter sbox 4 */ \ filter_8bit(x0, t5, t6, t7, t4); \ filter_8bit(x7, t5, t6, t7, t4); \ vextracti128 $1, x0, t0##_x; \ vextracti128 $1, x7, t1##_x; \ filter_8bit(x3, t2, t3, t7, t4); \ filter_8bit(x6, t2, t3, t7, t4); \ vextracti128 $1, x3, t3##_x; \ vextracti128 $1, x6, t2##_x; \ filter_8bit(x2, t5, t6, t7, t4); \ filter_8bit(x5, t5, t6, t7, t4); \ filter_8bit(x1, t5, t6, t7, t4); \ filter_8bit(x4, t5, t6, t7, t4); \ \ vpxor t4##_x, t4##_x, t4##_x; \ \ /* AES subbytes + AES shift rows */ \ vextracti128 $1, x2, t6##_x; \ vextracti128 $1, x5, t5##_x; \ vaesenclast t4##_x, x0##_x, x0##_x; \ vaesenclast t4##_x, t0##_x, t0##_x; \ vinserti128 $1, t0##_x, x0, x0; \ vaesenclast t4##_x, x7##_x, x7##_x; \ vaesenclast t4##_x, t1##_x, t1##_x; \ vinserti128 $1, t1##_x, x7, x7; \ vaesenclast t4##_x, x3##_x, x3##_x; \ vaesenclast t4##_x, t3##_x, t3##_x; \ vinserti128 $1, t3##_x, x3, x3; \ vaesenclast t4##_x, x6##_x, x6##_x; \ vaesenclast t4##_x, t2##_x, t2##_x; \ vinserti128 $1, t2##_x, x6, x6; \ vextracti128 $1, x1, t3##_x; \ vextracti128 $1, x4, t2##_x; \ vbroadcasti128 .Lpost_tf_lo_s1, t0; \ vbroadcasti128 .Lpost_tf_hi_s1, t1; \ vaesenclast t4##_x, x2##_x, x2##_x; \ vaesenclast t4##_x, t6##_x, t6##_x; \ vinserti128 $1, t6##_x, x2, x2; \ vaesenclast t4##_x, x5##_x, x5##_x; \ vaesenclast t4##_x, t5##_x, t5##_x; \ vinserti128 $1, t5##_x, x5, x5; \ vaesenclast t4##_x, x1##_x, x1##_x; \ vaesenclast t4##_x, t3##_x, t3##_x; \ vinserti128 $1, t3##_x, x1, x1; \ vaesenclast t4##_x, x4##_x, x4##_x; \ vaesenclast t4##_x, t2##_x, 
t2##_x; \ vinserti128 $1, t2##_x, x4, x4; \ \ /* postfilter sboxes 1 and 4 */ \ vbroadcasti128 .Lpost_tf_lo_s3, t2; \ vbroadcasti128 .Lpost_tf_hi_s3, t3; \ filter_8bit(x0, t0, t1, t7, t6); \ filter_8bit(x7, t0, t1, t7, t6); \ filter_8bit(x3, t0, t1, t7, t6); \ filter_8bit(x6, t0, t1, t7, t6); \ \ /* postfilter sbox 3 */ \ vbroadcasti128 .Lpost_tf_lo_s2, t4; \ vbroadcasti128 .Lpost_tf_hi_s2, t5; \ filter_8bit(x2, t2, t3, t7, t6); \ filter_8bit(x5, t2, t3, t7, t6); \ \ vpbroadcastq key, t0; /* higher 64-bit duplicate ignored */ \ \ /* postfilter sbox 2 */ \ filter_8bit(x1, t4, t5, t7, t2); \ filter_8bit(x4, t4, t5, t7, t2); \ vpxor t7, t7, t7; \ \ vpsrldq $1, t0, t1; \ vpsrldq $2, t0, t2; \ vpshufb t7, t1, t1; \ vpsrldq $3, t0, t3; \ \ /* P-function */ \ vpxor x5, x0, x0; \ vpxor x6, x1, x1; \ vpxor x7, x2, x2; \ vpxor x4, x3, x3; \ \ vpshufb t7, t2, t2; \ vpsrldq $4, t0, t4; \ vpshufb t7, t3, t3; \ vpsrldq $5, t0, t5; \ vpshufb t7, t4, t4; \ \ vpxor x2, x4, x4; \ vpxor x3, x5, x5; \ vpxor x0, x6, x6; \ vpxor x1, x7, x7; \ \ vpsrldq $6, t0, t6; \ vpshufb t7, t5, t5; \ vpshufb t7, t6, t6; \ \ vpxor x7, x0, x0; \ vpxor x4, x1, x1; \ vpxor x5, x2, x2; \ vpxor x6, x3, x3; \ \ vpxor x3, x4, x4; \ vpxor x0, x5, x5; \ vpxor x1, x6, x6; \ vpxor x2, x7, x7; /* note: high and low parts swapped */ \ \ /* Add key material and result to CD (x becomes new CD) */ \ \ vpxor t6, x1, x1; \ vpxor 5 * 32(mem_cd), x1, x1; \ \ vpsrldq $7, t0, t6; \ vpshufb t7, t0, t0; \ vpshufb t7, t6, t7; \ \ vpxor t7, x0, x0; \ vpxor 4 * 32(mem_cd), x0, x0; \ \ vpxor t5, x2, x2; \ vpxor 6 * 32(mem_cd), x2, x2; \ \ vpxor t4, x3, x3; \ vpxor 7 * 32(mem_cd), x3, x3; \ \ vpxor t3, x4, x4; \ vpxor 0 * 32(mem_cd), x4, x4; \ \ vpxor t2, x5, x5; \ vpxor 1 * 32(mem_cd), x5, x5; \ \ vpxor t1, x6, x6; \ vpxor 2 * 32(mem_cd), x6, x6; \ \ vpxor t0, x7, x7; \ vpxor 3 * 32(mem_cd), x7, x7; /* * Size optimization... with inlined roundsm32 binary would be over 5 times * larger and would only marginally faster. 
*/ .align 8 SYM_FUNC_START_LOCAL(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd) roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rcx, (%r9)); RET; SYM_FUNC_END(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd) .align 8 SYM_FUNC_START_LOCAL(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3, %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11, %rax, (%r9)); RET; SYM_FUNC_END(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) /* * IN/OUT: * x0..x7: byte-sliced AB state preloaded * mem_ab: byte-sliced AB state in memory * mem_cb: byte-sliced CD state in memory */ #define two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, i, dir, store_ab) \ leaq (key_table + (i) * 8)(CTX), %r9; \ call roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd; \ \ vmovdqu x0, 4 * 32(mem_cd); \ vmovdqu x1, 5 * 32(mem_cd); \ vmovdqu x2, 6 * 32(mem_cd); \ vmovdqu x3, 7 * 32(mem_cd); \ vmovdqu x4, 0 * 32(mem_cd); \ vmovdqu x5, 1 * 32(mem_cd); \ vmovdqu x6, 2 * 32(mem_cd); \ vmovdqu x7, 3 * 32(mem_cd); \ \ leaq (key_table + ((i) + (dir)) * 8)(CTX), %r9; \ call roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab; \ \ store_ab(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab); #define dummy_store(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) /* do nothing */ #define store_ab_state(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) \ /* Store new AB state */ \ vmovdqu x4, 4 * 32(mem_ab); \ vmovdqu x5, 5 * 32(mem_ab); \ vmovdqu x6, 6 * 32(mem_ab); \ vmovdqu x7, 7 * 32(mem_ab); \ vmovdqu x0, 0 * 32(mem_ab); \ vmovdqu x1, 1 * 32(mem_ab); \ vmovdqu x2, 2 * 32(mem_ab); \ vmovdqu x3, 3 * 32(mem_ab); #define enc_rounds32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, i) \ two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 2, 1, store_ab_state); \ two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 4, 1, store_ab_state); \ two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 6, 1, dummy_store); #define dec_rounds32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, i) \ two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 7, -1, store_ab_state); \ two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 5, -1, store_ab_state); \ two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 3, -1, dummy_store); /* * IN: * v0..3: byte-sliced 32-bit integers * OUT: * v0..3: (IN <<< 1) */ #define rol32_1_32(v0, v1, v2, v3, t0, t1, t2, zero) \ vpcmpgtb v0, zero, t0; \ vpaddb v0, v0, v0; \ vpabsb t0, t0; \ \ vpcmpgtb v1, zero, t1; \ vpaddb v1, v1, v1; \ vpabsb t1, t1; \ \ vpcmpgtb v2, zero, t2; \ vpaddb v2, v2, v2; \ vpabsb t2, t2; \ \ vpor t0, v1, v1; \ \ vpcmpgtb v3, zero, t0; \ vpaddb v3, v3, v3; \ vpabsb t0, t0; \ \ vpor t1, v2, v2; \ vpor t2, v3, v3; \ vpor t0, v0, v0; /* * IN: * r: byte-sliced AB state in memory * l: byte-sliced CD state in memory * OUT: * x0..x7: new byte-sliced CD state */ #define fls32(l, l0, l1, l2, l3, l4, l5, l6, l7, r, t0, t1, t2, t3, tt0, \ tt1, tt2, tt3, kll, klr, krl, krr) \ /* \ * t0 = kll; \ * t0 &= ll; \ * lr ^= rol32(t0, 
1); \ */ \ vpbroadcastd kll, t0; /* only lowest 32-bit used */ \ vpxor tt0, tt0, tt0; \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t0; \ \ vpand l0, t0, t0; \ vpand l1, t1, t1; \ vpand l2, t2, t2; \ vpand l3, t3, t3; \ \ rol32_1_32(t3, t2, t1, t0, tt1, tt2, tt3, tt0); \ \ vpxor l4, t0, l4; \ vpbroadcastd krr, t0; /* only lowest 32-bit used */ \ vmovdqu l4, 4 * 32(l); \ vpxor l5, t1, l5; \ vmovdqu l5, 5 * 32(l); \ vpxor l6, t2, l6; \ vmovdqu l6, 6 * 32(l); \ vpxor l7, t3, l7; \ vmovdqu l7, 7 * 32(l); \ \ /* \ * t2 = krr; \ * t2 |= rr; \ * rl ^= t2; \ */ \ \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t0; \ \ vpor 4 * 32(r), t0, t0; \ vpor 5 * 32(r), t1, t1; \ vpor 6 * 32(r), t2, t2; \ vpor 7 * 32(r), t3, t3; \ \ vpxor 0 * 32(r), t0, t0; \ vpxor 1 * 32(r), t1, t1; \ vpxor 2 * 32(r), t2, t2; \ vpxor 3 * 32(r), t3, t3; \ vmovdqu t0, 0 * 32(r); \ vpbroadcastd krl, t0; /* only lowest 32-bit used */ \ vmovdqu t1, 1 * 32(r); \ vmovdqu t2, 2 * 32(r); \ vmovdqu t3, 3 * 32(r); \ \ /* \ * t2 = krl; \ * t2 &= rl; \ * rr ^= rol32(t2, 1); \ */ \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t0; \ \ vpand 0 * 32(r), t0, t0; \ vpand 1 * 32(r), t1, t1; \ vpand 2 * 32(r), t2, t2; \ vpand 3 * 32(r), t3, t3; \ \ rol32_1_32(t3, t2, t1, t0, tt1, tt2, tt3, tt0); \ \ vpxor 4 * 32(r), t0, t0; \ vpxor 5 * 32(r), t1, t1; \ vpxor 6 * 32(r), t2, t2; \ vpxor 7 * 32(r), t3, t3; \ vmovdqu t0, 4 * 32(r); \ vpbroadcastd klr, t0; /* only lowest 32-bit used */ \ vmovdqu t1, 5 * 32(r); \ vmovdqu t2, 6 * 32(r); \ vmovdqu t3, 7 * 32(r); \ \ /* \ * t0 = klr; \ * t0 |= lr; \ * ll ^= t0; \ */ \ \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t0; \ \ vpor l4, t0, t0; \ vpor l5, t1, t1; \ vpor l6, t2, t2; \ vpor l7, t3, t3; \ \ vpxor l0, t0, l0; \ vmovdqu l0, 0 * 32(l); \ vpxor l1, t1, l1; \ vmovdqu l1, 1 * 32(l); \ vpxor l2, t2, l2; \ vmovdqu l2, 2 * 32(l); \ vpxor l3, t3, l3; \ vmovdqu l3, 3 * 32(l); #define transpose_4x4(x0, x1, x2, x3, t1, t2) \ vpunpckhdq x1, x0, t2; \ vpunpckldq x1, x0, x0; \ \ vpunpckldq x3, x2, t1; \ vpunpckhdq x3, x2, x2; \ \ vpunpckhqdq t1, x0, x1; \ vpunpcklqdq t1, x0, x0; \ \ vpunpckhqdq x2, t2, x3; \ vpunpcklqdq x2, t2, x2; #define byteslice_16x16b_fast(a0, b0, c0, d0, a1, b1, c1, d1, a2, b2, c2, d2, \ a3, b3, c3, d3, st0, st1) \ vmovdqu d2, st0; \ vmovdqu d3, st1; \ transpose_4x4(a0, a1, a2, a3, d2, d3); \ transpose_4x4(b0, b1, b2, b3, d2, d3); \ vmovdqu st0, d2; \ vmovdqu st1, d3; \ \ vmovdqu a0, st0; \ vmovdqu a1, st1; \ transpose_4x4(c0, c1, c2, c3, a0, a1); \ transpose_4x4(d0, d1, d2, d3, a0, a1); \ \ vbroadcasti128 .Lshufb_16x16b, a0; \ vmovdqu st1, a1; \ vpshufb a0, a2, a2; \ vpshufb a0, a3, a3; \ vpshufb a0, b0, b0; \ vpshufb a0, b1, b1; \ vpshufb a0, b2, b2; \ vpshufb a0, b3, b3; \ vpshufb a0, a1, a1; \ vpshufb a0, c0, c0; \ vpshufb a0, c1, c1; \ vpshufb a0, c2, c2; \ vpshufb a0, c3, c3; \ vpshufb a0, d0, d0; \ vpshufb a0, d1, d1; \ vpshufb a0, d2, d2; \ vpshufb a0, d3, d3; \ vmovdqu d3, st1; \ vmovdqu st0, d3; \ vpshufb a0, d3, a0; \ vmovdqu d2, st0; \ \ transpose_4x4(a0, b0, c0, d0, d2, d3); \ transpose_4x4(a1, b1, c1, d1, d2, d3); \ vmovdqu st0, d2; \ vmovdqu st1, d3; \ \ 
vmovdqu b0, st0; \ vmovdqu b1, st1; \ transpose_4x4(a2, b2, c2, d2, b0, b1); \ transpose_4x4(a3, b3, c3, d3, b0, b1); \ vmovdqu st0, b0; \ vmovdqu st1, b1; \ /* does not adjust output bytes inside vectors */ /* load blocks to registers and apply pre-whitening */ #define inpack32_pre(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, rio, key) \ vpbroadcastq key, x0; \ vpshufb .Lpack_bswap, x0, x0; \ \ vpxor 0 * 32(rio), x0, y7; \ vpxor 1 * 32(rio), x0, y6; \ vpxor 2 * 32(rio), x0, y5; \ vpxor 3 * 32(rio), x0, y4; \ vpxor 4 * 32(rio), x0, y3; \ vpxor 5 * 32(rio), x0, y2; \ vpxor 6 * 32(rio), x0, y1; \ vpxor 7 * 32(rio), x0, y0; \ vpxor 8 * 32(rio), x0, x7; \ vpxor 9 * 32(rio), x0, x6; \ vpxor 10 * 32(rio), x0, x5; \ vpxor 11 * 32(rio), x0, x4; \ vpxor 12 * 32(rio), x0, x3; \ vpxor 13 * 32(rio), x0, x2; \ vpxor 14 * 32(rio), x0, x1; \ vpxor 15 * 32(rio), x0, x0; /* byteslice pre-whitened blocks and store to temporary memory */ #define inpack32_post(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd) \ byteslice_16x16b_fast(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, \ y4, y5, y6, y7, (mem_ab), (mem_cd)); \ \ vmovdqu x0, 0 * 32(mem_ab); \ vmovdqu x1, 1 * 32(mem_ab); \ vmovdqu x2, 2 * 32(mem_ab); \ vmovdqu x3, 3 * 32(mem_ab); \ vmovdqu x4, 4 * 32(mem_ab); \ vmovdqu x5, 5 * 32(mem_ab); \ vmovdqu x6, 6 * 32(mem_ab); \ vmovdqu x7, 7 * 32(mem_ab); \ vmovdqu y0, 0 * 32(mem_cd); \ vmovdqu y1, 1 * 32(mem_cd); \ vmovdqu y2, 2 * 32(mem_cd); \ vmovdqu y3, 3 * 32(mem_cd); \ vmovdqu y4, 4 * 32(mem_cd); \ vmovdqu y5, 5 * 32(mem_cd); \ vmovdqu y6, 6 * 32(mem_cd); \ vmovdqu y7, 7 * 32(mem_cd); /* de-byteslice, apply post-whitening and store blocks */ #define outunpack32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, \ y5, y6, y7, key, stack_tmp0, stack_tmp1) \ byteslice_16x16b_fast(y0, y4, x0, x4, y1, y5, x1, x5, y2, y6, x2, x6, \ y3, y7, x3, x7, stack_tmp0, stack_tmp1); \ \ vmovdqu x0, stack_tmp0; \ \ vpbroadcastq key, x0; \ vpshufb .Lpack_bswap, x0, x0; \ \ vpxor x0, y7, y7; \ vpxor x0, y6, y6; \ vpxor x0, y5, y5; \ vpxor x0, y4, y4; \ vpxor x0, y3, y3; \ vpxor x0, y2, y2; \ vpxor x0, y1, y1; \ vpxor x0, y0, y0; \ vpxor x0, x7, x7; \ vpxor x0, x6, x6; \ vpxor x0, x5, x5; \ vpxor x0, x4, x4; \ vpxor x0, x3, x3; \ vpxor x0, x2, x2; \ vpxor x0, x1, x1; \ vpxor stack_tmp0, x0, x0; #define write_output(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, rio) \ vmovdqu x0, 0 * 32(rio); \ vmovdqu x1, 1 * 32(rio); \ vmovdqu x2, 2 * 32(rio); \ vmovdqu x3, 3 * 32(rio); \ vmovdqu x4, 4 * 32(rio); \ vmovdqu x5, 5 * 32(rio); \ vmovdqu x6, 6 * 32(rio); \ vmovdqu x7, 7 * 32(rio); \ vmovdqu y0, 8 * 32(rio); \ vmovdqu y1, 9 * 32(rio); \ vmovdqu y2, 10 * 32(rio); \ vmovdqu y3, 11 * 32(rio); \ vmovdqu y4, 12 * 32(rio); \ vmovdqu y5, 13 * 32(rio); \ vmovdqu y6, 14 * 32(rio); \ vmovdqu y7, 15 * 32(rio); .section .rodata.cst32.shufb_16x16b, "aM", @progbits, 32 .align 32 #define SHUFB_BYTES(idx) \ 0 + (idx), 4 + (idx), 8 + (idx), 12 + (idx) .Lshufb_16x16b: .byte SHUFB_BYTES(0), SHUFB_BYTES(1), SHUFB_BYTES(2), SHUFB_BYTES(3) .byte SHUFB_BYTES(0), SHUFB_BYTES(1), SHUFB_BYTES(2), SHUFB_BYTES(3) .section .rodata.cst32.pack_bswap, "aM", @progbits, 32 .align 32 .Lpack_bswap: .long 0x00010203, 0x04050607, 0x80808080, 0x80808080 .long 0x00010203, 0x04050607, 0x80808080, 0x80808080 /* NB: section is mergeable, all elements must be aligned 16-byte blocks */ .section .rodata.cst16, "aM", @progbits, 16 .align 16 /* * pre-SubByte transform * * pre-lookup for sbox1, sbox2, 
sbox3: * swap_bitendianness( * isom_map_camellia_to_aes( * camellia_f( * swap_bitendianess(in) * ) * ) * ) * * (note: '⊕ 0xc5' inside camellia_f()) */ .Lpre_tf_lo_s1: .byte 0x45, 0xe8, 0x40, 0xed, 0x2e, 0x83, 0x2b, 0x86 .byte 0x4b, 0xe6, 0x4e, 0xe3, 0x20, 0x8d, 0x25, 0x88 .Lpre_tf_hi_s1: .byte 0x00, 0x51, 0xf1, 0xa0, 0x8a, 0xdb, 0x7b, 0x2a .byte 0x09, 0x58, 0xf8, 0xa9, 0x83, 0xd2, 0x72, 0x23 /* * pre-SubByte transform * * pre-lookup for sbox4: * swap_bitendianness( * isom_map_camellia_to_aes( * camellia_f( * swap_bitendianess(in <<< 1) * ) * ) * ) * * (note: '⊕ 0xc5' inside camellia_f()) */ .Lpre_tf_lo_s4: .byte 0x45, 0x40, 0x2e, 0x2b, 0x4b, 0x4e, 0x20, 0x25 .byte 0x14, 0x11, 0x7f, 0x7a, 0x1a, 0x1f, 0x71, 0x74 .Lpre_tf_hi_s4: .byte 0x00, 0xf1, 0x8a, 0x7b, 0x09, 0xf8, 0x83, 0x72 .byte 0xad, 0x5c, 0x27, 0xd6, 0xa4, 0x55, 0x2e, 0xdf /* * post-SubByte transform * * post-lookup for sbox1, sbox4: * swap_bitendianness( * camellia_h( * isom_map_aes_to_camellia( * swap_bitendianness( * aes_inverse_affine_transform(in) * ) * ) * ) * ) * * (note: '⊕ 0x6e' inside camellia_h()) */ .Lpost_tf_lo_s1: .byte 0x3c, 0xcc, 0xcf, 0x3f, 0x32, 0xc2, 0xc1, 0x31 .byte 0xdc, 0x2c, 0x2f, 0xdf, 0xd2, 0x22, 0x21, 0xd1 .Lpost_tf_hi_s1: .byte 0x00, 0xf9, 0x86, 0x7f, 0xd7, 0x2e, 0x51, 0xa8 .byte 0xa4, 0x5d, 0x22, 0xdb, 0x73, 0x8a, 0xf5, 0x0c /* * post-SubByte transform * * post-lookup for sbox2: * swap_bitendianness( * camellia_h( * isom_map_aes_to_camellia( * swap_bitendianness( * aes_inverse_affine_transform(in) * ) * ) * ) * ) <<< 1 * * (note: '⊕ 0x6e' inside camellia_h()) */ .Lpost_tf_lo_s2: .byte 0x78, 0x99, 0x9f, 0x7e, 0x64, 0x85, 0x83, 0x62 .byte 0xb9, 0x58, 0x5e, 0xbf, 0xa5, 0x44, 0x42, 0xa3 .Lpost_tf_hi_s2: .byte 0x00, 0xf3, 0x0d, 0xfe, 0xaf, 0x5c, 0xa2, 0x51 .byte 0x49, 0xba, 0x44, 0xb7, 0xe6, 0x15, 0xeb, 0x18 /* * post-SubByte transform * * post-lookup for sbox3: * swap_bitendianness( * camellia_h( * isom_map_aes_to_camellia( * swap_bitendianness( * aes_inverse_affine_transform(in) * ) * ) * ) * ) >>> 1 * * (note: '⊕ 0x6e' inside camellia_h()) */ .Lpost_tf_lo_s3: .byte 0x1e, 0x66, 0xe7, 0x9f, 0x19, 0x61, 0xe0, 0x98 .byte 0x6e, 0x16, 0x97, 0xef, 0x69, 0x11, 0x90, 0xe8 .Lpost_tf_hi_s3: .byte 0x00, 0xfc, 0x43, 0xbf, 0xeb, 0x17, 0xa8, 0x54 .byte 0x52, 0xae, 0x11, 0xed, 0xb9, 0x45, 0xfa, 0x06 /* For isolating SubBytes from AESENCLAST, inverse shift row */ .Linv_shift_row: .byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b .byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03 .section .rodata.cst4.L0f0f0f0f, "aM", @progbits, 4 .align 4 /* 4-bit mask */ .L0f0f0f0f: .long 0x0f0f0f0f .text .align 8 SYM_FUNC_START_LOCAL(__camellia_enc_blk32) /* input: * %rdi: ctx, CTX * %rax: temporary storage, 512 bytes * %ymm0..%ymm15: 32 plaintext blocks * output: * %ymm0..%ymm15: 32 encrypted blocks, order swapped: * 7, 8, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 */ FRAME_BEGIN leaq 8 * 32(%rax), %rcx; inpack32_post(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rax, %rcx); enc_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rax, %rcx, 0); fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, ((key_table + (8) * 8) + 0)(CTX), ((key_table + (8) * 8) + 4)(CTX), ((key_table + (8) * 8) + 8)(CTX), ((key_table + (8) * 8) + 12)(CTX)); enc_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, 
%ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rax, %rcx, 8); fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, ((key_table + (16) * 8) + 0)(CTX), ((key_table + (16) * 8) + 4)(CTX), ((key_table + (16) * 8) + 8)(CTX), ((key_table + (16) * 8) + 12)(CTX)); enc_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rax, %rcx, 16); movl $24, %r8d; cmpl $16, key_length(CTX); jne .Lenc_max32; .Lenc_done: /* load CD for output */ vmovdqu 0 * 32(%rcx), %ymm8; vmovdqu 1 * 32(%rcx), %ymm9; vmovdqu 2 * 32(%rcx), %ymm10; vmovdqu 3 * 32(%rcx), %ymm11; vmovdqu 4 * 32(%rcx), %ymm12; vmovdqu 5 * 32(%rcx), %ymm13; vmovdqu 6 * 32(%rcx), %ymm14; vmovdqu 7 * 32(%rcx), %ymm15; outunpack32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax)); FRAME_END RET; .align 8 .Lenc_max32: movl $32, %r8d; fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, ((key_table + (24) * 8) + 0)(CTX), ((key_table + (24) * 8) + 4)(CTX), ((key_table + (24) * 8) + 8)(CTX), ((key_table + (24) * 8) + 12)(CTX)); enc_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rax, %rcx, 24); jmp .Lenc_done; SYM_FUNC_END(__camellia_enc_blk32) .align 8 SYM_FUNC_START_LOCAL(__camellia_dec_blk32) /* input: * %rdi: ctx, CTX * %rax: temporary storage, 512 bytes * %r8d: 24 for 16 byte key, 32 for larger * %ymm0..%ymm15: 16 encrypted blocks * output: * %ymm0..%ymm15: 16 plaintext blocks, order swapped: * 7, 8, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 */ FRAME_BEGIN leaq 8 * 32(%rax), %rcx; inpack32_post(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rax, %rcx); cmpl $32, %r8d; je .Ldec_max32; .Ldec_max24: dec_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rax, %rcx, 16); fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, ((key_table + (16) * 8) + 8)(CTX), ((key_table + (16) * 8) + 12)(CTX), ((key_table + (16) * 8) + 0)(CTX), ((key_table + (16) * 8) + 4)(CTX)); dec_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rax, %rcx, 8); fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, ((key_table + (8) * 8) + 8)(CTX), ((key_table + (8) * 8) + 12)(CTX), ((key_table + (8) * 8) + 0)(CTX), ((key_table + (8) * 8) + 4)(CTX)); dec_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rax, %rcx, 0); /* load CD for output */ vmovdqu 0 * 32(%rcx), %ymm8; vmovdqu 1 * 32(%rcx), %ymm9; vmovdqu 2 * 32(%rcx), %ymm10; vmovdqu 3 * 32(%rcx), %ymm11; vmovdqu 4 * 32(%rcx), %ymm12; vmovdqu 5 * 32(%rcx), %ymm13; vmovdqu 6 * 32(%rcx), %ymm14; vmovdqu 7 * 32(%rcx), %ymm15; outunpack32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax)); FRAME_END RET; .align 8 
.Ldec_max32: dec_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rax, %rcx, 24); fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, ((key_table + (24) * 8) + 8)(CTX), ((key_table + (24) * 8) + 12)(CTX), ((key_table + (24) * 8) + 0)(CTX), ((key_table + (24) * 8) + 4)(CTX)); jmp .Ldec_max24; SYM_FUNC_END(__camellia_dec_blk32) SYM_FUNC_START(camellia_ecb_enc_32way) /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) * %rdx: src (32 blocks) */ FRAME_BEGIN vzeroupper; inpack32_pre(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rdx, (key_table)(CTX)); /* now dst can be used as temporary buffer (even in src == dst case) */ movq %rsi, %rax; call __camellia_enc_blk32; write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0, %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9, %ymm8, %rsi); vzeroupper; FRAME_END RET; SYM_FUNC_END(camellia_ecb_enc_32way) SYM_FUNC_START(camellia_ecb_dec_32way) /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) * %rdx: src (32 blocks) */ FRAME_BEGIN vzeroupper; cmpl $16, key_length(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ inpack32_pre(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rdx, (key_table)(CTX, %r8, 8)); /* now dst can be used as temporary buffer (even in src == dst case) */ movq %rsi, %rax; call __camellia_dec_blk32; write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0, %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9, %ymm8, %rsi); vzeroupper; FRAME_END RET; SYM_FUNC_END(camellia_ecb_dec_32way) SYM_FUNC_START(camellia_cbc_dec_32way) /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) * %rdx: src (32 blocks) */ FRAME_BEGIN subq $(16 * 32), %rsp; vzeroupper; cmpl $16, key_length(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ inpack32_pre(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rdx, (key_table)(CTX, %r8, 8)); cmpq %rsi, %rdx; je .Lcbc_dec_use_stack; /* dst can be used as temporary storage, src is not overwritten. */ movq %rsi, %rax; jmp .Lcbc_dec_continue; .Lcbc_dec_use_stack: /* * dst still in-use (because dst == src), so use stack for temporary * storage. */ movq %rsp, %rax; .Lcbc_dec_continue: call __camellia_dec_blk32; vmovdqu %ymm7, (%rax); vpxor %ymm7, %ymm7, %ymm7; vinserti128 $1, (%rdx), %ymm7, %ymm7; vpxor (%rax), %ymm7, %ymm7; vpxor (0 * 32 + 16)(%rdx), %ymm6, %ymm6; vpxor (1 * 32 + 16)(%rdx), %ymm5, %ymm5; vpxor (2 * 32 + 16)(%rdx), %ymm4, %ymm4; vpxor (3 * 32 + 16)(%rdx), %ymm3, %ymm3; vpxor (4 * 32 + 16)(%rdx), %ymm2, %ymm2; vpxor (5 * 32 + 16)(%rdx), %ymm1, %ymm1; vpxor (6 * 32 + 16)(%rdx), %ymm0, %ymm0; vpxor (7 * 32 + 16)(%rdx), %ymm15, %ymm15; vpxor (8 * 32 + 16)(%rdx), %ymm14, %ymm14; vpxor (9 * 32 + 16)(%rdx), %ymm13, %ymm13; vpxor (10 * 32 + 16)(%rdx), %ymm12, %ymm12; vpxor (11 * 32 + 16)(%rdx), %ymm11, %ymm11; vpxor (12 * 32 + 16)(%rdx), %ymm10, %ymm10; vpxor (13 * 32 + 16)(%rdx), %ymm9, %ymm9; vpxor (14 * 32 + 16)(%rdx), %ymm8, %ymm8; write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0, %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9, %ymm8, %rsi); vzeroupper; addq $(16 * 32), %rsp; FRAME_END RET; SYM_FUNC_END(camellia_cbc_dec_32way)
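The fls32() macro above computes Camellia's FL/FL^-1 layer byte-sliced across 32 blocks; the scalar operations are spelled out in its embedded comments ("t0 = kll; t0 &= ll; lr ^= rol32(t0, 1);" and so on). A compact scalar sketch in C, illustrative only and following those comments, is:

#include <stdint.h>

static uint32_t rol32(uint32_t v, int n)
{
	return (v << n) | (v >> (32 - n));
}

/* ll/lr and rl/rr are the 32-bit halves of the L and R words,
 * kll/klr/krl/krr the four subkey words used by fls32(). */
static void camellia_fls(uint32_t *ll, uint32_t *lr, uint32_t *rl, uint32_t *rr,
			 uint32_t kll, uint32_t klr, uint32_t krl, uint32_t krr)
{
	*lr ^= rol32(*ll & kll, 1);	/* t0 = kll; t0 &= ll; lr ^= rol32(t0, 1) */
	*rl ^= *rr | krr;		/* t2 = krr; t2 |= rr; rl ^= t2 */
	*rr ^= rol32(*rl & krl, 1);	/* t2 = krl; t2 &= rl; rr ^= rol32(t2, 1) */
	*ll ^= *lr | klr;		/* t0 = klr; t0 |= lr; ll ^= t0 */
}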
aixcc-public/challenge-001-exemplar-source
18,538
arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Serpent Cipher 8-way parallel algorithm (x86_64/SSE2) * * Copyright (C) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> * * Based on crypto/serpent.c by * Copyright (C) 2002 Dag Arne Osvik <osvik@ii.uib.no> * 2003 Herbert Valerio Riedel <hvr@gnu.org> */ #include <linux/linkage.h> .file "serpent-sse2-x86_64-asm_64.S" .text #define CTX %rdi /********************************************************************** 8-way SSE2 serpent **********************************************************************/ #define RA1 %xmm0 #define RB1 %xmm1 #define RC1 %xmm2 #define RD1 %xmm3 #define RE1 %xmm4 #define RA2 %xmm5 #define RB2 %xmm6 #define RC2 %xmm7 #define RD2 %xmm8 #define RE2 %xmm9 #define RNOT %xmm10 #define RK0 %xmm11 #define RK1 %xmm12 #define RK2 %xmm13 #define RK3 %xmm14 #define S0_1(x0, x1, x2, x3, x4) \ movdqa x3, x4; \ por x0, x3; \ pxor x4, x0; \ pxor x2, x4; \ pxor RNOT, x4; \ pxor x1, x3; \ pand x0, x1; \ pxor x4, x1; \ pxor x0, x2; #define S0_2(x0, x1, x2, x3, x4) \ pxor x3, x0; \ por x0, x4; \ pxor x2, x0; \ pand x1, x2; \ pxor x2, x3; \ pxor RNOT, x1; \ pxor x4, x2; \ pxor x2, x1; #define S1_1(x0, x1, x2, x3, x4) \ movdqa x1, x4; \ pxor x0, x1; \ pxor x3, x0; \ pxor RNOT, x3; \ pand x1, x4; \ por x1, x0; \ pxor x2, x3; \ pxor x3, x0; \ pxor x3, x1; #define S1_2(x0, x1, x2, x3, x4) \ pxor x4, x3; \ por x4, x1; \ pxor x2, x4; \ pand x0, x2; \ pxor x1, x2; \ por x0, x1; \ pxor RNOT, x0; \ pxor x2, x0; \ pxor x1, x4; #define S2_1(x0, x1, x2, x3, x4) \ pxor RNOT, x3; \ pxor x0, x1; \ movdqa x0, x4; \ pand x2, x0; \ pxor x3, x0; \ por x4, x3; \ pxor x1, x2; \ pxor x1, x3; \ pand x0, x1; #define S2_2(x0, x1, x2, x3, x4) \ pxor x2, x0; \ pand x3, x2; \ por x1, x3; \ pxor RNOT, x0; \ pxor x0, x3; \ pxor x0, x4; \ pxor x2, x0; \ por x2, x1; #define S3_1(x0, x1, x2, x3, x4) \ movdqa x1, x4; \ pxor x3, x1; \ por x0, x3; \ pand x0, x4; \ pxor x2, x0; \ pxor x1, x2; \ pand x3, x1; \ pxor x3, x2; \ por x4, x0; \ pxor x3, x4; #define S3_2(x0, x1, x2, x3, x4) \ pxor x0, x1; \ pand x3, x0; \ pand x4, x3; \ pxor x2, x3; \ por x1, x4; \ pand x1, x2; \ pxor x3, x4; \ pxor x3, x0; \ pxor x2, x3; #define S4_1(x0, x1, x2, x3, x4) \ movdqa x3, x4; \ pand x0, x3; \ pxor x4, x0; \ pxor x2, x3; \ por x4, x2; \ pxor x1, x0; \ pxor x3, x4; \ por x0, x2; \ pxor x1, x2; #define S4_2(x0, x1, x2, x3, x4) \ pand x0, x1; \ pxor x4, x1; \ pand x2, x4; \ pxor x3, x2; \ pxor x0, x4; \ por x1, x3; \ pxor RNOT, x1; \ pxor x0, x3; #define S5_1(x0, x1, x2, x3, x4) \ movdqa x1, x4; \ por x0, x1; \ pxor x1, x2; \ pxor RNOT, x3; \ pxor x0, x4; \ pxor x2, x0; \ pand x4, x1; \ por x3, x4; \ pxor x0, x4; #define S5_2(x0, x1, x2, x3, x4) \ pand x3, x0; \ pxor x3, x1; \ pxor x2, x3; \ pxor x1, x0; \ pand x4, x2; \ pxor x2, x1; \ pand x0, x2; \ pxor x2, x3; #define S6_1(x0, x1, x2, x3, x4) \ movdqa x1, x4; \ pxor x0, x3; \ pxor x2, x1; \ pxor x0, x2; \ pand x3, x0; \ por x3, x1; \ pxor RNOT, x4; \ pxor x1, x0; \ pxor x2, x1; #define S6_2(x0, x1, x2, x3, x4) \ pxor x4, x3; \ pxor x0, x4; \ pand x0, x2; \ pxor x1, x4; \ pxor x3, x2; \ pand x1, x3; \ pxor x0, x3; \ pxor x2, x1; #define S7_1(x0, x1, x2, x3, x4) \ pxor RNOT, x1; \ movdqa x1, x4; \ pxor RNOT, x0; \ pand x2, x1; \ pxor x3, x1; \ por x4, x3; \ pxor x2, x4; \ pxor x3, x2; \ pxor x0, x3; \ por x1, x0; #define S7_2(x0, x1, x2, x3, x4) \ pand x0, x2; \ pxor x4, x0; \ pxor x3, x4; \ pand x0, x3; \ pxor x1, x4; \ pxor x4, x2; \ pxor x1, x3; \ por x0, x4; \ pxor x1, x4; #define SI0_1(x0, x1, x2, x3, x4) \ movdqa x3, x4; \ pxor 
x0, x1; \ por x1, x3; \ pxor x1, x4; \ pxor RNOT, x0; \ pxor x3, x2; \ pxor x0, x3; \ pand x1, x0; \ pxor x2, x0; #define SI0_2(x0, x1, x2, x3, x4) \ pand x3, x2; \ pxor x4, x3; \ pxor x3, x2; \ pxor x3, x1; \ pand x0, x3; \ pxor x0, x1; \ pxor x2, x0; \ pxor x3, x4; #define SI1_1(x0, x1, x2, x3, x4) \ pxor x3, x1; \ movdqa x0, x4; \ pxor x2, x0; \ pxor RNOT, x2; \ por x1, x4; \ pxor x3, x4; \ pand x1, x3; \ pxor x2, x1; \ pand x4, x2; #define SI1_2(x0, x1, x2, x3, x4) \ pxor x1, x4; \ por x3, x1; \ pxor x0, x3; \ pxor x0, x2; \ por x4, x0; \ pxor x4, x2; \ pxor x0, x1; \ pxor x1, x4; #define SI2_1(x0, x1, x2, x3, x4) \ pxor x1, x2; \ movdqa x3, x4; \ pxor RNOT, x3; \ por x2, x3; \ pxor x4, x2; \ pxor x0, x4; \ pxor x1, x3; \ por x2, x1; \ pxor x0, x2; #define SI2_2(x0, x1, x2, x3, x4) \ pxor x4, x1; \ por x3, x4; \ pxor x3, x2; \ pxor x2, x4; \ pand x1, x2; \ pxor x3, x2; \ pxor x4, x3; \ pxor x0, x4; #define SI3_1(x0, x1, x2, x3, x4) \ pxor x1, x2; \ movdqa x1, x4; \ pand x2, x1; \ pxor x0, x1; \ por x4, x0; \ pxor x3, x4; \ pxor x3, x0; \ por x1, x3; \ pxor x2, x1; #define SI3_2(x0, x1, x2, x3, x4) \ pxor x3, x1; \ pxor x2, x0; \ pxor x3, x2; \ pand x1, x3; \ pxor x0, x1; \ pand x2, x0; \ pxor x3, x4; \ pxor x0, x3; \ pxor x1, x0; #define SI4_1(x0, x1, x2, x3, x4) \ pxor x3, x2; \ movdqa x0, x4; \ pand x1, x0; \ pxor x2, x0; \ por x3, x2; \ pxor RNOT, x4; \ pxor x0, x1; \ pxor x2, x0; \ pand x4, x2; #define SI4_2(x0, x1, x2, x3, x4) \ pxor x0, x2; \ por x4, x0; \ pxor x3, x0; \ pand x2, x3; \ pxor x3, x4; \ pxor x1, x3; \ pand x0, x1; \ pxor x1, x4; \ pxor x3, x0; #define SI5_1(x0, x1, x2, x3, x4) \ movdqa x1, x4; \ por x2, x1; \ pxor x4, x2; \ pxor x3, x1; \ pand x4, x3; \ pxor x3, x2; \ por x0, x3; \ pxor RNOT, x0; \ pxor x2, x3; \ por x0, x2; #define SI5_2(x0, x1, x2, x3, x4) \ pxor x1, x4; \ pxor x4, x2; \ pand x0, x4; \ pxor x1, x0; \ pxor x3, x1; \ pand x2, x0; \ pxor x3, x2; \ pxor x2, x0; \ pxor x4, x2; \ pxor x3, x4; #define SI6_1(x0, x1, x2, x3, x4) \ pxor x2, x0; \ movdqa x0, x4; \ pand x3, x0; \ pxor x3, x2; \ pxor x2, x0; \ pxor x1, x3; \ por x4, x2; \ pxor x3, x2; \ pand x0, x3; #define SI6_2(x0, x1, x2, x3, x4) \ pxor RNOT, x0; \ pxor x1, x3; \ pand x2, x1; \ pxor x0, x4; \ pxor x4, x3; \ pxor x2, x4; \ pxor x1, x0; \ pxor x0, x2; #define SI7_1(x0, x1, x2, x3, x4) \ movdqa x3, x4; \ pand x0, x3; \ pxor x2, x0; \ por x4, x2; \ pxor x1, x4; \ pxor RNOT, x0; \ por x3, x1; \ pxor x0, x4; \ pand x2, x0; \ pxor x1, x0; #define SI7_2(x0, x1, x2, x3, x4) \ pand x2, x1; \ pxor x2, x3; \ pxor x3, x4; \ pand x3, x2; \ por x0, x3; \ pxor x4, x1; \ pxor x4, x3; \ pand x0, x4; \ pxor x2, x4; #define get_key(i, j, t) \ movd (4*(i)+(j))*4(CTX), t; \ pshufd $0, t, t; #define K2(x0, x1, x2, x3, x4, i) \ get_key(i, 0, RK0); \ get_key(i, 1, RK1); \ get_key(i, 2, RK2); \ get_key(i, 3, RK3); \ pxor RK0, x0 ## 1; \ pxor RK1, x1 ## 1; \ pxor RK2, x2 ## 1; \ pxor RK3, x3 ## 1; \ pxor RK0, x0 ## 2; \ pxor RK1, x1 ## 2; \ pxor RK2, x2 ## 2; \ pxor RK3, x3 ## 2; #define LK2(x0, x1, x2, x3, x4, i) \ movdqa x0 ## 1, x4 ## 1; \ pslld $13, x0 ## 1; \ psrld $(32 - 13), x4 ## 1; \ por x4 ## 1, x0 ## 1; \ pxor x0 ## 1, x1 ## 1; \ movdqa x2 ## 1, x4 ## 1; \ pslld $3, x2 ## 1; \ psrld $(32 - 3), x4 ## 1; \ por x4 ## 1, x2 ## 1; \ pxor x2 ## 1, x1 ## 1; \ movdqa x0 ## 2, x4 ## 2; \ pslld $13, x0 ## 2; \ psrld $(32 - 13), x4 ## 2; \ por x4 ## 2, x0 ## 2; \ pxor x0 ## 2, x1 ## 2; \ movdqa x2 ## 2, x4 ## 2; \ pslld $3, x2 ## 2; \ psrld $(32 - 3), x4 ## 2; \ por x4 ## 2, x2 ## 2; \ pxor x2 ## 2, x1 ## 2; \ movdqa 
x1 ## 1, x4 ## 1; \ pslld $1, x1 ## 1; \ psrld $(32 - 1), x4 ## 1; \ por x4 ## 1, x1 ## 1; \ movdqa x0 ## 1, x4 ## 1; \ pslld $3, x4 ## 1; \ pxor x2 ## 1, x3 ## 1; \ pxor x4 ## 1, x3 ## 1; \ movdqa x3 ## 1, x4 ## 1; \ get_key(i, 1, RK1); \ movdqa x1 ## 2, x4 ## 2; \ pslld $1, x1 ## 2; \ psrld $(32 - 1), x4 ## 2; \ por x4 ## 2, x1 ## 2; \ movdqa x0 ## 2, x4 ## 2; \ pslld $3, x4 ## 2; \ pxor x2 ## 2, x3 ## 2; \ pxor x4 ## 2, x3 ## 2; \ movdqa x3 ## 2, x4 ## 2; \ get_key(i, 3, RK3); \ pslld $7, x3 ## 1; \ psrld $(32 - 7), x4 ## 1; \ por x4 ## 1, x3 ## 1; \ movdqa x1 ## 1, x4 ## 1; \ pslld $7, x4 ## 1; \ pxor x1 ## 1, x0 ## 1; \ pxor x3 ## 1, x0 ## 1; \ pxor x3 ## 1, x2 ## 1; \ pxor x4 ## 1, x2 ## 1; \ get_key(i, 0, RK0); \ pslld $7, x3 ## 2; \ psrld $(32 - 7), x4 ## 2; \ por x4 ## 2, x3 ## 2; \ movdqa x1 ## 2, x4 ## 2; \ pslld $7, x4 ## 2; \ pxor x1 ## 2, x0 ## 2; \ pxor x3 ## 2, x0 ## 2; \ pxor x3 ## 2, x2 ## 2; \ pxor x4 ## 2, x2 ## 2; \ get_key(i, 2, RK2); \ pxor RK1, x1 ## 1; \ pxor RK3, x3 ## 1; \ movdqa x0 ## 1, x4 ## 1; \ pslld $5, x0 ## 1; \ psrld $(32 - 5), x4 ## 1; \ por x4 ## 1, x0 ## 1; \ movdqa x2 ## 1, x4 ## 1; \ pslld $22, x2 ## 1; \ psrld $(32 - 22), x4 ## 1; \ por x4 ## 1, x2 ## 1; \ pxor RK0, x0 ## 1; \ pxor RK2, x2 ## 1; \ pxor RK1, x1 ## 2; \ pxor RK3, x3 ## 2; \ movdqa x0 ## 2, x4 ## 2; \ pslld $5, x0 ## 2; \ psrld $(32 - 5), x4 ## 2; \ por x4 ## 2, x0 ## 2; \ movdqa x2 ## 2, x4 ## 2; \ pslld $22, x2 ## 2; \ psrld $(32 - 22), x4 ## 2; \ por x4 ## 2, x2 ## 2; \ pxor RK0, x0 ## 2; \ pxor RK2, x2 ## 2; #define KL2(x0, x1, x2, x3, x4, i) \ pxor RK0, x0 ## 1; \ pxor RK2, x2 ## 1; \ movdqa x0 ## 1, x4 ## 1; \ psrld $5, x0 ## 1; \ pslld $(32 - 5), x4 ## 1; \ por x4 ## 1, x0 ## 1; \ pxor RK3, x3 ## 1; \ pxor RK1, x1 ## 1; \ movdqa x2 ## 1, x4 ## 1; \ psrld $22, x2 ## 1; \ pslld $(32 - 22), x4 ## 1; \ por x4 ## 1, x2 ## 1; \ pxor x3 ## 1, x2 ## 1; \ pxor RK0, x0 ## 2; \ pxor RK2, x2 ## 2; \ movdqa x0 ## 2, x4 ## 2; \ psrld $5, x0 ## 2; \ pslld $(32 - 5), x4 ## 2; \ por x4 ## 2, x0 ## 2; \ pxor RK3, x3 ## 2; \ pxor RK1, x1 ## 2; \ movdqa x2 ## 2, x4 ## 2; \ psrld $22, x2 ## 2; \ pslld $(32 - 22), x4 ## 2; \ por x4 ## 2, x2 ## 2; \ pxor x3 ## 2, x2 ## 2; \ pxor x3 ## 1, x0 ## 1; \ movdqa x1 ## 1, x4 ## 1; \ pslld $7, x4 ## 1; \ pxor x1 ## 1, x0 ## 1; \ pxor x4 ## 1, x2 ## 1; \ movdqa x1 ## 1, x4 ## 1; \ psrld $1, x1 ## 1; \ pslld $(32 - 1), x4 ## 1; \ por x4 ## 1, x1 ## 1; \ pxor x3 ## 2, x0 ## 2; \ movdqa x1 ## 2, x4 ## 2; \ pslld $7, x4 ## 2; \ pxor x1 ## 2, x0 ## 2; \ pxor x4 ## 2, x2 ## 2; \ movdqa x1 ## 2, x4 ## 2; \ psrld $1, x1 ## 2; \ pslld $(32 - 1), x4 ## 2; \ por x4 ## 2, x1 ## 2; \ movdqa x3 ## 1, x4 ## 1; \ psrld $7, x3 ## 1; \ pslld $(32 - 7), x4 ## 1; \ por x4 ## 1, x3 ## 1; \ pxor x0 ## 1, x1 ## 1; \ movdqa x0 ## 1, x4 ## 1; \ pslld $3, x4 ## 1; \ pxor x4 ## 1, x3 ## 1; \ movdqa x0 ## 1, x4 ## 1; \ movdqa x3 ## 2, x4 ## 2; \ psrld $7, x3 ## 2; \ pslld $(32 - 7), x4 ## 2; \ por x4 ## 2, x3 ## 2; \ pxor x0 ## 2, x1 ## 2; \ movdqa x0 ## 2, x4 ## 2; \ pslld $3, x4 ## 2; \ pxor x4 ## 2, x3 ## 2; \ movdqa x0 ## 2, x4 ## 2; \ psrld $13, x0 ## 1; \ pslld $(32 - 13), x4 ## 1; \ por x4 ## 1, x0 ## 1; \ pxor x2 ## 1, x1 ## 1; \ pxor x2 ## 1, x3 ## 1; \ movdqa x2 ## 1, x4 ## 1; \ psrld $3, x2 ## 1; \ pslld $(32 - 3), x4 ## 1; \ por x4 ## 1, x2 ## 1; \ psrld $13, x0 ## 2; \ pslld $(32 - 13), x4 ## 2; \ por x4 ## 2, x0 ## 2; \ pxor x2 ## 2, x1 ## 2; \ pxor x2 ## 2, x3 ## 2; \ movdqa x2 ## 2, x4 ## 2; \ psrld $3, x2 ## 2; \ pslld $(32 - 3), x4 ## 2; \ por x4 ## 2, x2 ## 2; #define 
S(SBOX, x0, x1, x2, x3, x4) \ SBOX ## _1(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \ SBOX ## _2(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \ SBOX ## _1(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2); \ SBOX ## _2(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2); #define SP(SBOX, x0, x1, x2, x3, x4, i) \ get_key(i, 0, RK0); \ SBOX ## _1(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \ get_key(i, 2, RK2); \ SBOX ## _1(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2); \ get_key(i, 3, RK3); \ SBOX ## _2(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \ get_key(i, 1, RK1); \ SBOX ## _2(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2); \ #define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \ movdqa x0, t2; \ punpckldq x1, x0; \ punpckhdq x1, t2; \ movdqa x2, t1; \ punpckhdq x3, x2; \ punpckldq x3, t1; \ movdqa x0, x1; \ punpcklqdq t1, x0; \ punpckhqdq t1, x1; \ movdqa t2, x3; \ punpcklqdq x2, t2; \ punpckhqdq x2, x3; \ movdqa t2, x2; #define read_blocks(in, x0, x1, x2, x3, t0, t1, t2) \ movdqu (0*4*4)(in), x0; \ movdqu (1*4*4)(in), x1; \ movdqu (2*4*4)(in), x2; \ movdqu (3*4*4)(in), x3; \ \ transpose_4x4(x0, x1, x2, x3, t0, t1, t2) #define write_blocks(out, x0, x1, x2, x3, t0, t1, t2) \ transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \ \ movdqu x0, (0*4*4)(out); \ movdqu x1, (1*4*4)(out); \ movdqu x2, (2*4*4)(out); \ movdqu x3, (3*4*4)(out); #define xor_blocks(out, x0, x1, x2, x3, t0, t1, t2) \ transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \ \ movdqu (0*4*4)(out), t0; \ pxor t0, x0; \ movdqu x0, (0*4*4)(out); \ movdqu (1*4*4)(out), t0; \ pxor t0, x1; \ movdqu x1, (1*4*4)(out); \ movdqu (2*4*4)(out), t0; \ pxor t0, x2; \ movdqu x2, (2*4*4)(out); \ movdqu (3*4*4)(out), t0; \ pxor t0, x3; \ movdqu x3, (3*4*4)(out); SYM_FUNC_START(__serpent_enc_blk_8way) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src * %rcx: bool, if true: xor output */ pcmpeqd RNOT, RNOT; leaq (4*4*4)(%rdx), %rax; read_blocks(%rdx, RA1, RB1, RC1, RD1, RK0, RK1, RK2); read_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2); K2(RA, RB, RC, RD, RE, 0); S(S0, RA, RB, RC, RD, RE); LK2(RC, RB, RD, RA, RE, 1); S(S1, RC, RB, RD, RA, RE); LK2(RE, RD, RA, RC, RB, 2); S(S2, RE, RD, RA, RC, RB); LK2(RB, RD, RE, RC, RA, 3); S(S3, RB, RD, RE, RC, RA); LK2(RC, RA, RD, RB, RE, 4); S(S4, RC, RA, RD, RB, RE); LK2(RA, RD, RB, RE, RC, 5); S(S5, RA, RD, RB, RE, RC); LK2(RC, RA, RD, RE, RB, 6); S(S6, RC, RA, RD, RE, RB); LK2(RD, RB, RA, RE, RC, 7); S(S7, RD, RB, RA, RE, RC); LK2(RC, RA, RE, RD, RB, 8); S(S0, RC, RA, RE, RD, RB); LK2(RE, RA, RD, RC, RB, 9); S(S1, RE, RA, RD, RC, RB); LK2(RB, RD, RC, RE, RA, 10); S(S2, RB, RD, RC, RE, RA); LK2(RA, RD, RB, RE, RC, 11); S(S3, RA, RD, RB, RE, RC); LK2(RE, RC, RD, RA, RB, 12); S(S4, RE, RC, RD, RA, RB); LK2(RC, RD, RA, RB, RE, 13); S(S5, RC, RD, RA, RB, RE); LK2(RE, RC, RD, RB, RA, 14); S(S6, RE, RC, RD, RB, RA); LK2(RD, RA, RC, RB, RE, 15); S(S7, RD, RA, RC, RB, RE); LK2(RE, RC, RB, RD, RA, 16); S(S0, RE, RC, RB, RD, RA); LK2(RB, RC, RD, RE, RA, 17); S(S1, RB, RC, RD, RE, RA); LK2(RA, RD, RE, RB, RC, 18); S(S2, RA, RD, RE, RB, RC); LK2(RC, RD, RA, RB, RE, 19); S(S3, RC, RD, RA, RB, RE); LK2(RB, RE, RD, RC, RA, 20); S(S4, RB, RE, RD, RC, RA); LK2(RE, RD, RC, RA, RB, 21); S(S5, RE, RD, RC, RA, RB); LK2(RB, RE, RD, RA, RC, 22); S(S6, RB, RE, RD, RA, RC); LK2(RD, RC, RE, RA, RB, 23); S(S7, RD, RC, RE, RA, RB); LK2(RB, RE, RA, RD, RC, 24); S(S0, RB, RE, RA, RD, RC); LK2(RA, RE, RD, RB, RC, 25); S(S1, RA, RE, RD, RB, RC); LK2(RC, RD, RB, RA, RE, 26); S(S2, RC, RD, RB, RA, RE); LK2(RE, RD, RC, RA, RB, 27); S(S3, RE, RD, RC, RA, RB); 
LK2(RA, RB, RD, RE, RC, 28); S(S4, RA, RB, RD, RE, RC); LK2(RB, RD, RE, RC, RA, 29); S(S5, RB, RD, RE, RC, RA); LK2(RA, RB, RD, RC, RE, 30); S(S6, RA, RB, RD, RC, RE); LK2(RD, RE, RB, RC, RA, 31); S(S7, RD, RE, RB, RC, RA); K2(RA, RB, RC, RD, RE, 32); leaq (4*4*4)(%rsi), %rax; testb %cl, %cl; jnz .L__enc_xor8; write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2); write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2); RET; .L__enc_xor8: xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2); xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2); RET; SYM_FUNC_END(__serpent_enc_blk_8way) SYM_FUNC_START(serpent_dec_blk_8way) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src */ pcmpeqd RNOT, RNOT; leaq (4*4*4)(%rdx), %rax; read_blocks(%rdx, RA1, RB1, RC1, RD1, RK0, RK1, RK2); read_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2); K2(RA, RB, RC, RD, RE, 32); SP(SI7, RA, RB, RC, RD, RE, 31); KL2(RB, RD, RA, RE, RC, 31); SP(SI6, RB, RD, RA, RE, RC, 30); KL2(RA, RC, RE, RB, RD, 30); SP(SI5, RA, RC, RE, RB, RD, 29); KL2(RC, RD, RA, RE, RB, 29); SP(SI4, RC, RD, RA, RE, RB, 28); KL2(RC, RA, RB, RE, RD, 28); SP(SI3, RC, RA, RB, RE, RD, 27); KL2(RB, RC, RD, RE, RA, 27); SP(SI2, RB, RC, RD, RE, RA, 26); KL2(RC, RA, RE, RD, RB, 26); SP(SI1, RC, RA, RE, RD, RB, 25); KL2(RB, RA, RE, RD, RC, 25); SP(SI0, RB, RA, RE, RD, RC, 24); KL2(RE, RC, RA, RB, RD, 24); SP(SI7, RE, RC, RA, RB, RD, 23); KL2(RC, RB, RE, RD, RA, 23); SP(SI6, RC, RB, RE, RD, RA, 22); KL2(RE, RA, RD, RC, RB, 22); SP(SI5, RE, RA, RD, RC, RB, 21); KL2(RA, RB, RE, RD, RC, 21); SP(SI4, RA, RB, RE, RD, RC, 20); KL2(RA, RE, RC, RD, RB, 20); SP(SI3, RA, RE, RC, RD, RB, 19); KL2(RC, RA, RB, RD, RE, 19); SP(SI2, RC, RA, RB, RD, RE, 18); KL2(RA, RE, RD, RB, RC, 18); SP(SI1, RA, RE, RD, RB, RC, 17); KL2(RC, RE, RD, RB, RA, 17); SP(SI0, RC, RE, RD, RB, RA, 16); KL2(RD, RA, RE, RC, RB, 16); SP(SI7, RD, RA, RE, RC, RB, 15); KL2(RA, RC, RD, RB, RE, 15); SP(SI6, RA, RC, RD, RB, RE, 14); KL2(RD, RE, RB, RA, RC, 14); SP(SI5, RD, RE, RB, RA, RC, 13); KL2(RE, RC, RD, RB, RA, 13); SP(SI4, RE, RC, RD, RB, RA, 12); KL2(RE, RD, RA, RB, RC, 12); SP(SI3, RE, RD, RA, RB, RC, 11); KL2(RA, RE, RC, RB, RD, 11); SP(SI2, RA, RE, RC, RB, RD, 10); KL2(RE, RD, RB, RC, RA, 10); SP(SI1, RE, RD, RB, RC, RA, 9); KL2(RA, RD, RB, RC, RE, 9); SP(SI0, RA, RD, RB, RC, RE, 8); KL2(RB, RE, RD, RA, RC, 8); SP(SI7, RB, RE, RD, RA, RC, 7); KL2(RE, RA, RB, RC, RD, 7); SP(SI6, RE, RA, RB, RC, RD, 6); KL2(RB, RD, RC, RE, RA, 6); SP(SI5, RB, RD, RC, RE, RA, 5); KL2(RD, RA, RB, RC, RE, 5); SP(SI4, RD, RA, RB, RC, RE, 4); KL2(RD, RB, RE, RC, RA, 4); SP(SI3, RD, RB, RE, RC, RA, 3); KL2(RE, RD, RA, RC, RB, 3); SP(SI2, RE, RD, RA, RC, RB, 2); KL2(RD, RB, RC, RA, RE, 2); SP(SI1, RD, RB, RC, RA, RE, 1); KL2(RE, RB, RC, RA, RD, 1); S(SI0, RE, RB, RC, RA, RD); K2(RC, RD, RB, RE, RA, 0); leaq (4*4*4)(%rsi), %rax; write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2); write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2); RET; SYM_FUNC_END(serpent_dec_blk_8way)
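For reference, the LK2 macro in the Serpent SSE2 code above interleaves the standard Serpent linear transformation with the key mixing of the following round, applied to two groups of four XMM registers at once (the "## 1" / "## 2" operand suffixes). A minimal C sketch of that linear transformation on a single 4-word state — illustrative only, following the textbook Serpent definition rather than the kernel's own helpers — looks like this:

#include <stdint.h>

static inline uint32_t rol32(uint32_t v, unsigned int s)
{
	return (v << s) | (v >> (32 - s));
}

/* One application of the Serpent linear transformation (sketch). */
static void serpent_linear_transform(uint32_t x[4])
{
	x[0] = rol32(x[0], 13);
	x[2] = rol32(x[2], 3);
	x[1] ^= x[0] ^ x[2];
	x[3] ^= x[2] ^ (x[0] << 3);
	x[1] = rol32(x[1], 1);
	x[3] = rol32(x[3], 7);
	x[0] ^= x[1] ^ x[3];
	x[2] ^= x[3] ^ (x[1] << 7);
	x[0] = rol32(x[0], 5);
	x[2] = rol32(x[2], 22);
}

The rotate counts (13, 3, 1, 7, 5, 22) and the << 3 / << 7 feed-ins correspond to the pslld/psrld/por and pslld-then-pxor sequences in LK2; KL2 runs the same transform in reverse for decryption.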
aixcc-public/challenge-001-exemplar-source
6,589
arch/x86/crypto/twofish-x86_64-asm_64-3way.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Twofish Cipher 3-way parallel algorithm (x86_64) * * Copyright (C) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> */ #include <linux/linkage.h> .file "twofish-x86_64-asm-3way.S" .text /* structure of crypto context */ #define s0 0 #define s1 1024 #define s2 2048 #define s3 3072 #define w 4096 #define k 4128 /********************************************************************** 3-way twofish **********************************************************************/ #define CTX %rdi #define RIO %rdx #define RAB0 %rax #define RAB1 %rbx #define RAB2 %rcx #define RAB0d %eax #define RAB1d %ebx #define RAB2d %ecx #define RAB0bh %ah #define RAB1bh %bh #define RAB2bh %ch #define RAB0bl %al #define RAB1bl %bl #define RAB2bl %cl #define CD0 0x0(%rsp) #define CD1 0x8(%rsp) #define CD2 0x10(%rsp) # used only before/after all rounds #define RCD0 %r8 #define RCD1 %r9 #define RCD2 %r10 # used only during rounds #define RX0 %r8 #define RX1 %r9 #define RX2 %r10 #define RX0d %r8d #define RX1d %r9d #define RX2d %r10d #define RY0 %r11 #define RY1 %r12 #define RY2 %r13 #define RY0d %r11d #define RY1d %r12d #define RY2d %r13d #define RT0 %rdx #define RT1 %rsi #define RT0d %edx #define RT1d %esi #define RT1bl %sil #define do16bit_ror(rot, op1, op2, T0, T1, tmp1, tmp2, ab, dst) \ movzbl ab ## bl, tmp2 ## d; \ movzbl ab ## bh, tmp1 ## d; \ rorq $(rot), ab; \ op1##l T0(CTX, tmp2, 4), dst ## d; \ op2##l T1(CTX, tmp1, 4), dst ## d; #define swap_ab_with_cd(ab, cd, tmp) \ movq cd, tmp; \ movq ab, cd; \ movq tmp, ab; /* * Combined G1 & G2 function. Reordered with help of rotates to have moves * at beginning. */ #define g1g2_3(ab, cd, Tx0, Tx1, Tx2, Tx3, Ty0, Ty1, Ty2, Ty3, x, y) \ /* G1,1 && G2,1 */ \ do16bit_ror(32, mov, xor, Tx0, Tx1, RT0, x ## 0, ab ## 0, x ## 0); \ do16bit_ror(48, mov, xor, Ty1, Ty2, RT0, y ## 0, ab ## 0, y ## 0); \ \ do16bit_ror(32, mov, xor, Tx0, Tx1, RT0, x ## 1, ab ## 1, x ## 1); \ do16bit_ror(48, mov, xor, Ty1, Ty2, RT0, y ## 1, ab ## 1, y ## 1); \ \ do16bit_ror(32, mov, xor, Tx0, Tx1, RT0, x ## 2, ab ## 2, x ## 2); \ do16bit_ror(48, mov, xor, Ty1, Ty2, RT0, y ## 2, ab ## 2, y ## 2); \ \ /* G1,2 && G2,2 */ \ do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 0, x ## 0); \ do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 0, y ## 0); \ swap_ab_with_cd(ab ## 0, cd ## 0, RT0); \ \ do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 1, x ## 1); \ do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 1, y ## 1); \ swap_ab_with_cd(ab ## 1, cd ## 1, RT0); \ \ do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 2, x ## 2); \ do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 2, y ## 2); \ swap_ab_with_cd(ab ## 2, cd ## 2, RT0); #define enc_round_end(ab, x, y, n) \ addl y ## d, x ## d; \ addl x ## d, y ## d; \ addl k+4*(2*(n))(CTX), x ## d; \ xorl ab ## d, x ## d; \ addl k+4*(2*(n)+1)(CTX), y ## d; \ shrq $32, ab; \ roll $1, ab ## d; \ xorl y ## d, ab ## d; \ shlq $32, ab; \ rorl $1, x ## d; \ orq x, ab; #define dec_round_end(ba, x, y, n) \ addl y ## d, x ## d; \ addl x ## d, y ## d; \ addl k+4*(2*(n))(CTX), x ## d; \ addl k+4*(2*(n)+1)(CTX), y ## d; \ xorl ba ## d, y ## d; \ shrq $32, ba; \ roll $1, ba ## d; \ xorl x ## d, ba ## d; \ shlq $32, ba; \ rorl $1, y ## d; \ orq y, ba; #define encrypt_round3(ab, cd, n) \ g1g2_3(ab, cd, s0, s1, s2, s3, s0, s1, s2, s3, RX, RY); \ \ enc_round_end(ab ## 0, RX0, RY0, n); \ enc_round_end(ab ## 1, RX1, RY1, n); \ enc_round_end(ab ## 2, RX2, RY2, n); #define decrypt_round3(ba, dc, n) \ g1g2_3(ba, dc, s1, s2, s3, 
s0, s3, s0, s1, s2, RY, RX); \ \ dec_round_end(ba ## 0, RX0, RY0, n); \ dec_round_end(ba ## 1, RX1, RY1, n); \ dec_round_end(ba ## 2, RX2, RY2, n); #define encrypt_cycle3(ab, cd, n) \ encrypt_round3(ab, cd, n*2); \ encrypt_round3(ab, cd, (n*2)+1); #define decrypt_cycle3(ba, dc, n) \ decrypt_round3(ba, dc, (n*2)+1); \ decrypt_round3(ba, dc, (n*2)); #define push_cd() \ pushq RCD2; \ pushq RCD1; \ pushq RCD0; #define pop_cd() \ popq RCD0; \ popq RCD1; \ popq RCD2; #define inpack3(in, n, xy, m) \ movq 4*(n)(in), xy ## 0; \ xorq w+4*m(CTX), xy ## 0; \ \ movq 4*(4+(n))(in), xy ## 1; \ xorq w+4*m(CTX), xy ## 1; \ \ movq 4*(8+(n))(in), xy ## 2; \ xorq w+4*m(CTX), xy ## 2; #define outunpack3(op, out, n, xy, m) \ xorq w+4*m(CTX), xy ## 0; \ op ## q xy ## 0, 4*(n)(out); \ \ xorq w+4*m(CTX), xy ## 1; \ op ## q xy ## 1, 4*(4+(n))(out); \ \ xorq w+4*m(CTX), xy ## 2; \ op ## q xy ## 2, 4*(8+(n))(out); #define inpack_enc3() \ inpack3(RIO, 0, RAB, 0); \ inpack3(RIO, 2, RCD, 2); #define outunpack_enc3(op) \ outunpack3(op, RIO, 2, RAB, 6); \ outunpack3(op, RIO, 0, RCD, 4); #define inpack_dec3() \ inpack3(RIO, 0, RAB, 4); \ rorq $32, RAB0; \ rorq $32, RAB1; \ rorq $32, RAB2; \ inpack3(RIO, 2, RCD, 6); \ rorq $32, RCD0; \ rorq $32, RCD1; \ rorq $32, RCD2; #define outunpack_dec3() \ rorq $32, RCD0; \ rorq $32, RCD1; \ rorq $32, RCD2; \ outunpack3(mov, RIO, 0, RCD, 0); \ rorq $32, RAB0; \ rorq $32, RAB1; \ rorq $32, RAB2; \ outunpack3(mov, RIO, 2, RAB, 2); SYM_FUNC_START(__twofish_enc_blk_3way) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src, RIO * %rcx: bool, if true: xor output */ pushq %r13; pushq %r12; pushq %rbx; pushq %rcx; /* bool xor */ pushq %rsi; /* dst */ inpack_enc3(); push_cd(); encrypt_cycle3(RAB, CD, 0); encrypt_cycle3(RAB, CD, 1); encrypt_cycle3(RAB, CD, 2); encrypt_cycle3(RAB, CD, 3); encrypt_cycle3(RAB, CD, 4); encrypt_cycle3(RAB, CD, 5); encrypt_cycle3(RAB, CD, 6); encrypt_cycle3(RAB, CD, 7); pop_cd(); popq RIO; /* dst */ popq RT1; /* bool xor */ testb RT1bl, RT1bl; jnz .L__enc_xor3; outunpack_enc3(mov); popq %rbx; popq %r12; popq %r13; RET; .L__enc_xor3: outunpack_enc3(xor); popq %rbx; popq %r12; popq %r13; RET; SYM_FUNC_END(__twofish_enc_blk_3way) SYM_FUNC_START(twofish_dec_blk_3way) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src, RIO */ pushq %r13; pushq %r12; pushq %rbx; pushq %rsi; /* dst */ inpack_dec3(); push_cd(); decrypt_cycle3(RAB, CD, 7); decrypt_cycle3(RAB, CD, 6); decrypt_cycle3(RAB, CD, 5); decrypt_cycle3(RAB, CD, 4); decrypt_cycle3(RAB, CD, 3); decrypt_cycle3(RAB, CD, 2); decrypt_cycle3(RAB, CD, 1); decrypt_cycle3(RAB, CD, 0); pop_cd(); popq RIO; /* dst */ outunpack_dec3(); popq %rbx; popq %r12; popq %r13; RET; SYM_FUNC_END(twofish_dec_blk_3way)
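For orientation, encrypt_round3 above evaluates one Twofish Feistel round for three blocks in parallel: do16bit_ror assembles the key-dependent g() lookups byte by byte from the s0..s3 tables in the context, and enc_round_end finishes the pseudo-Hadamard transform, subkey addition and the 1-bit rotates. A single-block C sketch of the same round — an illustration of the textbook round function, using a hypothetical flat table parameter rather than the kernel's context structure — might read:

#include <stdint.h>

static inline uint32_t rol32(uint32_t v, unsigned int s)
{
	return (v << s) | (v >> (32 - s));
}

static inline uint32_t ror32(uint32_t v, unsigned int s)
{
	return (v >> s) | (v << (32 - s));
}

/*
 * Key-dependent g(): four table lookups XORed together.  The layout
 * (four 256-entry 32-bit tables with the MDS folded in) mirrors the
 * s0..s3 offsets above, but this parameter is a hypothetical plain
 * array, not the kernel's struct.
 */
static uint32_t twofish_g(const uint32_t s[4][256], uint32_t x)
{
	return s[0][x & 0xff] ^ s[1][(x >> 8) & 0xff] ^
	       s[2][(x >> 16) & 0xff] ^ s[3][x >> 24];
}

/* One encryption round; the swap of the (a,b) and (c,d) halves between
 * rounds (swap_ab_with_cd in the macros above) is left to the caller. */
static void twofish_enc_round(const uint32_t s[4][256], const uint32_t k[2],
			      uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
{
	uint32_t x = twofish_g(s, *a);
	uint32_t y = twofish_g(s, rol32(*b, 8));

	x += y;			/* pseudo-Hadamard transform */
	y += x;
	x += k[0];		/* even subkey for this round */
	y += k[1];		/* odd subkey for this round */

	*c = ror32(*c ^ x, 1);
	*d = rol32(*d, 1) ^ y;
}

decrypt_round3 is the corresponding inverse round, undoing the rotates and XORs in the opposite order.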
aixcc-public/challenge-001-exemplar-source
9,346
arch/x86/crypto/polyval-clmulni_asm.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright 2021 Google LLC */ /* * This is an efficient implementation of POLYVAL using intel PCLMULQDQ-NI * instructions. It works on 8 blocks at a time, by precomputing the first 8 * keys powers h^8, ..., h^1 in the POLYVAL finite field. This precomputation * allows us to split finite field multiplication into two steps. * * In the first step, we consider h^i, m_i as normal polynomials of degree less * than 128. We then compute p(x) = h^8m_0 + ... + h^1m_7 where multiplication * is simply polynomial multiplication. * * In the second step, we compute the reduction of p(x) modulo the finite field * modulus g(x) = x^128 + x^127 + x^126 + x^121 + 1. * * This two step process is equivalent to computing h^8m_0 + ... + h^1m_7 where * multiplication is finite field multiplication. The advantage is that the * two-step process only requires 1 finite field reduction for every 8 * polynomial multiplications. Further parallelism is gained by interleaving the * multiplications and polynomial reductions. */ #include <linux/linkage.h> #include <asm/frame.h> #define STRIDE_BLOCKS 8 #define GSTAR %xmm7 #define PL %xmm8 #define PH %xmm9 #define TMP_XMM %xmm11 #define LO %xmm12 #define HI %xmm13 #define MI %xmm14 #define SUM %xmm15 #define KEY_POWERS %rdi #define MSG %rsi #define BLOCKS_LEFT %rdx #define ACCUMULATOR %rcx #define TMP %rax .section .rodata.cst16.gstar, "aM", @progbits, 16 .align 16 .Lgstar: .quad 0xc200000000000000, 0xc200000000000000 .text /* * Performs schoolbook1_iteration on two lists of 128-bit polynomials of length * count pointed to by MSG and KEY_POWERS. */ .macro schoolbook1 count .set i, 0 .rept (\count) schoolbook1_iteration i 0 .set i, (i +1) .endr .endm /* * Computes the product of two 128-bit polynomials at the memory locations * specified by (MSG + 16*i) and (KEY_POWERS + 16*i) and XORs the components of * the 256-bit product into LO, MI, HI. * * Given: * X = [X_1 : X_0] * Y = [Y_1 : Y_0] * * We compute: * LO += X_0 * Y_0 * MI += X_0 * Y_1 + X_1 * Y_0 * HI += X_1 * Y_1 * * Later, the 256-bit result can be extracted as: * [HI_1 : HI_0 + MI_1 : LO_1 + MI_0 : LO_0] * This step is done when computing the polynomial reduction for efficiency * reasons. * * If xor_sum == 1, then also XOR the value of SUM into m_0. This avoids an * extra multiplication of SUM and h^8. */ .macro schoolbook1_iteration i xor_sum movups (16*\i)(MSG), %xmm0 .if (\i == 0 && \xor_sum == 1) pxor SUM, %xmm0 .endif vpclmulqdq $0x01, (16*\i)(KEY_POWERS), %xmm0, %xmm2 vpclmulqdq $0x00, (16*\i)(KEY_POWERS), %xmm0, %xmm1 vpclmulqdq $0x10, (16*\i)(KEY_POWERS), %xmm0, %xmm3 vpclmulqdq $0x11, (16*\i)(KEY_POWERS), %xmm0, %xmm4 vpxor %xmm2, MI, MI vpxor %xmm1, LO, LO vpxor %xmm4, HI, HI vpxor %xmm3, MI, MI .endm /* * Performs the same computation as schoolbook1_iteration, except we expect the * arguments to already be loaded into xmm0 and xmm1 and we set the result * registers LO, MI, and HI directly rather than XOR'ing into them. */ .macro schoolbook1_noload vpclmulqdq $0x01, %xmm0, %xmm1, MI vpclmulqdq $0x10, %xmm0, %xmm1, %xmm2 vpclmulqdq $0x00, %xmm0, %xmm1, LO vpclmulqdq $0x11, %xmm0, %xmm1, HI vpxor %xmm2, MI, MI .endm /* * Computes the 256-bit polynomial represented by LO, HI, MI. Stores * the result in PL, PH. * [PH : PL] = [HI_1 : HI_0 + MI_1 : LO_1 + MI_0 : LO_0] */ .macro schoolbook2 vpslldq $8, MI, PL vpsrldq $8, MI, PH pxor LO, PL pxor HI, PH .endm /* * Computes the 128-bit reduction of PH : PL. Stores the result in dest. 
* * This macro computes p(x) mod g(x) where p(x) is in montgomery form and g(x) = * x^128 + x^127 + x^126 + x^121 + 1. * * We have a 256-bit polynomial PH : PL = P_3 : P_2 : P_1 : P_0 that is the * product of two 128-bit polynomials in Montgomery form. We need to reduce it * mod g(x). Also, since polynomials in Montgomery form have an "extra" factor * of x^128, this product has two extra factors of x^128. To get it back into * Montgomery form, we need to remove one of these factors by dividing by x^128. * * To accomplish both of these goals, we add multiples of g(x) that cancel out * the low 128 bits P_1 : P_0, leaving just the high 128 bits. Since the low * bits are zero, the polynomial division by x^128 can be done by right shifting. * * Since the only nonzero term in the low 64 bits of g(x) is the constant term, * the multiple of g(x) needed to cancel out P_0 is P_0 * g(x). The CPU can * only do 64x64 bit multiplications, so split P_0 * g(x) into x^128 * P_0 + * x^64 * g*(x) * P_0 + P_0, where g*(x) is bits 64-127 of g(x). Adding this to * the original polynomial gives P_3 : P_2 + P_0 + T_1 : P_1 + T_0 : 0, where T * = T_1 : T_0 = g*(x) * P_0. Thus, bits 0-63 got "folded" into bits 64-191. * * Repeating this same process on the next 64 bits "folds" bits 64-127 into bits * 128-255, giving the answer in bits 128-255. This time, we need to cancel P_1 * + T_0 in bits 64-127. The multiple of g(x) required is (P_1 + T_0) * g(x) * * x^64. Adding this to our previous computation gives P_3 + P_1 + T_0 + V_1 : * P_2 + P_0 + T_1 + V_0 : 0 : 0, where V = V_1 : V_0 = g*(x) * (P_1 + T_0). * * So our final computation is: * T = T_1 : T_0 = g*(x) * P_0 * V = V_1 : V_0 = g*(x) * (P_1 + T_0) * p(x) / x^{128} mod g(x) = P_3 + P_1 + T_0 + V_1 : P_2 + P_0 + T_1 + V_0 * * The implementation below saves a XOR instruction by computing P_1 + T_0 : P_0 * + T_1 and XORing into dest, rather than separately XORing P_1 : P_0 and T_0 : * T_1 into dest. This allows us to reuse P_1 + T_0 when computing V. */ .macro montgomery_reduction dest vpclmulqdq $0x00, PL, GSTAR, TMP_XMM # TMP_XMM = T_1 : T_0 = P_0 * g*(x) pshufd $0b01001110, TMP_XMM, TMP_XMM # TMP_XMM = T_0 : T_1 pxor PL, TMP_XMM # TMP_XMM = P_1 + T_0 : P_0 + T_1 pxor TMP_XMM, PH # PH = P_3 + P_1 + T_0 : P_2 + P_0 + T_1 pclmulqdq $0x11, GSTAR, TMP_XMM # TMP_XMM = V_1 : V_0 = V = [(P_1 + T_0) * g*(x)] vpxor TMP_XMM, PH, \dest .endm /* * Compute schoolbook multiplication for 8 blocks * m_0h^8 + ... + m_7h^1 * * If reduce is set, also computes the montgomery reduction of the * previous full_stride call and XORs with the first message block. * (m_0 + REDUCE(PL, PH))h^8 + ... + m_7h^1. * I.e., the first multiplication uses m_0 + REDUCE(PL, PH) instead of m_0. 
*/ .macro full_stride reduce pxor LO, LO pxor HI, HI pxor MI, MI schoolbook1_iteration 7 0 .if \reduce vpclmulqdq $0x00, PL, GSTAR, TMP_XMM .endif schoolbook1_iteration 6 0 .if \reduce pshufd $0b01001110, TMP_XMM, TMP_XMM .endif schoolbook1_iteration 5 0 .if \reduce pxor PL, TMP_XMM .endif schoolbook1_iteration 4 0 .if \reduce pxor TMP_XMM, PH .endif schoolbook1_iteration 3 0 .if \reduce pclmulqdq $0x11, GSTAR, TMP_XMM .endif schoolbook1_iteration 2 0 .if \reduce vpxor TMP_XMM, PH, SUM .endif schoolbook1_iteration 1 0 schoolbook1_iteration 0 1 addq $(8*16), MSG schoolbook2 .endm /* * Process BLOCKS_LEFT blocks, where 0 < BLOCKS_LEFT < STRIDE_BLOCKS */ .macro partial_stride mov BLOCKS_LEFT, TMP shlq $4, TMP addq $(16*STRIDE_BLOCKS), KEY_POWERS subq TMP, KEY_POWERS movups (MSG), %xmm0 pxor SUM, %xmm0 movaps (KEY_POWERS), %xmm1 schoolbook1_noload dec BLOCKS_LEFT addq $16, MSG addq $16, KEY_POWERS test $4, BLOCKS_LEFT jz .Lpartial4BlocksDone schoolbook1 4 addq $(4*16), MSG addq $(4*16), KEY_POWERS .Lpartial4BlocksDone: test $2, BLOCKS_LEFT jz .Lpartial2BlocksDone schoolbook1 2 addq $(2*16), MSG addq $(2*16), KEY_POWERS .Lpartial2BlocksDone: test $1, BLOCKS_LEFT jz .LpartialDone schoolbook1 1 .LpartialDone: schoolbook2 montgomery_reduction SUM .endm /* * Perform montgomery multiplication in GF(2^128) and store result in op1. * * Computes op1*op2*x^{-128} mod x^128 + x^127 + x^126 + x^121 + 1 * If op1, op2 are in montgomery form, this computes the montgomery * form of op1*op2. * * void clmul_polyval_mul(u8 *op1, const u8 *op2); */ SYM_FUNC_START(clmul_polyval_mul) FRAME_BEGIN vmovdqa .Lgstar(%rip), GSTAR movups (%rdi), %xmm0 movups (%rsi), %xmm1 schoolbook1_noload schoolbook2 montgomery_reduction SUM movups SUM, (%rdi) FRAME_END RET SYM_FUNC_END(clmul_polyval_mul) /* * Perform polynomial evaluation as specified by POLYVAL. This computes: * h^n * accumulator + h^n * m_0 + ... + h^1 * m_{n-1} * where n=nblocks, h is the hash key, and m_i are the message blocks. * * rdi - pointer to precomputed key powers h^8 ... h^1 * rsi - pointer to message blocks * rdx - number of blocks to hash * rcx - pointer to the accumulator * * void clmul_polyval_update(const struct polyval_tfm_ctx *keys, * const u8 *in, size_t nblocks, u8 *accumulator); */ SYM_FUNC_START(clmul_polyval_update) FRAME_BEGIN vmovdqa .Lgstar(%rip), GSTAR movups (ACCUMULATOR), SUM subq $STRIDE_BLOCKS, BLOCKS_LEFT js .LstrideLoopExit full_stride 0 subq $STRIDE_BLOCKS, BLOCKS_LEFT js .LstrideLoopExitReduce .LstrideLoop: full_stride 1 subq $STRIDE_BLOCKS, BLOCKS_LEFT jns .LstrideLoop .LstrideLoopExitReduce: montgomery_reduction SUM .LstrideLoopExit: add $STRIDE_BLOCKS, BLOCKS_LEFT jz .LskipPartial partial_stride .LskipPartial: movups SUM, (ACCUMULATOR) FRAME_END RET SYM_FUNC_END(clmul_polyval_update)
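The T/V formulation in the montgomery_reduction comment above can be checked with a few lines of plain C. The sketch below is illustrative only: it operates directly on 64-bit polynomial limbs and ignores the byte-order handling of real message blocks. GSTAR = 0xc200000000000000 is bits 64..127 of g(x), matching .Lgstar.

#include <stdint.h>

#define GSTAR 0xc200000000000000ULL	/* g*(x): bits 64..127 of g(x) */

/* 64x64 -> 128-bit carry-less (GF(2)) multiply */
static void clmul64(uint64_t a, uint64_t b, uint64_t *lo, uint64_t *hi)
{
	uint64_t l = 0, h = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if ((b >> i) & 1) {
			l ^= a << i;
			h ^= i ? a >> (64 - i) : 0;
		}
	}
	*lo = l;
	*hi = h;
}

/*
 * Reduce the 256-bit product P_3:P_2:P_1:P_0 to the 128-bit Montgomery
 * result, following the comment's final formula:
 *   T = g*(x) * P_0
 *   V = g*(x) * (P_1 + T_0)
 *   result = P_3 + P_1 + T_0 + V_1 : P_2 + P_0 + T_1 + V_0
 */
static void polyval_reduce(uint64_t p3, uint64_t p2, uint64_t p1, uint64_t p0,
			   uint64_t *hi, uint64_t *lo)
{
	uint64_t t0, t1, v0, v1;

	clmul64(GSTAR, p0, &t0, &t1);
	clmul64(GSTAR, p1 ^ t0, &v0, &v1);

	*hi = p3 ^ p1 ^ t0 ^ v1;
	*lo = p2 ^ p0 ^ t1 ^ v0;
}

A Montgomery-form product a*b*x^-128 mod g(x) is then a carry-less multiply of the two 128-bit operands followed by this reduction, which is exactly the sequence clmul_polyval_mul performs with schoolbook1_noload, schoolbook2 and montgomery_reduction.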
aixcc-public/challenge-001-exemplar-source
7,139
arch/x86/crypto/blake2s-core.S
/* SPDX-License-Identifier: GPL-2.0 OR MIT */ /* * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. * Copyright (C) 2017-2019 Samuel Neves <sneves@dei.uc.pt>. All Rights Reserved. */ #include <linux/linkage.h> .section .rodata.cst32.BLAKE2S_IV, "aM", @progbits, 32 .align 32 IV: .octa 0xA54FF53A3C6EF372BB67AE856A09E667 .octa 0x5BE0CD191F83D9AB9B05688C510E527F .section .rodata.cst16.ROT16, "aM", @progbits, 16 .align 16 ROT16: .octa 0x0D0C0F0E09080B0A0504070601000302 .section .rodata.cst16.ROR328, "aM", @progbits, 16 .align 16 ROR328: .octa 0x0C0F0E0D080B0A090407060500030201 .section .rodata.cst64.BLAKE2S_SIGMA, "aM", @progbits, 160 .align 64 SIGMA: .byte 0, 2, 4, 6, 1, 3, 5, 7, 14, 8, 10, 12, 15, 9, 11, 13 .byte 14, 4, 9, 13, 10, 8, 15, 6, 5, 1, 0, 11, 3, 12, 2, 7 .byte 11, 12, 5, 15, 8, 0, 2, 13, 9, 10, 3, 7, 4, 14, 6, 1 .byte 7, 3, 13, 11, 9, 1, 12, 14, 15, 2, 5, 4, 8, 6, 10, 0 .byte 9, 5, 2, 10, 0, 7, 4, 15, 3, 14, 11, 6, 13, 1, 12, 8 .byte 2, 6, 0, 8, 12, 10, 11, 3, 1, 4, 7, 15, 9, 13, 5, 14 .byte 12, 1, 14, 4, 5, 15, 13, 10, 8, 0, 6, 9, 11, 7, 3, 2 .byte 13, 7, 12, 3, 11, 14, 1, 9, 2, 5, 15, 8, 10, 0, 4, 6 .byte 6, 14, 11, 0, 15, 9, 3, 8, 10, 12, 13, 1, 5, 2, 7, 4 .byte 10, 8, 7, 1, 2, 4, 6, 5, 13, 15, 9, 3, 0, 11, 14, 12 #ifdef CONFIG_AS_AVX512 .section .rodata.cst64.BLAKE2S_SIGMA2, "aM", @progbits, 640 .align 64 SIGMA2: .long 0, 2, 4, 6, 1, 3, 5, 7, 14, 8, 10, 12, 15, 9, 11, 13 .long 8, 2, 13, 15, 10, 9, 12, 3, 6, 4, 0, 14, 5, 11, 1, 7 .long 11, 13, 8, 6, 5, 10, 14, 3, 2, 4, 12, 15, 1, 0, 7, 9 .long 11, 10, 7, 0, 8, 15, 1, 13, 3, 6, 2, 12, 4, 14, 9, 5 .long 4, 10, 9, 14, 15, 0, 11, 8, 1, 7, 3, 13, 2, 5, 6, 12 .long 2, 11, 4, 15, 14, 3, 10, 8, 13, 6, 5, 7, 0, 12, 1, 9 .long 4, 8, 15, 9, 14, 11, 13, 5, 3, 2, 1, 12, 6, 10, 7, 0 .long 6, 13, 0, 14, 12, 2, 1, 11, 15, 4, 5, 8, 7, 9, 3, 10 .long 15, 5, 4, 13, 10, 7, 3, 11, 12, 2, 0, 6, 9, 8, 1, 14 .long 8, 7, 14, 11, 13, 15, 0, 12, 10, 4, 5, 6, 3, 2, 1, 9 #endif /* CONFIG_AS_AVX512 */ .text SYM_FUNC_START(blake2s_compress_ssse3) testq %rdx,%rdx je .Lendofloop movdqu (%rdi),%xmm0 movdqu 0x10(%rdi),%xmm1 movdqa ROT16(%rip),%xmm12 movdqa ROR328(%rip),%xmm13 movdqu 0x20(%rdi),%xmm14 movq %rcx,%xmm15 leaq SIGMA+0xa0(%rip),%r8 jmp .Lbeginofloop .align 32 .Lbeginofloop: movdqa %xmm0,%xmm10 movdqa %xmm1,%xmm11 paddq %xmm15,%xmm14 movdqa IV(%rip),%xmm2 movdqa %xmm14,%xmm3 pxor IV+0x10(%rip),%xmm3 leaq SIGMA(%rip),%rcx .Lroundloop: movzbl (%rcx),%eax movd (%rsi,%rax,4),%xmm4 movzbl 0x1(%rcx),%eax movd (%rsi,%rax,4),%xmm5 movzbl 0x2(%rcx),%eax movd (%rsi,%rax,4),%xmm6 movzbl 0x3(%rcx),%eax movd (%rsi,%rax,4),%xmm7 punpckldq %xmm5,%xmm4 punpckldq %xmm7,%xmm6 punpcklqdq %xmm6,%xmm4 paddd %xmm4,%xmm0 paddd %xmm1,%xmm0 pxor %xmm0,%xmm3 pshufb %xmm12,%xmm3 paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm8 psrld $0xc,%xmm1 pslld $0x14,%xmm8 por %xmm8,%xmm1 movzbl 0x4(%rcx),%eax movd (%rsi,%rax,4),%xmm5 movzbl 0x5(%rcx),%eax movd (%rsi,%rax,4),%xmm6 movzbl 0x6(%rcx),%eax movd (%rsi,%rax,4),%xmm7 movzbl 0x7(%rcx),%eax movd (%rsi,%rax,4),%xmm4 punpckldq %xmm6,%xmm5 punpckldq %xmm4,%xmm7 punpcklqdq %xmm7,%xmm5 paddd %xmm5,%xmm0 paddd %xmm1,%xmm0 pxor %xmm0,%xmm3 pshufb %xmm13,%xmm3 paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm8 psrld $0x7,%xmm1 pslld $0x19,%xmm8 por %xmm8,%xmm1 pshufd $0x93,%xmm0,%xmm0 pshufd $0x4e,%xmm3,%xmm3 pshufd $0x39,%xmm2,%xmm2 movzbl 0x8(%rcx),%eax movd (%rsi,%rax,4),%xmm6 movzbl 0x9(%rcx),%eax movd (%rsi,%rax,4),%xmm7 movzbl 0xa(%rcx),%eax movd (%rsi,%rax,4),%xmm4 movzbl 0xb(%rcx),%eax movd 
(%rsi,%rax,4),%xmm5 punpckldq %xmm7,%xmm6 punpckldq %xmm5,%xmm4 punpcklqdq %xmm4,%xmm6 paddd %xmm6,%xmm0 paddd %xmm1,%xmm0 pxor %xmm0,%xmm3 pshufb %xmm12,%xmm3 paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm8 psrld $0xc,%xmm1 pslld $0x14,%xmm8 por %xmm8,%xmm1 movzbl 0xc(%rcx),%eax movd (%rsi,%rax,4),%xmm7 movzbl 0xd(%rcx),%eax movd (%rsi,%rax,4),%xmm4 movzbl 0xe(%rcx),%eax movd (%rsi,%rax,4),%xmm5 movzbl 0xf(%rcx),%eax movd (%rsi,%rax,4),%xmm6 punpckldq %xmm4,%xmm7 punpckldq %xmm6,%xmm5 punpcklqdq %xmm5,%xmm7 paddd %xmm7,%xmm0 paddd %xmm1,%xmm0 pxor %xmm0,%xmm3 pshufb %xmm13,%xmm3 paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm8 psrld $0x7,%xmm1 pslld $0x19,%xmm8 por %xmm8,%xmm1 pshufd $0x39,%xmm0,%xmm0 pshufd $0x4e,%xmm3,%xmm3 pshufd $0x93,%xmm2,%xmm2 addq $0x10,%rcx cmpq %r8,%rcx jnz .Lroundloop pxor %xmm2,%xmm0 pxor %xmm3,%xmm1 pxor %xmm10,%xmm0 pxor %xmm11,%xmm1 addq $0x40,%rsi decq %rdx jnz .Lbeginofloop movdqu %xmm0,(%rdi) movdqu %xmm1,0x10(%rdi) movdqu %xmm14,0x20(%rdi) .Lendofloop: RET SYM_FUNC_END(blake2s_compress_ssse3) #ifdef CONFIG_AS_AVX512 SYM_FUNC_START(blake2s_compress_avx512) vmovdqu (%rdi),%xmm0 vmovdqu 0x10(%rdi),%xmm1 vmovdqu 0x20(%rdi),%xmm4 vmovq %rcx,%xmm5 vmovdqa IV(%rip),%xmm14 vmovdqa IV+16(%rip),%xmm15 jmp .Lblake2s_compress_avx512_mainloop .align 32 .Lblake2s_compress_avx512_mainloop: vmovdqa %xmm0,%xmm10 vmovdqa %xmm1,%xmm11 vpaddq %xmm5,%xmm4,%xmm4 vmovdqa %xmm14,%xmm2 vpxor %xmm15,%xmm4,%xmm3 vmovdqu (%rsi),%ymm6 vmovdqu 0x20(%rsi),%ymm7 addq $0x40,%rsi leaq SIGMA2(%rip),%rax movb $0xa,%cl .Lblake2s_compress_avx512_roundloop: addq $0x40,%rax vmovdqa -0x40(%rax),%ymm8 vmovdqa -0x20(%rax),%ymm9 vpermi2d %ymm7,%ymm6,%ymm8 vpermi2d %ymm7,%ymm6,%ymm9 vmovdqa %ymm8,%ymm6 vmovdqa %ymm9,%ymm7 vpaddd %xmm8,%xmm0,%xmm0 vpaddd %xmm1,%xmm0,%xmm0 vpxor %xmm0,%xmm3,%xmm3 vprord $0x10,%xmm3,%xmm3 vpaddd %xmm3,%xmm2,%xmm2 vpxor %xmm2,%xmm1,%xmm1 vprord $0xc,%xmm1,%xmm1 vextracti128 $0x1,%ymm8,%xmm8 vpaddd %xmm8,%xmm0,%xmm0 vpaddd %xmm1,%xmm0,%xmm0 vpxor %xmm0,%xmm3,%xmm3 vprord $0x8,%xmm3,%xmm3 vpaddd %xmm3,%xmm2,%xmm2 vpxor %xmm2,%xmm1,%xmm1 vprord $0x7,%xmm1,%xmm1 vpshufd $0x93,%xmm0,%xmm0 vpshufd $0x4e,%xmm3,%xmm3 vpshufd $0x39,%xmm2,%xmm2 vpaddd %xmm9,%xmm0,%xmm0 vpaddd %xmm1,%xmm0,%xmm0 vpxor %xmm0,%xmm3,%xmm3 vprord $0x10,%xmm3,%xmm3 vpaddd %xmm3,%xmm2,%xmm2 vpxor %xmm2,%xmm1,%xmm1 vprord $0xc,%xmm1,%xmm1 vextracti128 $0x1,%ymm9,%xmm9 vpaddd %xmm9,%xmm0,%xmm0 vpaddd %xmm1,%xmm0,%xmm0 vpxor %xmm0,%xmm3,%xmm3 vprord $0x8,%xmm3,%xmm3 vpaddd %xmm3,%xmm2,%xmm2 vpxor %xmm2,%xmm1,%xmm1 vprord $0x7,%xmm1,%xmm1 vpshufd $0x39,%xmm0,%xmm0 vpshufd $0x4e,%xmm3,%xmm3 vpshufd $0x93,%xmm2,%xmm2 decb %cl jne .Lblake2s_compress_avx512_roundloop vpxor %xmm10,%xmm0,%xmm0 vpxor %xmm11,%xmm1,%xmm1 vpxor %xmm2,%xmm0,%xmm0 vpxor %xmm3,%xmm1,%xmm1 decq %rdx jne .Lblake2s_compress_avx512_mainloop vmovdqu %xmm0,(%rdi) vmovdqu %xmm1,0x10(%rdi) vmovdqu %xmm4,0x20(%rdi) vzeroupper RET SYM_FUNC_END(blake2s_compress_avx512) #endif /* CONFIG_AS_AVX512 */
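Each group of paddd/pxor/pshufb/psrld/pslld instructions in blake2s_compress_ssse3 above evaluates the BLAKE2s G function for four columns (or diagonals) at once: the rotations by 16 and 8 use the ROT16/ROR328 shuffle masks, the rotations by 12 and 7 use shift/or pairs. A single-word C sketch of G — illustrative, following the BLAKE2s specification rather than the kernel sources — is:

#include <stdint.h>

static inline uint32_t ror32(uint32_t v, unsigned int s)
{
	return (v >> s) | (v << (32 - s));
}

/* One BLAKE2s G evaluation on scalar state words (sketch). */
static void blake2s_g(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d,
		      uint32_t mx, uint32_t my)
{
	*a += *b + mx;
	*d = ror32(*d ^ *a, 16);
	*c += *d;
	*b = ror32(*b ^ *c, 12);
	*a += *b + my;
	*d = ror32(*d ^ *a, 8);
	*c += *d;
	*b = ror32(*b ^ *c, 7);
}

The pshufd $0x93/$0x4e/$0x39 shuffles between the two halves of each round rotate the state words so that the same instruction sequence alternates between the column and diagonal steps.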
aixcc-public/challenge-001-exemplar-source
13,390
arch/x86/crypto/crc32c-pcl-intel-asm_64.S
/* * Implement fast CRC32C with PCLMULQDQ instructions. (x86_64) * * The white papers on CRC32C calculations with PCLMULQDQ instruction can be * downloaded from: * http://www.intel.com/content/dam/www/public/us/en/documents/white-papers/crc-iscsi-polynomial-crc32-instruction-paper.pdf * http://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-paper.pdf * * Copyright (C) 2012 Intel Corporation. * * Authors: * Wajdi Feghali <wajdi.k.feghali@intel.com> * James Guilford <james.guilford@intel.com> * David Cote <david.m.cote@intel.com> * Tim Chen <tim.c.chen@linux.intel.com> * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/linkage.h> #include <asm/nospec-branch.h> ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction .macro LABEL prefix n \prefix\n\(): .endm .macro JMPTBL_ENTRY i .quad crc_\i .endm .macro JNC_LESS_THAN j jnc less_than_\j .endm # Define threshold where buffers are considered "small" and routed to more # efficient "by-1" code. This "by-1" code only handles up to 255 bytes, so # SMALL_SIZE can be no larger than 255. 
#define SMALL_SIZE 200 .if (SMALL_SIZE > 255) .error "SMALL_ SIZE must be < 256" .endif # unsigned int crc_pcl(u8 *buffer, int len, unsigned int crc_init); .text SYM_FUNC_START(crc_pcl) #define bufp rdi #define bufp_dw %edi #define bufp_w %di #define bufp_b %dil #define bufptmp %rcx #define block_0 %rcx #define block_1 %rdx #define block_2 %r11 #define len %rsi #define len_dw %esi #define len_w %si #define len_b %sil #define crc_init_arg %rdx #define tmp %rbx #define crc_init %r8 #define crc_init_dw %r8d #define crc1 %r9 #define crc2 %r10 pushq %rbx pushq %rdi pushq %rsi ## Move crc_init for Linux to a different mov crc_init_arg, crc_init ################################################################ ## 1) ALIGN: ################################################################ mov %bufp, bufptmp # rdi = *buf neg %bufp and $7, %bufp # calculate the unalignment amount of # the address je proc_block # Skip if aligned ## If len is less than 8 and we're unaligned, we need to jump ## to special code to avoid reading beyond the end of the buffer cmp $8, len jae do_align # less_than_8 expects length in upper 3 bits of len_dw # less_than_8_post_shl1 expects length = carryflag * 8 + len_dw[31:30] shl $32-3+1, len_dw jmp less_than_8_post_shl1 do_align: #### Calculate CRC of unaligned bytes of the buffer (if any) movq (bufptmp), tmp # load a quadward from the buffer add %bufp, bufptmp # align buffer pointer for quadword # processing sub %bufp, len # update buffer length align_loop: crc32b %bl, crc_init_dw # compute crc32 of 1-byte shr $8, tmp # get next byte dec %bufp jne align_loop proc_block: ################################################################ ## 2) PROCESS BLOCKS: ################################################################ ## compute num of bytes to be processed movq len, tmp # save num bytes in tmp cmpq $128*24, len jae full_block continue_block: cmpq $SMALL_SIZE, len jb small ## len < 128*24 movq $2731, %rax # 2731 = ceil(2^16 / 24) mul len_dw shrq $16, %rax ## eax contains floor(bytes / 24) = num 24-byte chunks to do ## process rax 24-byte chunks (128 >= rax >= 0) ## compute end address of each block ## block 0 (base addr + RAX * 8) ## block 1 (base addr + RAX * 16) ## block 2 (base addr + RAX * 24) lea (bufptmp, %rax, 8), block_0 lea (block_0, %rax, 8), block_1 lea (block_1, %rax, 8), block_2 xor crc1, crc1 xor crc2, crc2 ## branch into array mov jump_table(,%rax,8), %bufp JMP_NOSPEC bufp ################################################################ ## 2a) PROCESS FULL BLOCKS: ################################################################ full_block: movl $128,%eax lea 128*8*2(block_0), block_1 lea 128*8*3(block_0), block_2 add $128*8*1, block_0 xor crc1,crc1 xor crc2,crc2 # Fall thruogh into top of crc array (crc_128) ################################################################ ## 3) CRC Array: ################################################################ crc_array: i=128 .rept 128-1 .altmacro LABEL crc_ %i .noaltmacro ENDBR crc32q -i*8(block_0), crc_init crc32q -i*8(block_1), crc1 crc32q -i*8(block_2), crc2 i=(i-1) .endr .altmacro LABEL crc_ %i .noaltmacro ENDBR crc32q -i*8(block_0), crc_init crc32q -i*8(block_1), crc1 # SKIP crc32 -i*8(block_2), crc2 ; Don't do this one yet mov block_2, block_0 ################################################################ ## 4) Combine three results: ################################################################ lea (K_table-8)(%rip), %bufp # first entry is for idx 1 shlq $3, %rax # rax *= 8 pmovzxdq (%bufp,%rax), %xmm0 # 
2 consts: K1:K2 leal (%eax,%eax,2), %eax # rax *= 3 (total *24) subq %rax, tmp # tmp -= rax*24 movq crc_init, %xmm1 # CRC for block 1 pclmulqdq $0x00, %xmm0, %xmm1 # Multiply by K2 movq crc1, %xmm2 # CRC for block 2 pclmulqdq $0x10, %xmm0, %xmm2 # Multiply by K1 pxor %xmm2,%xmm1 movq %xmm1, %rax xor -i*8(block_2), %rax mov crc2, crc_init crc32 %rax, crc_init ################################################################ ## 5) Check for end: ################################################################ LABEL crc_ 0 ENDBR mov tmp, len cmp $128*24, tmp jae full_block cmp $24, tmp jae continue_block less_than_24: shl $32-4, len_dw # less_than_16 expects length # in upper 4 bits of len_dw jnc less_than_16 crc32q (bufptmp), crc_init crc32q 8(bufptmp), crc_init jz do_return add $16, bufptmp # len is less than 8 if we got here # less_than_8 expects length in upper 3 bits of len_dw # less_than_8_post_shl1 expects length = carryflag * 8 + len_dw[31:30] shl $2, len_dw jmp less_than_8_post_shl1 ####################################################################### ## 6) LESS THAN 256-bytes REMAIN AT THIS POINT (8-bits of len are full) ####################################################################### small: shl $32-8, len_dw # Prepare len_dw for less_than_256 j=256 .rept 5 # j = {256, 128, 64, 32, 16} .altmacro LABEL less_than_ %j # less_than_j: Length should be in # upper lg(j) bits of len_dw j=(j/2) shl $1, len_dw # Get next MSB JNC_LESS_THAN %j .noaltmacro i=0 .rept (j/8) crc32q i(bufptmp), crc_init # Compute crc32 of 8-byte data i=i+8 .endr jz do_return # Return if remaining length is zero add $j, bufptmp # Advance buf .endr less_than_8: # Length should be stored in # upper 3 bits of len_dw shl $1, len_dw less_than_8_post_shl1: jnc less_than_4 crc32l (bufptmp), crc_init_dw # CRC of 4 bytes jz do_return # return if remaining data is zero add $4, bufptmp less_than_4: # Length should be stored in # upper 2 bits of len_dw shl $1, len_dw jnc less_than_2 crc32w (bufptmp), crc_init_dw # CRC of 2 bytes jz do_return # return if remaining data is zero add $2, bufptmp less_than_2: # Length should be stored in the MSB # of len_dw shl $1, len_dw jnc less_than_1 crc32b (bufptmp), crc_init_dw # CRC of 1 byte less_than_1: # Length should be zero do_return: movq crc_init, %rax popq %rsi popq %rdi popq %rbx RET SYM_FUNC_END(crc_pcl) .section .rodata, "a", @progbits ################################################################ ## jump table Table is 129 entries x 2 bytes each ################################################################ .align 4 jump_table: i=0 .rept 129 .altmacro JMPTBL_ENTRY %i .noaltmacro i=i+1 .endr ################################################################ ## PCLMULQDQ tables ## Table is 128 entries x 2 words (8 bytes) each ################################################################ .align 8 K_table: .long 0x493c7d27, 0x00000001 .long 0xba4fc28e, 0x493c7d27 .long 0xddc0152b, 0xf20c0dfe .long 0x9e4addf8, 0xba4fc28e .long 0x39d3b296, 0x3da6d0cb .long 0x0715ce53, 0xddc0152b .long 0x47db8317, 0x1c291d04 .long 0x0d3b6092, 0x9e4addf8 .long 0xc96cfdc0, 0x740eef02 .long 0x878a92a7, 0x39d3b296 .long 0xdaece73e, 0x083a6eec .long 0xab7aff2a, 0x0715ce53 .long 0x2162d385, 0xc49f4f67 .long 0x83348832, 0x47db8317 .long 0x299847d5, 0x2ad91c30 .long 0xb9e02b86, 0x0d3b6092 .long 0x18b33a4e, 0x6992cea2 .long 0xb6dd949b, 0xc96cfdc0 .long 0x78d9ccb7, 0x7e908048 .long 0xbac2fd7b, 0x878a92a7 .long 0xa60ce07b, 0x1b3d8f29 .long 0xce7f39f4, 0xdaece73e .long 0x61d82e56, 0xf1d0f55e .long 
0xd270f1a2, 0xab7aff2a .long 0xc619809d, 0xa87ab8a8 .long 0x2b3cac5d, 0x2162d385 .long 0x65863b64, 0x8462d800 .long 0x1b03397f, 0x83348832 .long 0xebb883bd, 0x71d111a8 .long 0xb3e32c28, 0x299847d5 .long 0x064f7f26, 0xffd852c6 .long 0xdd7e3b0c, 0xb9e02b86 .long 0xf285651c, 0xdcb17aa4 .long 0x10746f3c, 0x18b33a4e .long 0xc7a68855, 0xf37c5aee .long 0x271d9844, 0xb6dd949b .long 0x8e766a0c, 0x6051d5a2 .long 0x93a5f730, 0x78d9ccb7 .long 0x6cb08e5c, 0x18b0d4ff .long 0x6b749fb2, 0xbac2fd7b .long 0x1393e203, 0x21f3d99c .long 0xcec3662e, 0xa60ce07b .long 0x96c515bb, 0x8f158014 .long 0xe6fc4e6a, 0xce7f39f4 .long 0x8227bb8a, 0xa00457f7 .long 0xb0cd4768, 0x61d82e56 .long 0x39c7ff35, 0x8d6d2c43 .long 0xd7a4825c, 0xd270f1a2 .long 0x0ab3844b, 0x00ac29cf .long 0x0167d312, 0xc619809d .long 0xf6076544, 0xe9adf796 .long 0x26f6a60a, 0x2b3cac5d .long 0xa741c1bf, 0x96638b34 .long 0x98d8d9cb, 0x65863b64 .long 0x49c3cc9c, 0xe0e9f351 .long 0x68bce87a, 0x1b03397f .long 0x57a3d037, 0x9af01f2d .long 0x6956fc3b, 0xebb883bd .long 0x42d98888, 0x2cff42cf .long 0x3771e98f, 0xb3e32c28 .long 0xb42ae3d9, 0x88f25a3a .long 0x2178513a, 0x064f7f26 .long 0xe0ac139e, 0x4e36f0b0 .long 0x170076fa, 0xdd7e3b0c .long 0x444dd413, 0xbd6f81f8 .long 0x6f345e45, 0xf285651c .long 0x41d17b64, 0x91c9bd4b .long 0xff0dba97, 0x10746f3c .long 0xa2b73df1, 0x885f087b .long 0xf872e54c, 0xc7a68855 .long 0x1e41e9fc, 0x4c144932 .long 0x86d8e4d2, 0x271d9844 .long 0x651bd98b, 0x52148f02 .long 0x5bb8f1bc, 0x8e766a0c .long 0xa90fd27a, 0xa3c6f37a .long 0xb3af077a, 0x93a5f730 .long 0x4984d782, 0xd7c0557f .long 0xca6ef3ac, 0x6cb08e5c .long 0x234e0b26, 0x63ded06a .long 0xdd66cbbb, 0x6b749fb2 .long 0x4597456a, 0x4d56973c .long 0xe9e28eb4, 0x1393e203 .long 0x7b3ff57a, 0x9669c9df .long 0xc9c8b782, 0xcec3662e .long 0x3f70cc6f, 0xe417f38a .long 0x93e106a4, 0x96c515bb .long 0x62ec6c6d, 0x4b9e0f71 .long 0xd813b325, 0xe6fc4e6a .long 0x0df04680, 0xd104b8fc .long 0x2342001e, 0x8227bb8a .long 0x0a2a8d7e, 0x5b397730 .long 0x6d9a4957, 0xb0cd4768 .long 0xe8b6368b, 0xe78eb416 .long 0xd2c3ed1a, 0x39c7ff35 .long 0x995a5724, 0x61ff0e01 .long 0x9ef68d35, 0xd7a4825c .long 0x0c139b31, 0x8d96551c .long 0xf2271e60, 0x0ab3844b .long 0x0b0bf8ca, 0x0bf80dd2 .long 0x2664fd8b, 0x0167d312 .long 0xed64812d, 0x8821abed .long 0x02ee03b2, 0xf6076544 .long 0x8604ae0f, 0x6a45d2b2 .long 0x363bd6b3, 0x26f6a60a .long 0x135c83fd, 0xd8d26619 .long 0x5fabe670, 0xa741c1bf .long 0x35ec3279, 0xde87806c .long 0x00bcf5f6, 0x98d8d9cb .long 0x8ae00689, 0x14338754 .long 0x17f27698, 0x49c3cc9c .long 0x58ca5f00, 0x5bd2011f .long 0xaa7c7ad5, 0x68bce87a .long 0xb5cfca28, 0xdd07448e .long 0xded288f8, 0x57a3d037 .long 0x59f229bc, 0xdde8f5b9 .long 0x6d390dec, 0x6956fc3b .long 0x37170390, 0xa3e3e02c .long 0x6353c1cc, 0x42d98888 .long 0xc4584f5c, 0xd73c7bea .long 0xf48642e9, 0x3771e98f .long 0x531377e2, 0x80ff0093 .long 0xdd35bc8d, 0xb42ae3d9 .long 0xb25b29f2, 0x8fe4c34d .long 0x9a5ede41, 0x2178513a .long 0xa563905d, 0xdf99fc11 .long 0x45cddf4e, 0xe0ac139e .long 0xacfa3103, 0x6c23e841 .long 0xa51b6135, 0x170076fa
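As a reference point for what each crc32b/crc32q instruction in crc_pcl above accumulates, the following is a plain bitwise CRC32C (Castagnoli, reflected polynomial 0x82f63b78). It is an illustrative software sketch only; it does not model the three-stream split or the PCLMULQDQ recombination via K_table that crc_pcl performs.

#include <stddef.h>
#include <stdint.h>

/* Bit-at-a-time CRC32C update over a byte buffer (sketch). */
static uint32_t crc32c_sw(uint32_t crc, const unsigned char *buf, size_t len)
{
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ (0x82f63b78u & -(crc & 1u));
	}
	return crc;
}

crc32q applies this per-byte update to eight bytes at a time; crc_pcl gains its speed by running three such streams over different thirds of the buffer and folding the partial CRCs together with the K_table constants.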
aixcc-public/challenge-001-exemplar-source
84,373
arch/x86/crypto/aesni-intel_asm.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Implement AES algorithm in Intel AES-NI instructions. * * The white paper of AES-NI instructions can be downloaded from: * http://softwarecommunity.intel.com/isn/downloads/intelavx/AES-Instructions-Set_WP.pdf * * Copyright (C) 2008, Intel Corp. * Author: Huang Ying <ying.huang@intel.com> * Vinodh Gopal <vinodh.gopal@intel.com> * Kahraman Akdemir * * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD * interface for 64-bit kernels. * Authors: Erdinc Ozturk (erdinc.ozturk@intel.com) * Aidan O'Mahony (aidan.o.mahony@intel.com) * Adrian Hoban <adrian.hoban@intel.com> * James Guilford (james.guilford@intel.com) * Gabriele Paoloni <gabriele.paoloni@intel.com> * Tadeusz Struk (tadeusz.struk@intel.com) * Wajdi Feghali (wajdi.k.feghali@intel.com) * Copyright (c) 2010, Intel Corporation. * * Ported x86_64 version to x86: * Author: Mathias Krause <minipli@googlemail.com> */ #include <linux/linkage.h> #include <asm/frame.h> #include <asm/nospec-branch.h> /* * The following macros are used to move an (un)aligned 16 byte value to/from * an XMM register. This can done for either FP or integer values, for FP use * movaps (move aligned packed single) or integer use movdqa (move double quad * aligned). It doesn't make a performance difference which instruction is used * since Nehalem (original Core i7) was released. However, the movaps is a byte * shorter, so that is the one we'll use for now. (same for unaligned). */ #define MOVADQ movaps #define MOVUDQ movups #ifdef __x86_64__ # constants in mergeable sections, linker can reorder and merge .section .rodata.cst16.POLY, "aM", @progbits, 16 .align 16 POLY: .octa 0xC2000000000000000000000000000001 .section .rodata.cst16.TWOONE, "aM", @progbits, 16 .align 16 TWOONE: .octa 0x00000001000000000000000000000001 .section .rodata.cst16.SHUF_MASK, "aM", @progbits, 16 .align 16 SHUF_MASK: .octa 0x000102030405060708090A0B0C0D0E0F .section .rodata.cst16.MASK1, "aM", @progbits, 16 .align 16 MASK1: .octa 0x0000000000000000ffffffffffffffff .section .rodata.cst16.MASK2, "aM", @progbits, 16 .align 16 MASK2: .octa 0xffffffffffffffff0000000000000000 .section .rodata.cst16.ONE, "aM", @progbits, 16 .align 16 ONE: .octa 0x00000000000000000000000000000001 .section .rodata.cst16.F_MIN_MASK, "aM", @progbits, 16 .align 16 F_MIN_MASK: .octa 0xf1f2f3f4f5f6f7f8f9fafbfcfdfeff0 .section .rodata.cst16.dec, "aM", @progbits, 16 .align 16 dec: .octa 0x1 .section .rodata.cst16.enc, "aM", @progbits, 16 .align 16 enc: .octa 0x2 # order of these constants should not change. 
# more specifically, ALL_F should follow SHIFT_MASK, # and zero should follow ALL_F .section .rodata, "a", @progbits .align 16 SHIFT_MASK: .octa 0x0f0e0d0c0b0a09080706050403020100 ALL_F: .octa 0xffffffffffffffffffffffffffffffff .octa 0x00000000000000000000000000000000 .text #define STACK_OFFSET 8*3 #define AadHash 16*0 #define AadLen 16*1 #define InLen (16*1)+8 #define PBlockEncKey 16*2 #define OrigIV 16*3 #define CurCount 16*4 #define PBlockLen 16*5 #define HashKey 16*6 // store HashKey <<1 mod poly here #define HashKey_2 16*7 // store HashKey^2 <<1 mod poly here #define HashKey_3 16*8 // store HashKey^3 <<1 mod poly here #define HashKey_4 16*9 // store HashKey^4 <<1 mod poly here #define HashKey_k 16*10 // store XOR of High 64 bits and Low 64 // bits of HashKey <<1 mod poly here //(for Karatsuba purposes) #define HashKey_2_k 16*11 // store XOR of High 64 bits and Low 64 // bits of HashKey^2 <<1 mod poly here // (for Karatsuba purposes) #define HashKey_3_k 16*12 // store XOR of High 64 bits and Low 64 // bits of HashKey^3 <<1 mod poly here // (for Karatsuba purposes) #define HashKey_4_k 16*13 // store XOR of High 64 bits and Low 64 // bits of HashKey^4 <<1 mod poly here // (for Karatsuba purposes) #define arg1 rdi #define arg2 rsi #define arg3 rdx #define arg4 rcx #define arg5 r8 #define arg6 r9 #define arg7 STACK_OFFSET+8(%rsp) #define arg8 STACK_OFFSET+16(%rsp) #define arg9 STACK_OFFSET+24(%rsp) #define arg10 STACK_OFFSET+32(%rsp) #define arg11 STACK_OFFSET+40(%rsp) #define keysize 2*15*16(%arg1) #endif #define STATE1 %xmm0 #define STATE2 %xmm4 #define STATE3 %xmm5 #define STATE4 %xmm6 #define STATE STATE1 #define IN1 %xmm1 #define IN2 %xmm7 #define IN3 %xmm8 #define IN4 %xmm9 #define IN IN1 #define KEY %xmm2 #define IV %xmm3 #define BSWAP_MASK %xmm10 #define CTR %xmm11 #define INC %xmm12 #define GF128MUL_MASK %xmm7 #ifdef __x86_64__ #define AREG %rax #define KEYP %rdi #define OUTP %rsi #define UKEYP OUTP #define INP %rdx #define LEN %rcx #define IVP %r8 #define KLEN %r9d #define T1 %r10 #define TKEYP T1 #define T2 %r11 #define TCTR_LOW T2 #else #define AREG %eax #define KEYP %edi #define OUTP AREG #define UKEYP OUTP #define INP %edx #define LEN %esi #define IVP %ebp #define KLEN %ebx #define T1 %ecx #define TKEYP T1 #endif .macro FUNC_SAVE push %r12 push %r13 push %r14 # # states of %xmm registers %xmm6:%xmm15 not saved # all %xmm registers are clobbered # .endm .macro FUNC_RESTORE pop %r14 pop %r13 pop %r12 .endm # Precompute hashkeys. # Input: Hash subkey. # Output: HashKeys stored in gcm_context_data. Only needs to be called # once per key. # clobbers r12, and tmp xmm registers. 
.macro PRECOMPUTE SUBKEY TMP1 TMP2 TMP3 TMP4 TMP5 TMP6 TMP7 mov \SUBKEY, %r12 movdqu (%r12), \TMP3 movdqa SHUF_MASK(%rip), \TMP2 pshufb \TMP2, \TMP3 # precompute HashKey<<1 mod poly from the HashKey (required for GHASH) movdqa \TMP3, \TMP2 psllq $1, \TMP3 psrlq $63, \TMP2 movdqa \TMP2, \TMP1 pslldq $8, \TMP2 psrldq $8, \TMP1 por \TMP2, \TMP3 # reduce HashKey<<1 pshufd $0x24, \TMP1, \TMP2 pcmpeqd TWOONE(%rip), \TMP2 pand POLY(%rip), \TMP2 pxor \TMP2, \TMP3 movdqu \TMP3, HashKey(%arg2) movdqa \TMP3, \TMP5 pshufd $78, \TMP3, \TMP1 pxor \TMP3, \TMP1 movdqu \TMP1, HashKey_k(%arg2) GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7 # TMP5 = HashKey^2<<1 (mod poly) movdqu \TMP5, HashKey_2(%arg2) # HashKey_2 = HashKey^2<<1 (mod poly) pshufd $78, \TMP5, \TMP1 pxor \TMP5, \TMP1 movdqu \TMP1, HashKey_2_k(%arg2) GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7 # TMP5 = HashKey^3<<1 (mod poly) movdqu \TMP5, HashKey_3(%arg2) pshufd $78, \TMP5, \TMP1 pxor \TMP5, \TMP1 movdqu \TMP1, HashKey_3_k(%arg2) GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7 # TMP5 = HashKey^3<<1 (mod poly) movdqu \TMP5, HashKey_4(%arg2) pshufd $78, \TMP5, \TMP1 pxor \TMP5, \TMP1 movdqu \TMP1, HashKey_4_k(%arg2) .endm # GCM_INIT initializes a gcm_context struct to prepare for encoding/decoding. # Clobbers rax, r10-r13 and xmm0-xmm6, %xmm13 .macro GCM_INIT Iv SUBKEY AAD AADLEN mov \AADLEN, %r11 mov %r11, AadLen(%arg2) # ctx_data.aad_length = aad_length xor %r11d, %r11d mov %r11, InLen(%arg2) # ctx_data.in_length = 0 mov %r11, PBlockLen(%arg2) # ctx_data.partial_block_length = 0 mov %r11, PBlockEncKey(%arg2) # ctx_data.partial_block_enc_key = 0 mov \Iv, %rax movdqu (%rax), %xmm0 movdqu %xmm0, OrigIV(%arg2) # ctx_data.orig_IV = iv movdqa SHUF_MASK(%rip), %xmm2 pshufb %xmm2, %xmm0 movdqu %xmm0, CurCount(%arg2) # ctx_data.current_counter = iv PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7 movdqu HashKey(%arg2), %xmm13 CALC_AAD_HASH %xmm13, \AAD, \AADLEN, %xmm0, %xmm1, %xmm2, %xmm3, \ %xmm4, %xmm5, %xmm6 .endm # GCM_ENC_DEC Encodes/Decodes given data. Assumes that the passed gcm_context # struct has been initialized by GCM_INIT. 
# Requires the input data be at least 1 byte long because of READ_PARTIAL_BLOCK # Clobbers rax, r10-r13, and xmm0-xmm15 .macro GCM_ENC_DEC operation movdqu AadHash(%arg2), %xmm8 movdqu HashKey(%arg2), %xmm13 add %arg5, InLen(%arg2) xor %r11d, %r11d # initialise the data pointer offset as zero PARTIAL_BLOCK %arg3 %arg4 %arg5 %r11 %xmm8 \operation sub %r11, %arg5 # sub partial block data used mov %arg5, %r13 # save the number of bytes and $-16, %r13 # %r13 = %r13 - (%r13 mod 16) mov %r13, %r12 # Encrypt/Decrypt first few blocks and $(3<<4), %r12 jz _initial_num_blocks_is_0_\@ cmp $(2<<4), %r12 jb _initial_num_blocks_is_1_\@ je _initial_num_blocks_is_2_\@ _initial_num_blocks_is_3_\@: INITIAL_BLOCKS_ENC_DEC %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \ %xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 5, 678, \operation sub $48, %r13 jmp _initial_blocks_\@ _initial_num_blocks_is_2_\@: INITIAL_BLOCKS_ENC_DEC %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \ %xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 6, 78, \operation sub $32, %r13 jmp _initial_blocks_\@ _initial_num_blocks_is_1_\@: INITIAL_BLOCKS_ENC_DEC %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \ %xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 7, 8, \operation sub $16, %r13 jmp _initial_blocks_\@ _initial_num_blocks_is_0_\@: INITIAL_BLOCKS_ENC_DEC %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \ %xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 8, 0, \operation _initial_blocks_\@: # Main loop - Encrypt/Decrypt remaining blocks test %r13, %r13 je _zero_cipher_left_\@ sub $64, %r13 je _four_cipher_left_\@ _crypt_by_4_\@: GHASH_4_ENCRYPT_4_PARALLEL_\operation %xmm9, %xmm10, %xmm11, %xmm12, \ %xmm13, %xmm14, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, \ %xmm7, %xmm8, enc add $64, %r11 sub $64, %r13 jne _crypt_by_4_\@ _four_cipher_left_\@: GHASH_LAST_4 %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, \ %xmm15, %xmm1, %xmm2, %xmm3, %xmm4, %xmm8 _zero_cipher_left_\@: movdqu %xmm8, AadHash(%arg2) movdqu %xmm0, CurCount(%arg2) mov %arg5, %r13 and $15, %r13 # %r13 = arg5 (mod 16) je _multiple_of_16_bytes_\@ mov %r13, PBlockLen(%arg2) # Handle the last <16 Byte block separately paddd ONE(%rip), %xmm0 # INCR CNT to get Yn movdqu %xmm0, CurCount(%arg2) movdqa SHUF_MASK(%rip), %xmm10 pshufb %xmm10, %xmm0 ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # Encrypt(K, Yn) movdqu %xmm0, PBlockEncKey(%arg2) cmp $16, %arg5 jge _large_enough_update_\@ lea (%arg4,%r11,1), %r10 mov %r13, %r12 READ_PARTIAL_BLOCK %r10 %r12 %xmm2 %xmm1 jmp _data_read_\@ _large_enough_update_\@: sub $16, %r11 add %r13, %r11 # receive the last <16 Byte block movdqu (%arg4, %r11, 1), %xmm1 sub %r13, %r11 add $16, %r11 lea SHIFT_MASK+16(%rip), %r12 # adjust the shuffle mask pointer to be able to shift 16-r13 bytes # (r13 is the number of bytes in plaintext mod 16) sub %r13, %r12 # get the appropriate shuffle mask movdqu (%r12), %xmm2 # shift right 16-r13 bytes pshufb %xmm2, %xmm1 _data_read_\@: lea ALL_F+16(%rip), %r12 sub %r13, %r12 .ifc \operation, dec movdqa %xmm1, %xmm2 .endif pxor %xmm1, %xmm0 # XOR Encrypt(K, Yn) movdqu (%r12), %xmm1 # get the appropriate mask to mask out top 16-r13 bytes of xmm0 pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0 .ifc \operation, dec pand %xmm1, %xmm2 movdqa SHUF_MASK(%rip), %xmm10 pshufb %xmm10 ,%xmm2 pxor %xmm2, %xmm8 .else movdqa SHUF_MASK(%rip), %xmm10 pshufb %xmm10,%xmm0 pxor %xmm0, %xmm8 .endif movdqu %xmm8, AadHash(%arg2) .ifc \operation, enc # GHASH computation for the last <16 byte block movdqa SHUF_MASK(%rip), %xmm10 # shuffle xmm0 back to 
output as ciphertext pshufb %xmm10, %xmm0 .endif # Output %r13 bytes movq %xmm0, %rax cmp $8, %r13 jle _less_than_8_bytes_left_\@ mov %rax, (%arg3 , %r11, 1) add $8, %r11 psrldq $8, %xmm0 movq %xmm0, %rax sub $8, %r13 _less_than_8_bytes_left_\@: mov %al, (%arg3, %r11, 1) add $1, %r11 shr $8, %rax sub $1, %r13 jne _less_than_8_bytes_left_\@ _multiple_of_16_bytes_\@: .endm # GCM_COMPLETE Finishes update of tag of last partial block # Output: Authorization Tag (AUTH_TAG) # Clobbers rax, r10-r12, and xmm0, xmm1, xmm5-xmm15 .macro GCM_COMPLETE AUTHTAG AUTHTAGLEN movdqu AadHash(%arg2), %xmm8 movdqu HashKey(%arg2), %xmm13 mov PBlockLen(%arg2), %r12 test %r12, %r12 je _partial_done\@ GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6 _partial_done\@: mov AadLen(%arg2), %r12 # %r13 = aadLen (number of bytes) shl $3, %r12 # convert into number of bits movd %r12d, %xmm15 # len(A) in %xmm15 mov InLen(%arg2), %r12 shl $3, %r12 # len(C) in bits (*128) movq %r12, %xmm1 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000 pxor %xmm1, %xmm15 # %xmm15 = len(A)||len(C) pxor %xmm15, %xmm8 GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6 # final GHASH computation movdqa SHUF_MASK(%rip), %xmm10 pshufb %xmm10, %xmm8 movdqu OrigIV(%arg2), %xmm0 # %xmm0 = Y0 ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # E(K, Y0) pxor %xmm8, %xmm0 _return_T_\@: mov \AUTHTAG, %r10 # %r10 = authTag mov \AUTHTAGLEN, %r11 # %r11 = auth_tag_len cmp $16, %r11 je _T_16_\@ cmp $8, %r11 jl _T_4_\@ _T_8_\@: movq %xmm0, %rax mov %rax, (%r10) add $8, %r10 sub $8, %r11 psrldq $8, %xmm0 test %r11, %r11 je _return_T_done_\@ _T_4_\@: movd %xmm0, %eax mov %eax, (%r10) add $4, %r10 sub $4, %r11 psrldq $4, %xmm0 test %r11, %r11 je _return_T_done_\@ _T_123_\@: movd %xmm0, %eax cmp $2, %r11 jl _T_1_\@ mov %ax, (%r10) cmp $2, %r11 je _return_T_done_\@ add $2, %r10 sar $16, %eax _T_1_\@: mov %al, (%r10) jmp _return_T_done_\@ _T_16_\@: movdqu %xmm0, (%r10) _return_T_done_\@: .endm #ifdef __x86_64__ /* GHASH_MUL MACRO to implement: Data*HashKey mod (128,127,126,121,0) * * * Input: A and B (128-bits each, bit-reflected) * Output: C = A*B*x mod poly, (i.e. >>1 ) * To compute GH = GH*HashKey mod poly, give HK = HashKey<<1 mod poly as input * GH = GH * HK * x mod poly which is equivalent to GH*HashKey mod poly. 
* */ .macro GHASH_MUL GH HK TMP1 TMP2 TMP3 TMP4 TMP5 movdqa \GH, \TMP1 pshufd $78, \GH, \TMP2 pshufd $78, \HK, \TMP3 pxor \GH, \TMP2 # TMP2 = a1+a0 pxor \HK, \TMP3 # TMP3 = b1+b0 pclmulqdq $0x11, \HK, \TMP1 # TMP1 = a1*b1 pclmulqdq $0x00, \HK, \GH # GH = a0*b0 pclmulqdq $0x00, \TMP3, \TMP2 # TMP2 = (a0+a1)*(b1+b0) pxor \GH, \TMP2 pxor \TMP1, \TMP2 # TMP2 = (a0*b0)+(a1*b0) movdqa \TMP2, \TMP3 pslldq $8, \TMP3 # left shift TMP3 2 DWs psrldq $8, \TMP2 # right shift TMP2 2 DWs pxor \TMP3, \GH pxor \TMP2, \TMP1 # TMP2:GH holds the result of GH*HK # first phase of the reduction movdqa \GH, \TMP2 movdqa \GH, \TMP3 movdqa \GH, \TMP4 # copy GH into TMP2,TMP3 and TMP4 # in in order to perform # independent shifts pslld $31, \TMP2 # packed right shift <<31 pslld $30, \TMP3 # packed right shift <<30 pslld $25, \TMP4 # packed right shift <<25 pxor \TMP3, \TMP2 # xor the shifted versions pxor \TMP4, \TMP2 movdqa \TMP2, \TMP5 psrldq $4, \TMP5 # right shift TMP5 1 DW pslldq $12, \TMP2 # left shift TMP2 3 DWs pxor \TMP2, \GH # second phase of the reduction movdqa \GH,\TMP2 # copy GH into TMP2,TMP3 and TMP4 # in in order to perform # independent shifts movdqa \GH,\TMP3 movdqa \GH,\TMP4 psrld $1,\TMP2 # packed left shift >>1 psrld $2,\TMP3 # packed left shift >>2 psrld $7,\TMP4 # packed left shift >>7 pxor \TMP3,\TMP2 # xor the shifted versions pxor \TMP4,\TMP2 pxor \TMP5, \TMP2 pxor \TMP2, \GH pxor \TMP1, \GH # result is in TMP1 .endm # Reads DLEN bytes starting at DPTR and stores in XMMDst # where 0 < DLEN < 16 # Clobbers %rax, DLEN and XMM1 .macro READ_PARTIAL_BLOCK DPTR DLEN XMM1 XMMDst cmp $8, \DLEN jl _read_lt8_\@ mov (\DPTR), %rax movq %rax, \XMMDst sub $8, \DLEN jz _done_read_partial_block_\@ xor %eax, %eax _read_next_byte_\@: shl $8, %rax mov 7(\DPTR, \DLEN, 1), %al dec \DLEN jnz _read_next_byte_\@ movq %rax, \XMM1 pslldq $8, \XMM1 por \XMM1, \XMMDst jmp _done_read_partial_block_\@ _read_lt8_\@: xor %eax, %eax _read_next_byte_lt8_\@: shl $8, %rax mov -1(\DPTR, \DLEN, 1), %al dec \DLEN jnz _read_next_byte_lt8_\@ movq %rax, \XMMDst _done_read_partial_block_\@: .endm # CALC_AAD_HASH: Calculates the hash of the data which will not be encrypted. # clobbers r10-11, xmm14 .macro CALC_AAD_HASH HASHKEY AAD AADLEN TMP1 TMP2 TMP3 TMP4 TMP5 \ TMP6 TMP7 MOVADQ SHUF_MASK(%rip), %xmm14 mov \AAD, %r10 # %r10 = AAD mov \AADLEN, %r11 # %r11 = aadLen pxor \TMP7, \TMP7 pxor \TMP6, \TMP6 cmp $16, %r11 jl _get_AAD_rest\@ _get_AAD_blocks\@: movdqu (%r10), \TMP7 pshufb %xmm14, \TMP7 # byte-reflect the AAD data pxor \TMP7, \TMP6 GHASH_MUL \TMP6, \HASHKEY, \TMP1, \TMP2, \TMP3, \TMP4, \TMP5 add $16, %r10 sub $16, %r11 cmp $16, %r11 jge _get_AAD_blocks\@ movdqu \TMP6, \TMP7 /* read the last <16B of AAD */ _get_AAD_rest\@: test %r11, %r11 je _get_AAD_done\@ READ_PARTIAL_BLOCK %r10, %r11, \TMP1, \TMP7 pshufb %xmm14, \TMP7 # byte-reflect the AAD data pxor \TMP6, \TMP7 GHASH_MUL \TMP7, \HASHKEY, \TMP1, \TMP2, \TMP3, \TMP4, \TMP5 movdqu \TMP7, \TMP6 _get_AAD_done\@: movdqu \TMP6, AadHash(%arg2) .endm # PARTIAL_BLOCK: Handles encryption/decryption and the tag partial blocks # between update calls. 
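# Illustrative example of the bookkeeping (numbers chosen here, not taken
# from the original source): if an earlier update left 5 bytes buffered
# (PBlockLen = 5) and the current call supplies 20 bytes, the first 11
# bytes complete the buffered block, GHASH is updated, PBlockLen is reset
# to 0, and the remaining 9 bytes are left for the rest of GCM_ENC_DEC.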
# Requires the input data be at least 1 byte long due to READ_PARTIAL_BLOCK # Outputs encrypted bytes, and updates hash and partial info in gcm_data_context # Clobbers rax, r10, r12, r13, xmm0-6, xmm9-13 .macro PARTIAL_BLOCK CYPH_PLAIN_OUT PLAIN_CYPH_IN PLAIN_CYPH_LEN DATA_OFFSET \ AAD_HASH operation mov PBlockLen(%arg2), %r13 test %r13, %r13 je _partial_block_done_\@ # Leave Macro if no partial blocks # Read in input data without over reading cmp $16, \PLAIN_CYPH_LEN jl _fewer_than_16_bytes_\@ movups (\PLAIN_CYPH_IN), %xmm1 # If more than 16 bytes, just fill xmm jmp _data_read_\@ _fewer_than_16_bytes_\@: lea (\PLAIN_CYPH_IN, \DATA_OFFSET, 1), %r10 mov \PLAIN_CYPH_LEN, %r12 READ_PARTIAL_BLOCK %r10 %r12 %xmm0 %xmm1 mov PBlockLen(%arg2), %r13 _data_read_\@: # Finished reading in data movdqu PBlockEncKey(%arg2), %xmm9 movdqu HashKey(%arg2), %xmm13 lea SHIFT_MASK(%rip), %r12 # adjust the shuffle mask pointer to be able to shift r13 bytes # r16-r13 is the number of bytes in plaintext mod 16) add %r13, %r12 movdqu (%r12), %xmm2 # get the appropriate shuffle mask pshufb %xmm2, %xmm9 # shift right r13 bytes .ifc \operation, dec movdqa %xmm1, %xmm3 pxor %xmm1, %xmm9 # Cyphertext XOR E(K, Yn) mov \PLAIN_CYPH_LEN, %r10 add %r13, %r10 # Set r10 to be the amount of data left in CYPH_PLAIN_IN after filling sub $16, %r10 # Determine if if partial block is not being filled and # shift mask accordingly jge _no_extra_mask_1_\@ sub %r10, %r12 _no_extra_mask_1_\@: movdqu ALL_F-SHIFT_MASK(%r12), %xmm1 # get the appropriate mask to mask out bottom r13 bytes of xmm9 pand %xmm1, %xmm9 # mask out bottom r13 bytes of xmm9 pand %xmm1, %xmm3 movdqa SHUF_MASK(%rip), %xmm10 pshufb %xmm10, %xmm3 pshufb %xmm2, %xmm3 pxor %xmm3, \AAD_HASH test %r10, %r10 jl _partial_incomplete_1_\@ # GHASH computation for the last <16 Byte block GHASH_MUL \AAD_HASH, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6 xor %eax, %eax mov %rax, PBlockLen(%arg2) jmp _dec_done_\@ _partial_incomplete_1_\@: add \PLAIN_CYPH_LEN, PBlockLen(%arg2) _dec_done_\@: movdqu \AAD_HASH, AadHash(%arg2) .else pxor %xmm1, %xmm9 # Plaintext XOR E(K, Yn) mov \PLAIN_CYPH_LEN, %r10 add %r13, %r10 # Set r10 to be the amount of data left in CYPH_PLAIN_IN after filling sub $16, %r10 # Determine if if partial block is not being filled and # shift mask accordingly jge _no_extra_mask_2_\@ sub %r10, %r12 _no_extra_mask_2_\@: movdqu ALL_F-SHIFT_MASK(%r12), %xmm1 # get the appropriate mask to mask out bottom r13 bytes of xmm9 pand %xmm1, %xmm9 movdqa SHUF_MASK(%rip), %xmm1 pshufb %xmm1, %xmm9 pshufb %xmm2, %xmm9 pxor %xmm9, \AAD_HASH test %r10, %r10 jl _partial_incomplete_2_\@ # GHASH computation for the last <16 Byte block GHASH_MUL \AAD_HASH, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6 xor %eax, %eax mov %rax, PBlockLen(%arg2) jmp _encode_done_\@ _partial_incomplete_2_\@: add \PLAIN_CYPH_LEN, PBlockLen(%arg2) _encode_done_\@: movdqu \AAD_HASH, AadHash(%arg2) movdqa SHUF_MASK(%rip), %xmm10 # shuffle xmm9 back to output as ciphertext pshufb %xmm10, %xmm9 pshufb %xmm2, %xmm9 .endif # output encrypted Bytes test %r10, %r10 jl _partial_fill_\@ mov %r13, %r12 mov $16, %r13 # Set r13 to be the number of bytes to write out sub %r12, %r13 jmp _count_set_\@ _partial_fill_\@: mov \PLAIN_CYPH_LEN, %r13 _count_set_\@: movdqa %xmm9, %xmm0 movq %xmm0, %rax cmp $8, %r13 jle _less_than_8_bytes_left_\@ mov %rax, (\CYPH_PLAIN_OUT, \DATA_OFFSET, 1) add $8, \DATA_OFFSET psrldq $8, %xmm0 movq %xmm0, %rax sub $8, %r13 _less_than_8_bytes_left_\@: movb %al, (\CYPH_PLAIN_OUT, \DATA_OFFSET, 1) add $1, 
\DATA_OFFSET shr $8, %rax sub $1, %r13 jne _less_than_8_bytes_left_\@ _partial_block_done_\@: .endm # PARTIAL_BLOCK /* * if a = number of total plaintext bytes * b = floor(a/16) * num_initial_blocks = b mod 4 * encrypt the initial num_initial_blocks blocks and apply ghash on * the ciphertext * %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers * are clobbered * arg1, %arg2, %arg3 are used as a pointer only, not modified */ .macro INITIAL_BLOCKS_ENC_DEC TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \ XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation MOVADQ SHUF_MASK(%rip), %xmm14 movdqu AadHash(%arg2), %xmm\i # XMM0 = Y0 # start AES for num_initial_blocks blocks movdqu CurCount(%arg2), \XMM0 # XMM0 = Y0 .if (\i == 5) || (\i == 6) || (\i == 7) MOVADQ ONE(%RIP),\TMP1 MOVADQ 0(%arg1),\TMP2 .irpc index, \i_seq paddd \TMP1, \XMM0 # INCR Y0 .ifc \operation, dec movdqa \XMM0, %xmm\index .else MOVADQ \XMM0, %xmm\index .endif pshufb %xmm14, %xmm\index # perform a 16 byte swap pxor \TMP2, %xmm\index .endr lea 0x10(%arg1),%r10 mov keysize,%eax shr $2,%eax # 128->4, 192->6, 256->8 add $5,%eax # 128->9, 192->11, 256->13 aes_loop_initial_\@: MOVADQ (%r10),\TMP1 .irpc index, \i_seq aesenc \TMP1, %xmm\index .endr add $16,%r10 sub $1,%eax jnz aes_loop_initial_\@ MOVADQ (%r10), \TMP1 .irpc index, \i_seq aesenclast \TMP1, %xmm\index # Last Round .endr .irpc index, \i_seq movdqu (%arg4 , %r11, 1), \TMP1 pxor \TMP1, %xmm\index movdqu %xmm\index, (%arg3 , %r11, 1) # write back plaintext/ciphertext for num_initial_blocks add $16, %r11 .ifc \operation, dec movdqa \TMP1, %xmm\index .endif pshufb %xmm14, %xmm\index # prepare plaintext/ciphertext for GHASH computation .endr .endif # apply GHASH on num_initial_blocks blocks .if \i == 5 pxor %xmm5, %xmm6 GHASH_MUL %xmm6, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 pxor %xmm6, %xmm7 GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 pxor %xmm7, %xmm8 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 .elseif \i == 6 pxor %xmm6, %xmm7 GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 pxor %xmm7, %xmm8 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 .elseif \i == 7 pxor %xmm7, %xmm8 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 .endif cmp $64, %r13 jl _initial_blocks_done\@ # no need for precomputed values /* * * Precomputations for HashKey parallel with encryption of first 4 blocks. 
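 * (Added note, assuming the usual layout set up by GCM_INIT, which is not
 * shown here: HashKey_2, HashKey_3 and HashKey_4 hold successive powers of
 * the hash subkey H, so the four GHASH multiplications per iteration can
 * proceed independently and be combined with a single reduction.)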
* Haskey_i_k holds XORed values of the low and high parts of the Haskey_i */ MOVADQ ONE(%RIP),\TMP1 paddd \TMP1, \XMM0 # INCR Y0 MOVADQ \XMM0, \XMM1 pshufb %xmm14, \XMM1 # perform a 16 byte swap paddd \TMP1, \XMM0 # INCR Y0 MOVADQ \XMM0, \XMM2 pshufb %xmm14, \XMM2 # perform a 16 byte swap paddd \TMP1, \XMM0 # INCR Y0 MOVADQ \XMM0, \XMM3 pshufb %xmm14, \XMM3 # perform a 16 byte swap paddd \TMP1, \XMM0 # INCR Y0 MOVADQ \XMM0, \XMM4 pshufb %xmm14, \XMM4 # perform a 16 byte swap MOVADQ 0(%arg1),\TMP1 pxor \TMP1, \XMM1 pxor \TMP1, \XMM2 pxor \TMP1, \XMM3 pxor \TMP1, \XMM4 .irpc index, 1234 # do 4 rounds movaps 0x10*\index(%arg1), \TMP1 aesenc \TMP1, \XMM1 aesenc \TMP1, \XMM2 aesenc \TMP1, \XMM3 aesenc \TMP1, \XMM4 .endr .irpc index, 56789 # do next 5 rounds movaps 0x10*\index(%arg1), \TMP1 aesenc \TMP1, \XMM1 aesenc \TMP1, \XMM2 aesenc \TMP1, \XMM3 aesenc \TMP1, \XMM4 .endr lea 0xa0(%arg1),%r10 mov keysize,%eax shr $2,%eax # 128->4, 192->6, 256->8 sub $4,%eax # 128->0, 192->2, 256->4 jz aes_loop_pre_done\@ aes_loop_pre_\@: MOVADQ (%r10),\TMP2 .irpc index, 1234 aesenc \TMP2, %xmm\index .endr add $16,%r10 sub $1,%eax jnz aes_loop_pre_\@ aes_loop_pre_done\@: MOVADQ (%r10), \TMP2 aesenclast \TMP2, \XMM1 aesenclast \TMP2, \XMM2 aesenclast \TMP2, \XMM3 aesenclast \TMP2, \XMM4 movdqu 16*0(%arg4 , %r11 , 1), \TMP1 pxor \TMP1, \XMM1 .ifc \operation, dec movdqu \XMM1, 16*0(%arg3 , %r11 , 1) movdqa \TMP1, \XMM1 .endif movdqu 16*1(%arg4 , %r11 , 1), \TMP1 pxor \TMP1, \XMM2 .ifc \operation, dec movdqu \XMM2, 16*1(%arg3 , %r11 , 1) movdqa \TMP1, \XMM2 .endif movdqu 16*2(%arg4 , %r11 , 1), \TMP1 pxor \TMP1, \XMM3 .ifc \operation, dec movdqu \XMM3, 16*2(%arg3 , %r11 , 1) movdqa \TMP1, \XMM3 .endif movdqu 16*3(%arg4 , %r11 , 1), \TMP1 pxor \TMP1, \XMM4 .ifc \operation, dec movdqu \XMM4, 16*3(%arg3 , %r11 , 1) movdqa \TMP1, \XMM4 .else movdqu \XMM1, 16*0(%arg3 , %r11 , 1) movdqu \XMM2, 16*1(%arg3 , %r11 , 1) movdqu \XMM3, 16*2(%arg3 , %r11 , 1) movdqu \XMM4, 16*3(%arg3 , %r11 , 1) .endif add $64, %r11 pshufb %xmm14, \XMM1 # perform a 16 byte swap pxor \XMMDst, \XMM1 # combine GHASHed value with the corresponding ciphertext pshufb %xmm14, \XMM2 # perform a 16 byte swap pshufb %xmm14, \XMM3 # perform a 16 byte swap pshufb %xmm14, \XMM4 # perform a 16 byte swap _initial_blocks_done\@: .endm /* * encrypt 4 blocks at a time * ghash the 4 previously encrypted ciphertext blocks * arg1, %arg3, %arg4 are used as pointers only, not modified * %r11 is the data offset value */ .macro GHASH_4_ENCRYPT_4_PARALLEL_enc TMP1 TMP2 TMP3 TMP4 TMP5 \ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation movdqa \XMM1, \XMM5 movdqa \XMM2, \XMM6 movdqa \XMM3, \XMM7 movdqa \XMM4, \XMM8 movdqa SHUF_MASK(%rip), %xmm15 # multiply TMP5 * HashKey using karatsuba movdqa \XMM5, \TMP4 pshufd $78, \XMM5, \TMP6 pxor \XMM5, \TMP6 paddd ONE(%rip), \XMM0 # INCR CNT movdqu HashKey_4(%arg2), \TMP5 pclmulqdq $0x11, \TMP5, \TMP4 # TMP4 = a1*b1 movdqa \XMM0, \XMM1 paddd ONE(%rip), \XMM0 # INCR CNT movdqa \XMM0, \XMM2 paddd ONE(%rip), \XMM0 # INCR CNT movdqa \XMM0, \XMM3 paddd ONE(%rip), \XMM0 # INCR CNT movdqa \XMM0, \XMM4 pshufb %xmm15, \XMM1 # perform a 16 byte swap pclmulqdq $0x00, \TMP5, \XMM5 # XMM5 = a0*b0 pshufb %xmm15, \XMM2 # perform a 16 byte swap pshufb %xmm15, \XMM3 # perform a 16 byte swap pshufb %xmm15, \XMM4 # perform a 16 byte swap pxor (%arg1), \XMM1 pxor (%arg1), \XMM2 pxor (%arg1), \XMM3 pxor (%arg1), \XMM4 movdqu HashKey_4_k(%arg2), \TMP5 pclmulqdq $0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0) movaps 0x10(%arg1), \TMP1 aesenc 
\TMP1, \XMM1 # Round 1 aesenc \TMP1, \XMM2 aesenc \TMP1, \XMM3 aesenc \TMP1, \XMM4 movaps 0x20(%arg1), \TMP1 aesenc \TMP1, \XMM1 # Round 2 aesenc \TMP1, \XMM2 aesenc \TMP1, \XMM3 aesenc \TMP1, \XMM4 movdqa \XMM6, \TMP1 pshufd $78, \XMM6, \TMP2 pxor \XMM6, \TMP2 movdqu HashKey_3(%arg2), \TMP5 pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1 * b1 movaps 0x30(%arg1), \TMP3 aesenc \TMP3, \XMM1 # Round 3 aesenc \TMP3, \XMM2 aesenc \TMP3, \XMM3 aesenc \TMP3, \XMM4 pclmulqdq $0x00, \TMP5, \XMM6 # XMM6 = a0*b0 movaps 0x40(%arg1), \TMP3 aesenc \TMP3, \XMM1 # Round 4 aesenc \TMP3, \XMM2 aesenc \TMP3, \XMM3 aesenc \TMP3, \XMM4 movdqu HashKey_3_k(%arg2), \TMP5 pclmulqdq $0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) movaps 0x50(%arg1), \TMP3 aesenc \TMP3, \XMM1 # Round 5 aesenc \TMP3, \XMM2 aesenc \TMP3, \XMM3 aesenc \TMP3, \XMM4 pxor \TMP1, \TMP4 # accumulate the results in TMP4:XMM5, TMP6 holds the middle part pxor \XMM6, \XMM5 pxor \TMP2, \TMP6 movdqa \XMM7, \TMP1 pshufd $78, \XMM7, \TMP2 pxor \XMM7, \TMP2 movdqu HashKey_2(%arg2), \TMP5 # Multiply TMP5 * HashKey using karatsuba pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1 movaps 0x60(%arg1), \TMP3 aesenc \TMP3, \XMM1 # Round 6 aesenc \TMP3, \XMM2 aesenc \TMP3, \XMM3 aesenc \TMP3, \XMM4 pclmulqdq $0x00, \TMP5, \XMM7 # XMM7 = a0*b0 movaps 0x70(%arg1), \TMP3 aesenc \TMP3, \XMM1 # Round 7 aesenc \TMP3, \XMM2 aesenc \TMP3, \XMM3 aesenc \TMP3, \XMM4 movdqu HashKey_2_k(%arg2), \TMP5 pclmulqdq $0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) movaps 0x80(%arg1), \TMP3 aesenc \TMP3, \XMM1 # Round 8 aesenc \TMP3, \XMM2 aesenc \TMP3, \XMM3 aesenc \TMP3, \XMM4 pxor \TMP1, \TMP4 # accumulate the results in TMP4:XMM5, TMP6 holds the middle part pxor \XMM7, \XMM5 pxor \TMP2, \TMP6 # Multiply XMM8 * HashKey # XMM8 and TMP5 hold the values for the two operands movdqa \XMM8, \TMP1 pshufd $78, \XMM8, \TMP2 pxor \XMM8, \TMP2 movdqu HashKey(%arg2), \TMP5 pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1 movaps 0x90(%arg1), \TMP3 aesenc \TMP3, \XMM1 # Round 9 aesenc \TMP3, \XMM2 aesenc \TMP3, \XMM3 aesenc \TMP3, \XMM4 pclmulqdq $0x00, \TMP5, \XMM8 # XMM8 = a0*b0 lea 0xa0(%arg1),%r10 mov keysize,%eax shr $2,%eax # 128->4, 192->6, 256->8 sub $4,%eax # 128->0, 192->2, 256->4 jz aes_loop_par_enc_done\@ aes_loop_par_enc\@: MOVADQ (%r10),\TMP3 .irpc index, 1234 aesenc \TMP3, %xmm\index .endr add $16,%r10 sub $1,%eax jnz aes_loop_par_enc\@ aes_loop_par_enc_done\@: MOVADQ (%r10), \TMP3 aesenclast \TMP3, \XMM1 # Round 10 aesenclast \TMP3, \XMM2 aesenclast \TMP3, \XMM3 aesenclast \TMP3, \XMM4 movdqu HashKey_k(%arg2), \TMP5 pclmulqdq $0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) movdqu (%arg4,%r11,1), \TMP3 pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK movdqu 16(%arg4,%r11,1), \TMP3 pxor \TMP3, \XMM2 # Ciphertext/Plaintext XOR EK movdqu 32(%arg4,%r11,1), \TMP3 pxor \TMP3, \XMM3 # Ciphertext/Plaintext XOR EK movdqu 48(%arg4,%r11,1), \TMP3 pxor \TMP3, \XMM4 # Ciphertext/Plaintext XOR EK movdqu \XMM1, (%arg3,%r11,1) # Write to the ciphertext buffer movdqu \XMM2, 16(%arg3,%r11,1) # Write to the ciphertext buffer movdqu \XMM3, 32(%arg3,%r11,1) # Write to the ciphertext buffer movdqu \XMM4, 48(%arg3,%r11,1) # Write to the ciphertext buffer pshufb %xmm15, \XMM1 # perform a 16 byte swap pshufb %xmm15, \XMM2 # perform a 16 byte swap pshufb %xmm15, \XMM3 # perform a 16 byte swap pshufb %xmm15, \XMM4 # perform a 16 byte swap pxor \TMP4, \TMP1 pxor \XMM8, \XMM5 pxor \TMP6, \TMP2 pxor \TMP1, \TMP2 pxor \XMM5, \TMP2 movdqa \TMP2, \TMP3 pslldq $8, \TMP3 # left shift TMP3 2 DWs psrldq $8, \TMP2 # right shift TMP2 
2 DWs pxor \TMP3, \XMM5 pxor \TMP2, \TMP1 # accumulate the results in TMP1:XMM5 # first phase of reduction movdqa \XMM5, \TMP2 movdqa \XMM5, \TMP3 movdqa \XMM5, \TMP4 # move XMM5 into TMP2, TMP3, TMP4 in order to perform shifts independently pslld $31, \TMP2 # packed right shift << 31 pslld $30, \TMP3 # packed right shift << 30 pslld $25, \TMP4 # packed right shift << 25 pxor \TMP3, \TMP2 # xor the shifted versions pxor \TMP4, \TMP2 movdqa \TMP2, \TMP5 psrldq $4, \TMP5 # right shift T5 1 DW pslldq $12, \TMP2 # left shift T2 3 DWs pxor \TMP2, \XMM5 # second phase of reduction movdqa \XMM5,\TMP2 # make 3 copies of XMM5 into TMP2, TMP3, TMP4 movdqa \XMM5,\TMP3 movdqa \XMM5,\TMP4 psrld $1, \TMP2 # packed left shift >>1 psrld $2, \TMP3 # packed left shift >>2 psrld $7, \TMP4 # packed left shift >>7 pxor \TMP3,\TMP2 # xor the shifted versions pxor \TMP4,\TMP2 pxor \TMP5, \TMP2 pxor \TMP2, \XMM5 pxor \TMP1, \XMM5 # result is in TMP1 pxor \XMM5, \XMM1 .endm /* * decrypt 4 blocks at a time * ghash the 4 previously decrypted ciphertext blocks * arg1, %arg3, %arg4 are used as pointers only, not modified * %r11 is the data offset value */ .macro GHASH_4_ENCRYPT_4_PARALLEL_dec TMP1 TMP2 TMP3 TMP4 TMP5 \ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation movdqa \XMM1, \XMM5 movdqa \XMM2, \XMM6 movdqa \XMM3, \XMM7 movdqa \XMM4, \XMM8 movdqa SHUF_MASK(%rip), %xmm15 # multiply TMP5 * HashKey using karatsuba movdqa \XMM5, \TMP4 pshufd $78, \XMM5, \TMP6 pxor \XMM5, \TMP6 paddd ONE(%rip), \XMM0 # INCR CNT movdqu HashKey_4(%arg2), \TMP5 pclmulqdq $0x11, \TMP5, \TMP4 # TMP4 = a1*b1 movdqa \XMM0, \XMM1 paddd ONE(%rip), \XMM0 # INCR CNT movdqa \XMM0, \XMM2 paddd ONE(%rip), \XMM0 # INCR CNT movdqa \XMM0, \XMM3 paddd ONE(%rip), \XMM0 # INCR CNT movdqa \XMM0, \XMM4 pshufb %xmm15, \XMM1 # perform a 16 byte swap pclmulqdq $0x00, \TMP5, \XMM5 # XMM5 = a0*b0 pshufb %xmm15, \XMM2 # perform a 16 byte swap pshufb %xmm15, \XMM3 # perform a 16 byte swap pshufb %xmm15, \XMM4 # perform a 16 byte swap pxor (%arg1), \XMM1 pxor (%arg1), \XMM2 pxor (%arg1), \XMM3 pxor (%arg1), \XMM4 movdqu HashKey_4_k(%arg2), \TMP5 pclmulqdq $0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0) movaps 0x10(%arg1), \TMP1 aesenc \TMP1, \XMM1 # Round 1 aesenc \TMP1, \XMM2 aesenc \TMP1, \XMM3 aesenc \TMP1, \XMM4 movaps 0x20(%arg1), \TMP1 aesenc \TMP1, \XMM1 # Round 2 aesenc \TMP1, \XMM2 aesenc \TMP1, \XMM3 aesenc \TMP1, \XMM4 movdqa \XMM6, \TMP1 pshufd $78, \XMM6, \TMP2 pxor \XMM6, \TMP2 movdqu HashKey_3(%arg2), \TMP5 pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1 * b1 movaps 0x30(%arg1), \TMP3 aesenc \TMP3, \XMM1 # Round 3 aesenc \TMP3, \XMM2 aesenc \TMP3, \XMM3 aesenc \TMP3, \XMM4 pclmulqdq $0x00, \TMP5, \XMM6 # XMM6 = a0*b0 movaps 0x40(%arg1), \TMP3 aesenc \TMP3, \XMM1 # Round 4 aesenc \TMP3, \XMM2 aesenc \TMP3, \XMM3 aesenc \TMP3, \XMM4 movdqu HashKey_3_k(%arg2), \TMP5 pclmulqdq $0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) movaps 0x50(%arg1), \TMP3 aesenc \TMP3, \XMM1 # Round 5 aesenc \TMP3, \XMM2 aesenc \TMP3, \XMM3 aesenc \TMP3, \XMM4 pxor \TMP1, \TMP4 # accumulate the results in TMP4:XMM5, TMP6 holds the middle part pxor \XMM6, \XMM5 pxor \TMP2, \TMP6 movdqa \XMM7, \TMP1 pshufd $78, \XMM7, \TMP2 pxor \XMM7, \TMP2 movdqu HashKey_2(%arg2), \TMP5 # Multiply TMP5 * HashKey using karatsuba pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1 movaps 0x60(%arg1), \TMP3 aesenc \TMP3, \XMM1 # Round 6 aesenc \TMP3, \XMM2 aesenc \TMP3, \XMM3 aesenc \TMP3, \XMM4 pclmulqdq $0x00, \TMP5, \XMM7 # XMM7 = a0*b0 movaps 0x70(%arg1), \TMP3 aesenc \TMP3, \XMM1 # Round 7 aesenc 
\TMP3, \XMM2 aesenc \TMP3, \XMM3 aesenc \TMP3, \XMM4 movdqu HashKey_2_k(%arg2), \TMP5 pclmulqdq $0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) movaps 0x80(%arg1), \TMP3 aesenc \TMP3, \XMM1 # Round 8 aesenc \TMP3, \XMM2 aesenc \TMP3, \XMM3 aesenc \TMP3, \XMM4 pxor \TMP1, \TMP4 # accumulate the results in TMP4:XMM5, TMP6 holds the middle part pxor \XMM7, \XMM5 pxor \TMP2, \TMP6 # Multiply XMM8 * HashKey # XMM8 and TMP5 hold the values for the two operands movdqa \XMM8, \TMP1 pshufd $78, \XMM8, \TMP2 pxor \XMM8, \TMP2 movdqu HashKey(%arg2), \TMP5 pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1 movaps 0x90(%arg1), \TMP3 aesenc \TMP3, \XMM1 # Round 9 aesenc \TMP3, \XMM2 aesenc \TMP3, \XMM3 aesenc \TMP3, \XMM4 pclmulqdq $0x00, \TMP5, \XMM8 # XMM8 = a0*b0 lea 0xa0(%arg1),%r10 mov keysize,%eax shr $2,%eax # 128->4, 192->6, 256->8 sub $4,%eax # 128->0, 192->2, 256->4 jz aes_loop_par_dec_done\@ aes_loop_par_dec\@: MOVADQ (%r10),\TMP3 .irpc index, 1234 aesenc \TMP3, %xmm\index .endr add $16,%r10 sub $1,%eax jnz aes_loop_par_dec\@ aes_loop_par_dec_done\@: MOVADQ (%r10), \TMP3 aesenclast \TMP3, \XMM1 # last round aesenclast \TMP3, \XMM2 aesenclast \TMP3, \XMM3 aesenclast \TMP3, \XMM4 movdqu HashKey_k(%arg2), \TMP5 pclmulqdq $0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) movdqu (%arg4,%r11,1), \TMP3 pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK movdqu \XMM1, (%arg3,%r11,1) # Write to plaintext buffer movdqa \TMP3, \XMM1 movdqu 16(%arg4,%r11,1), \TMP3 pxor \TMP3, \XMM2 # Ciphertext/Plaintext XOR EK movdqu \XMM2, 16(%arg3,%r11,1) # Write to plaintext buffer movdqa \TMP3, \XMM2 movdqu 32(%arg4,%r11,1), \TMP3 pxor \TMP3, \XMM3 # Ciphertext/Plaintext XOR EK movdqu \XMM3, 32(%arg3,%r11,1) # Write to plaintext buffer movdqa \TMP3, \XMM3 movdqu 48(%arg4,%r11,1), \TMP3 pxor \TMP3, \XMM4 # Ciphertext/Plaintext XOR EK movdqu \XMM4, 48(%arg3,%r11,1) # Write to plaintext buffer movdqa \TMP3, \XMM4 pshufb %xmm15, \XMM1 # perform a 16 byte swap pshufb %xmm15, \XMM2 # perform a 16 byte swap pshufb %xmm15, \XMM3 # perform a 16 byte swap pshufb %xmm15, \XMM4 # perform a 16 byte swap pxor \TMP4, \TMP1 pxor \XMM8, \XMM5 pxor \TMP6, \TMP2 pxor \TMP1, \TMP2 pxor \XMM5, \TMP2 movdqa \TMP2, \TMP3 pslldq $8, \TMP3 # left shift TMP3 2 DWs psrldq $8, \TMP2 # right shift TMP2 2 DWs pxor \TMP3, \XMM5 pxor \TMP2, \TMP1 # accumulate the results in TMP1:XMM5 # first phase of reduction movdqa \XMM5, \TMP2 movdqa \XMM5, \TMP3 movdqa \XMM5, \TMP4 # move XMM5 into TMP2, TMP3, TMP4 in order to perform shifts independently pslld $31, \TMP2 # packed right shift << 31 pslld $30, \TMP3 # packed right shift << 30 pslld $25, \TMP4 # packed right shift << 25 pxor \TMP3, \TMP2 # xor the shifted versions pxor \TMP4, \TMP2 movdqa \TMP2, \TMP5 psrldq $4, \TMP5 # right shift T5 1 DW pslldq $12, \TMP2 # left shift T2 3 DWs pxor \TMP2, \XMM5 # second phase of reduction movdqa \XMM5,\TMP2 # make 3 copies of XMM5 into TMP2, TMP3, TMP4 movdqa \XMM5,\TMP3 movdqa \XMM5,\TMP4 psrld $1, \TMP2 # packed left shift >>1 psrld $2, \TMP3 # packed left shift >>2 psrld $7, \TMP4 # packed left shift >>7 pxor \TMP3,\TMP2 # xor the shifted versions pxor \TMP4,\TMP2 pxor \TMP5, \TMP2 pxor \TMP2, \XMM5 pxor \TMP1, \XMM5 # result is in TMP1 pxor \XMM5, \XMM1 .endm /* GHASH the last 4 ciphertext blocks. 
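 * Added clarification: each of XMM1..XMM4 is multiplied by the matching
 * power of the hash subkey (HashKey_4 for the oldest block down to HashKey
 * for the newest) using Karatsuba; the partial products are accumulated in
 * TMP6, XMMDst and XMM1, and a single two-phase reduction then leaves the
 * updated GHASH value in XMMDst.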
*/ .macro GHASH_LAST_4 TMP1 TMP2 TMP3 TMP4 TMP5 TMP6 \ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst # Multiply TMP6 * HashKey (using Karatsuba) movdqa \XMM1, \TMP6 pshufd $78, \XMM1, \TMP2 pxor \XMM1, \TMP2 movdqu HashKey_4(%arg2), \TMP5 pclmulqdq $0x11, \TMP5, \TMP6 # TMP6 = a1*b1 pclmulqdq $0x00, \TMP5, \XMM1 # XMM1 = a0*b0 movdqu HashKey_4_k(%arg2), \TMP4 pclmulqdq $0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) movdqa \XMM1, \XMMDst movdqa \TMP2, \XMM1 # result in TMP6, XMMDst, XMM1 # Multiply TMP1 * HashKey (using Karatsuba) movdqa \XMM2, \TMP1 pshufd $78, \XMM2, \TMP2 pxor \XMM2, \TMP2 movdqu HashKey_3(%arg2), \TMP5 pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1 pclmulqdq $0x00, \TMP5, \XMM2 # XMM2 = a0*b0 movdqu HashKey_3_k(%arg2), \TMP4 pclmulqdq $0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) pxor \TMP1, \TMP6 pxor \XMM2, \XMMDst pxor \TMP2, \XMM1 # results accumulated in TMP6, XMMDst, XMM1 # Multiply TMP1 * HashKey (using Karatsuba) movdqa \XMM3, \TMP1 pshufd $78, \XMM3, \TMP2 pxor \XMM3, \TMP2 movdqu HashKey_2(%arg2), \TMP5 pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1 pclmulqdq $0x00, \TMP5, \XMM3 # XMM3 = a0*b0 movdqu HashKey_2_k(%arg2), \TMP4 pclmulqdq $0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) pxor \TMP1, \TMP6 pxor \XMM3, \XMMDst pxor \TMP2, \XMM1 # results accumulated in TMP6, XMMDst, XMM1 # Multiply TMP1 * HashKey (using Karatsuba) movdqa \XMM4, \TMP1 pshufd $78, \XMM4, \TMP2 pxor \XMM4, \TMP2 movdqu HashKey(%arg2), \TMP5 pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1 pclmulqdq $0x00, \TMP5, \XMM4 # XMM4 = a0*b0 movdqu HashKey_k(%arg2), \TMP4 pclmulqdq $0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) pxor \TMP1, \TMP6 pxor \XMM4, \XMMDst pxor \XMM1, \TMP2 pxor \TMP6, \TMP2 pxor \XMMDst, \TMP2 # middle section of the temp results combined as in karatsuba algorithm movdqa \TMP2, \TMP4 pslldq $8, \TMP4 # left shift TMP4 2 DWs psrldq $8, \TMP2 # right shift TMP2 2 DWs pxor \TMP4, \XMMDst pxor \TMP2, \TMP6 # TMP6:XMMDst holds the result of the accumulated carry-less multiplications # first phase of the reduction movdqa \XMMDst, \TMP2 movdqa \XMMDst, \TMP3 movdqa \XMMDst, \TMP4 # move XMMDst into TMP2, TMP3, TMP4 in order to perform 3 shifts independently pslld $31, \TMP2 # packed right shifting << 31 pslld $30, \TMP3 # packed right shifting << 30 pslld $25, \TMP4 # packed right shifting << 25 pxor \TMP3, \TMP2 # xor the shifted versions pxor \TMP4, \TMP2 movdqa \TMP2, \TMP7 psrldq $4, \TMP7 # right shift TMP7 1 DW pslldq $12, \TMP2 # left shift TMP2 3 DWs pxor \TMP2, \XMMDst # second phase of the reduction movdqa \XMMDst, \TMP2 # make 3 copies of XMMDst for doing 3 shift operations movdqa \XMMDst, \TMP3 movdqa \XMMDst, \TMP4 psrld $1, \TMP2 # packed left shift >> 1 psrld $2, \TMP3 # packed left shift >> 2 psrld $7, \TMP4 # packed left shift >> 7 pxor \TMP3, \TMP2 # xor the shifted versions pxor \TMP4, \TMP2 pxor \TMP7, \TMP2 pxor \TMP2, \XMMDst pxor \TMP6, \XMMDst # reduced result is in XMMDst .endm /* Encryption of a single block * uses eax & r10 */ .macro ENCRYPT_SINGLE_BLOCK XMM0 TMP1 pxor (%arg1), \XMM0 mov keysize,%eax shr $2,%eax # 128->4, 192->6, 256->8 add $5,%eax # 128->9, 192->11, 256->13 lea 16(%arg1), %r10 # get first expanded key address _esb_loop_\@: MOVADQ (%r10),\TMP1 aesenc \TMP1,\XMM0 add $16,%r10 sub $1,%eax jnz _esb_loop_\@ MOVADQ (%r10),\TMP1 aesenclast \TMP1,\XMM0 .endm /***************************************************************************** * void aesni_gcm_dec(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary. 
* struct gcm_context_data *data * // Context data * u8 *out, // Plaintext output. Encrypt in-place is allowed. * const u8 *in, // Ciphertext input * u64 plaintext_len, // Length of data in bytes for decryption. * u8 *iv, // Pre-counter block j0: 4 byte salt (from Security Association) * // concatenated with 8 byte Initialisation Vector (from IPSec ESP Payload) * // concatenated with 0x00000001. 16-byte aligned pointer. * u8 *hash_subkey, // H, the Hash sub key input. Data starts on a 16-byte boundary. * const u8 *aad, // Additional Authentication Data (AAD) * u64 aad_len, // Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 bytes * u8 *auth_tag, // Authenticated Tag output. The driver will compare this to the * // given authentication tag and only return the plaintext if they match. * u64 auth_tag_len); // Authenticated Tag Length in bytes. Valid values are 16 * // (most likely), 12 or 8. * * Assumptions: * * keys: * keys are pre-expanded and aligned to 16 bytes. we are using the first * set of 11 keys in the data structure void *aes_ctx * * iv: * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Salt (From the SA) | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Initialization Vector | * | (This is the sequence number from IPSec header) | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | 0x1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * * * AAD: * AAD padded to 128 bits with 0 * for example, assume AAD is a u32 vector * * if AAD is 8 bytes: * AAD[3] = {A0, A1}; * padded AAD in xmm register = {A1 A0 0 0} * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | SPI (A1) | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | 32-bit Sequence Number (A0) | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | 0x0 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * AAD Format with 32-bit Sequence Number * * if AAD is 12 bytes: * AAD[3] = {A0, A1, A2}; * padded AAD in xmm register = {A2 A1 A0 0} * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | SPI (A2) | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | 64-bit Extended Sequence Number {A1,A0} | * | | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | 0x0 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * AAD Format with 64-bit Extended Sequence Number * * poly = x^128 + x^127 + x^126 + x^121 + 1 * *****************************************************************************/ SYM_FUNC_START(aesni_gcm_dec) FUNC_SAVE GCM_INIT %arg6, arg7, arg8, arg9 GCM_ENC_DEC dec GCM_COMPLETE arg10, arg11 FUNC_RESTORE RET SYM_FUNC_END(aesni_gcm_dec) /***************************************************************************** * void aesni_gcm_enc(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary. * struct gcm_context_data *data * // Context data * u8 *out, // Ciphertext output. Encrypt in-place is allowed. * const u8 *in, // Plaintext input * u64 plaintext_len, // Length of data in bytes for encryption. 
* u8 *iv, // Pre-counter block j0: 4 byte salt (from Security Association) * // concatenated with 8 byte Initialisation Vector (from IPSec ESP Payload) * // concatenated with 0x00000001. 16-byte aligned pointer. * u8 *hash_subkey, // H, the Hash sub key input. Data starts on a 16-byte boundary. * const u8 *aad, // Additional Authentication Data (AAD) * u64 aad_len, // Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 bytes * u8 *auth_tag, // Authenticated Tag output. * u64 auth_tag_len); // Authenticated Tag Length in bytes. Valid values are 16 (most likely), * // 12 or 8. * * Assumptions: * * keys: * keys are pre-expanded and aligned to 16 bytes. we are using the * first set of 11 keys in the data structure void *aes_ctx * * * iv: * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Salt (From the SA) | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Initialization Vector | * | (This is the sequence number from IPSec header) | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | 0x1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * * * AAD: * AAD padded to 128 bits with 0 * for example, assume AAD is a u32 vector * * if AAD is 8 bytes: * AAD[3] = {A0, A1}; * padded AAD in xmm register = {A1 A0 0 0} * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | SPI (A1) | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | 32-bit Sequence Number (A0) | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | 0x0 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * AAD Format with 32-bit Sequence Number * * if AAD is 12 bytes: * AAD[3] = {A0, A1, A2}; * padded AAD in xmm register = {A2 A1 A0 0} * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | SPI (A2) | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | 64-bit Extended Sequence Number {A1,A0} | * | | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | 0x0 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * AAD Format with 64-bit Extended Sequence Number * * poly = x^128 + x^127 + x^126 + x^121 + 1 ***************************************************************************/ SYM_FUNC_START(aesni_gcm_enc) FUNC_SAVE GCM_INIT %arg6, arg7, arg8, arg9 GCM_ENC_DEC enc GCM_COMPLETE arg10, arg11 FUNC_RESTORE RET SYM_FUNC_END(aesni_gcm_enc) /***************************************************************************** * void aesni_gcm_init(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary. * struct gcm_context_data *data, * // context data * u8 *iv, // Pre-counter block j0: 4 byte salt (from Security Association) * // concatenated with 8 byte Initialisation Vector (from IPSec ESP Payload) * // concatenated with 0x00000001. 16-byte aligned pointer. * u8 *hash_subkey, // H, the Hash sub key input. Data starts on a 16-byte boundary. * const u8 *aad, // Additional Authentication Data (AAD) * u64 aad_len) // Length of AAD in bytes. 
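 *
 * Typical call sequence for this streaming interface (illustrative only;
 * the C glue code is the authoritative user):
 *   aesni_gcm_init(aes_ctx, data, iv, hash_subkey, aad, aad_len);
 *   aesni_gcm_enc_update(aes_ctx, data, out, in, len);  // repeat as needed
 *   aesni_gcm_finalize(aes_ctx, data, auth_tag, auth_tag_len);
 * with aesni_gcm_dec_update() taking the place of the encrypt step when
 * decrypting.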
*/ SYM_FUNC_START(aesni_gcm_init) FUNC_SAVE GCM_INIT %arg3, %arg4,%arg5, %arg6 FUNC_RESTORE RET SYM_FUNC_END(aesni_gcm_init) /***************************************************************************** * void aesni_gcm_enc_update(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary. * struct gcm_context_data *data, * // context data * u8 *out, // Ciphertext output. Encrypt in-place is allowed. * const u8 *in, // Plaintext input * u64 plaintext_len, // Length of data in bytes for encryption. */ SYM_FUNC_START(aesni_gcm_enc_update) FUNC_SAVE GCM_ENC_DEC enc FUNC_RESTORE RET SYM_FUNC_END(aesni_gcm_enc_update) /***************************************************************************** * void aesni_gcm_dec_update(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary. * struct gcm_context_data *data, * // context data * u8 *out, // Ciphertext output. Encrypt in-place is allowed. * const u8 *in, // Plaintext input * u64 plaintext_len, // Length of data in bytes for encryption. */ SYM_FUNC_START(aesni_gcm_dec_update) FUNC_SAVE GCM_ENC_DEC dec FUNC_RESTORE RET SYM_FUNC_END(aesni_gcm_dec_update) /***************************************************************************** * void aesni_gcm_finalize(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary. * struct gcm_context_data *data, * // context data * u8 *auth_tag, // Authenticated Tag output. * u64 auth_tag_len); // Authenticated Tag Length in bytes. Valid values are 16 (most likely), * // 12 or 8. */ SYM_FUNC_START(aesni_gcm_finalize) FUNC_SAVE GCM_COMPLETE %arg3 %arg4 FUNC_RESTORE RET SYM_FUNC_END(aesni_gcm_finalize) #endif SYM_FUNC_START_LOCAL(_key_expansion_256a) pshufd $0b11111111, %xmm1, %xmm1 shufps $0b00010000, %xmm0, %xmm4 pxor %xmm4, %xmm0 shufps $0b10001100, %xmm0, %xmm4 pxor %xmm4, %xmm0 pxor %xmm1, %xmm0 movaps %xmm0, (TKEYP) add $0x10, TKEYP RET SYM_FUNC_END(_key_expansion_256a) SYM_FUNC_ALIAS_LOCAL(_key_expansion_128, _key_expansion_256a) SYM_FUNC_START_LOCAL(_key_expansion_192a) pshufd $0b01010101, %xmm1, %xmm1 shufps $0b00010000, %xmm0, %xmm4 pxor %xmm4, %xmm0 shufps $0b10001100, %xmm0, %xmm4 pxor %xmm4, %xmm0 pxor %xmm1, %xmm0 movaps %xmm2, %xmm5 movaps %xmm2, %xmm6 pslldq $4, %xmm5 pshufd $0b11111111, %xmm0, %xmm3 pxor %xmm3, %xmm2 pxor %xmm5, %xmm2 movaps %xmm0, %xmm1 shufps $0b01000100, %xmm0, %xmm6 movaps %xmm6, (TKEYP) shufps $0b01001110, %xmm2, %xmm1 movaps %xmm1, 0x10(TKEYP) add $0x20, TKEYP RET SYM_FUNC_END(_key_expansion_192a) SYM_FUNC_START_LOCAL(_key_expansion_192b) pshufd $0b01010101, %xmm1, %xmm1 shufps $0b00010000, %xmm0, %xmm4 pxor %xmm4, %xmm0 shufps $0b10001100, %xmm0, %xmm4 pxor %xmm4, %xmm0 pxor %xmm1, %xmm0 movaps %xmm2, %xmm5 pslldq $4, %xmm5 pshufd $0b11111111, %xmm0, %xmm3 pxor %xmm3, %xmm2 pxor %xmm5, %xmm2 movaps %xmm0, (TKEYP) add $0x10, TKEYP RET SYM_FUNC_END(_key_expansion_192b) SYM_FUNC_START_LOCAL(_key_expansion_256b) pshufd $0b10101010, %xmm1, %xmm1 shufps $0b00010000, %xmm2, %xmm4 pxor %xmm4, %xmm2 shufps $0b10001100, %xmm2, %xmm4 pxor %xmm4, %xmm2 pxor %xmm1, %xmm2 movaps %xmm2, (TKEYP) add $0x10, TKEYP RET SYM_FUNC_END(_key_expansion_256b) /* * int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key, * unsigned int key_len) */ SYM_FUNC_START(aesni_set_key) FRAME_BEGIN #ifndef __x86_64__ pushl KEYP movl (FRAME_OFFSET+8)(%esp), KEYP # ctx movl (FRAME_OFFSET+12)(%esp), UKEYP # in_key movl (FRAME_OFFSET+16)(%esp), %edx # key_len #endif movups (UKEYP), %xmm0 # user key (first 16 bytes) movaps %xmm0, (KEYP) lea 0x10(KEYP), TKEYP # key addr movl %edx, 
480(KEYP) pxor %xmm4, %xmm4 # xmm4 is assumed 0 in _key_expansion_x cmp $24, %dl jb .Lenc_key128 je .Lenc_key192 movups 0x10(UKEYP), %xmm2 # other user key movaps %xmm2, (TKEYP) add $0x10, TKEYP aeskeygenassist $0x1, %xmm2, %xmm1 # round 1 call _key_expansion_256a aeskeygenassist $0x1, %xmm0, %xmm1 call _key_expansion_256b aeskeygenassist $0x2, %xmm2, %xmm1 # round 2 call _key_expansion_256a aeskeygenassist $0x2, %xmm0, %xmm1 call _key_expansion_256b aeskeygenassist $0x4, %xmm2, %xmm1 # round 3 call _key_expansion_256a aeskeygenassist $0x4, %xmm0, %xmm1 call _key_expansion_256b aeskeygenassist $0x8, %xmm2, %xmm1 # round 4 call _key_expansion_256a aeskeygenassist $0x8, %xmm0, %xmm1 call _key_expansion_256b aeskeygenassist $0x10, %xmm2, %xmm1 # round 5 call _key_expansion_256a aeskeygenassist $0x10, %xmm0, %xmm1 call _key_expansion_256b aeskeygenassist $0x20, %xmm2, %xmm1 # round 6 call _key_expansion_256a aeskeygenassist $0x20, %xmm0, %xmm1 call _key_expansion_256b aeskeygenassist $0x40, %xmm2, %xmm1 # round 7 call _key_expansion_256a jmp .Ldec_key .Lenc_key192: movq 0x10(UKEYP), %xmm2 # other user key aeskeygenassist $0x1, %xmm2, %xmm1 # round 1 call _key_expansion_192a aeskeygenassist $0x2, %xmm2, %xmm1 # round 2 call _key_expansion_192b aeskeygenassist $0x4, %xmm2, %xmm1 # round 3 call _key_expansion_192a aeskeygenassist $0x8, %xmm2, %xmm1 # round 4 call _key_expansion_192b aeskeygenassist $0x10, %xmm2, %xmm1 # round 5 call _key_expansion_192a aeskeygenassist $0x20, %xmm2, %xmm1 # round 6 call _key_expansion_192b aeskeygenassist $0x40, %xmm2, %xmm1 # round 7 call _key_expansion_192a aeskeygenassist $0x80, %xmm2, %xmm1 # round 8 call _key_expansion_192b jmp .Ldec_key .Lenc_key128: aeskeygenassist $0x1, %xmm0, %xmm1 # round 1 call _key_expansion_128 aeskeygenassist $0x2, %xmm0, %xmm1 # round 2 call _key_expansion_128 aeskeygenassist $0x4, %xmm0, %xmm1 # round 3 call _key_expansion_128 aeskeygenassist $0x8, %xmm0, %xmm1 # round 4 call _key_expansion_128 aeskeygenassist $0x10, %xmm0, %xmm1 # round 5 call _key_expansion_128 aeskeygenassist $0x20, %xmm0, %xmm1 # round 6 call _key_expansion_128 aeskeygenassist $0x40, %xmm0, %xmm1 # round 7 call _key_expansion_128 aeskeygenassist $0x80, %xmm0, %xmm1 # round 8 call _key_expansion_128 aeskeygenassist $0x1b, %xmm0, %xmm1 # round 9 call _key_expansion_128 aeskeygenassist $0x36, %xmm0, %xmm1 # round 10 call _key_expansion_128 .Ldec_key: sub $0x10, TKEYP movaps (KEYP), %xmm0 movaps (TKEYP), %xmm1 movaps %xmm0, 240(TKEYP) movaps %xmm1, 240(KEYP) add $0x10, KEYP lea 240-16(TKEYP), UKEYP .align 4 .Ldec_key_loop: movaps (KEYP), %xmm0 aesimc %xmm0, %xmm1 movaps %xmm1, (UKEYP) add $0x10, KEYP sub $0x10, UKEYP cmp TKEYP, KEYP jb .Ldec_key_loop xor AREG, AREG #ifndef __x86_64__ popl KEYP #endif FRAME_END RET SYM_FUNC_END(aesni_set_key) /* * void aesni_enc(const void *ctx, u8 *dst, const u8 *src) */ SYM_FUNC_START(aesni_enc) FRAME_BEGIN #ifndef __x86_64__ pushl KEYP pushl KLEN movl (FRAME_OFFSET+12)(%esp), KEYP # ctx movl (FRAME_OFFSET+16)(%esp), OUTP # dst movl (FRAME_OFFSET+20)(%esp), INP # src #endif movl 480(KEYP), KLEN # key length movups (INP), STATE # input call _aesni_enc1 movups STATE, (OUTP) # output #ifndef __x86_64__ popl KLEN popl KEYP #endif FRAME_END RET SYM_FUNC_END(aesni_enc) /* * _aesni_enc1: internal ABI * input: * KEYP: key struct pointer * KLEN: round count * STATE: initial state (input) * output: * STATE: finial state (output) * changed: * KEY * TKEYP (T1) */ SYM_FUNC_START_LOCAL(_aesni_enc1) movaps (KEYP), KEY # key mov KEYP, TKEYP pxor 
KEY, STATE # round 0 add $0x30, TKEYP cmp $24, KLEN jb .Lenc128 lea 0x20(TKEYP), TKEYP je .Lenc192 add $0x20, TKEYP movaps -0x60(TKEYP), KEY aesenc KEY, STATE movaps -0x50(TKEYP), KEY aesenc KEY, STATE .align 4 .Lenc192: movaps -0x40(TKEYP), KEY aesenc KEY, STATE movaps -0x30(TKEYP), KEY aesenc KEY, STATE .align 4 .Lenc128: movaps -0x20(TKEYP), KEY aesenc KEY, STATE movaps -0x10(TKEYP), KEY aesenc KEY, STATE movaps (TKEYP), KEY aesenc KEY, STATE movaps 0x10(TKEYP), KEY aesenc KEY, STATE movaps 0x20(TKEYP), KEY aesenc KEY, STATE movaps 0x30(TKEYP), KEY aesenc KEY, STATE movaps 0x40(TKEYP), KEY aesenc KEY, STATE movaps 0x50(TKEYP), KEY aesenc KEY, STATE movaps 0x60(TKEYP), KEY aesenc KEY, STATE movaps 0x70(TKEYP), KEY aesenclast KEY, STATE RET SYM_FUNC_END(_aesni_enc1) /* * _aesni_enc4: internal ABI * input: * KEYP: key struct pointer * KLEN: round count * STATE1: initial state (input) * STATE2 * STATE3 * STATE4 * output: * STATE1: finial state (output) * STATE2 * STATE3 * STATE4 * changed: * KEY * TKEYP (T1) */ SYM_FUNC_START_LOCAL(_aesni_enc4) movaps (KEYP), KEY # key mov KEYP, TKEYP pxor KEY, STATE1 # round 0 pxor KEY, STATE2 pxor KEY, STATE3 pxor KEY, STATE4 add $0x30, TKEYP cmp $24, KLEN jb .L4enc128 lea 0x20(TKEYP), TKEYP je .L4enc192 add $0x20, TKEYP movaps -0x60(TKEYP), KEY aesenc KEY, STATE1 aesenc KEY, STATE2 aesenc KEY, STATE3 aesenc KEY, STATE4 movaps -0x50(TKEYP), KEY aesenc KEY, STATE1 aesenc KEY, STATE2 aesenc KEY, STATE3 aesenc KEY, STATE4 #.align 4 .L4enc192: movaps -0x40(TKEYP), KEY aesenc KEY, STATE1 aesenc KEY, STATE2 aesenc KEY, STATE3 aesenc KEY, STATE4 movaps -0x30(TKEYP), KEY aesenc KEY, STATE1 aesenc KEY, STATE2 aesenc KEY, STATE3 aesenc KEY, STATE4 #.align 4 .L4enc128: movaps -0x20(TKEYP), KEY aesenc KEY, STATE1 aesenc KEY, STATE2 aesenc KEY, STATE3 aesenc KEY, STATE4 movaps -0x10(TKEYP), KEY aesenc KEY, STATE1 aesenc KEY, STATE2 aesenc KEY, STATE3 aesenc KEY, STATE4 movaps (TKEYP), KEY aesenc KEY, STATE1 aesenc KEY, STATE2 aesenc KEY, STATE3 aesenc KEY, STATE4 movaps 0x10(TKEYP), KEY aesenc KEY, STATE1 aesenc KEY, STATE2 aesenc KEY, STATE3 aesenc KEY, STATE4 movaps 0x20(TKEYP), KEY aesenc KEY, STATE1 aesenc KEY, STATE2 aesenc KEY, STATE3 aesenc KEY, STATE4 movaps 0x30(TKEYP), KEY aesenc KEY, STATE1 aesenc KEY, STATE2 aesenc KEY, STATE3 aesenc KEY, STATE4 movaps 0x40(TKEYP), KEY aesenc KEY, STATE1 aesenc KEY, STATE2 aesenc KEY, STATE3 aesenc KEY, STATE4 movaps 0x50(TKEYP), KEY aesenc KEY, STATE1 aesenc KEY, STATE2 aesenc KEY, STATE3 aesenc KEY, STATE4 movaps 0x60(TKEYP), KEY aesenc KEY, STATE1 aesenc KEY, STATE2 aesenc KEY, STATE3 aesenc KEY, STATE4 movaps 0x70(TKEYP), KEY aesenclast KEY, STATE1 # last round aesenclast KEY, STATE2 aesenclast KEY, STATE3 aesenclast KEY, STATE4 RET SYM_FUNC_END(_aesni_enc4) /* * void aesni_dec (const void *ctx, u8 *dst, const u8 *src) */ SYM_FUNC_START(aesni_dec) FRAME_BEGIN #ifndef __x86_64__ pushl KEYP pushl KLEN movl (FRAME_OFFSET+12)(%esp), KEYP # ctx movl (FRAME_OFFSET+16)(%esp), OUTP # dst movl (FRAME_OFFSET+20)(%esp), INP # src #endif mov 480(KEYP), KLEN # key length add $240, KEYP movups (INP), STATE # input call _aesni_dec1 movups STATE, (OUTP) #output #ifndef __x86_64__ popl KLEN popl KEYP #endif FRAME_END RET SYM_FUNC_END(aesni_dec) /* * _aesni_dec1: internal ABI * input: * KEYP: key struct pointer * KLEN: key length * STATE: initial state (input) * output: * STATE: finial state (output) * changed: * KEY * TKEYP (T1) */ SYM_FUNC_START_LOCAL(_aesni_dec1) movaps (KEYP), KEY # key mov KEYP, TKEYP pxor KEY, STATE # round 0 add 
$0x30, TKEYP cmp $24, KLEN jb .Ldec128 lea 0x20(TKEYP), TKEYP je .Ldec192 add $0x20, TKEYP movaps -0x60(TKEYP), KEY aesdec KEY, STATE movaps -0x50(TKEYP), KEY aesdec KEY, STATE .align 4 .Ldec192: movaps -0x40(TKEYP), KEY aesdec KEY, STATE movaps -0x30(TKEYP), KEY aesdec KEY, STATE .align 4 .Ldec128: movaps -0x20(TKEYP), KEY aesdec KEY, STATE movaps -0x10(TKEYP), KEY aesdec KEY, STATE movaps (TKEYP), KEY aesdec KEY, STATE movaps 0x10(TKEYP), KEY aesdec KEY, STATE movaps 0x20(TKEYP), KEY aesdec KEY, STATE movaps 0x30(TKEYP), KEY aesdec KEY, STATE movaps 0x40(TKEYP), KEY aesdec KEY, STATE movaps 0x50(TKEYP), KEY aesdec KEY, STATE movaps 0x60(TKEYP), KEY aesdec KEY, STATE movaps 0x70(TKEYP), KEY aesdeclast KEY, STATE RET SYM_FUNC_END(_aesni_dec1) /* * _aesni_dec4: internal ABI * input: * KEYP: key struct pointer * KLEN: key length * STATE1: initial state (input) * STATE2 * STATE3 * STATE4 * output: * STATE1: finial state (output) * STATE2 * STATE3 * STATE4 * changed: * KEY * TKEYP (T1) */ SYM_FUNC_START_LOCAL(_aesni_dec4) movaps (KEYP), KEY # key mov KEYP, TKEYP pxor KEY, STATE1 # round 0 pxor KEY, STATE2 pxor KEY, STATE3 pxor KEY, STATE4 add $0x30, TKEYP cmp $24, KLEN jb .L4dec128 lea 0x20(TKEYP), TKEYP je .L4dec192 add $0x20, TKEYP movaps -0x60(TKEYP), KEY aesdec KEY, STATE1 aesdec KEY, STATE2 aesdec KEY, STATE3 aesdec KEY, STATE4 movaps -0x50(TKEYP), KEY aesdec KEY, STATE1 aesdec KEY, STATE2 aesdec KEY, STATE3 aesdec KEY, STATE4 .align 4 .L4dec192: movaps -0x40(TKEYP), KEY aesdec KEY, STATE1 aesdec KEY, STATE2 aesdec KEY, STATE3 aesdec KEY, STATE4 movaps -0x30(TKEYP), KEY aesdec KEY, STATE1 aesdec KEY, STATE2 aesdec KEY, STATE3 aesdec KEY, STATE4 .align 4 .L4dec128: movaps -0x20(TKEYP), KEY aesdec KEY, STATE1 aesdec KEY, STATE2 aesdec KEY, STATE3 aesdec KEY, STATE4 movaps -0x10(TKEYP), KEY aesdec KEY, STATE1 aesdec KEY, STATE2 aesdec KEY, STATE3 aesdec KEY, STATE4 movaps (TKEYP), KEY aesdec KEY, STATE1 aesdec KEY, STATE2 aesdec KEY, STATE3 aesdec KEY, STATE4 movaps 0x10(TKEYP), KEY aesdec KEY, STATE1 aesdec KEY, STATE2 aesdec KEY, STATE3 aesdec KEY, STATE4 movaps 0x20(TKEYP), KEY aesdec KEY, STATE1 aesdec KEY, STATE2 aesdec KEY, STATE3 aesdec KEY, STATE4 movaps 0x30(TKEYP), KEY aesdec KEY, STATE1 aesdec KEY, STATE2 aesdec KEY, STATE3 aesdec KEY, STATE4 movaps 0x40(TKEYP), KEY aesdec KEY, STATE1 aesdec KEY, STATE2 aesdec KEY, STATE3 aesdec KEY, STATE4 movaps 0x50(TKEYP), KEY aesdec KEY, STATE1 aesdec KEY, STATE2 aesdec KEY, STATE3 aesdec KEY, STATE4 movaps 0x60(TKEYP), KEY aesdec KEY, STATE1 aesdec KEY, STATE2 aesdec KEY, STATE3 aesdec KEY, STATE4 movaps 0x70(TKEYP), KEY aesdeclast KEY, STATE1 # last round aesdeclast KEY, STATE2 aesdeclast KEY, STATE3 aesdeclast KEY, STATE4 RET SYM_FUNC_END(_aesni_dec4) /* * void aesni_ecb_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, * size_t len) */ SYM_FUNC_START(aesni_ecb_enc) FRAME_BEGIN #ifndef __x86_64__ pushl LEN pushl KEYP pushl KLEN movl (FRAME_OFFSET+16)(%esp), KEYP # ctx movl (FRAME_OFFSET+20)(%esp), OUTP # dst movl (FRAME_OFFSET+24)(%esp), INP # src movl (FRAME_OFFSET+28)(%esp), LEN # len #endif test LEN, LEN # check length jz .Lecb_enc_ret mov 480(KEYP), KLEN cmp $16, LEN jb .Lecb_enc_ret cmp $64, LEN jb .Lecb_enc_loop1 .align 4 .Lecb_enc_loop4: movups (INP), STATE1 movups 0x10(INP), STATE2 movups 0x20(INP), STATE3 movups 0x30(INP), STATE4 call _aesni_enc4 movups STATE1, (OUTP) movups STATE2, 0x10(OUTP) movups STATE3, 0x20(OUTP) movups STATE4, 0x30(OUTP) sub $64, LEN add $64, INP add $64, OUTP cmp $64, LEN jge .Lecb_enc_loop4 cmp $16, 
LEN jb .Lecb_enc_ret .align 4 .Lecb_enc_loop1: movups (INP), STATE1 call _aesni_enc1 movups STATE1, (OUTP) sub $16, LEN add $16, INP add $16, OUTP cmp $16, LEN jge .Lecb_enc_loop1 .Lecb_enc_ret: #ifndef __x86_64__ popl KLEN popl KEYP popl LEN #endif FRAME_END RET SYM_FUNC_END(aesni_ecb_enc) /* * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, * size_t len); */ SYM_FUNC_START(aesni_ecb_dec) FRAME_BEGIN #ifndef __x86_64__ pushl LEN pushl KEYP pushl KLEN movl (FRAME_OFFSET+16)(%esp), KEYP # ctx movl (FRAME_OFFSET+20)(%esp), OUTP # dst movl (FRAME_OFFSET+24)(%esp), INP # src movl (FRAME_OFFSET+28)(%esp), LEN # len #endif test LEN, LEN jz .Lecb_dec_ret mov 480(KEYP), KLEN add $240, KEYP cmp $16, LEN jb .Lecb_dec_ret cmp $64, LEN jb .Lecb_dec_loop1 .align 4 .Lecb_dec_loop4: movups (INP), STATE1 movups 0x10(INP), STATE2 movups 0x20(INP), STATE3 movups 0x30(INP), STATE4 call _aesni_dec4 movups STATE1, (OUTP) movups STATE2, 0x10(OUTP) movups STATE3, 0x20(OUTP) movups STATE4, 0x30(OUTP) sub $64, LEN add $64, INP add $64, OUTP cmp $64, LEN jge .Lecb_dec_loop4 cmp $16, LEN jb .Lecb_dec_ret .align 4 .Lecb_dec_loop1: movups (INP), STATE1 call _aesni_dec1 movups STATE1, (OUTP) sub $16, LEN add $16, INP add $16, OUTP cmp $16, LEN jge .Lecb_dec_loop1 .Lecb_dec_ret: #ifndef __x86_64__ popl KLEN popl KEYP popl LEN #endif FRAME_END RET SYM_FUNC_END(aesni_ecb_dec) /* * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, * size_t len, u8 *iv) */ SYM_FUNC_START(aesni_cbc_enc) FRAME_BEGIN #ifndef __x86_64__ pushl IVP pushl LEN pushl KEYP pushl KLEN movl (FRAME_OFFSET+20)(%esp), KEYP # ctx movl (FRAME_OFFSET+24)(%esp), OUTP # dst movl (FRAME_OFFSET+28)(%esp), INP # src movl (FRAME_OFFSET+32)(%esp), LEN # len movl (FRAME_OFFSET+36)(%esp), IVP # iv #endif cmp $16, LEN jb .Lcbc_enc_ret mov 480(KEYP), KLEN movups (IVP), STATE # load iv as initial state .align 4 .Lcbc_enc_loop: movups (INP), IN # load input pxor IN, STATE call _aesni_enc1 movups STATE, (OUTP) # store output sub $16, LEN add $16, INP add $16, OUTP cmp $16, LEN jge .Lcbc_enc_loop movups STATE, (IVP) .Lcbc_enc_ret: #ifndef __x86_64__ popl KLEN popl KEYP popl LEN popl IVP #endif FRAME_END RET SYM_FUNC_END(aesni_cbc_enc) /* * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, * size_t len, u8 *iv) */ SYM_FUNC_START(aesni_cbc_dec) FRAME_BEGIN #ifndef __x86_64__ pushl IVP pushl LEN pushl KEYP pushl KLEN movl (FRAME_OFFSET+20)(%esp), KEYP # ctx movl (FRAME_OFFSET+24)(%esp), OUTP # dst movl (FRAME_OFFSET+28)(%esp), INP # src movl (FRAME_OFFSET+32)(%esp), LEN # len movl (FRAME_OFFSET+36)(%esp), IVP # iv #endif cmp $16, LEN jb .Lcbc_dec_just_ret mov 480(KEYP), KLEN add $240, KEYP movups (IVP), IV cmp $64, LEN jb .Lcbc_dec_loop1 .align 4 .Lcbc_dec_loop4: movups (INP), IN1 movaps IN1, STATE1 movups 0x10(INP), IN2 movaps IN2, STATE2 #ifdef __x86_64__ movups 0x20(INP), IN3 movaps IN3, STATE3 movups 0x30(INP), IN4 movaps IN4, STATE4 #else movups 0x20(INP), IN1 movaps IN1, STATE3 movups 0x30(INP), IN2 movaps IN2, STATE4 #endif call _aesni_dec4 pxor IV, STATE1 #ifdef __x86_64__ pxor IN1, STATE2 pxor IN2, STATE3 pxor IN3, STATE4 movaps IN4, IV #else pxor IN1, STATE4 movaps IN2, IV movups (INP), IN1 pxor IN1, STATE2 movups 0x10(INP), IN2 pxor IN2, STATE3 #endif movups STATE1, (OUTP) movups STATE2, 0x10(OUTP) movups STATE3, 0x20(OUTP) movups STATE4, 0x30(OUTP) sub $64, LEN add $64, INP add $64, OUTP cmp $64, LEN jge .Lcbc_dec_loop4 cmp $16, LEN jb .Lcbc_dec_ret .align 4 .Lcbc_dec_loop1: movups (INP), IN movaps 
IN, STATE call _aesni_dec1 pxor IV, STATE movups STATE, (OUTP) movaps IN, IV sub $16, LEN add $16, INP add $16, OUTP cmp $16, LEN jge .Lcbc_dec_loop1 .Lcbc_dec_ret: movups IV, (IVP) .Lcbc_dec_just_ret: #ifndef __x86_64__ popl KLEN popl KEYP popl LEN popl IVP #endif FRAME_END RET SYM_FUNC_END(aesni_cbc_dec) /* * void aesni_cts_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, * size_t len, u8 *iv) */ SYM_FUNC_START(aesni_cts_cbc_enc) FRAME_BEGIN #ifndef __x86_64__ pushl IVP pushl LEN pushl KEYP pushl KLEN movl (FRAME_OFFSET+20)(%esp), KEYP # ctx movl (FRAME_OFFSET+24)(%esp), OUTP # dst movl (FRAME_OFFSET+28)(%esp), INP # src movl (FRAME_OFFSET+32)(%esp), LEN # len movl (FRAME_OFFSET+36)(%esp), IVP # iv lea .Lcts_permute_table, T1 #else lea .Lcts_permute_table(%rip), T1 #endif mov 480(KEYP), KLEN movups (IVP), STATE sub $16, LEN mov T1, IVP add $32, IVP add LEN, T1 sub LEN, IVP movups (T1), %xmm4 movups (IVP), %xmm5 movups (INP), IN1 add LEN, INP movups (INP), IN2 pxor IN1, STATE call _aesni_enc1 pshufb %xmm5, IN2 pxor STATE, IN2 pshufb %xmm4, STATE add OUTP, LEN movups STATE, (LEN) movaps IN2, STATE call _aesni_enc1 movups STATE, (OUTP) #ifndef __x86_64__ popl KLEN popl KEYP popl LEN popl IVP #endif FRAME_END RET SYM_FUNC_END(aesni_cts_cbc_enc) /* * void aesni_cts_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, * size_t len, u8 *iv) */ SYM_FUNC_START(aesni_cts_cbc_dec) FRAME_BEGIN #ifndef __x86_64__ pushl IVP pushl LEN pushl KEYP pushl KLEN movl (FRAME_OFFSET+20)(%esp), KEYP # ctx movl (FRAME_OFFSET+24)(%esp), OUTP # dst movl (FRAME_OFFSET+28)(%esp), INP # src movl (FRAME_OFFSET+32)(%esp), LEN # len movl (FRAME_OFFSET+36)(%esp), IVP # iv lea .Lcts_permute_table, T1 #else lea .Lcts_permute_table(%rip), T1 #endif mov 480(KEYP), KLEN add $240, KEYP movups (IVP), IV sub $16, LEN mov T1, IVP add $32, IVP add LEN, T1 sub LEN, IVP movups (T1), %xmm4 movups (INP), STATE add LEN, INP movups (INP), IN1 call _aesni_dec1 movaps STATE, IN2 pshufb %xmm4, STATE pxor IN1, STATE add OUTP, LEN movups STATE, (LEN) movups (IVP), %xmm0 pshufb %xmm0, IN1 pblendvb IN2, IN1 movaps IN1, STATE call _aesni_dec1 pxor IV, STATE movups STATE, (OUTP) #ifndef __x86_64__ popl KLEN popl KEYP popl LEN popl IVP #endif FRAME_END RET SYM_FUNC_END(aesni_cts_cbc_dec) .pushsection .rodata .align 16 .Lcts_permute_table: .byte 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 .byte 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 .byte 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f .byte 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 .byte 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 #ifdef __x86_64__ .Lbswap_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 #endif .popsection #ifdef __x86_64__ /* * _aesni_inc_init: internal ABI * setup registers used by _aesni_inc * input: * IV * output: * CTR: == IV, in little endian * TCTR_LOW: == lower qword of CTR * INC: == 1, in little endian * BSWAP_MASK == endian swapping mask */ SYM_FUNC_START_LOCAL(_aesni_inc_init) movaps .Lbswap_mask, BSWAP_MASK movaps IV, CTR pshufb BSWAP_MASK, CTR mov $1, TCTR_LOW movq TCTR_LOW, INC movq CTR, TCTR_LOW RET SYM_FUNC_END(_aesni_inc_init) /* * _aesni_inc: internal ABI * Increase IV by 1, IV is in big endian * input: * IV * CTR: == IV, in little endian * TCTR_LOW: == lower qword of CTR * INC: == 1, in little endian * BSWAP_MASK == endian swapping mask * output: * IV: Increase by 1 * changed: * CTR: == output IV, in little endian * TCTR_LOW: == lower qword of CTR */ 
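/*
 * Added note on the carry handling in _aesni_inc below (derived from the
 * instruction sequence itself): the counter is kept little endian in CTR
 * while TCTR_LOW mirrors its low 64 bits.  paddq INC, CTR increments the
 * low qword; the scalar "add $1, TCTR_LOW" exists only to detect the wrap,
 * and when it carries, INC is temporarily shifted into the high lane so a
 * second paddq propagates the carry into the upper 64 bits.
 */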
SYM_FUNC_START_LOCAL(_aesni_inc) paddq INC, CTR add $1, TCTR_LOW jnc .Linc_low pslldq $8, INC paddq INC, CTR psrldq $8, INC .Linc_low: movaps CTR, IV pshufb BSWAP_MASK, IV RET SYM_FUNC_END(_aesni_inc) /* * void aesni_ctr_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, * size_t len, u8 *iv) */ SYM_FUNC_START(aesni_ctr_enc) FRAME_BEGIN cmp $16, LEN jb .Lctr_enc_just_ret mov 480(KEYP), KLEN movups (IVP), IV call _aesni_inc_init cmp $64, LEN jb .Lctr_enc_loop1 .align 4 .Lctr_enc_loop4: movaps IV, STATE1 call _aesni_inc movups (INP), IN1 movaps IV, STATE2 call _aesni_inc movups 0x10(INP), IN2 movaps IV, STATE3 call _aesni_inc movups 0x20(INP), IN3 movaps IV, STATE4 call _aesni_inc movups 0x30(INP), IN4 call _aesni_enc4 pxor IN1, STATE1 movups STATE1, (OUTP) pxor IN2, STATE2 movups STATE2, 0x10(OUTP) pxor IN3, STATE3 movups STATE3, 0x20(OUTP) pxor IN4, STATE4 movups STATE4, 0x30(OUTP) sub $64, LEN add $64, INP add $64, OUTP cmp $64, LEN jge .Lctr_enc_loop4 cmp $16, LEN jb .Lctr_enc_ret .align 4 .Lctr_enc_loop1: movaps IV, STATE call _aesni_inc movups (INP), IN call _aesni_enc1 pxor IN, STATE movups STATE, (OUTP) sub $16, LEN add $16, INP add $16, OUTP cmp $16, LEN jge .Lctr_enc_loop1 .Lctr_enc_ret: movups IV, (IVP) .Lctr_enc_just_ret: FRAME_END RET SYM_FUNC_END(aesni_ctr_enc) #endif .section .rodata.cst16.gf128mul_x_ble_mask, "aM", @progbits, 16 .align 16 .Lgf128mul_x_ble_mask: .octa 0x00000000000000010000000000000087 .previous /* * _aesni_gf128mul_x_ble: internal ABI * Multiply in GF(2^128) for XTS IVs * input: * IV: current IV * GF128MUL_MASK == mask with 0x87 and 0x01 * output: * IV: next IV * changed: * CTR: == temporary value */ #define _aesni_gf128mul_x_ble() \ pshufd $0x13, IV, KEY; \ paddq IV, IV; \ psrad $31, KEY; \ pand GF128MUL_MASK, KEY; \ pxor KEY, IV; /* * void aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *dst, * const u8 *src, unsigned int len, le128 *iv) */ SYM_FUNC_START(aesni_xts_encrypt) FRAME_BEGIN #ifndef __x86_64__ pushl IVP pushl LEN pushl KEYP pushl KLEN movl (FRAME_OFFSET+20)(%esp), KEYP # ctx movl (FRAME_OFFSET+24)(%esp), OUTP # dst movl (FRAME_OFFSET+28)(%esp), INP # src movl (FRAME_OFFSET+32)(%esp), LEN # len movl (FRAME_OFFSET+36)(%esp), IVP # iv movdqa .Lgf128mul_x_ble_mask, GF128MUL_MASK #else movdqa .Lgf128mul_x_ble_mask(%rip), GF128MUL_MASK #endif movups (IVP), IV mov 480(KEYP), KLEN .Lxts_enc_loop4: sub $64, LEN jl .Lxts_enc_1x movdqa IV, STATE1 movdqu 0x00(INP), IN pxor IN, STATE1 movdqu IV, 0x00(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE2 movdqu 0x10(INP), IN pxor IN, STATE2 movdqu IV, 0x10(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE3 movdqu 0x20(INP), IN pxor IN, STATE3 movdqu IV, 0x20(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE4 movdqu 0x30(INP), IN pxor IN, STATE4 movdqu IV, 0x30(OUTP) call _aesni_enc4 movdqu 0x00(OUTP), IN pxor IN, STATE1 movdqu STATE1, 0x00(OUTP) movdqu 0x10(OUTP), IN pxor IN, STATE2 movdqu STATE2, 0x10(OUTP) movdqu 0x20(OUTP), IN pxor IN, STATE3 movdqu STATE3, 0x20(OUTP) movdqu 0x30(OUTP), IN pxor IN, STATE4 movdqu STATE4, 0x30(OUTP) _aesni_gf128mul_x_ble() add $64, INP add $64, OUTP test LEN, LEN jnz .Lxts_enc_loop4 .Lxts_enc_ret_iv: movups IV, (IVP) .Lxts_enc_ret: #ifndef __x86_64__ popl KLEN popl KEYP popl LEN popl IVP #endif FRAME_END RET .Lxts_enc_1x: add $64, LEN jz .Lxts_enc_ret_iv sub $16, LEN jl .Lxts_enc_cts4 .Lxts_enc_loop1: movdqu (INP), STATE pxor IV, STATE call _aesni_enc1 pxor IV, STATE _aesni_gf128mul_x_ble() test LEN, LEN jz .Lxts_enc_out add $16, INP sub $16, LEN jl .Lxts_enc_cts1 movdqu 
STATE, (OUTP) add $16, OUTP jmp .Lxts_enc_loop1 .Lxts_enc_out: movdqu STATE, (OUTP) jmp .Lxts_enc_ret_iv .Lxts_enc_cts4: movdqa STATE4, STATE sub $16, OUTP .Lxts_enc_cts1: #ifndef __x86_64__ lea .Lcts_permute_table, T1 #else lea .Lcts_permute_table(%rip), T1 #endif add LEN, INP /* rewind input pointer */ add $16, LEN /* # bytes in final block */ movups (INP), IN1 mov T1, IVP add $32, IVP add LEN, T1 sub LEN, IVP add OUTP, LEN movups (T1), %xmm4 movaps STATE, IN2 pshufb %xmm4, STATE movups STATE, (LEN) movups (IVP), %xmm0 pshufb %xmm0, IN1 pblendvb IN2, IN1 movaps IN1, STATE pxor IV, STATE call _aesni_enc1 pxor IV, STATE movups STATE, (OUTP) jmp .Lxts_enc_ret SYM_FUNC_END(aesni_xts_encrypt) /* * void aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *dst, * const u8 *src, unsigned int len, le128 *iv) */ SYM_FUNC_START(aesni_xts_decrypt) FRAME_BEGIN #ifndef __x86_64__ pushl IVP pushl LEN pushl KEYP pushl KLEN movl (FRAME_OFFSET+20)(%esp), KEYP # ctx movl (FRAME_OFFSET+24)(%esp), OUTP # dst movl (FRAME_OFFSET+28)(%esp), INP # src movl (FRAME_OFFSET+32)(%esp), LEN # len movl (FRAME_OFFSET+36)(%esp), IVP # iv movdqa .Lgf128mul_x_ble_mask, GF128MUL_MASK #else movdqa .Lgf128mul_x_ble_mask(%rip), GF128MUL_MASK #endif movups (IVP), IV mov 480(KEYP), KLEN add $240, KEYP test $15, LEN jz .Lxts_dec_loop4 sub $16, LEN .Lxts_dec_loop4: sub $64, LEN jl .Lxts_dec_1x movdqa IV, STATE1 movdqu 0x00(INP), IN pxor IN, STATE1 movdqu IV, 0x00(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE2 movdqu 0x10(INP), IN pxor IN, STATE2 movdqu IV, 0x10(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE3 movdqu 0x20(INP), IN pxor IN, STATE3 movdqu IV, 0x20(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE4 movdqu 0x30(INP), IN pxor IN, STATE4 movdqu IV, 0x30(OUTP) call _aesni_dec4 movdqu 0x00(OUTP), IN pxor IN, STATE1 movdqu STATE1, 0x00(OUTP) movdqu 0x10(OUTP), IN pxor IN, STATE2 movdqu STATE2, 0x10(OUTP) movdqu 0x20(OUTP), IN pxor IN, STATE3 movdqu STATE3, 0x20(OUTP) movdqu 0x30(OUTP), IN pxor IN, STATE4 movdqu STATE4, 0x30(OUTP) _aesni_gf128mul_x_ble() add $64, INP add $64, OUTP test LEN, LEN jnz .Lxts_dec_loop4 .Lxts_dec_ret_iv: movups IV, (IVP) .Lxts_dec_ret: #ifndef __x86_64__ popl KLEN popl KEYP popl LEN popl IVP #endif FRAME_END RET .Lxts_dec_1x: add $64, LEN jz .Lxts_dec_ret_iv .Lxts_dec_loop1: movdqu (INP), STATE add $16, INP sub $16, LEN jl .Lxts_dec_cts1 pxor IV, STATE call _aesni_dec1 pxor IV, STATE _aesni_gf128mul_x_ble() test LEN, LEN jz .Lxts_dec_out movdqu STATE, (OUTP) add $16, OUTP jmp .Lxts_dec_loop1 .Lxts_dec_out: movdqu STATE, (OUTP) jmp .Lxts_dec_ret_iv .Lxts_dec_cts1: movdqa IV, STATE4 _aesni_gf128mul_x_ble() pxor IV, STATE call _aesni_dec1 pxor IV, STATE #ifndef __x86_64__ lea .Lcts_permute_table, T1 #else lea .Lcts_permute_table(%rip), T1 #endif add LEN, INP /* rewind input pointer */ add $16, LEN /* # bytes in final block */ movups (INP), IN1 mov T1, IVP add $32, IVP add LEN, T1 sub LEN, IVP add OUTP, LEN movups (T1), %xmm4 movaps STATE, IN2 pshufb %xmm4, STATE movups STATE, (LEN) movups (IVP), %xmm0 pshufb %xmm0, IN1 pblendvb IN2, IN1 movaps IN1, STATE pxor STATE4, STATE call _aesni_dec1 pxor STATE4, STATE movups STATE, (OUTP) jmp .Lxts_dec_ret SYM_FUNC_END(aesni_xts_decrypt)
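The _aesni_gf128mul_x_ble() macro above advances the XTS tweak by one block position, i.e. multiplies it by x in GF(2^128) with the 16-byte tweak viewed as a 128-bit little-endian value. A minimal C model of that step, for reference only (the struct and function names are local to this sketch, not the kernel's le128 helpers):

#include <stdint.h>

/* Stand-in for the 16-byte tweak: lo = bytes 0..7, hi = bytes 8..15. */
struct tweak128 {
	uint64_t lo;
	uint64_t hi;
};

/* Multiply the tweak by x: shift left by one bit across all 128 bits;
 * the bit that falls out of bit 63 carries into bit 64, and the bit
 * that falls out of bit 127 is folded back in as 0x87 (the GF(2^128)
 * reduction), matching the 0x87/0x01 constant loaded into
 * GF128MUL_MASK above. */
static void gf128mul_x_ble_sketch(struct tweak128 *t)
{
	uint64_t carry_lo = t->lo >> 63;	/* bit 63 -> bit 64 */
	uint64_t carry_hi = t->hi >> 63;	/* bit 127 -> reduction */

	t->hi = (t->hi << 1) | carry_lo;
	t->lo = (t->lo << 1) ^ (carry_hi ? 0x87 : 0);
}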
aixcc-public/challenge-001-exemplar-source
5,995
arch/x86/crypto/blowfish-x86_64-asm_64.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Blowfish Cipher Algorithm (x86_64) * * Copyright (C) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> */ #include <linux/linkage.h> #include <linux/cfi_types.h> .file "blowfish-x86_64-asm.S" .text /* structure of crypto context */ #define p 0 #define s0 ((16 + 2) * 4) #define s1 ((16 + 2 + (1 * 256)) * 4) #define s2 ((16 + 2 + (2 * 256)) * 4) #define s3 ((16 + 2 + (3 * 256)) * 4) /* register macros */ #define CTX %r12 #define RIO %rsi #define RX0 %rax #define RX1 %rbx #define RX2 %rcx #define RX3 %rdx #define RX0d %eax #define RX1d %ebx #define RX2d %ecx #define RX3d %edx #define RX0bl %al #define RX1bl %bl #define RX2bl %cl #define RX3bl %dl #define RX0bh %ah #define RX1bh %bh #define RX2bh %ch #define RX3bh %dh #define RT0 %rdi #define RT1 %rsi #define RT2 %r8 #define RT3 %r9 #define RT0d %edi #define RT1d %esi #define RT2d %r8d #define RT3d %r9d #define RKEY %r10 /*********************************************************************** * 1-way blowfish ***********************************************************************/ #define F() \ rorq $16, RX0; \ movzbl RX0bh, RT0d; \ movzbl RX0bl, RT1d; \ rolq $16, RX0; \ movl s0(CTX,RT0,4), RT0d; \ addl s1(CTX,RT1,4), RT0d; \ movzbl RX0bh, RT1d; \ movzbl RX0bl, RT2d; \ rolq $32, RX0; \ xorl s2(CTX,RT1,4), RT0d; \ addl s3(CTX,RT2,4), RT0d; \ xorq RT0, RX0; #define add_roundkey_enc(n) \ xorq p+4*(n)(CTX), RX0; #define round_enc(n) \ add_roundkey_enc(n); \ \ F(); \ F(); #define add_roundkey_dec(n) \ movq p+4*(n-1)(CTX), RT0; \ rorq $32, RT0; \ xorq RT0, RX0; #define round_dec(n) \ add_roundkey_dec(n); \ \ F(); \ F(); \ #define read_block() \ movq (RIO), RX0; \ rorq $32, RX0; \ bswapq RX0; #define write_block() \ bswapq RX0; \ movq RX0, (RIO); #define xor_block() \ bswapq RX0; \ xorq RX0, (RIO); SYM_FUNC_START(__blowfish_enc_blk) /* input: * %rdi: ctx * %rsi: dst * %rdx: src * %rcx: bool, if true: xor output */ movq %r12, %r11; movq %rdi, CTX; movq %rsi, %r10; movq %rdx, RIO; read_block(); round_enc(0); round_enc(2); round_enc(4); round_enc(6); round_enc(8); round_enc(10); round_enc(12); round_enc(14); add_roundkey_enc(16); movq %r11, %r12; movq %r10, RIO; test %cl, %cl; jnz .L__enc_xor; write_block(); RET; .L__enc_xor: xor_block(); RET; SYM_FUNC_END(__blowfish_enc_blk) SYM_TYPED_FUNC_START(blowfish_dec_blk) /* input: * %rdi: ctx * %rsi: dst * %rdx: src */ movq %r12, %r11; movq %rdi, CTX; movq %rsi, %r10; movq %rdx, RIO; read_block(); round_dec(17); round_dec(15); round_dec(13); round_dec(11); round_dec(9); round_dec(7); round_dec(5); round_dec(3); add_roundkey_dec(1); movq %r10, RIO; write_block(); movq %r11, %r12; RET; SYM_FUNC_END(blowfish_dec_blk) /********************************************************************** 4-way blowfish, four blocks parallel **********************************************************************/ /* F() for 4-way. Slower when used alone/1-way, but faster when used * parallel/4-way (tested on AMD Phenom II & Intel Xeon E7330). 
*/ #define F4(x) \ movzbl x ## bh, RT1d; \ movzbl x ## bl, RT3d; \ rorq $16, x; \ movzbl x ## bh, RT0d; \ movzbl x ## bl, RT2d; \ rorq $16, x; \ movl s0(CTX,RT0,4), RT0d; \ addl s1(CTX,RT2,4), RT0d; \ xorl s2(CTX,RT1,4), RT0d; \ addl s3(CTX,RT3,4), RT0d; \ xorq RT0, x; #define add_preloaded_roundkey4() \ xorq RKEY, RX0; \ xorq RKEY, RX1; \ xorq RKEY, RX2; \ xorq RKEY, RX3; #define preload_roundkey_enc(n) \ movq p+4*(n)(CTX), RKEY; #define add_roundkey_enc4(n) \ add_preloaded_roundkey4(); \ preload_roundkey_enc(n + 2); #define round_enc4(n) \ add_roundkey_enc4(n); \ \ F4(RX0); \ F4(RX1); \ F4(RX2); \ F4(RX3); \ \ F4(RX0); \ F4(RX1); \ F4(RX2); \ F4(RX3); #define preload_roundkey_dec(n) \ movq p+4*((n)-1)(CTX), RKEY; \ rorq $32, RKEY; #define add_roundkey_dec4(n) \ add_preloaded_roundkey4(); \ preload_roundkey_dec(n - 2); #define round_dec4(n) \ add_roundkey_dec4(n); \ \ F4(RX0); \ F4(RX1); \ F4(RX2); \ F4(RX3); \ \ F4(RX0); \ F4(RX1); \ F4(RX2); \ F4(RX3); #define read_block4() \ movq (RIO), RX0; \ rorq $32, RX0; \ bswapq RX0; \ \ movq 8(RIO), RX1; \ rorq $32, RX1; \ bswapq RX1; \ \ movq 16(RIO), RX2; \ rorq $32, RX2; \ bswapq RX2; \ \ movq 24(RIO), RX3; \ rorq $32, RX3; \ bswapq RX3; #define write_block4() \ bswapq RX0; \ movq RX0, (RIO); \ \ bswapq RX1; \ movq RX1, 8(RIO); \ \ bswapq RX2; \ movq RX2, 16(RIO); \ \ bswapq RX3; \ movq RX3, 24(RIO); #define xor_block4() \ bswapq RX0; \ xorq RX0, (RIO); \ \ bswapq RX1; \ xorq RX1, 8(RIO); \ \ bswapq RX2; \ xorq RX2, 16(RIO); \ \ bswapq RX3; \ xorq RX3, 24(RIO); SYM_FUNC_START(__blowfish_enc_blk_4way) /* input: * %rdi: ctx * %rsi: dst * %rdx: src * %rcx: bool, if true: xor output */ pushq %r12; pushq %rbx; pushq %rcx; movq %rdi, CTX movq %rsi, %r11; movq %rdx, RIO; preload_roundkey_enc(0); read_block4(); round_enc4(0); round_enc4(2); round_enc4(4); round_enc4(6); round_enc4(8); round_enc4(10); round_enc4(12); round_enc4(14); add_preloaded_roundkey4(); popq %r12; movq %r11, RIO; test %r12b, %r12b; jnz .L__enc_xor4; write_block4(); popq %rbx; popq %r12; RET; .L__enc_xor4: xor_block4(); popq %rbx; popq %r12; RET; SYM_FUNC_END(__blowfish_enc_blk_4way) SYM_TYPED_FUNC_START(blowfish_dec_blk_4way) /* input: * %rdi: ctx * %rsi: dst * %rdx: src */ pushq %r12; pushq %rbx; movq %rdi, CTX; movq %rsi, %r11 movq %rdx, RIO; preload_roundkey_dec(17); read_block4(); round_dec4(17); round_dec4(15); round_dec4(13); round_dec4(11); round_dec4(9); round_dec4(7); round_dec4(5); round_dec4(3); add_preloaded_roundkey4(); movq %r11, RIO; write_block4(); popq %rbx; popq %r12; RET; SYM_FUNC_END(blowfish_dec_blk_4way)
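Both the 1-way and 4-way paths in this file are built on the F()/F4() round macro, which is the standard Blowfish round function. A hedged C equivalent, assuming the four 256-entry S-boxes are passed as a plain array (the parameter layout here is an assumption of this sketch, not the kernel's context layout):

#include <stdint.h>

/* Standard Blowfish F: split x into four bytes (a = most significant)
 * and combine the S-box lookups as ((S0[a] + S1[b]) ^ S2[c]) + S3[d],
 * all arithmetic mod 2^32. */
static uint32_t blowfish_f(const uint32_t s[4][256], uint32_t x)
{
	uint32_t a = (x >> 24) & 0xff;
	uint32_t b = (x >> 16) & 0xff;
	uint32_t c = (x >>  8) & 0xff;
	uint32_t d =  x        & 0xff;

	return ((s[0][a] + s[1][b]) ^ s[2][c]) + s[3][d];
}

For orientation: round_enc(n) above fuses two cipher rounds, XORing two consecutive P-array words into the 64-bit block at once and then applying F to each half in turn, which is why it is invoked with even indices only.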
aixcc-public/challenge-001-exemplar-source
26,147
arch/x86/crypto/des3_ede-asm_64.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * des3_ede-asm_64.S - x86-64 assembly implementation of 3DES cipher * * Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@iki.fi> */ #include <linux/linkage.h> .file "des3_ede-asm_64.S" .text #define s1 .L_s1 #define s2 ((s1) + (64*8)) #define s3 ((s2) + (64*8)) #define s4 ((s3) + (64*8)) #define s5 ((s4) + (64*8)) #define s6 ((s5) + (64*8)) #define s7 ((s6) + (64*8)) #define s8 ((s7) + (64*8)) /* register macros */ #define CTX %rdi #define RL0 %r8 #define RL1 %r9 #define RL2 %r10 #define RL0d %r8d #define RL1d %r9d #define RL2d %r10d #define RR0 %r11 #define RR1 %r12 #define RR2 %r13 #define RR0d %r11d #define RR1d %r12d #define RR2d %r13d #define RW0 %rax #define RW1 %rbx #define RW2 %rcx #define RW0d %eax #define RW1d %ebx #define RW2d %ecx #define RW0bl %al #define RW1bl %bl #define RW2bl %cl #define RW0bh %ah #define RW1bh %bh #define RW2bh %ch #define RT0 %r15 #define RT1 %rsi #define RT2 %r14 #define RT3 %rdx #define RT0d %r15d #define RT1d %esi #define RT2d %r14d #define RT3d %edx /*********************************************************************** * 1-way 3DES ***********************************************************************/ #define do_permutation(a, b, offset, mask) \ movl a, RT0d; \ shrl $(offset), RT0d; \ xorl b, RT0d; \ andl $(mask), RT0d; \ xorl RT0d, b; \ shll $(offset), RT0d; \ xorl RT0d, a; #define expand_to_64bits(val, mask) \ movl val##d, RT0d; \ rorl $4, RT0d; \ shlq $32, RT0; \ orq RT0, val; \ andq mask, val; #define compress_to_64bits(val) \ movq val, RT0; \ shrq $32, RT0; \ roll $4, RT0d; \ orl RT0d, val##d; #define initial_permutation(left, right) \ do_permutation(left##d, right##d, 4, 0x0f0f0f0f); \ do_permutation(left##d, right##d, 16, 0x0000ffff); \ do_permutation(right##d, left##d, 2, 0x33333333); \ do_permutation(right##d, left##d, 8, 0x00ff00ff); \ movabs $0x3f3f3f3f3f3f3f3f, RT3; \ movl left##d, RW0d; \ roll $1, right##d; \ xorl right##d, RW0d; \ andl $0xaaaaaaaa, RW0d; \ xorl RW0d, left##d; \ xorl RW0d, right##d; \ roll $1, left##d; \ expand_to_64bits(right, RT3); \ expand_to_64bits(left, RT3); #define final_permutation(left, right) \ compress_to_64bits(right); \ compress_to_64bits(left); \ movl right##d, RW0d; \ rorl $1, left##d; \ xorl left##d, RW0d; \ andl $0xaaaaaaaa, RW0d; \ xorl RW0d, right##d; \ xorl RW0d, left##d; \ rorl $1, right##d; \ do_permutation(right##d, left##d, 8, 0x00ff00ff); \ do_permutation(right##d, left##d, 2, 0x33333333); \ do_permutation(left##d, right##d, 16, 0x0000ffff); \ do_permutation(left##d, right##d, 4, 0x0f0f0f0f); #define round1(n, from, to, load_next_key) \ xorq from, RW0; \ \ movzbl RW0bl, RT0d; \ movzbl RW0bh, RT1d; \ shrq $16, RW0; \ movzbl RW0bl, RT2d; \ movzbl RW0bh, RT3d; \ shrq $16, RW0; \ movq s8(, RT0, 8), RT0; \ xorq s6(, RT1, 8), to; \ movzbl RW0bl, RL1d; \ movzbl RW0bh, RT1d; \ shrl $16, RW0d; \ xorq s4(, RT2, 8), RT0; \ xorq s2(, RT3, 8), to; \ movzbl RW0bl, RT2d; \ movzbl RW0bh, RT3d; \ xorq s7(, RL1, 8), RT0; \ xorq s5(, RT1, 8), to; \ xorq s3(, RT2, 8), RT0; \ load_next_key(n, RW0); \ xorq RT0, to; \ xorq s1(, RT3, 8), to; \ #define load_next_key(n, RWx) \ movq (((n) + 1) * 8)(CTX), RWx; #define dummy2(a, b) /*_*/ #define read_block(io, left, right) \ movl (io), left##d; \ movl 4(io), right##d; \ bswapl left##d; \ bswapl right##d; #define write_block(io, left, right) \ bswapl left##d; \ bswapl right##d; \ movl left##d, (io); \ movl right##d, 4(io); SYM_FUNC_START(des3_ede_x86_64_crypt_blk) /* input: * %rdi: round keys, CTX * %rsi: dst * %rdx: src 
*/ pushq %rbx; pushq %r12; pushq %r13; pushq %r14; pushq %r15; pushq %rsi; /* dst */ read_block(%rdx, RL0, RR0); initial_permutation(RL0, RR0); movq (CTX), RW0; round1(0, RR0, RL0, load_next_key); round1(1, RL0, RR0, load_next_key); round1(2, RR0, RL0, load_next_key); round1(3, RL0, RR0, load_next_key); round1(4, RR0, RL0, load_next_key); round1(5, RL0, RR0, load_next_key); round1(6, RR0, RL0, load_next_key); round1(7, RL0, RR0, load_next_key); round1(8, RR0, RL0, load_next_key); round1(9, RL0, RR0, load_next_key); round1(10, RR0, RL0, load_next_key); round1(11, RL0, RR0, load_next_key); round1(12, RR0, RL0, load_next_key); round1(13, RL0, RR0, load_next_key); round1(14, RR0, RL0, load_next_key); round1(15, RL0, RR0, load_next_key); round1(16+0, RL0, RR0, load_next_key); round1(16+1, RR0, RL0, load_next_key); round1(16+2, RL0, RR0, load_next_key); round1(16+3, RR0, RL0, load_next_key); round1(16+4, RL0, RR0, load_next_key); round1(16+5, RR0, RL0, load_next_key); round1(16+6, RL0, RR0, load_next_key); round1(16+7, RR0, RL0, load_next_key); round1(16+8, RL0, RR0, load_next_key); round1(16+9, RR0, RL0, load_next_key); round1(16+10, RL0, RR0, load_next_key); round1(16+11, RR0, RL0, load_next_key); round1(16+12, RL0, RR0, load_next_key); round1(16+13, RR0, RL0, load_next_key); round1(16+14, RL0, RR0, load_next_key); round1(16+15, RR0, RL0, load_next_key); round1(32+0, RR0, RL0, load_next_key); round1(32+1, RL0, RR0, load_next_key); round1(32+2, RR0, RL0, load_next_key); round1(32+3, RL0, RR0, load_next_key); round1(32+4, RR0, RL0, load_next_key); round1(32+5, RL0, RR0, load_next_key); round1(32+6, RR0, RL0, load_next_key); round1(32+7, RL0, RR0, load_next_key); round1(32+8, RR0, RL0, load_next_key); round1(32+9, RL0, RR0, load_next_key); round1(32+10, RR0, RL0, load_next_key); round1(32+11, RL0, RR0, load_next_key); round1(32+12, RR0, RL0, load_next_key); round1(32+13, RL0, RR0, load_next_key); round1(32+14, RR0, RL0, load_next_key); round1(32+15, RL0, RR0, dummy2); final_permutation(RR0, RL0); popq %rsi /* dst */ write_block(%rsi, RR0, RL0); popq %r15; popq %r14; popq %r13; popq %r12; popq %rbx; RET; SYM_FUNC_END(des3_ede_x86_64_crypt_blk) /*********************************************************************** * 3-way 3DES ***********************************************************************/ #define expand_to_64bits(val, mask) \ movl val##d, RT0d; \ rorl $4, RT0d; \ shlq $32, RT0; \ orq RT0, val; \ andq mask, val; #define compress_to_64bits(val) \ movq val, RT0; \ shrq $32, RT0; \ roll $4, RT0d; \ orl RT0d, val##d; #define initial_permutation3(left, right) \ do_permutation(left##0d, right##0d, 4, 0x0f0f0f0f); \ do_permutation(left##0d, right##0d, 16, 0x0000ffff); \ do_permutation(left##1d, right##1d, 4, 0x0f0f0f0f); \ do_permutation(left##1d, right##1d, 16, 0x0000ffff); \ do_permutation(left##2d, right##2d, 4, 0x0f0f0f0f); \ do_permutation(left##2d, right##2d, 16, 0x0000ffff); \ \ do_permutation(right##0d, left##0d, 2, 0x33333333); \ do_permutation(right##0d, left##0d, 8, 0x00ff00ff); \ do_permutation(right##1d, left##1d, 2, 0x33333333); \ do_permutation(right##1d, left##1d, 8, 0x00ff00ff); \ do_permutation(right##2d, left##2d, 2, 0x33333333); \ do_permutation(right##2d, left##2d, 8, 0x00ff00ff); \ \ movabs $0x3f3f3f3f3f3f3f3f, RT3; \ \ movl left##0d, RW0d; \ roll $1, right##0d; \ xorl right##0d, RW0d; \ andl $0xaaaaaaaa, RW0d; \ xorl RW0d, left##0d; \ xorl RW0d, right##0d; \ roll $1, left##0d; \ expand_to_64bits(right##0, RT3); \ expand_to_64bits(left##0, RT3); \ movl left##1d, RW1d; \ 
roll $1, right##1d; \ xorl right##1d, RW1d; \ andl $0xaaaaaaaa, RW1d; \ xorl RW1d, left##1d; \ xorl RW1d, right##1d; \ roll $1, left##1d; \ expand_to_64bits(right##1, RT3); \ expand_to_64bits(left##1, RT3); \ movl left##2d, RW2d; \ roll $1, right##2d; \ xorl right##2d, RW2d; \ andl $0xaaaaaaaa, RW2d; \ xorl RW2d, left##2d; \ xorl RW2d, right##2d; \ roll $1, left##2d; \ expand_to_64bits(right##2, RT3); \ expand_to_64bits(left##2, RT3); #define final_permutation3(left, right) \ compress_to_64bits(right##0); \ compress_to_64bits(left##0); \ movl right##0d, RW0d; \ rorl $1, left##0d; \ xorl left##0d, RW0d; \ andl $0xaaaaaaaa, RW0d; \ xorl RW0d, right##0d; \ xorl RW0d, left##0d; \ rorl $1, right##0d; \ compress_to_64bits(right##1); \ compress_to_64bits(left##1); \ movl right##1d, RW1d; \ rorl $1, left##1d; \ xorl left##1d, RW1d; \ andl $0xaaaaaaaa, RW1d; \ xorl RW1d, right##1d; \ xorl RW1d, left##1d; \ rorl $1, right##1d; \ compress_to_64bits(right##2); \ compress_to_64bits(left##2); \ movl right##2d, RW2d; \ rorl $1, left##2d; \ xorl left##2d, RW2d; \ andl $0xaaaaaaaa, RW2d; \ xorl RW2d, right##2d; \ xorl RW2d, left##2d; \ rorl $1, right##2d; \ \ do_permutation(right##0d, left##0d, 8, 0x00ff00ff); \ do_permutation(right##0d, left##0d, 2, 0x33333333); \ do_permutation(right##1d, left##1d, 8, 0x00ff00ff); \ do_permutation(right##1d, left##1d, 2, 0x33333333); \ do_permutation(right##2d, left##2d, 8, 0x00ff00ff); \ do_permutation(right##2d, left##2d, 2, 0x33333333); \ \ do_permutation(left##0d, right##0d, 16, 0x0000ffff); \ do_permutation(left##0d, right##0d, 4, 0x0f0f0f0f); \ do_permutation(left##1d, right##1d, 16, 0x0000ffff); \ do_permutation(left##1d, right##1d, 4, 0x0f0f0f0f); \ do_permutation(left##2d, right##2d, 16, 0x0000ffff); \ do_permutation(left##2d, right##2d, 4, 0x0f0f0f0f); #define round3(n, from, to, load_next_key, do_movq) \ xorq from##0, RW0; \ movzbl RW0bl, RT3d; \ movzbl RW0bh, RT1d; \ shrq $16, RW0; \ xorq s8(, RT3, 8), to##0; \ xorq s6(, RT1, 8), to##0; \ movzbl RW0bl, RT3d; \ movzbl RW0bh, RT1d; \ shrq $16, RW0; \ xorq s4(, RT3, 8), to##0; \ xorq s2(, RT1, 8), to##0; \ movzbl RW0bl, RT3d; \ movzbl RW0bh, RT1d; \ shrl $16, RW0d; \ xorq s7(, RT3, 8), to##0; \ xorq s5(, RT1, 8), to##0; \ movzbl RW0bl, RT3d; \ movzbl RW0bh, RT1d; \ load_next_key(n, RW0); \ xorq s3(, RT3, 8), to##0; \ xorq s1(, RT1, 8), to##0; \ xorq from##1, RW1; \ movzbl RW1bl, RT3d; \ movzbl RW1bh, RT1d; \ shrq $16, RW1; \ xorq s8(, RT3, 8), to##1; \ xorq s6(, RT1, 8), to##1; \ movzbl RW1bl, RT3d; \ movzbl RW1bh, RT1d; \ shrq $16, RW1; \ xorq s4(, RT3, 8), to##1; \ xorq s2(, RT1, 8), to##1; \ movzbl RW1bl, RT3d; \ movzbl RW1bh, RT1d; \ shrl $16, RW1d; \ xorq s7(, RT3, 8), to##1; \ xorq s5(, RT1, 8), to##1; \ movzbl RW1bl, RT3d; \ movzbl RW1bh, RT1d; \ do_movq(RW0, RW1); \ xorq s3(, RT3, 8), to##1; \ xorq s1(, RT1, 8), to##1; \ xorq from##2, RW2; \ movzbl RW2bl, RT3d; \ movzbl RW2bh, RT1d; \ shrq $16, RW2; \ xorq s8(, RT3, 8), to##2; \ xorq s6(, RT1, 8), to##2; \ movzbl RW2bl, RT3d; \ movzbl RW2bh, RT1d; \ shrq $16, RW2; \ xorq s4(, RT3, 8), to##2; \ xorq s2(, RT1, 8), to##2; \ movzbl RW2bl, RT3d; \ movzbl RW2bh, RT1d; \ shrl $16, RW2d; \ xorq s7(, RT3, 8), to##2; \ xorq s5(, RT1, 8), to##2; \ movzbl RW2bl, RT3d; \ movzbl RW2bh, RT1d; \ do_movq(RW0, RW2); \ xorq s3(, RT3, 8), to##2; \ xorq s1(, RT1, 8), to##2; #define __movq(src, dst) \ movq src, dst; SYM_FUNC_START(des3_ede_x86_64_crypt_blk_3way) /* input: * %rdi: ctx, round keys * %rsi: dst (3 blocks) * %rdx: src (3 blocks) */ pushq %rbx; pushq %r12; pushq 
%r13; pushq %r14; pushq %r15; pushq %rsi /* dst */ /* load input */ movl 0 * 4(%rdx), RL0d; movl 1 * 4(%rdx), RR0d; movl 2 * 4(%rdx), RL1d; movl 3 * 4(%rdx), RR1d; movl 4 * 4(%rdx), RL2d; movl 5 * 4(%rdx), RR2d; bswapl RL0d; bswapl RR0d; bswapl RL1d; bswapl RR1d; bswapl RL2d; bswapl RR2d; initial_permutation3(RL, RR); movq 0(CTX), RW0; movq RW0, RW1; movq RW0, RW2; round3(0, RR, RL, load_next_key, __movq); round3(1, RL, RR, load_next_key, __movq); round3(2, RR, RL, load_next_key, __movq); round3(3, RL, RR, load_next_key, __movq); round3(4, RR, RL, load_next_key, __movq); round3(5, RL, RR, load_next_key, __movq); round3(6, RR, RL, load_next_key, __movq); round3(7, RL, RR, load_next_key, __movq); round3(8, RR, RL, load_next_key, __movq); round3(9, RL, RR, load_next_key, __movq); round3(10, RR, RL, load_next_key, __movq); round3(11, RL, RR, load_next_key, __movq); round3(12, RR, RL, load_next_key, __movq); round3(13, RL, RR, load_next_key, __movq); round3(14, RR, RL, load_next_key, __movq); round3(15, RL, RR, load_next_key, __movq); round3(16+0, RL, RR, load_next_key, __movq); round3(16+1, RR, RL, load_next_key, __movq); round3(16+2, RL, RR, load_next_key, __movq); round3(16+3, RR, RL, load_next_key, __movq); round3(16+4, RL, RR, load_next_key, __movq); round3(16+5, RR, RL, load_next_key, __movq); round3(16+6, RL, RR, load_next_key, __movq); round3(16+7, RR, RL, load_next_key, __movq); round3(16+8, RL, RR, load_next_key, __movq); round3(16+9, RR, RL, load_next_key, __movq); round3(16+10, RL, RR, load_next_key, __movq); round3(16+11, RR, RL, load_next_key, __movq); round3(16+12, RL, RR, load_next_key, __movq); round3(16+13, RR, RL, load_next_key, __movq); round3(16+14, RL, RR, load_next_key, __movq); round3(16+15, RR, RL, load_next_key, __movq); round3(32+0, RR, RL, load_next_key, __movq); round3(32+1, RL, RR, load_next_key, __movq); round3(32+2, RR, RL, load_next_key, __movq); round3(32+3, RL, RR, load_next_key, __movq); round3(32+4, RR, RL, load_next_key, __movq); round3(32+5, RL, RR, load_next_key, __movq); round3(32+6, RR, RL, load_next_key, __movq); round3(32+7, RL, RR, load_next_key, __movq); round3(32+8, RR, RL, load_next_key, __movq); round3(32+9, RL, RR, load_next_key, __movq); round3(32+10, RR, RL, load_next_key, __movq); round3(32+11, RL, RR, load_next_key, __movq); round3(32+12, RR, RL, load_next_key, __movq); round3(32+13, RL, RR, load_next_key, __movq); round3(32+14, RR, RL, load_next_key, __movq); round3(32+15, RL, RR, dummy2, dummy2); final_permutation3(RR, RL); bswapl RR0d; bswapl RL0d; bswapl RR1d; bswapl RL1d; bswapl RR2d; bswapl RL2d; popq %rsi /* dst */ movl RR0d, 0 * 4(%rsi); movl RL0d, 1 * 4(%rsi); movl RR1d, 2 * 4(%rsi); movl RL1d, 3 * 4(%rsi); movl RR2d, 4 * 4(%rsi); movl RL2d, 5 * 4(%rsi); popq %r15; popq %r14; popq %r13; popq %r12; popq %rbx; RET; SYM_FUNC_END(des3_ede_x86_64_crypt_blk_3way) .section .rodata, "a", @progbits .align 16 .L_s1: .quad 0x0010100001010400, 0x0000000000000000 .quad 0x0000100000010000, 0x0010100001010404 .quad 0x0010100001010004, 0x0000100000010404 .quad 0x0000000000000004, 0x0000100000010000 .quad 0x0000000000000400, 0x0010100001010400 .quad 0x0010100001010404, 0x0000000000000400 .quad 0x0010000001000404, 0x0010100001010004 .quad 0x0010000001000000, 0x0000000000000004 .quad 0x0000000000000404, 0x0010000001000400 .quad 0x0010000001000400, 0x0000100000010400 .quad 0x0000100000010400, 0x0010100001010000 .quad 0x0010100001010000, 0x0010000001000404 .quad 0x0000100000010004, 0x0010000001000004 .quad 0x0010000001000004, 0x0000100000010004 .quad 
0x0000000000000000, 0x0000000000000404 .quad 0x0000100000010404, 0x0010000001000000 .quad 0x0000100000010000, 0x0010100001010404 .quad 0x0000000000000004, 0x0010100001010000 .quad 0x0010100001010400, 0x0010000001000000 .quad 0x0010000001000000, 0x0000000000000400 .quad 0x0010100001010004, 0x0000100000010000 .quad 0x0000100000010400, 0x0010000001000004 .quad 0x0000000000000400, 0x0000000000000004 .quad 0x0010000001000404, 0x0000100000010404 .quad 0x0010100001010404, 0x0000100000010004 .quad 0x0010100001010000, 0x0010000001000404 .quad 0x0010000001000004, 0x0000000000000404 .quad 0x0000100000010404, 0x0010100001010400 .quad 0x0000000000000404, 0x0010000001000400 .quad 0x0010000001000400, 0x0000000000000000 .quad 0x0000100000010004, 0x0000100000010400 .quad 0x0000000000000000, 0x0010100001010004 .L_s2: .quad 0x0801080200100020, 0x0800080000000000 .quad 0x0000080000000000, 0x0001080200100020 .quad 0x0001000000100000, 0x0000000200000020 .quad 0x0801000200100020, 0x0800080200000020 .quad 0x0800000200000020, 0x0801080200100020 .quad 0x0801080000100000, 0x0800000000000000 .quad 0x0800080000000000, 0x0001000000100000 .quad 0x0000000200000020, 0x0801000200100020 .quad 0x0001080000100000, 0x0001000200100020 .quad 0x0800080200000020, 0x0000000000000000 .quad 0x0800000000000000, 0x0000080000000000 .quad 0x0001080200100020, 0x0801000000100000 .quad 0x0001000200100020, 0x0800000200000020 .quad 0x0000000000000000, 0x0001080000100000 .quad 0x0000080200000020, 0x0801080000100000 .quad 0x0801000000100000, 0x0000080200000020 .quad 0x0000000000000000, 0x0001080200100020 .quad 0x0801000200100020, 0x0001000000100000 .quad 0x0800080200000020, 0x0801000000100000 .quad 0x0801080000100000, 0x0000080000000000 .quad 0x0801000000100000, 0x0800080000000000 .quad 0x0000000200000020, 0x0801080200100020 .quad 0x0001080200100020, 0x0000000200000020 .quad 0x0000080000000000, 0x0800000000000000 .quad 0x0000080200000020, 0x0801080000100000 .quad 0x0001000000100000, 0x0800000200000020 .quad 0x0001000200100020, 0x0800080200000020 .quad 0x0800000200000020, 0x0001000200100020 .quad 0x0001080000100000, 0x0000000000000000 .quad 0x0800080000000000, 0x0000080200000020 .quad 0x0800000000000000, 0x0801000200100020 .quad 0x0801080200100020, 0x0001080000100000 .L_s3: .quad 0x0000002000000208, 0x0000202008020200 .quad 0x0000000000000000, 0x0000200008020008 .quad 0x0000002008000200, 0x0000000000000000 .quad 0x0000202000020208, 0x0000002008000200 .quad 0x0000200000020008, 0x0000000008000008 .quad 0x0000000008000008, 0x0000200000020000 .quad 0x0000202008020208, 0x0000200000020008 .quad 0x0000200008020000, 0x0000002000000208 .quad 0x0000000008000000, 0x0000000000000008 .quad 0x0000202008020200, 0x0000002000000200 .quad 0x0000202000020200, 0x0000200008020000 .quad 0x0000200008020008, 0x0000202000020208 .quad 0x0000002008000208, 0x0000202000020200 .quad 0x0000200000020000, 0x0000002008000208 .quad 0x0000000000000008, 0x0000202008020208 .quad 0x0000002000000200, 0x0000000008000000 .quad 0x0000202008020200, 0x0000000008000000 .quad 0x0000200000020008, 0x0000002000000208 .quad 0x0000200000020000, 0x0000202008020200 .quad 0x0000002008000200, 0x0000000000000000 .quad 0x0000002000000200, 0x0000200000020008 .quad 0x0000202008020208, 0x0000002008000200 .quad 0x0000000008000008, 0x0000002000000200 .quad 0x0000000000000000, 0x0000200008020008 .quad 0x0000002008000208, 0x0000200000020000 .quad 0x0000000008000000, 0x0000202008020208 .quad 0x0000000000000008, 0x0000202000020208 .quad 0x0000202000020200, 0x0000000008000008 .quad 0x0000200008020000, 
0x0000002008000208 .quad 0x0000002000000208, 0x0000200008020000 .quad 0x0000202000020208, 0x0000000000000008 .quad 0x0000200008020008, 0x0000202000020200 .L_s4: .quad 0x1008020000002001, 0x1000020800002001 .quad 0x1000020800002001, 0x0000000800000000 .quad 0x0008020800002000, 0x1008000800000001 .quad 0x1008000000000001, 0x1000020000002001 .quad 0x0000000000000000, 0x0008020000002000 .quad 0x0008020000002000, 0x1008020800002001 .quad 0x1000000800000001, 0x0000000000000000 .quad 0x0008000800000000, 0x1008000000000001 .quad 0x1000000000000001, 0x0000020000002000 .quad 0x0008000000000000, 0x1008020000002001 .quad 0x0000000800000000, 0x0008000000000000 .quad 0x1000020000002001, 0x0000020800002000 .quad 0x1008000800000001, 0x1000000000000001 .quad 0x0000020800002000, 0x0008000800000000 .quad 0x0000020000002000, 0x0008020800002000 .quad 0x1008020800002001, 0x1000000800000001 .quad 0x0008000800000000, 0x1008000000000001 .quad 0x0008020000002000, 0x1008020800002001 .quad 0x1000000800000001, 0x0000000000000000 .quad 0x0000000000000000, 0x0008020000002000 .quad 0x0000020800002000, 0x0008000800000000 .quad 0x1008000800000001, 0x1000000000000001 .quad 0x1008020000002001, 0x1000020800002001 .quad 0x1000020800002001, 0x0000000800000000 .quad 0x1008020800002001, 0x1000000800000001 .quad 0x1000000000000001, 0x0000020000002000 .quad 0x1008000000000001, 0x1000020000002001 .quad 0x0008020800002000, 0x1008000800000001 .quad 0x1000020000002001, 0x0000020800002000 .quad 0x0008000000000000, 0x1008020000002001 .quad 0x0000000800000000, 0x0008000000000000 .quad 0x0000020000002000, 0x0008020800002000 .L_s5: .quad 0x0000001000000100, 0x0020001002080100 .quad 0x0020000002080000, 0x0420001002000100 .quad 0x0000000000080000, 0x0000001000000100 .quad 0x0400000000000000, 0x0020000002080000 .quad 0x0400001000080100, 0x0000000000080000 .quad 0x0020001002000100, 0x0400001000080100 .quad 0x0420001002000100, 0x0420000002080000 .quad 0x0000001000080100, 0x0400000000000000 .quad 0x0020000002000000, 0x0400000000080000 .quad 0x0400000000080000, 0x0000000000000000 .quad 0x0400001000000100, 0x0420001002080100 .quad 0x0420001002080100, 0x0020001002000100 .quad 0x0420000002080000, 0x0400001000000100 .quad 0x0000000000000000, 0x0420000002000000 .quad 0x0020001002080100, 0x0020000002000000 .quad 0x0420000002000000, 0x0000001000080100 .quad 0x0000000000080000, 0x0420001002000100 .quad 0x0000001000000100, 0x0020000002000000 .quad 0x0400000000000000, 0x0020000002080000 .quad 0x0420001002000100, 0x0400001000080100 .quad 0x0020001002000100, 0x0400000000000000 .quad 0x0420000002080000, 0x0020001002080100 .quad 0x0400001000080100, 0x0000001000000100 .quad 0x0020000002000000, 0x0420000002080000 .quad 0x0420001002080100, 0x0000001000080100 .quad 0x0420000002000000, 0x0420001002080100 .quad 0x0020000002080000, 0x0000000000000000 .quad 0x0400000000080000, 0x0420000002000000 .quad 0x0000001000080100, 0x0020001002000100 .quad 0x0400001000000100, 0x0000000000080000 .quad 0x0000000000000000, 0x0400000000080000 .quad 0x0020001002080100, 0x0400001000000100 .L_s6: .quad 0x0200000120000010, 0x0204000020000000 .quad 0x0000040000000000, 0x0204040120000010 .quad 0x0204000020000000, 0x0000000100000010 .quad 0x0204040120000010, 0x0004000000000000 .quad 0x0200040020000000, 0x0004040100000010 .quad 0x0004000000000000, 0x0200000120000010 .quad 0x0004000100000010, 0x0200040020000000 .quad 0x0200000020000000, 0x0000040100000010 .quad 0x0000000000000000, 0x0004000100000010 .quad 0x0200040120000010, 0x0000040000000000 .quad 0x0004040000000000, 0x0200040120000010 
.quad 0x0000000100000010, 0x0204000120000010 .quad 0x0204000120000010, 0x0000000000000000 .quad 0x0004040100000010, 0x0204040020000000 .quad 0x0000040100000010, 0x0004040000000000 .quad 0x0204040020000000, 0x0200000020000000 .quad 0x0200040020000000, 0x0000000100000010 .quad 0x0204000120000010, 0x0004040000000000 .quad 0x0204040120000010, 0x0004000000000000 .quad 0x0000040100000010, 0x0200000120000010 .quad 0x0004000000000000, 0x0200040020000000 .quad 0x0200000020000000, 0x0000040100000010 .quad 0x0200000120000010, 0x0204040120000010 .quad 0x0004040000000000, 0x0204000020000000 .quad 0x0004040100000010, 0x0204040020000000 .quad 0x0000000000000000, 0x0204000120000010 .quad 0x0000000100000010, 0x0000040000000000 .quad 0x0204000020000000, 0x0004040100000010 .quad 0x0000040000000000, 0x0004000100000010 .quad 0x0200040120000010, 0x0000000000000000 .quad 0x0204040020000000, 0x0200000020000000 .quad 0x0004000100000010, 0x0200040120000010 .L_s7: .quad 0x0002000000200000, 0x2002000004200002 .quad 0x2000000004000802, 0x0000000000000000 .quad 0x0000000000000800, 0x2000000004000802 .quad 0x2002000000200802, 0x0002000004200800 .quad 0x2002000004200802, 0x0002000000200000 .quad 0x0000000000000000, 0x2000000004000002 .quad 0x2000000000000002, 0x0000000004000000 .quad 0x2002000004200002, 0x2000000000000802 .quad 0x0000000004000800, 0x2002000000200802 .quad 0x2002000000200002, 0x0000000004000800 .quad 0x2000000004000002, 0x0002000004200000 .quad 0x0002000004200800, 0x2002000000200002 .quad 0x0002000004200000, 0x0000000000000800 .quad 0x2000000000000802, 0x2002000004200802 .quad 0x0002000000200800, 0x2000000000000002 .quad 0x0000000004000000, 0x0002000000200800 .quad 0x0000000004000000, 0x0002000000200800 .quad 0x0002000000200000, 0x2000000004000802 .quad 0x2000000004000802, 0x2002000004200002 .quad 0x2002000004200002, 0x2000000000000002 .quad 0x2002000000200002, 0x0000000004000000 .quad 0x0000000004000800, 0x0002000000200000 .quad 0x0002000004200800, 0x2000000000000802 .quad 0x2002000000200802, 0x0002000004200800 .quad 0x2000000000000802, 0x2000000004000002 .quad 0x2002000004200802, 0x0002000004200000 .quad 0x0002000000200800, 0x0000000000000000 .quad 0x2000000000000002, 0x2002000004200802 .quad 0x0000000000000000, 0x2002000000200802 .quad 0x0002000004200000, 0x0000000000000800 .quad 0x2000000004000002, 0x0000000004000800 .quad 0x0000000000000800, 0x2002000000200002 .L_s8: .quad 0x0100010410001000, 0x0000010000001000 .quad 0x0000000000040000, 0x0100010410041000 .quad 0x0100000010000000, 0x0100010410001000 .quad 0x0000000400000000, 0x0100000010000000 .quad 0x0000000400040000, 0x0100000010040000 .quad 0x0100010410041000, 0x0000010000041000 .quad 0x0100010010041000, 0x0000010400041000 .quad 0x0000010000001000, 0x0000000400000000 .quad 0x0100000010040000, 0x0100000410000000 .quad 0x0100010010001000, 0x0000010400001000 .quad 0x0000010000041000, 0x0000000400040000 .quad 0x0100000410040000, 0x0100010010041000 .quad 0x0000010400001000, 0x0000000000000000 .quad 0x0000000000000000, 0x0100000410040000 .quad 0x0100000410000000, 0x0100010010001000 .quad 0x0000010400041000, 0x0000000000040000 .quad 0x0000010400041000, 0x0000000000040000 .quad 0x0100010010041000, 0x0000010000001000 .quad 0x0000000400000000, 0x0100000410040000 .quad 0x0000010000001000, 0x0000010400041000 .quad 0x0100010010001000, 0x0000000400000000 .quad 0x0100000410000000, 0x0100000010040000 .quad 0x0100000410040000, 0x0100000010000000 .quad 0x0000000000040000, 0x0100010410001000 .quad 0x0000000000000000, 0x0100010410041000 .quad 0x0000000400040000, 
0x0100000410000000 .quad 0x0100000010040000, 0x0100010010001000 .quad 0x0100010410001000, 0x0000000000000000 .quad 0x0100010410041000, 0x0000010000041000 .quad 0x0000010000041000, 0x0000010400001000 .quad 0x0000010400001000, 0x0000000400040000 .quad 0x0100000010000000, 0x0100010010041000
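The initial_permutation()/final_permutation() macros near the top of this file are composed from do_permutation(), which is the classic bit "delta swap". A small C model of that primitive (names are illustrative only):

#include <stdint.h>

/* Swap, between *a (viewed shifted right by 'offset') and *b, exactly
 * the bit positions selected by 'mask'.  Composing these swaps with
 * the offset/mask pairs used above (4/0x0f0f0f0f, 16/0x0000ffff,
 * 2/0x33333333, 8/0x00ff00ff) yields the DES initial and final
 * permutations. */
static void delta_swap(uint32_t *a, uint32_t *b,
		       unsigned int offset, uint32_t mask)
{
	uint32_t t = ((*a >> offset) ^ *b) & mask;

	*b ^= t;
	*a ^= t << offset;
}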
aixcc-public/challenge-001-exemplar-source
12,884
arch/x86/crypto/cast5-avx-x86_64-asm_64.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Cast5 Cipher 16-way parallel algorithm (AVX/x86_64) * * Copyright (C) 2012 Johannes Goetzfried * <Johannes.Goetzfried@informatik.stud.uni-erlangen.de> * * Copyright © 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> */ #include <linux/linkage.h> #include <asm/frame.h> .file "cast5-avx-x86_64-asm_64.S" .extern cast_s1 .extern cast_s2 .extern cast_s3 .extern cast_s4 /* structure of crypto context */ #define km 0 #define kr (16*4) #define rr ((16*4)+16) /* s-boxes */ #define s1 cast_s1 #define s2 cast_s2 #define s3 cast_s3 #define s4 cast_s4 /********************************************************************** 16-way AVX cast5 **********************************************************************/ #define CTX %r15 #define RL1 %xmm0 #define RR1 %xmm1 #define RL2 %xmm2 #define RR2 %xmm3 #define RL3 %xmm4 #define RR3 %xmm5 #define RL4 %xmm6 #define RR4 %xmm7 #define RX %xmm8 #define RKM %xmm9 #define RKR %xmm10 #define RKRF %xmm11 #define RKRR %xmm12 #define R32 %xmm13 #define R1ST %xmm14 #define RTMP %xmm15 #define RID1 %rdi #define RID1d %edi #define RID2 %rsi #define RID2d %esi #define RGI1 %rdx #define RGI1bl %dl #define RGI1bh %dh #define RGI2 %rcx #define RGI2bl %cl #define RGI2bh %ch #define RGI3 %rax #define RGI3bl %al #define RGI3bh %ah #define RGI4 %rbx #define RGI4bl %bl #define RGI4bh %bh #define RFS1 %r8 #define RFS1d %r8d #define RFS2 %r9 #define RFS2d %r9d #define RFS3 %r10 #define RFS3d %r10d #define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \ movzbl src ## bh, RID1d; \ movzbl src ## bl, RID2d; \ shrq $16, src; \ movl s1(, RID1, 4), dst ## d; \ op1 s2(, RID2, 4), dst ## d; \ movzbl src ## bh, RID1d; \ movzbl src ## bl, RID2d; \ interleave_op(il_reg); \ op2 s3(, RID1, 4), dst ## d; \ op3 s4(, RID2, 4), dst ## d; #define dummy(d) /* do nothing */ #define shr_next(reg) \ shrq $16, reg; #define F_head(a, x, gi1, gi2, op0) \ op0 a, RKM, x; \ vpslld RKRF, x, RTMP; \ vpsrld RKRR, x, x; \ vpor RTMP, x, x; \ \ vmovq x, gi1; \ vpextrq $1, x, gi2; #define F_tail(a, x, gi1, gi2, op1, op2, op3) \ lookup_32bit(##gi1, RFS1, op1, op2, op3, shr_next, ##gi1); \ lookup_32bit(##gi2, RFS3, op1, op2, op3, shr_next, ##gi2); \ \ lookup_32bit(##gi1, RFS2, op1, op2, op3, dummy, none); \ shlq $32, RFS2; \ orq RFS1, RFS2; \ lookup_32bit(##gi2, RFS1, op1, op2, op3, dummy, none); \ shlq $32, RFS1; \ orq RFS1, RFS3; \ \ vmovq RFS2, x; \ vpinsrq $1, RFS3, x, x; #define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \ F_head(b1, RX, RGI1, RGI2, op0); \ F_head(b2, RX, RGI3, RGI4, op0); \ \ F_tail(b1, RX, RGI1, RGI2, op1, op2, op3); \ F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3); \ \ vpxor a1, RX, a1; \ vpxor a2, RTMP, a2; #define F1_2(a1, b1, a2, b2) \ F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl) #define F2_2(a1, b1, a2, b2) \ F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl) #define F3_2(a1, b1, a2, b2) \ F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl) #define subround(a1, b1, a2, b2, f) \ F ## f ## _2(a1, b1, a2, b2); #define round(l, r, n, f) \ vbroadcastss (km+(4*n))(CTX), RKM; \ vpand R1ST, RKR, RKRF; \ vpsubq RKRF, R32, RKRR; \ vpsrldq $1, RKR, RKR; \ subround(l ## 1, r ## 1, l ## 2, r ## 2, f); \ subround(l ## 3, r ## 3, l ## 4, r ## 4, f); #define enc_preload_rkr() \ vbroadcastss .L16_mask, RKR; \ /* add 16-bit rotation to key rotations (mod 32) */ \ vpxor kr(CTX), RKR, RKR; #define dec_preload_rkr() \ vbroadcastss .L16_mask, RKR; \ /* add 16-bit rotation to key rotations (mod 32) */ \ vpxor kr(CTX), RKR, RKR; \ vpshufb .Lbswap128_mask, RKR, RKR; 
#define transpose_2x4(x0, x1, t0, t1) \ vpunpckldq x1, x0, t0; \ vpunpckhdq x1, x0, t1; \ \ vpunpcklqdq t1, t0, x0; \ vpunpckhqdq t1, t0, x1; #define inpack_blocks(x0, x1, t0, t1, rmask) \ vpshufb rmask, x0, x0; \ vpshufb rmask, x1, x1; \ \ transpose_2x4(x0, x1, t0, t1) #define outunpack_blocks(x0, x1, t0, t1, rmask) \ transpose_2x4(x0, x1, t0, t1) \ \ vpshufb rmask, x0, x0; \ vpshufb rmask, x1, x1; .section .rodata.cst16.bswap_mask, "aM", @progbits, 16 .align 16 .Lbswap_mask: .byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 .section .rodata.cst16.bswap128_mask, "aM", @progbits, 16 .align 16 .Lbswap128_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 .section .rodata.cst16.bswap_iv_mask, "aM", @progbits, 16 .align 16 .Lbswap_iv_mask: .byte 7, 6, 5, 4, 3, 2, 1, 0, 7, 6, 5, 4, 3, 2, 1, 0 .section .rodata.cst4.16_mask, "aM", @progbits, 4 .align 4 .L16_mask: .byte 16, 16, 16, 16 .section .rodata.cst4.32_mask, "aM", @progbits, 4 .align 4 .L32_mask: .byte 32, 0, 0, 0 .section .rodata.cst4.first_mask, "aM", @progbits, 4 .align 4 .Lfirst_mask: .byte 0x1f, 0, 0, 0 .text .align 16 SYM_FUNC_START_LOCAL(__cast5_enc_blk16) /* input: * %rdi: ctx * RL1: blocks 1 and 2 * RR1: blocks 3 and 4 * RL2: blocks 5 and 6 * RR2: blocks 7 and 8 * RL3: blocks 9 and 10 * RR3: blocks 11 and 12 * RL4: blocks 13 and 14 * RR4: blocks 15 and 16 * output: * RL1: encrypted blocks 1 and 2 * RR1: encrypted blocks 3 and 4 * RL2: encrypted blocks 5 and 6 * RR2: encrypted blocks 7 and 8 * RL3: encrypted blocks 9 and 10 * RR3: encrypted blocks 11 and 12 * RL4: encrypted blocks 13 and 14 * RR4: encrypted blocks 15 and 16 */ pushq %r15; pushq %rbx; movq %rdi, CTX; vmovdqa .Lbswap_mask, RKM; vmovd .Lfirst_mask, R1ST; vmovd .L32_mask, R32; enc_preload_rkr(); inpack_blocks(RL1, RR1, RTMP, RX, RKM); inpack_blocks(RL2, RR2, RTMP, RX, RKM); inpack_blocks(RL3, RR3, RTMP, RX, RKM); inpack_blocks(RL4, RR4, RTMP, RX, RKM); round(RL, RR, 0, 1); round(RR, RL, 1, 2); round(RL, RR, 2, 3); round(RR, RL, 3, 1); round(RL, RR, 4, 2); round(RR, RL, 5, 3); round(RL, RR, 6, 1); round(RR, RL, 7, 2); round(RL, RR, 8, 3); round(RR, RL, 9, 1); round(RL, RR, 10, 2); round(RR, RL, 11, 3); movzbl rr(CTX), %eax; testl %eax, %eax; jnz .L__skip_enc; round(RL, RR, 12, 1); round(RR, RL, 13, 2); round(RL, RR, 14, 3); round(RR, RL, 15, 1); .L__skip_enc: popq %rbx; popq %r15; vmovdqa .Lbswap_mask, RKM; outunpack_blocks(RR1, RL1, RTMP, RX, RKM); outunpack_blocks(RR2, RL2, RTMP, RX, RKM); outunpack_blocks(RR3, RL3, RTMP, RX, RKM); outunpack_blocks(RR4, RL4, RTMP, RX, RKM); RET; SYM_FUNC_END(__cast5_enc_blk16) .align 16 SYM_FUNC_START_LOCAL(__cast5_dec_blk16) /* input: * %rdi: ctx * RL1: encrypted blocks 1 and 2 * RR1: encrypted blocks 3 and 4 * RL2: encrypted blocks 5 and 6 * RR2: encrypted blocks 7 and 8 * RL3: encrypted blocks 9 and 10 * RR3: encrypted blocks 11 and 12 * RL4: encrypted blocks 13 and 14 * RR4: encrypted blocks 15 and 16 * output: * RL1: decrypted blocks 1 and 2 * RR1: decrypted blocks 3 and 4 * RL2: decrypted blocks 5 and 6 * RR2: decrypted blocks 7 and 8 * RL3: decrypted blocks 9 and 10 * RR3: decrypted blocks 11 and 12 * RL4: decrypted blocks 13 and 14 * RR4: decrypted blocks 15 and 16 */ pushq %r15; pushq %rbx; movq %rdi, CTX; vmovdqa .Lbswap_mask, RKM; vmovd .Lfirst_mask, R1ST; vmovd .L32_mask, R32; dec_preload_rkr(); inpack_blocks(RL1, RR1, RTMP, RX, RKM); inpack_blocks(RL2, RR2, RTMP, RX, RKM); inpack_blocks(RL3, RR3, RTMP, RX, RKM); inpack_blocks(RL4, RR4, RTMP, RX, RKM); movzbl rr(CTX), %eax; testl %eax, %eax; jnz 
.L__skip_dec; round(RL, RR, 15, 1); round(RR, RL, 14, 3); round(RL, RR, 13, 2); round(RR, RL, 12, 1); .L__dec_tail: round(RL, RR, 11, 3); round(RR, RL, 10, 2); round(RL, RR, 9, 1); round(RR, RL, 8, 3); round(RL, RR, 7, 2); round(RR, RL, 6, 1); round(RL, RR, 5, 3); round(RR, RL, 4, 2); round(RL, RR, 3, 1); round(RR, RL, 2, 3); round(RL, RR, 1, 2); round(RR, RL, 0, 1); vmovdqa .Lbswap_mask, RKM; popq %rbx; popq %r15; outunpack_blocks(RR1, RL1, RTMP, RX, RKM); outunpack_blocks(RR2, RL2, RTMP, RX, RKM); outunpack_blocks(RR3, RL3, RTMP, RX, RKM); outunpack_blocks(RR4, RL4, RTMP, RX, RKM); RET; .L__skip_dec: vpsrldq $4, RKR, RKR; jmp .L__dec_tail; SYM_FUNC_END(__cast5_dec_blk16) SYM_FUNC_START(cast5_ecb_enc_16way) /* input: * %rdi: ctx * %rsi: dst * %rdx: src */ FRAME_BEGIN pushq %r15; movq %rdi, CTX; movq %rsi, %r11; vmovdqu (0*4*4)(%rdx), RL1; vmovdqu (1*4*4)(%rdx), RR1; vmovdqu (2*4*4)(%rdx), RL2; vmovdqu (3*4*4)(%rdx), RR2; vmovdqu (4*4*4)(%rdx), RL3; vmovdqu (5*4*4)(%rdx), RR3; vmovdqu (6*4*4)(%rdx), RL4; vmovdqu (7*4*4)(%rdx), RR4; call __cast5_enc_blk16; vmovdqu RR1, (0*4*4)(%r11); vmovdqu RL1, (1*4*4)(%r11); vmovdqu RR2, (2*4*4)(%r11); vmovdqu RL2, (3*4*4)(%r11); vmovdqu RR3, (4*4*4)(%r11); vmovdqu RL3, (5*4*4)(%r11); vmovdqu RR4, (6*4*4)(%r11); vmovdqu RL4, (7*4*4)(%r11); popq %r15; FRAME_END RET; SYM_FUNC_END(cast5_ecb_enc_16way) SYM_FUNC_START(cast5_ecb_dec_16way) /* input: * %rdi: ctx * %rsi: dst * %rdx: src */ FRAME_BEGIN pushq %r15; movq %rdi, CTX; movq %rsi, %r11; vmovdqu (0*4*4)(%rdx), RL1; vmovdqu (1*4*4)(%rdx), RR1; vmovdqu (2*4*4)(%rdx), RL2; vmovdqu (3*4*4)(%rdx), RR2; vmovdqu (4*4*4)(%rdx), RL3; vmovdqu (5*4*4)(%rdx), RR3; vmovdqu (6*4*4)(%rdx), RL4; vmovdqu (7*4*4)(%rdx), RR4; call __cast5_dec_blk16; vmovdqu RR1, (0*4*4)(%r11); vmovdqu RL1, (1*4*4)(%r11); vmovdqu RR2, (2*4*4)(%r11); vmovdqu RL2, (3*4*4)(%r11); vmovdqu RR3, (4*4*4)(%r11); vmovdqu RL3, (5*4*4)(%r11); vmovdqu RR4, (6*4*4)(%r11); vmovdqu RL4, (7*4*4)(%r11); popq %r15; FRAME_END RET; SYM_FUNC_END(cast5_ecb_dec_16way) SYM_FUNC_START(cast5_cbc_dec_16way) /* input: * %rdi: ctx * %rsi: dst * %rdx: src */ FRAME_BEGIN pushq %r12; pushq %r15; movq %rdi, CTX; movq %rsi, %r11; movq %rdx, %r12; vmovdqu (0*16)(%rdx), RL1; vmovdqu (1*16)(%rdx), RR1; vmovdqu (2*16)(%rdx), RL2; vmovdqu (3*16)(%rdx), RR2; vmovdqu (4*16)(%rdx), RL3; vmovdqu (5*16)(%rdx), RR3; vmovdqu (6*16)(%rdx), RL4; vmovdqu (7*16)(%rdx), RR4; call __cast5_dec_blk16; /* xor with src */ vmovq (%r12), RX; vpshufd $0x4f, RX, RX; vpxor RX, RR1, RR1; vpxor 0*16+8(%r12), RL1, RL1; vpxor 1*16+8(%r12), RR2, RR2; vpxor 2*16+8(%r12), RL2, RL2; vpxor 3*16+8(%r12), RR3, RR3; vpxor 4*16+8(%r12), RL3, RL3; vpxor 5*16+8(%r12), RR4, RR4; vpxor 6*16+8(%r12), RL4, RL4; vmovdqu RR1, (0*16)(%r11); vmovdqu RL1, (1*16)(%r11); vmovdqu RR2, (2*16)(%r11); vmovdqu RL2, (3*16)(%r11); vmovdqu RR3, (4*16)(%r11); vmovdqu RL3, (5*16)(%r11); vmovdqu RR4, (6*16)(%r11); vmovdqu RL4, (7*16)(%r11); popq %r15; popq %r12; FRAME_END RET; SYM_FUNC_END(cast5_cbc_dec_16way) SYM_FUNC_START(cast5_ctr_16way) /* input: * %rdi: ctx * %rsi: dst * %rdx: src * %rcx: iv (big endian, 64bit) */ FRAME_BEGIN pushq %r12; pushq %r15; movq %rdi, CTX; movq %rsi, %r11; movq %rdx, %r12; vpcmpeqd RTMP, RTMP, RTMP; vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */ vpcmpeqd RKR, RKR, RKR; vpaddq RKR, RKR, RKR; /* low: -2, high: -2 */ vmovdqa .Lbswap_iv_mask, R1ST; vmovdqa .Lbswap128_mask, RKM; /* load IV and byteswap */ vmovq (%rcx), RX; vpshufb R1ST, RX, RX; /* construct IVs */ vpsubq RTMP, RX, RX; /* le: IV1, IV0 */ 
vpshufb RKM, RX, RL1; /* be: IV0, IV1 */ vpsubq RKR, RX, RX; vpshufb RKM, RX, RR1; /* be: IV2, IV3 */ vpsubq RKR, RX, RX; vpshufb RKM, RX, RL2; /* be: IV4, IV5 */ vpsubq RKR, RX, RX; vpshufb RKM, RX, RR2; /* be: IV6, IV7 */ vpsubq RKR, RX, RX; vpshufb RKM, RX, RL3; /* be: IV8, IV9 */ vpsubq RKR, RX, RX; vpshufb RKM, RX, RR3; /* be: IV10, IV11 */ vpsubq RKR, RX, RX; vpshufb RKM, RX, RL4; /* be: IV12, IV13 */ vpsubq RKR, RX, RX; vpshufb RKM, RX, RR4; /* be: IV14, IV15 */ /* store last IV */ vpsubq RTMP, RX, RX; /* le: IV16, IV14 */ vpshufb R1ST, RX, RX; /* be: IV16, IV16 */ vmovq RX, (%rcx); call __cast5_enc_blk16; /* dst = src ^ iv */ vpxor (0*16)(%r12), RR1, RR1; vpxor (1*16)(%r12), RL1, RL1; vpxor (2*16)(%r12), RR2, RR2; vpxor (3*16)(%r12), RL2, RL2; vpxor (4*16)(%r12), RR3, RR3; vpxor (5*16)(%r12), RL3, RL3; vpxor (6*16)(%r12), RR4, RR4; vpxor (7*16)(%r12), RL4, RL4; vmovdqu RR1, (0*16)(%r11); vmovdqu RL1, (1*16)(%r11); vmovdqu RR2, (2*16)(%r11); vmovdqu RL2, (3*16)(%r11); vmovdqu RR3, (4*16)(%r11); vmovdqu RL3, (5*16)(%r11); vmovdqu RR4, (6*16)(%r11); vmovdqu RL4, (7*16)(%r11); popq %r15; popq %r12; FRAME_END RET; SYM_FUNC_END(cast5_ctr_16way)
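The round()/F1_2()/F2_2()/F3_2() macros above implement the three CAST5 round-function types from RFC 2144, vectorised two blocks at a time. For orientation, a scalar C sketch of the type-1 function (the S-box pointers and names below are stand-ins for the shared cast_s1..cast_s4 tables, not the kernel's declarations):

#include <stdint.h>

static uint32_t rotl32(uint32_t v, unsigned int r)
{
	r &= 31;
	return r ? (v << r) | (v >> (32 - r)) : v;
}

/* RFC 2144 f1: I = (Km + D) <<< Kr, then combine the four S-box
 * lookups with xor, subtract, add.  Types 2 and 3 use xor/subtract
 * for the key mix and rotate the operator pattern, as F2_2/F3_2
 * do above. */
static uint32_t cast5_f1(const uint32_t *s1, const uint32_t *s2,
			 const uint32_t *s3, const uint32_t *s4,
			 uint32_t d, uint32_t km, uint8_t kr)
{
	uint32_t i = rotl32(km + d, kr);

	return ((s1[i >> 24] ^ s2[(i >> 16) & 0xff]) -
		s3[(i >> 8) & 0xff]) + s4[i & 0xff];
}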
aixcc-public/challenge-001-exemplar-source
21,576
arch/x86/crypto/serpent-avx-x86_64-asm_64.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Serpent Cipher 8-way parallel algorithm (x86_64/AVX) * * Copyright (C) 2012 Johannes Goetzfried * <Johannes.Goetzfried@informatik.stud.uni-erlangen.de> * * Copyright © 2011-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi> */ #include <linux/linkage.h> #include <asm/frame.h> #include "glue_helper-asm-avx.S" .file "serpent-avx-x86_64-asm_64.S" .section .rodata.cst16.bswap128_mask, "aM", @progbits, 16 .align 16 .Lbswap128_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 .text #define CTX %rdi /********************************************************************** 8-way AVX serpent **********************************************************************/ #define RA1 %xmm0 #define RB1 %xmm1 #define RC1 %xmm2 #define RD1 %xmm3 #define RE1 %xmm4 #define tp %xmm5 #define RA2 %xmm6 #define RB2 %xmm7 #define RC2 %xmm8 #define RD2 %xmm9 #define RE2 %xmm10 #define RNOT %xmm11 #define RK0 %xmm12 #define RK1 %xmm13 #define RK2 %xmm14 #define RK3 %xmm15 #define S0_1(x0, x1, x2, x3, x4) \ vpor x0, x3, tp; \ vpxor x3, x0, x0; \ vpxor x2, x3, x4; \ vpxor RNOT, x4, x4; \ vpxor x1, tp, x3; \ vpand x0, x1, x1; \ vpxor x4, x1, x1; \ vpxor x0, x2, x2; #define S0_2(x0, x1, x2, x3, x4) \ vpxor x3, x0, x0; \ vpor x0, x4, x4; \ vpxor x2, x0, x0; \ vpand x1, x2, x2; \ vpxor x2, x3, x3; \ vpxor RNOT, x1, x1; \ vpxor x4, x2, x2; \ vpxor x2, x1, x1; #define S1_1(x0, x1, x2, x3, x4) \ vpxor x0, x1, tp; \ vpxor x3, x0, x0; \ vpxor RNOT, x3, x3; \ vpand tp, x1, x4; \ vpor tp, x0, x0; \ vpxor x2, x3, x3; \ vpxor x3, x0, x0; \ vpxor x3, tp, x1; #define S1_2(x0, x1, x2, x3, x4) \ vpxor x4, x3, x3; \ vpor x4, x1, x1; \ vpxor x2, x4, x4; \ vpand x0, x2, x2; \ vpxor x1, x2, x2; \ vpor x0, x1, x1; \ vpxor RNOT, x0, x0; \ vpxor x2, x0, x0; \ vpxor x1, x4, x4; #define S2_1(x0, x1, x2, x3, x4) \ vpxor RNOT, x3, x3; \ vpxor x0, x1, x1; \ vpand x2, x0, tp; \ vpxor x3, tp, tp; \ vpor x0, x3, x3; \ vpxor x1, x2, x2; \ vpxor x1, x3, x3; \ vpand tp, x1, x1; #define S2_2(x0, x1, x2, x3, x4) \ vpxor x2, tp, tp; \ vpand x3, x2, x2; \ vpor x1, x3, x3; \ vpxor RNOT, tp, tp; \ vpxor tp, x3, x3; \ vpxor tp, x0, x4; \ vpxor x2, tp, x0; \ vpor x2, x1, x1; #define S3_1(x0, x1, x2, x3, x4) \ vpxor x3, x1, tp; \ vpor x0, x3, x3; \ vpand x0, x1, x4; \ vpxor x2, x0, x0; \ vpxor tp, x2, x2; \ vpand x3, tp, x1; \ vpxor x3, x2, x2; \ vpor x4, x0, x0; \ vpxor x3, x4, x4; #define S3_2(x0, x1, x2, x3, x4) \ vpxor x0, x1, x1; \ vpand x3, x0, x0; \ vpand x4, x3, x3; \ vpxor x2, x3, x3; \ vpor x1, x4, x4; \ vpand x1, x2, x2; \ vpxor x3, x4, x4; \ vpxor x3, x0, x0; \ vpxor x2, x3, x3; #define S4_1(x0, x1, x2, x3, x4) \ vpand x0, x3, tp; \ vpxor x3, x0, x0; \ vpxor x2, tp, tp; \ vpor x3, x2, x2; \ vpxor x1, x0, x0; \ vpxor tp, x3, x4; \ vpor x0, x2, x2; \ vpxor x1, x2, x2; #define S4_2(x0, x1, x2, x3, x4) \ vpand x0, x1, x1; \ vpxor x4, x1, x1; \ vpand x2, x4, x4; \ vpxor tp, x2, x2; \ vpxor x0, x4, x4; \ vpor x1, tp, x3; \ vpxor RNOT, x1, x1; \ vpxor x0, x3, x3; #define S5_1(x0, x1, x2, x3, x4) \ vpor x0, x1, tp; \ vpxor tp, x2, x2; \ vpxor RNOT, x3, x3; \ vpxor x0, x1, x4; \ vpxor x2, x0, x0; \ vpand x4, tp, x1; \ vpor x3, x4, x4; \ vpxor x0, x4, x4; #define S5_2(x0, x1, x2, x3, x4) \ vpand x3, x0, x0; \ vpxor x3, x1, x1; \ vpxor x2, x3, x3; \ vpxor x1, x0, x0; \ vpand x4, x2, x2; \ vpxor x2, x1, x1; \ vpand x0, x2, x2; \ vpxor x2, x3, x3; #define S6_1(x0, x1, x2, x3, x4) \ vpxor x0, x3, x3; \ vpxor x2, x1, tp; \ vpxor x0, x2, x2; \ vpand x3, x0, x0; \ vpor x3, tp, tp; \ vpxor RNOT, x1, x4; \ vpxor 
tp, x0, x0; \ vpxor x2, tp, x1; #define S6_2(x0, x1, x2, x3, x4) \ vpxor x4, x3, x3; \ vpxor x0, x4, x4; \ vpand x0, x2, x2; \ vpxor x1, x4, x4; \ vpxor x3, x2, x2; \ vpand x1, x3, x3; \ vpxor x0, x3, x3; \ vpxor x2, x1, x1; #define S7_1(x0, x1, x2, x3, x4) \ vpxor RNOT, x1, tp; \ vpxor RNOT, x0, x0; \ vpand x2, tp, x1; \ vpxor x3, x1, x1; \ vpor tp, x3, x3; \ vpxor x2, tp, x4; \ vpxor x3, x2, x2; \ vpxor x0, x3, x3; \ vpor x1, x0, x0; #define S7_2(x0, x1, x2, x3, x4) \ vpand x0, x2, x2; \ vpxor x4, x0, x0; \ vpxor x3, x4, x4; \ vpand x0, x3, x3; \ vpxor x1, x4, x4; \ vpxor x4, x2, x2; \ vpxor x1, x3, x3; \ vpor x0, x4, x4; \ vpxor x1, x4, x4; #define SI0_1(x0, x1, x2, x3, x4) \ vpxor x0, x1, x1; \ vpor x1, x3, tp; \ vpxor x1, x3, x4; \ vpxor RNOT, x0, x0; \ vpxor tp, x2, x2; \ vpxor x0, tp, x3; \ vpand x1, x0, x0; \ vpxor x2, x0, x0; #define SI0_2(x0, x1, x2, x3, x4) \ vpand x3, x2, x2; \ vpxor x4, x3, x3; \ vpxor x3, x2, x2; \ vpxor x3, x1, x1; \ vpand x0, x3, x3; \ vpxor x0, x1, x1; \ vpxor x2, x0, x0; \ vpxor x3, x4, x4; #define SI1_1(x0, x1, x2, x3, x4) \ vpxor x3, x1, x1; \ vpxor x2, x0, tp; \ vpxor RNOT, x2, x2; \ vpor x1, x0, x4; \ vpxor x3, x4, x4; \ vpand x1, x3, x3; \ vpxor x2, x1, x1; \ vpand x4, x2, x2; #define SI1_2(x0, x1, x2, x3, x4) \ vpxor x1, x4, x4; \ vpor x3, x1, x1; \ vpxor tp, x3, x3; \ vpxor tp, x2, x2; \ vpor x4, tp, x0; \ vpxor x4, x2, x2; \ vpxor x0, x1, x1; \ vpxor x1, x4, x4; #define SI2_1(x0, x1, x2, x3, x4) \ vpxor x1, x2, x2; \ vpxor RNOT, x3, tp; \ vpor x2, tp, tp; \ vpxor x3, x2, x2; \ vpxor x0, x3, x4; \ vpxor x1, tp, x3; \ vpor x2, x1, x1; \ vpxor x0, x2, x2; #define SI2_2(x0, x1, x2, x3, x4) \ vpxor x4, x1, x1; \ vpor x3, x4, x4; \ vpxor x3, x2, x2; \ vpxor x2, x4, x4; \ vpand x1, x2, x2; \ vpxor x3, x2, x2; \ vpxor x4, x3, x3; \ vpxor x0, x4, x4; #define SI3_1(x0, x1, x2, x3, x4) \ vpxor x1, x2, x2; \ vpand x2, x1, tp; \ vpxor x0, tp, tp; \ vpor x1, x0, x0; \ vpxor x3, x1, x4; \ vpxor x3, x0, x0; \ vpor tp, x3, x3; \ vpxor x2, tp, x1; #define SI3_2(x0, x1, x2, x3, x4) \ vpxor x3, x1, x1; \ vpxor x2, x0, x0; \ vpxor x3, x2, x2; \ vpand x1, x3, x3; \ vpxor x0, x1, x1; \ vpand x2, x0, x0; \ vpxor x3, x4, x4; \ vpxor x0, x3, x3; \ vpxor x1, x0, x0; #define SI4_1(x0, x1, x2, x3, x4) \ vpxor x3, x2, x2; \ vpand x1, x0, tp; \ vpxor x2, tp, tp; \ vpor x3, x2, x2; \ vpxor RNOT, x0, x4; \ vpxor tp, x1, x1; \ vpxor x2, tp, x0; \ vpand x4, x2, x2; #define SI4_2(x0, x1, x2, x3, x4) \ vpxor x0, x2, x2; \ vpor x4, x0, x0; \ vpxor x3, x0, x0; \ vpand x2, x3, x3; \ vpxor x3, x4, x4; \ vpxor x1, x3, x3; \ vpand x0, x1, x1; \ vpxor x1, x4, x4; \ vpxor x3, x0, x0; #define SI5_1(x0, x1, x2, x3, x4) \ vpor x2, x1, tp; \ vpxor x1, x2, x2; \ vpxor x3, tp, tp; \ vpand x1, x3, x3; \ vpxor x3, x2, x2; \ vpor x0, x3, x3; \ vpxor RNOT, x0, x0; \ vpxor x2, x3, x3; \ vpor x0, x2, x2; #define SI5_2(x0, x1, x2, x3, x4) \ vpxor tp, x1, x4; \ vpxor x4, x2, x2; \ vpand x0, x4, x4; \ vpxor tp, x0, x0; \ vpxor x3, tp, x1; \ vpand x2, x0, x0; \ vpxor x3, x2, x2; \ vpxor x2, x0, x0; \ vpxor x4, x2, x2; \ vpxor x3, x4, x4; #define SI6_1(x0, x1, x2, x3, x4) \ vpxor x2, x0, x0; \ vpand x3, x0, tp; \ vpxor x3, x2, x2; \ vpxor x2, tp, tp; \ vpxor x1, x3, x3; \ vpor x0, x2, x2; \ vpxor x3, x2, x2; \ vpand tp, x3, x3; #define SI6_2(x0, x1, x2, x3, x4) \ vpxor RNOT, tp, tp; \ vpxor x1, x3, x3; \ vpand x2, x1, x1; \ vpxor tp, x0, x4; \ vpxor x4, x3, x3; \ vpxor x2, x4, x4; \ vpxor x1, tp, x0; \ vpxor x0, x2, x2; #define SI7_1(x0, x1, x2, x3, x4) \ vpand x0, x3, tp; \ vpxor x2, x0, x0; \ vpor x3, x2, 
x2; \ vpxor x1, x3, x4; \ vpxor RNOT, x0, x0; \ vpor tp, x1, x1; \ vpxor x0, x4, x4; \ vpand x2, x0, x0; \ vpxor x1, x0, x0; #define SI7_2(x0, x1, x2, x3, x4) \ vpand x2, x1, x1; \ vpxor x2, tp, x3; \ vpxor x3, x4, x4; \ vpand x3, x2, x2; \ vpor x0, x3, x3; \ vpxor x4, x1, x1; \ vpxor x4, x3, x3; \ vpand x0, x4, x4; \ vpxor x2, x4, x4; #define get_key(i, j, t) \ vbroadcastss (4*(i)+(j))*4(CTX), t; #define K2(x0, x1, x2, x3, x4, i) \ get_key(i, 0, RK0); \ get_key(i, 1, RK1); \ get_key(i, 2, RK2); \ get_key(i, 3, RK3); \ vpxor RK0, x0 ## 1, x0 ## 1; \ vpxor RK1, x1 ## 1, x1 ## 1; \ vpxor RK2, x2 ## 1, x2 ## 1; \ vpxor RK3, x3 ## 1, x3 ## 1; \ vpxor RK0, x0 ## 2, x0 ## 2; \ vpxor RK1, x1 ## 2, x1 ## 2; \ vpxor RK2, x2 ## 2, x2 ## 2; \ vpxor RK3, x3 ## 2, x3 ## 2; #define LK2(x0, x1, x2, x3, x4, i) \ vpslld $13, x0 ## 1, x4 ## 1; \ vpsrld $(32 - 13), x0 ## 1, x0 ## 1; \ vpor x4 ## 1, x0 ## 1, x0 ## 1; \ vpxor x0 ## 1, x1 ## 1, x1 ## 1; \ vpslld $3, x2 ## 1, x4 ## 1; \ vpsrld $(32 - 3), x2 ## 1, x2 ## 1; \ vpor x4 ## 1, x2 ## 1, x2 ## 1; \ vpxor x2 ## 1, x1 ## 1, x1 ## 1; \ vpslld $13, x0 ## 2, x4 ## 2; \ vpsrld $(32 - 13), x0 ## 2, x0 ## 2; \ vpor x4 ## 2, x0 ## 2, x0 ## 2; \ vpxor x0 ## 2, x1 ## 2, x1 ## 2; \ vpslld $3, x2 ## 2, x4 ## 2; \ vpsrld $(32 - 3), x2 ## 2, x2 ## 2; \ vpor x4 ## 2, x2 ## 2, x2 ## 2; \ vpxor x2 ## 2, x1 ## 2, x1 ## 2; \ vpslld $1, x1 ## 1, x4 ## 1; \ vpsrld $(32 - 1), x1 ## 1, x1 ## 1; \ vpor x4 ## 1, x1 ## 1, x1 ## 1; \ vpslld $3, x0 ## 1, x4 ## 1; \ vpxor x2 ## 1, x3 ## 1, x3 ## 1; \ vpxor x4 ## 1, x3 ## 1, x3 ## 1; \ get_key(i, 1, RK1); \ vpslld $1, x1 ## 2, x4 ## 2; \ vpsrld $(32 - 1), x1 ## 2, x1 ## 2; \ vpor x4 ## 2, x1 ## 2, x1 ## 2; \ vpslld $3, x0 ## 2, x4 ## 2; \ vpxor x2 ## 2, x3 ## 2, x3 ## 2; \ vpxor x4 ## 2, x3 ## 2, x3 ## 2; \ get_key(i, 3, RK3); \ vpslld $7, x3 ## 1, x4 ## 1; \ vpsrld $(32 - 7), x3 ## 1, x3 ## 1; \ vpor x4 ## 1, x3 ## 1, x3 ## 1; \ vpslld $7, x1 ## 1, x4 ## 1; \ vpxor x1 ## 1, x0 ## 1, x0 ## 1; \ vpxor x3 ## 1, x0 ## 1, x0 ## 1; \ vpxor x3 ## 1, x2 ## 1, x2 ## 1; \ vpxor x4 ## 1, x2 ## 1, x2 ## 1; \ get_key(i, 0, RK0); \ vpslld $7, x3 ## 2, x4 ## 2; \ vpsrld $(32 - 7), x3 ## 2, x3 ## 2; \ vpor x4 ## 2, x3 ## 2, x3 ## 2; \ vpslld $7, x1 ## 2, x4 ## 2; \ vpxor x1 ## 2, x0 ## 2, x0 ## 2; \ vpxor x3 ## 2, x0 ## 2, x0 ## 2; \ vpxor x3 ## 2, x2 ## 2, x2 ## 2; \ vpxor x4 ## 2, x2 ## 2, x2 ## 2; \ get_key(i, 2, RK2); \ vpxor RK1, x1 ## 1, x1 ## 1; \ vpxor RK3, x3 ## 1, x3 ## 1; \ vpslld $5, x0 ## 1, x4 ## 1; \ vpsrld $(32 - 5), x0 ## 1, x0 ## 1; \ vpor x4 ## 1, x0 ## 1, x0 ## 1; \ vpslld $22, x2 ## 1, x4 ## 1; \ vpsrld $(32 - 22), x2 ## 1, x2 ## 1; \ vpor x4 ## 1, x2 ## 1, x2 ## 1; \ vpxor RK0, x0 ## 1, x0 ## 1; \ vpxor RK2, x2 ## 1, x2 ## 1; \ vpxor RK1, x1 ## 2, x1 ## 2; \ vpxor RK3, x3 ## 2, x3 ## 2; \ vpslld $5, x0 ## 2, x4 ## 2; \ vpsrld $(32 - 5), x0 ## 2, x0 ## 2; \ vpor x4 ## 2, x0 ## 2, x0 ## 2; \ vpslld $22, x2 ## 2, x4 ## 2; \ vpsrld $(32 - 22), x2 ## 2, x2 ## 2; \ vpor x4 ## 2, x2 ## 2, x2 ## 2; \ vpxor RK0, x0 ## 2, x0 ## 2; \ vpxor RK2, x2 ## 2, x2 ## 2; #define KL2(x0, x1, x2, x3, x4, i) \ vpxor RK0, x0 ## 1, x0 ## 1; \ vpxor RK2, x2 ## 1, x2 ## 1; \ vpsrld $5, x0 ## 1, x4 ## 1; \ vpslld $(32 - 5), x0 ## 1, x0 ## 1; \ vpor x4 ## 1, x0 ## 1, x0 ## 1; \ vpxor RK3, x3 ## 1, x3 ## 1; \ vpxor RK1, x1 ## 1, x1 ## 1; \ vpsrld $22, x2 ## 1, x4 ## 1; \ vpslld $(32 - 22), x2 ## 1, x2 ## 1; \ vpor x4 ## 1, x2 ## 1, x2 ## 1; \ vpxor x3 ## 1, x2 ## 1, x2 ## 1; \ vpxor RK0, x0 ## 2, x0 ## 2; \ vpxor RK2, x2 ## 2, x2 ## 2; \ vpsrld $5, x0 ## 
2, x4 ## 2; \ vpslld $(32 - 5), x0 ## 2, x0 ## 2; \ vpor x4 ## 2, x0 ## 2, x0 ## 2; \ vpxor RK3, x3 ## 2, x3 ## 2; \ vpxor RK1, x1 ## 2, x1 ## 2; \ vpsrld $22, x2 ## 2, x4 ## 2; \ vpslld $(32 - 22), x2 ## 2, x2 ## 2; \ vpor x4 ## 2, x2 ## 2, x2 ## 2; \ vpxor x3 ## 2, x2 ## 2, x2 ## 2; \ vpxor x3 ## 1, x0 ## 1, x0 ## 1; \ vpslld $7, x1 ## 1, x4 ## 1; \ vpxor x1 ## 1, x0 ## 1, x0 ## 1; \ vpxor x4 ## 1, x2 ## 1, x2 ## 1; \ vpsrld $1, x1 ## 1, x4 ## 1; \ vpslld $(32 - 1), x1 ## 1, x1 ## 1; \ vpor x4 ## 1, x1 ## 1, x1 ## 1; \ vpxor x3 ## 2, x0 ## 2, x0 ## 2; \ vpslld $7, x1 ## 2, x4 ## 2; \ vpxor x1 ## 2, x0 ## 2, x0 ## 2; \ vpxor x4 ## 2, x2 ## 2, x2 ## 2; \ vpsrld $1, x1 ## 2, x4 ## 2; \ vpslld $(32 - 1), x1 ## 2, x1 ## 2; \ vpor x4 ## 2, x1 ## 2, x1 ## 2; \ vpsrld $7, x3 ## 1, x4 ## 1; \ vpslld $(32 - 7), x3 ## 1, x3 ## 1; \ vpor x4 ## 1, x3 ## 1, x3 ## 1; \ vpxor x0 ## 1, x1 ## 1, x1 ## 1; \ vpslld $3, x0 ## 1, x4 ## 1; \ vpxor x4 ## 1, x3 ## 1, x3 ## 1; \ vpsrld $7, x3 ## 2, x4 ## 2; \ vpslld $(32 - 7), x3 ## 2, x3 ## 2; \ vpor x4 ## 2, x3 ## 2, x3 ## 2; \ vpxor x0 ## 2, x1 ## 2, x1 ## 2; \ vpslld $3, x0 ## 2, x4 ## 2; \ vpxor x4 ## 2, x3 ## 2, x3 ## 2; \ vpsrld $13, x0 ## 1, x4 ## 1; \ vpslld $(32 - 13), x0 ## 1, x0 ## 1; \ vpor x4 ## 1, x0 ## 1, x0 ## 1; \ vpxor x2 ## 1, x1 ## 1, x1 ## 1; \ vpxor x2 ## 1, x3 ## 1, x3 ## 1; \ vpsrld $3, x2 ## 1, x4 ## 1; \ vpslld $(32 - 3), x2 ## 1, x2 ## 1; \ vpor x4 ## 1, x2 ## 1, x2 ## 1; \ vpsrld $13, x0 ## 2, x4 ## 2; \ vpslld $(32 - 13), x0 ## 2, x0 ## 2; \ vpor x4 ## 2, x0 ## 2, x0 ## 2; \ vpxor x2 ## 2, x1 ## 2, x1 ## 2; \ vpxor x2 ## 2, x3 ## 2, x3 ## 2; \ vpsrld $3, x2 ## 2, x4 ## 2; \ vpslld $(32 - 3), x2 ## 2, x2 ## 2; \ vpor x4 ## 2, x2 ## 2, x2 ## 2; #define S(SBOX, x0, x1, x2, x3, x4) \ SBOX ## _1(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \ SBOX ## _2(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \ SBOX ## _1(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2); \ SBOX ## _2(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2); #define SP(SBOX, x0, x1, x2, x3, x4, i) \ get_key(i, 0, RK0); \ SBOX ## _1(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \ get_key(i, 2, RK2); \ SBOX ## _2(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \ get_key(i, 3, RK3); \ SBOX ## _1(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2); \ get_key(i, 1, RK1); \ SBOX ## _2(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2); \ #define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \ vpunpckldq x1, x0, t0; \ vpunpckhdq x1, x0, t2; \ vpunpckldq x3, x2, t1; \ vpunpckhdq x3, x2, x3; \ \ vpunpcklqdq t1, t0, x0; \ vpunpckhqdq t1, t0, x1; \ vpunpcklqdq x3, t2, x2; \ vpunpckhqdq x3, t2, x3; #define read_blocks(x0, x1, x2, x3, t0, t1, t2) \ transpose_4x4(x0, x1, x2, x3, t0, t1, t2) #define write_blocks(x0, x1, x2, x3, t0, t1, t2) \ transpose_4x4(x0, x1, x2, x3, t0, t1, t2) .align 8 SYM_FUNC_START_LOCAL(__serpent_enc_blk8_avx) /* input: * %rdi: ctx, CTX * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: blocks * output: * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks */ vpcmpeqd RNOT, RNOT, RNOT; read_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2); read_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2); K2(RA, RB, RC, RD, RE, 0); S(S0, RA, RB, RC, RD, RE); LK2(RC, RB, RD, RA, RE, 1); S(S1, RC, RB, RD, RA, RE); LK2(RE, RD, RA, RC, RB, 2); S(S2, RE, RD, RA, RC, RB); LK2(RB, RD, RE, RC, RA, 3); S(S3, RB, RD, RE, RC, RA); LK2(RC, RA, RD, RB, RE, 4); S(S4, RC, RA, RD, RB, RE); LK2(RA, RD, RB, RE, RC, 5); S(S5, RA, RD, RB, RE, RC); LK2(RC, RA, RD, RE, RB, 6); S(S6, RC, RA, RD, RE, RB); LK2(RD, RB, RA, RE, RC, 7); S(S7, RD, 
RB, RA, RE, RC); LK2(RC, RA, RE, RD, RB, 8); S(S0, RC, RA, RE, RD, RB); LK2(RE, RA, RD, RC, RB, 9); S(S1, RE, RA, RD, RC, RB); LK2(RB, RD, RC, RE, RA, 10); S(S2, RB, RD, RC, RE, RA); LK2(RA, RD, RB, RE, RC, 11); S(S3, RA, RD, RB, RE, RC); LK2(RE, RC, RD, RA, RB, 12); S(S4, RE, RC, RD, RA, RB); LK2(RC, RD, RA, RB, RE, 13); S(S5, RC, RD, RA, RB, RE); LK2(RE, RC, RD, RB, RA, 14); S(S6, RE, RC, RD, RB, RA); LK2(RD, RA, RC, RB, RE, 15); S(S7, RD, RA, RC, RB, RE); LK2(RE, RC, RB, RD, RA, 16); S(S0, RE, RC, RB, RD, RA); LK2(RB, RC, RD, RE, RA, 17); S(S1, RB, RC, RD, RE, RA); LK2(RA, RD, RE, RB, RC, 18); S(S2, RA, RD, RE, RB, RC); LK2(RC, RD, RA, RB, RE, 19); S(S3, RC, RD, RA, RB, RE); LK2(RB, RE, RD, RC, RA, 20); S(S4, RB, RE, RD, RC, RA); LK2(RE, RD, RC, RA, RB, 21); S(S5, RE, RD, RC, RA, RB); LK2(RB, RE, RD, RA, RC, 22); S(S6, RB, RE, RD, RA, RC); LK2(RD, RC, RE, RA, RB, 23); S(S7, RD, RC, RE, RA, RB); LK2(RB, RE, RA, RD, RC, 24); S(S0, RB, RE, RA, RD, RC); LK2(RA, RE, RD, RB, RC, 25); S(S1, RA, RE, RD, RB, RC); LK2(RC, RD, RB, RA, RE, 26); S(S2, RC, RD, RB, RA, RE); LK2(RE, RD, RC, RA, RB, 27); S(S3, RE, RD, RC, RA, RB); LK2(RA, RB, RD, RE, RC, 28); S(S4, RA, RB, RD, RE, RC); LK2(RB, RD, RE, RC, RA, 29); S(S5, RB, RD, RE, RC, RA); LK2(RA, RB, RD, RC, RE, 30); S(S6, RA, RB, RD, RC, RE); LK2(RD, RE, RB, RC, RA, 31); S(S7, RD, RE, RB, RC, RA); K2(RA, RB, RC, RD, RE, 32); write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2); write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2); RET; SYM_FUNC_END(__serpent_enc_blk8_avx) .align 8 SYM_FUNC_START_LOCAL(__serpent_dec_blk8_avx) /* input: * %rdi: ctx, CTX * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks * output: * RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2: decrypted blocks */ vpcmpeqd RNOT, RNOT, RNOT; read_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2); read_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2); K2(RA, RB, RC, RD, RE, 32); SP(SI7, RA, RB, RC, RD, RE, 31); KL2(RB, RD, RA, RE, RC, 31); SP(SI6, RB, RD, RA, RE, RC, 30); KL2(RA, RC, RE, RB, RD, 30); SP(SI5, RA, RC, RE, RB, RD, 29); KL2(RC, RD, RA, RE, RB, 29); SP(SI4, RC, RD, RA, RE, RB, 28); KL2(RC, RA, RB, RE, RD, 28); SP(SI3, RC, RA, RB, RE, RD, 27); KL2(RB, RC, RD, RE, RA, 27); SP(SI2, RB, RC, RD, RE, RA, 26); KL2(RC, RA, RE, RD, RB, 26); SP(SI1, RC, RA, RE, RD, RB, 25); KL2(RB, RA, RE, RD, RC, 25); SP(SI0, RB, RA, RE, RD, RC, 24); KL2(RE, RC, RA, RB, RD, 24); SP(SI7, RE, RC, RA, RB, RD, 23); KL2(RC, RB, RE, RD, RA, 23); SP(SI6, RC, RB, RE, RD, RA, 22); KL2(RE, RA, RD, RC, RB, 22); SP(SI5, RE, RA, RD, RC, RB, 21); KL2(RA, RB, RE, RD, RC, 21); SP(SI4, RA, RB, RE, RD, RC, 20); KL2(RA, RE, RC, RD, RB, 20); SP(SI3, RA, RE, RC, RD, RB, 19); KL2(RC, RA, RB, RD, RE, 19); SP(SI2, RC, RA, RB, RD, RE, 18); KL2(RA, RE, RD, RB, RC, 18); SP(SI1, RA, RE, RD, RB, RC, 17); KL2(RC, RE, RD, RB, RA, 17); SP(SI0, RC, RE, RD, RB, RA, 16); KL2(RD, RA, RE, RC, RB, 16); SP(SI7, RD, RA, RE, RC, RB, 15); KL2(RA, RC, RD, RB, RE, 15); SP(SI6, RA, RC, RD, RB, RE, 14); KL2(RD, RE, RB, RA, RC, 14); SP(SI5, RD, RE, RB, RA, RC, 13); KL2(RE, RC, RD, RB, RA, 13); SP(SI4, RE, RC, RD, RB, RA, 12); KL2(RE, RD, RA, RB, RC, 12); SP(SI3, RE, RD, RA, RB, RC, 11); KL2(RA, RE, RC, RB, RD, 11); SP(SI2, RA, RE, RC, RB, RD, 10); KL2(RE, RD, RB, RC, RA, 10); SP(SI1, RE, RD, RB, RC, RA, 9); KL2(RA, RD, RB, RC, RE, 9); SP(SI0, RA, RD, RB, RC, RE, 8); KL2(RB, RE, RD, RA, RC, 8); SP(SI7, RB, RE, RD, RA, RC, 7); KL2(RE, RA, RB, RC, RD, 7); SP(SI6, RE, RA, RB, RC, RD, 6); KL2(RB, RD, RC, RE, RA, 6); SP(SI5, RB, RD, RC, RE, RA, 5); KL2(RD, RA, RB, RC, RE, 5); 
SP(SI4, RD, RA, RB, RC, RE, 4); KL2(RD, RB, RE, RC, RA, 4); SP(SI3, RD, RB, RE, RC, RA, 3); KL2(RE, RD, RA, RC, RB, 3); SP(SI2, RE, RD, RA, RC, RB, 2); KL2(RD, RB, RC, RA, RE, 2); SP(SI1, RD, RB, RC, RA, RE, 1); KL2(RE, RB, RC, RA, RD, 1); S(SI0, RE, RB, RC, RA, RD); K2(RC, RD, RB, RE, RA, 0); write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2); write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2); RET; SYM_FUNC_END(__serpent_dec_blk8_avx) SYM_FUNC_START(serpent_ecb_enc_8way_avx) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src */ FRAME_BEGIN load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); call __serpent_enc_blk8_avx; store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); FRAME_END RET; SYM_FUNC_END(serpent_ecb_enc_8way_avx) SYM_FUNC_START(serpent_ecb_dec_8way_avx) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src */ FRAME_BEGIN load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); call __serpent_dec_blk8_avx; store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2); FRAME_END RET; SYM_FUNC_END(serpent_ecb_dec_8way_avx) SYM_FUNC_START(serpent_cbc_dec_8way_avx) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src */ FRAME_BEGIN load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); call __serpent_dec_blk8_avx; store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2); FRAME_END RET; SYM_FUNC_END(serpent_cbc_dec_8way_avx)
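A minimal C-side sketch of how the three exported entry points above might be driven from glue code. The prototypes are inferred from the register comments in this file (%rdi: ctx, %rsi: dst, %rdx: src); the struct serpent_ctx name and the wrapper itself are illustrative assumptions, not definitions taken from the file.

#include <linux/types.h>
#include <linux/linkage.h>
#include <asm/fpu/api.h>

struct serpent_ctx;	/* opaque here; assumed name for the expanded key */

/* Prototypes inferred from the "%rdi: ctx, %rsi: dst, %rdx: src" comments. */
asmlinkage void serpent_ecb_enc_8way_avx(const struct serpent_ctx *ctx,
					 u8 *dst, const u8 *src);
asmlinkage void serpent_ecb_dec_8way_avx(const struct serpent_ctx *ctx,
					 u8 *dst, const u8 *src);

/* Encrypt one 8-block (8 * 16 = 128 byte) chunk; AVX state must be owned. */
static void serpent_enc_8blocks(const struct serpent_ctx *ctx,
				u8 *dst, const u8 *src)
{
	kernel_fpu_begin();
	serpent_ecb_enc_8way_avx(ctx, dst, src);
	kernel_fpu_end();
}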
aixcc-public/challenge-001-exemplar-source
17,511
arch/x86/crypto/sha256-avx-asm.S
######################################################################## # Implement fast SHA-256 with AVX1 instructions. (x86_64) # # Copyright (C) 2013 Intel Corporation. # # Authors: # James Guilford <james.guilford@intel.com> # Kirk Yap <kirk.s.yap@intel.com> # Tim Chen <tim.c.chen@linux.intel.com> # # This software is available to you under a choice of one of two # licenses. You may choose to be licensed under the terms of the GNU # General Public License (GPL) Version 2, available from the file # COPYING in the main directory of this source tree, or the # OpenIB.org BSD license below: # # Redistribution and use in source and binary forms, with or # without modification, are permitted provided that the following # conditions are met: # # - Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # - Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials # provided with the distribution. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. ######################################################################## # # This code is described in an Intel White-Paper: # "Fast SHA-256 Implementations on Intel Architecture Processors" # # To find it, surf to http://www.intel.com/p/en_US/embedded # and search for that title. 
# ######################################################################## # This code schedules 1 block at a time, with 4 lanes per block ######################################################################## #include <linux/linkage.h> #include <linux/cfi_types.h> ## assume buffers not aligned #define VMOVDQ vmovdqu ################################ Define Macros # addm [mem], reg # Add reg to mem using reg-mem add and store .macro addm p1 p2 add \p1, \p2 mov \p2, \p1 .endm .macro MY_ROR p1 p2 shld $(32-(\p1)), \p2, \p2 .endm ################################ # COPY_XMM_AND_BSWAP xmm, [mem], byte_flip_mask # Load xmm with mem and byte swap each dword .macro COPY_XMM_AND_BSWAP p1 p2 p3 VMOVDQ \p2, \p1 vpshufb \p3, \p1, \p1 .endm ################################ X0 = %xmm4 X1 = %xmm5 X2 = %xmm6 X3 = %xmm7 XTMP0 = %xmm0 XTMP1 = %xmm1 XTMP2 = %xmm2 XTMP3 = %xmm3 XTMP4 = %xmm8 XFER = %xmm9 XTMP5 = %xmm11 SHUF_00BA = %xmm10 # shuffle xBxA -> 00BA SHUF_DC00 = %xmm12 # shuffle xDxC -> DC00 BYTE_FLIP_MASK = %xmm13 NUM_BLKS = %rdx # 3rd arg INP = %rsi # 2nd arg CTX = %rdi # 1st arg SRND = %rsi # clobbers INP c = %ecx d = %r8d e = %edx TBL = %r12 a = %eax b = %ebx f = %r9d g = %r10d h = %r11d y0 = %r13d y1 = %r14d y2 = %r15d _INP_END_SIZE = 8 _INP_SIZE = 8 _XFER_SIZE = 16 _XMM_SAVE_SIZE = 0 _INP_END = 0 _INP = _INP_END + _INP_END_SIZE _XFER = _INP + _INP_SIZE _XMM_SAVE = _XFER + _XFER_SIZE STACK_SIZE = _XMM_SAVE + _XMM_SAVE_SIZE # rotate_Xs # Rotate values of symbols X0...X3 .macro rotate_Xs X_ = X0 X0 = X1 X1 = X2 X2 = X3 X3 = X_ .endm # ROTATE_ARGS # Rotate values of symbols a...h .macro ROTATE_ARGS TMP_ = h h = g g = f f = e e = d d = c c = b b = a a = TMP_ .endm .macro FOUR_ROUNDS_AND_SCHED ## compute s0 four at a time and s1 two at a time ## compute W[-16] + W[-7] 4 at a time mov e, y0 # y0 = e MY_ROR (25-11), y0 # y0 = e >> (25-11) mov a, y1 # y1 = a vpalignr $4, X2, X3, XTMP0 # XTMP0 = W[-7] MY_ROR (22-13), y1 # y1 = a >> (22-13) xor e, y0 # y0 = e ^ (e >> (25-11)) mov f, y2 # y2 = f MY_ROR (11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6)) xor a, y1 # y1 = a ^ (a >> (22-13) xor g, y2 # y2 = f^g vpaddd X0, XTMP0, XTMP0 # XTMP0 = W[-7] + W[-16] xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) and e, y2 # y2 = (f^g)&e MY_ROR (13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2)) ## compute s0 vpalignr $4, X0, X1, XTMP1 # XTMP1 = W[-15] xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) MY_ROR 6, y0 # y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) xor g, y2 # y2 = CH = ((f^g)&e)^g MY_ROR 2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) add y0, y2 # y2 = S1 + CH add _XFER(%rsp), y2 # y2 = k + w + S1 + CH mov a, y0 # y0 = a add y2, h # h = h + S1 + CH + k + w mov a, y2 # y2 = a vpsrld $7, XTMP1, XTMP2 or c, y0 # y0 = a|c add h, d # d = d + h + S1 + CH + k + w and c, y2 # y2 = a&c vpslld $(32-7), XTMP1, XTMP3 and b, y0 # y0 = (a|c)&b add y1, h # h = h + S1 + CH + k + w + S0 vpor XTMP2, XTMP3, XTMP3 # XTMP1 = W[-15] MY_ROR 7 or y2, y0 # y0 = MAJ = (a|c)&b)|(a&c) add y0, h # h = h + S1 + CH + k + w + S0 + MAJ ROTATE_ARGS mov e, y0 # y0 = e mov a, y1 # y1 = a MY_ROR (25-11), y0 # y0 = e >> (25-11) xor e, y0 # y0 = e ^ (e >> (25-11)) mov f, y2 # y2 = f MY_ROR (22-13), y1 # y1 = a >> (22-13) vpsrld $18, XTMP1, XTMP2 # xor a, y1 # y1 = a ^ (a >> (22-13) MY_ROR (11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6)) xor g, y2 # y2 = f^g vpsrld $3, XTMP1, XTMP4 # XTMP4 = W[-15] >> 3 MY_ROR (13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2)) xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) and e, y2 # y2 = (f^g)&e MY_ROR 6, y0 # y0 = S1 = 
(e>>6) & (e>>11) ^ (e>>25) vpslld $(32-18), XTMP1, XTMP1 xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) xor g, y2 # y2 = CH = ((f^g)&e)^g vpxor XTMP1, XTMP3, XTMP3 # add y0, y2 # y2 = S1 + CH add (1*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH MY_ROR 2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) vpxor XTMP2, XTMP3, XTMP3 # XTMP1 = W[-15] MY_ROR 7 ^ W[-15] MY_ROR mov a, y0 # y0 = a add y2, h # h = h + S1 + CH + k + w mov a, y2 # y2 = a vpxor XTMP4, XTMP3, XTMP1 # XTMP1 = s0 or c, y0 # y0 = a|c add h, d # d = d + h + S1 + CH + k + w and c, y2 # y2 = a&c ## compute low s1 vpshufd $0b11111010, X3, XTMP2 # XTMP2 = W[-2] {BBAA} and b, y0 # y0 = (a|c)&b add y1, h # h = h + S1 + CH + k + w + S0 vpaddd XTMP1, XTMP0, XTMP0 # XTMP0 = W[-16] + W[-7] + s0 or y2, y0 # y0 = MAJ = (a|c)&b)|(a&c) add y0, h # h = h + S1 + CH + k + w + S0 + MAJ ROTATE_ARGS mov e, y0 # y0 = e mov a, y1 # y1 = a MY_ROR (25-11), y0 # y0 = e >> (25-11) xor e, y0 # y0 = e ^ (e >> (25-11)) MY_ROR (22-13), y1 # y1 = a >> (22-13) mov f, y2 # y2 = f xor a, y1 # y1 = a ^ (a >> (22-13) MY_ROR (11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6)) vpsrld $10, XTMP2, XTMP4 # XTMP4 = W[-2] >> 10 {BBAA} xor g, y2 # y2 = f^g vpsrlq $19, XTMP2, XTMP3 # XTMP3 = W[-2] MY_ROR 19 {xBxA} xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) and e, y2 # y2 = (f^g)&e vpsrlq $17, XTMP2, XTMP2 # XTMP2 = W[-2] MY_ROR 17 {xBxA} MY_ROR (13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2)) xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) xor g, y2 # y2 = CH = ((f^g)&e)^g MY_ROR 6, y0 # y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) vpxor XTMP3, XTMP2, XTMP2 # add y0, y2 # y2 = S1 + CH MY_ROR 2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) add (2*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH vpxor XTMP2, XTMP4, XTMP4 # XTMP4 = s1 {xBxA} mov a, y0 # y0 = a add y2, h # h = h + S1 + CH + k + w mov a, y2 # y2 = a vpshufb SHUF_00BA, XTMP4, XTMP4 # XTMP4 = s1 {00BA} or c, y0 # y0 = a|c add h, d # d = d + h + S1 + CH + k + w and c, y2 # y2 = a&c vpaddd XTMP4, XTMP0, XTMP0 # XTMP0 = {..., ..., W[1], W[0]} and b, y0 # y0 = (a|c)&b add y1, h # h = h + S1 + CH + k + w + S0 ## compute high s1 vpshufd $0b01010000, XTMP0, XTMP2 # XTMP2 = W[-2] {DDCC} or y2, y0 # y0 = MAJ = (a|c)&b)|(a&c) add y0, h # h = h + S1 + CH + k + w + S0 + MAJ ROTATE_ARGS mov e, y0 # y0 = e MY_ROR (25-11), y0 # y0 = e >> (25-11) mov a, y1 # y1 = a MY_ROR (22-13), y1 # y1 = a >> (22-13) xor e, y0 # y0 = e ^ (e >> (25-11)) mov f, y2 # y2 = f MY_ROR (11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6)) vpsrld $10, XTMP2, XTMP5 # XTMP5 = W[-2] >> 10 {DDCC} xor a, y1 # y1 = a ^ (a >> (22-13) xor g, y2 # y2 = f^g vpsrlq $19, XTMP2, XTMP3 # XTMP3 = W[-2] MY_ROR 19 {xDxC} xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) and e, y2 # y2 = (f^g)&e MY_ROR (13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2)) vpsrlq $17, XTMP2, XTMP2 # XTMP2 = W[-2] MY_ROR 17 {xDxC} xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) MY_ROR 6, y0 # y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) xor g, y2 # y2 = CH = ((f^g)&e)^g vpxor XTMP3, XTMP2, XTMP2 MY_ROR 2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) add y0, y2 # y2 = S1 + CH add (3*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH vpxor XTMP2, XTMP5, XTMP5 # XTMP5 = s1 {xDxC} mov a, y0 # y0 = a add y2, h # h = h + S1 + CH + k + w mov a, y2 # y2 = a vpshufb SHUF_DC00, XTMP5, XTMP5 # XTMP5 = s1 {DC00} or c, y0 # y0 = a|c add h, d # d = d + h + S1 + CH + k + w and c, y2 # y2 = a&c vpaddd XTMP0, XTMP5, X0 # X0 = {W[3], W[2], W[1], W[0]} and b, y0 # y0 = (a|c)&b add y1, h # h = h + S1 + CH + k + w + S0 or y2, y0 # y0 = MAJ = 
(a|c)&b)|(a&c) add y0, h # h = h + S1 + CH + k + w + S0 + MAJ ROTATE_ARGS rotate_Xs .endm ## input is [rsp + _XFER + %1 * 4] .macro DO_ROUND round mov e, y0 # y0 = e MY_ROR (25-11), y0 # y0 = e >> (25-11) mov a, y1 # y1 = a xor e, y0 # y0 = e ^ (e >> (25-11)) MY_ROR (22-13), y1 # y1 = a >> (22-13) mov f, y2 # y2 = f xor a, y1 # y1 = a ^ (a >> (22-13) MY_ROR (11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6)) xor g, y2 # y2 = f^g xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) MY_ROR (13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2)) and e, y2 # y2 = (f^g)&e xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) MY_ROR 6, y0 # y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) xor g, y2 # y2 = CH = ((f^g)&e)^g add y0, y2 # y2 = S1 + CH MY_ROR 2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) offset = \round * 4 + _XFER # add offset(%rsp), y2 # y2 = k + w + S1 + CH mov a, y0 # y0 = a add y2, h # h = h + S1 + CH + k + w mov a, y2 # y2 = a or c, y0 # y0 = a|c add h, d # d = d + h + S1 + CH + k + w and c, y2 # y2 = a&c and b, y0 # y0 = (a|c)&b add y1, h # h = h + S1 + CH + k + w + S0 or y2, y0 # y0 = MAJ = (a|c)&b)|(a&c) add y0, h # h = h + S1 + CH + k + w + S0 + MAJ ROTATE_ARGS .endm ######################################################################## ## void sha256_transform_avx(state sha256_state *state, const u8 *data, int blocks) ## arg 1 : pointer to state ## arg 2 : pointer to input data ## arg 3 : Num blocks ######################################################################## .text SYM_TYPED_FUNC_START(sha256_transform_avx) .align 32 pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 pushq %rbp movq %rsp, %rbp subq $STACK_SIZE, %rsp # allocate stack space and $~15, %rsp # align stack pointer shl $6, NUM_BLKS # convert to bytes jz done_hash add INP, NUM_BLKS # pointer to end of data mov NUM_BLKS, _INP_END(%rsp) ## load initial digest mov 4*0(CTX), a mov 4*1(CTX), b mov 4*2(CTX), c mov 4*3(CTX), d mov 4*4(CTX), e mov 4*5(CTX), f mov 4*6(CTX), g mov 4*7(CTX), h vmovdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK vmovdqa _SHUF_00BA(%rip), SHUF_00BA vmovdqa _SHUF_DC00(%rip), SHUF_DC00 loop0: lea K256(%rip), TBL ## byte swap first 16 dwords COPY_XMM_AND_BSWAP X0, 0*16(INP), BYTE_FLIP_MASK COPY_XMM_AND_BSWAP X1, 1*16(INP), BYTE_FLIP_MASK COPY_XMM_AND_BSWAP X2, 2*16(INP), BYTE_FLIP_MASK COPY_XMM_AND_BSWAP X3, 3*16(INP), BYTE_FLIP_MASK mov INP, _INP(%rsp) ## schedule 48 input dwords, by doing 3 rounds of 16 each mov $3, SRND .align 16 loop1: vpaddd (TBL), X0, XFER vmovdqa XFER, _XFER(%rsp) FOUR_ROUNDS_AND_SCHED vpaddd 1*16(TBL), X0, XFER vmovdqa XFER, _XFER(%rsp) FOUR_ROUNDS_AND_SCHED vpaddd 2*16(TBL), X0, XFER vmovdqa XFER, _XFER(%rsp) FOUR_ROUNDS_AND_SCHED vpaddd 3*16(TBL), X0, XFER vmovdqa XFER, _XFER(%rsp) add $4*16, TBL FOUR_ROUNDS_AND_SCHED sub $1, SRND jne loop1 mov $2, SRND loop2: vpaddd (TBL), X0, XFER vmovdqa XFER, _XFER(%rsp) DO_ROUND 0 DO_ROUND 1 DO_ROUND 2 DO_ROUND 3 vpaddd 1*16(TBL), X1, XFER vmovdqa XFER, _XFER(%rsp) add $2*16, TBL DO_ROUND 0 DO_ROUND 1 DO_ROUND 2 DO_ROUND 3 vmovdqa X2, X0 vmovdqa X3, X1 sub $1, SRND jne loop2 addm (4*0)(CTX),a addm (4*1)(CTX),b addm (4*2)(CTX),c addm (4*3)(CTX),d addm (4*4)(CTX),e addm (4*5)(CTX),f addm (4*6)(CTX),g addm (4*7)(CTX),h mov _INP(%rsp), INP add $64, INP cmp _INP_END(%rsp), INP jne loop0 done_hash: mov %rbp, %rsp popq %rbp popq %r15 popq %r14 popq %r13 popq %r12 popq %rbx RET SYM_FUNC_END(sha256_transform_avx) .section .rodata.cst256.K256, "aM", @progbits, 256 .align 64 K256: .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 
0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .section .rodata.cst16.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 16 .align 16 PSHUFFLE_BYTE_FLIP_MASK: .octa 0x0c0d0e0f08090a0b0405060700010203 .section .rodata.cst16._SHUF_00BA, "aM", @progbits, 16 .align 16 # shuffle xBxA -> 00BA _SHUF_00BA: .octa 0xFFFFFFFFFFFFFFFF0b0a090803020100 .section .rodata.cst16._SHUF_DC00, "aM", @progbits, 16 .align 16 # shuffle xDxC -> DC00 _SHUF_DC00: .octa 0x0b0a090803020100FFFFFFFFFFFFFFFF
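A hedged sketch of the C call site for the transform above, following the prototype given in its comment block; the sha256_state definition and the helper name are assumptions, and only whole 64-byte blocks are passed down, as the routine expects.

#include <linux/types.h>
#include <linux/linkage.h>
#include <asm/fpu/api.h>

struct sha256_state;	/* digest words h0..h7 plus buffering; assumed layout */

asmlinkage void sha256_transform_avx(struct sha256_state *state,
				     const u8 *data, int blocks);

/* Illustrative wrapper: hash only complete 64-byte blocks with the FPU held. */
static void sha256_avx_do_blocks(struct sha256_state *state,
				 const u8 *data, unsigned int len)
{
	int blocks = len / 64;		/* SHA-256 block size is 64 bytes */

	if (!blocks)
		return;
	kernel_fpu_begin();
	sha256_transform_avx(state, data, blocks);
	kernel_fpu_end();
}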
aixcc-public/challenge-001-exemplar-source
13,684
arch/x86/crypto/serpent-sse2-i586-asm_32.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Serpent Cipher 4-way parallel algorithm (i586/SSE2) * * Copyright (C) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> * * Based on crypto/serpent.c by * Copyright (C) 2002 Dag Arne Osvik <osvik@ii.uib.no> * 2003 Herbert Valerio Riedel <hvr@gnu.org> */ #include <linux/linkage.h> .file "serpent-sse2-i586-asm_32.S" .text #define arg_ctx 4 #define arg_dst 8 #define arg_src 12 #define arg_xor 16 /********************************************************************** 4-way SSE2 serpent **********************************************************************/ #define CTX %edx #define RA %xmm0 #define RB %xmm1 #define RC %xmm2 #define RD %xmm3 #define RE %xmm4 #define RT0 %xmm5 #define RT1 %xmm6 #define RNOT %xmm7 #define get_key(i, j, t) \ movd (4*(i)+(j))*4(CTX), t; \ pshufd $0, t, t; #define K(x0, x1, x2, x3, x4, i) \ get_key(i, 0, x4); \ get_key(i, 1, RT0); \ get_key(i, 2, RT1); \ pxor x4, x0; \ pxor RT0, x1; \ pxor RT1, x2; \ get_key(i, 3, x4); \ pxor x4, x3; #define LK(x0, x1, x2, x3, x4, i) \ movdqa x0, x4; \ pslld $13, x0; \ psrld $(32 - 13), x4; \ por x4, x0; \ pxor x0, x1; \ movdqa x2, x4; \ pslld $3, x2; \ psrld $(32 - 3), x4; \ por x4, x2; \ pxor x2, x1; \ movdqa x1, x4; \ pslld $1, x1; \ psrld $(32 - 1), x4; \ por x4, x1; \ movdqa x0, x4; \ pslld $3, x4; \ pxor x2, x3; \ pxor x4, x3; \ movdqa x3, x4; \ pslld $7, x3; \ psrld $(32 - 7), x4; \ por x4, x3; \ movdqa x1, x4; \ pslld $7, x4; \ pxor x1, x0; \ pxor x3, x0; \ pxor x3, x2; \ pxor x4, x2; \ movdqa x0, x4; \ get_key(i, 1, RT0); \ pxor RT0, x1; \ get_key(i, 3, RT0); \ pxor RT0, x3; \ pslld $5, x0; \ psrld $(32 - 5), x4; \ por x4, x0; \ movdqa x2, x4; \ pslld $22, x2; \ psrld $(32 - 22), x4; \ por x4, x2; \ get_key(i, 0, RT0); \ pxor RT0, x0; \ get_key(i, 2, RT0); \ pxor RT0, x2; #define KL(x0, x1, x2, x3, x4, i) \ K(x0, x1, x2, x3, x4, i); \ movdqa x0, x4; \ psrld $5, x0; \ pslld $(32 - 5), x4; \ por x4, x0; \ movdqa x2, x4; \ psrld $22, x2; \ pslld $(32 - 22), x4; \ por x4, x2; \ pxor x3, x2; \ pxor x3, x0; \ movdqa x1, x4; \ pslld $7, x4; \ pxor x1, x0; \ pxor x4, x2; \ movdqa x1, x4; \ psrld $1, x1; \ pslld $(32 - 1), x4; \ por x4, x1; \ movdqa x3, x4; \ psrld $7, x3; \ pslld $(32 - 7), x4; \ por x4, x3; \ pxor x0, x1; \ movdqa x0, x4; \ pslld $3, x4; \ pxor x4, x3; \ movdqa x0, x4; \ psrld $13, x0; \ pslld $(32 - 13), x4; \ por x4, x0; \ pxor x2, x1; \ pxor x2, x3; \ movdqa x2, x4; \ psrld $3, x2; \ pslld $(32 - 3), x4; \ por x4, x2; #define S0(x0, x1, x2, x3, x4) \ movdqa x3, x4; \ por x0, x3; \ pxor x4, x0; \ pxor x2, x4; \ pxor RNOT, x4; \ pxor x1, x3; \ pand x0, x1; \ pxor x4, x1; \ pxor x0, x2; \ pxor x3, x0; \ por x0, x4; \ pxor x2, x0; \ pand x1, x2; \ pxor x2, x3; \ pxor RNOT, x1; \ pxor x4, x2; \ pxor x2, x1; #define S1(x0, x1, x2, x3, x4) \ movdqa x1, x4; \ pxor x0, x1; \ pxor x3, x0; \ pxor RNOT, x3; \ pand x1, x4; \ por x1, x0; \ pxor x2, x3; \ pxor x3, x0; \ pxor x3, x1; \ pxor x4, x3; \ por x4, x1; \ pxor x2, x4; \ pand x0, x2; \ pxor x1, x2; \ por x0, x1; \ pxor RNOT, x0; \ pxor x2, x0; \ pxor x1, x4; #define S2(x0, x1, x2, x3, x4) \ pxor RNOT, x3; \ pxor x0, x1; \ movdqa x0, x4; \ pand x2, x0; \ pxor x3, x0; \ por x4, x3; \ pxor x1, x2; \ pxor x1, x3; \ pand x0, x1; \ pxor x2, x0; \ pand x3, x2; \ por x1, x3; \ pxor RNOT, x0; \ pxor x0, x3; \ pxor x0, x4; \ pxor x2, x0; \ por x2, x1; #define S3(x0, x1, x2, x3, x4) \ movdqa x1, x4; \ pxor x3, x1; \ por x0, x3; \ pand x0, x4; \ pxor x2, x0; \ pxor x1, x2; \ pand x3, x1; \ pxor x3, x2; \ por x4, x0; \ pxor x3, x4; 
\ pxor x0, x1; \ pand x3, x0; \ pand x4, x3; \ pxor x2, x3; \ por x1, x4; \ pand x1, x2; \ pxor x3, x4; \ pxor x3, x0; \ pxor x2, x3; #define S4(x0, x1, x2, x3, x4) \ movdqa x3, x4; \ pand x0, x3; \ pxor x4, x0; \ pxor x2, x3; \ por x4, x2; \ pxor x1, x0; \ pxor x3, x4; \ por x0, x2; \ pxor x1, x2; \ pand x0, x1; \ pxor x4, x1; \ pand x2, x4; \ pxor x3, x2; \ pxor x0, x4; \ por x1, x3; \ pxor RNOT, x1; \ pxor x0, x3; #define S5(x0, x1, x2, x3, x4) \ movdqa x1, x4; \ por x0, x1; \ pxor x1, x2; \ pxor RNOT, x3; \ pxor x0, x4; \ pxor x2, x0; \ pand x4, x1; \ por x3, x4; \ pxor x0, x4; \ pand x3, x0; \ pxor x3, x1; \ pxor x2, x3; \ pxor x1, x0; \ pand x4, x2; \ pxor x2, x1; \ pand x0, x2; \ pxor x2, x3; #define S6(x0, x1, x2, x3, x4) \ movdqa x1, x4; \ pxor x0, x3; \ pxor x2, x1; \ pxor x0, x2; \ pand x3, x0; \ por x3, x1; \ pxor RNOT, x4; \ pxor x1, x0; \ pxor x2, x1; \ pxor x4, x3; \ pxor x0, x4; \ pand x0, x2; \ pxor x1, x4; \ pxor x3, x2; \ pand x1, x3; \ pxor x0, x3; \ pxor x2, x1; #define S7(x0, x1, x2, x3, x4) \ pxor RNOT, x1; \ movdqa x1, x4; \ pxor RNOT, x0; \ pand x2, x1; \ pxor x3, x1; \ por x4, x3; \ pxor x2, x4; \ pxor x3, x2; \ pxor x0, x3; \ por x1, x0; \ pand x0, x2; \ pxor x4, x0; \ pxor x3, x4; \ pand x0, x3; \ pxor x1, x4; \ pxor x4, x2; \ pxor x1, x3; \ por x0, x4; \ pxor x1, x4; #define SI0(x0, x1, x2, x3, x4) \ movdqa x3, x4; \ pxor x0, x1; \ por x1, x3; \ pxor x1, x4; \ pxor RNOT, x0; \ pxor x3, x2; \ pxor x0, x3; \ pand x1, x0; \ pxor x2, x0; \ pand x3, x2; \ pxor x4, x3; \ pxor x3, x2; \ pxor x3, x1; \ pand x0, x3; \ pxor x0, x1; \ pxor x2, x0; \ pxor x3, x4; #define SI1(x0, x1, x2, x3, x4) \ pxor x3, x1; \ movdqa x0, x4; \ pxor x2, x0; \ pxor RNOT, x2; \ por x1, x4; \ pxor x3, x4; \ pand x1, x3; \ pxor x2, x1; \ pand x4, x2; \ pxor x1, x4; \ por x3, x1; \ pxor x0, x3; \ pxor x0, x2; \ por x4, x0; \ pxor x4, x2; \ pxor x0, x1; \ pxor x1, x4; #define SI2(x0, x1, x2, x3, x4) \ pxor x1, x2; \ movdqa x3, x4; \ pxor RNOT, x3; \ por x2, x3; \ pxor x4, x2; \ pxor x0, x4; \ pxor x1, x3; \ por x2, x1; \ pxor x0, x2; \ pxor x4, x1; \ por x3, x4; \ pxor x3, x2; \ pxor x2, x4; \ pand x1, x2; \ pxor x3, x2; \ pxor x4, x3; \ pxor x0, x4; #define SI3(x0, x1, x2, x3, x4) \ pxor x1, x2; \ movdqa x1, x4; \ pand x2, x1; \ pxor x0, x1; \ por x4, x0; \ pxor x3, x4; \ pxor x3, x0; \ por x1, x3; \ pxor x2, x1; \ pxor x3, x1; \ pxor x2, x0; \ pxor x3, x2; \ pand x1, x3; \ pxor x0, x1; \ pand x2, x0; \ pxor x3, x4; \ pxor x0, x3; \ pxor x1, x0; #define SI4(x0, x1, x2, x3, x4) \ pxor x3, x2; \ movdqa x0, x4; \ pand x1, x0; \ pxor x2, x0; \ por x3, x2; \ pxor RNOT, x4; \ pxor x0, x1; \ pxor x2, x0; \ pand x4, x2; \ pxor x0, x2; \ por x4, x0; \ pxor x3, x0; \ pand x2, x3; \ pxor x3, x4; \ pxor x1, x3; \ pand x0, x1; \ pxor x1, x4; \ pxor x3, x0; #define SI5(x0, x1, x2, x3, x4) \ movdqa x1, x4; \ por x2, x1; \ pxor x4, x2; \ pxor x3, x1; \ pand x4, x3; \ pxor x3, x2; \ por x0, x3; \ pxor RNOT, x0; \ pxor x2, x3; \ por x0, x2; \ pxor x1, x4; \ pxor x4, x2; \ pand x0, x4; \ pxor x1, x0; \ pxor x3, x1; \ pand x2, x0; \ pxor x3, x2; \ pxor x2, x0; \ pxor x4, x2; \ pxor x3, x4; #define SI6(x0, x1, x2, x3, x4) \ pxor x2, x0; \ movdqa x0, x4; \ pand x3, x0; \ pxor x3, x2; \ pxor x2, x0; \ pxor x1, x3; \ por x4, x2; \ pxor x3, x2; \ pand x0, x3; \ pxor RNOT, x0; \ pxor x1, x3; \ pand x2, x1; \ pxor x0, x4; \ pxor x4, x3; \ pxor x2, x4; \ pxor x1, x0; \ pxor x0, x2; #define SI7(x0, x1, x2, x3, x4) \ movdqa x3, x4; \ pand x0, x3; \ pxor x2, x0; \ por x4, x2; \ pxor x1, x4; \ pxor RNOT, x0; \ por x3, x1; \ 
pxor x0, x4; \ pand x2, x0; \ pxor x1, x0; \ pand x2, x1; \ pxor x2, x3; \ pxor x3, x4; \ pand x3, x2; \ por x0, x3; \ pxor x4, x1; \ pxor x4, x3; \ pand x0, x4; \ pxor x2, x4; #define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \ movdqa x0, t2; \ punpckldq x1, x0; \ punpckhdq x1, t2; \ movdqa x2, t1; \ punpckhdq x3, x2; \ punpckldq x3, t1; \ movdqa x0, x1; \ punpcklqdq t1, x0; \ punpckhqdq t1, x1; \ movdqa t2, x3; \ punpcklqdq x2, t2; \ punpckhqdq x2, x3; \ movdqa t2, x2; #define read_blocks(in, x0, x1, x2, x3, t0, t1, t2) \ movdqu (0*4*4)(in), x0; \ movdqu (1*4*4)(in), x1; \ movdqu (2*4*4)(in), x2; \ movdqu (3*4*4)(in), x3; \ \ transpose_4x4(x0, x1, x2, x3, t0, t1, t2) #define write_blocks(out, x0, x1, x2, x3, t0, t1, t2) \ transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \ \ movdqu x0, (0*4*4)(out); \ movdqu x1, (1*4*4)(out); \ movdqu x2, (2*4*4)(out); \ movdqu x3, (3*4*4)(out); #define xor_blocks(out, x0, x1, x2, x3, t0, t1, t2) \ transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \ \ movdqu (0*4*4)(out), t0; \ pxor t0, x0; \ movdqu x0, (0*4*4)(out); \ movdqu (1*4*4)(out), t0; \ pxor t0, x1; \ movdqu x1, (1*4*4)(out); \ movdqu (2*4*4)(out), t0; \ pxor t0, x2; \ movdqu x2, (2*4*4)(out); \ movdqu (3*4*4)(out), t0; \ pxor t0, x3; \ movdqu x3, (3*4*4)(out); SYM_FUNC_START(__serpent_enc_blk_4way) /* input: * arg_ctx(%esp): ctx, CTX * arg_dst(%esp): dst * arg_src(%esp): src * arg_xor(%esp): bool, if true: xor output */ pcmpeqd RNOT, RNOT; movl arg_ctx(%esp), CTX; movl arg_src(%esp), %eax; read_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE); K(RA, RB, RC, RD, RE, 0); S0(RA, RB, RC, RD, RE); LK(RC, RB, RD, RA, RE, 1); S1(RC, RB, RD, RA, RE); LK(RE, RD, RA, RC, RB, 2); S2(RE, RD, RA, RC, RB); LK(RB, RD, RE, RC, RA, 3); S3(RB, RD, RE, RC, RA); LK(RC, RA, RD, RB, RE, 4); S4(RC, RA, RD, RB, RE); LK(RA, RD, RB, RE, RC, 5); S5(RA, RD, RB, RE, RC); LK(RC, RA, RD, RE, RB, 6); S6(RC, RA, RD, RE, RB); LK(RD, RB, RA, RE, RC, 7); S7(RD, RB, RA, RE, RC); LK(RC, RA, RE, RD, RB, 8); S0(RC, RA, RE, RD, RB); LK(RE, RA, RD, RC, RB, 9); S1(RE, RA, RD, RC, RB); LK(RB, RD, RC, RE, RA, 10); S2(RB, RD, RC, RE, RA); LK(RA, RD, RB, RE, RC, 11); S3(RA, RD, RB, RE, RC); LK(RE, RC, RD, RA, RB, 12); S4(RE, RC, RD, RA, RB); LK(RC, RD, RA, RB, RE, 13); S5(RC, RD, RA, RB, RE); LK(RE, RC, RD, RB, RA, 14); S6(RE, RC, RD, RB, RA); LK(RD, RA, RC, RB, RE, 15); S7(RD, RA, RC, RB, RE); LK(RE, RC, RB, RD, RA, 16); S0(RE, RC, RB, RD, RA); LK(RB, RC, RD, RE, RA, 17); S1(RB, RC, RD, RE, RA); LK(RA, RD, RE, RB, RC, 18); S2(RA, RD, RE, RB, RC); LK(RC, RD, RA, RB, RE, 19); S3(RC, RD, RA, RB, RE); LK(RB, RE, RD, RC, RA, 20); S4(RB, RE, RD, RC, RA); LK(RE, RD, RC, RA, RB, 21); S5(RE, RD, RC, RA, RB); LK(RB, RE, RD, RA, RC, 22); S6(RB, RE, RD, RA, RC); LK(RD, RC, RE, RA, RB, 23); S7(RD, RC, RE, RA, RB); LK(RB, RE, RA, RD, RC, 24); S0(RB, RE, RA, RD, RC); LK(RA, RE, RD, RB, RC, 25); S1(RA, RE, RD, RB, RC); LK(RC, RD, RB, RA, RE, 26); S2(RC, RD, RB, RA, RE); LK(RE, RD, RC, RA, RB, 27); S3(RE, RD, RC, RA, RB); LK(RA, RB, RD, RE, RC, 28); S4(RA, RB, RD, RE, RC); LK(RB, RD, RE, RC, RA, 29); S5(RB, RD, RE, RC, RA); LK(RA, RB, RD, RC, RE, 30); S6(RA, RB, RD, RC, RE); LK(RD, RE, RB, RC, RA, 31); S7(RD, RE, RB, RC, RA); K(RA, RB, RC, RD, RE, 32); movl arg_dst(%esp), %eax; cmpb $0, arg_xor(%esp); jnz .L__enc_xor4; write_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE); RET; .L__enc_xor4: xor_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE); RET; SYM_FUNC_END(__serpent_enc_blk_4way) SYM_FUNC_START(serpent_dec_blk_4way) /* input: * arg_ctx(%esp): ctx, CTX * arg_dst(%esp): dst * 
arg_src(%esp): src */ pcmpeqd RNOT, RNOT; movl arg_ctx(%esp), CTX; movl arg_src(%esp), %eax; read_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE); K(RA, RB, RC, RD, RE, 32); SI7(RA, RB, RC, RD, RE); KL(RB, RD, RA, RE, RC, 31); SI6(RB, RD, RA, RE, RC); KL(RA, RC, RE, RB, RD, 30); SI5(RA, RC, RE, RB, RD); KL(RC, RD, RA, RE, RB, 29); SI4(RC, RD, RA, RE, RB); KL(RC, RA, RB, RE, RD, 28); SI3(RC, RA, RB, RE, RD); KL(RB, RC, RD, RE, RA, 27); SI2(RB, RC, RD, RE, RA); KL(RC, RA, RE, RD, RB, 26); SI1(RC, RA, RE, RD, RB); KL(RB, RA, RE, RD, RC, 25); SI0(RB, RA, RE, RD, RC); KL(RE, RC, RA, RB, RD, 24); SI7(RE, RC, RA, RB, RD); KL(RC, RB, RE, RD, RA, 23); SI6(RC, RB, RE, RD, RA); KL(RE, RA, RD, RC, RB, 22); SI5(RE, RA, RD, RC, RB); KL(RA, RB, RE, RD, RC, 21); SI4(RA, RB, RE, RD, RC); KL(RA, RE, RC, RD, RB, 20); SI3(RA, RE, RC, RD, RB); KL(RC, RA, RB, RD, RE, 19); SI2(RC, RA, RB, RD, RE); KL(RA, RE, RD, RB, RC, 18); SI1(RA, RE, RD, RB, RC); KL(RC, RE, RD, RB, RA, 17); SI0(RC, RE, RD, RB, RA); KL(RD, RA, RE, RC, RB, 16); SI7(RD, RA, RE, RC, RB); KL(RA, RC, RD, RB, RE, 15); SI6(RA, RC, RD, RB, RE); KL(RD, RE, RB, RA, RC, 14); SI5(RD, RE, RB, RA, RC); KL(RE, RC, RD, RB, RA, 13); SI4(RE, RC, RD, RB, RA); KL(RE, RD, RA, RB, RC, 12); SI3(RE, RD, RA, RB, RC); KL(RA, RE, RC, RB, RD, 11); SI2(RA, RE, RC, RB, RD); KL(RE, RD, RB, RC, RA, 10); SI1(RE, RD, RB, RC, RA); KL(RA, RD, RB, RC, RE, 9); SI0(RA, RD, RB, RC, RE); KL(RB, RE, RD, RA, RC, 8); SI7(RB, RE, RD, RA, RC); KL(RE, RA, RB, RC, RD, 7); SI6(RE, RA, RB, RC, RD); KL(RB, RD, RC, RE, RA, 6); SI5(RB, RD, RC, RE, RA); KL(RD, RA, RB, RC, RE, 5); SI4(RD, RA, RB, RC, RE); KL(RD, RB, RE, RC, RA, 4); SI3(RD, RB, RE, RC, RA); KL(RE, RD, RA, RC, RB, 3); SI2(RE, RD, RA, RC, RB); KL(RD, RB, RC, RA, RE, 2); SI1(RD, RB, RC, RA, RE); KL(RE, RB, RC, RA, RD, 1); SI0(RE, RB, RC, RA, RD); K(RC, RD, RB, RE, RA, 0); movl arg_dst(%esp), %eax; write_blocks(%eax, RC, RD, RB, RE, RT0, RT1, RA); RET; SYM_FUNC_END(serpent_dec_blk_4way)
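The i586 variant takes its arguments on the stack (the arg_ctx/arg_dst/arg_src/arg_xor offsets above), so a plausible C-side view is the sketch below. The prototypes and the struct name are assumptions inferred from those offsets and the comments, and the caller is assumed to have claimed the SSE2 register state already.

#include <linux/types.h>
#include <linux/linkage.h>

struct serpent_ctx;	/* assumed name for the expanded-key context */

/* Prototypes inferred from the stack-argument offsets and comments above. */
asmlinkage void __serpent_enc_blk_4way(const struct serpent_ctx *ctx,
				       u8 *dst, const u8 *src, bool xor_out);
asmlinkage void serpent_dec_blk_4way(const struct serpent_ctx *ctx,
				     u8 *dst, const u8 *src);

/* ECB-style: write the four encrypted blocks straight to dst. */
static inline void serpent_enc_4blocks(const struct serpent_ctx *ctx,
				       u8 *dst, const u8 *src)
{
	__serpent_enc_blk_4way(ctx, dst, src, false);
}

/* Chaining-mode style: XOR the four encrypted blocks into dst instead. */
static inline void serpent_enc_4blocks_xor(const struct serpent_ctx *ctx,
					   u8 *dst, const u8 *src)
{
	__serpent_enc_blk_4way(ctx, dst, src, true);
}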
aixcc-public/challenge-001-exemplar-source
17,159
arch/x86/crypto/sm3-avx-asm_64.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * SM3 AVX accelerated transform. * specified in: https://datatracker.ietf.org/doc/html/draft-sca-cfrg-sm3-02 * * Copyright (C) 2021 Jussi Kivilinna <jussi.kivilinna@iki.fi> * Copyright (C) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com> */ /* Based on SM3 AES/BMI2 accelerated work by libgcrypt at: * https://gnupg.org/software/libgcrypt/index.html */ #include <linux/linkage.h> #include <linux/cfi_types.h> #include <asm/frame.h> /* Context structure */ #define state_h0 0 #define state_h1 4 #define state_h2 8 #define state_h3 12 #define state_h4 16 #define state_h5 20 #define state_h6 24 #define state_h7 28 /* Constants */ /* Round constant macros */ #define K0 2043430169 /* 0x79cc4519 */ #define K1 -208106958 /* 0xf3988a32 */ #define K2 -416213915 /* 0xe7311465 */ #define K3 -832427829 /* 0xce6228cb */ #define K4 -1664855657 /* 0x9cc45197 */ #define K5 965255983 /* 0x3988a32f */ #define K6 1930511966 /* 0x7311465e */ #define K7 -433943364 /* 0xe6228cbc */ #define K8 -867886727 /* 0xcc451979 */ #define K9 -1735773453 /* 0x988a32f3 */ #define K10 823420391 /* 0x311465e7 */ #define K11 1646840782 /* 0x6228cbce */ #define K12 -1001285732 /* 0xc451979c */ #define K13 -2002571463 /* 0x88a32f39 */ #define K14 289824371 /* 0x11465e73 */ #define K15 579648742 /* 0x228cbce6 */ #define K16 -1651869049 /* 0x9d8a7a87 */ #define K17 991229199 /* 0x3b14f50f */ #define K18 1982458398 /* 0x7629ea1e */ #define K19 -330050500 /* 0xec53d43c */ #define K20 -660100999 /* 0xd8a7a879 */ #define K21 -1320201997 /* 0xb14f50f3 */ #define K22 1654563303 /* 0x629ea1e7 */ #define K23 -985840690 /* 0xc53d43ce */ #define K24 -1971681379 /* 0x8a7a879d */ #define K25 351604539 /* 0x14f50f3b */ #define K26 703209078 /* 0x29ea1e76 */ #define K27 1406418156 /* 0x53d43cec */ #define K28 -1482130984 /* 0xa7a879d8 */ #define K29 1330705329 /* 0x4f50f3b1 */ #define K30 -1633556638 /* 0x9ea1e762 */ #define K31 1027854021 /* 0x3d43cec5 */ #define K32 2055708042 /* 0x7a879d8a */ #define K33 -183551212 /* 0xf50f3b14 */ #define K34 -367102423 /* 0xea1e7629 */ #define K35 -734204845 /* 0xd43cec53 */ #define K36 -1468409689 /* 0xa879d8a7 */ #define K37 1358147919 /* 0x50f3b14f */ #define K38 -1578671458 /* 0xa1e7629e */ #define K39 1137624381 /* 0x43cec53d */ #define K40 -2019718534 /* 0x879d8a7a */ #define K41 255530229 /* 0x0f3b14f5 */ #define K42 511060458 /* 0x1e7629ea */ #define K43 1022120916 /* 0x3cec53d4 */ #define K44 2044241832 /* 0x79d8a7a8 */ #define K45 -206483632 /* 0xf3b14f50 */ #define K46 -412967263 /* 0xe7629ea1 */ #define K47 -825934525 /* 0xcec53d43 */ #define K48 -1651869049 /* 0x9d8a7a87 */ #define K49 991229199 /* 0x3b14f50f */ #define K50 1982458398 /* 0x7629ea1e */ #define K51 -330050500 /* 0xec53d43c */ #define K52 -660100999 /* 0xd8a7a879 */ #define K53 -1320201997 /* 0xb14f50f3 */ #define K54 1654563303 /* 0x629ea1e7 */ #define K55 -985840690 /* 0xc53d43ce */ #define K56 -1971681379 /* 0x8a7a879d */ #define K57 351604539 /* 0x14f50f3b */ #define K58 703209078 /* 0x29ea1e76 */ #define K59 1406418156 /* 0x53d43cec */ #define K60 -1482130984 /* 0xa7a879d8 */ #define K61 1330705329 /* 0x4f50f3b1 */ #define K62 -1633556638 /* 0x9ea1e762 */ #define K63 1027854021 /* 0x3d43cec5 */ /* Register macros */ #define RSTATE %rdi #define RDATA %rsi #define RNBLKS %rdx #define t0 %eax #define t1 %ebx #define t2 %ecx #define a %r8d #define b %r9d #define c %r10d #define d %r11d #define e %r12d #define f %r13d #define g %r14d #define h %r15d #define W0 %xmm0 #define W1 
%xmm1 #define W2 %xmm2 #define W3 %xmm3 #define W4 %xmm4 #define W5 %xmm5 #define XTMP0 %xmm6 #define XTMP1 %xmm7 #define XTMP2 %xmm8 #define XTMP3 %xmm9 #define XTMP4 %xmm10 #define XTMP5 %xmm11 #define XTMP6 %xmm12 #define BSWAP_REG %xmm15 /* Stack structure */ #define STACK_W_SIZE (32 * 2 * 3) #define STACK_REG_SAVE_SIZE (64) #define STACK_W (0) #define STACK_REG_SAVE (STACK_W + STACK_W_SIZE) #define STACK_SIZE (STACK_REG_SAVE + STACK_REG_SAVE_SIZE) /* Instruction helpers. */ #define roll2(v, reg) \ roll $(v), reg; #define roll3mov(v, src, dst) \ movl src, dst; \ roll $(v), dst; #define roll3(v, src, dst) \ rorxl $(32-(v)), src, dst; #define addl2(a, out) \ leal (a, out), out; /* Round function macros. */ #define GG1(x, y, z, o, t) \ movl x, o; \ xorl y, o; \ xorl z, o; #define FF1(x, y, z, o, t) GG1(x, y, z, o, t) #define GG2(x, y, z, o, t) \ andnl z, x, o; \ movl y, t; \ andl x, t; \ addl2(t, o); #define FF2(x, y, z, o, t) \ movl y, o; \ xorl x, o; \ movl y, t; \ andl x, t; \ andl z, o; \ xorl t, o; #define R(i, a, b, c, d, e, f, g, h, round, widx, wtype) \ /* rol(a, 12) => t0 */ \ roll3mov(12, a, t0); /* rorxl here would reduce perf by 6% on zen3 */ \ /* rol (t0 + e + t), 7) => t1 */ \ leal K##round(t0, e, 1), t1; \ roll2(7, t1); \ /* h + w1 => h */ \ addl wtype##_W1_ADDR(round, widx), h; \ /* h + t1 => h */ \ addl2(t1, h); \ /* t1 ^ t0 => t0 */ \ xorl t1, t0; \ /* w1w2 + d => d */ \ addl wtype##_W1W2_ADDR(round, widx), d; \ /* FF##i(a,b,c) => t1 */ \ FF##i(a, b, c, t1, t2); \ /* d + t1 => d */ \ addl2(t1, d); \ /* GG#i(e,f,g) => t2 */ \ GG##i(e, f, g, t2, t1); \ /* h + t2 => h */ \ addl2(t2, h); \ /* rol (f, 19) => f */ \ roll2(19, f); \ /* d + t0 => d */ \ addl2(t0, d); \ /* rol (b, 9) => b */ \ roll2(9, b); \ /* P0(h) => h */ \ roll3(9, h, t2); \ roll3(17, h, t1); \ xorl t2, h; \ xorl t1, h; #define R1(a, b, c, d, e, f, g, h, round, widx, wtype) \ R(1, a, b, c, d, e, f, g, h, round, widx, wtype) #define R2(a, b, c, d, e, f, g, h, round, widx, wtype) \ R(2, a, b, c, d, e, f, g, h, round, widx, wtype) /* Input expansion macros. */ /* Byte-swapped input address. */ #define IW_W_ADDR(round, widx, offs) \ (STACK_W + ((round) / 4) * 64 + (offs) + ((widx) * 4))(%rsp) /* Expanded input address. */ #define XW_W_ADDR(round, widx, offs) \ (STACK_W + ((((round) / 3) - 4) % 2) * 64 + (offs) + ((widx) * 4))(%rsp) /* Rounds 1-12, byte-swapped input block addresses. */ #define IW_W1_ADDR(round, widx) IW_W_ADDR(round, widx, 0) #define IW_W1W2_ADDR(round, widx) IW_W_ADDR(round, widx, 32) /* Rounds 1-12, expanded input block addresses. */ #define XW_W1_ADDR(round, widx) XW_W_ADDR(round, widx, 0) #define XW_W1W2_ADDR(round, widx) XW_W_ADDR(round, widx, 32) /* Input block loading. 
*/ #define LOAD_W_XMM_1() \ vmovdqu 0*16(RDATA), XTMP0; /* XTMP0: w3, w2, w1, w0 */ \ vmovdqu 1*16(RDATA), XTMP1; /* XTMP1: w7, w6, w5, w4 */ \ vmovdqu 2*16(RDATA), XTMP2; /* XTMP2: w11, w10, w9, w8 */ \ vmovdqu 3*16(RDATA), XTMP3; /* XTMP3: w15, w14, w13, w12 */ \ vpshufb BSWAP_REG, XTMP0, XTMP0; \ vpshufb BSWAP_REG, XTMP1, XTMP1; \ vpshufb BSWAP_REG, XTMP2, XTMP2; \ vpshufb BSWAP_REG, XTMP3, XTMP3; \ vpxor XTMP0, XTMP1, XTMP4; \ vpxor XTMP1, XTMP2, XTMP5; \ vpxor XTMP2, XTMP3, XTMP6; \ leaq 64(RDATA), RDATA; \ vmovdqa XTMP0, IW_W1_ADDR(0, 0); \ vmovdqa XTMP4, IW_W1W2_ADDR(0, 0); \ vmovdqa XTMP1, IW_W1_ADDR(4, 0); \ vmovdqa XTMP5, IW_W1W2_ADDR(4, 0); #define LOAD_W_XMM_2() \ vmovdqa XTMP2, IW_W1_ADDR(8, 0); \ vmovdqa XTMP6, IW_W1W2_ADDR(8, 0); #define LOAD_W_XMM_3() \ vpshufd $0b00000000, XTMP0, W0; /* W0: xx, w0, xx, xx */ \ vpshufd $0b11111001, XTMP0, W1; /* W1: xx, w3, w2, w1 */ \ vmovdqa XTMP1, W2; /* W2: xx, w6, w5, w4 */ \ vpalignr $12, XTMP1, XTMP2, W3; /* W3: xx, w9, w8, w7 */ \ vpalignr $8, XTMP2, XTMP3, W4; /* W4: xx, w12, w11, w10 */ \ vpshufd $0b11111001, XTMP3, W5; /* W5: xx, w15, w14, w13 */ /* Message scheduling. Note: 3 words per XMM register. */ #define SCHED_W_0(round, w0, w1, w2, w3, w4, w5) \ /* Load (w[i - 16]) => XTMP0 */ \ vpshufd $0b10111111, w0, XTMP0; \ vpalignr $12, XTMP0, w1, XTMP0; /* XTMP0: xx, w2, w1, w0 */ \ /* Load (w[i - 13]) => XTMP1 */ \ vpshufd $0b10111111, w1, XTMP1; \ vpalignr $12, XTMP1, w2, XTMP1; \ /* w[i - 9] == w3 */ \ /* XMM3 ^ XTMP0 => XTMP0 */ \ vpxor w3, XTMP0, XTMP0; #define SCHED_W_1(round, w0, w1, w2, w3, w4, w5) \ /* w[i - 3] == w5 */ \ /* rol(XMM5, 15) ^ XTMP0 => XTMP0 */ \ vpslld $15, w5, XTMP2; \ vpsrld $(32-15), w5, XTMP3; \ vpxor XTMP2, XTMP3, XTMP3; \ vpxor XTMP3, XTMP0, XTMP0; \ /* rol(XTMP1, 7) => XTMP1 */ \ vpslld $7, XTMP1, XTMP5; \ vpsrld $(32-7), XTMP1, XTMP1; \ vpxor XTMP5, XTMP1, XTMP1; \ /* XMM4 ^ XTMP1 => XTMP1 */ \ vpxor w4, XTMP1, XTMP1; \ /* w[i - 6] == XMM4 */ \ /* P1(XTMP0) ^ XTMP1 => XMM0 */ \ vpslld $15, XTMP0, XTMP5; \ vpsrld $(32-15), XTMP0, XTMP6; \ vpslld $23, XTMP0, XTMP2; \ vpsrld $(32-23), XTMP0, XTMP3; \ vpxor XTMP0, XTMP1, XTMP1; \ vpxor XTMP6, XTMP5, XTMP5; \ vpxor XTMP3, XTMP2, XTMP2; \ vpxor XTMP2, XTMP5, XTMP5; \ vpxor XTMP5, XTMP1, w0; #define SCHED_W_2(round, w0, w1, w2, w3, w4, w5) \ /* W1 in XMM12 */ \ vpshufd $0b10111111, w4, XTMP4; \ vpalignr $12, XTMP4, w5, XTMP4; \ vmovdqa XTMP4, XW_W1_ADDR((round), 0); \ /* W1 ^ W2 => XTMP1 */ \ vpxor w0, XTMP4, XTMP1; \ vmovdqa XTMP1, XW_W1W2_ADDR((round), 0); .section .rodata.cst16, "aM", @progbits, 16 .align 16 .Lbe32mask: .long 0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f .text /* * Transform nblocks*64 bytes (nblocks*16 32-bit words) at DATA. * * void sm3_transform_avx(struct sm3_state *state, * const u8 *data, int nblocks); */ .align 16 SYM_TYPED_FUNC_START(sm3_transform_avx) /* input: * %rdi: ctx, CTX * %rsi: data (64*nblks bytes) * %rdx: nblocks */ vzeroupper; pushq %rbp; movq %rsp, %rbp; movq %rdx, RNBLKS; subq $STACK_SIZE, %rsp; andq $(~63), %rsp; movq %rbx, (STACK_REG_SAVE + 0 * 8)(%rsp); movq %r15, (STACK_REG_SAVE + 1 * 8)(%rsp); movq %r14, (STACK_REG_SAVE + 2 * 8)(%rsp); movq %r13, (STACK_REG_SAVE + 3 * 8)(%rsp); movq %r12, (STACK_REG_SAVE + 4 * 8)(%rsp); vmovdqa .Lbe32mask (%rip), BSWAP_REG; /* Get the values of the chaining variables. 
*/ movl state_h0(RSTATE), a; movl state_h1(RSTATE), b; movl state_h2(RSTATE), c; movl state_h3(RSTATE), d; movl state_h4(RSTATE), e; movl state_h5(RSTATE), f; movl state_h6(RSTATE), g; movl state_h7(RSTATE), h; .align 16 .Loop: /* Load data part1. */ LOAD_W_XMM_1(); leaq -1(RNBLKS), RNBLKS; /* Transform 0-3 + Load data part2. */ R1(a, b, c, d, e, f, g, h, 0, 0, IW); LOAD_W_XMM_2(); R1(d, a, b, c, h, e, f, g, 1, 1, IW); R1(c, d, a, b, g, h, e, f, 2, 2, IW); R1(b, c, d, a, f, g, h, e, 3, 3, IW); LOAD_W_XMM_3(); /* Transform 4-7 + Precalc 12-14. */ R1(a, b, c, d, e, f, g, h, 4, 0, IW); R1(d, a, b, c, h, e, f, g, 5, 1, IW); R1(c, d, a, b, g, h, e, f, 6, 2, IW); SCHED_W_0(12, W0, W1, W2, W3, W4, W5); R1(b, c, d, a, f, g, h, e, 7, 3, IW); SCHED_W_1(12, W0, W1, W2, W3, W4, W5); /* Transform 8-11 + Precalc 12-17. */ R1(a, b, c, d, e, f, g, h, 8, 0, IW); SCHED_W_2(12, W0, W1, W2, W3, W4, W5); R1(d, a, b, c, h, e, f, g, 9, 1, IW); SCHED_W_0(15, W1, W2, W3, W4, W5, W0); R1(c, d, a, b, g, h, e, f, 10, 2, IW); SCHED_W_1(15, W1, W2, W3, W4, W5, W0); R1(b, c, d, a, f, g, h, e, 11, 3, IW); SCHED_W_2(15, W1, W2, W3, W4, W5, W0); /* Transform 12-14 + Precalc 18-20 */ R1(a, b, c, d, e, f, g, h, 12, 0, XW); SCHED_W_0(18, W2, W3, W4, W5, W0, W1); R1(d, a, b, c, h, e, f, g, 13, 1, XW); SCHED_W_1(18, W2, W3, W4, W5, W0, W1); R1(c, d, a, b, g, h, e, f, 14, 2, XW); SCHED_W_2(18, W2, W3, W4, W5, W0, W1); /* Transform 15-17 + Precalc 21-23 */ R1(b, c, d, a, f, g, h, e, 15, 0, XW); SCHED_W_0(21, W3, W4, W5, W0, W1, W2); R2(a, b, c, d, e, f, g, h, 16, 1, XW); SCHED_W_1(21, W3, W4, W5, W0, W1, W2); R2(d, a, b, c, h, e, f, g, 17, 2, XW); SCHED_W_2(21, W3, W4, W5, W0, W1, W2); /* Transform 18-20 + Precalc 24-26 */ R2(c, d, a, b, g, h, e, f, 18, 0, XW); SCHED_W_0(24, W4, W5, W0, W1, W2, W3); R2(b, c, d, a, f, g, h, e, 19, 1, XW); SCHED_W_1(24, W4, W5, W0, W1, W2, W3); R2(a, b, c, d, e, f, g, h, 20, 2, XW); SCHED_W_2(24, W4, W5, W0, W1, W2, W3); /* Transform 21-23 + Precalc 27-29 */ R2(d, a, b, c, h, e, f, g, 21, 0, XW); SCHED_W_0(27, W5, W0, W1, W2, W3, W4); R2(c, d, a, b, g, h, e, f, 22, 1, XW); SCHED_W_1(27, W5, W0, W1, W2, W3, W4); R2(b, c, d, a, f, g, h, e, 23, 2, XW); SCHED_W_2(27, W5, W0, W1, W2, W3, W4); /* Transform 24-26 + Precalc 30-32 */ R2(a, b, c, d, e, f, g, h, 24, 0, XW); SCHED_W_0(30, W0, W1, W2, W3, W4, W5); R2(d, a, b, c, h, e, f, g, 25, 1, XW); SCHED_W_1(30, W0, W1, W2, W3, W4, W5); R2(c, d, a, b, g, h, e, f, 26, 2, XW); SCHED_W_2(30, W0, W1, W2, W3, W4, W5); /* Transform 27-29 + Precalc 33-35 */ R2(b, c, d, a, f, g, h, e, 27, 0, XW); SCHED_W_0(33, W1, W2, W3, W4, W5, W0); R2(a, b, c, d, e, f, g, h, 28, 1, XW); SCHED_W_1(33, W1, W2, W3, W4, W5, W0); R2(d, a, b, c, h, e, f, g, 29, 2, XW); SCHED_W_2(33, W1, W2, W3, W4, W5, W0); /* Transform 30-32 + Precalc 36-38 */ R2(c, d, a, b, g, h, e, f, 30, 0, XW); SCHED_W_0(36, W2, W3, W4, W5, W0, W1); R2(b, c, d, a, f, g, h, e, 31, 1, XW); SCHED_W_1(36, W2, W3, W4, W5, W0, W1); R2(a, b, c, d, e, f, g, h, 32, 2, XW); SCHED_W_2(36, W2, W3, W4, W5, W0, W1); /* Transform 33-35 + Precalc 39-41 */ R2(d, a, b, c, h, e, f, g, 33, 0, XW); SCHED_W_0(39, W3, W4, W5, W0, W1, W2); R2(c, d, a, b, g, h, e, f, 34, 1, XW); SCHED_W_1(39, W3, W4, W5, W0, W1, W2); R2(b, c, d, a, f, g, h, e, 35, 2, XW); SCHED_W_2(39, W3, W4, W5, W0, W1, W2); /* Transform 36-38 + Precalc 42-44 */ R2(a, b, c, d, e, f, g, h, 36, 0, XW); SCHED_W_0(42, W4, W5, W0, W1, W2, W3); R2(d, a, b, c, h, e, f, g, 37, 1, XW); SCHED_W_1(42, W4, W5, W0, W1, W2, W3); R2(c, d, a, b, g, h, e, f, 38, 2, XW); SCHED_W_2(42, 
W4, W5, W0, W1, W2, W3); /* Transform 39-41 + Precalc 45-47 */ R2(b, c, d, a, f, g, h, e, 39, 0, XW); SCHED_W_0(45, W5, W0, W1, W2, W3, W4); R2(a, b, c, d, e, f, g, h, 40, 1, XW); SCHED_W_1(45, W5, W0, W1, W2, W3, W4); R2(d, a, b, c, h, e, f, g, 41, 2, XW); SCHED_W_2(45, W5, W0, W1, W2, W3, W4); /* Transform 42-44 + Precalc 48-50 */ R2(c, d, a, b, g, h, e, f, 42, 0, XW); SCHED_W_0(48, W0, W1, W2, W3, W4, W5); R2(b, c, d, a, f, g, h, e, 43, 1, XW); SCHED_W_1(48, W0, W1, W2, W3, W4, W5); R2(a, b, c, d, e, f, g, h, 44, 2, XW); SCHED_W_2(48, W0, W1, W2, W3, W4, W5); /* Transform 45-47 + Precalc 51-53 */ R2(d, a, b, c, h, e, f, g, 45, 0, XW); SCHED_W_0(51, W1, W2, W3, W4, W5, W0); R2(c, d, a, b, g, h, e, f, 46, 1, XW); SCHED_W_1(51, W1, W2, W3, W4, W5, W0); R2(b, c, d, a, f, g, h, e, 47, 2, XW); SCHED_W_2(51, W1, W2, W3, W4, W5, W0); /* Transform 48-50 + Precalc 54-56 */ R2(a, b, c, d, e, f, g, h, 48, 0, XW); SCHED_W_0(54, W2, W3, W4, W5, W0, W1); R2(d, a, b, c, h, e, f, g, 49, 1, XW); SCHED_W_1(54, W2, W3, W4, W5, W0, W1); R2(c, d, a, b, g, h, e, f, 50, 2, XW); SCHED_W_2(54, W2, W3, W4, W5, W0, W1); /* Transform 51-53 + Precalc 57-59 */ R2(b, c, d, a, f, g, h, e, 51, 0, XW); SCHED_W_0(57, W3, W4, W5, W0, W1, W2); R2(a, b, c, d, e, f, g, h, 52, 1, XW); SCHED_W_1(57, W3, W4, W5, W0, W1, W2); R2(d, a, b, c, h, e, f, g, 53, 2, XW); SCHED_W_2(57, W3, W4, W5, W0, W1, W2); /* Transform 54-56 + Precalc 60-62 */ R2(c, d, a, b, g, h, e, f, 54, 0, XW); SCHED_W_0(60, W4, W5, W0, W1, W2, W3); R2(b, c, d, a, f, g, h, e, 55, 1, XW); SCHED_W_1(60, W4, W5, W0, W1, W2, W3); R2(a, b, c, d, e, f, g, h, 56, 2, XW); SCHED_W_2(60, W4, W5, W0, W1, W2, W3); /* Transform 57-59 + Precalc 63 */ R2(d, a, b, c, h, e, f, g, 57, 0, XW); SCHED_W_0(63, W5, W0, W1, W2, W3, W4); R2(c, d, a, b, g, h, e, f, 58, 1, XW); R2(b, c, d, a, f, g, h, e, 59, 2, XW); SCHED_W_1(63, W5, W0, W1, W2, W3, W4); /* Transform 60-62 + Precalc 63 */ R2(a, b, c, d, e, f, g, h, 60, 0, XW); R2(d, a, b, c, h, e, f, g, 61, 1, XW); SCHED_W_2(63, W5, W0, W1, W2, W3, W4); R2(c, d, a, b, g, h, e, f, 62, 2, XW); /* Transform 63 */ R2(b, c, d, a, f, g, h, e, 63, 0, XW); /* Update the chaining variables. */ xorl state_h0(RSTATE), a; xorl state_h1(RSTATE), b; xorl state_h2(RSTATE), c; xorl state_h3(RSTATE), d; movl a, state_h0(RSTATE); movl b, state_h1(RSTATE); movl c, state_h2(RSTATE); movl d, state_h3(RSTATE); xorl state_h4(RSTATE), e; xorl state_h5(RSTATE), f; xorl state_h6(RSTATE), g; xorl state_h7(RSTATE), h; movl e, state_h4(RSTATE); movl f, state_h5(RSTATE); movl g, state_h6(RSTATE); movl h, state_h7(RSTATE); cmpq $0, RNBLKS; jne .Loop; vzeroall; movq (STACK_REG_SAVE + 0 * 8)(%rsp), %rbx; movq (STACK_REG_SAVE + 1 * 8)(%rsp), %r15; movq (STACK_REG_SAVE + 2 * 8)(%rsp), %r14; movq (STACK_REG_SAVE + 3 * 8)(%rsp), %r13; movq (STACK_REG_SAVE + 4 * 8)(%rsp), %r12; vmovdqa %xmm0, IW_W1_ADDR(0, 0); vmovdqa %xmm0, IW_W1W2_ADDR(0, 0); vmovdqa %xmm0, IW_W1_ADDR(4, 0); vmovdqa %xmm0, IW_W1W2_ADDR(4, 0); vmovdqa %xmm0, IW_W1_ADDR(8, 0); vmovdqa %xmm0, IW_W1W2_ADDR(8, 0); movq %rbp, %rsp; popq %rbp; RET; SYM_FUNC_END(sm3_transform_avx)
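A short sketch of the C side for the SM3 transform, using the prototype from the comment above ("void sm3_transform_avx(struct sm3_state *state, const u8 *data, int nblocks)"); the note about the state layout and the wrapper name are assumptions.

#include <linux/types.h>
#include <linux/linkage.h>
#include <asm/fpu/api.h>

struct sm3_state;	/* h0..h7 at offsets 0..28, matching state_h* above (assumed) */

asmlinkage void sm3_transform_avx(struct sm3_state *state,
				  const u8 *data, int nblocks);

/* Illustrative wrapper: process nblocks complete 64-byte SM3 blocks. */
static void sm3_avx_do_blocks(struct sm3_state *state,
			      const u8 *data, int nblocks)
{
	if (nblocks <= 0)
		return;
	kernel_fpu_begin();
	sm3_transform_avx(state, data, nblocks);
	kernel_fpu_end();
}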
aixcc-public/challenge-001-exemplar-source
12,448
arch/x86/crypto/aes_ctrby8_avx-x86_64.S
/* SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause */ /* * AES CTR mode by8 optimization with AVX instructions. (x86_64) * * Copyright(c) 2014 Intel Corporation. * * Contact Information: * James Guilford <james.guilford@intel.com> * Sean Gulley <sean.m.gulley@intel.com> * Chandramouli Narayanan <mouli@linux.intel.com> */ /* * This is AES128/192/256 CTR mode optimization implementation. It requires * the support of Intel(R) AESNI and AVX instructions. * * This work was inspired by the AES CTR mode optimization published * in Intel Optimized IPSEC Cryptographic library. * Additional information on it can be found at: * https://github.com/intel/intel-ipsec-mb */ #include <linux/linkage.h> #define VMOVDQ vmovdqu /* * Note: the "x" prefix in these aliases means "this is an xmm register". The * alias prefixes have no relation to XCTR where the "X" prefix means "XOR * counter". */ #define xdata0 %xmm0 #define xdata1 %xmm1 #define xdata2 %xmm2 #define xdata3 %xmm3 #define xdata4 %xmm4 #define xdata5 %xmm5 #define xdata6 %xmm6 #define xdata7 %xmm7 #define xcounter %xmm8 // CTR mode only #define xiv %xmm8 // XCTR mode only #define xbyteswap %xmm9 // CTR mode only #define xtmp %xmm9 // XCTR mode only #define xkey0 %xmm10 #define xkey4 %xmm11 #define xkey8 %xmm12 #define xkey12 %xmm13 #define xkeyA %xmm14 #define xkeyB %xmm15 #define p_in %rdi #define p_iv %rsi #define p_keys %rdx #define p_out %rcx #define num_bytes %r8 #define counter %r9 // XCTR mode only #define tmp %r10 #define DDQ_DATA 0 #define XDATA 1 #define KEY_128 1 #define KEY_192 2 #define KEY_256 3 .section .rodata .align 16 byteswap_const: .octa 0x000102030405060708090A0B0C0D0E0F ddq_low_msk: .octa 0x0000000000000000FFFFFFFFFFFFFFFF ddq_high_add_1: .octa 0x00000000000000010000000000000000 ddq_add_1: .octa 0x00000000000000000000000000000001 ddq_add_2: .octa 0x00000000000000000000000000000002 ddq_add_3: .octa 0x00000000000000000000000000000003 ddq_add_4: .octa 0x00000000000000000000000000000004 ddq_add_5: .octa 0x00000000000000000000000000000005 ddq_add_6: .octa 0x00000000000000000000000000000006 ddq_add_7: .octa 0x00000000000000000000000000000007 ddq_add_8: .octa 0x00000000000000000000000000000008 .text /* generate a unique variable for ddq_add_x */ /* generate a unique variable for xmm register */ .macro setxdata n var_xdata = %xmm\n .endm /* club the numeric 'id' to the symbol 'name' */ .macro club name, id .altmacro .if \name == XDATA setxdata %\id .endif .noaltmacro .endm /* * do_aes num_in_par load_keys key_len * This increments p_in, but not p_out */ .macro do_aes b, k, key_len, xctr .set by, \b .set load_keys, \k .set klen, \key_len .if (load_keys) vmovdqa 0*16(p_keys), xkey0 .endif .if \xctr movq counter, xtmp .set i, 0 .rept (by) club XDATA, i vpaddq (ddq_add_1 + 16 * i)(%rip), xtmp, var_xdata .set i, (i +1) .endr .set i, 0 .rept (by) club XDATA, i vpxor xiv, var_xdata, var_xdata .set i, (i +1) .endr .else vpshufb xbyteswap, xcounter, xdata0 .set i, 1 .rept (by - 1) club XDATA, i vpaddq (ddq_add_1 + 16 * (i - 1))(%rip), xcounter, var_xdata vptest ddq_low_msk(%rip), var_xdata jnz 1f vpaddq ddq_high_add_1(%rip), var_xdata, var_xdata vpaddq ddq_high_add_1(%rip), xcounter, xcounter 1: vpshufb xbyteswap, var_xdata, var_xdata .set i, (i +1) .endr .endif vmovdqa 1*16(p_keys), xkeyA vpxor xkey0, xdata0, xdata0 .if \xctr add $by, counter .else vpaddq (ddq_add_1 + 16 * (by - 1))(%rip), xcounter, xcounter vptest ddq_low_msk(%rip), xcounter jnz 1f vpaddq ddq_high_add_1(%rip), xcounter, xcounter 1: .endif .set i, 1 .rept (by - 1) club 
XDATA, i vpxor xkey0, var_xdata, var_xdata .set i, (i +1) .endr vmovdqa 2*16(p_keys), xkeyB .set i, 0 .rept by club XDATA, i vaesenc xkeyA, var_xdata, var_xdata /* key 1 */ .set i, (i +1) .endr .if (klen == KEY_128) .if (load_keys) vmovdqa 3*16(p_keys), xkey4 .endif .else vmovdqa 3*16(p_keys), xkeyA .endif .set i, 0 .rept by club XDATA, i vaesenc xkeyB, var_xdata, var_xdata /* key 2 */ .set i, (i +1) .endr add $(16*by), p_in .if (klen == KEY_128) vmovdqa 4*16(p_keys), xkeyB .else .if (load_keys) vmovdqa 4*16(p_keys), xkey4 .endif .endif .set i, 0 .rept by club XDATA, i /* key 3 */ .if (klen == KEY_128) vaesenc xkey4, var_xdata, var_xdata .else vaesenc xkeyA, var_xdata, var_xdata .endif .set i, (i +1) .endr vmovdqa 5*16(p_keys), xkeyA .set i, 0 .rept by club XDATA, i /* key 4 */ .if (klen == KEY_128) vaesenc xkeyB, var_xdata, var_xdata .else vaesenc xkey4, var_xdata, var_xdata .endif .set i, (i +1) .endr .if (klen == KEY_128) .if (load_keys) vmovdqa 6*16(p_keys), xkey8 .endif .else vmovdqa 6*16(p_keys), xkeyB .endif .set i, 0 .rept by club XDATA, i vaesenc xkeyA, var_xdata, var_xdata /* key 5 */ .set i, (i +1) .endr vmovdqa 7*16(p_keys), xkeyA .set i, 0 .rept by club XDATA, i /* key 6 */ .if (klen == KEY_128) vaesenc xkey8, var_xdata, var_xdata .else vaesenc xkeyB, var_xdata, var_xdata .endif .set i, (i +1) .endr .if (klen == KEY_128) vmovdqa 8*16(p_keys), xkeyB .else .if (load_keys) vmovdqa 8*16(p_keys), xkey8 .endif .endif .set i, 0 .rept by club XDATA, i vaesenc xkeyA, var_xdata, var_xdata /* key 7 */ .set i, (i +1) .endr .if (klen == KEY_128) .if (load_keys) vmovdqa 9*16(p_keys), xkey12 .endif .else vmovdqa 9*16(p_keys), xkeyA .endif .set i, 0 .rept by club XDATA, i /* key 8 */ .if (klen == KEY_128) vaesenc xkeyB, var_xdata, var_xdata .else vaesenc xkey8, var_xdata, var_xdata .endif .set i, (i +1) .endr vmovdqa 10*16(p_keys), xkeyB .set i, 0 .rept by club XDATA, i /* key 9 */ .if (klen == KEY_128) vaesenc xkey12, var_xdata, var_xdata .else vaesenc xkeyA, var_xdata, var_xdata .endif .set i, (i +1) .endr .if (klen != KEY_128) vmovdqa 11*16(p_keys), xkeyA .endif .set i, 0 .rept by club XDATA, i /* key 10 */ .if (klen == KEY_128) vaesenclast xkeyB, var_xdata, var_xdata .else vaesenc xkeyB, var_xdata, var_xdata .endif .set i, (i +1) .endr .if (klen != KEY_128) .if (load_keys) vmovdqa 12*16(p_keys), xkey12 .endif .set i, 0 .rept by club XDATA, i vaesenc xkeyA, var_xdata, var_xdata /* key 11 */ .set i, (i +1) .endr .if (klen == KEY_256) vmovdqa 13*16(p_keys), xkeyA .endif .set i, 0 .rept by club XDATA, i .if (klen == KEY_256) /* key 12 */ vaesenc xkey12, var_xdata, var_xdata .else vaesenclast xkey12, var_xdata, var_xdata .endif .set i, (i +1) .endr .if (klen == KEY_256) vmovdqa 14*16(p_keys), xkeyB .set i, 0 .rept by club XDATA, i /* key 13 */ vaesenc xkeyA, var_xdata, var_xdata .set i, (i +1) .endr .set i, 0 .rept by club XDATA, i /* key 14 */ vaesenclast xkeyB, var_xdata, var_xdata .set i, (i +1) .endr .endif .endif .set i, 0 .rept (by / 2) .set j, (i+1) VMOVDQ (i*16 - 16*by)(p_in), xkeyA VMOVDQ (j*16 - 16*by)(p_in), xkeyB club XDATA, i vpxor xkeyA, var_xdata, var_xdata club XDATA, j vpxor xkeyB, var_xdata, var_xdata .set i, (i+2) .endr .if (i < by) VMOVDQ (i*16 - 16*by)(p_in), xkeyA club XDATA, i vpxor xkeyA, var_xdata, var_xdata .endif .set i, 0 .rept by club XDATA, i VMOVDQ var_xdata, i*16(p_out) .set i, (i+1) .endr .endm .macro do_aes_load val, key_len, xctr do_aes \val, 1, \key_len, \xctr .endm .macro do_aes_noload val, key_len, xctr do_aes \val, 0, \key_len, \xctr .endm /* main body of 
aes ctr load */ .macro do_aes_ctrmain key_len, xctr cmp $16, num_bytes jb .Ldo_return2\xctr\key_len .if \xctr shr $4, counter vmovdqu (p_iv), xiv .else vmovdqa byteswap_const(%rip), xbyteswap vmovdqu (p_iv), xcounter vpshufb xbyteswap, xcounter, xcounter .endif mov num_bytes, tmp and $(7*16), tmp jz .Lmult_of_8_blks\xctr\key_len /* 1 <= tmp <= 7 */ cmp $(4*16), tmp jg .Lgt4\xctr\key_len je .Leq4\xctr\key_len .Llt4\xctr\key_len: cmp $(2*16), tmp jg .Leq3\xctr\key_len je .Leq2\xctr\key_len .Leq1\xctr\key_len: do_aes_load 1, \key_len, \xctr add $(1*16), p_out and $(~7*16), num_bytes jz .Ldo_return2\xctr\key_len jmp .Lmain_loop2\xctr\key_len .Leq2\xctr\key_len: do_aes_load 2, \key_len, \xctr add $(2*16), p_out and $(~7*16), num_bytes jz .Ldo_return2\xctr\key_len jmp .Lmain_loop2\xctr\key_len .Leq3\xctr\key_len: do_aes_load 3, \key_len, \xctr add $(3*16), p_out and $(~7*16), num_bytes jz .Ldo_return2\xctr\key_len jmp .Lmain_loop2\xctr\key_len .Leq4\xctr\key_len: do_aes_load 4, \key_len, \xctr add $(4*16), p_out and $(~7*16), num_bytes jz .Ldo_return2\xctr\key_len jmp .Lmain_loop2\xctr\key_len .Lgt4\xctr\key_len: cmp $(6*16), tmp jg .Leq7\xctr\key_len je .Leq6\xctr\key_len .Leq5\xctr\key_len: do_aes_load 5, \key_len, \xctr add $(5*16), p_out and $(~7*16), num_bytes jz .Ldo_return2\xctr\key_len jmp .Lmain_loop2\xctr\key_len .Leq6\xctr\key_len: do_aes_load 6, \key_len, \xctr add $(6*16), p_out and $(~7*16), num_bytes jz .Ldo_return2\xctr\key_len jmp .Lmain_loop2\xctr\key_len .Leq7\xctr\key_len: do_aes_load 7, \key_len, \xctr add $(7*16), p_out and $(~7*16), num_bytes jz .Ldo_return2\xctr\key_len jmp .Lmain_loop2\xctr\key_len .Lmult_of_8_blks\xctr\key_len: .if (\key_len != KEY_128) vmovdqa 0*16(p_keys), xkey0 vmovdqa 4*16(p_keys), xkey4 vmovdqa 8*16(p_keys), xkey8 vmovdqa 12*16(p_keys), xkey12 .else vmovdqa 0*16(p_keys), xkey0 vmovdqa 3*16(p_keys), xkey4 vmovdqa 6*16(p_keys), xkey8 vmovdqa 9*16(p_keys), xkey12 .endif .align 16 .Lmain_loop2\xctr\key_len: /* num_bytes is a multiple of 8 and >0 */ do_aes_noload 8, \key_len, \xctr add $(8*16), p_out sub $(8*16), num_bytes jne .Lmain_loop2\xctr\key_len .Ldo_return2\xctr\key_len: .if !\xctr /* return updated IV */ vpshufb xbyteswap, xcounter, xcounter vmovdqu xcounter, (p_iv) .endif RET .endm /* * routine to do AES128 CTR enc/decrypt "by8" * XMM registers are clobbered. * Saving/restoring must be done at a higher level * aes_ctr_enc_128_avx_by8(void *in, void *iv, void *keys, void *out, * unsigned int num_bytes) */ SYM_FUNC_START(aes_ctr_enc_128_avx_by8) /* call the aes main loop */ do_aes_ctrmain KEY_128 0 SYM_FUNC_END(aes_ctr_enc_128_avx_by8) /* * routine to do AES192 CTR enc/decrypt "by8" * XMM registers are clobbered. * Saving/restoring must be done at a higher level * aes_ctr_enc_192_avx_by8(void *in, void *iv, void *keys, void *out, * unsigned int num_bytes) */ SYM_FUNC_START(aes_ctr_enc_192_avx_by8) /* call the aes main loop */ do_aes_ctrmain KEY_192 0 SYM_FUNC_END(aes_ctr_enc_192_avx_by8) /* * routine to do AES256 CTR enc/decrypt "by8" * XMM registers are clobbered. * Saving/restoring must be done at a higher level * aes_ctr_enc_256_avx_by8(void *in, void *iv, void *keys, void *out, * unsigned int num_bytes) */ SYM_FUNC_START(aes_ctr_enc_256_avx_by8) /* call the aes main loop */ do_aes_ctrmain KEY_256 0 SYM_FUNC_END(aes_ctr_enc_256_avx_by8) /* * routine to do AES128 XCTR enc/decrypt "by8" * XMM registers are clobbered. 
* Saving/restoring must be done at a higher level * aes_xctr_enc_128_avx_by8(const u8 *in, const u8 *iv, const void *keys, * u8* out, unsigned int num_bytes, unsigned int byte_ctr) */ SYM_FUNC_START(aes_xctr_enc_128_avx_by8) /* call the aes main loop */ do_aes_ctrmain KEY_128 1 SYM_FUNC_END(aes_xctr_enc_128_avx_by8) /* * routine to do AES192 XCTR enc/decrypt "by8" * XMM registers are clobbered. * Saving/restoring must be done at a higher level * aes_xctr_enc_192_avx_by8(const u8 *in, const u8 *iv, const void *keys, * u8* out, unsigned int num_bytes, unsigned int byte_ctr) */ SYM_FUNC_START(aes_xctr_enc_192_avx_by8) /* call the aes main loop */ do_aes_ctrmain KEY_192 1 SYM_FUNC_END(aes_xctr_enc_192_avx_by8) /* * routine to do AES256 XCTR enc/decrypt "by8" * XMM registers are clobbered. * Saving/restoring must be done at a higher level * aes_xctr_enc_256_avx_by8(const u8 *in, const u8 *iv, const void *keys, * u8* out, unsigned int num_bytes, unsigned int byte_ctr) */ SYM_FUNC_START(aes_xctr_enc_256_avx_by8) /* call the aes main loop */ do_aes_ctrmain KEY_256 1 SYM_FUNC_END(aes_xctr_enc_256_avx_by8)
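The by8 routines above unroll eight blocks per iteration, but the per-block round sequencing in the do_aes macro is the standard AES-NI pattern: XOR in round key 0, run AESENC with round keys 1 through nr-1, and finish with AESENCLAST using round key nr, where nr is 10, 12 or 14 for the 128/192/256-bit variants. A minimal C-intrinsics sketch of that single-block sequencing follows; it is illustrative only (it assumes the caller already holds an expanded key schedule) and is not part of this file.

#include <immintrin.h>	/* _mm_aesenc_si128 and friends; build with -maes */

/*
 * Encrypt one 16-byte block with an already-expanded key schedule rk[],
 * mirroring the round order of the do_aes macro: whitening XOR with
 * rk[0], nr-1 AESENC rounds, then one AESENCLAST with rk[nr].
 */
static __m128i aes_enc_block(__m128i block, const __m128i *rk, int nr)
{
	int i;

	block = _mm_xor_si128(block, rk[0]);
	for (i = 1; i < nr; i++)
		block = _mm_aesenc_si128(block, rk[i]);
	return _mm_aesenclast_si128(block, rk[nr]);
}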
aixcc-public/challenge-001-exemplar-source
8,357
arch/x86/crypto/twofish-i586-asm_32.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /*************************************************************************** * Copyright (C) 2006 by Joachim Fritschi, <jfritschi@freenet.de> * * * ***************************************************************************/ .file "twofish-i586-asm.S" .text #include <linux/linkage.h> #include <asm/asm-offsets.h> /* return address at 0 */ #define in_blk 12 /* input byte array address parameter*/ #define out_blk 8 /* output byte array address parameter*/ #define ctx 4 /* Twofish context structure */ #define a_offset 0 #define b_offset 4 #define c_offset 8 #define d_offset 12 /* Structure of the crypto context struct*/ #define s0 0 /* S0 Array 256 Words each */ #define s1 1024 /* S1 Array */ #define s2 2048 /* S2 Array */ #define s3 3072 /* S3 Array */ #define w 4096 /* 8 whitening keys (word) */ #define k 4128 /* key 1-32 ( word ) */ /* define a few register aliases to allow macro substitution */ #define R0D %eax #define R0B %al #define R0H %ah #define R1D %ebx #define R1B %bl #define R1H %bh #define R2D %ecx #define R2B %cl #define R2H %ch #define R3D %edx #define R3B %dl #define R3H %dh /* performs input whitening */ #define input_whitening(src,context,offset)\ xor w+offset(context), src; /* performs input whitening */ #define output_whitening(src,context,offset)\ xor w+16+offset(context), src; /* * a input register containing a (rotated 16) * b input register containing b * c input register containing c * d input register containing d (already rol $1) * operations on a and b are interleaved to increase performance */ #define encrypt_round(a,b,c,d,round)\ push d ## D;\ movzx b ## B, %edi;\ mov s1(%ebp,%edi,4),d ## D;\ movzx a ## B, %edi;\ mov s2(%ebp,%edi,4),%esi;\ movzx b ## H, %edi;\ ror $16, b ## D;\ xor s2(%ebp,%edi,4),d ## D;\ movzx a ## H, %edi;\ ror $16, a ## D;\ xor s3(%ebp,%edi,4),%esi;\ movzx b ## B, %edi;\ xor s3(%ebp,%edi,4),d ## D;\ movzx a ## B, %edi;\ xor (%ebp,%edi,4), %esi;\ movzx b ## H, %edi;\ ror $15, b ## D;\ xor (%ebp,%edi,4), d ## D;\ movzx a ## H, %edi;\ xor s1(%ebp,%edi,4),%esi;\ pop %edi;\ add d ## D, %esi;\ add %esi, d ## D;\ add k+round(%ebp), %esi;\ xor %esi, c ## D;\ rol $15, c ## D;\ add k+4+round(%ebp),d ## D;\ xor %edi, d ## D; /* * a input register containing a (rotated 16) * b input register containing b * c input register containing c * d input register containing d (already rol $1) * operations on a and b are interleaved to increase performance * last round has different rotations for the output preparation */ #define encrypt_last_round(a,b,c,d,round)\ push d ## D;\ movzx b ## B, %edi;\ mov s1(%ebp,%edi,4),d ## D;\ movzx a ## B, %edi;\ mov s2(%ebp,%edi,4),%esi;\ movzx b ## H, %edi;\ ror $16, b ## D;\ xor s2(%ebp,%edi,4),d ## D;\ movzx a ## H, %edi;\ ror $16, a ## D;\ xor s3(%ebp,%edi,4),%esi;\ movzx b ## B, %edi;\ xor s3(%ebp,%edi,4),d ## D;\ movzx a ## B, %edi;\ xor (%ebp,%edi,4), %esi;\ movzx b ## H, %edi;\ ror $16, b ## D;\ xor (%ebp,%edi,4), d ## D;\ movzx a ## H, %edi;\ xor s1(%ebp,%edi,4),%esi;\ pop %edi;\ add d ## D, %esi;\ add %esi, d ## D;\ add k+round(%ebp), %esi;\ xor %esi, c ## D;\ ror $1, c ## D;\ add k+4+round(%ebp),d ## D;\ xor %edi, d ## D; /* * a input register containing a * b input register containing b (rotated 16) * c input register containing c * d input register containing d (already rol $1) * operations on a and b are interleaved to increase performance */ #define decrypt_round(a,b,c,d,round)\ push c ## D;\ movzx a ## B, %edi;\ mov (%ebp,%edi,4), c ## D;\ movzx b ## B, %edi;\ mov 
s3(%ebp,%edi,4),%esi;\ movzx a ## H, %edi;\ ror $16, a ## D;\ xor s1(%ebp,%edi,4),c ## D;\ movzx b ## H, %edi;\ ror $16, b ## D;\ xor (%ebp,%edi,4), %esi;\ movzx a ## B, %edi;\ xor s2(%ebp,%edi,4),c ## D;\ movzx b ## B, %edi;\ xor s1(%ebp,%edi,4),%esi;\ movzx a ## H, %edi;\ ror $15, a ## D;\ xor s3(%ebp,%edi,4),c ## D;\ movzx b ## H, %edi;\ xor s2(%ebp,%edi,4),%esi;\ pop %edi;\ add %esi, c ## D;\ add c ## D, %esi;\ add k+round(%ebp), c ## D;\ xor %edi, c ## D;\ add k+4+round(%ebp),%esi;\ xor %esi, d ## D;\ rol $15, d ## D; /* * a input register containing a * b input register containing b (rotated 16) * c input register containing c * d input register containing d (already rol $1) * operations on a and b are interleaved to increase performance * last round has different rotations for the output preparation */ #define decrypt_last_round(a,b,c,d,round)\ push c ## D;\ movzx a ## B, %edi;\ mov (%ebp,%edi,4), c ## D;\ movzx b ## B, %edi;\ mov s3(%ebp,%edi,4),%esi;\ movzx a ## H, %edi;\ ror $16, a ## D;\ xor s1(%ebp,%edi,4),c ## D;\ movzx b ## H, %edi;\ ror $16, b ## D;\ xor (%ebp,%edi,4), %esi;\ movzx a ## B, %edi;\ xor s2(%ebp,%edi,4),c ## D;\ movzx b ## B, %edi;\ xor s1(%ebp,%edi,4),%esi;\ movzx a ## H, %edi;\ ror $16, a ## D;\ xor s3(%ebp,%edi,4),c ## D;\ movzx b ## H, %edi;\ xor s2(%ebp,%edi,4),%esi;\ pop %edi;\ add %esi, c ## D;\ add c ## D, %esi;\ add k+round(%ebp), c ## D;\ xor %edi, c ## D;\ add k+4+round(%ebp),%esi;\ xor %esi, d ## D;\ ror $1, d ## D; SYM_FUNC_START(twofish_enc_blk) push %ebp /* save registers according to calling convention*/ push %ebx push %esi push %edi mov ctx + 16(%esp), %ebp /* abuse the base pointer: set new base * pointer to the ctx address */ mov in_blk+16(%esp),%edi /* input address in edi */ mov (%edi), %eax mov b_offset(%edi), %ebx mov c_offset(%edi), %ecx mov d_offset(%edi), %edx input_whitening(%eax,%ebp,a_offset) ror $16, %eax input_whitening(%ebx,%ebp,b_offset) input_whitening(%ecx,%ebp,c_offset) input_whitening(%edx,%ebp,d_offset) rol $1, %edx encrypt_round(R0,R1,R2,R3,0); encrypt_round(R2,R3,R0,R1,8); encrypt_round(R0,R1,R2,R3,2*8); encrypt_round(R2,R3,R0,R1,3*8); encrypt_round(R0,R1,R2,R3,4*8); encrypt_round(R2,R3,R0,R1,5*8); encrypt_round(R0,R1,R2,R3,6*8); encrypt_round(R2,R3,R0,R1,7*8); encrypt_round(R0,R1,R2,R3,8*8); encrypt_round(R2,R3,R0,R1,9*8); encrypt_round(R0,R1,R2,R3,10*8); encrypt_round(R2,R3,R0,R1,11*8); encrypt_round(R0,R1,R2,R3,12*8); encrypt_round(R2,R3,R0,R1,13*8); encrypt_round(R0,R1,R2,R3,14*8); encrypt_last_round(R2,R3,R0,R1,15*8); output_whitening(%eax,%ebp,c_offset) output_whitening(%ebx,%ebp,d_offset) output_whitening(%ecx,%ebp,a_offset) output_whitening(%edx,%ebp,b_offset) mov out_blk+16(%esp),%edi; mov %eax, c_offset(%edi) mov %ebx, d_offset(%edi) mov %ecx, (%edi) mov %edx, b_offset(%edi) pop %edi pop %esi pop %ebx pop %ebp mov $1, %eax RET SYM_FUNC_END(twofish_enc_blk) SYM_FUNC_START(twofish_dec_blk) push %ebp /* save registers according to calling convention*/ push %ebx push %esi push %edi mov ctx + 16(%esp), %ebp /* abuse the base pointer: set new base * pointer to the ctx address */ mov in_blk+16(%esp),%edi /* input address in edi */ mov (%edi), %eax mov b_offset(%edi), %ebx mov c_offset(%edi), %ecx mov d_offset(%edi), %edx output_whitening(%eax,%ebp,a_offset) output_whitening(%ebx,%ebp,b_offset) ror $16, %ebx output_whitening(%ecx,%ebp,c_offset) output_whitening(%edx,%ebp,d_offset) rol $1, %ecx decrypt_round(R0,R1,R2,R3,15*8); decrypt_round(R2,R3,R0,R1,14*8); decrypt_round(R0,R1,R2,R3,13*8); 
decrypt_round(R2,R3,R0,R1,12*8); decrypt_round(R0,R1,R2,R3,11*8); decrypt_round(R2,R3,R0,R1,10*8); decrypt_round(R0,R1,R2,R3,9*8); decrypt_round(R2,R3,R0,R1,8*8); decrypt_round(R0,R1,R2,R3,7*8); decrypt_round(R2,R3,R0,R1,6*8); decrypt_round(R0,R1,R2,R3,5*8); decrypt_round(R2,R3,R0,R1,4*8); decrypt_round(R0,R1,R2,R3,3*8); decrypt_round(R2,R3,R0,R1,2*8); decrypt_round(R0,R1,R2,R3,1*8); decrypt_last_round(R2,R3,R0,R1,0); input_whitening(%eax,%ebp,c_offset) input_whitening(%ebx,%ebp,d_offset) input_whitening(%ecx,%ebp,a_offset) input_whitening(%edx,%ebp,b_offset) mov out_blk+16(%esp),%edi; mov %eax, c_offset(%edi) mov %ebx, d_offset(%edi) mov %ecx, (%edi) mov %edx, b_offset(%edi) pop %edi pop %esi pop %ebx pop %ebp mov $1, %eax RET SYM_FUNC_END(twofish_dec_blk)
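The s0/s1/s2/s3, w and k offsets used above assume a context layout of four 1 KiB S-box tables followed by eight whitening words and thirty-two round-key words. The C sketch below spells out that implied layout and checks the two offsets the assembly hard-codes at compile time; the struct name is illustrative, not necessarily the kernel's own.

#include <stdint.h>
#include <stddef.h>
#include <assert.h>

/* Layout implied by the offsets used by the assembly above. */
struct twofish_ctx_layout {
	uint32_t s[4][256];	/* s0..s3 at offsets 0, 1024, 2048, 3072 */
	uint32_t w[8];		/* 8 whitening words at offset 4096 */
	uint32_t k[32];		/* 32 round-key words at offset 4128 */
};

static_assert(offsetof(struct twofish_ctx_layout, w) == 4096,
	      "whitening keys must sit at offset 4096");
static_assert(offsetof(struct twofish_ctx_layout, k) == 4128,
	      "round keys must sit at offset 4128");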
aixcc-public/challenge-001-exemplar-source
14,176
arch/x86/crypto/aegis128-aesni-asm.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * AES-NI + SSE2 implementation of AEGIS-128 * * Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com> * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. */ #include <linux/linkage.h> #include <linux/cfi_types.h> #include <asm/frame.h> #define STATE0 %xmm0 #define STATE1 %xmm1 #define STATE2 %xmm2 #define STATE3 %xmm3 #define STATE4 %xmm4 #define KEY %xmm5 #define MSG %xmm5 #define T0 %xmm6 #define T1 %xmm7 #define STATEP %rdi #define LEN %rsi #define SRC %rdx #define DST %rcx .section .rodata.cst16.aegis128_const, "aM", @progbits, 32 .align 16 .Laegis128_const_0: .byte 0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x08, 0x0d .byte 0x15, 0x22, 0x37, 0x59, 0x90, 0xe9, 0x79, 0x62 .Laegis128_const_1: .byte 0xdb, 0x3d, 0x18, 0x55, 0x6d, 0xc2, 0x2f, 0xf1 .byte 0x20, 0x11, 0x31, 0x42, 0x73, 0xb5, 0x28, 0xdd .section .rodata.cst16.aegis128_counter, "aM", @progbits, 16 .align 16 .Laegis128_counter: .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 .byte 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f .text /* * aegis128_update * input: * STATE[0-4] - input state * output: * STATE[0-4] - output state (shifted positions) * changed: * T0 */ .macro aegis128_update movdqa STATE4, T0 aesenc STATE0, STATE4 aesenc STATE1, STATE0 aesenc STATE2, STATE1 aesenc STATE3, STATE2 aesenc T0, STATE3 .endm /* * __load_partial: internal ABI * input: * LEN - bytes * SRC - src * output: * MSG - message block * changed: * T0 * %r8 * %r9 */ SYM_FUNC_START_LOCAL(__load_partial) xor %r9d, %r9d pxor MSG, MSG mov LEN, %r8 and $0x1, %r8 jz .Lld_partial_1 mov LEN, %r8 and $0x1E, %r8 add SRC, %r8 mov (%r8), %r9b .Lld_partial_1: mov LEN, %r8 and $0x2, %r8 jz .Lld_partial_2 mov LEN, %r8 and $0x1C, %r8 add SRC, %r8 shl $0x10, %r9 mov (%r8), %r9w .Lld_partial_2: mov LEN, %r8 and $0x4, %r8 jz .Lld_partial_4 mov LEN, %r8 and $0x18, %r8 add SRC, %r8 shl $32, %r9 mov (%r8), %r8d xor %r8, %r9 .Lld_partial_4: movq %r9, MSG mov LEN, %r8 and $0x8, %r8 jz .Lld_partial_8 mov LEN, %r8 and $0x10, %r8 add SRC, %r8 pslldq $8, MSG movq (%r8), T0 pxor T0, MSG .Lld_partial_8: RET SYM_FUNC_END(__load_partial) /* * __store_partial: internal ABI * input: * LEN - bytes * DST - dst * output: * T0 - message block * changed: * %r8 * %r9 * %r10 */ SYM_FUNC_START_LOCAL(__store_partial) mov LEN, %r8 mov DST, %r9 movq T0, %r10 cmp $8, %r8 jl .Lst_partial_8 mov %r10, (%r9) psrldq $8, T0 movq T0, %r10 sub $8, %r8 add $8, %r9 .Lst_partial_8: cmp $4, %r8 jl .Lst_partial_4 mov %r10d, (%r9) shr $32, %r10 sub $4, %r8 add $4, %r9 .Lst_partial_4: cmp $2, %r8 jl .Lst_partial_2 mov %r10w, (%r9) shr $0x10, %r10 sub $2, %r8 add $2, %r9 .Lst_partial_2: cmp $1, %r8 jl .Lst_partial_1 mov %r10b, (%r9) .Lst_partial_1: RET SYM_FUNC_END(__store_partial) /* * void crypto_aegis128_aesni_init(void *state, const void *key, const void *iv); */ SYM_FUNC_START(crypto_aegis128_aesni_init) FRAME_BEGIN /* load IV: */ movdqu (%rdx), T1 /* load key: */ movdqa (%rsi), KEY pxor KEY, T1 movdqa T1, STATE0 movdqa KEY, STATE3 movdqa KEY, STATE4 /* load the constants: */ movdqa .Laegis128_const_0, STATE2 movdqa .Laegis128_const_1, STATE1 pxor STATE2, STATE3 pxor STATE1, STATE4 /* update 10 times with KEY / KEY xor IV: */ aegis128_update; pxor KEY, STATE4 aegis128_update; pxor T1, STATE3 aegis128_update; pxor KEY, STATE2 aegis128_update; pxor T1, STATE1 aegis128_update; pxor KEY, STATE0 aegis128_update; pxor T1, STATE4 aegis128_update; pxor KEY, STATE3 aegis128_update; pxor T1, STATE2 aegis128_update; pxor KEY, STATE1 aegis128_update; pxor T1, 
STATE0 /* store the state: */ movdqu STATE0, 0x00(STATEP) movdqu STATE1, 0x10(STATEP) movdqu STATE2, 0x20(STATEP) movdqu STATE3, 0x30(STATEP) movdqu STATE4, 0x40(STATEP) FRAME_END RET SYM_FUNC_END(crypto_aegis128_aesni_init) /* * void crypto_aegis128_aesni_ad(void *state, unsigned int length, * const void *data); */ SYM_FUNC_START(crypto_aegis128_aesni_ad) FRAME_BEGIN cmp $0x10, LEN jb .Lad_out /* load the state: */ movdqu 0x00(STATEP), STATE0 movdqu 0x10(STATEP), STATE1 movdqu 0x20(STATEP), STATE2 movdqu 0x30(STATEP), STATE3 movdqu 0x40(STATEP), STATE4 mov SRC, %r8 and $0xF, %r8 jnz .Lad_u_loop .align 8 .Lad_a_loop: movdqa 0x00(SRC), MSG aegis128_update pxor MSG, STATE4 sub $0x10, LEN cmp $0x10, LEN jl .Lad_out_1 movdqa 0x10(SRC), MSG aegis128_update pxor MSG, STATE3 sub $0x10, LEN cmp $0x10, LEN jl .Lad_out_2 movdqa 0x20(SRC), MSG aegis128_update pxor MSG, STATE2 sub $0x10, LEN cmp $0x10, LEN jl .Lad_out_3 movdqa 0x30(SRC), MSG aegis128_update pxor MSG, STATE1 sub $0x10, LEN cmp $0x10, LEN jl .Lad_out_4 movdqa 0x40(SRC), MSG aegis128_update pxor MSG, STATE0 sub $0x10, LEN cmp $0x10, LEN jl .Lad_out_0 add $0x50, SRC jmp .Lad_a_loop .align 8 .Lad_u_loop: movdqu 0x00(SRC), MSG aegis128_update pxor MSG, STATE4 sub $0x10, LEN cmp $0x10, LEN jl .Lad_out_1 movdqu 0x10(SRC), MSG aegis128_update pxor MSG, STATE3 sub $0x10, LEN cmp $0x10, LEN jl .Lad_out_2 movdqu 0x20(SRC), MSG aegis128_update pxor MSG, STATE2 sub $0x10, LEN cmp $0x10, LEN jl .Lad_out_3 movdqu 0x30(SRC), MSG aegis128_update pxor MSG, STATE1 sub $0x10, LEN cmp $0x10, LEN jl .Lad_out_4 movdqu 0x40(SRC), MSG aegis128_update pxor MSG, STATE0 sub $0x10, LEN cmp $0x10, LEN jl .Lad_out_0 add $0x50, SRC jmp .Lad_u_loop /* store the state: */ .Lad_out_0: movdqu STATE0, 0x00(STATEP) movdqu STATE1, 0x10(STATEP) movdqu STATE2, 0x20(STATEP) movdqu STATE3, 0x30(STATEP) movdqu STATE4, 0x40(STATEP) FRAME_END RET .Lad_out_1: movdqu STATE4, 0x00(STATEP) movdqu STATE0, 0x10(STATEP) movdqu STATE1, 0x20(STATEP) movdqu STATE2, 0x30(STATEP) movdqu STATE3, 0x40(STATEP) FRAME_END RET .Lad_out_2: movdqu STATE3, 0x00(STATEP) movdqu STATE4, 0x10(STATEP) movdqu STATE0, 0x20(STATEP) movdqu STATE1, 0x30(STATEP) movdqu STATE2, 0x40(STATEP) FRAME_END RET .Lad_out_3: movdqu STATE2, 0x00(STATEP) movdqu STATE3, 0x10(STATEP) movdqu STATE4, 0x20(STATEP) movdqu STATE0, 0x30(STATEP) movdqu STATE1, 0x40(STATEP) FRAME_END RET .Lad_out_4: movdqu STATE1, 0x00(STATEP) movdqu STATE2, 0x10(STATEP) movdqu STATE3, 0x20(STATEP) movdqu STATE4, 0x30(STATEP) movdqu STATE0, 0x40(STATEP) FRAME_END RET .Lad_out: FRAME_END RET SYM_FUNC_END(crypto_aegis128_aesni_ad) .macro encrypt_block a s0 s1 s2 s3 s4 i movdq\a (\i * 0x10)(SRC), MSG movdqa MSG, T0 pxor \s1, T0 pxor \s4, T0 movdqa \s2, T1 pand \s3, T1 pxor T1, T0 movdq\a T0, (\i * 0x10)(DST) aegis128_update pxor MSG, \s4 sub $0x10, LEN cmp $0x10, LEN jl .Lenc_out_\i .endm /* * void crypto_aegis128_aesni_enc(void *state, unsigned int length, * const void *src, void *dst); */ SYM_TYPED_FUNC_START(crypto_aegis128_aesni_enc) FRAME_BEGIN cmp $0x10, LEN jb .Lenc_out /* load the state: */ movdqu 0x00(STATEP), STATE0 movdqu 0x10(STATEP), STATE1 movdqu 0x20(STATEP), STATE2 movdqu 0x30(STATEP), STATE3 movdqu 0x40(STATEP), STATE4 mov SRC, %r8 or DST, %r8 and $0xF, %r8 jnz .Lenc_u_loop .align 8 .Lenc_a_loop: encrypt_block a STATE0 STATE1 STATE2 STATE3 STATE4 0 encrypt_block a STATE4 STATE0 STATE1 STATE2 STATE3 1 encrypt_block a STATE3 STATE4 STATE0 STATE1 STATE2 2 encrypt_block a STATE2 STATE3 STATE4 STATE0 STATE1 3 encrypt_block a STATE1 STATE2 
STATE3 STATE4 STATE0 4 add $0x50, SRC add $0x50, DST jmp .Lenc_a_loop .align 8 .Lenc_u_loop: encrypt_block u STATE0 STATE1 STATE2 STATE3 STATE4 0 encrypt_block u STATE4 STATE0 STATE1 STATE2 STATE3 1 encrypt_block u STATE3 STATE4 STATE0 STATE1 STATE2 2 encrypt_block u STATE2 STATE3 STATE4 STATE0 STATE1 3 encrypt_block u STATE1 STATE2 STATE3 STATE4 STATE0 4 add $0x50, SRC add $0x50, DST jmp .Lenc_u_loop /* store the state: */ .Lenc_out_0: movdqu STATE4, 0x00(STATEP) movdqu STATE0, 0x10(STATEP) movdqu STATE1, 0x20(STATEP) movdqu STATE2, 0x30(STATEP) movdqu STATE3, 0x40(STATEP) FRAME_END RET .Lenc_out_1: movdqu STATE3, 0x00(STATEP) movdqu STATE4, 0x10(STATEP) movdqu STATE0, 0x20(STATEP) movdqu STATE1, 0x30(STATEP) movdqu STATE2, 0x40(STATEP) FRAME_END RET .Lenc_out_2: movdqu STATE2, 0x00(STATEP) movdqu STATE3, 0x10(STATEP) movdqu STATE4, 0x20(STATEP) movdqu STATE0, 0x30(STATEP) movdqu STATE1, 0x40(STATEP) FRAME_END RET .Lenc_out_3: movdqu STATE1, 0x00(STATEP) movdqu STATE2, 0x10(STATEP) movdqu STATE3, 0x20(STATEP) movdqu STATE4, 0x30(STATEP) movdqu STATE0, 0x40(STATEP) FRAME_END RET .Lenc_out_4: movdqu STATE0, 0x00(STATEP) movdqu STATE1, 0x10(STATEP) movdqu STATE2, 0x20(STATEP) movdqu STATE3, 0x30(STATEP) movdqu STATE4, 0x40(STATEP) FRAME_END RET .Lenc_out: FRAME_END RET SYM_FUNC_END(crypto_aegis128_aesni_enc) /* * void crypto_aegis128_aesni_enc_tail(void *state, unsigned int length, * const void *src, void *dst); */ SYM_TYPED_FUNC_START(crypto_aegis128_aesni_enc_tail) FRAME_BEGIN /* load the state: */ movdqu 0x00(STATEP), STATE0 movdqu 0x10(STATEP), STATE1 movdqu 0x20(STATEP), STATE2 movdqu 0x30(STATEP), STATE3 movdqu 0x40(STATEP), STATE4 /* encrypt message: */ call __load_partial movdqa MSG, T0 pxor STATE1, T0 pxor STATE4, T0 movdqa STATE2, T1 pand STATE3, T1 pxor T1, T0 call __store_partial aegis128_update pxor MSG, STATE4 /* store the state: */ movdqu STATE4, 0x00(STATEP) movdqu STATE0, 0x10(STATEP) movdqu STATE1, 0x20(STATEP) movdqu STATE2, 0x30(STATEP) movdqu STATE3, 0x40(STATEP) FRAME_END RET SYM_FUNC_END(crypto_aegis128_aesni_enc_tail) .macro decrypt_block a s0 s1 s2 s3 s4 i movdq\a (\i * 0x10)(SRC), MSG pxor \s1, MSG pxor \s4, MSG movdqa \s2, T1 pand \s3, T1 pxor T1, MSG movdq\a MSG, (\i * 0x10)(DST) aegis128_update pxor MSG, \s4 sub $0x10, LEN cmp $0x10, LEN jl .Ldec_out_\i .endm /* * void crypto_aegis128_aesni_dec(void *state, unsigned int length, * const void *src, void *dst); */ SYM_TYPED_FUNC_START(crypto_aegis128_aesni_dec) FRAME_BEGIN cmp $0x10, LEN jb .Ldec_out /* load the state: */ movdqu 0x00(STATEP), STATE0 movdqu 0x10(STATEP), STATE1 movdqu 0x20(STATEP), STATE2 movdqu 0x30(STATEP), STATE3 movdqu 0x40(STATEP), STATE4 mov SRC, %r8 or DST, %r8 and $0xF, %r8 jnz .Ldec_u_loop .align 8 .Ldec_a_loop: decrypt_block a STATE0 STATE1 STATE2 STATE3 STATE4 0 decrypt_block a STATE4 STATE0 STATE1 STATE2 STATE3 1 decrypt_block a STATE3 STATE4 STATE0 STATE1 STATE2 2 decrypt_block a STATE2 STATE3 STATE4 STATE0 STATE1 3 decrypt_block a STATE1 STATE2 STATE3 STATE4 STATE0 4 add $0x50, SRC add $0x50, DST jmp .Ldec_a_loop .align 8 .Ldec_u_loop: decrypt_block u STATE0 STATE1 STATE2 STATE3 STATE4 0 decrypt_block u STATE4 STATE0 STATE1 STATE2 STATE3 1 decrypt_block u STATE3 STATE4 STATE0 STATE1 STATE2 2 decrypt_block u STATE2 STATE3 STATE4 STATE0 STATE1 3 decrypt_block u STATE1 STATE2 STATE3 STATE4 STATE0 4 add $0x50, SRC add $0x50, DST jmp .Ldec_u_loop /* store the state: */ .Ldec_out_0: movdqu STATE4, 0x00(STATEP) movdqu STATE0, 0x10(STATEP) movdqu STATE1, 0x20(STATEP) movdqu STATE2, 0x30(STATEP) 
movdqu STATE3, 0x40(STATEP) FRAME_END RET .Ldec_out_1: movdqu STATE3, 0x00(STATEP) movdqu STATE4, 0x10(STATEP) movdqu STATE0, 0x20(STATEP) movdqu STATE1, 0x30(STATEP) movdqu STATE2, 0x40(STATEP) FRAME_END RET .Ldec_out_2: movdqu STATE2, 0x00(STATEP) movdqu STATE3, 0x10(STATEP) movdqu STATE4, 0x20(STATEP) movdqu STATE0, 0x30(STATEP) movdqu STATE1, 0x40(STATEP) FRAME_END RET .Ldec_out_3: movdqu STATE1, 0x00(STATEP) movdqu STATE2, 0x10(STATEP) movdqu STATE3, 0x20(STATEP) movdqu STATE4, 0x30(STATEP) movdqu STATE0, 0x40(STATEP) FRAME_END RET .Ldec_out_4: movdqu STATE0, 0x00(STATEP) movdqu STATE1, 0x10(STATEP) movdqu STATE2, 0x20(STATEP) movdqu STATE3, 0x30(STATEP) movdqu STATE4, 0x40(STATEP) FRAME_END RET .Ldec_out: FRAME_END RET SYM_FUNC_END(crypto_aegis128_aesni_dec) /* * void crypto_aegis128_aesni_dec_tail(void *state, unsigned int length, * const void *src, void *dst); */ SYM_TYPED_FUNC_START(crypto_aegis128_aesni_dec_tail) FRAME_BEGIN /* load the state: */ movdqu 0x00(STATEP), STATE0 movdqu 0x10(STATEP), STATE1 movdqu 0x20(STATEP), STATE2 movdqu 0x30(STATEP), STATE3 movdqu 0x40(STATEP), STATE4 /* decrypt message: */ call __load_partial pxor STATE1, MSG pxor STATE4, MSG movdqa STATE2, T1 pand STATE3, T1 pxor T1, MSG movdqa MSG, T0 call __store_partial /* mask with byte count: */ movq LEN, T0 punpcklbw T0, T0 punpcklbw T0, T0 punpcklbw T0, T0 punpcklbw T0, T0 movdqa .Laegis128_counter, T1 pcmpgtb T1, T0 pand T0, MSG aegis128_update pxor MSG, STATE4 /* store the state: */ movdqu STATE4, 0x00(STATEP) movdqu STATE0, 0x10(STATEP) movdqu STATE1, 0x20(STATEP) movdqu STATE2, 0x30(STATEP) movdqu STATE3, 0x40(STATEP) FRAME_END RET SYM_FUNC_END(crypto_aegis128_aesni_dec_tail) /* * void crypto_aegis128_aesni_final(void *state, void *tag_xor, * u64 assoclen, u64 cryptlen); */ SYM_FUNC_START(crypto_aegis128_aesni_final) FRAME_BEGIN /* load the state: */ movdqu 0x00(STATEP), STATE0 movdqu 0x10(STATEP), STATE1 movdqu 0x20(STATEP), STATE2 movdqu 0x30(STATEP), STATE3 movdqu 0x40(STATEP), STATE4 /* prepare length block: */ movq %rdx, MSG movq %rcx, T0 pslldq $8, T0 pxor T0, MSG psllq $3, MSG /* multiply by 8 (to get bit count) */ pxor STATE3, MSG /* update state: */ aegis128_update; pxor MSG, STATE4 aegis128_update; pxor MSG, STATE3 aegis128_update; pxor MSG, STATE2 aegis128_update; pxor MSG, STATE1 aegis128_update; pxor MSG, STATE0 aegis128_update; pxor MSG, STATE4 aegis128_update; pxor MSG, STATE3 /* xor tag: */ movdqu (%rsi), MSG pxor STATE0, MSG pxor STATE1, MSG pxor STATE2, MSG pxor STATE3, MSG pxor STATE4, MSG movdqu MSG, (%rsi) FRAME_END RET SYM_FUNC_END(crypto_aegis128_aesni_final)
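Two details of the tail handling above are easy to model in C: __load_partial assembles the final length % 16 bytes into a zero-padded 16-byte block with piecewise 1/2/4/8-byte loads, and the decrypt tail masks the recovered plaintext to its real length (the pcmpgtb against .Laegis128_counter) before absorbing it back into the state. The sketch below is a byte-level reference for those two steps only, not a drop-in replacement for the SIMD code.

#include <stdint.h>
#include <string.h>

/* Reference for __load_partial: gather len (< 16) trailing bytes into
 * a zero-padded 16-byte message block. */
static void load_partial(uint8_t msg[16], const uint8_t *src, size_t len)
{
	memset(msg, 0, 16);
	memcpy(msg, src, len);
}

/* Reference for the dec_tail byte-count mask: zero every byte at a
 * position >= len so padding never reaches the state update. */
static void mask_partial(uint8_t msg[16], size_t len)
{
	size_t i;

	for (i = len; i < 16; i++)
		msg[i] = 0;
}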
aixcc-public/challenge-001-exemplar-source
12,932
arch/x86/crypto/sha512-avx-asm.S
######################################################################## # Implement fast SHA-512 with AVX instructions. (x86_64) # # Copyright (C) 2013 Intel Corporation. # # Authors: # James Guilford <james.guilford@intel.com> # Kirk Yap <kirk.s.yap@intel.com> # David Cote <david.m.cote@intel.com> # Tim Chen <tim.c.chen@linux.intel.com> # # This software is available to you under a choice of one of two # licenses. You may choose to be licensed under the terms of the GNU # General Public License (GPL) Version 2, available from the file # COPYING in the main directory of this source tree, or the # OpenIB.org BSD license below: # # Redistribution and use in source and binary forms, with or # without modification, are permitted provided that the following # conditions are met: # # - Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # - Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials # provided with the distribution. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # ######################################################################## # # This code is described in an Intel White-Paper: # "Fast SHA-512 Implementations on Intel Architecture Processors" # # To find it, surf to http://www.intel.com/p/en_US/embedded # and search for that title. 
# ######################################################################## #include <linux/linkage.h> #include <linux/cfi_types.h> .text # Virtual Registers # ARG1 digest = %rdi # ARG2 msg = %rsi # ARG3 msglen = %rdx T1 = %rcx T2 = %r8 a_64 = %r9 b_64 = %r10 c_64 = %r11 d_64 = %r12 e_64 = %r13 f_64 = %r14 g_64 = %r15 h_64 = %rbx tmp0 = %rax # Local variables (stack frame) # Message Schedule W_SIZE = 80*8 # W[t] + K[t] | W[t+1] + K[t+1] WK_SIZE = 2*8 frame_W = 0 frame_WK = frame_W + W_SIZE frame_size = frame_WK + WK_SIZE # Useful QWORD "arrays" for simpler memory references # MSG, DIGEST, K_t, W_t are arrays # WK_2(t) points to 1 of 2 qwords at frame.WK depdending on t being odd/even # Input message (arg1) #define MSG(i) 8*i(msg) # Output Digest (arg2) #define DIGEST(i) 8*i(digest) # SHA Constants (static mem) #define K_t(i) 8*i+K512(%rip) # Message Schedule (stack frame) #define W_t(i) 8*i+frame_W(%rsp) # W[t]+K[t] (stack frame) #define WK_2(i) 8*((i%2))+frame_WK(%rsp) .macro RotateState # Rotate symbols a..h right TMP = h_64 h_64 = g_64 g_64 = f_64 f_64 = e_64 e_64 = d_64 d_64 = c_64 c_64 = b_64 b_64 = a_64 a_64 = TMP .endm .macro RORQ p1 p2 # shld is faster than ror on Sandybridge shld $(64-\p2), \p1, \p1 .endm .macro SHA512_Round rnd # Compute Round %%t mov f_64, T1 # T1 = f mov e_64, tmp0 # tmp = e xor g_64, T1 # T1 = f ^ g RORQ tmp0, 23 # 41 # tmp = e ror 23 and e_64, T1 # T1 = (f ^ g) & e xor e_64, tmp0 # tmp = (e ror 23) ^ e xor g_64, T1 # T1 = ((f ^ g) & e) ^ g = CH(e,f,g) idx = \rnd add WK_2(idx), T1 # W[t] + K[t] from message scheduler RORQ tmp0, 4 # 18 # tmp = ((e ror 23) ^ e) ror 4 xor e_64, tmp0 # tmp = (((e ror 23) ^ e) ror 4) ^ e mov a_64, T2 # T2 = a add h_64, T1 # T1 = CH(e,f,g) + W[t] + K[t] + h RORQ tmp0, 14 # 14 # tmp = ((((e ror23)^e)ror4)^e)ror14 = S1(e) add tmp0, T1 # T1 = CH(e,f,g) + W[t] + K[t] + S1(e) mov a_64, tmp0 # tmp = a xor c_64, T2 # T2 = a ^ c and c_64, tmp0 # tmp = a & c and b_64, T2 # T2 = (a ^ c) & b xor tmp0, T2 # T2 = ((a ^ c) & b) ^ (a & c) = Maj(a,b,c) mov a_64, tmp0 # tmp = a RORQ tmp0, 5 # 39 # tmp = a ror 5 xor a_64, tmp0 # tmp = (a ror 5) ^ a add T1, d_64 # e(next_state) = d + T1 RORQ tmp0, 6 # 34 # tmp = ((a ror 5) ^ a) ror 6 xor a_64, tmp0 # tmp = (((a ror 5) ^ a) ror 6) ^ a lea (T1, T2), h_64 # a(next_state) = T1 + Maj(a,b,c) RORQ tmp0, 28 # 28 # tmp = ((((a ror5)^a)ror6)^a)ror28 = S0(a) add tmp0, h_64 # a(next_state) = T1 + Maj(a,b,c) S0(a) RotateState .endm .macro SHA512_2Sched_2Round_avx rnd # Compute rounds t-2 and t-1 # Compute message schedule QWORDS t and t+1 # Two rounds are computed based on the values for K[t-2]+W[t-2] and # K[t-1]+W[t-1] which were previously stored at WK_2 by the message # scheduler. # The two new schedule QWORDS are stored at [W_t(t)] and [W_t(t+1)]. # They are then added to their respective SHA512 constants at # [K_t(t)] and [K_t(t+1)] and stored at dqword [WK_2(t)] # For brievity, the comments following vectored instructions only refer to # the first of a pair of QWORDS. # Eg. XMM4=W[t-2] really means XMM4={W[t-2]|W[t-1]} # The computation of the message schedule and the rounds are tightly # stitched to take advantage of instruction-level parallelism. 
idx = \rnd - 2 vmovdqa W_t(idx), %xmm4 # XMM4 = W[t-2] idx = \rnd - 15 vmovdqu W_t(idx), %xmm5 # XMM5 = W[t-15] mov f_64, T1 vpsrlq $61, %xmm4, %xmm0 # XMM0 = W[t-2]>>61 mov e_64, tmp0 vpsrlq $1, %xmm5, %xmm6 # XMM6 = W[t-15]>>1 xor g_64, T1 RORQ tmp0, 23 # 41 vpsrlq $19, %xmm4, %xmm1 # XMM1 = W[t-2]>>19 and e_64, T1 xor e_64, tmp0 vpxor %xmm1, %xmm0, %xmm0 # XMM0 = W[t-2]>>61 ^ W[t-2]>>19 xor g_64, T1 idx = \rnd add WK_2(idx), T1# vpsrlq $8, %xmm5, %xmm7 # XMM7 = W[t-15]>>8 RORQ tmp0, 4 # 18 vpsrlq $6, %xmm4, %xmm2 # XMM2 = W[t-2]>>6 xor e_64, tmp0 mov a_64, T2 add h_64, T1 vpxor %xmm7, %xmm6, %xmm6 # XMM6 = W[t-15]>>1 ^ W[t-15]>>8 RORQ tmp0, 14 # 14 add tmp0, T1 vpsrlq $7, %xmm5, %xmm8 # XMM8 = W[t-15]>>7 mov a_64, tmp0 xor c_64, T2 vpsllq $(64-61), %xmm4, %xmm3 # XMM3 = W[t-2]<<3 and c_64, tmp0 and b_64, T2 vpxor %xmm3, %xmm2, %xmm2 # XMM2 = W[t-2]>>6 ^ W[t-2]<<3 xor tmp0, T2 mov a_64, tmp0 vpsllq $(64-1), %xmm5, %xmm9 # XMM9 = W[t-15]<<63 RORQ tmp0, 5 # 39 vpxor %xmm9, %xmm8, %xmm8 # XMM8 = W[t-15]>>7 ^ W[t-15]<<63 xor a_64, tmp0 add T1, d_64 RORQ tmp0, 6 # 34 xor a_64, tmp0 vpxor %xmm8, %xmm6, %xmm6 # XMM6 = W[t-15]>>1 ^ W[t-15]>>8 ^ # W[t-15]>>7 ^ W[t-15]<<63 lea (T1, T2), h_64 RORQ tmp0, 28 # 28 vpsllq $(64-19), %xmm4, %xmm4 # XMM4 = W[t-2]<<25 add tmp0, h_64 RotateState vpxor %xmm4, %xmm0, %xmm0 # XMM0 = W[t-2]>>61 ^ W[t-2]>>19 ^ # W[t-2]<<25 mov f_64, T1 vpxor %xmm2, %xmm0, %xmm0 # XMM0 = s1(W[t-2]) mov e_64, tmp0 xor g_64, T1 idx = \rnd - 16 vpaddq W_t(idx), %xmm0, %xmm0 # XMM0 = s1(W[t-2]) + W[t-16] idx = \rnd - 7 vmovdqu W_t(idx), %xmm1 # XMM1 = W[t-7] RORQ tmp0, 23 # 41 and e_64, T1 xor e_64, tmp0 xor g_64, T1 vpsllq $(64-8), %xmm5, %xmm5 # XMM5 = W[t-15]<<56 idx = \rnd + 1 add WK_2(idx), T1 vpxor %xmm5, %xmm6, %xmm6 # XMM6 = s0(W[t-15]) RORQ tmp0, 4 # 18 vpaddq %xmm6, %xmm0, %xmm0 # XMM0 = s1(W[t-2]) + W[t-16] + s0(W[t-15]) xor e_64, tmp0 vpaddq %xmm1, %xmm0, %xmm0 # XMM0 = W[t] = s1(W[t-2]) + W[t-7] + # s0(W[t-15]) + W[t-16] mov a_64, T2 add h_64, T1 RORQ tmp0, 14 # 14 add tmp0, T1 idx = \rnd vmovdqa %xmm0, W_t(idx) # Store W[t] vpaddq K_t(idx), %xmm0, %xmm0 # Compute W[t]+K[t] vmovdqa %xmm0, WK_2(idx) # Store W[t]+K[t] for next rounds mov a_64, tmp0 xor c_64, T2 and c_64, tmp0 and b_64, T2 xor tmp0, T2 mov a_64, tmp0 RORQ tmp0, 5 # 39 xor a_64, tmp0 add T1, d_64 RORQ tmp0, 6 # 34 xor a_64, tmp0 lea (T1, T2), h_64 RORQ tmp0, 28 # 28 add tmp0, h_64 RotateState .endm ######################################################################## # void sha512_transform_avx(sha512_state *state, const u8 *data, int blocks) # Purpose: Updates the SHA512 digest stored at "state" with the message # stored in "data". # The size of the message pointed to by "data" must be an integer multiple # of SHA512 message blocks. 
# "blocks" is the message length in SHA512 blocks ######################################################################## SYM_TYPED_FUNC_START(sha512_transform_avx) test msglen, msglen je nowork # Save GPRs push %rbx push %r12 push %r13 push %r14 push %r15 # Allocate Stack Space push %rbp mov %rsp, %rbp sub $frame_size, %rsp and $~(0x20 - 1), %rsp updateblock: # Load state variables mov DIGEST(0), a_64 mov DIGEST(1), b_64 mov DIGEST(2), c_64 mov DIGEST(3), d_64 mov DIGEST(4), e_64 mov DIGEST(5), f_64 mov DIGEST(6), g_64 mov DIGEST(7), h_64 t = 0 .rept 80/2 + 1 # (80 rounds) / (2 rounds/iteration) + (1 iteration) # +1 iteration because the scheduler leads hashing by 1 iteration .if t < 2 # BSWAP 2 QWORDS vmovdqa XMM_QWORD_BSWAP(%rip), %xmm1 vmovdqu MSG(t), %xmm0 vpshufb %xmm1, %xmm0, %xmm0 # BSWAP vmovdqa %xmm0, W_t(t) # Store Scheduled Pair vpaddq K_t(t), %xmm0, %xmm0 # Compute W[t]+K[t] vmovdqa %xmm0, WK_2(t) # Store into WK for rounds .elseif t < 16 # BSWAP 2 QWORDS# Compute 2 Rounds vmovdqu MSG(t), %xmm0 vpshufb %xmm1, %xmm0, %xmm0 # BSWAP SHA512_Round t-2 # Round t-2 vmovdqa %xmm0, W_t(t) # Store Scheduled Pair vpaddq K_t(t), %xmm0, %xmm0 # Compute W[t]+K[t] SHA512_Round t-1 # Round t-1 vmovdqa %xmm0, WK_2(t)# Store W[t]+K[t] into WK .elseif t < 79 # Schedule 2 QWORDS# Compute 2 Rounds SHA512_2Sched_2Round_avx t .else # Compute 2 Rounds SHA512_Round t-2 SHA512_Round t-1 .endif t = t+2 .endr # Update digest add a_64, DIGEST(0) add b_64, DIGEST(1) add c_64, DIGEST(2) add d_64, DIGEST(3) add e_64, DIGEST(4) add f_64, DIGEST(5) add g_64, DIGEST(6) add h_64, DIGEST(7) # Advance to next message block add $16*8, msg dec msglen jnz updateblock # Restore Stack Pointer mov %rbp, %rsp pop %rbp # Restore GPRs pop %r15 pop %r14 pop %r13 pop %r12 pop %rbx nowork: RET SYM_FUNC_END(sha512_transform_avx) ######################################################################## ### Binary Data .section .rodata.cst16.XMM_QWORD_BSWAP, "aM", @progbits, 16 .align 16 # Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb. XMM_QWORD_BSWAP: .octa 0x08090a0b0c0d0e0f0001020304050607 # Mergeable 640-byte rodata section. This allows linker to merge the table # with other, exactly the same 640-byte fragment of another rodata section # (if such section exists). 
.section .rodata.cst640.K512, "aM", @progbits, 640 .align 64 # K[t] used in SHA512 hashing K512: .quad 0x428a2f98d728ae22,0x7137449123ef65cd .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc .quad 0x3956c25bf348b538,0x59f111f1b605d019 .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 .quad 0xd807aa98a3030242,0x12835b0145706fbe .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 .quad 0x9bdc06a725c71235,0xc19bf174cf692694 .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 .quad 0x983e5152ee66dfab,0xa831c66d2db43210 .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 .quad 0x06ca6351e003826f,0x142929670a0e6e70 .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 .quad 0x81c2c92e47edaee6,0x92722c851482353b .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 .quad 0xd192e819d6ef5218,0xd69906245565a910 .quad 0xf40e35855771202a,0x106aa07032bbd1b8 .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec .quad 0x90befffa23631e28,0xa4506cebde82bde9 .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b .quad 0xca273eceea26619c,0xd186b8c721c0c207 .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 .quad 0x113f9804bef90dae,0x1b710b35131c471b .quad 0x28db77f523047d84,0x32caab7b40c72493 .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
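The RORQ chains above compute the SHA-512 big-sigma functions in factored form: three dependent rotates with XORs folded in between (23, 4, 14 for S1 and 5, 6, 28 for S0) rather than three independent rotates (14, 18, 41 and 28, 34, 39). The standalone C program below checks that the two forms agree; it is a sanity sketch, not kernel code.

#include <stdint.h>
#include <assert.h>

static uint64_t ror64(uint64_t x, unsigned int n)
{
	return (x >> n) | (x << (64 - n));
}

/* Direct definitions of the SHA-512 big sigma functions. */
static uint64_t sigma1(uint64_t e)
{
	return ror64(e, 14) ^ ror64(e, 18) ^ ror64(e, 41);
}

static uint64_t sigma0(uint64_t a)
{
	return ror64(a, 28) ^ ror64(a, 34) ^ ror64(a, 39);
}

/* Factored forms matching the RORQ 23/4/14 and 5/6/28 sequences. */
static uint64_t sigma1_factored(uint64_t e)
{
	return ror64(ror64(ror64(e, 23) ^ e, 4) ^ e, 14);
}

static uint64_t sigma0_factored(uint64_t a)
{
	return ror64(ror64(ror64(a, 5) ^ a, 6) ^ a, 28);
}

int main(void)
{
	uint64_t x = 0x0123456789abcdefULL;

	assert(sigma1(x) == sigma1_factored(x));
	assert(sigma0(x) == sigma0_factored(x));
	return 0;
}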
aixcc-public/challenge-001-exemplar-source
26,630
arch/x86/crypto/camellia-aesni-avx-asm_64.S
/* * x86_64/AVX/AES-NI assembler implementation of Camellia * * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ /* * Version licensed under 2-clause BSD License is available at: * http://koti.mbnet.fi/axh/crypto/camellia-BSD-1.2.0-aesni1.tar.xz */ #include <linux/linkage.h> #include <asm/frame.h> #define CAMELLIA_TABLE_BYTE_LEN 272 /* struct camellia_ctx: */ #define key_table 0 #define key_length CAMELLIA_TABLE_BYTE_LEN /* register macros */ #define CTX %rdi /********************************************************************** 16-way camellia **********************************************************************/ #define filter_8bit(x, lo_t, hi_t, mask4bit, tmp0) \ vpand x, mask4bit, tmp0; \ vpandn x, mask4bit, x; \ vpsrld $4, x, x; \ \ vpshufb tmp0, lo_t, tmp0; \ vpshufb x, hi_t, x; \ vpxor tmp0, x, x; /* * IN: * x0..x7: byte-sliced AB state * mem_cd: register pointer storing CD state * key: index for key material * OUT: * x0..x7: new byte-sliced CD state */ #define roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \ t7, mem_cd, key) \ /* \ * S-function with AES subbytes \ */ \ vmovdqa .Linv_shift_row, t4; \ vbroadcastss .L0f0f0f0f, t7; \ vmovdqa .Lpre_tf_lo_s1, t0; \ vmovdqa .Lpre_tf_hi_s1, t1; \ \ /* AES inverse shift rows */ \ vpshufb t4, x0, x0; \ vpshufb t4, x7, x7; \ vpshufb t4, x1, x1; \ vpshufb t4, x4, x4; \ vpshufb t4, x2, x2; \ vpshufb t4, x5, x5; \ vpshufb t4, x3, x3; \ vpshufb t4, x6, x6; \ \ /* prefilter sboxes 1, 2 and 3 */ \ vmovdqa .Lpre_tf_lo_s4, t2; \ vmovdqa .Lpre_tf_hi_s4, t3; \ filter_8bit(x0, t0, t1, t7, t6); \ filter_8bit(x7, t0, t1, t7, t6); \ filter_8bit(x1, t0, t1, t7, t6); \ filter_8bit(x4, t0, t1, t7, t6); \ filter_8bit(x2, t0, t1, t7, t6); \ filter_8bit(x5, t0, t1, t7, t6); \ \ /* prefilter sbox 4 */ \ vpxor t4, t4, t4; \ filter_8bit(x3, t2, t3, t7, t6); \ filter_8bit(x6, t2, t3, t7, t6); \ \ /* AES subbytes + AES shift rows */ \ vmovdqa .Lpost_tf_lo_s1, t0; \ vmovdqa .Lpost_tf_hi_s1, t1; \ vaesenclast t4, x0, x0; \ vaesenclast t4, x7, x7; \ vaesenclast t4, x1, x1; \ vaesenclast t4, x4, x4; \ vaesenclast t4, x2, x2; \ vaesenclast t4, x5, x5; \ vaesenclast t4, x3, x3; \ vaesenclast t4, x6, x6; \ \ /* postfilter sboxes 1 and 4 */ \ vmovdqa .Lpost_tf_lo_s3, t2; \ vmovdqa .Lpost_tf_hi_s3, t3; \ filter_8bit(x0, t0, t1, t7, t6); \ filter_8bit(x7, t0, t1, t7, t6); \ filter_8bit(x3, t0, t1, t7, t6); \ filter_8bit(x6, t0, t1, t7, t6); \ \ /* postfilter sbox 3 */ \ vmovdqa .Lpost_tf_lo_s2, t4; \ vmovdqa .Lpost_tf_hi_s2, t5; \ filter_8bit(x2, t2, t3, t7, t6); \ filter_8bit(x5, t2, t3, t7, t6); \ \ vpxor t6, t6, t6; \ vmovq key, t0; \ \ /* postfilter sbox 2 */ \ filter_8bit(x1, t4, t5, t7, t2); \ filter_8bit(x4, t4, t5, t7, t2); \ \ vpsrldq $5, t0, t5; \ vpsrldq $1, t0, t1; \ vpsrldq $2, t0, t2; \ vpsrldq $3, t0, t3; \ vpsrldq $4, t0, t4; \ vpshufb t6, t0, t0; \ vpshufb t6, t1, t1; \ vpshufb t6, t2, t2; \ vpshufb t6, t3, t3; \ vpshufb t6, t4, t4; \ vpsrldq $2, t5, t7; \ vpshufb t6, t7, t7; \ \ /* \ * P-function \ */ \ vpxor x5, x0, x0; \ vpxor x6, x1, x1; \ vpxor x7, x2, x2; \ vpxor x4, x3, x3; \ \ vpxor x2, x4, x4; \ vpxor x3, x5, x5; \ vpxor x0, x6, x6; \ vpxor x1, x7, x7; \ \ vpxor x7, x0, x0; \ vpxor x4, x1, x1; \ vpxor x5, x2, x2; \ vpxor x6, x3, x3; \ \ vpxor x3, x4, x4; \ vpxor 
x0, x5, x5; \ vpxor x1, x6, x6; \ vpxor x2, x7, x7; /* note: high and low parts swapped */ \ \ /* \ * Add key material and result to CD (x becomes new CD) \ */ \ \ vpxor t3, x4, x4; \ vpxor 0 * 16(mem_cd), x4, x4; \ \ vpxor t2, x5, x5; \ vpxor 1 * 16(mem_cd), x5, x5; \ \ vpsrldq $1, t5, t3; \ vpshufb t6, t5, t5; \ vpshufb t6, t3, t6; \ \ vpxor t1, x6, x6; \ vpxor 2 * 16(mem_cd), x6, x6; \ \ vpxor t0, x7, x7; \ vpxor 3 * 16(mem_cd), x7, x7; \ \ vpxor t7, x0, x0; \ vpxor 4 * 16(mem_cd), x0, x0; \ \ vpxor t6, x1, x1; \ vpxor 5 * 16(mem_cd), x1, x1; \ \ vpxor t5, x2, x2; \ vpxor 6 * 16(mem_cd), x2, x2; \ \ vpxor t4, x3, x3; \ vpxor 7 * 16(mem_cd), x3, x3; /* * Size optimization... with inlined roundsm16, binary would be over 5 times * larger and would only be 0.5% faster (on sandy-bridge). */ .align 8 SYM_FUNC_START_LOCAL(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd) roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rcx, (%r9)); RET; SYM_FUNC_END(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd) .align 8 SYM_FUNC_START_LOCAL(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3, %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11, %rax, (%r9)); RET; SYM_FUNC_END(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) /* * IN/OUT: * x0..x7: byte-sliced AB state preloaded * mem_ab: byte-sliced AB state in memory * mem_cb: byte-sliced CD state in memory */ #define two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, i, dir, store_ab) \ leaq (key_table + (i) * 8)(CTX), %r9; \ call roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd; \ \ vmovdqu x4, 0 * 16(mem_cd); \ vmovdqu x5, 1 * 16(mem_cd); \ vmovdqu x6, 2 * 16(mem_cd); \ vmovdqu x7, 3 * 16(mem_cd); \ vmovdqu x0, 4 * 16(mem_cd); \ vmovdqu x1, 5 * 16(mem_cd); \ vmovdqu x2, 6 * 16(mem_cd); \ vmovdqu x3, 7 * 16(mem_cd); \ \ leaq (key_table + ((i) + (dir)) * 8)(CTX), %r9; \ call roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab; \ \ store_ab(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab); #define dummy_store(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) /* do nothing */ #define store_ab_state(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) \ /* Store new AB state */ \ vmovdqu x0, 0 * 16(mem_ab); \ vmovdqu x1, 1 * 16(mem_ab); \ vmovdqu x2, 2 * 16(mem_ab); \ vmovdqu x3, 3 * 16(mem_ab); \ vmovdqu x4, 4 * 16(mem_ab); \ vmovdqu x5, 5 * 16(mem_ab); \ vmovdqu x6, 6 * 16(mem_ab); \ vmovdqu x7, 7 * 16(mem_ab); #define enc_rounds16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, i) \ two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 2, 1, store_ab_state); \ two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 4, 1, store_ab_state); \ two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 6, 1, dummy_store); #define dec_rounds16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, i) \ two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 7, -1, store_ab_state); \ two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 5, -1, store_ab_state); \ two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, 
mem_ab, mem_cd, (i) + 3, -1, dummy_store); /* * IN: * v0..3: byte-sliced 32-bit integers * OUT: * v0..3: (IN <<< 1) */ #define rol32_1_16(v0, v1, v2, v3, t0, t1, t2, zero) \ vpcmpgtb v0, zero, t0; \ vpaddb v0, v0, v0; \ vpabsb t0, t0; \ \ vpcmpgtb v1, zero, t1; \ vpaddb v1, v1, v1; \ vpabsb t1, t1; \ \ vpcmpgtb v2, zero, t2; \ vpaddb v2, v2, v2; \ vpabsb t2, t2; \ \ vpor t0, v1, v1; \ \ vpcmpgtb v3, zero, t0; \ vpaddb v3, v3, v3; \ vpabsb t0, t0; \ \ vpor t1, v2, v2; \ vpor t2, v3, v3; \ vpor t0, v0, v0; /* * IN: * r: byte-sliced AB state in memory * l: byte-sliced CD state in memory * OUT: * x0..x7: new byte-sliced CD state */ #define fls16(l, l0, l1, l2, l3, l4, l5, l6, l7, r, t0, t1, t2, t3, tt0, \ tt1, tt2, tt3, kll, klr, krl, krr) \ /* \ * t0 = kll; \ * t0 &= ll; \ * lr ^= rol32(t0, 1); \ */ \ vpxor tt0, tt0, tt0; \ vmovd kll, t0; \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t0; \ \ vpand l0, t0, t0; \ vpand l1, t1, t1; \ vpand l2, t2, t2; \ vpand l3, t3, t3; \ \ rol32_1_16(t3, t2, t1, t0, tt1, tt2, tt3, tt0); \ \ vpxor l4, t0, l4; \ vmovdqu l4, 4 * 16(l); \ vpxor l5, t1, l5; \ vmovdqu l5, 5 * 16(l); \ vpxor l6, t2, l6; \ vmovdqu l6, 6 * 16(l); \ vpxor l7, t3, l7; \ vmovdqu l7, 7 * 16(l); \ \ /* \ * t2 = krr; \ * t2 |= rr; \ * rl ^= t2; \ */ \ \ vmovd krr, t0; \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t0; \ \ vpor 4 * 16(r), t0, t0; \ vpor 5 * 16(r), t1, t1; \ vpor 6 * 16(r), t2, t2; \ vpor 7 * 16(r), t3, t3; \ \ vpxor 0 * 16(r), t0, t0; \ vpxor 1 * 16(r), t1, t1; \ vpxor 2 * 16(r), t2, t2; \ vpxor 3 * 16(r), t3, t3; \ vmovdqu t0, 0 * 16(r); \ vmovdqu t1, 1 * 16(r); \ vmovdqu t2, 2 * 16(r); \ vmovdqu t3, 3 * 16(r); \ \ /* \ * t2 = krl; \ * t2 &= rl; \ * rr ^= rol32(t2, 1); \ */ \ vmovd krl, t0; \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t0; \ \ vpand 0 * 16(r), t0, t0; \ vpand 1 * 16(r), t1, t1; \ vpand 2 * 16(r), t2, t2; \ vpand 3 * 16(r), t3, t3; \ \ rol32_1_16(t3, t2, t1, t0, tt1, tt2, tt3, tt0); \ \ vpxor 4 * 16(r), t0, t0; \ vpxor 5 * 16(r), t1, t1; \ vpxor 6 * 16(r), t2, t2; \ vpxor 7 * 16(r), t3, t3; \ vmovdqu t0, 4 * 16(r); \ vmovdqu t1, 5 * 16(r); \ vmovdqu t2, 6 * 16(r); \ vmovdqu t3, 7 * 16(r); \ \ /* \ * t0 = klr; \ * t0 |= lr; \ * ll ^= t0; \ */ \ \ vmovd klr, t0; \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t0; \ \ vpor l4, t0, t0; \ vpor l5, t1, t1; \ vpor l6, t2, t2; \ vpor l7, t3, t3; \ \ vpxor l0, t0, l0; \ vmovdqu l0, 0 * 16(l); \ vpxor l1, t1, l1; \ vmovdqu l1, 1 * 16(l); \ vpxor l2, t2, l2; \ vmovdqu l2, 2 * 16(l); \ vpxor l3, t3, l3; \ vmovdqu l3, 3 * 16(l); #define transpose_4x4(x0, x1, x2, x3, t1, t2) \ vpunpckhdq x1, x0, t2; \ vpunpckldq x1, x0, x0; \ \ vpunpckldq x3, x2, t1; \ vpunpckhdq x3, x2, x2; \ \ vpunpckhqdq t1, x0, x1; \ vpunpcklqdq t1, x0, x0; \ \ vpunpckhqdq x2, t2, x3; \ vpunpcklqdq x2, t2, x2; #define byteslice_16x16b(a0, b0, c0, d0, a1, b1, c1, d1, a2, b2, c2, d2, a3, \ b3, c3, d3, st0, st1) \ vmovdqu d2, st0; \ vmovdqu d3, st1; \ transpose_4x4(a0, a1, a2, a3, d2, d3); \ transpose_4x4(b0, b1, b2, b3, d2, d3); \ vmovdqu st0, d2; \ vmovdqu st1, d3; \ \ vmovdqu a0, st0; \ vmovdqu a1, st1; \ transpose_4x4(c0, c1, c2, 
c3, a0, a1); \ transpose_4x4(d0, d1, d2, d3, a0, a1); \ \ vmovdqu .Lshufb_16x16b, a0; \ vmovdqu st1, a1; \ vpshufb a0, a2, a2; \ vpshufb a0, a3, a3; \ vpshufb a0, b0, b0; \ vpshufb a0, b1, b1; \ vpshufb a0, b2, b2; \ vpshufb a0, b3, b3; \ vpshufb a0, a1, a1; \ vpshufb a0, c0, c0; \ vpshufb a0, c1, c1; \ vpshufb a0, c2, c2; \ vpshufb a0, c3, c3; \ vpshufb a0, d0, d0; \ vpshufb a0, d1, d1; \ vpshufb a0, d2, d2; \ vpshufb a0, d3, d3; \ vmovdqu d3, st1; \ vmovdqu st0, d3; \ vpshufb a0, d3, a0; \ vmovdqu d2, st0; \ \ transpose_4x4(a0, b0, c0, d0, d2, d3); \ transpose_4x4(a1, b1, c1, d1, d2, d3); \ vmovdqu st0, d2; \ vmovdqu st1, d3; \ \ vmovdqu b0, st0; \ vmovdqu b1, st1; \ transpose_4x4(a2, b2, c2, d2, b0, b1); \ transpose_4x4(a3, b3, c3, d3, b0, b1); \ vmovdqu st0, b0; \ vmovdqu st1, b1; \ /* does not adjust output bytes inside vectors */ /* load blocks to registers and apply pre-whitening */ #define inpack16_pre(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, rio, key) \ vmovq key, x0; \ vpshufb .Lpack_bswap, x0, x0; \ \ vpxor 0 * 16(rio), x0, y7; \ vpxor 1 * 16(rio), x0, y6; \ vpxor 2 * 16(rio), x0, y5; \ vpxor 3 * 16(rio), x0, y4; \ vpxor 4 * 16(rio), x0, y3; \ vpxor 5 * 16(rio), x0, y2; \ vpxor 6 * 16(rio), x0, y1; \ vpxor 7 * 16(rio), x0, y0; \ vpxor 8 * 16(rio), x0, x7; \ vpxor 9 * 16(rio), x0, x6; \ vpxor 10 * 16(rio), x0, x5; \ vpxor 11 * 16(rio), x0, x4; \ vpxor 12 * 16(rio), x0, x3; \ vpxor 13 * 16(rio), x0, x2; \ vpxor 14 * 16(rio), x0, x1; \ vpxor 15 * 16(rio), x0, x0; /* byteslice pre-whitened blocks and store to temporary memory */ #define inpack16_post(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd) \ byteslice_16x16b(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, \ y5, y6, y7, (mem_ab), (mem_cd)); \ \ vmovdqu x0, 0 * 16(mem_ab); \ vmovdqu x1, 1 * 16(mem_ab); \ vmovdqu x2, 2 * 16(mem_ab); \ vmovdqu x3, 3 * 16(mem_ab); \ vmovdqu x4, 4 * 16(mem_ab); \ vmovdqu x5, 5 * 16(mem_ab); \ vmovdqu x6, 6 * 16(mem_ab); \ vmovdqu x7, 7 * 16(mem_ab); \ vmovdqu y0, 0 * 16(mem_cd); \ vmovdqu y1, 1 * 16(mem_cd); \ vmovdqu y2, 2 * 16(mem_cd); \ vmovdqu y3, 3 * 16(mem_cd); \ vmovdqu y4, 4 * 16(mem_cd); \ vmovdqu y5, 5 * 16(mem_cd); \ vmovdqu y6, 6 * 16(mem_cd); \ vmovdqu y7, 7 * 16(mem_cd); /* de-byteslice, apply post-whitening and store blocks */ #define outunpack16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, \ y5, y6, y7, key, stack_tmp0, stack_tmp1) \ byteslice_16x16b(y0, y4, x0, x4, y1, y5, x1, x5, y2, y6, x2, x6, y3, \ y7, x3, x7, stack_tmp0, stack_tmp1); \ \ vmovdqu x0, stack_tmp0; \ \ vmovq key, x0; \ vpshufb .Lpack_bswap, x0, x0; \ \ vpxor x0, y7, y7; \ vpxor x0, y6, y6; \ vpxor x0, y5, y5; \ vpxor x0, y4, y4; \ vpxor x0, y3, y3; \ vpxor x0, y2, y2; \ vpxor x0, y1, y1; \ vpxor x0, y0, y0; \ vpxor x0, x7, x7; \ vpxor x0, x6, x6; \ vpxor x0, x5, x5; \ vpxor x0, x4, x4; \ vpxor x0, x3, x3; \ vpxor x0, x2, x2; \ vpxor x0, x1, x1; \ vpxor stack_tmp0, x0, x0; #define write_output(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, rio) \ vmovdqu x0, 0 * 16(rio); \ vmovdqu x1, 1 * 16(rio); \ vmovdqu x2, 2 * 16(rio); \ vmovdqu x3, 3 * 16(rio); \ vmovdqu x4, 4 * 16(rio); \ vmovdqu x5, 5 * 16(rio); \ vmovdqu x6, 6 * 16(rio); \ vmovdqu x7, 7 * 16(rio); \ vmovdqu y0, 8 * 16(rio); \ vmovdqu y1, 9 * 16(rio); \ vmovdqu y2, 10 * 16(rio); \ vmovdqu y3, 11 * 16(rio); \ vmovdqu y4, 12 * 16(rio); \ vmovdqu y5, 13 * 16(rio); \ vmovdqu y6, 14 * 16(rio); \ vmovdqu y7, 15 * 16(rio); /* NB: section is mergeable, all elements must be 
aligned 16-byte blocks */ .section .rodata.cst16, "aM", @progbits, 16 .align 16 #define SHUFB_BYTES(idx) \ 0 + (idx), 4 + (idx), 8 + (idx), 12 + (idx) .Lshufb_16x16b: .byte SHUFB_BYTES(0), SHUFB_BYTES(1), SHUFB_BYTES(2), SHUFB_BYTES(3); .Lpack_bswap: .long 0x00010203 .long 0x04050607 .long 0x80808080 .long 0x80808080 /* * pre-SubByte transform * * pre-lookup for sbox1, sbox2, sbox3: * swap_bitendianness( * isom_map_camellia_to_aes( * camellia_f( * swap_bitendianess(in) * ) * ) * ) * * (note: '⊕ 0xc5' inside camellia_f()) */ .Lpre_tf_lo_s1: .byte 0x45, 0xe8, 0x40, 0xed, 0x2e, 0x83, 0x2b, 0x86 .byte 0x4b, 0xe6, 0x4e, 0xe3, 0x20, 0x8d, 0x25, 0x88 .Lpre_tf_hi_s1: .byte 0x00, 0x51, 0xf1, 0xa0, 0x8a, 0xdb, 0x7b, 0x2a .byte 0x09, 0x58, 0xf8, 0xa9, 0x83, 0xd2, 0x72, 0x23 /* * pre-SubByte transform * * pre-lookup for sbox4: * swap_bitendianness( * isom_map_camellia_to_aes( * camellia_f( * swap_bitendianess(in <<< 1) * ) * ) * ) * * (note: '⊕ 0xc5' inside camellia_f()) */ .Lpre_tf_lo_s4: .byte 0x45, 0x40, 0x2e, 0x2b, 0x4b, 0x4e, 0x20, 0x25 .byte 0x14, 0x11, 0x7f, 0x7a, 0x1a, 0x1f, 0x71, 0x74 .Lpre_tf_hi_s4: .byte 0x00, 0xf1, 0x8a, 0x7b, 0x09, 0xf8, 0x83, 0x72 .byte 0xad, 0x5c, 0x27, 0xd6, 0xa4, 0x55, 0x2e, 0xdf /* * post-SubByte transform * * post-lookup for sbox1, sbox4: * swap_bitendianness( * camellia_h( * isom_map_aes_to_camellia( * swap_bitendianness( * aes_inverse_affine_transform(in) * ) * ) * ) * ) * * (note: '⊕ 0x6e' inside camellia_h()) */ .Lpost_tf_lo_s1: .byte 0x3c, 0xcc, 0xcf, 0x3f, 0x32, 0xc2, 0xc1, 0x31 .byte 0xdc, 0x2c, 0x2f, 0xdf, 0xd2, 0x22, 0x21, 0xd1 .Lpost_tf_hi_s1: .byte 0x00, 0xf9, 0x86, 0x7f, 0xd7, 0x2e, 0x51, 0xa8 .byte 0xa4, 0x5d, 0x22, 0xdb, 0x73, 0x8a, 0xf5, 0x0c /* * post-SubByte transform * * post-lookup for sbox2: * swap_bitendianness( * camellia_h( * isom_map_aes_to_camellia( * swap_bitendianness( * aes_inverse_affine_transform(in) * ) * ) * ) * ) <<< 1 * * (note: '⊕ 0x6e' inside camellia_h()) */ .Lpost_tf_lo_s2: .byte 0x78, 0x99, 0x9f, 0x7e, 0x64, 0x85, 0x83, 0x62 .byte 0xb9, 0x58, 0x5e, 0xbf, 0xa5, 0x44, 0x42, 0xa3 .Lpost_tf_hi_s2: .byte 0x00, 0xf3, 0x0d, 0xfe, 0xaf, 0x5c, 0xa2, 0x51 .byte 0x49, 0xba, 0x44, 0xb7, 0xe6, 0x15, 0xeb, 0x18 /* * post-SubByte transform * * post-lookup for sbox3: * swap_bitendianness( * camellia_h( * isom_map_aes_to_camellia( * swap_bitendianness( * aes_inverse_affine_transform(in) * ) * ) * ) * ) >>> 1 * * (note: '⊕ 0x6e' inside camellia_h()) */ .Lpost_tf_lo_s3: .byte 0x1e, 0x66, 0xe7, 0x9f, 0x19, 0x61, 0xe0, 0x98 .byte 0x6e, 0x16, 0x97, 0xef, 0x69, 0x11, 0x90, 0xe8 .Lpost_tf_hi_s3: .byte 0x00, 0xfc, 0x43, 0xbf, 0xeb, 0x17, 0xa8, 0x54 .byte 0x52, 0xae, 0x11, 0xed, 0xb9, 0x45, 0xfa, 0x06 /* For isolating SubBytes from AESENCLAST, inverse shift row */ .Linv_shift_row: .byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b .byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03 /* 4-bit mask */ .section .rodata.cst4.L0f0f0f0f, "aM", @progbits, 4 .align 4 .L0f0f0f0f: .long 0x0f0f0f0f .text .align 8 SYM_FUNC_START_LOCAL(__camellia_enc_blk16) /* input: * %rdi: ctx, CTX * %rax: temporary storage, 256 bytes * %xmm0..%xmm15: 16 plaintext blocks * output: * %xmm0..%xmm15: 16 encrypted blocks, order swapped: * 7, 8, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 */ FRAME_BEGIN leaq 8 * 16(%rax), %rcx; inpack16_post(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %rcx); enc_rounds16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, 
%xmm13, %xmm14, %xmm15, %rax, %rcx, 0); fls16(%rax, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %rcx, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, ((key_table + (8) * 8) + 0)(CTX), ((key_table + (8) * 8) + 4)(CTX), ((key_table + (8) * 8) + 8)(CTX), ((key_table + (8) * 8) + 12)(CTX)); enc_rounds16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %rcx, 8); fls16(%rax, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %rcx, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, ((key_table + (16) * 8) + 0)(CTX), ((key_table + (16) * 8) + 4)(CTX), ((key_table + (16) * 8) + 8)(CTX), ((key_table + (16) * 8) + 12)(CTX)); enc_rounds16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %rcx, 16); movl $24, %r8d; cmpl $16, key_length(CTX); jne .Lenc_max32; .Lenc_done: /* load CD for output */ vmovdqu 0 * 16(%rcx), %xmm8; vmovdqu 1 * 16(%rcx), %xmm9; vmovdqu 2 * 16(%rcx), %xmm10; vmovdqu 3 * 16(%rcx), %xmm11; vmovdqu 4 * 16(%rcx), %xmm12; vmovdqu 5 * 16(%rcx), %xmm13; vmovdqu 6 * 16(%rcx), %xmm14; vmovdqu 7 * 16(%rcx), %xmm15; outunpack16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax)); FRAME_END RET; .align 8 .Lenc_max32: movl $32, %r8d; fls16(%rax, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %rcx, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, ((key_table + (24) * 8) + 0)(CTX), ((key_table + (24) * 8) + 4)(CTX), ((key_table + (24) * 8) + 8)(CTX), ((key_table + (24) * 8) + 12)(CTX)); enc_rounds16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %rcx, 24); jmp .Lenc_done; SYM_FUNC_END(__camellia_enc_blk16) .align 8 SYM_FUNC_START_LOCAL(__camellia_dec_blk16) /* input: * %rdi: ctx, CTX * %rax: temporary storage, 256 bytes * %r8d: 24 for 16 byte key, 32 for larger * %xmm0..%xmm15: 16 encrypted blocks * output: * %xmm0..%xmm15: 16 plaintext blocks, order swapped: * 7, 8, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 */ FRAME_BEGIN leaq 8 * 16(%rax), %rcx; inpack16_post(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %rcx); cmpl $32, %r8d; je .Ldec_max32; .Ldec_max24: dec_rounds16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %rcx, 16); fls16(%rax, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %rcx, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, ((key_table + (16) * 8) + 8)(CTX), ((key_table + (16) * 8) + 12)(CTX), ((key_table + (16) * 8) + 0)(CTX), ((key_table + (16) * 8) + 4)(CTX)); dec_rounds16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %rcx, 8); fls16(%rax, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %rcx, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, ((key_table + (8) * 8) + 8)(CTX), ((key_table + (8) * 8) + 12)(CTX), ((key_table + (8) * 8) + 0)(CTX), ((key_table + (8) * 8) + 4)(CTX)); dec_rounds16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %rcx, 0); /* load CD for output */ vmovdqu 0 * 16(%rcx), %xmm8; vmovdqu 1 * 16(%rcx), %xmm9; 
vmovdqu 2 * 16(%rcx), %xmm10; vmovdqu 3 * 16(%rcx), %xmm11; vmovdqu 4 * 16(%rcx), %xmm12; vmovdqu 5 * 16(%rcx), %xmm13; vmovdqu 6 * 16(%rcx), %xmm14; vmovdqu 7 * 16(%rcx), %xmm15; outunpack16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax)); FRAME_END RET; .align 8 .Ldec_max32: dec_rounds16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %rcx, 24); fls16(%rax, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %rcx, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, ((key_table + (24) * 8) + 8)(CTX), ((key_table + (24) * 8) + 12)(CTX), ((key_table + (24) * 8) + 0)(CTX), ((key_table + (24) * 8) + 4)(CTX)); jmp .Ldec_max24; SYM_FUNC_END(__camellia_dec_blk16) SYM_FUNC_START(camellia_ecb_enc_16way) /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) */ FRAME_BEGIN inpack16_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rdx, (key_table)(CTX)); /* now dst can be used as temporary buffer (even in src == dst case) */ movq %rsi, %rax; call __camellia_enc_blk16; write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0, %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, %xmm8, %rsi); FRAME_END RET; SYM_FUNC_END(camellia_ecb_enc_16way) SYM_FUNC_START(camellia_ecb_dec_16way) /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) */ FRAME_BEGIN cmpl $16, key_length(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ inpack16_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rdx, (key_table)(CTX, %r8, 8)); /* now dst can be used as temporary buffer (even in src == dst case) */ movq %rsi, %rax; call __camellia_dec_blk16; write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0, %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, %xmm8, %rsi); FRAME_END RET; SYM_FUNC_END(camellia_ecb_dec_16way) SYM_FUNC_START(camellia_cbc_dec_16way) /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) */ FRAME_BEGIN cmpl $16, key_length(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ inpack16_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rdx, (key_table)(CTX, %r8, 8)); /* * dst might still be in-use (in case dst == src), so use stack for * temporary storage. */ subq $(16 * 16), %rsp; movq %rsp, %rax; call __camellia_dec_blk16; addq $(16 * 16), %rsp; vpxor (0 * 16)(%rdx), %xmm6, %xmm6; vpxor (1 * 16)(%rdx), %xmm5, %xmm5; vpxor (2 * 16)(%rdx), %xmm4, %xmm4; vpxor (3 * 16)(%rdx), %xmm3, %xmm3; vpxor (4 * 16)(%rdx), %xmm2, %xmm2; vpxor (5 * 16)(%rdx), %xmm1, %xmm1; vpxor (6 * 16)(%rdx), %xmm0, %xmm0; vpxor (7 * 16)(%rdx), %xmm15, %xmm15; vpxor (8 * 16)(%rdx), %xmm14, %xmm14; vpxor (9 * 16)(%rdx), %xmm13, %xmm13; vpxor (10 * 16)(%rdx), %xmm12, %xmm12; vpxor (11 * 16)(%rdx), %xmm11, %xmm11; vpxor (12 * 16)(%rdx), %xmm10, %xmm10; vpxor (13 * 16)(%rdx), %xmm9, %xmm9; vpxor (14 * 16)(%rdx), %xmm8, %xmm8; write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0, %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, %xmm8, %rsi); FRAME_END RET; SYM_FUNC_END(camellia_cbc_dec_16way)
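The filter_8bit() macro near the top of this file is a byte-wise 8-bit table lookup split into two 16-entry nibble tables, which is what lets vpshufb act as the lookup instruction. A one-lane C model of it, with hypothetical table arguments, is given below for reference.

#include <stdint.h>

/*
 * One-lane model of filter_8bit(): look the low nibble up in lo_t and
 * the high nibble up in hi_t, then XOR the results - the same effect
 * the vpand/vpandn/vpsrld/vpshufb/vpxor sequence has on each of the
 * 16 bytes of an XMM register at once.
 */
static uint8_t filter_8bit_lane(uint8_t b, const uint8_t lo_t[16],
				const uint8_t hi_t[16])
{
	return lo_t[b & 0x0f] ^ hi_t[b >> 4];
}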
aixcc-public/challenge-001-exemplar-source
1,188
arch/x86/crypto/glue_helper-asm-avx2.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Shared glue code for 128bit block ciphers, AVX2 assembler macros * * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> */ #define load_16way(src, x0, x1, x2, x3, x4, x5, x6, x7) \ vmovdqu (0*32)(src), x0; \ vmovdqu (1*32)(src), x1; \ vmovdqu (2*32)(src), x2; \ vmovdqu (3*32)(src), x3; \ vmovdqu (4*32)(src), x4; \ vmovdqu (5*32)(src), x5; \ vmovdqu (6*32)(src), x6; \ vmovdqu (7*32)(src), x7; #define store_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7) \ vmovdqu x0, (0*32)(dst); \ vmovdqu x1, (1*32)(dst); \ vmovdqu x2, (2*32)(dst); \ vmovdqu x3, (3*32)(dst); \ vmovdqu x4, (4*32)(dst); \ vmovdqu x5, (5*32)(dst); \ vmovdqu x6, (6*32)(dst); \ vmovdqu x7, (7*32)(dst); #define store_cbc_16way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7, t0) \ vpxor t0, t0, t0; \ vinserti128 $1, (src), t0, t0; \ vpxor t0, x0, x0; \ vpxor (0*32+16)(src), x1, x1; \ vpxor (1*32+16)(src), x2, x2; \ vpxor (2*32+16)(src), x3, x3; \ vpxor (3*32+16)(src), x4, x4; \ vpxor (4*32+16)(src), x5, x5; \ vpxor (5*32+16)(src), x6, x6; \ vpxor (6*32+16)(src), x7, x7; \ store_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7);
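store_cbc_16way performs the chaining step of CBC decryption for 16 blocks at once: each already-decrypted block is XORed with the ciphertext block that precedes it in memory, while block 0 is combined with the zeroed low lane of t0, leaving it untouched so the IV can be applied by the caller. A scalar model of that step follows; it is purely illustrative and assumes dst already holds the raw block decryptions.

/* Scalar model of store_cbc_16way: dst already holds the raw block
 * decryptions; XOR each one with the previous ciphertext block.
 * Block 0 is skipped here, mirroring the zeroed low lane of t0. */
#include <stdint.h>

static void cbc_dec_chain(uint8_t *dst, const uint8_t *src, int nblocks)
{
	int i, j;

	for (i = nblocks - 1; i >= 1; i--)
		for (j = 0; j < 16; j++)
			dst[i * 16 + j] ^= src[(i - 1) * 16 + j];
}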
aixcc-public/challenge-001-exemplar-source
7,922
arch/x86/crypto/sha1_ni_asm.S
/* * Intel SHA Extensions optimized implementation of a SHA-1 update function * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * Contact Information: * Sean Gulley <sean.m.gulley@intel.com> * Tim Chen <tim.c.chen@linux.intel.com> * * BSD LICENSE * * Copyright(c) 2015 Intel Corporation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #include <linux/linkage.h> #include <linux/cfi_types.h> #define DIGEST_PTR %rdi /* 1st arg */ #define DATA_PTR %rsi /* 2nd arg */ #define NUM_BLKS %rdx /* 3rd arg */ /* gcc conversion */ #define FRAME_SIZE 32 /* space for 2x16 bytes */ #define ABCD %xmm0 #define E0 %xmm1 /* Need two E's b/c they ping pong */ #define E1 %xmm2 #define MSG0 %xmm3 #define MSG1 %xmm4 #define MSG2 %xmm5 #define MSG3 %xmm6 #define SHUF_MASK %xmm7 /* * Intel SHA Extensions optimized implementation of a SHA-1 update function * * The function takes a pointer to the current hash values, a pointer to the * input data, and a number of 64 byte blocks to process. Once all blocks have * been processed, the digest pointer is updated with the resulting hash value. * The function only processes complete blocks, there is no functionality to * store partial blocks. All message padding and hash value initialization must * be done outside the update function. * * The indented lines in the loop are instructions related to rounds processing. * The non-indented lines are instructions related to the message schedule. 
* * void sha1_ni_transform(uint32_t *digest, const void *data, uint32_t numBlocks) * digest : pointer to digest * data: pointer to input data * numBlocks: Number of blocks to process */ .text .align 32 SYM_TYPED_FUNC_START(sha1_ni_transform) push %rbp mov %rsp, %rbp sub $FRAME_SIZE, %rsp and $~0xF, %rsp shl $6, NUM_BLKS /* convert to bytes */ jz .Ldone_hash add DATA_PTR, NUM_BLKS /* pointer to end of data */ /* load initial hash values */ pinsrd $3, 1*16(DIGEST_PTR), E0 movdqu 0*16(DIGEST_PTR), ABCD pand UPPER_WORD_MASK(%rip), E0 pshufd $0x1B, ABCD, ABCD movdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), SHUF_MASK .Lloop0: /* Save hash values for addition after rounds */ movdqa E0, (0*16)(%rsp) movdqa ABCD, (1*16)(%rsp) /* Rounds 0-3 */ movdqu 0*16(DATA_PTR), MSG0 pshufb SHUF_MASK, MSG0 paddd MSG0, E0 movdqa ABCD, E1 sha1rnds4 $0, E0, ABCD /* Rounds 4-7 */ movdqu 1*16(DATA_PTR), MSG1 pshufb SHUF_MASK, MSG1 sha1nexte MSG1, E1 movdqa ABCD, E0 sha1rnds4 $0, E1, ABCD sha1msg1 MSG1, MSG0 /* Rounds 8-11 */ movdqu 2*16(DATA_PTR), MSG2 pshufb SHUF_MASK, MSG2 sha1nexte MSG2, E0 movdqa ABCD, E1 sha1rnds4 $0, E0, ABCD sha1msg1 MSG2, MSG1 pxor MSG2, MSG0 /* Rounds 12-15 */ movdqu 3*16(DATA_PTR), MSG3 pshufb SHUF_MASK, MSG3 sha1nexte MSG3, E1 movdqa ABCD, E0 sha1msg2 MSG3, MSG0 sha1rnds4 $0, E1, ABCD sha1msg1 MSG3, MSG2 pxor MSG3, MSG1 /* Rounds 16-19 */ sha1nexte MSG0, E0 movdqa ABCD, E1 sha1msg2 MSG0, MSG1 sha1rnds4 $0, E0, ABCD sha1msg1 MSG0, MSG3 pxor MSG0, MSG2 /* Rounds 20-23 */ sha1nexte MSG1, E1 movdqa ABCD, E0 sha1msg2 MSG1, MSG2 sha1rnds4 $1, E1, ABCD sha1msg1 MSG1, MSG0 pxor MSG1, MSG3 /* Rounds 24-27 */ sha1nexte MSG2, E0 movdqa ABCD, E1 sha1msg2 MSG2, MSG3 sha1rnds4 $1, E0, ABCD sha1msg1 MSG2, MSG1 pxor MSG2, MSG0 /* Rounds 28-31 */ sha1nexte MSG3, E1 movdqa ABCD, E0 sha1msg2 MSG3, MSG0 sha1rnds4 $1, E1, ABCD sha1msg1 MSG3, MSG2 pxor MSG3, MSG1 /* Rounds 32-35 */ sha1nexte MSG0, E0 movdqa ABCD, E1 sha1msg2 MSG0, MSG1 sha1rnds4 $1, E0, ABCD sha1msg1 MSG0, MSG3 pxor MSG0, MSG2 /* Rounds 36-39 */ sha1nexte MSG1, E1 movdqa ABCD, E0 sha1msg2 MSG1, MSG2 sha1rnds4 $1, E1, ABCD sha1msg1 MSG1, MSG0 pxor MSG1, MSG3 /* Rounds 40-43 */ sha1nexte MSG2, E0 movdqa ABCD, E1 sha1msg2 MSG2, MSG3 sha1rnds4 $2, E0, ABCD sha1msg1 MSG2, MSG1 pxor MSG2, MSG0 /* Rounds 44-47 */ sha1nexte MSG3, E1 movdqa ABCD, E0 sha1msg2 MSG3, MSG0 sha1rnds4 $2, E1, ABCD sha1msg1 MSG3, MSG2 pxor MSG3, MSG1 /* Rounds 48-51 */ sha1nexte MSG0, E0 movdqa ABCD, E1 sha1msg2 MSG0, MSG1 sha1rnds4 $2, E0, ABCD sha1msg1 MSG0, MSG3 pxor MSG0, MSG2 /* Rounds 52-55 */ sha1nexte MSG1, E1 movdqa ABCD, E0 sha1msg2 MSG1, MSG2 sha1rnds4 $2, E1, ABCD sha1msg1 MSG1, MSG0 pxor MSG1, MSG3 /* Rounds 56-59 */ sha1nexte MSG2, E0 movdqa ABCD, E1 sha1msg2 MSG2, MSG3 sha1rnds4 $2, E0, ABCD sha1msg1 MSG2, MSG1 pxor MSG2, MSG0 /* Rounds 60-63 */ sha1nexte MSG3, E1 movdqa ABCD, E0 sha1msg2 MSG3, MSG0 sha1rnds4 $3, E1, ABCD sha1msg1 MSG3, MSG2 pxor MSG3, MSG1 /* Rounds 64-67 */ sha1nexte MSG0, E0 movdqa ABCD, E1 sha1msg2 MSG0, MSG1 sha1rnds4 $3, E0, ABCD sha1msg1 MSG0, MSG3 pxor MSG0, MSG2 /* Rounds 68-71 */ sha1nexte MSG1, E1 movdqa ABCD, E0 sha1msg2 MSG1, MSG2 sha1rnds4 $3, E1, ABCD pxor MSG1, MSG3 /* Rounds 72-75 */ sha1nexte MSG2, E0 movdqa ABCD, E1 sha1msg2 MSG2, MSG3 sha1rnds4 $3, E0, ABCD /* Rounds 76-79 */ sha1nexte MSG3, E1 movdqa ABCD, E0 sha1rnds4 $3, E1, ABCD /* Add current hash values with previously saved */ sha1nexte (0*16)(%rsp), E0 paddd (1*16)(%rsp), ABCD /* Increment data pointer and loop if more to process */ add $64, DATA_PTR cmp NUM_BLKS, DATA_PTR jne 
.Lloop0

	/* Write hash values back in the correct order */
	pshufd		$0x1B, ABCD, ABCD
	movdqu		ABCD, 0*16(DIGEST_PTR)
	pextrd		$3, E0, 1*16(DIGEST_PTR)

.Ldone_hash:
	mov		%rbp, %rsp
	pop		%rbp
	RET
SYM_FUNC_END(sha1_ni_transform)

.section	.rodata.cst16.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 16
.align 16
PSHUFFLE_BYTE_FLIP_MASK:
	.octa 0x000102030405060708090a0b0c0d0e0f

.section	.rodata.cst16.UPPER_WORD_MASK, "aM", @progbits, 16
.align 16
UPPER_WORD_MASK:
	.octa 0xFFFFFFFF000000000000000000000000
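The header comment fixes the interface as void sha1_ni_transform(uint32_t *digest, const void *data, uint32_t numBlocks), with all padding and partial-block handling left to the caller. Below is a minimal caller sketch under those assumptions; the wrapper name and its buffering comment are illustrative, not part of the kernel API.

/* Usage sketch for the interface documented above; the helper below is
 * illustrative, not kernel code. */
#include <stddef.h>
#include <stdint.h>

extern void sha1_ni_transform(uint32_t *digest, const void *data,
			      uint32_t numBlocks);

static void sha1_update_blocks(uint32_t digest[5], const uint8_t *msg,
			       size_t len)
{
	/* Only complete 64-byte blocks go to the assembly routine; the
	 * final partial block and the length padding stay with the caller. */
	uint32_t nblocks = (uint32_t)(len / 64);

	if (nblocks)
		sha1_ni_transform(digest, msg, nblocks);
	/* ... buffer the remaining len % 64 bytes for a later call ... */
}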
aixcc-public/challenge-001-exemplar-source
23,683
arch/x86/crypto/sha256-avx2-asm.S
######################################################################## # Implement fast SHA-256 with AVX2 instructions. (x86_64) # # Copyright (C) 2013 Intel Corporation. # # Authors: # James Guilford <james.guilford@intel.com> # Kirk Yap <kirk.s.yap@intel.com> # Tim Chen <tim.c.chen@linux.intel.com> # # This software is available to you under a choice of one of two # licenses. You may choose to be licensed under the terms of the GNU # General Public License (GPL) Version 2, available from the file # COPYING in the main directory of this source tree, or the # OpenIB.org BSD license below: # # Redistribution and use in source and binary forms, with or # without modification, are permitted provided that the following # conditions are met: # # - Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # - Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials # provided with the distribution. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # ######################################################################## # # This code is described in an Intel White-Paper: # "Fast SHA-256 Implementations on Intel Architecture Processors" # # To find it, surf to http://www.intel.com/p/en_US/embedded # and search for that title. 
# ######################################################################## # This code schedules 2 blocks at a time, with 4 lanes per block ######################################################################## #include <linux/linkage.h> #include <linux/cfi_types.h> ## assume buffers not aligned #define VMOVDQ vmovdqu ################################ Define Macros # addm [mem], reg # Add reg to mem using reg-mem add and store .macro addm p1 p2 add \p1, \p2 mov \p2, \p1 .endm ################################ X0 = %ymm4 X1 = %ymm5 X2 = %ymm6 X3 = %ymm7 # XMM versions of above XWORD0 = %xmm4 XWORD1 = %xmm5 XWORD2 = %xmm6 XWORD3 = %xmm7 XTMP0 = %ymm0 XTMP1 = %ymm1 XTMP2 = %ymm2 XTMP3 = %ymm3 XTMP4 = %ymm8 XFER = %ymm9 XTMP5 = %ymm11 SHUF_00BA = %ymm10 # shuffle xBxA -> 00BA SHUF_DC00 = %ymm12 # shuffle xDxC -> DC00 BYTE_FLIP_MASK = %ymm13 X_BYTE_FLIP_MASK = %xmm13 # XMM version of BYTE_FLIP_MASK NUM_BLKS = %rdx # 3rd arg INP = %rsi # 2nd arg CTX = %rdi # 1st arg c = %ecx d = %r8d e = %edx # clobbers NUM_BLKS y3 = %esi # clobbers INP SRND = CTX # SRND is same register as CTX a = %eax b = %ebx f = %r9d g = %r10d h = %r11d old_h = %r11d T1 = %r12d y0 = %r13d y1 = %r14d y2 = %r15d _XFER_SIZE = 2*64*4 # 2 blocks, 64 rounds, 4 bytes/round _XMM_SAVE_SIZE = 0 _INP_END_SIZE = 8 _INP_SIZE = 8 _CTX_SIZE = 8 _XFER = 0 _XMM_SAVE = _XFER + _XFER_SIZE _INP_END = _XMM_SAVE + _XMM_SAVE_SIZE _INP = _INP_END + _INP_END_SIZE _CTX = _INP + _INP_SIZE STACK_SIZE = _CTX + _CTX_SIZE # rotate_Xs # Rotate values of symbols X0...X3 .macro rotate_Xs X_ = X0 X0 = X1 X1 = X2 X2 = X3 X3 = X_ .endm # ROTATE_ARGS # Rotate values of symbols a...h .macro ROTATE_ARGS old_h = h TMP_ = h h = g g = f f = e e = d d = c c = b b = a a = TMP_ .endm .macro FOUR_ROUNDS_AND_SCHED disp ################################### RND N + 0 ############################ mov a, y3 # y3 = a # MAJA rorx $25, e, y0 # y0 = e >> 25 # S1A rorx $11, e, y1 # y1 = e >> 11 # S1B addl \disp(%rsp, SRND), h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA vpalignr $4, X2, X3, XTMP0 # XTMP0 = W[-7] mov f, y2 # y2 = f # CH rorx $13, a, T1 # T1 = a >> 13 # S0B xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1 xor g, y2 # y2 = f^g # CH vpaddd X0, XTMP0, XTMP0 # XTMP0 = W[-7] + W[-16]# y1 = (e >> 6)# S1 rorx $6, e, y1 # y1 = (e >> 6) # S1 and e, y2 # y2 = (f^g)&e # CH xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1 rorx $22, a, y1 # y1 = a >> 22 # S0A add h, d # d = k + w + h + d # -- and b, y3 # y3 = (a|c)&b # MAJA vpalignr $4, X0, X1, XTMP1 # XTMP1 = W[-15] xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0 rorx $2, a, T1 # T1 = (a >> 2) # S0 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH vpsrld $7, XTMP1, XTMP2 xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0 mov a, T1 # T1 = a # MAJB and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- vpslld $(32-7), XTMP1, XTMP3 or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- vpor XTMP2, XTMP3, XTMP3 # XTMP3 = W[-15] ror 7 vpsrld $18, XTMP1, XTMP2 add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- add y3, h # h = t1 + S0 + MAJ # -- ROTATE_ARGS ################################### RND N + 1 ############################ mov a, y3 # y3 = a # MAJA rorx $25, e, y0 # y0 = e >> 25 # S1A rorx $11, e, y1 # y1 = e >> 11 # S1B offset = \disp + 1*4 addl offset(%rsp, SRND), h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA vpsrld $3, XTMP1, XTMP4 # XTMP4 = W[-15] >> 3 mov f, y2 # y2 = f # CH rorx $13, a, T1 # T1 = a >> 13 # S0B xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1 xor 
g, y2 # y2 = f^g # CH rorx $6, e, y1 # y1 = (e >> 6) # S1 xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1 rorx $22, a, y1 # y1 = a >> 22 # S0A and e, y2 # y2 = (f^g)&e # CH add h, d # d = k + w + h + d # -- vpslld $(32-18), XTMP1, XTMP1 and b, y3 # y3 = (a|c)&b # MAJA xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0 vpxor XTMP1, XTMP3, XTMP3 rorx $2, a, T1 # T1 = (a >> 2) # S0 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH vpxor XTMP2, XTMP3, XTMP3 # XTMP3 = W[-15] ror 7 ^ W[-15] ror 18 xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0 mov a, T1 # T1 = a # MAJB and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- vpxor XTMP4, XTMP3, XTMP1 # XTMP1 = s0 vpshufd $0b11111010, X3, XTMP2 # XTMP2 = W[-2] {BBAA} or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- vpaddd XTMP1, XTMP0, XTMP0 # XTMP0 = W[-16] + W[-7] + s0 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- add y3, h # h = t1 + S0 + MAJ # -- vpsrld $10, XTMP2, XTMP4 # XTMP4 = W[-2] >> 10 {BBAA} ROTATE_ARGS ################################### RND N + 2 ############################ mov a, y3 # y3 = a # MAJA rorx $25, e, y0 # y0 = e >> 25 # S1A offset = \disp + 2*4 addl offset(%rsp, SRND), h # h = k + w + h # -- vpsrlq $19, XTMP2, XTMP3 # XTMP3 = W[-2] ror 19 {xBxA} rorx $11, e, y1 # y1 = e >> 11 # S1B or c, y3 # y3 = a|c # MAJA mov f, y2 # y2 = f # CH xor g, y2 # y2 = f^g # CH rorx $13, a, T1 # T1 = a >> 13 # S0B xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1 vpsrlq $17, XTMP2, XTMP2 # XTMP2 = W[-2] ror 17 {xBxA} and e, y2 # y2 = (f^g)&e # CH rorx $6, e, y1 # y1 = (e >> 6) # S1 vpxor XTMP3, XTMP2, XTMP2 add h, d # d = k + w + h + d # -- and b, y3 # y3 = (a|c)&b # MAJA xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1 rorx $22, a, y1 # y1 = a >> 22 # S0A vpxor XTMP2, XTMP4, XTMP4 # XTMP4 = s1 {xBxA} xor g, y2 # y2 = CH = ((f^g)&e)^g # CH vpshufb SHUF_00BA, XTMP4, XTMP4 # XTMP4 = s1 {00BA} xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0 rorx $2, a ,T1 # T1 = (a >> 2) # S0 vpaddd XTMP4, XTMP0, XTMP0 # XTMP0 = {..., ..., W[1], W[0]} xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0 mov a, T1 # T1 = a # MAJB and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- vpshufd $0b01010000, XTMP0, XTMP2 # XTMP2 = W[-2] {DDCC} or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1,h # h = k + w + h + S0 # -- add y2,d # d = k + w + h + d + S1 + CH = d + t1 # -- add y2,h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- add y3,h # h = t1 + S0 + MAJ # -- ROTATE_ARGS ################################### RND N + 3 ############################ mov a, y3 # y3 = a # MAJA rorx $25, e, y0 # y0 = e >> 25 # S1A rorx $11, e, y1 # y1 = e >> 11 # S1B offset = \disp + 3*4 addl offset(%rsp, SRND), h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA vpsrld $10, XTMP2, XTMP5 # XTMP5 = W[-2] >> 10 {DDCC} mov f, y2 # y2 = f # CH rorx $13, a, T1 # T1 = a >> 13 # S0B xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1 xor g, y2 # y2 = f^g # CH vpsrlq $19, XTMP2, XTMP3 # XTMP3 = W[-2] ror 19 {xDxC} rorx $6, e, y1 # y1 = (e >> 6) # S1 and e, y2 # y2 = (f^g)&e # CH add h, d # d = k + w + h + d # -- and b, y3 # y3 = (a|c)&b # MAJA vpsrlq $17, XTMP2, XTMP2 # XTMP2 = W[-2] ror 17 {xDxC} xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH vpxor XTMP3, XTMP2, XTMP2 rorx $22, a, y1 # y1 = a >> 22 # S0A add y0, y2 # y2 = S1 + CH # -- vpxor XTMP2, XTMP5, XTMP5 # XTMP5 = s1 {xDxC} xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- rorx $2, a, T1 # T1 = (a >> 2) 
# S0 vpshufb SHUF_DC00, XTMP5, XTMP5 # XTMP5 = s1 {DC00} vpaddd XTMP0, XTMP5, X0 # X0 = {W[3], W[2], W[1], W[0]} xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0 mov a, T1 # T1 = a # MAJB and c, T1 # T1 = a&c # MAJB or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- add y3, h # h = t1 + S0 + MAJ # -- ROTATE_ARGS rotate_Xs .endm .macro DO_4ROUNDS disp ################################### RND N + 0 ########################### mov f, y2 # y2 = f # CH rorx $25, e, y0 # y0 = e >> 25 # S1A rorx $11, e, y1 # y1 = e >> 11 # S1B xor g, y2 # y2 = f^g # CH xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1 rorx $6, e, y1 # y1 = (e >> 6) # S1 and e, y2 # y2 = (f^g)&e # CH xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1 rorx $13, a, T1 # T1 = a >> 13 # S0B xor g, y2 # y2 = CH = ((f^g)&e)^g # CH rorx $22, a, y1 # y1 = a >> 22 # S0A mov a, y3 # y3 = a # MAJA xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0 rorx $2, a, T1 # T1 = (a >> 2) # S0 addl \disp(%rsp, SRND), h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0 mov a, T1 # T1 = a # MAJB and b, y3 # y3 = (a|c)&b # MAJA and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- add h, d # d = k + w + h + d # -- or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- ROTATE_ARGS ################################### RND N + 1 ########################### add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- mov f, y2 # y2 = f # CH rorx $25, e, y0 # y0 = e >> 25 # S1A rorx $11, e, y1 # y1 = e >> 11 # S1B xor g, y2 # y2 = f^g # CH xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1 rorx $6, e, y1 # y1 = (e >> 6) # S1 and e, y2 # y2 = (f^g)&e # CH add y3, old_h # h = t1 + S0 + MAJ # -- xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1 rorx $13, a, T1 # T1 = a >> 13 # S0B xor g, y2 # y2 = CH = ((f^g)&e)^g # CH rorx $22, a, y1 # y1 = a >> 22 # S0A mov a, y3 # y3 = a # MAJA xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0 rorx $2, a, T1 # T1 = (a >> 2) # S0 offset = 4*1 + \disp addl offset(%rsp, SRND), h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0 mov a, T1 # T1 = a # MAJB and b, y3 # y3 = (a|c)&b # MAJA and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- add h, d # d = k + w + h + d # -- or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- ROTATE_ARGS ################################### RND N + 2 ############################## add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- mov f, y2 # y2 = f # CH rorx $25, e, y0 # y0 = e >> 25 # S1A rorx $11, e, y1 # y1 = e >> 11 # S1B xor g, y2 # y2 = f^g # CH xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1 rorx $6, e, y1 # y1 = (e >> 6) # S1 and e, y2 # y2 = (f^g)&e # CH add y3, old_h # h = t1 + S0 + MAJ # -- xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1 rorx $13, a, T1 # T1 = a >> 13 # S0B xor g, y2 # y2 = CH = ((f^g)&e)^g # CH rorx $22, a, y1 # y1 = a >> 22 # S0A mov a, y3 # y3 = a # MAJA xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0 rorx $2, a, T1 # T1 = (a >> 2) # S0 offset = 4*2 + \disp addl offset(%rsp, SRND), h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0 mov a, T1 # T1 = a # MAJB and b, y3 # y3 = (a|c)&b # MAJA and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- add h, d # d = k + w + h + d # -- or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ 
add y1, h # h = k + w + h + S0 # -- add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- ROTATE_ARGS ################################### RND N + 3 ########################### add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- mov f, y2 # y2 = f # CH rorx $25, e, y0 # y0 = e >> 25 # S1A rorx $11, e, y1 # y1 = e >> 11 # S1B xor g, y2 # y2 = f^g # CH xor y1, y0 # y0 = (e>>25) ^ (e>>11) # S1 rorx $6, e, y1 # y1 = (e >> 6) # S1 and e, y2 # y2 = (f^g)&e # CH add y3, old_h # h = t1 + S0 + MAJ # -- xor y1, y0 # y0 = (e>>25) ^ (e>>11) ^ (e>>6) # S1 rorx $13, a, T1 # T1 = a >> 13 # S0B xor g, y2 # y2 = CH = ((f^g)&e)^g # CH rorx $22, a, y1 # y1 = a >> 22 # S0A mov a, y3 # y3 = a # MAJA xor T1, y1 # y1 = (a>>22) ^ (a>>13) # S0 rorx $2, a, T1 # T1 = (a >> 2) # S0 offset = 4*3 + \disp addl offset(%rsp, SRND), h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA xor T1, y1 # y1 = (a>>22) ^ (a>>13) ^ (a>>2) # S0 mov a, T1 # T1 = a # MAJB and b, y3 # y3 = (a|c)&b # MAJA and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- add h, d # d = k + w + h + d # -- or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- add y3, h # h = t1 + S0 + MAJ # -- ROTATE_ARGS .endm ######################################################################## ## void sha256_transform_rorx(struct sha256_state *state, const u8 *data, int blocks) ## arg 1 : pointer to state ## arg 2 : pointer to input data ## arg 3 : Num blocks ######################################################################## .text SYM_TYPED_FUNC_START(sha256_transform_rorx) .align 32 pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 push %rbp mov %rsp, %rbp subq $STACK_SIZE, %rsp and $-32, %rsp # align rsp to 32 byte boundary shl $6, NUM_BLKS # convert to bytes jz done_hash lea -64(INP, NUM_BLKS), NUM_BLKS # pointer to last block mov NUM_BLKS, _INP_END(%rsp) cmp NUM_BLKS, INP je only_one_block ## load initial digest mov (CTX), a mov 4*1(CTX), b mov 4*2(CTX), c mov 4*3(CTX), d mov 4*4(CTX), e mov 4*5(CTX), f mov 4*6(CTX), g mov 4*7(CTX), h vmovdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK vmovdqa _SHUF_00BA(%rip), SHUF_00BA vmovdqa _SHUF_DC00(%rip), SHUF_DC00 mov CTX, _CTX(%rsp) loop0: ## Load first 16 dwords from two blocks VMOVDQ 0*32(INP),XTMP0 VMOVDQ 1*32(INP),XTMP1 VMOVDQ 2*32(INP),XTMP2 VMOVDQ 3*32(INP),XTMP3 ## byte swap data vpshufb BYTE_FLIP_MASK, XTMP0, XTMP0 vpshufb BYTE_FLIP_MASK, XTMP1, XTMP1 vpshufb BYTE_FLIP_MASK, XTMP2, XTMP2 vpshufb BYTE_FLIP_MASK, XTMP3, XTMP3 ## transpose data into high/low halves vperm2i128 $0x20, XTMP2, XTMP0, X0 vperm2i128 $0x31, XTMP2, XTMP0, X1 vperm2i128 $0x20, XTMP3, XTMP1, X2 vperm2i128 $0x31, XTMP3, XTMP1, X3 last_block_enter: add $64, INP mov INP, _INP(%rsp) ## schedule 48 input dwords, by doing 3 rounds of 12 each xor SRND, SRND .align 16 loop1: vpaddd K256+0*32(SRND), X0, XFER vmovdqa XFER, 0*32+_XFER(%rsp, SRND) FOUR_ROUNDS_AND_SCHED _XFER + 0*32 vpaddd K256+1*32(SRND), X0, XFER vmovdqa XFER, 1*32+_XFER(%rsp, SRND) FOUR_ROUNDS_AND_SCHED _XFER + 1*32 vpaddd K256+2*32(SRND), X0, XFER vmovdqa XFER, 2*32+_XFER(%rsp, SRND) FOUR_ROUNDS_AND_SCHED _XFER + 2*32 vpaddd K256+3*32(SRND), X0, XFER vmovdqa XFER, 3*32+_XFER(%rsp, SRND) FOUR_ROUNDS_AND_SCHED _XFER + 3*32 add $4*32, SRND cmp $3*4*32, SRND jb loop1 loop2: ## Do last 16 rounds with no scheduling vpaddd K256+0*32(SRND), X0, XFER vmovdqa XFER, 0*32+_XFER(%rsp, SRND) DO_4ROUNDS _XFER + 0*32 vpaddd K256+1*32(SRND), 
X1, XFER vmovdqa XFER, 1*32+_XFER(%rsp, SRND) DO_4ROUNDS _XFER + 1*32 add $2*32, SRND vmovdqa X2, X0 vmovdqa X3, X1 cmp $4*4*32, SRND jb loop2 mov _CTX(%rsp), CTX mov _INP(%rsp), INP addm (4*0)(CTX),a addm (4*1)(CTX),b addm (4*2)(CTX),c addm (4*3)(CTX),d addm (4*4)(CTX),e addm (4*5)(CTX),f addm (4*6)(CTX),g addm (4*7)(CTX),h cmp _INP_END(%rsp), INP ja done_hash #### Do second block using previously scheduled results xor SRND, SRND .align 16 loop3: DO_4ROUNDS _XFER + 0*32 + 16 DO_4ROUNDS _XFER + 1*32 + 16 add $2*32, SRND cmp $4*4*32, SRND jb loop3 mov _CTX(%rsp), CTX mov _INP(%rsp), INP add $64, INP addm (4*0)(CTX),a addm (4*1)(CTX),b addm (4*2)(CTX),c addm (4*3)(CTX),d addm (4*4)(CTX),e addm (4*5)(CTX),f addm (4*6)(CTX),g addm (4*7)(CTX),h cmp _INP_END(%rsp), INP jb loop0 ja done_hash do_last_block: VMOVDQ 0*16(INP),XWORD0 VMOVDQ 1*16(INP),XWORD1 VMOVDQ 2*16(INP),XWORD2 VMOVDQ 3*16(INP),XWORD3 vpshufb X_BYTE_FLIP_MASK, XWORD0, XWORD0 vpshufb X_BYTE_FLIP_MASK, XWORD1, XWORD1 vpshufb X_BYTE_FLIP_MASK, XWORD2, XWORD2 vpshufb X_BYTE_FLIP_MASK, XWORD3, XWORD3 jmp last_block_enter only_one_block: ## load initial digest mov (4*0)(CTX),a mov (4*1)(CTX),b mov (4*2)(CTX),c mov (4*3)(CTX),d mov (4*4)(CTX),e mov (4*5)(CTX),f mov (4*6)(CTX),g mov (4*7)(CTX),h vmovdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK vmovdqa _SHUF_00BA(%rip), SHUF_00BA vmovdqa _SHUF_DC00(%rip), SHUF_DC00 mov CTX, _CTX(%rsp) jmp do_last_block done_hash: mov %rbp, %rsp pop %rbp popq %r15 popq %r14 popq %r13 popq %r12 popq %rbx RET SYM_FUNC_END(sha256_transform_rorx) .section .rodata.cst512.K256, "aM", @progbits, 512 .align 64 K256: .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .section .rodata.cst32.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 32 .align 32 PSHUFFLE_BYTE_FLIP_MASK: .octa 0x0c0d0e0f08090a0b0405060700010203,0x0c0d0e0f08090a0b0405060700010203 # shuffle xBxA -> 00BA .section .rodata.cst32._SHUF_00BA, "aM", @progbits, 32 .align 32 _SHUF_00BA: .octa 
0xFFFFFFFFFFFFFFFF0b0a090803020100,0xFFFFFFFFFFFFFFFF0b0a090803020100

# shuffle xDxC -> DC00
.section	.rodata.cst32._SHUF_DC00, "aM", @progbits, 32
.align 32
_SHUF_DC00:
	.octa 0x0b0a090803020100FFFFFFFFFFFFFFFF,0x0b0a090803020100FFFFFFFFFFFFFFFF
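The round macros above annotate every instruction with the quantity it builds: S1 and S0 are the big sigma rotations of e and a, CH and MAJ the choose and majority functions. For reference, the same expressions in plain C, with rotation counts taken directly from the rorx operands; this is a restatement of standard SHA-256, not new kernel code.

/* The quantities the FOUR_ROUNDS_AND_SCHED/DO_4ROUNDS comments call S1, S0,
 * CH and MAJ, written out in C (32-bit rotates). */
#include <stdint.h>

static inline uint32_t ror32(uint32_t x, unsigned n)
{
	return (x >> n) | (x << (32 - n));
}

static inline uint32_t big_sigma1(uint32_t e)	/* "S1", held in y0 */
{
	return ror32(e, 6) ^ ror32(e, 11) ^ ror32(e, 25);
}

static inline uint32_t big_sigma0(uint32_t a)	/* "S0", held in y1 */
{
	return ror32(a, 2) ^ ror32(a, 13) ^ ror32(a, 22);
}

static inline uint32_t ch(uint32_t e, uint32_t f, uint32_t g)	/* "CH", y2 */
{
	return ((f ^ g) & e) ^ g;	/* == (e & f) ^ (~e & g) */
}

static inline uint32_t maj(uint32_t a, uint32_t b, uint32_t c)	/* "MAJ", y3 */
{
	return ((a | c) & b) | (a & c);	/* == (a & b) ^ (a & c) ^ (b & c) */
}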
aixcc-public/challenge-001-exemplar-source
24,567
arch/x86/crypto/sha512-avx2-asm.S
######################################################################## # Implement fast SHA-512 with AVX2 instructions. (x86_64) # # Copyright (C) 2013 Intel Corporation. # # Authors: # James Guilford <james.guilford@intel.com> # Kirk Yap <kirk.s.yap@intel.com> # David Cote <david.m.cote@intel.com> # Tim Chen <tim.c.chen@linux.intel.com> # # This software is available to you under a choice of one of two # licenses. You may choose to be licensed under the terms of the GNU # General Public License (GPL) Version 2, available from the file # COPYING in the main directory of this source tree, or the # OpenIB.org BSD license below: # # Redistribution and use in source and binary forms, with or # without modification, are permitted provided that the following # conditions are met: # # - Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # - Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials # provided with the distribution. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # ######################################################################## # # This code is described in an Intel White-Paper: # "Fast SHA-512 Implementations on Intel Architecture Processors" # # To find it, surf to http://www.intel.com/p/en_US/embedded # and search for that title. 
# ######################################################################## # This code schedules 1 blocks at a time, with 4 lanes per block ######################################################################## #include <linux/linkage.h> #include <linux/cfi_types.h> .text # Virtual Registers Y_0 = %ymm4 Y_1 = %ymm5 Y_2 = %ymm6 Y_3 = %ymm7 YTMP0 = %ymm0 YTMP1 = %ymm1 YTMP2 = %ymm2 YTMP3 = %ymm3 YTMP4 = %ymm8 XFER = YTMP0 BYTE_FLIP_MASK = %ymm9 # 1st arg is %rdi, which is saved to the stack and accessed later via %r12 CTX1 = %rdi CTX2 = %r12 # 2nd arg INP = %rsi # 3rd arg NUM_BLKS = %rdx c = %rcx d = %r8 e = %rdx y3 = %rsi TBL = %rdi # clobbers CTX1 a = %rax b = %rbx f = %r9 g = %r10 h = %r11 old_h = %r11 T1 = %r12 # clobbers CTX2 y0 = %r13 y1 = %r14 y2 = %r15 # Local variables (stack frame) XFER_SIZE = 4*8 SRND_SIZE = 1*8 INP_SIZE = 1*8 INPEND_SIZE = 1*8 CTX_SIZE = 1*8 frame_XFER = 0 frame_SRND = frame_XFER + XFER_SIZE frame_INP = frame_SRND + SRND_SIZE frame_INPEND = frame_INP + INP_SIZE frame_CTX = frame_INPEND + INPEND_SIZE frame_size = frame_CTX + CTX_SIZE ## assume buffers not aligned #define VMOVDQ vmovdqu # addm [mem], reg # Add reg to mem using reg-mem add and store .macro addm p1 p2 add \p1, \p2 mov \p2, \p1 .endm # COPY_YMM_AND_BSWAP ymm, [mem], byte_flip_mask # Load ymm with mem and byte swap each dword .macro COPY_YMM_AND_BSWAP p1 p2 p3 VMOVDQ \p2, \p1 vpshufb \p3, \p1, \p1 .endm # rotate_Ys # Rotate values of symbols Y0...Y3 .macro rotate_Ys Y_ = Y_0 Y_0 = Y_1 Y_1 = Y_2 Y_2 = Y_3 Y_3 = Y_ .endm # RotateState .macro RotateState # Rotate symbols a..h right old_h = h TMP_ = h h = g g = f f = e e = d d = c c = b b = a a = TMP_ .endm # macro MY_VPALIGNR YDST, YSRC1, YSRC2, RVAL # YDST = {YSRC1, YSRC2} >> RVAL*8 .macro MY_VPALIGNR YDST YSRC1 YSRC2 RVAL vperm2f128 $0x3, \YSRC2, \YSRC1, \YDST # YDST = {YS1_LO, YS2_HI} vpalignr $\RVAL, \YSRC2, \YDST, \YDST # YDST = {YDS1, YS2} >> RVAL*8 .endm .macro FOUR_ROUNDS_AND_SCHED ################################### RND N + 0 ######################################### # Extract w[t-7] MY_VPALIGNR YTMP0, Y_3, Y_2, 8 # YTMP0 = W[-7] # Calculate w[t-16] + w[t-7] vpaddq Y_0, YTMP0, YTMP0 # YTMP0 = W[-7] + W[-16] # Extract w[t-15] MY_VPALIGNR YTMP1, Y_1, Y_0, 8 # YTMP1 = W[-15] # Calculate sigma0 # Calculate w[t-15] ror 1 vpsrlq $1, YTMP1, YTMP2 vpsllq $(64-1), YTMP1, YTMP3 vpor YTMP2, YTMP3, YTMP3 # YTMP3 = W[-15] ror 1 # Calculate w[t-15] shr 7 vpsrlq $7, YTMP1, YTMP4 # YTMP4 = W[-15] >> 7 mov a, y3 # y3 = a # MAJA rorx $41, e, y0 # y0 = e >> 41 # S1A rorx $18, e, y1 # y1 = e >> 18 # S1B add frame_XFER(%rsp),h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA mov f, y2 # y2 = f # CH rorx $34, a, T1 # T1 = a >> 34 # S0B xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1 xor g, y2 # y2 = f^g # CH rorx $14, e, y1 # y1 = (e >> 14) # S1 and e, y2 # y2 = (f^g)&e # CH xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ (e>>14) # S1 rorx $39, a, y1 # y1 = a >> 39 # S0A add h, d # d = k + w + h + d # -- and b, y3 # y3 = (a|c)&b # MAJA xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0 rorx $28, a, T1 # T1 = (a >> 28) # S0 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0 mov a, T1 # T1 = a # MAJB and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- add y3, h # h = t1 + S0 + MAJ # -- RotateState ################################### RND N + 1 
######################################### # Calculate w[t-15] ror 8 vpsrlq $8, YTMP1, YTMP2 vpsllq $(64-8), YTMP1, YTMP1 vpor YTMP2, YTMP1, YTMP1 # YTMP1 = W[-15] ror 8 # XOR the three components vpxor YTMP4, YTMP3, YTMP3 # YTMP3 = W[-15] ror 1 ^ W[-15] >> 7 vpxor YTMP1, YTMP3, YTMP1 # YTMP1 = s0 # Add three components, w[t-16], w[t-7] and sigma0 vpaddq YTMP1, YTMP0, YTMP0 # YTMP0 = W[-16] + W[-7] + s0 # Move to appropriate lanes for calculating w[16] and w[17] vperm2f128 $0x0, YTMP0, YTMP0, Y_0 # Y_0 = W[-16] + W[-7] + s0 {BABA} # Move to appropriate lanes for calculating w[18] and w[19] vpand MASK_YMM_LO(%rip), YTMP0, YTMP0 # YTMP0 = W[-16] + W[-7] + s0 {DC00} # Calculate w[16] and w[17] in both 128 bit lanes # Calculate sigma1 for w[16] and w[17] on both 128 bit lanes vperm2f128 $0x11, Y_3, Y_3, YTMP2 # YTMP2 = W[-2] {BABA} vpsrlq $6, YTMP2, YTMP4 # YTMP4 = W[-2] >> 6 {BABA} mov a, y3 # y3 = a # MAJA rorx $41, e, y0 # y0 = e >> 41 # S1A rorx $18, e, y1 # y1 = e >> 18 # S1B add 1*8+frame_XFER(%rsp), h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA mov f, y2 # y2 = f # CH rorx $34, a, T1 # T1 = a >> 34 # S0B xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1 xor g, y2 # y2 = f^g # CH rorx $14, e, y1 # y1 = (e >> 14) # S1 xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ (e>>14) # S1 rorx $39, a, y1 # y1 = a >> 39 # S0A and e, y2 # y2 = (f^g)&e # CH add h, d # d = k + w + h + d # -- and b, y3 # y3 = (a|c)&b # MAJA xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0 rorx $28, a, T1 # T1 = (a >> 28) # S0 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0 mov a, T1 # T1 = a # MAJB and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- add y3, h # h = t1 + S0 + MAJ # -- RotateState ################################### RND N + 2 ######################################### vpsrlq $19, YTMP2, YTMP3 # YTMP3 = W[-2] >> 19 {BABA} vpsllq $(64-19), YTMP2, YTMP1 # YTMP1 = W[-2] << 19 {BABA} vpor YTMP1, YTMP3, YTMP3 # YTMP3 = W[-2] ror 19 {BABA} vpxor YTMP3, YTMP4, YTMP4 # YTMP4 = W[-2] ror 19 ^ W[-2] >> 6 {BABA} vpsrlq $61, YTMP2, YTMP3 # YTMP3 = W[-2] >> 61 {BABA} vpsllq $(64-61), YTMP2, YTMP1 # YTMP1 = W[-2] << 61 {BABA} vpor YTMP1, YTMP3, YTMP3 # YTMP3 = W[-2] ror 61 {BABA} vpxor YTMP3, YTMP4, YTMP4 # YTMP4 = s1 = (W[-2] ror 19) ^ # (W[-2] ror 61) ^ (W[-2] >> 6) {BABA} # Add sigma1 to the other compunents to get w[16] and w[17] vpaddq YTMP4, Y_0, Y_0 # Y_0 = {W[1], W[0], W[1], W[0]} # Calculate sigma1 for w[18] and w[19] for upper 128 bit lane vpsrlq $6, Y_0, YTMP4 # YTMP4 = W[-2] >> 6 {DC--} mov a, y3 # y3 = a # MAJA rorx $41, e, y0 # y0 = e >> 41 # S1A add 2*8+frame_XFER(%rsp), h # h = k + w + h # -- rorx $18, e, y1 # y1 = e >> 18 # S1B or c, y3 # y3 = a|c # MAJA mov f, y2 # y2 = f # CH xor g, y2 # y2 = f^g # CH rorx $34, a, T1 # T1 = a >> 34 # S0B xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1 and e, y2 # y2 = (f^g)&e # CH rorx $14, e, y1 # y1 = (e >> 14) # S1 add h, d # d = k + w + h + d # -- and b, y3 # y3 = (a|c)&b # MAJA xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ (e>>14) # S1 rorx $39, a, y1 # y1 = a >> 39 # S0A xor g, y2 # y2 = CH = ((f^g)&e)^g # CH xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0 rorx $28, a, T1 # T1 = (a >> 28) # S0 xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0 mov a, T1 # T1 = a # MAJB and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add 
y1, h # h = k + w + h + S0 # -- add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- add y3, h # h = t1 + S0 + MAJ # -- RotateState ################################### RND N + 3 ######################################### vpsrlq $19, Y_0, YTMP3 # YTMP3 = W[-2] >> 19 {DC--} vpsllq $(64-19), Y_0, YTMP1 # YTMP1 = W[-2] << 19 {DC--} vpor YTMP1, YTMP3, YTMP3 # YTMP3 = W[-2] ror 19 {DC--} vpxor YTMP3, YTMP4, YTMP4 # YTMP4 = W[-2] ror 19 ^ W[-2] >> 6 {DC--} vpsrlq $61, Y_0, YTMP3 # YTMP3 = W[-2] >> 61 {DC--} vpsllq $(64-61), Y_0, YTMP1 # YTMP1 = W[-2] << 61 {DC--} vpor YTMP1, YTMP3, YTMP3 # YTMP3 = W[-2] ror 61 {DC--} vpxor YTMP3, YTMP4, YTMP4 # YTMP4 = s1 = (W[-2] ror 19) ^ # (W[-2] ror 61) ^ (W[-2] >> 6) {DC--} # Add the sigma0 + w[t-7] + w[t-16] for w[18] and w[19] # to newly calculated sigma1 to get w[18] and w[19] vpaddq YTMP4, YTMP0, YTMP2 # YTMP2 = {W[3], W[2], --, --} # Form w[19, w[18], w17], w[16] vpblendd $0xF0, YTMP2, Y_0, Y_0 # Y_0 = {W[3], W[2], W[1], W[0]} mov a, y3 # y3 = a # MAJA rorx $41, e, y0 # y0 = e >> 41 # S1A rorx $18, e, y1 # y1 = e >> 18 # S1B add 3*8+frame_XFER(%rsp), h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA mov f, y2 # y2 = f # CH rorx $34, a, T1 # T1 = a >> 34 # S0B xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1 xor g, y2 # y2 = f^g # CH rorx $14, e, y1 # y1 = (e >> 14) # S1 and e, y2 # y2 = (f^g)&e # CH add h, d # d = k + w + h + d # -- and b, y3 # y3 = (a|c)&b # MAJA xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ (e>>14) # S1 xor g, y2 # y2 = CH = ((f^g)&e)^g # CH rorx $39, a, y1 # y1 = a >> 39 # S0A add y0, y2 # y2 = S1 + CH # -- xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- rorx $28, a, T1 # T1 = (a >> 28) # S0 xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0 mov a, T1 # T1 = a # MAJB and c, T1 # T1 = a&c # MAJB or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- add y3, h # h = t1 + S0 + MAJ # -- RotateState rotate_Ys .endm .macro DO_4ROUNDS ################################### RND N + 0 ######################################### mov f, y2 # y2 = f # CH rorx $41, e, y0 # y0 = e >> 41 # S1A rorx $18, e, y1 # y1 = e >> 18 # S1B xor g, y2 # y2 = f^g # CH xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1 rorx $14, e, y1 # y1 = (e >> 14) # S1 and e, y2 # y2 = (f^g)&e # CH xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ (e>>14) # S1 rorx $34, a, T1 # T1 = a >> 34 # S0B xor g, y2 # y2 = CH = ((f^g)&e)^g # CH rorx $39, a, y1 # y1 = a >> 39 # S0A mov a, y3 # y3 = a # MAJA xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0 rorx $28, a, T1 # T1 = (a >> 28) # S0 add frame_XFER(%rsp), h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0 mov a, T1 # T1 = a # MAJB and b, y3 # y3 = (a|c)&b # MAJA and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- add h, d # d = k + w + h + d # -- or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- RotateState ################################### RND N + 1 ######################################### add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- mov f, y2 # y2 = f # CH rorx $41, e, y0 # y0 = e >> 41 # S1A rorx $18, e, y1 # y1 = e >> 18 # S1B xor g, y2 # y2 = f^g # CH xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1 rorx $14, e, y1 # y1 = (e >> 14) # S1 and e, y2 # y2 = (f^g)&e # CH add y3, old_h # h = t1 + S0 + MAJ # -- xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ 
(e>>14) # S1 rorx $34, a, T1 # T1 = a >> 34 # S0B xor g, y2 # y2 = CH = ((f^g)&e)^g # CH rorx $39, a, y1 # y1 = a >> 39 # S0A mov a, y3 # y3 = a # MAJA xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0 rorx $28, a, T1 # T1 = (a >> 28) # S0 add 8*1+frame_XFER(%rsp), h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0 mov a, T1 # T1 = a # MAJB and b, y3 # y3 = (a|c)&b # MAJA and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- add h, d # d = k + w + h + d # -- or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- RotateState ################################### RND N + 2 ######################################### add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- mov f, y2 # y2 = f # CH rorx $41, e, y0 # y0 = e >> 41 # S1A rorx $18, e, y1 # y1 = e >> 18 # S1B xor g, y2 # y2 = f^g # CH xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1 rorx $14, e, y1 # y1 = (e >> 14) # S1 and e, y2 # y2 = (f^g)&e # CH add y3, old_h # h = t1 + S0 + MAJ # -- xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ (e>>14) # S1 rorx $34, a, T1 # T1 = a >> 34 # S0B xor g, y2 # y2 = CH = ((f^g)&e)^g # CH rorx $39, a, y1 # y1 = a >> 39 # S0A mov a, y3 # y3 = a # MAJA xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0 rorx $28, a, T1 # T1 = (a >> 28) # S0 add 8*2+frame_XFER(%rsp), h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0 mov a, T1 # T1 = a # MAJB and b, y3 # y3 = (a|c)&b # MAJA and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- add h, d # d = k + w + h + d # -- or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- RotateState ################################### RND N + 3 ######################################### add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- mov f, y2 # y2 = f # CH rorx $41, e, y0 # y0 = e >> 41 # S1A rorx $18, e, y1 # y1 = e >> 18 # S1B xor g, y2 # y2 = f^g # CH xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1 rorx $14, e, y1 # y1 = (e >> 14) # S1 and e, y2 # y2 = (f^g)&e # CH add y3, old_h # h = t1 + S0 + MAJ # -- xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ (e>>14) # S1 rorx $34, a, T1 # T1 = a >> 34 # S0B xor g, y2 # y2 = CH = ((f^g)&e)^g # CH rorx $39, a, y1 # y1 = a >> 39 # S0A mov a, y3 # y3 = a # MAJA xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0 rorx $28, a, T1 # T1 = (a >> 28) # S0 add 8*3+frame_XFER(%rsp), h # h = k + w + h # -- or c, y3 # y3 = a|c # MAJA xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0 mov a, T1 # T1 = a # MAJB and b, y3 # y3 = (a|c)&b # MAJA and c, T1 # T1 = a&c # MAJB add y0, y2 # y2 = S1 + CH # -- add h, d # d = k + w + h + d # -- or T1, y3 # y3 = MAJ = (a|c)&b)|(a&c) # MAJ add y1, h # h = k + w + h + S0 # -- add y2, d # d = k + w + h + d + S1 + CH = d + t1 # -- add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0# -- add y3, h # h = t1 + S0 + MAJ # -- RotateState .endm ######################################################################## # void sha512_transform_rorx(sha512_state *state, const u8 *data, int blocks) # Purpose: Updates the SHA512 digest stored at "state" with the message # stored in "data". # The size of the message pointed to by "data" must be an integer multiple # of SHA512 message blocks. 
# "blocks" is the message length in SHA512 blocks ######################################################################## SYM_TYPED_FUNC_START(sha512_transform_rorx) # Save GPRs push %rbx push %r12 push %r13 push %r14 push %r15 # Allocate Stack Space push %rbp mov %rsp, %rbp sub $frame_size, %rsp and $~(0x20 - 1), %rsp shl $7, NUM_BLKS # convert to bytes jz done_hash add INP, NUM_BLKS # pointer to end of data mov NUM_BLKS, frame_INPEND(%rsp) ## load initial digest mov 8*0(CTX1), a mov 8*1(CTX1), b mov 8*2(CTX1), c mov 8*3(CTX1), d mov 8*4(CTX1), e mov 8*5(CTX1), f mov 8*6(CTX1), g mov 8*7(CTX1), h # save %rdi (CTX) before it gets clobbered mov %rdi, frame_CTX(%rsp) vmovdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK loop0: lea K512(%rip), TBL ## byte swap first 16 dwords COPY_YMM_AND_BSWAP Y_0, (INP), BYTE_FLIP_MASK COPY_YMM_AND_BSWAP Y_1, 1*32(INP), BYTE_FLIP_MASK COPY_YMM_AND_BSWAP Y_2, 2*32(INP), BYTE_FLIP_MASK COPY_YMM_AND_BSWAP Y_3, 3*32(INP), BYTE_FLIP_MASK mov INP, frame_INP(%rsp) ## schedule 64 input dwords, by doing 12 rounds of 4 each movq $4, frame_SRND(%rsp) .align 16 loop1: vpaddq (TBL), Y_0, XFER vmovdqa XFER, frame_XFER(%rsp) FOUR_ROUNDS_AND_SCHED vpaddq 1*32(TBL), Y_0, XFER vmovdqa XFER, frame_XFER(%rsp) FOUR_ROUNDS_AND_SCHED vpaddq 2*32(TBL), Y_0, XFER vmovdqa XFER, frame_XFER(%rsp) FOUR_ROUNDS_AND_SCHED vpaddq 3*32(TBL), Y_0, XFER vmovdqa XFER, frame_XFER(%rsp) add $(4*32), TBL FOUR_ROUNDS_AND_SCHED subq $1, frame_SRND(%rsp) jne loop1 movq $2, frame_SRND(%rsp) loop2: vpaddq (TBL), Y_0, XFER vmovdqa XFER, frame_XFER(%rsp) DO_4ROUNDS vpaddq 1*32(TBL), Y_1, XFER vmovdqa XFER, frame_XFER(%rsp) add $(2*32), TBL DO_4ROUNDS vmovdqa Y_2, Y_0 vmovdqa Y_3, Y_1 subq $1, frame_SRND(%rsp) jne loop2 mov frame_CTX(%rsp), CTX2 addm 8*0(CTX2), a addm 8*1(CTX2), b addm 8*2(CTX2), c addm 8*3(CTX2), d addm 8*4(CTX2), e addm 8*5(CTX2), f addm 8*6(CTX2), g addm 8*7(CTX2), h mov frame_INP(%rsp), INP add $128, INP cmp frame_INPEND(%rsp), INP jne loop0 done_hash: # Restore Stack Pointer mov %rbp, %rsp pop %rbp # Restore GPRs pop %r15 pop %r14 pop %r13 pop %r12 pop %rbx RET SYM_FUNC_END(sha512_transform_rorx) ######################################################################## ### Binary Data # Mergeable 640-byte rodata section. This allows linker to merge the table # with other, exactly the same 640-byte fragment of another rodata section # (if such section exists). 
.section .rodata.cst640.K512, "aM", @progbits, 640 .align 64 # K[t] used in SHA512 hashing K512: .quad 0x428a2f98d728ae22,0x7137449123ef65cd .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc .quad 0x3956c25bf348b538,0x59f111f1b605d019 .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 .quad 0xd807aa98a3030242,0x12835b0145706fbe .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 .quad 0x9bdc06a725c71235,0xc19bf174cf692694 .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 .quad 0x983e5152ee66dfab,0xa831c66d2db43210 .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 .quad 0x06ca6351e003826f,0x142929670a0e6e70 .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 .quad 0x81c2c92e47edaee6,0x92722c851482353b .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 .quad 0xd192e819d6ef5218,0xd69906245565a910 .quad 0xf40e35855771202a,0x106aa07032bbd1b8 .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec .quad 0x90befffa23631e28,0xa4506cebde82bde9 .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b .quad 0xca273eceea26619c,0xd186b8c721c0c207 .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 .quad 0x113f9804bef90dae,0x1b710b35131c471b .quad 0x28db77f523047d84,0x32caab7b40c72493 .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 .section .rodata.cst32.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 32 .align 32 # Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb. PSHUFFLE_BYTE_FLIP_MASK: .octa 0x08090a0b0c0d0e0f0001020304050607 .octa 0x18191a1b1c1d1e1f1011121314151617 .section .rodata.cst32.MASK_YMM_LO, "aM", @progbits, 32 .align 32 MASK_YMM_LO: .octa 0x00000000000000000000000000000000 .octa 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
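The schedule and round comments in this file refer to four sigma functions. Written out in C with the shift and rotate counts used by the vpsrlq/vpsllq and rorx instructions above, they are the standard SHA-512 definitions; the helper names are illustrative only.

/* The sigma functions referenced by the comments above, in C (64-bit rotates). */
#include <stdint.h>

static inline uint64_t ror64(uint64_t x, unsigned n)
{
	return (x >> n) | (x << (64 - n));
}

/* Message schedule: s0 applied to W[-15], s1 applied to W[-2]. */
static inline uint64_t sigma0(uint64_t w) { return ror64(w, 1) ^ ror64(w, 8) ^ (w >> 7); }
static inline uint64_t sigma1(uint64_t w) { return ror64(w, 19) ^ ror64(w, 61) ^ (w >> 6); }

/* Round functions: "S0" on a, "S1" on e in the register comments. */
static inline uint64_t big_sigma0(uint64_t a) { return ror64(a, 28) ^ ror64(a, 34) ^ ror64(a, 39); }
static inline uint64_t big_sigma1(uint64_t e) { return ror64(e, 14) ^ ror64(e, 18) ^ ror64(e, 41); }

/* New schedule word, as built by FOUR_ROUNDS_AND_SCHED. */
static inline uint64_t next_w(uint64_t w16, uint64_t w15, uint64_t w7, uint64_t w2)
{
	return w16 + sigma0(w15) + w7 + sigma1(w2);
}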
aixcc-public/challenge-001-exemplar-source
2,785
arch/x86/crypto/ghash-clmulni-intel_asm.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Accelerated GHASH implementation with Intel PCLMULQDQ-NI * instructions. This file contains accelerated part of ghash * implementation. More information about PCLMULQDQ can be found at: * * http://software.intel.com/en-us/articles/carry-less-multiplication-and-its-usage-for-computing-the-gcm-mode/ * * Copyright (c) 2009 Intel Corp. * Author: Huang Ying <ying.huang@intel.com> * Vinodh Gopal * Erdinc Ozturk * Deniz Karakoyunlu */ #include <linux/linkage.h> #include <asm/frame.h> .section .rodata.cst16.bswap_mask, "aM", @progbits, 16 .align 16 .Lbswap_mask: .octa 0x000102030405060708090a0b0c0d0e0f #define DATA %xmm0 #define SHASH %xmm1 #define T1 %xmm2 #define T2 %xmm3 #define T3 %xmm4 #define BSWAP %xmm5 #define IN1 %xmm6 .text /* * __clmul_gf128mul_ble: internal ABI * input: * DATA: operand1 * SHASH: operand2, hash_key << 1 mod poly * output: * DATA: operand1 * operand2 mod poly * changed: * T1 * T2 * T3 */ SYM_FUNC_START_LOCAL(__clmul_gf128mul_ble) movaps DATA, T1 pshufd $0b01001110, DATA, T2 pshufd $0b01001110, SHASH, T3 pxor DATA, T2 pxor SHASH, T3 pclmulqdq $0x00, SHASH, DATA # DATA = a0 * b0 pclmulqdq $0x11, SHASH, T1 # T1 = a1 * b1 pclmulqdq $0x00, T3, T2 # T2 = (a1 + a0) * (b1 + b0) pxor DATA, T2 pxor T1, T2 # T2 = a0 * b1 + a1 * b0 movaps T2, T3 pslldq $8, T3 psrldq $8, T2 pxor T3, DATA pxor T2, T1 # <T1:DATA> is result of # carry-less multiplication # first phase of the reduction movaps DATA, T3 psllq $1, T3 pxor DATA, T3 psllq $5, T3 pxor DATA, T3 psllq $57, T3 movaps T3, T2 pslldq $8, T2 psrldq $8, T3 pxor T2, DATA pxor T3, T1 # second phase of the reduction movaps DATA, T2 psrlq $5, T2 pxor DATA, T2 psrlq $1, T2 pxor DATA, T2 psrlq $1, T2 pxor T2, T1 pxor T1, DATA RET SYM_FUNC_END(__clmul_gf128mul_ble) /* void clmul_ghash_mul(char *dst, const u128 *shash) */ SYM_FUNC_START(clmul_ghash_mul) FRAME_BEGIN movups (%rdi), DATA movups (%rsi), SHASH movaps .Lbswap_mask, BSWAP pshufb BSWAP, DATA call __clmul_gf128mul_ble pshufb BSWAP, DATA movups DATA, (%rdi) FRAME_END RET SYM_FUNC_END(clmul_ghash_mul) /* * void clmul_ghash_update(char *dst, const char *src, unsigned int srclen, * const u128 *shash); */ SYM_FUNC_START(clmul_ghash_update) FRAME_BEGIN cmp $16, %rdx jb .Lupdate_just_ret # check length movaps .Lbswap_mask, BSWAP movups (%rdi), DATA movups (%rcx), SHASH pshufb BSWAP, DATA .align 4 .Lupdate_loop: movups (%rsi), IN1 pshufb BSWAP, IN1 pxor IN1, DATA call __clmul_gf128mul_ble sub $16, %rdx add $16, %rsi cmp $16, %rdx jge .Lupdate_loop pshufb BSWAP, DATA movups DATA, (%rdi) .Lupdate_just_ret: FRAME_END RET SYM_FUNC_END(clmul_ghash_update)
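clmul_ghash_update folds each 16-byte chunk into the running state with PXOR and then multiplies by the hash key via __clmul_gf128mul_ble. The model below captures that loop at the C level; be128_t and gf128_mul are stand-ins used only for illustration (they are not kernel helpers), and the byte-swapping done by the pshufb BSWAP steps is omitted.

/* High-level model of clmul_ghash_update's loop; gf128_mul() stands in for
 * __clmul_gf128mul_ble. */
#include <stddef.h>
#include <stdint.h>

typedef struct { uint64_t hi, lo; } be128_t;	/* illustrative 128-bit block */

extern be128_t gf128_mul(be128_t x, be128_t h);	/* x * h mod the GHASH poly */

static be128_t ghash_update(be128_t state, be128_t h,
			    const be128_t *blocks, size_t nblocks)
{
	size_t i;

	for (i = 0; i < nblocks; i++) {
		state.hi ^= blocks[i].hi;	/* pxor IN1, DATA */
		state.lo ^= blocks[i].lo;
		state = gf128_mul(state, h);	/* call __clmul_gf128mul_ble */
	}
	return state;
}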
aixcc-public/challenge-001-exemplar-source
4,056
arch/x86/math-emu/polynom_Xsig.S
/* SPDX-License-Identifier: GPL-2.0 */ /*---------------------------------------------------------------------------+ | polynomial_Xsig.S | | | | Fixed point arithmetic polynomial evaluation. | | | | Copyright (C) 1992,1993,1994,1995 | | W. Metzenthen, 22 Parker St, Ormond, Vic 3163, | | Australia. E-mail billm@jacobi.maths.monash.edu.au | | | | Call from C as: | | void polynomial_Xsig(Xsig *accum, unsigned long long x, | | unsigned long long terms[], int n) | | | | Computes: | | terms[0] + (terms[1] + (terms[2] + ... + (terms[n-1]*x)*x)*x)*x) ... )*x | | and adds the result to the 12 byte Xsig. | | The terms[] are each 8 bytes, but all computation is performed to 12 byte | | precision. | | | | This function must be used carefully: most overflow of intermediate | | results is controlled, but overflow of the result is not. | | | +---------------------------------------------------------------------------*/ .file "polynomial_Xsig.S" #include "fpu_emu.h" #define TERM_SIZE $8 #define SUM_MS -20(%ebp) /* sum ms long */ #define SUM_MIDDLE -24(%ebp) /* sum middle long */ #define SUM_LS -28(%ebp) /* sum ls long */ #define ACCUM_MS -4(%ebp) /* accum ms long */ #define ACCUM_MIDDLE -8(%ebp) /* accum middle long */ #define ACCUM_LS -12(%ebp) /* accum ls long */ #define OVERFLOWED -16(%ebp) /* addition overflow flag */ .text SYM_FUNC_START(polynomial_Xsig) pushl %ebp movl %esp,%ebp subl $32,%esp pushl %esi pushl %edi pushl %ebx movl PARAM2,%esi /* x */ movl PARAM3,%edi /* terms */ movl TERM_SIZE,%eax mull PARAM4 /* n */ addl %eax,%edi movl 4(%edi),%edx /* terms[n] */ movl %edx,SUM_MS movl (%edi),%edx /* terms[n] */ movl %edx,SUM_MIDDLE xor %eax,%eax movl %eax,SUM_LS movb %al,OVERFLOWED subl TERM_SIZE,%edi decl PARAM4 js L_accum_done L_accum_loop: xor %eax,%eax movl %eax,ACCUM_MS movl %eax,ACCUM_MIDDLE movl SUM_MIDDLE,%eax mull (%esi) /* x ls long */ movl %edx,ACCUM_LS movl SUM_MIDDLE,%eax mull 4(%esi) /* x ms long */ addl %eax,ACCUM_LS adcl %edx,ACCUM_MIDDLE adcl $0,ACCUM_MS movl SUM_MS,%eax mull (%esi) /* x ls long */ addl %eax,ACCUM_LS adcl %edx,ACCUM_MIDDLE adcl $0,ACCUM_MS movl SUM_MS,%eax mull 4(%esi) /* x ms long */ addl %eax,ACCUM_MIDDLE adcl %edx,ACCUM_MS testb $0xff,OVERFLOWED jz L_no_overflow movl (%esi),%eax addl %eax,ACCUM_MIDDLE movl 4(%esi),%eax adcl %eax,ACCUM_MS /* This could overflow too */ L_no_overflow: /* * Now put the sum of next term and the accumulator * into the sum register */ movl ACCUM_LS,%eax addl (%edi),%eax /* term ls long */ movl %eax,SUM_LS movl ACCUM_MIDDLE,%eax adcl (%edi),%eax /* term ls long */ movl %eax,SUM_MIDDLE movl ACCUM_MS,%eax adcl 4(%edi),%eax /* term ms long */ movl %eax,SUM_MS sbbb %al,%al movb %al,OVERFLOWED /* Used in the next iteration */ subl TERM_SIZE,%edi decl PARAM4 jns L_accum_loop L_accum_done: movl PARAM1,%edi /* accum */ movl SUM_LS,%eax addl %eax,(%edi) movl SUM_MIDDLE,%eax adcl %eax,4(%edi) movl SUM_MS,%eax adcl %eax,8(%edi) popl %ebx popl %edi popl %esi leave RET SYM_FUNC_END(polynomial_Xsig)
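The header comment describes a Horner evaluation whose result is then added into the 96-bit Xsig accumulator. A reduced-precision model of the same order of operations is shown below; note that the routine seeds the running sum with terms[n] (per its own inline comment) before looping down to terms[0], and doubles are used here only to keep the illustration short, whereas the assembly works in 96-bit fixed point.

/* Reduced-precision model of the Horner scheme documented above. */
static double polynomial_model(const double *terms, int n, double x)
{
	double sum = terms[n];			/* the routine seeds with terms[n] */
	int i;

	for (i = n - 1; i >= 0; i--)
		sum = terms[i] + sum * x;	/* one Horner step per loop pass */

	return sum;
}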
aixcc-public/challenge-001-exemplar-source
10,167
arch/x86/math-emu/div_Xsig.S
/* SPDX-License-Identifier: GPL-2.0 */ .file "div_Xsig.S" /*---------------------------------------------------------------------------+ | div_Xsig.S | | | | Division subroutine for 96 bit quantities | | | | Copyright (C) 1994,1995 | | W. Metzenthen, 22 Parker St, Ormond, Vic 3163, | | Australia. E-mail billm@jacobi.maths.monash.edu.au | | | | | +---------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------+ | Divide the 96 bit quantity pointed to by a, by that pointed to by b, and | | put the 96 bit result at the location d. | | | | The result may not be accurate to 96 bits. It is intended for use where | | a result better than 64 bits is required. The result should usually be | | good to at least 94 bits. | | The returned result is actually divided by one half. This is done to | | prevent overflow. | | | | .aaaaaaaaaaaaaa / .bbbbbbbbbbbbb -> .dddddddddddd | | | | void div_Xsig(Xsig *a, Xsig *b, Xsig *dest) | | | +---------------------------------------------------------------------------*/ #include "exception.h" #include "fpu_emu.h" #define XsigLL(x) (x) #define XsigL(x) 4(x) #define XsigH(x) 8(x) #ifndef NON_REENTRANT_FPU /* Local storage on the stack: Accumulator: FPU_accum_3:FPU_accum_2:FPU_accum_1:FPU_accum_0 */ #define FPU_accum_3 -4(%ebp) #define FPU_accum_2 -8(%ebp) #define FPU_accum_1 -12(%ebp) #define FPU_accum_0 -16(%ebp) #define FPU_result_3 -20(%ebp) #define FPU_result_2 -24(%ebp) #define FPU_result_1 -28(%ebp) #else .data /* Local storage in a static area: Accumulator: FPU_accum_3:FPU_accum_2:FPU_accum_1:FPU_accum_0 */ .align 4,0 FPU_accum_3: .long 0 FPU_accum_2: .long 0 FPU_accum_1: .long 0 FPU_accum_0: .long 0 FPU_result_3: .long 0 FPU_result_2: .long 0 FPU_result_1: .long 0 #endif /* NON_REENTRANT_FPU */ .text SYM_FUNC_START(div_Xsig) pushl %ebp movl %esp,%ebp #ifndef NON_REENTRANT_FPU subl $28,%esp #endif /* NON_REENTRANT_FPU */ pushl %esi pushl %edi pushl %ebx movl PARAM1,%esi /* pointer to num */ movl PARAM2,%ebx /* pointer to denom */ #ifdef PARANOID testl $0x80000000, XsigH(%ebx) /* Divisor */ je L_bugged #endif /* PARANOID */ /*---------------------------------------------------------------------------+ | Divide: Return arg1/arg2 to arg3. | | | | The maximum returned value is (ignoring exponents) | | .ffffffff ffffffff | | ------------------ = 1.ffffffff fffffffe | | .80000000 00000000 | | and the minimum is | | .80000000 00000000 | | ------------------ = .80000000 00000001 (rounded) | | .ffffffff ffffffff | | | +---------------------------------------------------------------------------*/ /* Save extended dividend in local register */ /* Divide by 2 to prevent overflow */ clc movl XsigH(%esi),%eax rcrl %eax movl %eax,FPU_accum_3 movl XsigL(%esi),%eax rcrl %eax movl %eax,FPU_accum_2 movl XsigLL(%esi),%eax rcrl %eax movl %eax,FPU_accum_1 movl $0,%eax rcrl %eax movl %eax,FPU_accum_0 movl FPU_accum_2,%eax /* Get the current num */ movl FPU_accum_3,%edx /*----------------------------------------------------------------------*/ /* Initialization done. Do the first 32 bits. */ /* We will divide by a number which is too large */ movl XsigH(%ebx),%ecx addl $1,%ecx jnc LFirst_div_not_1 /* here we need to divide by 100000000h, i.e., no division at all.. 
*/ mov %edx,%eax jmp LFirst_div_done LFirst_div_not_1: divl %ecx /* Divide the numerator by the augmented denom ms dw */ LFirst_div_done: movl %eax,FPU_result_3 /* Put the result in the answer */ mull XsigH(%ebx) /* mul by the ms dw of the denom */ subl %eax,FPU_accum_2 /* Subtract from the num local reg */ sbbl %edx,FPU_accum_3 movl FPU_result_3,%eax /* Get the result back */ mull XsigL(%ebx) /* now mul the ls dw of the denom */ subl %eax,FPU_accum_1 /* Subtract from the num local reg */ sbbl %edx,FPU_accum_2 sbbl $0,FPU_accum_3 je LDo_2nd_32_bits /* Must check for non-zero result here */ #ifdef PARANOID jb L_bugged_1 #endif /* PARANOID */ /* need to subtract another once of the denom */ incl FPU_result_3 /* Correct the answer */ movl XsigL(%ebx),%eax movl XsigH(%ebx),%edx subl %eax,FPU_accum_1 /* Subtract from the num local reg */ sbbl %edx,FPU_accum_2 #ifdef PARANOID sbbl $0,FPU_accum_3 jne L_bugged_1 /* Must check for non-zero result here */ #endif /* PARANOID */ /*----------------------------------------------------------------------*/ /* Half of the main problem is done, there is just a reduced numerator to handle now. Work with the second 32 bits, FPU_accum_0 not used from now on */ LDo_2nd_32_bits: movl FPU_accum_2,%edx /* get the reduced num */ movl FPU_accum_1,%eax /* need to check for possible subsequent overflow */ cmpl XsigH(%ebx),%edx jb LDo_2nd_div ja LPrevent_2nd_overflow cmpl XsigL(%ebx),%eax jb LDo_2nd_div LPrevent_2nd_overflow: /* The numerator is greater or equal, would cause overflow */ /* prevent overflow */ subl XsigL(%ebx),%eax sbbl XsigH(%ebx),%edx movl %edx,FPU_accum_2 movl %eax,FPU_accum_1 incl FPU_result_3 /* Reflect the subtraction in the answer */ #ifdef PARANOID je L_bugged_2 /* Can't bump the result to 1.0 */ #endif /* PARANOID */ LDo_2nd_div: cmpl $0,%ecx /* augmented denom msw */ jnz LSecond_div_not_1 /* %ecx == 0, we are dividing by 1.0 */ mov %edx,%eax jmp LSecond_div_done LSecond_div_not_1: divl %ecx /* Divide the numerator by the denom ms dw */ LSecond_div_done: movl %eax,FPU_result_2 /* Put the result in the answer */ mull XsigH(%ebx) /* mul by the ms dw of the denom */ subl %eax,FPU_accum_1 /* Subtract from the num local reg */ sbbl %edx,FPU_accum_2 #ifdef PARANOID jc L_bugged_2 #endif /* PARANOID */ movl FPU_result_2,%eax /* Get the result back */ mull XsigL(%ebx) /* now mul the ls dw of the denom */ subl %eax,FPU_accum_0 /* Subtract from the num local reg */ sbbl %edx,FPU_accum_1 /* Subtract from the num local reg */ sbbl $0,FPU_accum_2 #ifdef PARANOID jc L_bugged_2 #endif /* PARANOID */ jz LDo_3rd_32_bits #ifdef PARANOID cmpl $1,FPU_accum_2 jne L_bugged_2 #endif /* PARANOID */ /* need to subtract another once of the denom */ movl XsigL(%ebx),%eax movl XsigH(%ebx),%edx subl %eax,FPU_accum_0 /* Subtract from the num local reg */ sbbl %edx,FPU_accum_1 sbbl $0,FPU_accum_2 #ifdef PARANOID jc L_bugged_2 jne L_bugged_2 #endif /* PARANOID */ addl $1,FPU_result_2 /* Correct the answer */ adcl $0,FPU_result_3 #ifdef PARANOID jc L_bugged_2 /* Must check for non-zero result here */ #endif /* PARANOID */ /*----------------------------------------------------------------------*/ /* The division is essentially finished here, we just need to perform tidying operations. Deal with the 3rd 32 bits */ LDo_3rd_32_bits: /* We use an approximation for the third 32 bits. 
To take account of the 3rd 32 bits of the divisor (call them del), we subtract del * (a/b) */ movl FPU_result_3,%eax /* a/b */ mull XsigLL(%ebx) /* del */ subl %edx,FPU_accum_1 /* A borrow indicates that the result is negative */ jnb LTest_over movl XsigH(%ebx),%edx addl %edx,FPU_accum_1 subl $1,FPU_result_2 /* Adjust the answer */ sbbl $0,FPU_result_3 /* The above addition might not have been enough, check again. */ movl FPU_accum_1,%edx /* get the reduced num */ cmpl XsigH(%ebx),%edx /* denom */ jb LDo_3rd_div movl XsigH(%ebx),%edx addl %edx,FPU_accum_1 subl $1,FPU_result_2 /* Adjust the answer */ sbbl $0,FPU_result_3 jmp LDo_3rd_div LTest_over: movl FPU_accum_1,%edx /* get the reduced num */ /* need to check for possible subsequent overflow */ cmpl XsigH(%ebx),%edx /* denom */ jb LDo_3rd_div /* prevent overflow */ subl XsigH(%ebx),%edx movl %edx,FPU_accum_1 addl $1,FPU_result_2 /* Reflect the subtraction in the answer */ adcl $0,FPU_result_3 LDo_3rd_div: movl FPU_accum_0,%eax movl FPU_accum_1,%edx divl XsigH(%ebx) movl %eax,FPU_result_1 /* Rough estimate of third word */ movl PARAM3,%esi /* pointer to answer */ movl FPU_result_1,%eax movl %eax,XsigLL(%esi) movl FPU_result_2,%eax movl %eax,XsigL(%esi) movl FPU_result_3,%eax movl %eax,XsigH(%esi) L_exit: popl %ebx popl %edi popl %esi leave RET #ifdef PARANOID /* The logic is wrong if we got here */ L_bugged: pushl EX_INTERNAL|0x240 call EXCEPTION pop %ebx jmp L_exit L_bugged_1: pushl EX_INTERNAL|0x241 call EXCEPTION pop %ebx jmp L_exit L_bugged_2: pushl EX_INTERNAL|0x242 call EXCEPTION pop %ebx jmp L_exit #endif /* PARANOID */ SYM_FUNC_END(div_Xsig)
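Reduced to its top words, the division above has a compact C description (illustrative name, not kernel API): the dividend is pre-divided by two so the quotient of two fractions with the ms bit set always fits, and the result here carries 64 fraction bits instead of the ~94 the word-by-word long division above reaches. Relies on the compiler's unsigned __int128 extension for the 128-by-64 divide.

#include <stdint.h>

/* Sketch: dest ~= (a/b)/2 for fractions a, b in [0.5, 1.0), i.e. 64-bit
 * values with the most-significant bit set, keeping 64 fraction bits. */
static uint64_t div_xsig_hi64(uint64_t a_hi, uint64_t b_hi)
{
	unsigned __int128 num = (unsigned __int128)(a_hi >> 1) << 64;	/* a/2, scaled up */

	return (uint64_t)(num / b_hi);	/* (a/2)/b == (a/b)/2 < 1.0, so it fits */
}

The assembly refines this idea one 32-bit quotient word at a time, dividing by the divisor's ms word plus one so the estimate never overshoots, then correcting with at most one extra subtraction.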
aixcc-public/challenge-001-exemplar-source
18,076
arch/x86/math-emu/reg_round.S
/* SPDX-License-Identifier: GPL-2.0 */ .file "reg_round.S" /*---------------------------------------------------------------------------+ | reg_round.S | | | | Rounding/truncation/etc for FPU basic arithmetic functions. | | | | Copyright (C) 1993,1995,1997 | | W. Metzenthen, 22 Parker St, Ormond, Vic 3163, | | Australia. E-mail billm@suburbia.net | | | | This code has four possible entry points. | | The following must be entered by a jmp instruction: | | fpu_reg_round, fpu_reg_round_sqrt, and fpu_Arith_exit. | | | | The FPU_round entry point is intended to be used by C code. | | From C, call as: | | int FPU_round(FPU_REG *arg, unsigned int extent, unsigned int control_w) | | | | Return value is the tag of the answer, or-ed with FPU_Exception if | | one was raised, or -1 on internal error. | | | | For correct "up" and "down" rounding, the argument must have the correct | | sign. | | | +---------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------+ | Four entry points. | | | | Needed by both the fpu_reg_round and fpu_reg_round_sqrt entry points: | | %eax:%ebx 64 bit significand | | %edx 32 bit extension of the significand | | %edi pointer to an FPU_REG for the result to be stored | | stack calling function must have set up a C stack frame and | | pushed %esi, %edi, and %ebx | | | | Needed just for the fpu_reg_round_sqrt entry point: | | %cx A control word in the same format as the FPU control word. | | Otherwise, PARAM4 must give such a value. | | | | | | The significand and its extension are assumed to be exact in the | | following sense: | | If the significand by itself is the exact result then the significand | | extension (%edx) must contain 0, otherwise the significand extension | | must be non-zero. | | If the significand extension is non-zero then the significand is | | smaller than the magnitude of the correct exact result by an amount | | greater than zero and less than one ls bit of the significand. | | The significand extension is only required to have three possible | | non-zero values: | | less than 0x80000000 <=> the significand is less than 1/2 an ls | | bit smaller than the magnitude of the | | true exact result. | | exactly 0x80000000 <=> the significand is exactly 1/2 an ls bit | | smaller than the magnitude of the true | | exact result. | | greater than 0x80000000 <=> the significand is more than 1/2 an ls | | bit smaller than the magnitude of the | | true exact result. | | | +---------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------+ | The code in this module has become quite complex, but it should handle | | all of the FPU flags which are set at this stage of the basic arithmetic | | computations. | | There are a few rare cases where the results are not set identically to | | a real FPU. These require a bit more thought because at this stage the | | results of the code here appear to be more consistent... | | This may be changed in a future version. 
| +---------------------------------------------------------------------------*/ #include "fpu_emu.h" #include "exception.h" #include "control_w.h" /* Flags for FPU_bits_lost */ #define LOST_DOWN $1 #define LOST_UP $2 /* Flags for FPU_denormal */ #define DENORMAL $1 #define UNMASKED_UNDERFLOW $2 #ifndef NON_REENTRANT_FPU /* Make the code re-entrant by putting local storage on the stack: */ #define FPU_bits_lost (%esp) #define FPU_denormal 1(%esp) #else /* Not re-entrant, so we can gain speed by putting local storage in a static area: */ .data .align 4,0 FPU_bits_lost: .byte 0 FPU_denormal: .byte 0 #endif /* NON_REENTRANT_FPU */ .text .globl fpu_reg_round .globl fpu_Arith_exit /* Entry point when called from C */ SYM_FUNC_START(FPU_round) pushl %ebp movl %esp,%ebp pushl %esi pushl %edi pushl %ebx movl PARAM1,%edi movl SIGH(%edi),%eax movl SIGL(%edi),%ebx movl PARAM2,%edx fpu_reg_round: /* Normal entry point */ movl PARAM4,%ecx #ifndef NON_REENTRANT_FPU pushl %ebx /* adjust the stack pointer */ #endif /* NON_REENTRANT_FPU */ #ifdef PARANOID /* Cannot use this here yet */ /* orl %eax,%eax */ /* jns L_entry_bugged */ #endif /* PARANOID */ cmpw EXP_UNDER,EXP(%edi) jle L_Make_denorm /* The number is a de-normal */ movb $0,FPU_denormal /* 0 -> not a de-normal */ Denorm_done: movb $0,FPU_bits_lost /* No bits yet lost in rounding */ movl %ecx,%esi andl CW_PC,%ecx cmpl PR_64_BITS,%ecx je LRound_To_64 cmpl PR_53_BITS,%ecx je LRound_To_53 cmpl PR_24_BITS,%ecx je LRound_To_24 #ifdef PECULIAR_486 /* With the precision control bits set to 01 "(reserved)", a real 80486 behaves as if the precision control bits were set to 11 "64 bits" */ cmpl PR_RESERVED_BITS,%ecx je LRound_To_64 #ifdef PARANOID jmp L_bugged_denorm_486 #endif /* PARANOID */ #else #ifdef PARANOID jmp L_bugged_denorm /* There is no bug, just a bad control word */ #endif /* PARANOID */ #endif /* PECULIAR_486 */ /* Round etc to 24 bit precision */ LRound_To_24: movl %esi,%ecx andl CW_RC,%ecx cmpl RC_RND,%ecx je LRound_nearest_24 cmpl RC_CHOP,%ecx je LCheck_truncate_24 cmpl RC_UP,%ecx /* Towards +infinity */ je LUp_24 cmpl RC_DOWN,%ecx /* Towards -infinity */ je LDown_24 #ifdef PARANOID jmp L_bugged_round24 #endif /* PARANOID */ LUp_24: cmpb SIGN_POS,PARAM5 jne LCheck_truncate_24 /* If negative then up==truncate */ jmp LCheck_24_round_up LDown_24: cmpb SIGN_POS,PARAM5 je LCheck_truncate_24 /* If positive then down==truncate */ LCheck_24_round_up: movl %eax,%ecx andl $0x000000ff,%ecx orl %ebx,%ecx orl %edx,%ecx jnz LDo_24_round_up jmp L_Re_normalise LRound_nearest_24: /* Do rounding of the 24th bit if needed (nearest or even) */ movl %eax,%ecx andl $0x000000ff,%ecx cmpl $0x00000080,%ecx jc LCheck_truncate_24 /* less than half, no increment needed */ jne LGreater_Half_24 /* greater than half, increment needed */ /* Possibly half, we need to check the ls bits */ orl %ebx,%ebx jnz LGreater_Half_24 /* greater than half, increment needed */ orl %edx,%edx jnz LGreater_Half_24 /* greater than half, increment needed */ /* Exactly half, increment only if 24th bit is 1 (round to even) */ testl $0x00000100,%eax jz LDo_truncate_24 LGreater_Half_24: /* Rounding: increment at the 24th bit */ LDo_24_round_up: andl $0xffffff00,%eax /* Truncate to 24 bits */ xorl %ebx,%ebx movb LOST_UP,FPU_bits_lost addl $0x00000100,%eax jmp LCheck_Round_Overflow LCheck_truncate_24: movl %eax,%ecx andl $0x000000ff,%ecx orl %ebx,%ecx orl %edx,%ecx jz L_Re_normalise /* No truncation needed */ LDo_truncate_24: andl $0xffffff00,%eax /* Truncate to 24 bits */ xorl %ebx,%ebx movb 
LOST_DOWN,FPU_bits_lost jmp L_Re_normalise /* Round etc to 53 bit precision */ LRound_To_53: movl %esi,%ecx andl CW_RC,%ecx cmpl RC_RND,%ecx je LRound_nearest_53 cmpl RC_CHOP,%ecx je LCheck_truncate_53 cmpl RC_UP,%ecx /* Towards +infinity */ je LUp_53 cmpl RC_DOWN,%ecx /* Towards -infinity */ je LDown_53 #ifdef PARANOID jmp L_bugged_round53 #endif /* PARANOID */ LUp_53: cmpb SIGN_POS,PARAM5 jne LCheck_truncate_53 /* If negative then up==truncate */ jmp LCheck_53_round_up LDown_53: cmpb SIGN_POS,PARAM5 je LCheck_truncate_53 /* If positive then down==truncate */ LCheck_53_round_up: movl %ebx,%ecx andl $0x000007ff,%ecx orl %edx,%ecx jnz LDo_53_round_up jmp L_Re_normalise LRound_nearest_53: /* Do rounding of the 53rd bit if needed (nearest or even) */ movl %ebx,%ecx andl $0x000007ff,%ecx cmpl $0x00000400,%ecx jc LCheck_truncate_53 /* less than half, no increment needed */ jnz LGreater_Half_53 /* greater than half, increment needed */ /* Possibly half, we need to check the ls bits */ orl %edx,%edx jnz LGreater_Half_53 /* greater than half, increment needed */ /* Exactly half, increment only if 53rd bit is 1 (round to even) */ testl $0x00000800,%ebx jz LTruncate_53 LGreater_Half_53: /* Rounding: increment at the 53rd bit */ LDo_53_round_up: movb LOST_UP,FPU_bits_lost andl $0xfffff800,%ebx /* Truncate to 53 bits */ addl $0x00000800,%ebx adcl $0,%eax jmp LCheck_Round_Overflow LCheck_truncate_53: movl %ebx,%ecx andl $0x000007ff,%ecx orl %edx,%ecx jz L_Re_normalise LTruncate_53: movb LOST_DOWN,FPU_bits_lost andl $0xfffff800,%ebx /* Truncate to 53 bits */ jmp L_Re_normalise /* Round etc to 64 bit precision */ LRound_To_64: movl %esi,%ecx andl CW_RC,%ecx cmpl RC_RND,%ecx je LRound_nearest_64 cmpl RC_CHOP,%ecx je LCheck_truncate_64 cmpl RC_UP,%ecx /* Towards +infinity */ je LUp_64 cmpl RC_DOWN,%ecx /* Towards -infinity */ je LDown_64 #ifdef PARANOID jmp L_bugged_round64 #endif /* PARANOID */ LUp_64: cmpb SIGN_POS,PARAM5 jne LCheck_truncate_64 /* If negative then up==truncate */ orl %edx,%edx jnz LDo_64_round_up jmp L_Re_normalise LDown_64: cmpb SIGN_POS,PARAM5 je LCheck_truncate_64 /* If positive then down==truncate */ orl %edx,%edx jnz LDo_64_round_up jmp L_Re_normalise LRound_nearest_64: cmpl $0x80000000,%edx jc LCheck_truncate_64 jne LDo_64_round_up /* Now test for round-to-even */ testb $1,%bl jz LCheck_truncate_64 LDo_64_round_up: movb LOST_UP,FPU_bits_lost addl $1,%ebx adcl $0,%eax LCheck_Round_Overflow: jnc L_Re_normalise /* Overflow, adjust the result (significand to 1.0) */ rcrl $1,%eax rcrl $1,%ebx incw EXP(%edi) jmp L_Re_normalise LCheck_truncate_64: orl %edx,%edx jz L_Re_normalise LTruncate_64: movb LOST_DOWN,FPU_bits_lost L_Re_normalise: testb $0xff,FPU_denormal jnz Normalise_result L_Normalised: movl TAG_Valid,%edx L_deNormalised: cmpb LOST_UP,FPU_bits_lost je L_precision_lost_up cmpb LOST_DOWN,FPU_bits_lost je L_precision_lost_down L_no_precision_loss: /* store the result */ L_Store_significand: movl %eax,SIGH(%edi) movl %ebx,SIGL(%edi) cmpw EXP_OVER,EXP(%edi) jge L_overflow movl %edx,%eax /* Convert the exponent to 80x87 form. */ addw EXTENDED_Ebias,EXP(%edi) andw $0x7fff,EXP(%edi) fpu_reg_round_signed_special_exit: cmpb SIGN_POS,PARAM5 je fpu_reg_round_special_exit orw $0x8000,EXP(%edi) /* Negative sign for the result. */ fpu_reg_round_special_exit: #ifndef NON_REENTRANT_FPU popl %ebx /* adjust the stack pointer */ #endif /* NON_REENTRANT_FPU */ fpu_Arith_exit: popl %ebx popl %edi popl %esi leave RET /* * Set the FPU status flags to represent precision loss due to * round-up. 
*/ L_precision_lost_up: push %edx push %eax call set_precision_flag_up popl %eax popl %edx jmp L_no_precision_loss /* * Set the FPU status flags to represent precision loss due to * truncation. */ L_precision_lost_down: push %edx push %eax call set_precision_flag_down popl %eax popl %edx jmp L_no_precision_loss /* * The number is a denormal (which might get rounded up to a normal) * Shift the number right the required number of bits, which will * have to be undone later... */ L_Make_denorm: /* The action to be taken depends upon whether the underflow exception is masked */ testb CW_Underflow,%cl /* Underflow mask. */ jz Unmasked_underflow /* Do not make a denormal. */ movb DENORMAL,FPU_denormal pushl %ecx /* Save */ movw EXP_UNDER+1,%cx subw EXP(%edi),%cx cmpw $64,%cx /* shrd only works for 0..31 bits */ jnc Denorm_shift_more_than_63 cmpw $32,%cx /* shrd only works for 0..31 bits */ jnc Denorm_shift_more_than_32 /* * We got here without jumps by assuming that the most common requirement * is for a small de-normalising shift. * Shift by [1..31] bits */ addw %cx,EXP(%edi) orl %edx,%edx /* extension */ setne %ch /* Save whether %edx is non-zero */ xorl %edx,%edx shrd %cl,%ebx,%edx shrd %cl,%eax,%ebx shr %cl,%eax orb %ch,%dl popl %ecx jmp Denorm_done /* Shift by [32..63] bits */ Denorm_shift_more_than_32: addw %cx,EXP(%edi) subb $32,%cl orl %edx,%edx setne %ch orb %ch,%bl xorl %edx,%edx shrd %cl,%ebx,%edx shrd %cl,%eax,%ebx shr %cl,%eax orl %edx,%edx /* test these 32 bits */ setne %cl orb %ch,%bl orb %cl,%bl movl %ebx,%edx movl %eax,%ebx xorl %eax,%eax popl %ecx jmp Denorm_done /* Shift by [64..) bits */ Denorm_shift_more_than_63: cmpw $64,%cx jne Denorm_shift_more_than_64 /* Exactly 64 bit shift */ addw %cx,EXP(%edi) xorl %ecx,%ecx orl %edx,%edx setne %cl orl %ebx,%ebx setne %ch orb %ch,%cl orb %cl,%al movl %eax,%edx xorl %eax,%eax xorl %ebx,%ebx popl %ecx jmp Denorm_done Denorm_shift_more_than_64: movw EXP_UNDER+1,EXP(%edi) /* This is easy, %eax must be non-zero, so.. */ movl $1,%edx xorl %eax,%eax xorl %ebx,%ebx popl %ecx jmp Denorm_done Unmasked_underflow: movb UNMASKED_UNDERFLOW,FPU_denormal jmp Denorm_done /* Undo the de-normalisation. */ Normalise_result: cmpb UNMASKED_UNDERFLOW,FPU_denormal je Signal_underflow /* The number must be a denormal if we got here. */ #ifdef PARANOID /* But check it... just in case. */ cmpw EXP_UNDER+1,EXP(%edi) jne L_norm_bugged #endif /* PARANOID */ #ifdef PECULIAR_486 /* * This implements a special feature of 80486 behaviour. * Underflow will be signaled even if the number is * not a denormal after rounding. * This difference occurs only for masked underflow, and not * in the unmasked case. * Actual 80486 behaviour differs from this in some circumstances. */ orl %eax,%eax /* ms bits */ js LPseudoDenormal /* Will be masked underflow */ #else orl %eax,%eax /* ms bits */ js L_Normalised /* No longer a denormal */ #endif /* PECULIAR_486 */ jnz LDenormal_adj_exponent orl %ebx,%ebx jz L_underflow_to_zero /* The contents are zero */ LDenormal_adj_exponent: decw EXP(%edi) LPseudoDenormal: testb $0xff,FPU_bits_lost /* bits lost == underflow */ movl TAG_Special,%edx jz L_deNormalised /* There must be a masked underflow */ push %eax pushl EX_Underflow call EXCEPTION popl %eax popl %eax movl TAG_Special,%edx jmp L_deNormalised /* * The operations resulted in a number too small to represent. * Masked response. 
*/ L_underflow_to_zero: push %eax call set_precision_flag_down popl %eax push %eax pushl EX_Underflow call EXCEPTION popl %eax popl %eax /* Reduce the exponent to EXP_UNDER */ movw EXP_UNDER,EXP(%edi) movl TAG_Zero,%edx jmp L_Store_significand /* The operations resulted in a number too large to represent. */ L_overflow: addw EXTENDED_Ebias,EXP(%edi) /* Set for unmasked response. */ push %edi call arith_overflow pop %edi jmp fpu_reg_round_signed_special_exit Signal_underflow: /* The number may have been changed to a non-denormal */ /* by the rounding operations. */ cmpw EXP_UNDER,EXP(%edi) jle Do_unmasked_underflow jmp L_Normalised Do_unmasked_underflow: /* Increase the exponent by the magic number */ addw $(3*(1<<13)),EXP(%edi) push %eax pushl EX_Underflow call EXCEPTION popl %eax popl %eax jmp L_Normalised #ifdef PARANOID #ifdef PECULIAR_486 L_bugged_denorm_486: pushl EX_INTERNAL|0x236 call EXCEPTION popl %ebx jmp L_exception_exit #else L_bugged_denorm: pushl EX_INTERNAL|0x230 call EXCEPTION popl %ebx jmp L_exception_exit #endif /* PECULIAR_486 */ L_bugged_round24: pushl EX_INTERNAL|0x231 call EXCEPTION popl %ebx jmp L_exception_exit L_bugged_round53: pushl EX_INTERNAL|0x232 call EXCEPTION popl %ebx jmp L_exception_exit L_bugged_round64: pushl EX_INTERNAL|0x233 call EXCEPTION popl %ebx jmp L_exception_exit L_norm_bugged: pushl EX_INTERNAL|0x234 call EXCEPTION popl %ebx jmp L_exception_exit L_entry_bugged: pushl EX_INTERNAL|0x235 call EXCEPTION popl %ebx L_exception_exit: mov $-1,%eax jmp fpu_reg_round_special_exit #endif /* PARANOID */ SYM_FUNC_END(FPU_round)
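The 64-bit-precision nearest-or-even path above reduces to the three-way comparison against 0x80000000 described in the header. A hedged C rendering (illustrative names; denormals, precision flags and exceptions omitted):

#include <stdint.h>
#include <stdbool.h>

/* Sketch: round a 64-bit significand with a 32-bit extension to nearest or
 * even.  A carry out of bit 63 turns the significand into 1.0 and the
 * caller bumps the exponent, as LCheck_Round_Overflow does above. */
static uint64_t round_nearest_64(uint64_t sig, uint32_t ext, bool *carried)
{
	bool up;

	if (ext > 0x80000000u)
		up = true;			/* more than half an ls bit was lost */
	else if (ext == 0x80000000u)
		up = (sig & 1) != 0;		/* exactly half: round to even */
	else
		up = false;			/* less than half: truncate */

	*carried = false;
	if (up) {
		sig++;
		if (sig == 0) {			/* carry out of the top bit */
			sig = 1ULL << 63;	/* significand becomes 1.0 */
			*carried = true;	/* caller increments the exponent */
		}
	}
	return sig;
}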
aixcc-public/challenge-001-exemplar-source
3,412
arch/x86/math-emu/round_Xsig.S
/* SPDX-License-Identifier: GPL-2.0 */ /*---------------------------------------------------------------------------+ | round_Xsig.S | | | | Copyright (C) 1992,1993,1994,1995 | | W. Metzenthen, 22 Parker St, Ormond, Vic 3163, | | Australia. E-mail billm@jacobi.maths.monash.edu.au | | | | Normalize and round a 12 byte quantity. | | Call from C as: | | int round_Xsig(Xsig *n) | | | | Normalize a 12 byte quantity. | | Call from C as: | | int norm_Xsig(Xsig *n) | | | | Each function returns the size of the shift (nr of bits). | | | +---------------------------------------------------------------------------*/ .file "round_Xsig.S" #include "fpu_emu.h" .text SYM_FUNC_START(round_Xsig) pushl %ebp movl %esp,%ebp pushl %ebx /* Reserve some space */ pushl %ebx pushl %esi movl PARAM1,%esi movl 8(%esi),%edx movl 4(%esi),%ebx movl (%esi),%eax movl $0,-4(%ebp) orl %edx,%edx /* ms bits */ js L_round /* Already normalized */ jnz L_shift_1 /* Shift left 1 - 31 bits */ movl %ebx,%edx movl %eax,%ebx xorl %eax,%eax movl $-32,-4(%ebp) /* We need to shift left by 1 - 31 bits */ L_shift_1: bsrl %edx,%ecx /* get the required shift in %ecx */ subl $31,%ecx negl %ecx subl %ecx,-4(%ebp) shld %cl,%ebx,%edx shld %cl,%eax,%ebx shl %cl,%eax L_round: testl $0x80000000,%eax jz L_exit addl $1,%ebx adcl $0,%edx jnz L_exit movl $0x80000000,%edx incl -4(%ebp) L_exit: movl %edx,8(%esi) movl %ebx,4(%esi) movl %eax,(%esi) movl -4(%ebp),%eax popl %esi popl %ebx leave RET SYM_FUNC_END(round_Xsig) SYM_FUNC_START(norm_Xsig) pushl %ebp movl %esp,%ebp pushl %ebx /* Reserve some space */ pushl %ebx pushl %esi movl PARAM1,%esi movl 8(%esi),%edx movl 4(%esi),%ebx movl (%esi),%eax movl $0,-4(%ebp) orl %edx,%edx /* ms bits */ js L_n_exit /* Already normalized */ jnz L_n_shift_1 /* Shift left 1 - 31 bits */ movl %ebx,%edx movl %eax,%ebx xorl %eax,%eax movl $-32,-4(%ebp) orl %edx,%edx /* ms bits */ js L_n_exit /* Normalized now */ jnz L_n_shift_1 /* Shift left 1 - 31 bits */ movl %ebx,%edx movl %eax,%ebx xorl %eax,%eax addl $-32,-4(%ebp) jmp L_n_exit /* Might not be normalized, but shift no more. */ /* We need to shift left by 1 - 31 bits */ L_n_shift_1: bsrl %edx,%ecx /* get the required shift in %ecx */ subl $31,%ecx negl %ecx subl %ecx,-4(%ebp) shld %cl,%ebx,%edx shld %cl,%eax,%ebx shl %cl,%eax L_n_exit: movl %edx,8(%esi) movl %ebx,4(%esi) movl %eax,(%esi) movl -4(%ebp),%eax popl %esi popl %ebx leave RET SYM_FUNC_END(norm_Xsig)
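After the bsr/shld normalization, round_Xsig rounds the top 64 bits of the 96-bit value on the ms bit of its low word; a hedged C model of just that step (illustrative names), where 'adjust' stands for the shift-count value the routine returns:

#include <stdint.h>

/* Sketch: round the upper 64 bits on bit 31 of the low word.  If the
 * increment carries all the way out, the value becomes 0x80000000:00000000
 * and the returned adjustment grows by one, as the incl above does. */
static int round_xsig_hi64(uint64_t *hi64, uint32_t lsw, int adjust)
{
	if (lsw & 0x80000000u) {
		(*hi64)++;
		if (*hi64 == 0) {		/* carried out of bit 63 */
			*hi64 = 1ULL << 63;
			adjust++;
		}
	}
	return adjust;
}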
aixcc-public/challenge-001-exemplar-source
11,040
arch/x86/math-emu/wm_sqrt.S
/* SPDX-License-Identifier: GPL-2.0 */ .file "wm_sqrt.S" /*---------------------------------------------------------------------------+ | wm_sqrt.S | | | | Fixed point arithmetic square root evaluation. | | | | Copyright (C) 1992,1993,1995,1997 | | W. Metzenthen, 22 Parker St, Ormond, Vic 3163, | | Australia. E-mail billm@suburbia.net | | | | Call from C as: | | int wm_sqrt(FPU_REG *n, unsigned int control_word) | | | +---------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------+ | wm_sqrt(FPU_REG *n, unsigned int control_word) | | returns the square root of n in n. | | | | Use Newton's method to compute the square root of a number, which must | | be in the range [1.0 .. 4.0), to 64 bits accuracy. | | Does not check the sign or tag of the argument. | | Sets the exponent, but not the sign or tag of the result. | | | | The guess is kept in %esi:%edi | +---------------------------------------------------------------------------*/ #include "exception.h" #include "fpu_emu.h" #ifndef NON_REENTRANT_FPU /* Local storage on the stack: */ #define FPU_accum_3 -4(%ebp) /* ms word */ #define FPU_accum_2 -8(%ebp) #define FPU_accum_1 -12(%ebp) #define FPU_accum_0 -16(%ebp) /* * The de-normalised argument: * sq_2 sq_1 sq_0 * b b b b b b b ... b b b b b b .... b b b b 0 0 0 ... 0 * ^ binary point here */ #define FPU_fsqrt_arg_2 -20(%ebp) /* ms word */ #define FPU_fsqrt_arg_1 -24(%ebp) #define FPU_fsqrt_arg_0 -28(%ebp) /* ls word, at most the ms bit is set */ #else /* Local storage in a static area: */ .data .align 4,0 FPU_accum_3: .long 0 /* ms word */ FPU_accum_2: .long 0 FPU_accum_1: .long 0 FPU_accum_0: .long 0 /* The de-normalised argument: sq_2 sq_1 sq_0 b b b b b b b ... b b b b b b .... b b b b 0 0 0 ... 0 ^ binary point here */ FPU_fsqrt_arg_2: .long 0 /* ms word */ FPU_fsqrt_arg_1: .long 0 FPU_fsqrt_arg_0: .long 0 /* ls word, at most the ms bit is set */ #endif /* NON_REENTRANT_FPU */ .text SYM_FUNC_START(wm_sqrt) pushl %ebp movl %esp,%ebp #ifndef NON_REENTRANT_FPU subl $28,%esp #endif /* NON_REENTRANT_FPU */ pushl %esi pushl %edi pushl %ebx movl PARAM1,%esi movl SIGH(%esi),%eax movl SIGL(%esi),%ecx xorl %edx,%edx /* We use a rough linear estimate for the first guess.. */ cmpw EXP_BIAS,EXP(%esi) jnz sqrt_arg_ge_2 shrl $1,%eax /* arg is in the range [1.0 .. 2.0) */ rcrl $1,%ecx rcrl $1,%edx sqrt_arg_ge_2: /* From here on, n is never accessed directly again until it is replaced by the answer. */ movl %eax,FPU_fsqrt_arg_2 /* ms word of n */ movl %ecx,FPU_fsqrt_arg_1 movl %edx,FPU_fsqrt_arg_0 /* Make a linear first estimate */ shrl $1,%eax addl $0x40000000,%eax movl $0xaaaaaaaa,%ecx mull %ecx shll %edx /* max result was 7fff... */ testl $0x80000000,%edx /* but min was 3fff... */ jnz sqrt_prelim_no_adjust movl $0x80000000,%edx /* round up */ sqrt_prelim_no_adjust: movl %edx,%esi /* Our first guess */ /* We have now computed (approx) (2 + x) / 3, which forms the basis for a few iterations of Newton's method */ movl FPU_fsqrt_arg_2,%ecx /* ms word */ /* * From our initial estimate, three iterations are enough to get us * to 30 bits or so. This will then allow two iterations at better * precision to complete the process. */ /* Compute (g + n/g)/2 at each iteration (g is the guess). */ shrl %ecx /* Doing this first will prevent a divide */ /* overflow later. 
*/ movl %ecx,%edx /* msw of the arg / 2 */ divl %esi /* current estimate */ shrl %esi /* divide by 2 */ addl %eax,%esi /* the new estimate */ movl %ecx,%edx divl %esi shrl %esi addl %eax,%esi movl %ecx,%edx divl %esi shrl %esi addl %eax,%esi /* * Now that an estimate accurate to about 30 bits has been obtained (in %esi), * we improve it to 60 bits or so. * * The strategy from now on is to compute new estimates from * guess := guess + (n - guess^2) / (2 * guess) */ /* First, find the square of the guess */ movl %esi,%eax mull %esi /* guess^2 now in %edx:%eax */ movl FPU_fsqrt_arg_1,%ecx subl %ecx,%eax movl FPU_fsqrt_arg_2,%ecx /* ms word of normalized n */ sbbl %ecx,%edx jnc sqrt_stage_2_positive /* Subtraction gives a negative result, negate the result before division. */ notl %edx notl %eax addl $1,%eax adcl $0,%edx divl %esi movl %eax,%ecx movl %edx,%eax divl %esi jmp sqrt_stage_2_finish sqrt_stage_2_positive: divl %esi movl %eax,%ecx movl %edx,%eax divl %esi notl %ecx notl %eax addl $1,%eax adcl $0,%ecx sqrt_stage_2_finish: sarl $1,%ecx /* divide by 2 */ rcrl $1,%eax /* Form the new estimate in %esi:%edi */ movl %eax,%edi addl %ecx,%esi jnz sqrt_stage_2_done /* result should be [1..2) */ #ifdef PARANOID /* It should be possible to get here only if the arg is ffff....ffff */ cmpl $0xffffffff,FPU_fsqrt_arg_1 jnz sqrt_stage_2_error #endif /* PARANOID */ /* The best rounded result. */ xorl %eax,%eax decl %eax movl %eax,%edi movl %eax,%esi movl $0x7fffffff,%eax jmp sqrt_round_result #ifdef PARANOID sqrt_stage_2_error: pushl EX_INTERNAL|0x213 call EXCEPTION #endif /* PARANOID */ sqrt_stage_2_done: /* Now the square root has been computed to better than 60 bits. */ /* Find the square of the guess. */ movl %edi,%eax /* ls word of guess */ mull %edi movl %edx,FPU_accum_1 movl %esi,%eax mull %esi movl %edx,FPU_accum_3 movl %eax,FPU_accum_2 movl %edi,%eax mull %esi addl %eax,FPU_accum_1 adcl %edx,FPU_accum_2 adcl $0,FPU_accum_3 /* movl %esi,%eax */ /* mull %edi */ addl %eax,FPU_accum_1 adcl %edx,FPU_accum_2 adcl $0,FPU_accum_3 /* guess^2 now in FPU_accum_3:FPU_accum_2:FPU_accum_1 */ movl FPU_fsqrt_arg_0,%eax /* get normalized n */ subl %eax,FPU_accum_1 movl FPU_fsqrt_arg_1,%eax sbbl %eax,FPU_accum_2 movl FPU_fsqrt_arg_2,%eax /* ms word of normalized n */ sbbl %eax,FPU_accum_3 jnc sqrt_stage_3_positive /* Subtraction gives a negative result, negate the result before division */ notl FPU_accum_1 notl FPU_accum_2 notl FPU_accum_3 addl $1,FPU_accum_1 adcl $0,FPU_accum_2 #ifdef PARANOID adcl $0,FPU_accum_3 /* This must be zero */ jz sqrt_stage_3_no_error sqrt_stage_3_error: pushl EX_INTERNAL|0x207 call EXCEPTION sqrt_stage_3_no_error: #endif /* PARANOID */ movl FPU_accum_2,%edx movl FPU_accum_1,%eax divl %esi movl %eax,%ecx movl %edx,%eax divl %esi sarl $1,%ecx /* divide by 2 */ rcrl $1,%eax /* prepare to round the result */ addl %ecx,%edi adcl $0,%esi jmp sqrt_stage_3_finished sqrt_stage_3_positive: movl FPU_accum_2,%edx movl FPU_accum_1,%eax divl %esi movl %eax,%ecx movl %edx,%eax divl %esi sarl $1,%ecx /* divide by 2 */ rcrl $1,%eax /* prepare to round the result */ notl %eax /* Negate the correction term */ notl %ecx addl $1,%eax adcl $0,%ecx /* carry here ==> correction == 0 */ adcl $0xffffffff,%esi addl %ecx,%edi adcl $0,%esi sqrt_stage_3_finished: /* * The result in %esi:%edi:%esi should be good to about 90 bits here, * and the rounding information here does not have sufficient accuracy * in a few rare cases. 
*/ cmpl $0xffffffe0,%eax ja sqrt_near_exact_x cmpl $0x00000020,%eax jb sqrt_near_exact cmpl $0x7fffffe0,%eax jb sqrt_round_result cmpl $0x80000020,%eax jb sqrt_get_more_precision sqrt_round_result: /* Set up for rounding operations */ movl %eax,%edx movl %esi,%eax movl %edi,%ebx movl PARAM1,%edi movw EXP_BIAS,EXP(%edi) /* Result is in [1.0 .. 2.0) */ jmp fpu_reg_round sqrt_near_exact_x: /* First, the estimate must be rounded up. */ addl $1,%edi adcl $0,%esi sqrt_near_exact: /* * This is an easy case because x^1/2 is monotonic. * We need just find the square of our estimate, compare it * with the argument, and deduce whether our estimate is * above, below, or exact. We use the fact that the estimate * is known to be accurate to about 90 bits. */ movl %edi,%eax /* ls word of guess */ mull %edi movl %edx,%ebx /* 2nd ls word of square */ movl %eax,%ecx /* ls word of square */ movl %edi,%eax mull %esi addl %eax,%ebx addl %eax,%ebx #ifdef PARANOID cmp $0xffffffb0,%ebx jb sqrt_near_exact_ok cmp $0x00000050,%ebx ja sqrt_near_exact_ok pushl EX_INTERNAL|0x214 call EXCEPTION sqrt_near_exact_ok: #endif /* PARANOID */ or %ebx,%ebx js sqrt_near_exact_small jnz sqrt_near_exact_large or %ebx,%edx jnz sqrt_near_exact_large /* Our estimate is exactly the right answer */ xorl %eax,%eax jmp sqrt_round_result sqrt_near_exact_small: /* Our estimate is too small */ movl $0x000000ff,%eax jmp sqrt_round_result sqrt_near_exact_large: /* Our estimate is too large, we need to decrement it */ subl $1,%edi sbbl $0,%esi movl $0xffffff00,%eax jmp sqrt_round_result sqrt_get_more_precision: /* This case is almost the same as the above, except we start with an extra bit of precision in the estimate. */ stc /* The extra bit. */ rcll $1,%edi /* Shift the estimate left one bit */ rcll $1,%esi movl %edi,%eax /* ls word of guess */ mull %edi movl %edx,%ebx /* 2nd ls word of square */ movl %eax,%ecx /* ls word of square */ movl %edi,%eax mull %esi addl %eax,%ebx addl %eax,%ebx /* Put our estimate back to its original value */ stc /* The ms bit. */ rcrl $1,%esi /* Shift the estimate left one bit */ rcrl $1,%edi #ifdef PARANOID cmp $0xffffff60,%ebx jb sqrt_more_prec_ok cmp $0x000000a0,%ebx ja sqrt_more_prec_ok pushl EX_INTERNAL|0x215 call EXCEPTION sqrt_more_prec_ok: #endif /* PARANOID */ or %ebx,%ebx js sqrt_more_prec_small jnz sqrt_more_prec_large or %ebx,%ecx jnz sqrt_more_prec_large /* Our estimate is exactly the right answer */ movl $0x80000000,%eax jmp sqrt_round_result sqrt_more_prec_small: /* Our estimate is too small */ movl $0x800000ff,%eax jmp sqrt_round_result sqrt_more_prec_large: /* Our estimate is too large */ movl $0x7fffff00,%eax jmp sqrt_round_result SYM_FUNC_END(wm_sqrt)
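As a plain floating-point model (illustration only, not the emulator's code path), the recurrence iterated above is the standard Newton step for the square root, started from the linear estimate the comments mention:

/* Sketch: Newton's method for sqrt(n), n assumed in [1.0, 4.0).
 * g <- g + (n - g*g) / (2*g); the assembly runs the same recurrence in
 * 32/64/96-bit fixed point to reach roughly 90 bits before rounding. */
static double sqrt_newton_model(double n)
{
	double g = (n + 2.0) / 3.0;	/* rough linear first estimate, cf. (2 + x)/3 above */
	int i;

	for (i = 0; i < 5; i++)
		g += (n - g * g) / (2.0 * g);
	return g;
}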
aixcc-public/challenge-001-exemplar-source
2,528
arch/x86/math-emu/shr_Xsig.S
/* SPDX-License-Identifier: GPL-2.0 */ .file "shr_Xsig.S" /*---------------------------------------------------------------------------+ | shr_Xsig.S | | | | 12 byte right shift function | | | | Copyright (C) 1992,1994,1995 | | W. Metzenthen, 22 Parker St, Ormond, Vic 3163, | | Australia. E-mail billm@jacobi.maths.monash.edu.au | | | | Call from C as: | | void shr_Xsig(Xsig *arg, unsigned nr) | | | | Extended shift right function. | | Fastest for small shifts. | | Shifts the 12 byte quantity pointed to by the first arg (arg) | | right by the number of bits specified by the second arg (nr). | | | +---------------------------------------------------------------------------*/ #include "fpu_emu.h" .text SYM_FUNC_START(shr_Xsig) push %ebp movl %esp,%ebp pushl %esi movl PARAM2,%ecx movl PARAM1,%esi cmpl $32,%ecx /* shrd only works for 0..31 bits */ jnc L_more_than_31 /* less than 32 bits */ pushl %ebx movl (%esi),%eax /* lsl */ movl 4(%esi),%ebx /* midl */ movl 8(%esi),%edx /* msl */ shrd %cl,%ebx,%eax shrd %cl,%edx,%ebx shr %cl,%edx movl %eax,(%esi) movl %ebx,4(%esi) movl %edx,8(%esi) popl %ebx popl %esi leave RET L_more_than_31: cmpl $64,%ecx jnc L_more_than_63 subb $32,%cl movl 4(%esi),%eax /* midl */ movl 8(%esi),%edx /* msl */ shrd %cl,%edx,%eax shr %cl,%edx movl %eax,(%esi) movl %edx,4(%esi) movl $0,8(%esi) popl %esi leave RET L_more_than_63: cmpl $96,%ecx jnc L_more_than_95 subb $64,%cl movl 8(%esi),%eax /* msl */ shr %cl,%eax xorl %edx,%edx movl %eax,(%esi) movl %edx,4(%esi) movl %edx,8(%esi) popl %esi leave RET L_more_than_95: xorl %eax,%eax movl %eax,(%esi) movl %eax,4(%esi) movl %eax,8(%esi) popl %esi leave RET SYM_FUNC_END(shr_Xsig)
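Functionally the routine above is a 96-bit logical right shift done in 32-bit pieces; a compact C model (illustrative struct name, relies on unsigned __int128):

#include <stdint.h>

struct xsig96 { uint32_t lsw, midw, msw; };	/* mirrors the Xsig layout */

/* Sketch: shift the 96-bit quantity right by nr bits, zero-filling. */
static void shr96_model(struct xsig96 *x, unsigned nr)
{
	unsigned __int128 v = ((unsigned __int128)x->msw << 64) |
			      ((unsigned __int128)x->midw << 32) | x->lsw;

	v = nr >= 96 ? 0 : v >> nr;	/* shifts >= the width are undefined in C */
	x->msw  = (uint32_t)(v >> 64);
	x->midw = (uint32_t)(v >> 32);
	x->lsw  = (uint32_t)v;
}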
aixcc-public/challenge-001-exemplar-source
3,715
arch/x86/math-emu/reg_u_mul.S
/* SPDX-License-Identifier: GPL-2.0 */ .file "reg_u_mul.S" /*---------------------------------------------------------------------------+ | reg_u_mul.S | | | | Core multiplication routine | | | | Copyright (C) 1992,1993,1995,1997 | | W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia | | E-mail billm@suburbia.net | | | | | +---------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------+ | Basic multiplication routine. | | Does not check the resulting exponent for overflow/underflow | | | | FPU_u_mul(FPU_REG *a, FPU_REG *b, FPU_REG *c, unsigned int cw); | | | | Internal working is at approx 128 bits. | | Result is rounded to nearest 53 or 64 bits, using "nearest or even". | +---------------------------------------------------------------------------*/ #include "exception.h" #include "fpu_emu.h" #include "control_w.h" #ifndef NON_REENTRANT_FPU /* Local storage on the stack: */ #define FPU_accum_0 -4(%ebp) /* ms word */ #define FPU_accum_1 -8(%ebp) #else /* Local storage in a static area: */ .data .align 4,0 FPU_accum_0: .long 0 FPU_accum_1: .long 0 #endif /* NON_REENTRANT_FPU */ .text SYM_FUNC_START(FPU_u_mul) pushl %ebp movl %esp,%ebp #ifndef NON_REENTRANT_FPU subl $8,%esp #endif /* NON_REENTRANT_FPU */ pushl %esi pushl %edi pushl %ebx movl PARAM1,%esi movl PARAM2,%edi #ifdef PARANOID testl $0x80000000,SIGH(%esi) jz L_bugged testl $0x80000000,SIGH(%edi) jz L_bugged #endif /* PARANOID */ xorl %ecx,%ecx xorl %ebx,%ebx movl SIGL(%esi),%eax mull SIGL(%edi) movl %eax,FPU_accum_0 movl %edx,FPU_accum_1 movl SIGL(%esi),%eax mull SIGH(%edi) addl %eax,FPU_accum_1 adcl %edx,%ebx /* adcl $0,%ecx // overflow here is not possible */ movl SIGH(%esi),%eax mull SIGL(%edi) addl %eax,FPU_accum_1 adcl %edx,%ebx adcl $0,%ecx movl SIGH(%esi),%eax mull SIGH(%edi) addl %eax,%ebx adcl %edx,%ecx /* Get the sum of the exponents. */ movl PARAM6,%eax subl EXP_BIAS-1,%eax /* Two denormals can cause an exponent underflow */ cmpl EXP_WAY_UNDER,%eax jg Exp_not_underflow /* Set to a really low value allow correct handling */ movl EXP_WAY_UNDER,%eax Exp_not_underflow: /* Have now finished with the sources */ movl PARAM3,%edi /* Point to the destination */ movw %ax,EXP(%edi) /* Now make sure that the result is normalized */ testl $0x80000000,%ecx jnz LResult_Normalised /* Normalize by shifting left one bit */ shll $1,FPU_accum_0 rcll $1,FPU_accum_1 rcll $1,%ebx rcll $1,%ecx decw EXP(%edi) LResult_Normalised: movl FPU_accum_0,%eax movl FPU_accum_1,%edx orl %eax,%eax jz L_extent_zero orl $1,%edx L_extent_zero: movl %ecx,%eax jmp fpu_reg_round #ifdef PARANOID L_bugged: pushl EX_INTERNAL|0x205 call EXCEPTION pop %ebx jmp L_exit L_exit: popl %ebx popl %edi popl %esi leave RET #endif /* PARANOID */ SYM_FUNC_END(FPU_u_mul)
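The four mull/addl/adcl groups above are the classic schoolbook scheme for a 64x64 -> 128-bit product built from 32x32 partial products. A C model of just that core (illustrative name; the exponent handling, normalization and tail call into fpu_reg_round are omitted):

#include <stdint.h>

/* Sketch: full 128-bit product of two 64-bit significands from four
 * 32x32 partial products, mirroring the register usage above. */
static void mul64x64_model(uint64_t a, uint64_t b, uint64_t *hi, uint64_t *lo)
{
	uint32_t al = (uint32_t)a, ah = (uint32_t)(a >> 32);
	uint32_t bl = (uint32_t)b, bh = (uint32_t)(b >> 32);

	uint64_t ll = (uint64_t)al * bl;	/* contributes to bits   0..63 */
	uint64_t lh = (uint64_t)al * bh;	/* contributes to bits  32..95 */
	uint64_t hl = (uint64_t)ah * bl;	/* contributes to bits  32..95 */
	uint64_t hh = (uint64_t)ah * bh;	/* contributes to bits 64..127 */

	uint64_t mid = (ll >> 32) + (uint32_t)lh + (uint32_t)hl;

	*lo = (mid << 32) | (uint32_t)ll;
	*hi = hh + (lh >> 32) + (hl >> 32) + (mid >> 32);
}

On a 64-bit target the same product is a single unsigned __int128 multiply; the 32-bit decomposition is what the i386 mull instruction forces.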
aixcc-public/challenge-001-exemplar-source
4,321
arch/x86/math-emu/mul_Xsig.S
/* SPDX-License-Identifier: GPL-2.0 */ /*---------------------------------------------------------------------------+ | mul_Xsig.S | | | | Multiply a 12 byte fixed point number by another fixed point number. | | | | Copyright (C) 1992,1994,1995 | | W. Metzenthen, 22 Parker St, Ormond, Vic 3163, | | Australia. E-mail billm@jacobi.maths.monash.edu.au | | | | Call from C as: | | void mul32_Xsig(Xsig *x, unsigned b) | | | | void mul64_Xsig(Xsig *x, unsigned long long *b) | | | | void mul_Xsig_Xsig(Xsig *x, unsigned *b) | | | | The result is neither rounded nor normalized, and the ls bit or so may | | be wrong. | | | +---------------------------------------------------------------------------*/ .file "mul_Xsig.S" #include "fpu_emu.h" .text SYM_FUNC_START(mul32_Xsig) pushl %ebp movl %esp,%ebp subl $16,%esp pushl %esi movl PARAM1,%esi movl PARAM2,%ecx xor %eax,%eax movl %eax,-4(%ebp) movl %eax,-8(%ebp) movl (%esi),%eax /* lsl of Xsig */ mull %ecx /* msl of b */ movl %edx,-12(%ebp) movl 4(%esi),%eax /* midl of Xsig */ mull %ecx /* msl of b */ addl %eax,-12(%ebp) adcl %edx,-8(%ebp) adcl $0,-4(%ebp) movl 8(%esi),%eax /* msl of Xsig */ mull %ecx /* msl of b */ addl %eax,-8(%ebp) adcl %edx,-4(%ebp) movl -12(%ebp),%eax movl %eax,(%esi) movl -8(%ebp),%eax movl %eax,4(%esi) movl -4(%ebp),%eax movl %eax,8(%esi) popl %esi leave RET SYM_FUNC_END(mul32_Xsig) SYM_FUNC_START(mul64_Xsig) pushl %ebp movl %esp,%ebp subl $16,%esp pushl %esi movl PARAM1,%esi movl PARAM2,%ecx xor %eax,%eax movl %eax,-4(%ebp) movl %eax,-8(%ebp) movl (%esi),%eax /* lsl of Xsig */ mull 4(%ecx) /* msl of b */ movl %edx,-12(%ebp) movl 4(%esi),%eax /* midl of Xsig */ mull (%ecx) /* lsl of b */ addl %edx,-12(%ebp) adcl $0,-8(%ebp) adcl $0,-4(%ebp) movl 4(%esi),%eax /* midl of Xsig */ mull 4(%ecx) /* msl of b */ addl %eax,-12(%ebp) adcl %edx,-8(%ebp) adcl $0,-4(%ebp) movl 8(%esi),%eax /* msl of Xsig */ mull (%ecx) /* lsl of b */ addl %eax,-12(%ebp) adcl %edx,-8(%ebp) adcl $0,-4(%ebp) movl 8(%esi),%eax /* msl of Xsig */ mull 4(%ecx) /* msl of b */ addl %eax,-8(%ebp) adcl %edx,-4(%ebp) movl -12(%ebp),%eax movl %eax,(%esi) movl -8(%ebp),%eax movl %eax,4(%esi) movl -4(%ebp),%eax movl %eax,8(%esi) popl %esi leave RET SYM_FUNC_END(mul64_Xsig) SYM_FUNC_START(mul_Xsig_Xsig) pushl %ebp movl %esp,%ebp subl $16,%esp pushl %esi movl PARAM1,%esi movl PARAM2,%ecx xor %eax,%eax movl %eax,-4(%ebp) movl %eax,-8(%ebp) movl (%esi),%eax /* lsl of Xsig */ mull 8(%ecx) /* msl of b */ movl %edx,-12(%ebp) movl 4(%esi),%eax /* midl of Xsig */ mull 4(%ecx) /* midl of b */ addl %edx,-12(%ebp) adcl $0,-8(%ebp) adcl $0,-4(%ebp) movl 8(%esi),%eax /* msl of Xsig */ mull (%ecx) /* lsl of b */ addl %edx,-12(%ebp) adcl $0,-8(%ebp) adcl $0,-4(%ebp) movl 4(%esi),%eax /* midl of Xsig */ mull 8(%ecx) /* msl of b */ addl %eax,-12(%ebp) adcl %edx,-8(%ebp) adcl $0,-4(%ebp) movl 8(%esi),%eax /* msl of Xsig */ mull 4(%ecx) /* midl of b */ addl %eax,-12(%ebp) adcl %edx,-8(%ebp) adcl $0,-4(%ebp) movl 8(%esi),%eax /* msl of Xsig */ mull 8(%ecx) /* msl of b */ addl %eax,-8(%ebp) adcl %edx,-4(%ebp) movl -12(%ebp),%edx movl %edx,(%esi) movl -8(%ebp),%edx movl %edx,4(%esi) movl -4(%ebp),%edx movl %edx,8(%esi) popl %esi leave RET SYM_FUNC_END(mul_Xsig_Xsig)
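mul32_Xsig above keeps the most-significant 96 bits of a 96x32-bit product; the same contract in a few lines of C (illustrative struct name, unsigned __int128 extension):

#include <stdint.h>

struct xsig96 { uint32_t lsw, midw, msw; };	/* mirrors the Xsig layout */

/* Sketch: x = (x * b) >> 32, i.e. the top 96 bits of the 128-bit product. */
static void mul32_x96_model(struct xsig96 *x, uint32_t b)
{
	unsigned __int128 v = ((unsigned __int128)x->msw << 64) |
			      ((unsigned __int128)x->midw << 32) | x->lsw;
	unsigned __int128 p = (v * b) >> 32;

	x->msw  = (uint32_t)(p >> 64);
	x->midw = (uint32_t)(p >> 32);
	x->lsw  = (uint32_t)p;
}

mul64_Xsig and mul_Xsig_Xsig extend the same pattern to 64-bit and 96-bit multipliers, discarding the partial products that only affect bits below the kept 96.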
aixcc-public/challenge-001-exemplar-source
6,189
arch/x86/math-emu/reg_u_sub.S
/* SPDX-License-Identifier: GPL-2.0 */ .file "reg_u_sub.S" /*---------------------------------------------------------------------------+ | reg_u_sub.S | | | | Core floating point subtraction routine. | | | | Copyright (C) 1992,1993,1995,1997 | | W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia | | E-mail billm@suburbia.net | | | | Call from C as: | | int FPU_u_sub(FPU_REG *arg1, FPU_REG *arg2, FPU_REG *answ, | | int control_w) | | Return value is the tag of the answer, or-ed with FPU_Exception if | | one was raised, or -1 on internal error. | | | +---------------------------------------------------------------------------*/ /* | Kernel subtraction routine FPU_u_sub(reg *arg1, reg *arg2, reg *answ). | Takes two valid reg f.p. numbers (TAG_Valid), which are | treated as unsigned numbers, | and returns their difference as a TAG_Valid or TAG_Zero f.p. | number. | The first number (arg1) must be the larger. | The returned number is normalized. | Basic checks are performed if PARANOID is defined. */ #include "exception.h" #include "fpu_emu.h" #include "control_w.h" .text SYM_FUNC_START(FPU_u_sub) pushl %ebp movl %esp,%ebp pushl %esi pushl %edi pushl %ebx movl PARAM1,%esi /* source 1 */ movl PARAM2,%edi /* source 2 */ movl PARAM6,%ecx subl PARAM7,%ecx /* exp1 - exp2 */ #ifdef PARANOID /* source 2 is always smaller than source 1 */ js L_bugged_1 testl $0x80000000,SIGH(%edi) /* The args are assumed to be be normalized */ je L_bugged_2 testl $0x80000000,SIGH(%esi) je L_bugged_2 #endif /* PARANOID */ /*--------------------------------------+ | Form a register holding the | | smaller number | +--------------------------------------*/ movl SIGH(%edi),%eax /* register ms word */ movl SIGL(%edi),%ebx /* register ls word */ movl PARAM3,%edi /* destination */ movl PARAM6,%edx movw %dx,EXP(%edi) /* Copy exponent to destination */ xorl %edx,%edx /* register extension */ /*--------------------------------------+ | Shift the temporary register | | right the required number of | | places. 
| +--------------------------------------*/ cmpw $32,%cx /* shrd only works for 0..31 bits */ jnc L_more_than_31 /* less than 32 bits */ shrd %cl,%ebx,%edx shrd %cl,%eax,%ebx shr %cl,%eax jmp L_shift_done L_more_than_31: cmpw $64,%cx jnc L_more_than_63 subb $32,%cl jz L_exactly_32 shrd %cl,%eax,%edx shr %cl,%eax orl %ebx,%ebx jz L_more_31_no_low /* none of the lowest bits is set */ orl $1,%edx /* record the fact in the extension */ L_more_31_no_low: movl %eax,%ebx xorl %eax,%eax jmp L_shift_done L_exactly_32: movl %ebx,%edx movl %eax,%ebx xorl %eax,%eax jmp L_shift_done L_more_than_63: cmpw $65,%cx jnc L_more_than_64 /* Shift right by 64 bits */ movl %eax,%edx orl %ebx,%ebx jz L_more_63_no_low orl $1,%edx jmp L_more_63_no_low L_more_than_64: jne L_more_than_65 /* Shift right by 65 bits */ /* Carry is clear if we get here */ movl %eax,%edx rcrl %edx jnc L_shift_65_nc orl $1,%edx jmp L_more_63_no_low L_shift_65_nc: orl %ebx,%ebx jz L_more_63_no_low orl $1,%edx jmp L_more_63_no_low L_more_than_65: movl $1,%edx /* The shifted nr always at least one '1' */ L_more_63_no_low: xorl %ebx,%ebx xorl %eax,%eax L_shift_done: L_subtr: /*------------------------------+ | Do the subtraction | +------------------------------*/ xorl %ecx,%ecx subl %edx,%ecx movl %ecx,%edx movl SIGL(%esi),%ecx sbbl %ebx,%ecx movl %ecx,%ebx movl SIGH(%esi),%ecx sbbl %eax,%ecx movl %ecx,%eax #ifdef PARANOID /* We can never get a borrow */ jc L_bugged #endif /* PARANOID */ /*--------------------------------------+ | Normalize the result | +--------------------------------------*/ testl $0x80000000,%eax jnz L_round /* no shifting needed */ orl %eax,%eax jnz L_shift_1 /* shift left 1 - 31 bits */ orl %ebx,%ebx jnz L_shift_32 /* shift left 32 - 63 bits */ /* * A rare case, the only one which is non-zero if we got here * is: 1000000 .... 0000 * -0111111 .... 1111 1 * -------------------- * 0000000 .... 0000 1 */ cmpl $0x80000000,%edx jnz L_must_be_zero /* Shift left 64 bits */ subw $64,EXP(%edi) xchg %edx,%eax jmp fpu_reg_round L_must_be_zero: #ifdef PARANOID orl %edx,%edx jnz L_bugged_3 #endif /* PARANOID */ /* The result is zero */ movw $0,EXP(%edi) /* exponent */ movl $0,SIGL(%edi) movl $0,SIGH(%edi) movl TAG_Zero,%eax jmp L_exit L_shift_32: movl %ebx,%eax movl %edx,%ebx movl $0,%edx subw $32,EXP(%edi) /* Can get underflow here */ /* We need to shift left by 1 - 31 bits */ L_shift_1: bsrl %eax,%ecx /* get the required shift in %ecx */ subl $31,%ecx negl %ecx shld %cl,%ebx,%eax shld %cl,%edx,%ebx shl %cl,%edx subw %cx,EXP(%edi) /* Can get underflow here */ L_round: jmp fpu_reg_round /* Round the result */ #ifdef PARANOID L_bugged_1: pushl EX_INTERNAL|0x206 call EXCEPTION pop %ebx jmp L_error_exit L_bugged_2: pushl EX_INTERNAL|0x209 call EXCEPTION pop %ebx jmp L_error_exit L_bugged_3: pushl EX_INTERNAL|0x210 call EXCEPTION pop %ebx jmp L_error_exit L_bugged_4: pushl EX_INTERNAL|0x211 call EXCEPTION pop %ebx jmp L_error_exit L_bugged: pushl EX_INTERNAL|0x212 call EXCEPTION pop %ebx jmp L_error_exit L_error_exit: movl $-1,%eax #endif /* PARANOID */ L_exit: popl %ebx popl %edi popl %esi leave RET SYM_FUNC_END(FPU_u_sub)
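Setting aside the sticky-bit bookkeeping during alignment, the subtraction itself above is a single 96-bit borrow chain: the larger significand (with a zero extension) minus the shifted smaller one. A minimal C model (illustrative names):

#include <stdint.h>

/* Sketch: (a_sig : 0) - (b_sig : b_ext) as a 96-bit quantity; the first
 * operand is the larger, so no borrow leaves the top, which is what the
 * PARANOID check above asserts. */
static void sub96_model(uint64_t a_sig, uint64_t b_sig, uint32_t b_ext,
			uint64_t *r_sig, uint32_t *r_ext)
{
	unsigned __int128 a = (unsigned __int128)a_sig << 32;
	unsigned __int128 b = ((unsigned __int128)b_sig << 32) | b_ext;
	unsigned __int128 r = a - b;

	*r_sig = (uint64_t)(r >> 32);
	*r_ext = (uint32_t)r;
}

Near-cancellation can leave the result with many leading zeros, which is why the routine then re-normalizes with bsrl/shld and lowers the exponent before rounding.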
aixcc-public/challenge-001-exemplar-source
1,621
arch/x86/math-emu/div_small.S
/* SPDX-License-Identifier: GPL-2.0 */ .file "div_small.S" /*---------------------------------------------------------------------------+ | div_small.S | | | | Divide a 64 bit integer by a 32 bit integer & return remainder. | | | | Copyright (C) 1992,1995 | | W. Metzenthen, 22 Parker St, Ormond, Vic 3163, | | Australia. E-mail billm@jacobi.maths.monash.edu.au | | | | | +---------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------+ | unsigned long FPU_div_small(unsigned long long *x, unsigned long y) | +---------------------------------------------------------------------------*/ #include "fpu_emu.h" .text SYM_FUNC_START(FPU_div_small) pushl %ebp movl %esp,%ebp pushl %esi movl PARAM1,%esi /* pointer to num */ movl PARAM2,%ecx /* The denominator */ movl 4(%esi),%eax /* Get the current num msw */ xorl %edx,%edx divl %ecx movl %eax,4(%esi) movl (%esi),%eax /* Get the num lsw */ divl %ecx movl %eax,(%esi) movl %edx,%eax /* Return the remainder in eax */ popl %esi leave RET SYM_FUNC_END(FPU_div_small)
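In C terms the routine above is just a 64-by-32 long division that leaves the quotient in place and returns the remainder; a direct model (illustrative name):

#include <stdint.h>

/* Sketch of the FPU_div_small contract: *x /= y, remainder returned. */
static uint32_t div_small_model(uint64_t *x, uint32_t y)
{
	uint32_t rem = (uint32_t)(*x % y);

	*x /= y;
	return rem;
}

The two divl instructions above implement exactly this: the first divides the high word, the second divides its remainder concatenated with the low word.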
aixcc-public/challenge-001-exemplar-source
4,044
arch/x86/math-emu/reg_u_add.S
/* SPDX-License-Identifier: GPL-2.0 */ .file "reg_u_add.S" /*---------------------------------------------------------------------------+ | reg_u_add.S | | | | Add two valid (TAG_Valid) FPU_REG numbers, of the same sign, and put the | | result in a destination FPU_REG. | | | | Copyright (C) 1992,1993,1995,1997 | | W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia | | E-mail billm@suburbia.net | | | | Call from C as: | | int FPU_u_add(FPU_REG *arg1, FPU_REG *arg2, FPU_REG *answ, | | int control_w) | | Return value is the tag of the answer, or-ed with FPU_Exception if | | one was raised, or -1 on internal error. | | | +---------------------------------------------------------------------------*/ /* | Kernel addition routine FPU_u_add(reg *arg1, reg *arg2, reg *answ). | Takes two valid reg f.p. numbers (TAG_Valid), which are | treated as unsigned numbers, | and returns their sum as a TAG_Valid or TAG_Special f.p. number. | The returned number is normalized. | Basic checks are performed if PARANOID is defined. */ #include "exception.h" #include "fpu_emu.h" #include "control_w.h" .text SYM_FUNC_START(FPU_u_add) pushl %ebp movl %esp,%ebp pushl %esi pushl %edi pushl %ebx movl PARAM1,%esi /* source 1 */ movl PARAM2,%edi /* source 2 */ movl PARAM6,%ecx movl %ecx,%edx subl PARAM7,%ecx /* exp1 - exp2 */ jge L_arg1_larger /* num1 is smaller */ movl SIGL(%esi),%ebx movl SIGH(%esi),%eax movl %edi,%esi movl PARAM7,%edx negw %cx jmp L_accum_loaded L_arg1_larger: /* num1 has larger or equal exponent */ movl SIGL(%edi),%ebx movl SIGH(%edi),%eax L_accum_loaded: movl PARAM3,%edi /* destination */ movw %dx,EXP(%edi) /* Copy exponent to destination */ xorl %edx,%edx /* clear the extension */ #ifdef PARANOID testl $0x80000000,%eax je L_bugged testl $0x80000000,SIGH(%esi) je L_bugged #endif /* PARANOID */ /* The number to be shifted is in %eax:%ebx:%edx */ cmpw $32,%cx /* shrd only works for 0..31 bits */ jnc L_more_than_31 /* less than 32 bits */ shrd %cl,%ebx,%edx shrd %cl,%eax,%ebx shr %cl,%eax jmp L_shift_done L_more_than_31: cmpw $64,%cx jnc L_more_than_63 subb $32,%cl jz L_exactly_32 shrd %cl,%eax,%edx shr %cl,%eax orl %ebx,%ebx jz L_more_31_no_low /* none of the lowest bits is set */ orl $1,%edx /* record the fact in the extension */ L_more_31_no_low: movl %eax,%ebx xorl %eax,%eax jmp L_shift_done L_exactly_32: movl %ebx,%edx movl %eax,%ebx xorl %eax,%eax jmp L_shift_done L_more_than_63: cmpw $65,%cx jnc L_more_than_64 movl %eax,%edx orl %ebx,%ebx jz L_more_63_no_low orl $1,%edx jmp L_more_63_no_low L_more_than_64: movl $1,%edx /* The shifted nr always at least one '1' */ L_more_63_no_low: xorl %ebx,%ebx xorl %eax,%eax L_shift_done: /* Now do the addition */ addl SIGL(%esi),%ebx adcl SIGH(%esi),%eax jnc L_round_the_result /* Overflow, adjust the result */ rcrl $1,%eax rcrl $1,%ebx rcrl $1,%edx jnc L_no_bit_lost orl $1,%edx L_no_bit_lost: incw EXP(%edi) L_round_the_result: jmp fpu_reg_round /* Round the result */ #ifdef PARANOID /* If we ever get here then we have problems! */ L_bugged: pushl EX_INTERNAL|0x201 call EXCEPTION pop %ebx movl $-1,%eax jmp L_exit L_exit: popl %ebx popl %edi popl %esi leave RET #endif /* PARANOID */ SYM_FUNC_END(FPU_u_add)
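A hedged C model of the path above (illustrative names; tags, exceptions and the final jump into fpu_reg_round are omitted): align the smaller significand by the exponent difference, fold any bits shifted out into a sticky extension so rounding still sees them, add, and shift back on carry.

#include <stdint.h>
#include <stdbool.h>

/* Sketch: add two normalized 64-bit significands whose exponents differ by
 * 'shift' (sig1 belongs to the larger exponent).  *ext receives the sticky
 * extension and *exp_inc is set when the sum carried and was shifted back. */
static uint64_t u_add_model(uint64_t sig1, uint64_t sig2, unsigned shift,
			    uint32_t *ext, bool *exp_inc)
{
	unsigned __int128 small = (unsigned __int128)sig2 << 32;  /* 96-bit sig:ext */
	unsigned __int128 sum;
	bool sticky = false;

	if (shift >= 96) {
		sticky = sig2 != 0;				/* everything lost */
		small = 0;
	} else if (shift) {
		sticky = (small & (((unsigned __int128)1 << shift) - 1)) != 0;
		small >>= shift;
	}
	small |= sticky;			/* record lost bits in the ls bit */

	sum = ((unsigned __int128)sig1 << 32) + small;
	*exp_inc = (sum >> 96) != 0;
	if (*exp_inc)				/* carry out: halve, keep the lost bit sticky */
		sum = (sum >> 1) | (sum & 1);
	*ext = (uint32_t)sum;
	return (uint64_t)(sum >> 32);
}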
aixcc-public/challenge-001-exemplar-source
3,709
arch/x86/math-emu/reg_norm.S
/* SPDX-License-Identifier: GPL-2.0 */ /*---------------------------------------------------------------------------+ | reg_norm.S | | | | Copyright (C) 1992,1993,1994,1995,1997 | | W. Metzenthen, 22 Parker St, Ormond, Vic 3163, | | Australia. E-mail billm@suburbia.net | | | | Normalize the value in a FPU_REG. | | | | Call from C as: | | int FPU_normalize(FPU_REG *n) | | | | int FPU_normalize_nuo(FPU_REG *n) | | | | Return value is the tag of the answer, or-ed with FPU_Exception if | | one was raised, or -1 on internal error. | | | +---------------------------------------------------------------------------*/ #include "fpu_emu.h" .text SYM_FUNC_START(FPU_normalize) pushl %ebp movl %esp,%ebp pushl %ebx movl PARAM1,%ebx movl SIGH(%ebx),%edx movl SIGL(%ebx),%eax orl %edx,%edx /* ms bits */ js L_done /* Already normalized */ jnz L_shift_1 /* Shift left 1 - 31 bits */ orl %eax,%eax jz L_zero /* The contents are zero */ movl %eax,%edx xorl %eax,%eax subw $32,EXP(%ebx) /* This can cause an underflow */ /* We need to shift left by 1 - 31 bits */ L_shift_1: bsrl %edx,%ecx /* get the required shift in %ecx */ subl $31,%ecx negl %ecx shld %cl,%eax,%edx shl %cl,%eax subw %cx,EXP(%ebx) /* This can cause an underflow */ movl %edx,SIGH(%ebx) movl %eax,SIGL(%ebx) L_done: cmpw EXP_OVER,EXP(%ebx) jge L_overflow cmpw EXP_UNDER,EXP(%ebx) jle L_underflow L_exit_valid: movl TAG_Valid,%eax /* Convert the exponent to 80x87 form. */ addw EXTENDED_Ebias,EXP(%ebx) andw $0x7fff,EXP(%ebx) L_exit: popl %ebx leave RET L_zero: movw $0,EXP(%ebx) movl TAG_Zero,%eax jmp L_exit L_underflow: /* Convert the exponent to 80x87 form. */ addw EXTENDED_Ebias,EXP(%ebx) push %ebx call arith_underflow pop %ebx jmp L_exit L_overflow: /* Convert the exponent to 80x87 form. */ addw EXTENDED_Ebias,EXP(%ebx) push %ebx call arith_overflow pop %ebx jmp L_exit SYM_FUNC_END(FPU_normalize) /* Normalise without reporting underflow or overflow */ SYM_FUNC_START(FPU_normalize_nuo) pushl %ebp movl %esp,%ebp pushl %ebx movl PARAM1,%ebx movl SIGH(%ebx),%edx movl SIGL(%ebx),%eax orl %edx,%edx /* ms bits */ js L_exit_nuo_valid /* Already normalized */ jnz L_nuo_shift_1 /* Shift left 1 - 31 bits */ orl %eax,%eax jz L_exit_nuo_zero /* The contents are zero */ movl %eax,%edx xorl %eax,%eax subw $32,EXP(%ebx) /* This can cause an underflow */ /* We need to shift left by 1 - 31 bits */ L_nuo_shift_1: bsrl %edx,%ecx /* get the required shift in %ecx */ subl $31,%ecx negl %ecx shld %cl,%eax,%edx shl %cl,%eax subw %cx,EXP(%ebx) /* This can cause an underflow */ movl %edx,SIGH(%ebx) movl %eax,SIGL(%ebx) L_exit_nuo_valid: movl TAG_Valid,%eax popl %ebx leave RET L_exit_nuo_zero: movl TAG_Zero,%eax movw EXP_UNDER,EXP(%ebx) popl %ebx leave RET SYM_FUNC_END(FPU_normalize_nuo)
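The normalization above is a count-leading-zeros followed by a left shift, with the exponent lowered by the same amount. A minimal C model (illustrative; uses the GCC/Clang builtin where the assembly uses bsrl, and leaves out the underflow/overflow reporting and the 80x87 exponent re-biasing):

#include <stdint.h>

/* Sketch: normalize a 64-bit significand in place; returns -1 for a zero
 * significand, which the real code tags as TAG_Zero instead. */
static int normalize_model(uint64_t *sig, int *exp)
{
	int lz;

	if (*sig == 0)
		return -1;

	lz = __builtin_clzll(*sig);	/* 0..63 leading zero bits */
	*sig <<= lz;
	*exp -= lz;
	return 0;
}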
aixcc-public/challenge-001-exemplar-source
6,312
arch/x86/math-emu/wm_shrx.S
/* SPDX-License-Identifier: GPL-2.0 */ .file "wm_shrx.S" /*---------------------------------------------------------------------------+ | wm_shrx.S | | | | 64 bit right shift functions | | | | Copyright (C) 1992,1995 | | W. Metzenthen, 22 Parker St, Ormond, Vic 3163, | | Australia. E-mail billm@jacobi.maths.monash.edu.au | | | | Call from C as: | | unsigned FPU_shrx(void *arg1, unsigned arg2) | | and | | unsigned FPU_shrxs(void *arg1, unsigned arg2) | | | +---------------------------------------------------------------------------*/ #include "fpu_emu.h" .text /*---------------------------------------------------------------------------+ | unsigned FPU_shrx(void *arg1, unsigned arg2) | | | | Extended shift right function. | | Fastest for small shifts. | | Shifts the 64 bit quantity pointed to by the first arg (arg1) | | right by the number of bits specified by the second arg (arg2). | | Forms a 96 bit quantity from the 64 bit arg and eax: | | [ 64 bit arg ][ eax ] | | shift right ---------> | | The eax register is initialized to 0 before the shifting. | | Results returned in the 64 bit arg and eax. | +---------------------------------------------------------------------------*/ SYM_FUNC_START(FPU_shrx) push %ebp movl %esp,%ebp pushl %esi movl PARAM2,%ecx movl PARAM1,%esi cmpl $32,%ecx /* shrd only works for 0..31 bits */ jnc L_more_than_31 /* less than 32 bits */ pushl %ebx movl (%esi),%ebx /* lsl */ movl 4(%esi),%edx /* msl */ xorl %eax,%eax /* extension */ shrd %cl,%ebx,%eax shrd %cl,%edx,%ebx shr %cl,%edx movl %ebx,(%esi) movl %edx,4(%esi) popl %ebx popl %esi leave RET L_more_than_31: cmpl $64,%ecx jnc L_more_than_63 subb $32,%cl movl (%esi),%eax /* lsl */ movl 4(%esi),%edx /* msl */ shrd %cl,%edx,%eax shr %cl,%edx movl %edx,(%esi) movl $0,4(%esi) popl %esi leave RET L_more_than_63: cmpl $96,%ecx jnc L_more_than_95 subb $64,%cl movl 4(%esi),%eax /* msl */ shr %cl,%eax xorl %edx,%edx movl %edx,(%esi) movl %edx,4(%esi) popl %esi leave RET L_more_than_95: xorl %eax,%eax movl %eax,(%esi) movl %eax,4(%esi) popl %esi leave RET SYM_FUNC_END(FPU_shrx) /*---------------------------------------------------------------------------+ | unsigned FPU_shrxs(void *arg1, unsigned arg2) | | | | Extended shift right function (optimized for small floating point | | integers). | | Shifts the 64 bit quantity pointed to by the first arg (arg1) | | right by the number of bits specified by the second arg (arg2). | | Forms a 96 bit quantity from the 64 bit arg and eax: | | [ 64 bit arg ][ eax ] | | shift right ---------> | | The eax register is initialized to 0 before the shifting. | | The lower 8 bits of eax are lost and replaced by a flag which is | | set (to 0x01) if any bit, apart from the first one, is set in the | | part which has been shifted out of the arg. | | Results returned in the 64 bit arg and eax. 
| +---------------------------------------------------------------------------*/ SYM_FUNC_START(FPU_shrxs) push %ebp movl %esp,%ebp pushl %esi pushl %ebx movl PARAM2,%ecx movl PARAM1,%esi cmpl $64,%ecx /* shrd only works for 0..31 bits */ jnc Ls_more_than_63 cmpl $32,%ecx /* shrd only works for 0..31 bits */ jc Ls_less_than_32 /* We got here without jumps by assuming that the most common requirement is for small integers */ /* Shift by [32..63] bits */ subb $32,%cl movl (%esi),%eax /* lsl */ movl 4(%esi),%edx /* msl */ xorl %ebx,%ebx shrd %cl,%eax,%ebx shrd %cl,%edx,%eax shr %cl,%edx orl %ebx,%ebx /* test these 32 bits */ setne %bl test $0x7fffffff,%eax /* and 31 bits here */ setne %bh orw %bx,%bx /* Any of the 63 bit set ? */ setne %al movl %edx,(%esi) movl $0,4(%esi) popl %ebx popl %esi leave RET /* Shift by [0..31] bits */ Ls_less_than_32: movl (%esi),%ebx /* lsl */ movl 4(%esi),%edx /* msl */ xorl %eax,%eax /* extension */ shrd %cl,%ebx,%eax shrd %cl,%edx,%ebx shr %cl,%edx test $0x7fffffff,%eax /* only need to look at eax here */ setne %al movl %ebx,(%esi) movl %edx,4(%esi) popl %ebx popl %esi leave RET /* Shift by [64..95] bits */ Ls_more_than_63: cmpl $96,%ecx jnc Ls_more_than_95 subb $64,%cl movl (%esi),%ebx /* lsl */ movl 4(%esi),%eax /* msl */ xorl %edx,%edx /* extension */ shrd %cl,%ebx,%edx shrd %cl,%eax,%ebx shr %cl,%eax orl %ebx,%edx setne %bl test $0x7fffffff,%eax /* only need to look at eax here */ setne %bh orw %bx,%bx setne %al xorl %edx,%edx movl %edx,(%esi) /* set to zero */ movl %edx,4(%esi) /* set to zero */ popl %ebx popl %esi leave RET Ls_more_than_95: /* Shift by [96..inf) bits */ xorl %eax,%eax movl (%esi),%ebx orl 4(%esi),%ebx setne %al xorl %ebx,%ebx movl %ebx,(%esi) movl %ebx,4(%esi) popl %ebx popl %esi leave RET SYM_FUNC_END(FPU_shrxs)
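A hedged C model of the FPU_shrxs contract described in its header, for shifts of 1..63 bits (the larger-shift paths are analogous); the helper name is illustrative:

#include <stdint.h>

/* Sketch: shift *arg right by nr (1..63) bits and return the 32-bit
 * extension with its low byte replaced by a sticky flag that is set when
 * any shifted-out bit below the extension's ms bit was non-zero. */
static uint32_t shrxs_model(uint64_t *arg, unsigned nr)
{
	unsigned __int128 wide = (unsigned __int128)*arg << 32;	/* [arg : 32 zero bits] */
	uint32_t ext = (uint32_t)(wide >> nr);			/* bits shifted out of arg */
	uint64_t below = *arg & ((1ULL << (nr - 1)) - 1);	/* below the rounding bit */

	*arg >>= nr;
	return (ext & 0xffffff00u) | (below != 0);
}

The rounding code can then treat bit 31 of the returned extension as the rounding bit and the low byte as the sticky information.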
aixcc-public/challenge-001-exemplar-source
12,455
arch/x86/math-emu/reg_u_div.S
/* SPDX-License-Identifier: GPL-2.0 */ .file "reg_u_div.S" /*---------------------------------------------------------------------------+ | reg_u_div.S | | | | Divide one FPU_REG by another and put the result in a destination FPU_REG.| | | | Copyright (C) 1992,1993,1995,1997 | | W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia | | E-mail billm@suburbia.net | | | | | +---------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------+ | Call from C as: | | int FPU_u_div(FPU_REG *a, FPU_REG *b, FPU_REG *dest, | | unsigned int control_word, char *sign) | | | | Does not compute the destination exponent, but does adjust it. | | | | Return value is the tag of the answer, or-ed with FPU_Exception if | | one was raised, or -1 on internal error. | +---------------------------------------------------------------------------*/ #include "exception.h" #include "fpu_emu.h" #include "control_w.h" /* #define dSIGL(x) (x) */ /* #define dSIGH(x) 4(x) */ #ifndef NON_REENTRANT_FPU /* Local storage on the stack: Result: FPU_accum_3:FPU_accum_2:FPU_accum_1:FPU_accum_0 Overflow flag: ovfl_flag */ #define FPU_accum_3 -4(%ebp) #define FPU_accum_2 -8(%ebp) #define FPU_accum_1 -12(%ebp) #define FPU_accum_0 -16(%ebp) #define FPU_result_1 -20(%ebp) #define FPU_result_2 -24(%ebp) #define FPU_ovfl_flag -28(%ebp) #else .data /* Local storage in a static area: Result: FPU_accum_3:FPU_accum_2:FPU_accum_1:FPU_accum_0 Overflow flag: ovfl_flag */ .align 4,0 FPU_accum_3: .long 0 FPU_accum_2: .long 0 FPU_accum_1: .long 0 FPU_accum_0: .long 0 FPU_result_1: .long 0 FPU_result_2: .long 0 FPU_ovfl_flag: .byte 0 #endif /* NON_REENTRANT_FPU */ #define REGA PARAM1 #define REGB PARAM2 #define DEST PARAM3 .text SYM_FUNC_START(FPU_u_div) pushl %ebp movl %esp,%ebp #ifndef NON_REENTRANT_FPU subl $28,%esp #endif /* NON_REENTRANT_FPU */ pushl %esi pushl %edi pushl %ebx movl REGA,%esi movl REGB,%ebx movl DEST,%edi movswl EXP(%esi),%edx movswl EXP(%ebx),%eax subl %eax,%edx addl EXP_BIAS,%edx /* A denormal and a large number can cause an exponent underflow */ cmpl EXP_WAY_UNDER,%edx jg xExp_not_underflow /* Set to a really low value allow correct handling */ movl EXP_WAY_UNDER,%edx xExp_not_underflow: movw %dx,EXP(%edi) #ifdef PARANOID /* testl $0x80000000, SIGH(%esi) // Dividend */ /* je L_bugged */ testl $0x80000000, SIGH(%ebx) /* Divisor */ je L_bugged #endif /* PARANOID */ /* Check if the divisor can be treated as having just 32 bits */ cmpl $0,SIGL(%ebx) jnz L_Full_Division /* Can't do a quick divide */ /* We should be able to zip through the division here */ movl SIGH(%ebx),%ecx /* The divisor */ movl SIGH(%esi),%edx /* Dividend */ movl SIGL(%esi),%eax /* Dividend */ cmpl %ecx,%edx setaeb FPU_ovfl_flag /* Keep a record */ jb L_no_adjust subl %ecx,%edx /* Prevent the overflow */ L_no_adjust: /* Divide the 64 bit number by the 32 bit denominator */ divl %ecx movl %eax,FPU_result_2 /* Work on the remainder of the first division */ xorl %eax,%eax divl %ecx movl %eax,FPU_result_1 /* Work on the remainder of the 64 bit division */ xorl %eax,%eax divl %ecx testb $255,FPU_ovfl_flag /* was the num > denom ? 
*/ je L_no_overflow /* Do the shifting here */ /* increase the exponent */ incw EXP(%edi) /* shift the mantissa right one bit */ stc /* To set the ms bit */ rcrl FPU_result_2 rcrl FPU_result_1 rcrl %eax L_no_overflow: jmp LRound_precision /* Do the rounding as required */ /*---------------------------------------------------------------------------+ | Divide: Return arg1/arg2 to arg3. | | | | This routine does not use the exponents of arg1 and arg2, but does | | adjust the exponent of arg3. | | | | The maximum returned value is (ignoring exponents) | | .ffffffff ffffffff | | ------------------ = 1.ffffffff fffffffe | | .80000000 00000000 | | and the minimum is | | .80000000 00000000 | | ------------------ = .80000000 00000001 (rounded) | | .ffffffff ffffffff | | | +---------------------------------------------------------------------------*/ L_Full_Division: /* Save extended dividend in local register */ movl SIGL(%esi),%eax movl %eax,FPU_accum_2 movl SIGH(%esi),%eax movl %eax,FPU_accum_3 xorl %eax,%eax movl %eax,FPU_accum_1 /* zero the extension */ movl %eax,FPU_accum_0 /* zero the extension */ movl SIGL(%esi),%eax /* Get the current num */ movl SIGH(%esi),%edx /*----------------------------------------------------------------------*/ /* Initialization done. Do the first 32 bits. */ movb $0,FPU_ovfl_flag cmpl SIGH(%ebx),%edx /* Test for imminent overflow */ jb LLess_than_1 ja LGreater_than_1 cmpl SIGL(%ebx),%eax jb LLess_than_1 LGreater_than_1: /* The dividend is greater or equal, would cause overflow */ setaeb FPU_ovfl_flag /* Keep a record */ subl SIGL(%ebx),%eax sbbl SIGH(%ebx),%edx /* Prevent the overflow */ movl %eax,FPU_accum_2 movl %edx,FPU_accum_3 LLess_than_1: /* At this point, we have a dividend < divisor, with a record of adjustment in FPU_ovfl_flag */ /* We will divide by a number which is too large */ movl SIGH(%ebx),%ecx addl $1,%ecx jnc LFirst_div_not_1 /* here we need to divide by 100000000h, i.e., no division at all.. */ mov %edx,%eax jmp LFirst_div_done LFirst_div_not_1: divl %ecx /* Divide the numerator by the augmented denom ms dw */ LFirst_div_done: movl %eax,FPU_result_2 /* Put the result in the answer */ mull SIGH(%ebx) /* mul by the ms dw of the denom */ subl %eax,FPU_accum_2 /* Subtract from the num local reg */ sbbl %edx,FPU_accum_3 movl FPU_result_2,%eax /* Get the result back */ mull SIGL(%ebx) /* now mul the ls dw of the denom */ subl %eax,FPU_accum_1 /* Subtract from the num local reg */ sbbl %edx,FPU_accum_2 sbbl $0,FPU_accum_3 je LDo_2nd_32_bits /* Must check for non-zero result here */ #ifdef PARANOID jb L_bugged_1 #endif /* PARANOID */ /* need to subtract another once of the denom */ incl FPU_result_2 /* Correct the answer */ movl SIGL(%ebx),%eax movl SIGH(%ebx),%edx subl %eax,FPU_accum_1 /* Subtract from the num local reg */ sbbl %edx,FPU_accum_2 #ifdef PARANOID sbbl $0,FPU_accum_3 jne L_bugged_1 /* Must check for non-zero result here */ #endif /* PARANOID */ /*----------------------------------------------------------------------*/ /* Half of the main problem is done, there is just a reduced numerator to handle now. 
Work with the second 32 bits, FPU_accum_0 not used from now on */ LDo_2nd_32_bits: movl FPU_accum_2,%edx /* get the reduced num */ movl FPU_accum_1,%eax /* need to check for possible subsequent overflow */ cmpl SIGH(%ebx),%edx jb LDo_2nd_div ja LPrevent_2nd_overflow cmpl SIGL(%ebx),%eax jb LDo_2nd_div LPrevent_2nd_overflow: /* The numerator is greater or equal, would cause overflow */ /* prevent overflow */ subl SIGL(%ebx),%eax sbbl SIGH(%ebx),%edx movl %edx,FPU_accum_2 movl %eax,FPU_accum_1 incl FPU_result_2 /* Reflect the subtraction in the answer */ #ifdef PARANOID je L_bugged_2 /* Can't bump the result to 1.0 */ #endif /* PARANOID */ LDo_2nd_div: cmpl $0,%ecx /* augmented denom msw */ jnz LSecond_div_not_1 /* %ecx == 0, we are dividing by 1.0 */ mov %edx,%eax jmp LSecond_div_done LSecond_div_not_1: divl %ecx /* Divide the numerator by the denom ms dw */ LSecond_div_done: movl %eax,FPU_result_1 /* Put the result in the answer */ mull SIGH(%ebx) /* mul by the ms dw of the denom */ subl %eax,FPU_accum_1 /* Subtract from the num local reg */ sbbl %edx,FPU_accum_2 #ifdef PARANOID jc L_bugged_2 #endif /* PARANOID */ movl FPU_result_1,%eax /* Get the result back */ mull SIGL(%ebx) /* now mul the ls dw of the denom */ subl %eax,FPU_accum_0 /* Subtract from the num local reg */ sbbl %edx,FPU_accum_1 /* Subtract from the num local reg */ sbbl $0,FPU_accum_2 #ifdef PARANOID jc L_bugged_2 #endif /* PARANOID */ jz LDo_3rd_32_bits #ifdef PARANOID cmpl $1,FPU_accum_2 jne L_bugged_2 #endif /* PARANOID */ /* need to subtract another once of the denom */ movl SIGL(%ebx),%eax movl SIGH(%ebx),%edx subl %eax,FPU_accum_0 /* Subtract from the num local reg */ sbbl %edx,FPU_accum_1 sbbl $0,FPU_accum_2 #ifdef PARANOID jc L_bugged_2 jne L_bugged_2 #endif /* PARANOID */ addl $1,FPU_result_1 /* Correct the answer */ adcl $0,FPU_result_2 #ifdef PARANOID jc L_bugged_2 /* Must check for non-zero result here */ #endif /* PARANOID */ /*----------------------------------------------------------------------*/ /* The division is essentially finished here, we just need to perform tidying operations. Deal with the 3rd 32 bits */ LDo_3rd_32_bits: movl FPU_accum_1,%edx /* get the reduced num */ movl FPU_accum_0,%eax /* need to check for possible subsequent overflow */ cmpl SIGH(%ebx),%edx /* denom */ jb LRound_prep ja LPrevent_3rd_overflow cmpl SIGL(%ebx),%eax /* denom */ jb LRound_prep LPrevent_3rd_overflow: /* prevent overflow */ subl SIGL(%ebx),%eax sbbl SIGH(%ebx),%edx movl %edx,FPU_accum_1 movl %eax,FPU_accum_0 addl $1,FPU_result_1 /* Reflect the subtraction in the answer */ adcl $0,FPU_result_2 jne LRound_prep jnc LRound_prep /* This is a tricky spot, there is an overflow of the answer */ movb $255,FPU_ovfl_flag /* Overflow -> 1.000 */ LRound_prep: /* * Prepare for rounding. * To test for rounding, we just need to compare 2*accum with the * denom. */ movl FPU_accum_0,%ecx movl FPU_accum_1,%edx movl %ecx,%eax orl %edx,%eax jz LRound_ovfl /* The accumulator contains zero. 
*/ /* Multiply by 2 */ clc rcll $1,%ecx rcll $1,%edx jc LRound_large /* No need to compare, denom smaller */ subl SIGL(%ebx),%ecx sbbl SIGH(%ebx),%edx jnc LRound_not_small movl $0x70000000,%eax /* Denom was larger */ jmp LRound_ovfl LRound_not_small: jnz LRound_large movl $0x80000000,%eax /* Remainder was exactly 1/2 denom */ jmp LRound_ovfl LRound_large: movl $0xff000000,%eax /* Denom was smaller */ LRound_ovfl: /* We are now ready to deal with rounding, but first we must get the bits properly aligned */ testb $255,FPU_ovfl_flag /* was the num > denom ? */ je LRound_precision incw EXP(%edi) /* shift the mantissa right one bit */ stc /* Will set the ms bit */ rcrl FPU_result_2 rcrl FPU_result_1 rcrl %eax /* Round the result as required */ LRound_precision: decw EXP(%edi) /* binary point between 1st & 2nd bits */ movl %eax,%edx movl FPU_result_1,%ebx movl FPU_result_2,%eax jmp fpu_reg_round #ifdef PARANOID /* The logic is wrong if we got here */ L_bugged: pushl EX_INTERNAL|0x202 call EXCEPTION pop %ebx jmp L_exit L_bugged_1: pushl EX_INTERNAL|0x203 call EXCEPTION pop %ebx jmp L_exit L_bugged_2: pushl EX_INTERNAL|0x204 call EXCEPTION pop %ebx jmp L_exit L_exit: movl $-1,%eax popl %ebx popl %edi popl %esi leave RET #endif /* PARANOID */ SYM_FUNC_END(FPU_u_div)
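A short C sketch (assumed helper name, not the kernel function) of the overflow-avoidance trick used in the 64-by-32-bit quick path above: x86 DIV faults when the quotient will not fit in 32 bits, i.e. when the high dividend word is greater than or equal to the divisor, so the routine first subtracts the divisor from the high word and records that fact in FPU_ovfl_flag; the missing 2^32 in the quotient is compensated later by incrementing the result exponent and rotating the mantissa. Only the first of the three divl steps is modelled here, and a normalized divisor (top bit set) is assumed.

#include <stdint.h>

/* First division step of the quick path, with the pre-subtraction that
 * keeps the hardware 64/32 divide from overflowing. */
static uint32_t first_div_step(uint64_t dividend, uint32_t divisor, int *overflowed)
{
	uint32_t hi = (uint32_t)(dividend >> 32);
	uint32_t lo = (uint32_t)dividend;

	*overflowed = (hi >= divisor);
	if (*overflowed)
		hi -= divisor;		/* setaeb FPU_ovfl_flag ; subl %ecx,%edx */

	return (uint32_t)((((uint64_t)hi << 32) | lo) / divisor);	/* divl %ecx */
}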
aixcc-public/challenge-001-exemplar-source
2,507
arch/x86/lib/hweight.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <linux/linkage.h> #include <asm/export.h> #include <asm/asm.h> /* * unsigned int __sw_hweight32(unsigned int w) * %rdi: w */ SYM_FUNC_START(__sw_hweight32) #ifdef CONFIG_X86_64 movl %edi, %eax # w #endif __ASM_SIZE(push,) %__ASM_REG(dx) movl %eax, %edx # w -> t shrl %edx # t >>= 1 andl $0x55555555, %edx # t &= 0x55555555 subl %edx, %eax # w -= t movl %eax, %edx # w -> t shrl $2, %eax # w_tmp >>= 2 andl $0x33333333, %edx # t &= 0x33333333 andl $0x33333333, %eax # w_tmp &= 0x33333333 addl %edx, %eax # w = w_tmp + t movl %eax, %edx # w -> t shrl $4, %edx # t >>= 4 addl %edx, %eax # w_tmp += t andl $0x0f0f0f0f, %eax # w_tmp &= 0x0f0f0f0f imull $0x01010101, %eax, %eax # w_tmp *= 0x01010101 shrl $24, %eax # w = w_tmp >> 24 __ASM_SIZE(pop,) %__ASM_REG(dx) RET SYM_FUNC_END(__sw_hweight32) EXPORT_SYMBOL(__sw_hweight32) SYM_FUNC_START(__sw_hweight64) #ifdef CONFIG_X86_64 pushq %rdi pushq %rdx movq %rdi, %rdx # w -> t movabsq $0x5555555555555555, %rax shrq %rdx # t >>= 1 andq %rdx, %rax # t &= 0x5555555555555555 movabsq $0x3333333333333333, %rdx subq %rax, %rdi # w -= t movq %rdi, %rax # w -> t shrq $2, %rdi # w_tmp >>= 2 andq %rdx, %rax # t &= 0x3333333333333333 andq %rdi, %rdx # w_tmp &= 0x3333333333333333 addq %rdx, %rax # w = w_tmp + t movq %rax, %rdx # w -> t shrq $4, %rdx # t >>= 4 addq %rdx, %rax # w_tmp += t movabsq $0x0f0f0f0f0f0f0f0f, %rdx andq %rdx, %rax # w_tmp &= 0x0f0f0f0f0f0f0f0f movabsq $0x0101010101010101, %rdx imulq %rdx, %rax # w_tmp *= 0x0101010101010101 shrq $56, %rax # w = w_tmp >> 56 popq %rdx popq %rdi RET #else /* CONFIG_X86_32 */ /* We're getting an u64 arg in (%eax,%edx): unsigned long hweight64(__u64 w) */ pushl %ecx call __sw_hweight32 movl %eax, %ecx # stash away result movl %edx, %eax # second part of input call __sw_hweight32 addl %ecx, %eax # result popl %ecx RET #endif SYM_FUNC_END(__sw_hweight64) EXPORT_SYMBOL(__sw_hweight64)
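The same bit-counting sequence, written as C for readability (same constants and steps as __sw_hweight32): counts are accumulated per 2-bit, 4-bit and 8-bit field, and the final multiply sums the four byte counts into the top byte.

#include <stdint.h>

static unsigned int hweight32_model(uint32_t w)
{
	uint32_t t = (w >> 1) & 0x55555555;			/* half of each 2-bit field */

	w -= t;							/* 2-bit population counts */
	w = (w & 0x33333333) + ((w >> 2) & 0x33333333);		/* 4-bit counts */
	w = (w + (w >> 4)) & 0x0f0f0f0f;			/* per-byte counts */
	return (w * 0x01010101) >> 24;				/* sum of bytes lands in top byte */
}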
aixcc-public/challenge-001-exemplar-source
2,667
arch/x86/lib/atomic64_386_32.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * atomic64_t for 386/486 * * Copyright © 2010 Luca Barbieri */ #include <linux/linkage.h> #include <asm/alternative.h> /* if you want SMP support, implement these with real spinlocks */ .macro IRQ_SAVE reg pushfl cli .endm .macro IRQ_RESTORE reg popfl .endm #define BEGIN_IRQ_SAVE(op) \ .macro endp; \ SYM_FUNC_END(atomic64_##op##_386); \ .purgem endp; \ .endm; \ SYM_FUNC_START(atomic64_##op##_386); \ IRQ_SAVE v; #define ENDP endp #define RET_IRQ_RESTORE \ IRQ_RESTORE v; \ RET #define v %ecx BEGIN_IRQ_SAVE(read) movl (v), %eax movl 4(v), %edx RET_IRQ_RESTORE ENDP #undef v #define v %esi BEGIN_IRQ_SAVE(set) movl %ebx, (v) movl %ecx, 4(v) RET_IRQ_RESTORE ENDP #undef v #define v %esi BEGIN_IRQ_SAVE(xchg) movl (v), %eax movl 4(v), %edx movl %ebx, (v) movl %ecx, 4(v) RET_IRQ_RESTORE ENDP #undef v #define v %ecx BEGIN_IRQ_SAVE(add) addl %eax, (v) adcl %edx, 4(v) RET_IRQ_RESTORE ENDP #undef v #define v %ecx BEGIN_IRQ_SAVE(add_return) addl (v), %eax adcl 4(v), %edx movl %eax, (v) movl %edx, 4(v) RET_IRQ_RESTORE ENDP #undef v #define v %ecx BEGIN_IRQ_SAVE(sub) subl %eax, (v) sbbl %edx, 4(v) RET_IRQ_RESTORE ENDP #undef v #define v %ecx BEGIN_IRQ_SAVE(sub_return) negl %edx negl %eax sbbl $0, %edx addl (v), %eax adcl 4(v), %edx movl %eax, (v) movl %edx, 4(v) RET_IRQ_RESTORE ENDP #undef v #define v %esi BEGIN_IRQ_SAVE(inc) addl $1, (v) adcl $0, 4(v) RET_IRQ_RESTORE ENDP #undef v #define v %esi BEGIN_IRQ_SAVE(inc_return) movl (v), %eax movl 4(v), %edx addl $1, %eax adcl $0, %edx movl %eax, (v) movl %edx, 4(v) RET_IRQ_RESTORE ENDP #undef v #define v %esi BEGIN_IRQ_SAVE(dec) subl $1, (v) sbbl $0, 4(v) RET_IRQ_RESTORE ENDP #undef v #define v %esi BEGIN_IRQ_SAVE(dec_return) movl (v), %eax movl 4(v), %edx subl $1, %eax sbbl $0, %edx movl %eax, (v) movl %edx, 4(v) RET_IRQ_RESTORE ENDP #undef v #define v %esi BEGIN_IRQ_SAVE(add_unless) addl %eax, %ecx adcl %edx, %edi addl (v), %eax adcl 4(v), %edx cmpl %eax, %ecx je 3f 1: movl %eax, (v) movl %edx, 4(v) movl $1, %eax 2: RET_IRQ_RESTORE 3: cmpl %edx, %edi jne 1b xorl %eax, %eax jmp 2b ENDP #undef v #define v %esi BEGIN_IRQ_SAVE(inc_not_zero) movl (v), %eax movl 4(v), %edx testl %eax, %eax je 3f 1: addl $1, %eax adcl $0, %edx movl %eax, (v) movl %edx, 4(v) movl $1, %eax 2: RET_IRQ_RESTORE 3: testl %edx, %edx jne 1b jmp 2b ENDP #undef v #define v %esi BEGIN_IRQ_SAVE(dec_if_positive) movl (v), %eax movl 4(v), %edx subl $1, %eax sbbl $0, %edx js 1f movl %eax, (v) movl %edx, 4(v) 1: RET_IRQ_RESTORE ENDP #undef v
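Conceptually (hypothetical helper names, illustration only), the 386/486 fallback provides 64-bit atomics on a uniprocessor kernel simply by disabling interrupts around plain 32-bit add/adc pairs, which is what the IRQ_SAVE/IRQ_RESTORE macros above do with pushfl/cli and popfl:

/* Stand-ins for pushfl/cli and popfl -- hypothetical, not kernel APIs. */
static unsigned long irq_save_model(void)		{ return 0; }
static void irq_restore_model(unsigned long flags)	{ (void)flags; }

static long long atomic64_add_return_model(long long delta, long long *v)
{
	unsigned long flags = irq_save_model();	/* IRQ_SAVE: pushfl ; cli */
	long long ret = *v + delta;		/* addl/adcl pair in the asm */

	*v = ret;
	irq_restore_model(flags);		/* IRQ_RESTORE: popfl */
	return ret;
}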
aixcc-public/challenge-001-exemplar-source
3,582
arch/x86/lib/memcpy_64.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* Copyright 2002 Andi Kleen */ #include <linux/linkage.h> #include <linux/cfi_types.h> #include <asm/errno.h> #include <asm/cpufeatures.h> #include <asm/alternative.h> #include <asm/export.h> .pushsection .noinstr.text, "ax" /* * We build a jump to memcpy_orig by default which gets NOPped out on * the majority of x86 CPUs which set REP_GOOD. In addition, CPUs which * have the enhanced REP MOVSB/STOSB feature (ERMS), change those NOPs * to a jmp to memcpy_erms which does the REP; MOVSB mem copy. */ /* * memcpy - Copy a memory block. * * Input: * rdi destination * rsi source * rdx count * * Output: * rax original destination */ SYM_TYPED_FUNC_START(__memcpy) ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \ "jmp memcpy_erms", X86_FEATURE_ERMS movq %rdi, %rax movq %rdx, %rcx shrq $3, %rcx andl $7, %edx rep movsq movl %edx, %ecx rep movsb RET SYM_FUNC_END(__memcpy) EXPORT_SYMBOL(__memcpy) SYM_FUNC_ALIAS_WEAK(memcpy, __memcpy) EXPORT_SYMBOL(memcpy) /* * memcpy_erms() - enhanced fast string memcpy. This is faster and * simpler than memcpy. Use memcpy_erms when possible. */ SYM_FUNC_START_LOCAL(memcpy_erms) movq %rdi, %rax movq %rdx, %rcx rep movsb RET SYM_FUNC_END(memcpy_erms) SYM_FUNC_START_LOCAL(memcpy_orig) movq %rdi, %rax cmpq $0x20, %rdx jb .Lhandle_tail /* * We check whether memory false dependence could occur, * then jump to corresponding copy mode. */ cmp %dil, %sil jl .Lcopy_backward subq $0x20, %rdx .Lcopy_forward_loop: subq $0x20, %rdx /* * Move in blocks of 4x8 bytes: */ movq 0*8(%rsi), %r8 movq 1*8(%rsi), %r9 movq 2*8(%rsi), %r10 movq 3*8(%rsi), %r11 leaq 4*8(%rsi), %rsi movq %r8, 0*8(%rdi) movq %r9, 1*8(%rdi) movq %r10, 2*8(%rdi) movq %r11, 3*8(%rdi) leaq 4*8(%rdi), %rdi jae .Lcopy_forward_loop addl $0x20, %edx jmp .Lhandle_tail .Lcopy_backward: /* * Calculate copy position to tail. */ addq %rdx, %rsi addq %rdx, %rdi subq $0x20, %rdx /* * At most 3 ALU operations in one cycle, * so append NOPS in the same 16 bytes trunk. */ .p2align 4 .Lcopy_backward_loop: subq $0x20, %rdx movq -1*8(%rsi), %r8 movq -2*8(%rsi), %r9 movq -3*8(%rsi), %r10 movq -4*8(%rsi), %r11 leaq -4*8(%rsi), %rsi movq %r8, -1*8(%rdi) movq %r9, -2*8(%rdi) movq %r10, -3*8(%rdi) movq %r11, -4*8(%rdi) leaq -4*8(%rdi), %rdi jae .Lcopy_backward_loop /* * Calculate copy position to head. */ addl $0x20, %edx subq %rdx, %rsi subq %rdx, %rdi .Lhandle_tail: cmpl $16, %edx jb .Lless_16bytes /* * Move data from 16 bytes to 31 bytes. */ movq 0*8(%rsi), %r8 movq 1*8(%rsi), %r9 movq -2*8(%rsi, %rdx), %r10 movq -1*8(%rsi, %rdx), %r11 movq %r8, 0*8(%rdi) movq %r9, 1*8(%rdi) movq %r10, -2*8(%rdi, %rdx) movq %r11, -1*8(%rdi, %rdx) RET .p2align 4 .Lless_16bytes: cmpl $8, %edx jb .Lless_8bytes /* * Move data from 8 bytes to 15 bytes. */ movq 0*8(%rsi), %r8 movq -1*8(%rsi, %rdx), %r9 movq %r8, 0*8(%rdi) movq %r9, -1*8(%rdi, %rdx) RET .p2align 4 .Lless_8bytes: cmpl $4, %edx jb .Lless_3bytes /* * Move data from 4 bytes to 7 bytes. */ movl (%rsi), %ecx movl -4(%rsi, %rdx), %r8d movl %ecx, (%rdi) movl %r8d, -4(%rdi, %rdx) RET .p2align 4 .Lless_3bytes: subl $1, %edx jb .Lend /* * Move data from 1 bytes to 3 bytes. */ movzbl (%rsi), %ecx jz .Lstore_1byte movzbq 1(%rsi), %r8 movzbq (%rsi, %rdx), %r9 movb %r8b, 1(%rdi) movb %r9b, (%rdi, %rdx) .Lstore_1byte: movb %cl, (%rdi) .Lend: RET SYM_FUNC_END(memcpy_orig) .popsection
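One detail of memcpy_orig worth spelling out is the 8-to-15-byte tail: instead of a byte loop it does one 8-byte load from the start and one ending exactly at the last byte, letting the two stores overlap in the middle. A C sketch under those assumptions (memcpy stands in for the unaligned movq; names are illustrative):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Valid for 8 <= len <= 15, mirroring the "8 bytes to 15 bytes" path. */
static void copy_tail_8_to_15(void *dst, const void *src, size_t len)
{
	uint64_t head, tail;

	memcpy(&head, src, 8);				/* movq (%rsi),%r8 */
	memcpy(&tail, (const char *)src + len - 8, 8);	/* movq -8(%rsi,%rdx),%r9 */
	memcpy(dst, &head, 8);
	memcpy((char *)dst + len - 8, &tail, 8);	/* stores may overlap; harmless */
}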
aixcc-public/challenge-001-exemplar-source
9,550
arch/x86/lib/copy_user_64.S
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright 2008 Vitaly Mayatskikh <vmayatsk@redhat.com> * Copyright 2002 Andi Kleen, SuSE Labs. * * Functions to copy from and to user space. */ #include <linux/linkage.h> #include <asm/current.h> #include <asm/asm-offsets.h> #include <asm/thread_info.h> #include <asm/cpufeatures.h> #include <asm/alternative.h> #include <asm/asm.h> #include <asm/smap.h> #include <asm/export.h> #include <asm/trapnr.h> .macro ALIGN_DESTINATION /* check for bad alignment of destination */ movl %edi,%ecx andl $7,%ecx jz 102f /* already aligned */ subl $8,%ecx negl %ecx subl %ecx,%edx 100: movb (%rsi),%al 101: movb %al,(%rdi) incq %rsi incq %rdi decl %ecx jnz 100b 102: _ASM_EXTABLE_CPY(100b, .Lcopy_user_handle_align) _ASM_EXTABLE_CPY(101b, .Lcopy_user_handle_align) .endm /* * copy_user_generic_unrolled - memory copy with exception handling. * This version is for CPUs like P4 that don't have efficient micro * code for rep movsq * * Input: * rdi destination * rsi source * rdx count * * Output: * eax uncopied bytes or 0 if successful. */ SYM_FUNC_START(copy_user_generic_unrolled) ASM_STAC cmpl $8,%edx jb .Lcopy_user_short_string_bytes ALIGN_DESTINATION movl %edx,%ecx andl $63,%edx shrl $6,%ecx jz copy_user_short_string 1: movq (%rsi),%r8 2: movq 1*8(%rsi),%r9 3: movq 2*8(%rsi),%r10 4: movq 3*8(%rsi),%r11 5: movq %r8,(%rdi) 6: movq %r9,1*8(%rdi) 7: movq %r10,2*8(%rdi) 8: movq %r11,3*8(%rdi) 9: movq 4*8(%rsi),%r8 10: movq 5*8(%rsi),%r9 11: movq 6*8(%rsi),%r10 12: movq 7*8(%rsi),%r11 13: movq %r8,4*8(%rdi) 14: movq %r9,5*8(%rdi) 15: movq %r10,6*8(%rdi) 16: movq %r11,7*8(%rdi) leaq 64(%rsi),%rsi leaq 64(%rdi),%rdi decl %ecx jnz 1b jmp copy_user_short_string 30: shll $6,%ecx addl %ecx,%edx jmp .Lcopy_user_handle_tail _ASM_EXTABLE_CPY(1b, 30b) _ASM_EXTABLE_CPY(2b, 30b) _ASM_EXTABLE_CPY(3b, 30b) _ASM_EXTABLE_CPY(4b, 30b) _ASM_EXTABLE_CPY(5b, 30b) _ASM_EXTABLE_CPY(6b, 30b) _ASM_EXTABLE_CPY(7b, 30b) _ASM_EXTABLE_CPY(8b, 30b) _ASM_EXTABLE_CPY(9b, 30b) _ASM_EXTABLE_CPY(10b, 30b) _ASM_EXTABLE_CPY(11b, 30b) _ASM_EXTABLE_CPY(12b, 30b) _ASM_EXTABLE_CPY(13b, 30b) _ASM_EXTABLE_CPY(14b, 30b) _ASM_EXTABLE_CPY(15b, 30b) _ASM_EXTABLE_CPY(16b, 30b) SYM_FUNC_END(copy_user_generic_unrolled) EXPORT_SYMBOL(copy_user_generic_unrolled) /* Some CPUs run faster using the string copy instructions. * This is also a lot simpler. Use them when possible. * * Only 4GB of copy is supported. This shouldn't be a problem * because the kernel normally only writes from/to page sized chunks * even if user space passed a longer buffer. * And more would be dangerous because both Intel and AMD have * errata with rep movsq > 4GB. If someone feels the need to fix * this please consider this. * * Input: * rdi destination * rsi source * rdx count * * Output: * eax uncopied bytes or 0 if successful. */ SYM_FUNC_START(copy_user_generic_string) ASM_STAC cmpl $8,%edx jb 2f /* less than 8 bytes, go to byte copy loop */ ALIGN_DESTINATION movl %edx,%ecx shrl $3,%ecx andl $7,%edx 1: rep movsq 2: movl %edx,%ecx 3: rep movsb xorl %eax,%eax ASM_CLAC RET 11: leal (%rdx,%rcx,8),%ecx 12: movl %ecx,%edx /* ecx is zerorest also */ jmp .Lcopy_user_handle_tail _ASM_EXTABLE_CPY(1b, 11b) _ASM_EXTABLE_CPY(3b, 12b) SYM_FUNC_END(copy_user_generic_string) EXPORT_SYMBOL(copy_user_generic_string) /* * Some CPUs are adding enhanced REP MOVSB/STOSB instructions. * It's recommended to use enhanced REP MOVSB/STOSB if it's enabled. * * Input: * rdi destination * rsi source * rdx count * * Output: * eax uncopied bytes or 0 if successful. 
*/ SYM_FUNC_START(copy_user_enhanced_fast_string) ASM_STAC /* CPUs without FSRM should avoid rep movsb for short copies */ ALTERNATIVE "cmpl $64, %edx; jb copy_user_short_string", "", X86_FEATURE_FSRM movl %edx,%ecx 1: rep movsb xorl %eax,%eax ASM_CLAC RET 12: movl %ecx,%edx /* ecx is zerorest also */ jmp .Lcopy_user_handle_tail _ASM_EXTABLE_CPY(1b, 12b) SYM_FUNC_END(copy_user_enhanced_fast_string) EXPORT_SYMBOL(copy_user_enhanced_fast_string) /* * Try to copy last bytes and clear the rest if needed. * Since protection fault in copy_from/to_user is not a normal situation, * it is not necessary to optimize tail handling. * Don't try to copy the tail if machine check happened * * Input: * eax trap number written by ex_handler_copy() * rdi destination * rsi source * rdx count * * Output: * eax uncopied bytes or 0 if successful. */ SYM_CODE_START_LOCAL(.Lcopy_user_handle_tail) cmp $X86_TRAP_MC,%eax je 3f movl %edx,%ecx 1: rep movsb 2: mov %ecx,%eax ASM_CLAC RET 3: movl %edx,%eax ASM_CLAC RET _ASM_EXTABLE_CPY(1b, 2b) .Lcopy_user_handle_align: addl %ecx,%edx /* ecx is zerorest also */ jmp .Lcopy_user_handle_tail SYM_CODE_END(.Lcopy_user_handle_tail) /* * Finish memcpy of less than 64 bytes. #AC should already be set. * * Input: * rdi destination * rsi source * rdx count (< 64) * * Output: * eax uncopied bytes or 0 if successful. */ SYM_CODE_START_LOCAL(copy_user_short_string) movl %edx,%ecx andl $7,%edx shrl $3,%ecx jz .Lcopy_user_short_string_bytes 18: movq (%rsi),%r8 19: movq %r8,(%rdi) leaq 8(%rsi),%rsi leaq 8(%rdi),%rdi decl %ecx jnz 18b .Lcopy_user_short_string_bytes: andl %edx,%edx jz 23f movl %edx,%ecx 21: movb (%rsi),%al 22: movb %al,(%rdi) incq %rsi incq %rdi decl %ecx jnz 21b 23: xor %eax,%eax ASM_CLAC RET 40: leal (%rdx,%rcx,8),%edx jmp 60f 50: movl %ecx,%edx /* ecx is zerorest also */ 60: jmp .Lcopy_user_handle_tail _ASM_EXTABLE_CPY(18b, 40b) _ASM_EXTABLE_CPY(19b, 40b) _ASM_EXTABLE_CPY(21b, 50b) _ASM_EXTABLE_CPY(22b, 50b) SYM_CODE_END(copy_user_short_string) /* * copy_user_nocache - Uncached memory copy with exception handling * This will force destination out of cache for more performance. * * Note: Cached memory copy is used when destination or size is not * naturally aligned. That is: * - Require 8-byte alignment when size is 8 bytes or larger. * - Require 4-byte alignment when size is 4 bytes. 
*/ SYM_FUNC_START(__copy_user_nocache) ASM_STAC /* If size is less than 8 bytes, go to 4-byte copy */ cmpl $8,%edx jb .L_4b_nocache_copy_entry /* If destination is not 8-byte aligned, "cache" copy to align it */ ALIGN_DESTINATION /* Set 4x8-byte copy count and remainder */ movl %edx,%ecx andl $63,%edx shrl $6,%ecx jz .L_8b_nocache_copy_entry /* jump if count is 0 */ /* Perform 4x8-byte nocache loop-copy */ .L_4x8b_nocache_copy_loop: 1: movq (%rsi),%r8 2: movq 1*8(%rsi),%r9 3: movq 2*8(%rsi),%r10 4: movq 3*8(%rsi),%r11 5: movnti %r8,(%rdi) 6: movnti %r9,1*8(%rdi) 7: movnti %r10,2*8(%rdi) 8: movnti %r11,3*8(%rdi) 9: movq 4*8(%rsi),%r8 10: movq 5*8(%rsi),%r9 11: movq 6*8(%rsi),%r10 12: movq 7*8(%rsi),%r11 13: movnti %r8,4*8(%rdi) 14: movnti %r9,5*8(%rdi) 15: movnti %r10,6*8(%rdi) 16: movnti %r11,7*8(%rdi) leaq 64(%rsi),%rsi leaq 64(%rdi),%rdi decl %ecx jnz .L_4x8b_nocache_copy_loop /* Set 8-byte copy count and remainder */ .L_8b_nocache_copy_entry: movl %edx,%ecx andl $7,%edx shrl $3,%ecx jz .L_4b_nocache_copy_entry /* jump if count is 0 */ /* Perform 8-byte nocache loop-copy */ .L_8b_nocache_copy_loop: 20: movq (%rsi),%r8 21: movnti %r8,(%rdi) leaq 8(%rsi),%rsi leaq 8(%rdi),%rdi decl %ecx jnz .L_8b_nocache_copy_loop /* If no byte left, we're done */ .L_4b_nocache_copy_entry: andl %edx,%edx jz .L_finish_copy /* If destination is not 4-byte aligned, go to byte copy: */ movl %edi,%ecx andl $3,%ecx jnz .L_1b_cache_copy_entry /* Set 4-byte copy count (1 or 0) and remainder */ movl %edx,%ecx andl $3,%edx shrl $2,%ecx jz .L_1b_cache_copy_entry /* jump if count is 0 */ /* Perform 4-byte nocache copy: */ 30: movl (%rsi),%r8d 31: movnti %r8d,(%rdi) leaq 4(%rsi),%rsi leaq 4(%rdi),%rdi /* If no bytes left, we're done: */ andl %edx,%edx jz .L_finish_copy /* Perform byte "cache" loop-copy for the remainder */ .L_1b_cache_copy_entry: movl %edx,%ecx .L_1b_cache_copy_loop: 40: movb (%rsi),%al 41: movb %al,(%rdi) incq %rsi incq %rdi decl %ecx jnz .L_1b_cache_copy_loop /* Finished copying; fence the prior stores */ .L_finish_copy: xorl %eax,%eax ASM_CLAC sfence RET .L_fixup_4x8b_copy: shll $6,%ecx addl %ecx,%edx jmp .L_fixup_handle_tail .L_fixup_8b_copy: lea (%rdx,%rcx,8),%rdx jmp .L_fixup_handle_tail .L_fixup_4b_copy: lea (%rdx,%rcx,4),%rdx jmp .L_fixup_handle_tail .L_fixup_1b_copy: movl %ecx,%edx .L_fixup_handle_tail: sfence jmp .Lcopy_user_handle_tail _ASM_EXTABLE_CPY(1b, .L_fixup_4x8b_copy) _ASM_EXTABLE_CPY(2b, .L_fixup_4x8b_copy) _ASM_EXTABLE_CPY(3b, .L_fixup_4x8b_copy) _ASM_EXTABLE_CPY(4b, .L_fixup_4x8b_copy) _ASM_EXTABLE_CPY(5b, .L_fixup_4x8b_copy) _ASM_EXTABLE_CPY(6b, .L_fixup_4x8b_copy) _ASM_EXTABLE_CPY(7b, .L_fixup_4x8b_copy) _ASM_EXTABLE_CPY(8b, .L_fixup_4x8b_copy) _ASM_EXTABLE_CPY(9b, .L_fixup_4x8b_copy) _ASM_EXTABLE_CPY(10b, .L_fixup_4x8b_copy) _ASM_EXTABLE_CPY(11b, .L_fixup_4x8b_copy) _ASM_EXTABLE_CPY(12b, .L_fixup_4x8b_copy) _ASM_EXTABLE_CPY(13b, .L_fixup_4x8b_copy) _ASM_EXTABLE_CPY(14b, .L_fixup_4x8b_copy) _ASM_EXTABLE_CPY(15b, .L_fixup_4x8b_copy) _ASM_EXTABLE_CPY(16b, .L_fixup_4x8b_copy) _ASM_EXTABLE_CPY(20b, .L_fixup_8b_copy) _ASM_EXTABLE_CPY(21b, .L_fixup_8b_copy) _ASM_EXTABLE_CPY(30b, .L_fixup_4b_copy) _ASM_EXTABLE_CPY(31b, .L_fixup_4b_copy) _ASM_EXTABLE_CPY(40b, .L_fixup_1b_copy) _ASM_EXTABLE_CPY(41b, .L_fixup_1b_copy) SYM_FUNC_END(__copy_user_nocache) EXPORT_SYMBOL(__copy_user_nocache)
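The user-copy routines all report the number of uncopied bytes, so each exception fixup reduces to reconstructing that count from the loop registers. For the 64-byte unrolled loop the fixup at label 30: is just ecx*64 + edx; a trivial C restatement (illustrative name):

/* Remaining bytes when a fault interrupts the 4x8-byte unrolled loop:
 * %ecx holds the 64-byte blocks not yet completed, %edx the sub-block
 * remainder established before the loop. */
static unsigned int unrolled_fixup_remaining(unsigned int blocks_left,
					     unsigned int tail_bytes)
{
	return (blocks_left << 6) + tail_bytes;	/* shll $6,%ecx ; addl %ecx,%edx */
}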
aixcc-public/challenge-001-exemplar-source
7,858
arch/x86/lib/retpoline.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <linux/stringify.h> #include <linux/linkage.h> #include <asm/dwarf2.h> #include <asm/cpufeatures.h> #include <asm/alternative.h> #include <asm/export.h> #include <asm/nospec-branch.h> #include <asm/unwind_hints.h> #include <asm/frame.h> #include <asm/nops.h> .section .text..__x86.indirect_thunk .macro RETPOLINE reg ANNOTATE_INTRA_FUNCTION_CALL call .Ldo_rop_\@ .Lspec_trap_\@: UNWIND_HINT_EMPTY pause lfence jmp .Lspec_trap_\@ .Ldo_rop_\@: mov %\reg, (%_ASM_SP) UNWIND_HINT_FUNC RET .endm .macro THUNK reg .align RETPOLINE_THUNK_SIZE SYM_INNER_LABEL(__x86_indirect_thunk_\reg, SYM_L_GLOBAL) UNWIND_HINT_EMPTY ANNOTATE_NOENDBR ALTERNATIVE_2 __stringify(RETPOLINE \reg), \ __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg; int3), X86_FEATURE_RETPOLINE_LFENCE, \ __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), ALT_NOT(X86_FEATURE_RETPOLINE) .endm /* * Despite being an assembler file we can't just use .irp here * because __KSYM_DEPS__ only uses the C preprocessor and would * only see one instance of "__x86_indirect_thunk_\reg" rather * than one per register with the correct names. So we do it * the simple and nasty way... * * Worse, you can only have a single EXPORT_SYMBOL per line, * and CPP can't insert newlines, so we have to repeat everything * at least twice. */ #define __EXPORT_THUNK(sym) _ASM_NOKPROBE(sym); EXPORT_SYMBOL(sym) #define EXPORT_THUNK(reg) __EXPORT_THUNK(__x86_indirect_thunk_ ## reg) .align RETPOLINE_THUNK_SIZE SYM_CODE_START(__x86_indirect_thunk_array) #define GEN(reg) THUNK reg #include <asm/GEN-for-each-reg.h> #undef GEN .align RETPOLINE_THUNK_SIZE SYM_CODE_END(__x86_indirect_thunk_array) #define GEN(reg) EXPORT_THUNK(reg) #include <asm/GEN-for-each-reg.h> #undef GEN /* * This function name is magical and is used by -mfunction-return=thunk-extern * for the compiler to generate JMPs to it. */ #ifdef CONFIG_RETHUNK /* * srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at * special addresses: * * - srso_alias_untrain_ret() is 2M aligned * - srso_alias_safe_ret() is also in the same 2M page but bits 2, 8, 14 * and 20 in its virtual address are set (while those bits in the * srso_alias_untrain_ret() function are cleared). * * This guarantees that those two addresses will alias in the branch * target buffer of Zen3/4 generations, leading to any potential * poisoned entries at that BTB slot to get evicted. * * As a result, srso_alias_safe_ret() becomes a safe return. */ #ifdef CONFIG_CPU_SRSO .section .text..__x86.rethunk_untrain SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) UNWIND_HINT_FUNC ANNOTATE_NOENDBR ASM_NOP2 lfence jmp srso_alias_return_thunk SYM_FUNC_END(srso_alias_untrain_ret) __EXPORT_THUNK(srso_alias_untrain_ret) .section .text..__x86.rethunk_safe #else /* dummy definition for alternatives */ SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) ANNOTATE_UNRET_SAFE ret int3 SYM_FUNC_END(srso_alias_untrain_ret) #endif SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE) lea 8(%_ASM_SP), %_ASM_SP UNWIND_HINT_FUNC ANNOTATE_UNRET_SAFE ret int3 SYM_FUNC_END(srso_alias_safe_ret) .section .text..__x86.return_thunk SYM_CODE_START(srso_alias_return_thunk) UNWIND_HINT_FUNC ANNOTATE_NOENDBR call srso_alias_safe_ret ud2 SYM_CODE_END(srso_alias_return_thunk) /* * Some generic notes on the untraining sequences: * * They are interchangeable when it comes to flushing potentially wrong * RET predictions from the BTB. 
* * The SRSO Zen1/2 (MOVABS) untraining sequence is longer than the * Retbleed sequence because the return sequence done there * (srso_safe_ret()) is longer and the return sequence must fully nest * (end before) the untraining sequence. Therefore, the untraining * sequence must fully overlap the return sequence. * * Regarding alignment - the instructions which need to be untrained, * must all start at a cacheline boundary for Zen1/2 generations. That * is, instruction sequences starting at srso_safe_ret() and * the respective instruction sequences at retbleed_return_thunk() * must start at a cacheline boundary. */ /* * Safety details here pertain to the AMD Zen{1,2} microarchitecture: * 1) The RET at retbleed_return_thunk must be on a 64 byte boundary, for * alignment within the BTB. * 2) The instruction at retbleed_untrain_ret must contain, and not * end with, the 0xc3 byte of the RET. * 3) STIBP must be enabled, or SMT disabled, to prevent the sibling thread * from re-poisioning the BTB prediction. */ .align 64 .skip 64 - (retbleed_return_thunk - retbleed_untrain_ret), 0xcc SYM_START(retbleed_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) ANNOTATE_NOENDBR /* * As executed from retbleed_untrain_ret, this is: * * TEST $0xcc, %bl * LFENCE * JMP retbleed_return_thunk * * Executing the TEST instruction has a side effect of evicting any BTB * prediction (potentially attacker controlled) attached to the RET, as * retbleed_return_thunk + 1 isn't an instruction boundary at the moment. */ .byte 0xf6 /* * As executed from retbleed_return_thunk, this is a plain RET. * * As part of the TEST above, RET is the ModRM byte, and INT3 the imm8. * * We subsequently jump backwards and architecturally execute the RET. * This creates a correct BTB prediction (type=ret), but in the * meantime we suffer Straight Line Speculation (because the type was * no branch) which is halted by the INT3. * * With SMT enabled and STIBP active, a sibling thread cannot poison * RET's prediction to a type of its choice, but can evict the * prediction due to competitive sharing. If the prediction is * evicted, retbleed_return_thunk will suffer Straight Line Speculation * which will be contained safely by the INT3. */ SYM_INNER_LABEL(retbleed_return_thunk, SYM_L_GLOBAL) ret int3 SYM_CODE_END(retbleed_return_thunk) /* * Ensure the TEST decoding / BTB invalidation is complete. */ lfence /* * Jump back and execute the RET in the middle of the TEST instruction. * INT3 is for SLS protection. */ jmp retbleed_return_thunk int3 SYM_FUNC_END(retbleed_untrain_ret) __EXPORT_THUNK(retbleed_untrain_ret) /* * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret() * above. On kernel entry, srso_untrain_ret() is executed which is a * * movabs $0xccccc30824648d48,%rax * * and when the return thunk executes the inner label srso_safe_ret() * later, it is a stack manipulation and a RET which is mispredicted and * thus a "safe" one to use. */ .align 64 .skip 64 - (srso_safe_ret - srso_untrain_ret), 0xcc SYM_START(srso_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) ANNOTATE_NOENDBR .byte 0x48, 0xb8 /* * This forces the function return instruction to speculate into a trap * (UD2 in srso_return_thunk() below). This RET will then mispredict * and execution will continue at the return site read from the top of * the stack. 
*/ SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL) lea 8(%_ASM_SP), %_ASM_SP ret int3 int3 /* end of movabs */ lfence call srso_safe_ret ud2 SYM_CODE_END(srso_safe_ret) SYM_FUNC_END(srso_untrain_ret) __EXPORT_THUNK(srso_untrain_ret) SYM_CODE_START(srso_return_thunk) UNWIND_HINT_FUNC ANNOTATE_NOENDBR call srso_safe_ret ud2 SYM_CODE_END(srso_return_thunk) SYM_FUNC_START(entry_untrain_ret) ALTERNATIVE_2 "jmp retbleed_untrain_ret", \ "jmp srso_untrain_ret", X86_FEATURE_SRSO, \ "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS SYM_FUNC_END(entry_untrain_ret) __EXPORT_THUNK(entry_untrain_ret) SYM_CODE_START(__x86_return_thunk) UNWIND_HINT_FUNC ANNOTATE_NOENDBR ANNOTATE_UNRET_SAFE ret int3 SYM_CODE_END(__x86_return_thunk) EXPORT_SYMBOL(__x86_return_thunk) #endif /* CONFIG_RETHUNK */
aixcc-public/challenge-001-exemplar-source
3,747
arch/x86/lib/clear_page_64.S
/* SPDX-License-Identifier: GPL-2.0-only */ #include <linux/linkage.h> #include <asm/asm.h> #include <asm/export.h> /* * Most CPUs support enhanced REP MOVSB/STOSB instructions. It is * recommended to use this when possible and we do use them by default. * If enhanced REP MOVSB/STOSB is not available, try to use fast string. * Otherwise, use original. */ /* * Zero a page. * %rdi - page */ SYM_FUNC_START(clear_page_rep) movl $4096/8,%ecx xorl %eax,%eax rep stosq RET SYM_FUNC_END(clear_page_rep) EXPORT_SYMBOL_GPL(clear_page_rep) SYM_FUNC_START(clear_page_orig) xorl %eax,%eax movl $4096/64,%ecx .p2align 4 .Lloop: decl %ecx #define PUT(x) movq %rax,x*8(%rdi) movq %rax,(%rdi) PUT(1) PUT(2) PUT(3) PUT(4) PUT(5) PUT(6) PUT(7) leaq 64(%rdi),%rdi jnz .Lloop nop RET SYM_FUNC_END(clear_page_orig) EXPORT_SYMBOL_GPL(clear_page_orig) SYM_FUNC_START(clear_page_erms) movl $4096,%ecx xorl %eax,%eax rep stosb RET SYM_FUNC_END(clear_page_erms) EXPORT_SYMBOL_GPL(clear_page_erms) /* * Default clear user-space. * Input: * rdi destination * rcx count * * Output: * rcx: uncleared bytes or 0 if successful. */ SYM_FUNC_START(clear_user_original) /* * Copy only the lower 32 bits of size as that is enough to handle the rest bytes, * i.e., no need for a 'q' suffix and thus a REX prefix. */ mov %ecx,%eax shr $3,%rcx jz .Lrest_bytes # do the qwords first .p2align 4 .Lqwords: movq $0,(%rdi) lea 8(%rdi),%rdi dec %rcx jnz .Lqwords .Lrest_bytes: and $7, %eax jz .Lexit # now do the rest bytes .Lbytes: movb $0,(%rdi) inc %rdi dec %eax jnz .Lbytes .Lexit: /* * %rax still needs to be cleared in the exception case because this function is called * from inline asm and the compiler expects %rax to be zero when exiting the inline asm, * in case it might reuse it somewhere. */ xor %eax,%eax RET .Lqwords_exception: # convert remaining qwords back into bytes to return to caller shl $3, %rcx and $7, %eax add %rax,%rcx jmp .Lexit .Lbytes_exception: mov %eax,%ecx jmp .Lexit _ASM_EXTABLE_UA(.Lqwords, .Lqwords_exception) _ASM_EXTABLE_UA(.Lbytes, .Lbytes_exception) SYM_FUNC_END(clear_user_original) EXPORT_SYMBOL(clear_user_original) /* * Alternative clear user-space when CPU feature X86_FEATURE_REP_GOOD is * present. * Input: * rdi destination * rcx count * * Output: * rcx: uncleared bytes or 0 if successful. */ SYM_FUNC_START(clear_user_rep_good) # call the original thing for less than a cacheline cmp $64, %rcx jb clear_user_original .Lprep: # copy lower 32-bits for rest bytes mov %ecx, %edx shr $3, %rcx jz .Lrep_good_rest_bytes .Lrep_good_qwords: rep stosq .Lrep_good_rest_bytes: and $7, %edx jz .Lrep_good_exit mov %edx, %ecx .Lrep_good_bytes: rep stosb .Lrep_good_exit: # see .Lexit comment above xor %eax, %eax RET .Lrep_good_qwords_exception: # convert remaining qwords back into bytes to return to caller shl $3, %rcx and $7, %edx add %rdx, %rcx jmp .Lrep_good_exit _ASM_EXTABLE_UA(.Lrep_good_qwords, .Lrep_good_qwords_exception) _ASM_EXTABLE_UA(.Lrep_good_bytes, .Lrep_good_exit) SYM_FUNC_END(clear_user_rep_good) EXPORT_SYMBOL(clear_user_rep_good) /* * Alternative clear user-space when CPU feature X86_FEATURE_ERMS is present. * Input: * rdi destination * rcx count * * Output: * rcx: uncleared bytes or 0 if successful. * */ SYM_FUNC_START(clear_user_erms) # call the original thing for less than a cacheline cmp $64, %rcx jb clear_user_original .Lerms_bytes: rep stosb .Lerms_exit: xorl %eax,%eax RET _ASM_EXTABLE_UA(.Lerms_bytes, .Lerms_exit) SYM_FUNC_END(clear_user_erms) EXPORT_SYMBOL(clear_user_erms)
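A compact C model of clear_user_original's structure (not the kernel function): clear size/8 qwords, then the size%8 trailing bytes, and return how many bytes were left unwritten -- zero on the fault-free path shown here. The real routine's exception fixups convert the loop counters back into that byte count when a fault hits either loop.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static size_t clear_user_model(unsigned char *dst, size_t size)
{
	static const uint64_t zero = 0;
	size_t qwords = size >> 3;
	size_t bytes  = size & 7;

	while (qwords--) {
		memcpy(dst, &zero, 8);	/* movq $0,(%rdi) */
		dst += 8;
	}
	while (bytes--)
		*dst++ = 0;		/* movb $0,(%rdi) */

	return 0;			/* nothing left uncleared */
}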
aixcc-public/challenge-001-exemplar-source
2,817
arch/x86/lib/memset_64.S
/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright 2002 Andi Kleen, SuSE Labs */ #include <linux/linkage.h> #include <asm/cpufeatures.h> #include <asm/alternative.h> #include <asm/export.h> /* * ISO C memset - set a memory block to a byte value. This function uses fast * string to get better performance than the original function. The code is * simpler and shorter than the original function as well. * * rdi destination * rsi value (char) * rdx count (bytes) * * rax original destination */ SYM_FUNC_START(__memset) /* * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended * to use it when possible. If not available, use fast string instructions. * * Otherwise, use original memset function. */ ALTERNATIVE_2 "jmp memset_orig", "", X86_FEATURE_REP_GOOD, \ "jmp memset_erms", X86_FEATURE_ERMS movq %rdi,%r9 movq %rdx,%rcx andl $7,%edx shrq $3,%rcx /* expand byte value */ movzbl %sil,%esi movabs $0x0101010101010101,%rax imulq %rsi,%rax rep stosq movl %edx,%ecx rep stosb movq %r9,%rax RET SYM_FUNC_END(__memset) EXPORT_SYMBOL(__memset) SYM_FUNC_ALIAS_WEAK(memset, __memset) EXPORT_SYMBOL(memset) /* * ISO C memset - set a memory block to a byte value. This function uses * enhanced rep stosb to override the fast string function. * The code is simpler and shorter than the fast string function as well. * * rdi destination * rsi value (char) * rdx count (bytes) * * rax original destination */ SYM_FUNC_START_LOCAL(memset_erms) movq %rdi,%r9 movb %sil,%al movq %rdx,%rcx rep stosb movq %r9,%rax RET SYM_FUNC_END(memset_erms) SYM_FUNC_START_LOCAL(memset_orig) movq %rdi,%r10 /* expand byte value */ movzbl %sil,%ecx movabs $0x0101010101010101,%rax imulq %rcx,%rax /* align dst */ movl %edi,%r9d andl $7,%r9d jnz .Lbad_alignment .Lafter_bad_alignment: movq %rdx,%rcx shrq $6,%rcx jz .Lhandle_tail .p2align 4 .Lloop_64: decq %rcx movq %rax,(%rdi) movq %rax,8(%rdi) movq %rax,16(%rdi) movq %rax,24(%rdi) movq %rax,32(%rdi) movq %rax,40(%rdi) movq %rax,48(%rdi) movq %rax,56(%rdi) leaq 64(%rdi),%rdi jnz .Lloop_64 /* Handle tail in loops. The loops should be faster than hard to predict jump tables. */ .p2align 4 .Lhandle_tail: movl %edx,%ecx andl $63&(~7),%ecx jz .Lhandle_7 shrl $3,%ecx .p2align 4 .Lloop_8: decl %ecx movq %rax,(%rdi) leaq 8(%rdi),%rdi jnz .Lloop_8 .Lhandle_7: andl $7,%edx jz .Lende .p2align 4 .Lloop_1: decl %edx movb %al,(%rdi) leaq 1(%rdi),%rdi jnz .Lloop_1 .Lende: movq %r10,%rax RET .Lbad_alignment: cmpq $7,%rdx jbe .Lhandle_7 movq %rax,(%rdi) /* unaligned store */ movq $8,%r8 subq %r9,%r8 addq %r8,%rdi subq %r8,%rdx jmp .Lafter_bad_alignment .Lfinal: SYM_FUNC_END(memset_orig)
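The one non-obvious step in __memset/memset_orig is expanding the fill byte into a full qword so the main loop can store eight bytes at a time; multiplying by 0x0101010101010101 replicates the byte into every lane. A small C sketch of that structure (illustrative, not the kernel routine):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void memset_model(void *dst, unsigned char c, size_t n)
{
	uint64_t pattern = (uint64_t)c * 0x0101010101010101ULL;	/* movzbl + imulq */
	unsigned char *p = dst;

	for (; n >= 8; n -= 8, p += 8)
		memcpy(p, &pattern, 8);		/* rep stosq / movq stores */
	while (n--)
		*p++ = c;			/* rep stosb tail */
}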
aixcc-public/challenge-001-exemplar-source
1,698
arch/x86/lib/msr-reg.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <linux/linkage.h> #include <linux/errno.h> #include <asm/asm.h> #include <asm/msr.h> #ifdef CONFIG_X86_64 /* * int {rdmsr,wrmsr}_safe_regs(u32 gprs[8]); * * reg layout: u32 gprs[eax, ecx, edx, ebx, esp, ebp, esi, edi] * */ .macro op_safe_regs op SYM_FUNC_START(\op\()_safe_regs) pushq %rbx pushq %r12 movq %rdi, %r10 /* Save pointer */ xorl %r11d, %r11d /* Return value */ movl (%rdi), %eax movl 4(%rdi), %ecx movl 8(%rdi), %edx movl 12(%rdi), %ebx movl 20(%rdi), %r12d movl 24(%rdi), %esi movl 28(%rdi), %edi 1: \op 2: movl %eax, (%r10) movl %r11d, %eax /* Return value */ movl %ecx, 4(%r10) movl %edx, 8(%r10) movl %ebx, 12(%r10) movl %r12d, 20(%r10) movl %esi, 24(%r10) movl %edi, 28(%r10) popq %r12 popq %rbx RET 3: movl $-EIO, %r11d jmp 2b _ASM_EXTABLE(1b, 3b) SYM_FUNC_END(\op\()_safe_regs) .endm #else /* X86_32 */ .macro op_safe_regs op SYM_FUNC_START(\op\()_safe_regs) pushl %ebx pushl %ebp pushl %esi pushl %edi pushl $0 /* Return value */ pushl %eax movl 4(%eax), %ecx movl 8(%eax), %edx movl 12(%eax), %ebx movl 20(%eax), %ebp movl 24(%eax), %esi movl 28(%eax), %edi movl (%eax), %eax 1: \op 2: pushl %eax movl 4(%esp), %eax popl (%eax) addl $4, %esp movl %ecx, 4(%eax) movl %edx, 8(%eax) movl %ebx, 12(%eax) movl %ebp, 20(%eax) movl %esi, 24(%eax) movl %edi, 28(%eax) popl %eax popl %edi popl %esi popl %ebp popl %ebx RET 3: movl $-EIO, 4(%esp) jmp 2b _ASM_EXTABLE(1b, 3b) SYM_FUNC_END(\op\()_safe_regs) .endm #endif op_safe_regs rdmsr op_safe_regs wrmsr
aixcc-public/challenge-001-exemplar-source
9,059
arch/x86/lib/checksum_32.S
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * IP/TCP/UDP checksumming routines * * Authors: Jorge Cwik, <jorge@laser.satlink.net> * Arnt Gulbrandsen, <agulbra@nvg.unit.no> * Tom May, <ftom@netcom.com> * Pentium Pro/II routines: * Alexander Kjeldaas <astor@guardian.no> * Finn Arne Gangstad <finnag@guardian.no> * Lots of code moved from tcp.c and ip.c; see those files * for more names. * * Changes: Ingo Molnar, converted csum_partial_copy() to 2.1 exception * handling. * Andi Kleen, add zeroing on error * converted to pure assembler */ #include <linux/linkage.h> #include <asm/errno.h> #include <asm/asm.h> #include <asm/export.h> #include <asm/nospec-branch.h> /* * computes a partial checksum, e.g. for TCP/UDP fragments */ /* unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum) */ .text #ifndef CONFIG_X86_USE_PPRO_CHECKSUM /* * Experiments with Ethernet and SLIP connections show that buff * is aligned on either a 2-byte or 4-byte boundary. We get at * least a twofold speedup on 486 and Pentium if it is 4-byte aligned. * Fortunately, it is easy to convert 2-byte alignment to 4-byte * alignment for the unrolled loop. */ SYM_FUNC_START(csum_partial) pushl %esi pushl %ebx movl 20(%esp),%eax # Function arg: unsigned int sum movl 16(%esp),%ecx # Function arg: int len movl 12(%esp),%esi # Function arg: unsigned char *buff testl $3, %esi # Check alignment. jz 2f # Jump if alignment is ok. testl $1, %esi # Check alignment. jz 10f # Jump if alignment is boundary of 2 bytes. # buf is odd dec %ecx jl 8f movzbl (%esi), %ebx adcl %ebx, %eax roll $8, %eax inc %esi testl $2, %esi jz 2f 10: subl $2, %ecx # Alignment uses up two bytes. jae 1f # Jump if we had at least two bytes. addl $2, %ecx # ecx was < 2. Deal with it. 
jmp 4f 1: movw (%esi), %bx addl $2, %esi addw %bx, %ax adcl $0, %eax 2: movl %ecx, %edx shrl $5, %ecx jz 2f testl %esi, %esi 1: movl (%esi), %ebx adcl %ebx, %eax movl 4(%esi), %ebx adcl %ebx, %eax movl 8(%esi), %ebx adcl %ebx, %eax movl 12(%esi), %ebx adcl %ebx, %eax movl 16(%esi), %ebx adcl %ebx, %eax movl 20(%esi), %ebx adcl %ebx, %eax movl 24(%esi), %ebx adcl %ebx, %eax movl 28(%esi), %ebx adcl %ebx, %eax lea 32(%esi), %esi dec %ecx jne 1b adcl $0, %eax 2: movl %edx, %ecx andl $0x1c, %edx je 4f shrl $2, %edx # This clears CF 3: adcl (%esi), %eax lea 4(%esi), %esi dec %edx jne 3b adcl $0, %eax 4: andl $3, %ecx jz 7f cmpl $2, %ecx jb 5f movw (%esi),%cx leal 2(%esi),%esi je 6f shll $16,%ecx 5: movb (%esi),%cl 6: addl %ecx,%eax adcl $0, %eax 7: testb $1, 12(%esp) jz 8f roll $8, %eax 8: popl %ebx popl %esi RET SYM_FUNC_END(csum_partial) #else /* Version for PentiumII/PPro */ SYM_FUNC_START(csum_partial) pushl %esi pushl %ebx movl 20(%esp),%eax # Function arg: unsigned int sum movl 16(%esp),%ecx # Function arg: int len movl 12(%esp),%esi # Function arg: const unsigned char *buf testl $3, %esi jnz 25f 10: movl %ecx, %edx movl %ecx, %ebx andl $0x7c, %ebx shrl $7, %ecx addl %ebx,%esi shrl $2, %ebx negl %ebx lea 45f(%ebx,%ebx,2), %ebx testl %esi, %esi JMP_NOSPEC ebx # Handle 2-byte-aligned regions 20: addw (%esi), %ax lea 2(%esi), %esi adcl $0, %eax jmp 10b 25: testl $1, %esi jz 30f # buf is odd dec %ecx jl 90f movzbl (%esi), %ebx addl %ebx, %eax adcl $0, %eax roll $8, %eax inc %esi testl $2, %esi jz 10b 30: subl $2, %ecx ja 20b je 32f addl $2, %ecx jz 80f movzbl (%esi),%ebx # csumming 1 byte, 2-aligned addl %ebx, %eax adcl $0, %eax jmp 80f 32: addw (%esi), %ax # csumming 2 bytes, 2-aligned adcl $0, %eax jmp 80f 40: addl -128(%esi), %eax adcl -124(%esi), %eax adcl -120(%esi), %eax adcl -116(%esi), %eax adcl -112(%esi), %eax adcl -108(%esi), %eax adcl -104(%esi), %eax adcl -100(%esi), %eax adcl -96(%esi), %eax adcl -92(%esi), %eax adcl -88(%esi), %eax adcl -84(%esi), %eax adcl -80(%esi), %eax adcl -76(%esi), %eax adcl -72(%esi), %eax adcl -68(%esi), %eax adcl -64(%esi), %eax adcl -60(%esi), %eax adcl -56(%esi), %eax adcl -52(%esi), %eax adcl -48(%esi), %eax adcl -44(%esi), %eax adcl -40(%esi), %eax adcl -36(%esi), %eax adcl -32(%esi), %eax adcl -28(%esi), %eax adcl -24(%esi), %eax adcl -20(%esi), %eax adcl -16(%esi), %eax adcl -12(%esi), %eax adcl -8(%esi), %eax adcl -4(%esi), %eax 45: lea 128(%esi), %esi adcl $0, %eax dec %ecx jge 40b movl %edx, %ecx 50: andl $3, %ecx jz 80f # Handle the last 1-3 bytes without jumping notl %ecx # 1->2, 2->1, 3->0, higher bits are masked movl $0xffffff,%ebx # by the shll and shrl instructions shll $3,%ecx shrl %cl,%ebx andl -128(%esi),%ebx # esi is 4-aligned so should be ok addl %ebx,%eax adcl $0,%eax 80: testb $1, 12(%esp) jz 90f roll $8, %eax 90: popl %ebx popl %esi RET SYM_FUNC_END(csum_partial) #endif EXPORT_SYMBOL(csum_partial) /* unsigned int csum_partial_copy_generic (const char *src, char *dst, int len) */ /* * Copy from ds while checksumming, otherwise like csum_partial */ #define EXC(y...) \ 9999: y; \ _ASM_EXTABLE_TYPE(9999b, 7f, EX_TYPE_UACCESS | EX_FLAG_CLEAR_AX) #ifndef CONFIG_X86_USE_PPRO_CHECKSUM #define ARGBASE 16 #define FP 12 SYM_FUNC_START(csum_partial_copy_generic) subl $4,%esp pushl %edi pushl %esi pushl %ebx movl ARGBASE+12(%esp),%ecx # len movl ARGBASE+4(%esp),%esi # src movl ARGBASE+8(%esp),%edi # dst movl $-1, %eax # sum testl $2, %edi # Check alignment. jz 2f # Jump if alignment is ok. subl $2, %ecx # Alignment uses up two bytes. 
jae 1f # Jump if we had at least two bytes. addl $2, %ecx # ecx was < 2. Deal with it. jmp 4f EXC(1: movw (%esi), %bx ) addl $2, %esi EXC( movw %bx, (%edi) ) addl $2, %edi addw %bx, %ax adcl $0, %eax 2: movl %ecx, FP(%esp) shrl $5, %ecx jz 2f testl %esi, %esi # what's wrong with clc? EXC(1: movl (%esi), %ebx ) EXC( movl 4(%esi), %edx ) adcl %ebx, %eax EXC( movl %ebx, (%edi) ) adcl %edx, %eax EXC( movl %edx, 4(%edi) ) EXC( movl 8(%esi), %ebx ) EXC( movl 12(%esi), %edx ) adcl %ebx, %eax EXC( movl %ebx, 8(%edi) ) adcl %edx, %eax EXC( movl %edx, 12(%edi) ) EXC( movl 16(%esi), %ebx ) EXC( movl 20(%esi), %edx ) adcl %ebx, %eax EXC( movl %ebx, 16(%edi) ) adcl %edx, %eax EXC( movl %edx, 20(%edi) ) EXC( movl 24(%esi), %ebx ) EXC( movl 28(%esi), %edx ) adcl %ebx, %eax EXC( movl %ebx, 24(%edi) ) adcl %edx, %eax EXC( movl %edx, 28(%edi) ) lea 32(%esi), %esi lea 32(%edi), %edi dec %ecx jne 1b adcl $0, %eax 2: movl FP(%esp), %edx movl %edx, %ecx andl $0x1c, %edx je 4f shrl $2, %edx # This clears CF EXC(3: movl (%esi), %ebx ) adcl %ebx, %eax EXC( movl %ebx, (%edi) ) lea 4(%esi), %esi lea 4(%edi), %edi dec %edx jne 3b adcl $0, %eax 4: andl $3, %ecx jz 7f cmpl $2, %ecx jb 5f EXC( movw (%esi), %cx ) leal 2(%esi), %esi EXC( movw %cx, (%edi) ) leal 2(%edi), %edi je 6f shll $16,%ecx EXC(5: movb (%esi), %cl ) EXC( movb %cl, (%edi) ) 6: addl %ecx, %eax adcl $0, %eax 7: popl %ebx popl %esi popl %edi popl %ecx # equivalent to addl $4,%esp RET SYM_FUNC_END(csum_partial_copy_generic) #else /* Version for PentiumII/PPro */ #define ROUND1(x) \ EXC(movl x(%esi), %ebx ) ; \ addl %ebx, %eax ; \ EXC(movl %ebx, x(%edi) ) ; #define ROUND(x) \ EXC(movl x(%esi), %ebx ) ; \ adcl %ebx, %eax ; \ EXC(movl %ebx, x(%edi) ) ; #define ARGBASE 12 SYM_FUNC_START(csum_partial_copy_generic) pushl %ebx pushl %edi pushl %esi movl ARGBASE+4(%esp),%esi #src movl ARGBASE+8(%esp),%edi #dst movl ARGBASE+12(%esp),%ecx #len movl $-1, %eax #sum # movl %ecx, %edx movl %ecx, %ebx movl %esi, %edx shrl $6, %ecx andl $0x3c, %ebx negl %ebx subl %ebx, %esi subl %ebx, %edi lea -1(%esi),%edx andl $-32,%edx lea 3f(%ebx,%ebx), %ebx testl %esi, %esi JMP_NOSPEC ebx 1: addl $64,%esi addl $64,%edi EXC(movb -32(%edx),%bl) ; EXC(movb (%edx),%bl) ROUND1(-64) ROUND(-60) ROUND(-56) ROUND(-52) ROUND (-48) ROUND(-44) ROUND(-40) ROUND(-36) ROUND (-32) ROUND(-28) ROUND(-24) ROUND(-20) ROUND (-16) ROUND(-12) ROUND(-8) ROUND(-4) 3: adcl $0,%eax addl $64, %edx dec %ecx jge 1b 4: movl ARGBASE+12(%esp),%edx #len andl $3, %edx jz 7f cmpl $2, %edx jb 5f EXC( movw (%esi), %dx ) leal 2(%esi), %esi EXC( movw %dx, (%edi) ) leal 2(%edi), %edi je 6f shll $16,%edx 5: EXC( movb (%esi), %dl ) EXC( movb %dl, (%edi) ) 6: addl %edx, %eax adcl $0, %eax 7: popl %esi popl %edi popl %ebx RET SYM_FUNC_END(csum_partial_copy_generic) #undef ROUND #undef ROUND1 #endif EXPORT_SYMBOL(csum_partial_copy_generic)
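What csum_partial accumulates is the Internet-checksum partial sum: 32-bit words added with end-around carry. A portable C model under simplifying assumptions (4-byte-aligned buffer, word-multiple length; the odd-address, trailing-byte and byte-rotate cases handled above are omitted). The intermediate 32-bit value need not be bit-identical to the asm's, but it is equivalent once folded down to 16 bits.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static uint32_t csum_partial_model(const void *buf, size_t len, uint32_t sum)
{
	const unsigned char *p = buf;
	uint64_t acc = sum;

	for (; len >= 4; len -= 4, p += 4) {
		uint32_t w;

		memcpy(&w, p, 4);
		acc += w;			/* adcl chain in the asm */
	}
	acc = (acc & 0xffffffff) + (acc >> 32);	/* fold carries back in */
	acc = (acc & 0xffffffff) + (acc >> 32);
	return (uint32_t)acc;
}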
aixcc-public/challenge-001-exemplar-source
4,335
arch/x86/lib/csum-copy_64.S
/* * Copyright 2002, 2003 Andi Kleen, SuSE Labs. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. No warranty for anything given at all. */ #include <linux/linkage.h> #include <asm/errno.h> #include <asm/asm.h> /* * Checksum copy with exception handling. * On exceptions src_err_ptr or dst_err_ptr is set to -EFAULT and the * destination is zeroed. * * Input * rdi source * rsi destination * edx len (32bit) * * Output * eax 64bit sum. undefined in case of exception. * * Wrappers need to take care of valid exception sum and zeroing. * They also should align source or destination to 8 bytes. */ .macro source 10: _ASM_EXTABLE_UA(10b, .Lfault) .endm .macro dest 20: _ASM_EXTABLE_UA(20b, .Lfault) .endm SYM_FUNC_START(csum_partial_copy_generic) subq $5*8, %rsp movq %rbx, 0*8(%rsp) movq %r12, 1*8(%rsp) movq %r14, 2*8(%rsp) movq %r13, 3*8(%rsp) movq %r15, 4*8(%rsp) movl $-1, %eax xorl %r9d, %r9d movl %edx, %ecx cmpl $8, %ecx jb .Lshort testb $7, %sil jne .Lunaligned .Laligned: movl %ecx, %r12d shrq $6, %r12 jz .Lhandle_tail /* < 64 */ clc /* main loop. clear in 64 byte blocks */ /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */ /* r11: temp3, rdx: temp4, r12 loopcnt */ /* r10: temp5, r15: temp6, r14 temp7, r13 temp8 */ .p2align 4 .Lloop: source movq (%rdi), %rbx source movq 8(%rdi), %r8 source movq 16(%rdi), %r11 source movq 24(%rdi), %rdx source movq 32(%rdi), %r10 source movq 40(%rdi), %r15 source movq 48(%rdi), %r14 source movq 56(%rdi), %r13 30: /* * No _ASM_EXTABLE_UA; this is used for intentional prefetch on a * potentially unmapped kernel address. */ _ASM_EXTABLE(30b, 2f) prefetcht0 5*64(%rdi) 2: adcq %rbx, %rax adcq %r8, %rax adcq %r11, %rax adcq %rdx, %rax adcq %r10, %rax adcq %r15, %rax adcq %r14, %rax adcq %r13, %rax decl %r12d dest movq %rbx, (%rsi) dest movq %r8, 8(%rsi) dest movq %r11, 16(%rsi) dest movq %rdx, 24(%rsi) dest movq %r10, 32(%rsi) dest movq %r15, 40(%rsi) dest movq %r14, 48(%rsi) dest movq %r13, 56(%rsi) leaq 64(%rdi), %rdi leaq 64(%rsi), %rsi jnz .Lloop adcq %r9, %rax /* do last up to 56 bytes */ .Lhandle_tail: /* ecx: count, rcx.63: the end result needs to be rol8 */ movq %rcx, %r10 andl $63, %ecx shrl $3, %ecx jz .Lfold clc .p2align 4 .Lloop_8: source movq (%rdi), %rbx adcq %rbx, %rax decl %ecx dest movq %rbx, (%rsi) leaq 8(%rsi), %rsi /* preserve carry */ leaq 8(%rdi), %rdi jnz .Lloop_8 adcq %r9, %rax /* add in carry */ .Lfold: /* reduce checksum to 32bits */ movl %eax, %ebx shrq $32, %rax addl %ebx, %eax adcl %r9d, %eax /* do last up to 6 bytes */ .Lhandle_7: movl %r10d, %ecx andl $7, %ecx .L1: /* .Lshort rejoins the common path here */ shrl $1, %ecx jz .Lhandle_1 movl $2, %edx xorl %ebx, %ebx clc .p2align 4 .Lloop_1: source movw (%rdi), %bx adcl %ebx, %eax decl %ecx dest movw %bx, (%rsi) leaq 2(%rdi), %rdi leaq 2(%rsi), %rsi jnz .Lloop_1 adcl %r9d, %eax /* add in carry */ /* handle last odd byte */ .Lhandle_1: testb $1, %r10b jz .Lende xorl %ebx, %ebx source movb (%rdi), %bl dest movb %bl, (%rsi) addl %ebx, %eax adcl %r9d, %eax /* carry */ .Lende: testq %r10, %r10 js .Lwas_odd .Lout: movq 0*8(%rsp), %rbx movq 1*8(%rsp), %r12 movq 2*8(%rsp), %r14 movq 3*8(%rsp), %r13 movq 4*8(%rsp), %r15 addq $5*8, %rsp RET .Lshort: movl %ecx, %r10d jmp .L1 .Lunaligned: xorl %ebx, %ebx testb $1, %sil jne .Lodd 1: testb $2, %sil je 2f source movw (%rdi), %bx dest movw %bx, (%rsi) leaq 2(%rdi), %rdi subq $2, %rcx leaq 2(%rsi), %rsi addq %rbx, %rax 2: 
testb $4, %sil je .Laligned source movl (%rdi), %ebx dest movl %ebx, (%rsi) leaq 4(%rdi), %rdi subq $4, %rcx leaq 4(%rsi), %rsi addq %rbx, %rax jmp .Laligned .Lodd: source movb (%rdi), %bl dest movb %bl, (%rsi) leaq 1(%rdi), %rdi leaq 1(%rsi), %rsi /* decrement, set MSB */ leaq -1(%rcx, %rcx), %rcx rorq $1, %rcx shll $8, %ebx addq %rbx, %rax jmp 1b .Lwas_odd: roll $8, %eax jmp .Lout /* Exception: just return 0 */ .Lfault: xorl %eax, %eax jmp .Lout SYM_FUNC_END(csum_partial_copy_generic)
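For completeness, the .Lfold step above reduces the 64-bit running sum to a 32-bit ones-complement sum by adding its two halves and re-adding the carry, matching "shrq $32 ; addl ; adcl". In C (illustrative name):

#include <stdint.h>

static uint32_t fold_to_32(uint64_t sum)
{
	uint32_t lo = (uint32_t)sum;
	uint32_t hi = (uint32_t)(sum >> 32);
	uint32_t r  = lo + hi;

	return r + (r < lo);	/* adcl: add the carry back in */
}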