# Repo: KAVYANSHTYAGI/pytorch (39,799 bytes)
# File: aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm_sparse/8x8c1x4-dq-packedA-aarch64-neon.S
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
#ifndef IGNORE_CODE_ALIGN_DIRECTIVES
#define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_5 .p2align 5
#define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_4 .p2align 4
#define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_3 .p2align 3
#else
#define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_5
#define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_4
#define NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_3
#endif
# Macro for separating instructions. For most builds ; can be used, but for
# ARM64 Mach-O targets ; begins a comment, so %% is used to separate instructions.
#if defined(__MACH__)
#define XX %%
#else
#define XX ;
#endif
.macro TRANSPOSE_4X4_S32 vin0, vin1, vin2, vin3, temp0, temp1, temp2, temp3
TRN1 \temp0\().4s, \vin0\().4s, \vin1\().4s
TRN2 \temp1\().4s, \vin0\().4s, \vin1\().4s
TRN1 \temp2\().4s, \vin2\().4s, \vin3\().4s
TRN2 \temp3\().4s, \vin2\().4s, \vin3\().4s
TRN1 \vin0\().2d, \temp0\().2d, \temp2\().2d
TRN1 \vin1\().2d, \temp1\().2d, \temp3\().2d
TRN2 \vin2\().2d, \temp0\().2d, \temp2\().2d
TRN2 \vin3\().2d, \temp1\().2d, \temp3\().2d
.endm
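# The macro above is the standard two-stage NEON transpose: TRN1/TRN2 on .4s
# lanes interleave adjacent rows, then TRN1/TRN2 on .2d lanes swap the 64-bit
# halves. A minimal scalar C reference of the same in-place 4x4 int32
# transpose (an illustrative sketch, not part of the kernel):
#
#   #include <stdint.h>
#   void transpose_4x4_s32(int32_t m[4][4]) {
#     for (int i = 0; i < 4; i++) {
#       for (int j = i + 1; j < 4; j++) {
#         int32_t tmp = m[i][j]; /* swap across the main diagonal */
#         m[i][j] = m[j][i];
#         m[j][i] = tmp;
#       }
#     }
#   }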
# Args passed via stack.
# TOS
# |-----------|
# |c_stride | 0
# |out ch indx| 8
# |params | 16
# |-----------|
# void pytorch_q8gemm_dq_sparse_1x4_ukernel_8x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch64_neon(
# size_t mr,
# size_t nr,
# const uint8_t* a_packed,
# const uint8_t* packed_w,
# const uint##W_INDEX_DTYPE_NUM_BITS##_t* w_row_ptr,
# const uint##W_INDEX_DTYPE_NUM_BITS##_t* w_block_ids_ptr,
# const float* b,
# uint8_t* restrict c,
# size_t c_stride,
# size_t output_channel_index,
# const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
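# The weight matrix is stored in a block-CSR layout: for output channel n,
# w_row_ptr[n]..w_row_ptr[n+1] select a run of 1x4 nonzero blocks in packed_w,
# and w_block_ids_ptr holds each block's column-block id. A scalar C sketch of
# the traversal for one output channel (illustrative only; assumes the packed
# activation layout described in the comments below: 32-byte blocks of 8 rows
# x 4 k values, k-major):
#
#   #include <stddef.h>
#   #include <stdint.h>
#   static void sparse_row_acc(int32_t acc[8], size_t n,
#                              const uint8_t* a_packed, const uint8_t* packed_w,
#                              const uint32_t* w_row_ptr,
#                              const uint32_t* w_block_ids_ptr,
#                              uint8_t a_zp, uint8_t b_zp) {
#     for (uint32_t blk = w_row_ptr[n]; blk < w_row_ptr[n + 1]; blk++) {
#       const uint8_t* w = packed_w + (size_t)blk * 4;              /* 1x4 block */
#       const uint8_t* a = a_packed + (size_t)w_block_ids_ptr[blk] * 32;
#       for (int k = 0; k < 4; k++)
#         for (int m = 0; m < 8; m++)
#           acc[m] += ((int32_t)w[k] - b_zp) * ((int32_t)a[k * 8 + m] - a_zp);
#     }
#   }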
#define MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_1X4_UKERNEL_8X8_PACKEDA__AARCH64_NEON(W_INDEX_DTYPE_NUM_BITS, W_INDEX_DTYPE_NUM_BYTES_ARG, W_INDEX_DTYPE_LOG_NUM_BYTES_ARG, LOAD_INDEX_INSTRUCTION) XX\
BEGIN_FUNCTION pytorch_q8gemm_dq_sparse_1x4_ukernel_8x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch64_neon XX\
XX\
STP d15, d14, [sp, -16] XX\
STP d13, d12, [sp, -32] XX\
STP d11, d10, [sp, -48] XX\
STP d9, d8, [sp, -64] XX\
XX\
MOV x11, x1 XX\
/* Load output channel index */ XX\
LDR x10, [sp, 8] XX\
/* Load params */ XX\
LDR x8, [sp, 16] XX\
XX\
/* Load a_zero_point */ XX\
LD1R {v24.8b}, [x8] XX\
ADD x8, x8, 8 XX\
XX\
/* Load pointer to per channel zero points array */ XX\
LDR x17, [x8], 8 XX\
XX\
/* Load pointer to per channel multiplier */ XX\
LDR x13, [x8] XX\
XX\
/* Add offset to the base pointer */ XX\
ADD x17, x17, x10 XX\
/* Mul by 4 to get byte offset for multiplier */ XX\
LSL x10, x10, 2 XX\
/* Add offset to the base pointer for multiplier */ XX\
ADD x13, x13, x10 XX\
XX\
/* Load b_zero_point */ XX\
LD1 {v25.8b}, [x17] XX\
/* Load multiplier c0123 */ XX\
LD1 {v26.4s}, [x13], 16 XX\
/* Load multiplier c4567 */ XX\
LD1 {v30.4s}, [x13] XX\
XX\
EOR x12, x12, x12 XX\
EOR x13, x13, x13 XX\
XX\
CMP x1, 1 XX\
B.LO _7_w##W_INDEX_DTYPE_NUM_BITS XX\
XX\
NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_5 XX\
_0_w##W_INDEX_DTYPE_NUM_BITS##: XX\
/* v8 := zero */ XX\
EOR v8.16b, v8.16b, v8.16b XX\
/* v9 := zero */ XX\
EOR v9.16b, v9.16b, v9.16b XX\
XX\
DUP v29.8b, v25.b[0] XX\
/* w12 = w_row_ptr[n], w13 = w_row_ptr[n+1] */ XX\
/* x4 = x4 + W_INDEX_DTYPE_NUM_BYTES_ARG to point to next n */ XX\
LOAD_INDEX_INSTRUCTION w12, [x4], W_INDEX_DTYPE_NUM_BYTES_ARG XX\
LOAD_INDEX_INSTRUCTION w13, [x4] XX\
/* x10 = temp_packed_w = packed_w + w_row_ptr[n] * 4 */ XX\
/* This points to the first block of nonzero value */ XX\
/* for the nth row. */ XX\
ADD x10, x3, x12, LSL #2 XX\
/* x9 = temp_w_block_ids_ptr = w_block_ids_ptr (x5) + w_row_ptr[n] */ XX\
/* LSL for when elements are >1 byte */ XX\
/* (4 bytes: LSL #2, 2 bytes: LSL #1, 1 byte: LSL #0) */ XX\
/* This points to the block id of the first block */ XX\
/* It should contain x13 - x12 number of block ids */ XX\
ADD x9, x5, x12, LSL W_INDEX_DTYPE_LOG_NUM_BYTES_ARG XX\
/* x8 = num_blocks that needs to be processed */ XX\
SUB x8, x13, x12 XX\
SUBS x8, x8, 2 XX\
B.LO _1_w##W_INDEX_DTYPE_NUM_BITS XX\
XX\
k_loop_w##W_INDEX_DTYPE_NUM_BITS##: XX\
/* b0-7 (channel 0) */ XX\
LD1 {v10.8b}, [x10], 8 XX\
USUBL v10.8h, v10.8b, v29.8b XX\
XX\
/* x12 = block_id_ptr[0] */ XX\
/* x13 = block_id_ptr[1] */ XX\
LOAD_INDEX_INSTRUCTION w12, [x9], W_INDEX_DTYPE_NUM_BYTES_ARG XX\
LOAD_INDEX_INSTRUCTION w13, [x9], W_INDEX_DTYPE_NUM_BYTES_ARG XX\
/* Add offset to x2 */ XX\
/* Shift by 5 because each packed block is a block of 8x4 */ XX\
/* which is 32 bytes */ XX\
ADD x16, x2, x12, LSL #5 XX\
ADD x17, x2, x13, LSL #5 XX\
XX\
LD1 {v0.8b}, [x16], 8 XX\
LD1 {v1.8b}, [x16], 8 XX\
LD1 {v2.8b}, [x16], 8 XX\
LD1 {v3.8b}, [x16] XX\
LD1 {v4.8b}, [x17], 8 XX\
LD1 {v5.8b}, [x17], 8 XX\
LD1 {v6.8b}, [x17], 8 XX\
LD1 {v7.8b}, [x17] XX\
XX\
USUBL v0.8h, v0.8b, v24.8b XX\
USUBL v1.8h, v1.8b, v24.8b XX\
USUBL v2.8h, v2.8b, v24.8b XX\
USUBL v3.8h, v3.8b, v24.8b XX\
USUBL v4.8h, v4.8b, v24.8b XX\
USUBL v5.8h, v5.8b, v24.8b XX\
USUBL v6.8h, v6.8b, v24.8b XX\
USUBL v7.8h, v7.8b, v24.8b XX\
XX\
SMLAL v8.4s, v0.4h, v10.h[0] XX\
SMLAL2 v9.4s, v0.8h, v10.h[0] XX\
SMLAL v8.4s, v1.4h, v10.h[1] XX\
SMLAL2 v9.4s, v1.8h, v10.h[1] XX\
SMLAL v8.4s, v2.4h, v10.h[2] XX\
SMLAL2 v9.4s, v2.8h, v10.h[2] XX\
SMLAL v8.4s, v3.4h, v10.h[3] XX\
SMLAL2 v9.4s, v3.8h, v10.h[3] XX\
SMLAL v8.4s, v4.4h, v10.h[4] XX\
SMLAL2 v9.4s, v4.8h, v10.h[4] XX\
SMLAL v8.4s, v5.4h, v10.h[5] XX\
SMLAL2 v9.4s, v5.8h, v10.h[5] XX\
SMLAL v8.4s, v6.4h, v10.h[6] XX\
SMLAL2 v9.4s, v6.8h, v10.h[6] XX\
SUBS x8, x8, 2 XX\
SMLAL v8.4s, v7.4h, v10.h[7] XX\
SMLAL2 v9.4s, v7.8h, v10.h[7] XX\
XX\
XX\
B.HS k_loop_w##W_INDEX_DTYPE_NUM_BITS XX\
XX\
_1_w##W_INDEX_DTYPE_NUM_BITS##: XX\
CMP x8, -2 XX\
B.EQ _2_w##W_INDEX_DTYPE_NUM_BITS XX\
XX\
/* b0-7 (channel 0) */ XX\
LD1R {v10.4s}, [x10] XX\
USUBL v10.8h, v10.8b, v29.8b XX\
XX\
/* x12 = block_id_ptr[0] */ XX\
LOAD_INDEX_INSTRUCTION w12, [x9] XX\
/* Add offset to x2 */ XX\
/* Shift by 5 because each packed block is a block of 8x4 */ XX\
/* which is 32 bytes */ XX\
ADD x16, x2, x12, LSL #5 XX\
XX\
LD1 {v0.8b}, [x16], 8 XX\
LD1 {v1.8b}, [x16], 8 XX\
LD1 {v2.8b}, [x16], 8 XX\
LD1 {v3.8b}, [x16] XX\
XX\
USUBL v0.8h, v0.8b, v24.8b XX\
USUBL v1.8h, v1.8b, v24.8b XX\
USUBL v2.8h, v2.8b, v24.8b XX\
USUBL v3.8h, v3.8b, v24.8b XX\
XX\
SMLAL v8.4s, v0.4h, v10.h[0] XX\
SMLAL2 v9.4s, v0.8h, v10.h[0] XX\
SMLAL v8.4s, v1.4h, v10.h[1] XX\
SMLAL2 v9.4s, v1.8h, v10.h[1] XX\
SMLAL v8.4s, v2.4h, v10.h[2] XX\
SMLAL2 v9.4s, v2.8h, v10.h[2] XX\
SMLAL v8.4s, v3.4h, v10.h[3] XX\
SMLAL2 v9.4s, v3.8h, v10.h[3] XX\
XX\
NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_4 XX\
_2_w##W_INDEX_DTYPE_NUM_BITS##: XX\
/* Store result on stack */ XX\
XX\
/* -64 because all d8-d15 are on stack */ XX\
/* + 256 bytes of buffer when nr = 1 */ XX\
/* 256 because we are doing 8x8 block with each value being 4 bytes */ XX\
/* Thus 64 * 4 = 256 */ XX\
/* 256 + 64 = 320 */ XX\
/* This is needed because after processing all nrs we will */ XX\
/* load 256 bytes from stack. */ XX\
/* Thus we will load accumulators back in v8, v9, v10, v11, v12, v13, v14, v15 */ XX\
/* v16, v17, v18, v19, v20, v21, v22, v23 */ XX\
/* When nr < 8, say nr = 1, extra v values will be fetched from the stack and may */ XX\
/* overlap with other parts of the stack storing local variables. To avoid that we */ XX\
/* create a buffer of 256 bytes in between, to make sure the pointer increment */ XX\
/* never produces an address beyond the stack frame of this function. */ XX\
SUB x9, sp, 320 XX\
/* Each iteration produces 8 values, each of 4 bytes */ XX\
/* Thus 8 x 4 = 32 bytes = 2^5 */ XX\
/* In this implementation the first value is stored at */ XX\
/* 1st value: sp - 320 - r1 * 32 */ XX\
/* 2nd value: sp - 320 - (r1 - 1) * 32 */ XX\
/* and so on. */ XX\
SUB x9, x9, x1, LSL #5 XX\
ST1 {v8.4s}, [x9], 16 XX\
ST1 {v9.4s}, [x9] XX\
XX\
/* Shift zero point vector by 8 to load */ XX\
/* zero point of the next channel */ XX\
SRI v25.2d, v25.2d, #8 XX\
/* Check if nr >=1 */ XX\
SUBS x1, x1, 1 XX\
BHI _0_w##W_INDEX_DTYPE_NUM_BITS XX\
_3_w##W_INDEX_DTYPE_NUM_BITS##: XX\
/* First load all the accumulators from stack */ XX\
/* Load nr */ XX\
SUB x9, sp, 320 XX\
SUB x9, x9, x11, LSL #5 XX\
/* Now load v8-v15 */ XX\
/* This is 8x4 block (nrxmr) */ XX\
/* We will transpose this to 4x8 (mrxnr) */ XX\
/* v8, v9 : x00, x10, x20, x30; x40, x50, x60, x70 */ XX\
/* v10, v11 : x01, x11, x21, x31; x41, x51, x61, x71 */ XX\
/* v12, v13 : x02, x12, x22, x32; x42, x52, x62, x72 */ XX\
/* v14, v15 : x03, x13, x23, x33; x43, x53, x63, x73 */ XX\
/* */ XX\
/* v16, v17 : x04, x14, x24, x34; x44, x54, x64, x74 */ XX\
/* v18, v19 : x05, x15, x25, x35; x45, x55, x65, x75 */ XX\
/* v20, v21 : x06, x16, x26, x36; x46, x56, x66, x76 */ XX\
/* v22, v23 : x07, x17, x27, x37; x47, x57, x67, x77 */ XX\
LD1 {v8.4s}, [x9], 16 XX\
LD1 {v9.4s}, [x9], 16 XX\
LD1 {v10.4s}, [x9], 16 XX\
LD1 {v11.4s}, [x9], 16 XX\
LD1 {v12.4s}, [x9], 16 XX\
LD1 {v13.4s}, [x9], 16 XX\
LD1 {v14.4s}, [x9], 16 XX\
LD1 {v15.4s}, [x9], 16 XX\
LD1 {v16.4s}, [x9], 16 XX\
LD1 {v17.4s}, [x9], 16 XX\
LD1 {v18.4s}, [x9], 16 XX\
LD1 {v19.4s}, [x9], 16 XX\
LD1 {v20.4s}, [x9], 16 XX\
LD1 {v21.4s}, [x9], 16 XX\
LD1 {v22.4s}, [x9], 16 XX\
LD1 {v23.4s}, [x9] XX\
XX\
/* We can transpose one 4x4 block using the macro */ XX\
/* TRANSPOSE_4X4_S32 v8, v10, v12, v14, v0, v1, v2, v3 */ XX\
/* After this we have */ XX\
/* v8 : x00, x01, x02, x03 */ XX\
/* v10 : x10, x11, x12, x13 */ XX\
/* v12 : x20, x21, x22, x23 */ XX\
/* v14 : x30, x31, x32, x33 */ XX\
/* Then using */ XX\
/* TRANSPOSE_4X4_S32 v16, v18, v20, v22, v4, v5, v6, v7 */ XX\
/* We get */ XX\
/* v16 : x04, x05, x06, x07 */ XX\
/* v18 : x14, x15, x16, x17 */ XX\
/* v20 : x24, x25, x26, x27 */ XX\
/* v22 : x34, x35, x36, x37 */ XX\
/* Similarly we can transpose the other two 4x4 blocks to obtain the */ XX\
/* transposed 8x8 */ XX\
XX\
TRANSPOSE_4X4_S32 v8, v10, v12, v14, v0, v1, v2, v3 XX\
TRANSPOSE_4X4_S32 v16, v18, v20, v22, v4, v5, v6, v7 XX\
TRANSPOSE_4X4_S32 v9, v11, v13, v15, v0, v1, v2, v3 XX\
TRANSPOSE_4X4_S32 v17, v19, v21, v23, v4, v5, v6, v7 XX\
XX\
/* row 0: v8, v16 */ XX\
/* row 1: v10, v18 */ XX\
/* row 2: v12, v20 */ XX\
/* row 3: v14, v22 */ XX\
/* row 4: v9, v17 */ XX\
/* row 5: v11, v19 */ XX\
/* row 6: v13, v21 */ XX\
/* row 7: v15, v23 */ XX\
XX\
/* Load c_stride & params */ XX\
LDR x16, [sp] XX\
LSL x16, x16, 2 XX\
LD1 {v24.4s}, [x6], 16 XX\
LD1 {v25.4s}, [x6] XX\
XX\
SCVTF v8.4s, v8.4s XX\
SCVTF v9.4s, v9.4s XX\
SCVTF v10.4s, v10.4s XX\
SCVTF v11.4s, v11.4s XX\
SCVTF v12.4s, v12.4s XX\
SCVTF v13.4s, v13.4s XX\
SCVTF v14.4s, v14.4s XX\
SCVTF v15.4s, v15.4s XX\
SCVTF v16.4s, v16.4s XX\
SCVTF v17.4s, v17.4s XX\
SCVTF v18.4s, v18.4s XX\
SCVTF v19.4s, v19.4s XX\
SCVTF v20.4s, v20.4s XX\
SCVTF v21.4s, v21.4s XX\
SCVTF v22.4s, v22.4s XX\
SCVTF v23.4s, v23.4s XX\
XX\
FMUL v8.4s, v8.4s, v26.4s XX\
FMUL v16.4s, v16.4s, v30.4s XX\
FMUL v10.4s, v10.4s, v26.4s XX\
FMUL v18.4s, v18.4s, v30.4s XX\
FMUL v12.4s, v12.4s, v26.4s XX\
FMUL v20.4s, v20.4s, v30.4s XX\
FMUL v14.4s, v14.4s, v26.4s XX\
FMUL v22.4s, v22.4s, v30.4s XX\
FMUL v9.4s, v9.4s, v26.4s XX\
FMUL v17.4s, v17.4s, v30.4s XX\
FMUL v11.4s, v11.4s, v26.4s XX\
FMUL v19.4s, v19.4s, v30.4s XX\
FMUL v13.4s, v13.4s, v26.4s XX\
FMUL v21.4s, v21.4s, v30.4s XX\
FMUL v15.4s, v15.4s, v26.4s XX\
FMUL v23.4s, v23.4s, v30.4s XX\
XX\
FADD v8.4s, v8.4s, v24.4s XX\
FADD v16.4s, v16.4s, v25.4s XX\
FADD v10.4s, v10.4s, v24.4s XX\
FADD v18.4s, v18.4s, v25.4s XX\
FADD v12.4s, v12.4s, v24.4s XX\
FADD v20.4s, v20.4s, v25.4s XX\
FADD v14.4s, v14.4s, v24.4s XX\
FADD v22.4s, v22.4s, v25.4s XX\
FADD v9.4s, v9.4s, v24.4s XX\
FADD v17.4s, v17.4s, v25.4s XX\
FADD v11.4s, v11.4s, v24.4s XX\
FADD v19.4s, v19.4s, v25.4s XX\
FADD v13.4s, v13.4s, v24.4s XX\
FADD v21.4s, v21.4s, v25.4s XX\
FADD v15.4s, v15.4s, v24.4s XX\
FADD v23.4s, v23.4s, v25.4s XX\
XX\
/* Compute c0-c7 */ XX\
XX\
ADD x9, x7, x16 XX\
CMP x0, 2 XX\
CSEL x9, x7, x9, LO XX\
XX\
ADD x10, x9, x16 XX\
CSEL x10, x9, x10, LS XX\
XX\
ADD x8, x10, x16 XX\
CMP x0, 4 XX\
CSEL x8, x10, x8, LO XX\
XX\
ADD x12, x8, x16 XX\
CSEL x12, x8, x12, LS XX\
XX\
ADD x13, x12, x16 XX\
CMP x0, 6 XX\
CSEL x13, x12, x13, LO XX\
XX\
ADD x14, x13, x16 XX\
CSEL x14, x13, x14, LS XX\
XX\
ADD x15, x14, x16 XX\
CMP x0, 8 XX\
CSEL x15, x14, x15, NE XX\
XX\
CMP x11, 8 XX\
B.NE _4_w##W_INDEX_DTYPE_NUM_BITS XX\
XX\
ST1 {v8.4s}, [x7], 16 XX\
ST1 {v16.4s}, [x7] XX\
ST1 {v10.4s}, [x9], 16 XX\
ST1 {v18.4s}, [x9] XX\
ST1 {v12.4s}, [x10], 16 XX\
ST1 {v20.4s}, [x10] XX\
ST1 {v14.4s}, [x8], 16 XX\
ST1 {v22.4s}, [x8] XX\
ST1 {v9.4s}, [x12], 16 XX\
ST1 {v17.4s}, [x12] XX\
ST1 {v11.4s}, [x13], 16 XX\
ST1 {v19.4s}, [x13] XX\
ST1 {v13.4s}, [x14], 16 XX\
ST1 {v21.4s}, [x14] XX\
ST1 {v15.4s}, [x15], 16 XX\
ST1 {v23.4s}, [x15] XX\
XX\
LDP d9, d8, [sp, -64] XX\
LDP d11, d10, [sp, -48] XX\
LDP d13, d12, [sp, -32] XX\
LDP d15, d14, [sp, -16] XX\
XX\
RET XX\
XX\
NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_3 XX\
_4_w##W_INDEX_DTYPE_NUM_BITS##: XX\
CMP x11, 4 XX\
B.LO _5_w##W_INDEX_DTYPE_NUM_BITS XX\
XX\
ST1 {v8.4s}, [x7], 16 XX\
ST1 {v10.4s}, [x9], 16 XX\
ST1 {v12.4s}, [x10], 16 XX\
ST1 {v14.4s}, [x8], 16 XX\
ST1 {v9.4s}, [x12], 16 XX\
ST1 {v11.4s}, [x13], 16 XX\
ST1 {v13.4s}, [x14], 16 XX\
ST1 {v15.4s}, [x15], 16 XX\
XX\
SUB x11, x11, 4 XX\
XX\
MOV v8.16b, v16.16b XX\
MOV v10.16b, v18.16b XX\
MOV v12.16b, v20.16b XX\
MOV v14.16b, v22.16b XX\
MOV v9.16b, v17.16b XX\
MOV v11.16b, v19.16b XX\
MOV v13.16b, v21.16b XX\
MOV v15.16b, v23.16b XX\
XX\
_5_w##W_INDEX_DTYPE_NUM_BITS##: XX\
CMP x11, 2 XX\
B.LO _6_w##W_INDEX_DTYPE_NUM_BITS XX\
XX\
ST1 {v8.2s}, [x7], 8 XX\
ST1 {v10.2s}, [x9], 8 XX\
ST1 {v12.2s}, [x10], 8 XX\
ST1 {v14.2s}, [x8], 8 XX\
ST1 {v9.2s}, [x12], 8 XX\
ST1 {v11.2s}, [x13], 8 XX\
ST1 {v13.2s}, [x14], 8 XX\
ST1 {v15.2s}, [x15], 8 XX\
XX\
SUB x11, x11, 2 XX\
XX\
EXT v8.16b, v8.16b, v8.16b, 8 XX\
EXT v10.16b, v10.16b, v10.16b, 8 XX\
EXT v12.16b, v12.16b, v12.16b, 8 XX\
EXT v14.16b, v14.16b, v14.16b, 8 XX\
EXT v9.16b, v9.16b, v9.16b, 8 XX\
EXT v11.16b, v11.16b, v11.16b, 8 XX\
EXT v13.16b, v13.16b, v13.16b, 8 XX\
EXT v15.16b, v15.16b, v15.16b, 8 XX\
XX\
_6_w##W_INDEX_DTYPE_NUM_BITS##: XX\
CMP x11, 1 XX\
B.LO _7_w##W_INDEX_DTYPE_NUM_BITS XX\
XX\
ST1 {v8.s}[0], [x7] XX\
ST1 {v10.s}[0], [x9] XX\
ST1 {v12.s}[0], [x10] XX\
ST1 {v14.s}[0], [x8] XX\
ST1 {v9.s}[0], [x12] XX\
ST1 {v11.s}[0], [x13] XX\
ST1 {v13.s}[0], [x14] XX\
ST1 {v15.s}[0], [x15] XX\
XX\
_7_w##W_INDEX_DTYPE_NUM_BITS##: XX\
LDP d9, d8, [sp, -64] XX\
LDP d11, d10, [sp, -48] XX\
LDP d13, d12, [sp, -32] XX\
LDP d15, d14, [sp, -16] XX\
XX\
RET XX\
XX\
END_FUNCTION pytorch_q8gemm_dq_sparse_1x4_ukernel_8x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch64_neon
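# Each MAKE_... instantiation below stamps out one full copy of the kernel,
# with the index width threaded through via token pasting. Illustrative,
# abbreviated expansion for the 32-bit variant:
#
#   MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_1X4_UKERNEL_8X8_PACKEDA__AARCH64_NEON(32, #4, #2, LDR)
#   /* -> BEGIN_FUNCTION pytorch_q8gemm_dq_sparse_1x4_ukernel_8x8_packedA_w32__aarch64_neon
#         ... every LOAD_INDEX_INSTRUCTION becomes LDR (4-byte indices, LSL #2) ... */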
# void pytorch_q8gemm_dq_sparse_1x4_ukernel_8x8_packedA_w32__aarch64_neon(
# size_t mr,
# size_t nr,
# const uint8_t* a_packed,
# const uint8_t* packed_w,
# const uint32_t* w_row_ptr,
# const uint32_t* w_block_ids_ptr,
# const float* b,
# uint8_t* restrict c,
# size_t c_stride,
# size_t output_channel_index,
# const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_1X4_UKERNEL_8X8_PACKEDA__AARCH64_NEON(32, #4, #2, LDR)
# void pytorch_q8gemm_dq_sparse_1x4_ukernel_8x8_packedA_w16__aarch64_neon(
# size_t mr,
# size_t nr,
# const uint8_t* a_packed,
# const uint8_t* packed_w,
# const uint16_t* w_row_ptr,
# const uint16_t* w_block_ids_ptr,
# const float* b,
# uint8_t* restrict c,
# size_t c_stride,
# size_t output_channel_index,
# const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_1X4_UKERNEL_8X8_PACKEDA__AARCH64_NEON(16, #2, #1, LDRH)
# void pytorch_q8gemm_dq_sparse_1x4_ukernel_8x8_packedA_w8__aarch64_neon(
# size_t mr,
# size_t nr,
# const uint8_t* a_packed,
# const uint8_t* packed_w,
# const uint8_t* w_row_ptr,
# const uint8_t* w_block_ids_ptr,
# const float* b,
# uint8_t* restrict c,
# size_t c_stride,
# size_t output_channel_index,
# const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_1X4_UKERNEL_8X8_PACKEDA__AARCH64_NEON(8, #1, #0, LDRB)
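# All three instantiations share the dynamic-dequantization epilogue above
# (SCVTF / FMUL / FADD): each int32 accumulator is converted to float, scaled
# by the per-channel multiplier, and offset by the bias. Per-element C
# equivalent (illustrative sketch):
#
#   /* acc: int32 accumulator for one row of output channel ch */
#   float out = (float)acc * multiplier[ch] + bias[ch];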
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
#undef NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_5
#undef NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_4
#undef NDEF_IGNORE_CODE_ALIGN_DIRECTIVES_P2ALIGN_3
#undef MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_1X4_UKERNEL_8X8_PACKEDA__AARCH64_NEON
#undef XX
# Repo: KAVYANSHTYAGI/pytorch (26,974 bytes)
# File: aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm_sparse/4x8c8x1-dq-packedA-aarch32-neon.S
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
#include <requantization/runtime-assembly.h>
#ifndef __APPLE__
#define NDEF_APPLE_SYMBOLS .arch armv7-a; .fpu neon
#else
#define NDEF_APPLE_SYMBOLS
#endif
# r0 mr
# r1 nr
# r2 packed_a
# r3 packed_w
# d14 a_zero_point
# d15 b_zero_point
## Stack
# 4 a_stride
# 4 packed_w
# 4 w_row_ptr
# 4 w_block_ids_ptr
# 4 b
# 4 c
# 4 c_stride
# 4 output channel index
# 4 quantization_params
# --
.syntax unified
# Args passed via stack.
# TOS
# |----------------|
# |packed_w | 0
# |w_row_ptr | 4
# |w_block_ids_ptr | 8
# |b | 12
# |c | 16
# |c_stride | 20
# |out ch indx | 24
# |params | 28
# |----------------|
#
# After loading w pointer in ip reg.
# And after pushing r4-r9 and d8-d15 on stack
# |----------------|
# |d8 - d15 | 0
# |r4 - r11,lr | 64
# |w_row_ptr | 100
# |w_block_ids_ptr | 104
# |b | 108
# |c | 112
# |c_stride | 116
# |out ch indx | 120
# |params | 124
# |----------------|
#
# void pytorch_q8gemm_dq_sparse_8x1_ukernel_4x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch32_neon(
# size_t mr,
# size_t nr,
# const uint8_t* a_packed,
# const uint8_t* packed_w,
# const uint##W_INDEX_DTYPE_NUM_BITS##_t* w_row_ptr,
# const uint##W_INDEX_DTYPE_NUM_BITS##_t* w_block_ids_ptr,
# const float* b,
# uint8_t* restrict c,
# size_t c_stride,
# size_t output_channel_index,
# const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
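# In contrast to the 1x4 kernel above, the sparsity pattern here is 8x1: each
# nonzero block covers 8 output channels at a single k. A scalar C sketch of
# one block's contribution (illustrative helper, not the kernel):
#
#   #include <stdint.h>
#   /* acc[m][n]: 4 rows x 8 channels; w: one 8x1 weight block at column k;
#      a: the 4 activations at k, one per row; b_zp: per-channel zero points */
#   static void block_8x1_acc(int32_t acc[4][8], const uint8_t w[8],
#                             const uint8_t a[4], uint8_t a_zp,
#                             const uint8_t b_zp[8]) {
#     for (int m = 0; m < 4; m++)
#       for (int n = 0; n < 8; n++)
#         acc[m][n] += ((int32_t)a[m] - a_zp) * ((int32_t)w[n] - b_zp[n]);
#   }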
#define MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_8X1_UKERNEL_4X8_PACKEDA__AARCH32_NEON(W_INDEX_DTYPE_NUM_BITS, W_INDEX_DTYPE_NUM_BYTES_ARG, W_INDEX_DTYPE_LOG_NUM_BYTES_ARG, LOAD_INDEX_INSTRUCTION) ;\
BEGIN_FUNCTION pytorch_q8gemm_dq_sparse_8x1_ukernel_4x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch32_neon ;\
.arm ;\
NDEF_APPLE_SYMBOLS ;\
;\
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr} ;\
VPUSH {d8-d15} ;\
;\
/* Store nr in r11 as well for later use. */ ;\
MOV r11, r1 ;\
/* Load output channel index */ ;\
LDR r5, [sp, 120] ;\
/* Load quantization params */ ;\
/* - r7 = quantization_params */ ;\
LDR r7, [sp, 124] ;\
/* Load input_zero_point */ ;\
VLD1.8 {d14[]}, [r7] ;\
ADD r7, r7, 4 ;\
/* Load pointer to per channel zero points array */ ;\
LDR r4, [r7] ;\
/* Add output_channel_index to the b_zero_point pointer */ ;\
ADD r4, r4, r5 ;\
;\
/* Load w_row_ptr + n */ ;\
LDR r5, [sp, 100] ;\
/* r7 = block_ids_ptr */ ;\
LDR r7, [sp, 104] ;\
;\
VEOR q8, q8, q8 ;\
VEOR q9, q9, q9 ;\
VEOR q10, q10, q10 ;\
VEOR q11, q11, q11 ;\
VEOR q12, q12, q12 ;\
VEOR q13, q13, q13 ;\
VEOR q14, q14, q14 ;\
VEOR q15, q15, q15 ;\
VLD1.8 {d15}, [r4] ;\
/* ip = w_row_ptr[n], lr = w_row_ptr[n+1] */ ;\
/* r5 = r5 + W_INDEX_DTYPE_NUM_BYTES_ARG to point to next n */ ;\
LOAD_INDEX_INSTRUCTION ip, [r5], W_INDEX_DTYPE_NUM_BYTES_ARG ;\
LOAD_INDEX_INSTRUCTION lr, [r5] ;\
/* r6 = temp_packed_w = packed_w + w_row_ptr[n] * 8 */ ;\
/* * 8 because each block contains 8 values */ ;\
/* This points to the first block of nonzero value */ ;\
/* for the nth row. */ ;\
ADD r6, r3, ip, LSL #3 ;\
/* r9 = temp_w_block_ids_ptr = w_block_ids_ptr (r7) + w_row_ptr[n] */ ;\
/* LSL for when elements are >1 byte */ ;\
/* (4 bytes: LSL #2, 2 bytes: LSL #1, 1 byte: LSL #0) */ ;\
/* This points to the col block id of the first block */ ;\
/* It should contain lr - ip number of block ids */ ;\
/* Note that in this kernel the sparsity pattern is 8x1. */ ;\
/* Thus each block contains only 1 k value, as opposed to */ ;\
/* 1x4 where each block contains 4 k values. */ ;\
ADD r9, r7, ip, LSL W_INDEX_DTYPE_LOG_NUM_BYTES_ARG ;\
/* r8 = num_blocks that needs to be processed */ ;\
SUB r8, lr, ip ;\
SUBS r8, r8, 2 ;\
BLO _1_w##W_INDEX_DTYPE_NUM_BITS ;\
;\
.p2align 5 ;\
k_loop_w##W_INDEX_DTYPE_NUM_BITS##: ;\
/* Load 2 non zero blocks of weights. Each block = 8x1. */ ;\
VLD1.8 {d0}, [r6]! ;\
VLD1.8 {d2}, [r6]! ;\
;\
/* ip = block_id_ptr[0] */ ;\
/* lr = block_id_ptr[1] */ ;\
LOAD_INDEX_INSTRUCTION ip, [r9], W_INDEX_DTYPE_NUM_BYTES_ARG ;\
LOAD_INDEX_INSTRUCTION lr, [r9], W_INDEX_DTYPE_NUM_BYTES_ARG ;\
;\
/* Add offset to r2 */ ;\
/* Multiply by 4 (LSL #2) because each packed block is a 4x1 block, */ ;\
/* i.e. 4 bytes */ ;\
ADD r10, r2, ip, LSL #2 ;\
/* q0, q1 = vxb */ ;\
VSUBL.U8 q0, d0, d15 ;\
VSUBL.U8 q1, d2, d15 ;\
;\
/* d4 = 4x1 transposed */ ;\
VLD1.32 {d4[]}, [r10] ;\
;\
ADD r10, r2, lr, LSL #2 ;\
;\
VSUBL.U8 q2, d4, d14 /* vxa0_t */ ;\
;\
/* d5 = next 4x1 transposed */ ;\
VLD1.32 {d6[]}, [r10] ;\
;\
VSUBL.U8 q3, d6, d14 /* vxa1_t */ ;\
;\
/* q0 = d0, d1 = 8x1 block of weight for k */ ;\
/* q1 = d2, d3 = 8x1 block of weight for k + 1 */ ;\
/* q2's d4 = 4x1 block of activation for k */ ;\
/* q3's d6 = 4x1 block of activation for k + 1 */ ;\
;\
/* Generate 4x8 block as two 4x4 blocks */ ;\
;\
VMLAL.S16 q8, d0, d4[0] ;\
VMLAL.S16 q9, d1, d4[0] ;\
VMLAL.S16 q10, d0, d4[1] ;\
VMLAL.S16 q11, d1, d4[1] ;\
VMLAL.S16 q12, d0, d4[2] ;\
VMLAL.S16 q13, d1, d4[2] ;\
VMLAL.S16 q14, d0, d4[3] ;\
VMLAL.S16 q15, d1, d4[3] ;\
;\
VMLAL.S16 q8, d2, d6[0] ;\
VMLAL.S16 q9, d3, d6[0] ;\
VMLAL.S16 q10, d2, d6[1] ;\
VMLAL.S16 q11, d3, d6[1] ;\
VMLAL.S16 q12, d2, d6[2] ;\
VMLAL.S16 q13, d3, d6[2] ;\
VMLAL.S16 q14, d2, d6[3] ;\
VMLAL.S16 q15, d3, d6[3] ;\
;\
SUBS r8, r8, 2 ;\
;\
BHS k_loop_w##W_INDEX_DTYPE_NUM_BITS ;\
_1_w##W_INDEX_DTYPE_NUM_BITS##: ;\
CMP r8, -2 ;\
BEQ _3_w##W_INDEX_DTYPE_NUM_BITS ;\
;\
/* Load the last nonzero block of weights (8x1 = 8 bytes) */ ;\
VLD1.8 {d0}, [r6] ;\
/* q0 = vxb */ ;\
VSUBL.U8 q0, d0, d15 ;\
;\
/* ip = block_id_ptr[0] */ ;\
LOAD_INDEX_INSTRUCTION ip, [r9] ;\
;\
/* Add offset to r2 */ ;\
/* Multiply by 4 (LSL #2) because each packed block is a 4x1 block, */ ;\
/* i.e. 4 bytes */ ;\
ADD r10, r2, ip, LSL #2 ;\
;\
/* Load 4 8-bit activation values as one replicated 32-bit value */ ;\
VLD1.32 {d4[]}, [r10]! ;\
;\
VSUBL.U8 q2, d4, d14 /* vxa0_t */ ;\
;\
VMLAL.S16 q8, d0, d4[0] ;\
VMLAL.S16 q9, d1, d4[0] ;\
VMLAL.S16 q10, d0, d4[1] ;\
VMLAL.S16 q11, d1, d4[1] ;\
VMLAL.S16 q12, d0, d4[2] ;\
VMLAL.S16 q13, d1, d4[2] ;\
VMLAL.S16 q14, d0, d4[3] ;\
VMLAL.S16 q15, d1, d4[3] ;\
;\
;\
.p2align 4 ;\
_3_w##W_INDEX_DTYPE_NUM_BITS##: ;\
/* Load output channel index */ ;\
LDR r5, [sp, 120] ;\
/* Load quantization params */ ;\
/* - r7 = quantization_params */ ;\
LDR r7, [sp, 124] ;\
ADD r7, r7, 8 ;\
/* Load pointer to per channel requant scale */ ;\
LDR r7, [r7] ;\
/* Now r7 has the base_addr + offset for multipliers */ ;\
ADD r7, r7, r5, LSL #2 ;\
;\
LDR r6, [sp, 108] ;\
/* Load q6: vmultiplier_c0123 */ ;\
VLD1.32 {d12, d13}, [r7]! ;\
/* Load q7: vmultiplier_c4567 */ ;\
VLD1.32 {d14, d15}, [r7] ;\
VCVT.F32.S32 q8, q8 ;\
VCVT.F32.S32 q9, q9 ;\
VCVT.F32.S32 q10, q10 ;\
VLD1.32 {q0}, [r6]! ;\
VLD1.32 {q1}, [r6] ;\
;\
VCVT.F32.S32 q11, q11 ;\
VCVT.F32.S32 q12, q12 ;\
VCVT.F32.S32 q13, q13 ;\
VCVT.F32.S32 q14, q14 ;\
VCVT.F32.S32 q15, q15 ;\
;\
VMUL.F32 q8, q8, q6 ;\
VMUL.F32 q9, q9, q7 ;\
VMUL.F32 q10, q10, q6 ;\
VMUL.F32 q11, q11, q7 ;\
VMUL.F32 q12, q12, q6 ;\
VMUL.F32 q13, q13, q7 ;\
VMUL.F32 q14, q14, q6 ;\
VMUL.F32 q15, q15, q7 ;\
;\
VADD.F32 q8, q8, q0 ;\
VADD.F32 q9, q9, q1 ;\
VADD.F32 q10, q10, q0 ;\
VADD.F32 q11, q11, q1 ;\
VADD.F32 q12, q12, q0 ;\
VADD.F32 q13, q13, q1 ;\
VADD.F32 q14, q14, q0 ;\
VADD.F32 q15, q15, q1 ;\
;\
/* Load c, c_stride: */ ;\
/* - r1 = c */ ;\
/* - r9 = c_stride */ ;\
LDR r1, [sp, 112] ;\
LDR r9, [sp, 116] ;\
LSL r9, r9, 2 ;\
;\
/* r1 = c0 = c pointer */ ;\
;\
CMP r0, 2 ;\
/* r2 = c1 */ ;\
ADD r2, r1, r9 ;\
MOVLO r2, r1 ;\
;\
/* r3 = c2 */ ;\
ADD r3, r2, r9 ;\
MOVLS r3, r2 ;\
;\
CMP r0, 4 ;\
/* r4 = c3 */ ;\
ADD r4, r3, r9 ;\
MOVNE r4, r3 ;\
;\
CMP r11, 8 ;\
BNE _4_w##W_INDEX_DTYPE_NUM_BITS ;\
;\
VST1.32 {q8}, [r1]! ;\
VST1.32 {q10}, [r2]! ;\
VST1.32 {q12}, [r3]! ;\
VST1.32 {q14}, [r4]! ;\
VST1.32 {q9}, [r1] ;\
VST1.32 {q11}, [r2] ;\
VST1.32 {q13}, [r3] ;\
VST1.32 {q15}, [r4] ;\
;\
VPOP {d8-d15} ;\
POP {r4, r5, r6, r7, r8, r9, r10, r11, lr} ;\
BX lr ;\
;\
.p2align 3 ;\
_4_w##W_INDEX_DTYPE_NUM_BITS##: ;\
CMP r11, 4 ;\
BLO _5_w##W_INDEX_DTYPE_NUM_BITS ;\
;\
VST1.32 {q8}, [r1]! ;\
VST1.32 {q10}, [r2]! ;\
VST1.32 {q12}, [r3]! ;\
VST1.32 {q14}, [r4]! ;\
;\
SUB r11, 4 ;\
;\
VMOV.32 q8, q9 ;\
VMOV.32 q10, q11 ;\
VMOV.32 q12, q13 ;\
VMOV.32 q14, q15 ;\
;\
_5_w##W_INDEX_DTYPE_NUM_BITS##: ;\
CMP r11, 2 ;\
BLO _6_w##W_INDEX_DTYPE_NUM_BITS ;\
;\
VST1.32 {d16}, [r1]! ;\
VST1.32 {d20}, [r2]! ;\
VST1.32 {d24}, [r3]! ;\
VST1.32 {d28}, [r4]! ;\
;\
SUB r11, 2 ;\
;\
VEXT.32 q8, q8, q8, 2 ;\
VEXT.32 q10, q10, q10, 2 ;\
VEXT.32 q12, q12, q12, 2 ;\
VEXT.32 q14, q14, q14, 2 ;\
;\
_6_w##W_INDEX_DTYPE_NUM_BITS##: ;\
TEQ r11, 0 ;\
BEQ _7_w##W_INDEX_DTYPE_NUM_BITS ;\
;\
VST1.32 {d16[0]}, [r1] ;\
VST1.32 {d20[0]}, [r2] ;\
VST1.32 {d24[0]}, [r3] ;\
VST1.32 {d28[0]}, [r4] ;\
;\
_7_w##W_INDEX_DTYPE_NUM_BITS##: ;\
VPOP {d8-d15} ;\
POP {r4, r5, r6, r7, r8, r9, r10, r11, lr} ;\
BX lr ;\
;\
END_FUNCTION pytorch_q8gemm_dq_sparse_8x1_ukernel_4x8_packedA_w##W_INDEX_DTYPE_NUM_BITS##__aarch32_neon
# void pytorch_q8gemm_dq_sparse_8x1_ukernel_4x8_packedA_w32__aarch32_neon(
# size_t mr,
# size_t nr,
# const uint8_t* a_packed,
# const uint8_t* packed_w,
# const uint32_t* w_row_ptr,
# const uint32_t* w_block_ids_ptr,
# const float* b,
# uint8_t* restrict c,
# size_t c_stride,
# size_t output_channel_index,
# const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_8X1_UKERNEL_4X8_PACKEDA__AARCH32_NEON(32, #4, #2, LDR)
# void pytorch_q8gemm_dq_sparse_8x1_ukernel_4x8_packedA_w16__aarch32_neon(
# size_t mr,
# size_t nr,
# const uint8_t* a_packed,
# const uint8_t* packed_w,
# const uint16_t* w_row_ptr,
# const uint16_t* w_block_ids_ptr,
# const float* b,
# uint8_t* restrict c,
# size_t c_stride,
# size_t output_channel_index,
# const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_8X1_UKERNEL_4X8_PACKEDA__AARCH32_NEON(16, #2, #1, LDRH)
# void pytorch_q8gemm_dq_sparse_8x1_ukernel_4x8_packedA_w8__aarch32_neon(
# size_t mr,
# size_t nr,
# const uint8_t* a_packed,
# const uint8_t* packed_w,
# const uint8_t* w_row_ptr,
# const uint8_t* w_block_ids_ptr,
# const float* b,
# uint8_t* restrict c,
# size_t c_stride,
# size_t output_channel_index,
# const union pytorch_qnnp_conv_dynamic_quantization_params quantization_params[restrict static 1])
MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_8X1_UKERNEL_4X8_PACKEDA__AARCH32_NEON(8, #1, #0, LDRB)
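# The _4/_5/_6 tails in the kernel above handle nr < 8 by storing 4, then 2,
# then 1 remaining float column(s), rotating registers in between. The same
# tail logic in C (illustrative sketch, valid for nr < 8):
#
#   #include <stddef.h>
#   static void store_tail(float* dst, const float* row, size_t nr) {
#     size_t i = 0;
#     if (nr & 4) { for (int j = 0; j < 4; j++) dst[i + j] = row[i + j]; i += 4; }
#     if (nr & 2) { dst[i] = row[i]; dst[i + 1] = row[i + 1]; i += 2; }
#     if (nr & 1) { dst[i] = row[i]; }
#   }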
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
#undef NDEF_APPLE_SYMBOLS
#undef MAKE_PYTORCH_Q8GEMM_DQ_SPARSE_8X1_UKERNEL_4X8_PACKEDA__AARCH32_NEON
# Repo: KAVYANSHTYAGI/pytorch (27,616 bytes)
# File: aten/src/ATen/native/quantized/cpu/qnnpack/src/q8conv/8x8-aarch64-neon.S
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
#include <requantization/runtime-assembly.h>
# Args passed via 8 registers (64 bytes)
# x0: mr
# x1: nr
# x2: kc
# x3: ks
# x4: a
# x5: w
# x6: c
# x7: c_stride
#
# Args passed via stack.
# TOS
# |-----------|
# |out ch indx| 0
# |params | 8
# |-----------|
# void pytorch_q8conv_ukernel_8x8__aarch64_neon(
# size_t mr,
# size_t nr,
# size_t kc,
# size_t ks,
# const uint8_t** restrict a,
# const void* restrict w,
# uint8_t* restrict c,
# size_t c_stride,
# size_t output_channel_index,
# const union pytorch_qnnp_q31_requantization_params quantization_params[restrict static 1])
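# Unlike the sparse kernels above, this kernel requantizes back to uint8:
# scale, round to nearest (FCVTNS rounds ties to even), add the output zero
# point with saturation, then clamp. Per-element C sketch (illustrative; the
# intermediate int16 saturation of SQXTN/SQXTUN is folded into the clamp, and
# nearbyintf assumes the default FE_TONEAREST rounding mode):
#
#   #include <math.h>
#   #include <stdint.h>
#   static uint8_t requantize(int32_t acc, float scale, int16_t zero_point,
#                             uint8_t qmin, uint8_t qmax) {
#     int32_t r = (int32_t)nearbyintf((float)acc * scale); /* FCVTNS */
#     r += zero_point;                                     /* SQADD */
#     if (r < qmin) r = qmin;                              /* UMAX with vmin */
#     if (r > qmax) r = qmax;                              /* UMIN with vmax */
#     return (uint8_t)r;
#   }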
BEGIN_FUNCTION pytorch_q8conv_ukernel_8x8__aarch64_neon
# Load params: x8
# Load output channel index: x9
# Note: since this is an offset into a byte array,
# we do not need to scale it by the element size.
LDP x9, x8, [sp]
STP d15, d14, [sp, -16]
STP d13, d12, [sp, -32]
STP d11, d10, [sp, -48]
STP d9, d8, [sp, -64]
# Load bias0123, bias4567
LD1 {v8.4s, v9.4s}, [x5], 32
# Load pointer to per channel zero points array,
# post-indexing x8 so it then points at a_zero_point
LDR x10, [x8], 8
# Add offset to the base pointer
ADD x10, x10, x9
# v10 := vacc1x0123
MOV v10.16b, v8.16b
# v11 := vacc1x4567
MOV v11.16b, v9.16b
# Load b_zero_point
LD1 {v25.8b}, [x10]
# Load a_zero_point
LD1R {v24.8b}, [x8]
# Load pointer to per channel requant scale
LDR x10, [x8, 8]!
ADD x8, x8, 8
# v12 := vacc2x0123
MOV v12.16b, v8.16b
# v13 := vacc2x4567
MOV v13.16b, v9.16b
# v14 := vacc3x0123
MOV v14.16b, v8.16b
# v15 := vacc3x4567
MOV v15.16b, v9.16b
# v16 := vacc4x0123
MOV v16.16b, v8.16b
# v17 := vacc4x4567
MOV v17.16b, v9.16b
# v18 := vacc5x0123
MOV v18.16b, v8.16b
# v19 := vacc5x4567
MOV v19.16b, v9.16b
# v20 := vacc6x0123
MOV v20.16b, v8.16b
# v21 := vacc6x4567
MOV v21.16b, v9.16b
# v22 := vacc7x0123
MOV v22.16b, v8.16b
# v23 := vacc7x4567
MOV v23.16b, v9.16b
# Fold mul by 4 to get byte offset for requant scale.
# Add offset to the base pointer
ADD x10, x10, x9, lsl#2
// Load requantization_scale
// - v26 = requantization_scale channels 0-3
// - v30 = requantization_scale channels 4-7
LD1 {v26.4s}, [x10], 16
LD1 {v30.4s}, [x10]
#ifndef IGNORE_CODE_ALIGN_DIRECTIVES
.p2align 4
#endif
3:
MOV x17, x2
LDR x16, [x4], 8 // a0
LDR x9, [x4], 8 // a1
LDR x10, [x4], 8 // a2
LDR x11, [x4], 8 // a3
LDR x12, [x4], 8 // a4
LDR x13, [x4], 8 // a5
LDR x14, [x4], 8 // a6
LDR x15, [x4], 8 // a7
SUBS x17, x17, 8
B.LO 1f
#ifndef IGNORE_CODE_ALIGN_DIRECTIVES
.p2align 5
#endif
0:
# b0-7 (channel 0)
LD1 {v27.8b}, [x5], 8
USUBL v27.8h, v27.8b, v25.8b
# va0 - va7 := va - va_offset
LD1 {v0.8b}, [x16], 8
SUB_ZERO_POINT v0.8h, v0.8b, v24.8b
LD1 {v1.8b}, [x9], 8
SUB_ZERO_POINT v1.8h, v1.8b, v24.8b
LD1 {v2.8b}, [x10], 8
SUB_ZERO_POINT v2.8h, v2.8b, v24.8b
LD1 {v3.8b}, [x11], 8
SUB_ZERO_POINT v3.8h, v3.8b, v24.8b
LD1 {v4.8b}, [x12], 8
SUB_ZERO_POINT v4.8h, v4.8b, v24.8b
LD1 {v5.8b}, [x13], 8
SUB_ZERO_POINT v5.8h, v5.8b, v24.8b
LD1 {v6.8b}, [x14], 8
SUB_ZERO_POINT v6.8h, v6.8b, v24.8b
LD1 {v7.8b}, [x15], 8
SUB_ZERO_POINT v7.8h, v7.8b, v24.8b
// b0-7 (channel 1)
LD1 {v28.8b}, [x5], 8
SMLAL v8.4s, v27.4h, v0.h[0] // vacc0x0123 += vb0123 * va0[0]
SMLAL2 v9.4s, v27.8h, v0.h[0] // vacc0x4567 += vb4567 * va0[0]
SMLAL v10.4s, v27.4h, v1.h[0] // vacc1x0123 += vb0123 * va1[0]
SMLAL2 v11.4s, v27.8h, v1.h[0] // vacc1x4567 += vb4567 * va1[0]
SMLAL v12.4s, v27.4h, v2.h[0] // vacc2x0123 += vb0123 * va2[0]
SMLAL2 v13.4s, v27.8h, v2.h[0] // vacc2x4567 += vb4567 * va2[0]
SMLAL v14.4s, v27.4h, v3.h[0] // vacc3x0123 += vb0123 * va3[0]
SMLAL2 v15.4s, v27.8h, v3.h[0] // vacc3x4567 += vb4567 * va3[0]
USUBL v28.8h, v28.8b, v25.8b
SMLAL v16.4s, v27.4h, v4.h[0] // vacc4x0123 += vb0123 * va4[0]
SMLAL2 v17.4s, v27.8h, v4.h[0] // vacc4x4567 += vb4567 * va4[0]
SMLAL v18.4s, v27.4h, v5.h[0] // vacc5x0123 += vb0123 * va5[0]
SMLAL2 v19.4s, v27.8h, v5.h[0] // vacc5x4567 += vb4567 * va5[0]
SMLAL v20.4s, v27.4h, v6.h[0] // vacc6x0123 += vb0123 * va6[0]
SMLAL2 v21.4s, v27.8h, v6.h[0] // vacc6x4567 += vb4567 * va6[0]
SMLAL v22.4s, v27.4h, v7.h[0] // vacc7x0123 += vb0123 * va7[0]
SMLAL2 v23.4s, v27.8h, v7.h[0] // vacc7x4567 += vb4567 * va7[0]
// b0-7 (channel 2)
LD1 {v27.8b}, [x5], 8
SMLAL v8.4s, v28.4h, v0.h[1] // vacc0x0123 += vb0123 * va0[1]
SMLAL2 v9.4s, v28.8h, v0.h[1] // vacc0x4567 += vb4567 * va0[1]
SMLAL v10.4s, v28.4h, v1.h[1] // vacc1x0123 += vb0123 * va1[1]
SMLAL2 v11.4s, v28.8h, v1.h[1] // vacc1x4567 += vb4567 * va1[1]
SMLAL v12.4s, v28.4h, v2.h[1] // vacc2x0123 += vb0123 * va2[1]
SMLAL2 v13.4s, v28.8h, v2.h[1] // vacc2x4567 += vb4567 * va2[1]
SMLAL v14.4s, v28.4h, v3.h[1] // vacc3x0123 += vb0123 * va3[1]
SMLAL2 v15.4s, v28.8h, v3.h[1] // vacc3x4567 += vb4567 * va3[1]
USUBL v27.8h, v27.8b, v25.8b
SMLAL v16.4s, v28.4h, v4.h[1] // vacc4x0123 += vb0123 * va4[1]
SMLAL2 v17.4s, v28.8h, v4.h[1] // vacc4x4567 += vb4567 * va4[1]
SMLAL v18.4s, v28.4h, v5.h[1] // vacc5x0123 += vb0123 * va5[1]
SMLAL2 v19.4s, v28.8h, v5.h[1] // vacc5x4567 += vb4567 * va5[1]
SMLAL v20.4s, v28.4h, v6.h[1] // vacc6x0123 += vb0123 * va6[1]
SMLAL2 v21.4s, v28.8h, v6.h[1] // vacc6x4567 += vb4567 * va6[1]
SMLAL v22.4s, v28.4h, v7.h[1] // vacc7x0123 += vb0123 * va7[1]
SMLAL2 v23.4s, v28.8h, v7.h[1] // vacc7x4567 += vb4567 * va7[1]
// b0-7 (channel 3)
LD1 {v28.8b}, [x5], 8
SMLAL v8.4s, v27.4h, v0.h[2] // vacc0x0123 += vb0123 * va0[2]
SMLAL2 v9.4s, v27.8h, v0.h[2] // vacc0x4567 += vb4567 * va0[2]
SMLAL v10.4s, v27.4h, v1.h[2] // vacc1x0123 += vb0123 * va1[2]
SMLAL2 v11.4s, v27.8h, v1.h[2] // vacc1x4567 += vb4567 * va1[2]
SMLAL v12.4s, v27.4h, v2.h[2] // vacc2x0123 += vb0123 * va2[2]
SMLAL2 v13.4s, v27.8h, v2.h[2] // vacc2x4567 += vb4567 * va2[2]
SMLAL v14.4s, v27.4h, v3.h[2] // vacc3x0123 += vb0123 * va3[2]
SMLAL2 v15.4s, v27.8h, v3.h[2] // vacc3x4567 += vb4567 * va3[2]
USUBL v28.8h, v28.8b, v25.8b
SMLAL v16.4s, v27.4h, v4.h[2] // vacc4x0123 += vb0123 * va4[2]
SMLAL2 v17.4s, v27.8h, v4.h[2] // vacc4x4567 += vb4567 * va4[2]
SMLAL v18.4s, v27.4h, v5.h[2] // vacc5x0123 += vb0123 * va5[2]
SMLAL2 v19.4s, v27.8h, v5.h[2] // vacc5x4567 += vb4567 * va5[2]
SMLAL v20.4s, v27.4h, v6.h[2] // vacc6x0123 += vb0123 * va6[2]
SMLAL2 v21.4s, v27.8h, v6.h[2] // vacc6x4567 += vb4567 * va6[2]
SMLAL v22.4s, v27.4h, v7.h[2] // vacc7x0123 += vb0123 * va7[2]
SMLAL2 v23.4s, v27.8h, v7.h[2] // vacc7x4567 += vb4567 * va7[2]
// b0-7 (channel 4)
LD1 {v27.8b}, [x5], 8
SMLAL v8.4s, v28.4h, v0.h[3] // vacc0x0123 += vb0123 * va0[3]
SMLAL2 v9.4s, v28.8h, v0.h[3] // vacc0x4567 += vb4567 * va0[3]
SMLAL v10.4s, v28.4h, v1.h[3] // vacc1x0123 += vb0123 * va1[3]
SMLAL2 v11.4s, v28.8h, v1.h[3] // vacc1x4567 += vb4567 * va1[3]
SMLAL v12.4s, v28.4h, v2.h[3] // vacc2x0123 += vb0123 * va2[3]
SMLAL2 v13.4s, v28.8h, v2.h[3] // vacc2x4567 += vb4567 * va2[3]
SMLAL v14.4s, v28.4h, v3.h[3] // vacc3x0123 += vb0123 * va3[3]
SMLAL2 v15.4s, v28.8h, v3.h[3] // vacc3x4567 += vb4567 * va3[3]
USUBL v27.8h, v27.8b, v25.8b
SMLAL v16.4s, v28.4h, v4.h[3] // vacc4x0123 += vb0123 * va4[3]
SMLAL2 v17.4s, v28.8h, v4.h[3] // vacc4x4567 += vb4567 * va4[3]
SMLAL v18.4s, v28.4h, v5.h[3] // vacc5x0123 += vb0123 * va5[3]
SMLAL2 v19.4s, v28.8h, v5.h[3] // vacc5x4567 += vb4567 * va5[3]
SMLAL v20.4s, v28.4h, v6.h[3] // vacc6x0123 += vb0123 * va6[3]
SMLAL2 v21.4s, v28.8h, v6.h[3] // vacc6x4567 += vb4567 * va6[3]
SMLAL v22.4s, v28.4h, v7.h[3] // vacc7x0123 += vb0123 * va7[3]
SMLAL2 v23.4s, v28.8h, v7.h[3] // vacc7x4567 += vb4567 * va7[3]
// b0-7 (channel 5)
LD1 {v28.8b}, [x5], 8
SMLAL v8.4s, v27.4h, v0.h[4] // vacc0x0123 += vb0123 * va0[4]
SMLAL2 v9.4s, v27.8h, v0.h[4] // vacc0x4567 += vb4567 * va0[4]
SMLAL v10.4s, v27.4h, v1.h[4] // vacc1x0123 += vb0123 * va1[4]
SMLAL2 v11.4s, v27.8h, v1.h[4] // vacc1x4567 += vb4567 * va1[4]
SMLAL v12.4s, v27.4h, v2.h[4] // vacc2x0123 += vb0123 * va2[4]
SMLAL2 v13.4s, v27.8h, v2.h[4] // vacc2x4567 += vb4567 * va2[4]
SMLAL v14.4s, v27.4h, v3.h[4] // vacc3x0123 += vb0123 * va3[4]
SMLAL2 v15.4s, v27.8h, v3.h[4] // vacc3x4567 += vb4567 * va3[4]
USUBL v28.8h, v28.8b, v25.8b
SMLAL v16.4s, v27.4h, v4.h[4] // vacc4x0123 += vb0123 * va4[4]
SMLAL2 v17.4s, v27.8h, v4.h[4] // vacc4x4567 += vb4567 * va4[4]
SMLAL v18.4s, v27.4h, v5.h[4] // vacc5x0123 += vb0123 * va5[4]
SMLAL2 v19.4s, v27.8h, v5.h[4] // vacc5x4567 += vb4567 * va5[4]
SMLAL v20.4s, v27.4h, v6.h[4] // vacc6x0123 += vb0123 * va6[4]
SMLAL2 v21.4s, v27.8h, v6.h[4] // vacc6x4567 += vb4567 * va6[4]
SMLAL v22.4s, v27.4h, v7.h[4] // vacc7x0123 += vb0123 * va7[4]
SMLAL2 v23.4s, v27.8h, v7.h[4] // vacc7x4567 += vb4567 * va7[4]
// b0-7 (channel 6)
LD1 {v27.8b}, [x5], 8
SMLAL v8.4s, v28.4h, v0.h[5] // vacc0x0123 += vb0123 * va0[5]
SMLAL2 v9.4s, v28.8h, v0.h[5] // vacc0x4567 += vb4567 * va0[5]
SMLAL v10.4s, v28.4h, v1.h[5] // vacc1x0123 += vb0123 * va1[5]
SMLAL2 v11.4s, v28.8h, v1.h[5] // vacc1x4567 += vb4567 * va1[5]
SMLAL v12.4s, v28.4h, v2.h[5] // vacc2x0123 += vb0123 * va2[5]
SMLAL2 v13.4s, v28.8h, v2.h[5] // vacc2x4567 += vb4567 * va2[5]
SMLAL v14.4s, v28.4h, v3.h[5] // vacc3x0123 += vb0123 * va3[5]
SMLAL2 v15.4s, v28.8h, v3.h[5] // vacc3x4567 += vb4567 * va3[5]
USUBL v27.8h, v27.8b, v25.8b
SMLAL v16.4s, v28.4h, v4.h[5] // vacc4x0123 += vb0123 * va4[5]
SMLAL2 v17.4s, v28.8h, v4.h[5] // vacc4x4567 += vb4567 * va4[5]
SMLAL v18.4s, v28.4h, v5.h[5] // vacc5x0123 += vb0123 * va5[5]
SMLAL2 v19.4s, v28.8h, v5.h[5] // vacc5x4567 += vb4567 * va5[5]
SMLAL v20.4s, v28.4h, v6.h[5] // vacc6x0123 += vb0123 * va6[5]
SMLAL2 v21.4s, v28.8h, v6.h[5] // vacc6x4567 += vb4567 * va6[5]
SMLAL v22.4s, v28.4h, v7.h[5] // vacc7x0123 += vb0123 * va7[5]
SMLAL2 v23.4s, v28.8h, v7.h[5] // vacc7x4567 += vb4567 * va7[5]
// b0-7 (channel 7)
LD1 {v28.8b}, [x5], 8
SMLAL v8.4s, v27.4h, v0.h[6] // vacc0x0123 += vb0123 * va0[6]
SMLAL2 v9.4s, v27.8h, v0.h[6] // vacc0x4567 += vb4567 * va0[6]
SMLAL v10.4s, v27.4h, v1.h[6] // vacc1x0123 += vb0123 * va1[6]
SMLAL2 v11.4s, v27.8h, v1.h[6] // vacc1x4567 += vb4567 * va1[6]
SMLAL v12.4s, v27.4h, v2.h[6] // vacc2x0123 += vb0123 * va2[6]
SMLAL2 v13.4s, v27.8h, v2.h[6] // vacc2x4567 += vb4567 * va2[6]
SMLAL v14.4s, v27.4h, v3.h[6] // vacc3x0123 += vb0123 * va3[6]
SMLAL2 v15.4s, v27.8h, v3.h[6] // vacc3x4567 += vb4567 * va3[6]
USUBL v28.8h, v28.8b, v25.8b
SMLAL v16.4s, v27.4h, v4.h[6] // vacc4x0123 += vb0123 * va4[6]
SMLAL2 v17.4s, v27.8h, v4.h[6] // vacc4x4567 += vb4567 * va4[6]
SMLAL v18.4s, v27.4h, v5.h[6] // vacc5x0123 += vb0123 * va5[6]
SMLAL2 v19.4s, v27.8h, v5.h[6] // vacc5x4567 += vb4567 * va5[6]
SMLAL v20.4s, v27.4h, v6.h[6] // vacc6x0123 += vb0123 * va6[6]
SMLAL2 v21.4s, v27.8h, v6.h[6] // vacc6x4567 += vb4567 * va6[6]
SMLAL v22.4s, v27.4h, v7.h[6] // vacc7x0123 += vb0123 * va7[6]
SMLAL2 v23.4s, v27.8h, v7.h[6] // vacc7x4567 += vb4567 * va7[6]
SUBS x17, x17, 8
SMLAL v8.4s, v28.4h, v0.h[7] // vacc0x0123 += vb0123 * va0[7]
SMLAL2 v9.4s, v28.8h, v0.h[7] // vacc0x4567 += vb4567 * va0[7]
SMLAL v10.4s, v28.4h, v1.h[7] // vacc1x0123 += vb0123 * va1[7]
SMLAL2 v11.4s, v28.8h, v1.h[7] // vacc1x4567 += vb4567 * va1[7]
SMLAL v12.4s, v28.4h, v2.h[7] // vacc2x0123 += vb0123 * va2[7]
SMLAL2 v13.4s, v28.8h, v2.h[7] // vacc2x4567 += vb4567 * va2[7]
SMLAL v14.4s, v28.4h, v3.h[7] // vacc3x0123 += vb0123 * va3[7]
SMLAL2 v15.4s, v28.8h, v3.h[7] // vacc3x4567 += vb4567 * va3[7]
SMLAL v16.4s, v28.4h, v4.h[7] // vacc4x0123 += vb0123 * va4[7]
SMLAL2 v17.4s, v28.8h, v4.h[7] // vacc4x4567 += vb4567 * va4[7]
SMLAL v18.4s, v28.4h, v5.h[7] // vacc5x0123 += vb0123 * va5[7]
SMLAL2 v19.4s, v28.8h, v5.h[7] // vacc5x4567 += vb4567 * va5[7]
SMLAL v20.4s, v28.4h, v6.h[7] // vacc6x0123 += vb0123 * va6[7]
SMLAL2 v21.4s, v28.8h, v6.h[7] // vacc6x4567 += vb4567 * va6[7]
SMLAL v22.4s, v28.4h, v7.h[7] // vacc7x0123 += vb0123 * va7[7]
SMLAL2 v23.4s, v28.8h, v7.h[7] // vacc7x4567 += vb4567 * va7[7]
B.HS 0b
1:
CMP x17, -8
B.EQ 2f
// Adjust a0-a7
ADD x16, x16, x17
ADD x9, x9, x17
ADD x10, x10, x17
ADD x11, x11, x17
ADD x12, x12, x17
ADD x13, x13, x17
ADD x14, x14, x17
ADD x15, x15, x17
// a_shift = 8 * k - 64
LSL x17, x17, 3
FMOV d29, x17
USHL d31, d24, d29
// Load a0-a7
LD1 {v0.8b}, [x16], 8
USHL d0, d0, d29
SUB_ZERO_POINT v0.8h, v0.8b, v24.8b
LD1 {v1.8b}, [x9], 8
USHL d1, d1, d29
SUB_ZERO_POINT v1.8h, v1.8b, v24.8b
LD1 {v2.8b}, [x10], 8
USHL d2, d2, d29
SUB_ZERO_POINT v2.8h, v2.8b, v24.8b
LD1 {v3.8b}, [x11], 8
USHL d3, d3, d29
SUB_ZERO_POINT v3.8h, v3.8b, v24.8b
LD1 {v4.8b}, [x12], 8
USHL d4, d4, d29
SUB_ZERO_POINT v4.8h, v4.8b, v24.8b
LD1 {v5.8b}, [x13], 8
USHL d5, d5, d29
SUB_ZERO_POINT v5.8h, v5.8b, v24.8b
LD1 {v6.8b}, [x14], 8
USHL d6, d6, d29
SUB_ZERO_POINT v6.8h, v6.8b, v24.8b
LD1 {v7.8b}, [x15], 8
USHL d7, d7, d29
SUB_ZERO_POINT v7.8h, v7.8b, v24.8b
// Channel 0
LD1 {v27.8b}, [x5], 8
USUBL v27.8h, v27.8b, v25.8b
SMLAL v8.4s, v27.4h, v0.h[0] // vacc0x0123 += vb0123 * va0[0]
SMLAL2 v9.4s, v27.8h, v0.h[0] // vacc0x4567 += vb4567 * va0[0]
SMLAL v10.4s, v27.4h, v1.h[0] // vacc1x0123 += vb0123 * va1[0]
SMLAL2 v11.4s, v27.8h, v1.h[0] // vacc1x4567 += vb4567 * va1[0]
SMLAL v12.4s, v27.4h, v2.h[0] // vacc2x0123 += vb0123 * va2[0]
SMLAL2 v13.4s, v27.8h, v2.h[0] // vacc2x4567 += vb4567 * va2[0]
SMLAL v14.4s, v27.4h, v3.h[0] // vacc3x0123 += vb0123 * va3[0]
SMLAL2 v15.4s, v27.8h, v3.h[0] // vacc3x4567 += vb4567 * va3[0]
SMLAL v16.4s, v27.4h, v4.h[0] // vacc4x0123 += vb0123 * va4[0]
SMLAL2 v17.4s, v27.8h, v4.h[0] // vacc4x4567 += vb4567 * va4[0]
SMLAL v18.4s, v27.4h, v5.h[0] // vacc5x0123 += vb0123 * va5[0]
SMLAL2 v19.4s, v27.8h, v5.h[0] // vacc5x4567 += vb4567 * va5[0]
SMLAL v20.4s, v27.4h, v6.h[0] // vacc6x0123 += vb0123 * va6[0]
SMLAL2 v21.4s, v27.8h, v6.h[0] // vacc6x4567 += vb4567 * va6[0]
SMLAL v22.4s, v27.4h, v7.h[0] // vacc7x0123 += vb0123 * va7[0]
SMLAL2 v23.4s, v27.8h, v7.h[0] // vacc7x4567 += vb4567 * va7[0]
CMP x17, -48
B.LO 2f
// Channel 1
LD1 {v28.8b}, [x5], 8
USUBL v28.8h, v28.8b, v25.8b
SMLAL v8.4s, v28.4h, v0.h[1] // vacc0x0123 += vb0123 * va0[1]
SMLAL2 v9.4s, v28.8h, v0.h[1] // vacc0x4567 += vb4567 * va0[1]
SMLAL v10.4s, v28.4h, v1.h[1] // vacc1x0123 += vb0123 * va1[1]
SMLAL2 v11.4s, v28.8h, v1.h[1] // vacc1x4567 += vb4567 * va1[1]
SMLAL v12.4s, v28.4h, v2.h[1] // vacc2x0123 += vb0123 * va2[1]
SMLAL2 v13.4s, v28.8h, v2.h[1] // vacc2x4567 += vb4567 * va2[1]
SMLAL v14.4s, v28.4h, v3.h[1] // vacc3x0123 += vb0123 * va3[1]
SMLAL2 v15.4s, v28.8h, v3.h[1] // vacc3x4567 += vb4567 * va3[1]
SMLAL v16.4s, v28.4h, v4.h[1] // vacc4x0123 += vb0123 * va4[1]
SMLAL2 v17.4s, v28.8h, v4.h[1] // vacc4x4567 += vb4567 * va4[1]
SMLAL v18.4s, v28.4h, v5.h[1] // vacc5x0123 += vb0123 * va5[1]
SMLAL2 v19.4s, v28.8h, v5.h[1] // vacc5x4567 += vb4567 * va5[1]
SMLAL v20.4s, v28.4h, v6.h[1] // vacc6x0123 += vb0123 * va6[1]
SMLAL2 v21.4s, v28.8h, v6.h[1] // vacc6x4567 += vb4567 * va6[1]
SMLAL v22.4s, v28.4h, v7.h[1] // vacc7x0123 += vb0123 * va7[1]
SMLAL2 v23.4s, v28.8h, v7.h[1] // vacc7x4567 += vb4567 * va7[1]
B.LS 2f
// Channel 2
LD1 {v27.8b}, [x5], 8
USUBL v27.8h, v27.8b, v25.8b
SMLAL v8.4s, v27.4h, v0.h[2] // vacc0x0123 += vb0123 * va0[2]
SMLAL2 v9.4s, v27.8h, v0.h[2] // vacc0x4567 += vb4567 * va0[2]
SMLAL v10.4s, v27.4h, v1.h[2] // vacc1x0123 += vb0123 * va1[2]
SMLAL2 v11.4s, v27.8h, v1.h[2] // vacc1x4567 += vb4567 * va1[2]
SMLAL v12.4s, v27.4h, v2.h[2] // vacc2x0123 += vb0123 * va2[2]
SMLAL2 v13.4s, v27.8h, v2.h[2] // vacc2x4567 += vb4567 * va2[2]
SMLAL v14.4s, v27.4h, v3.h[2] // vacc3x0123 += vb0123 * va3[2]
SMLAL2 v15.4s, v27.8h, v3.h[2] // vacc3x4567 += vb4567 * va3[2]
SMLAL v16.4s, v27.4h, v4.h[2] // vacc4x0123 += vb0123 * va4[2]
SMLAL2 v17.4s, v27.8h, v4.h[2] // vacc4x4567 += vb4567 * va4[2]
SMLAL v18.4s, v27.4h, v5.h[2] // vacc5x0123 += vb0123 * va5[2]
SMLAL2 v19.4s, v27.8h, v5.h[2] // vacc5x4567 += vb4567 * va5[2]
SMLAL v20.4s, v27.4h, v6.h[2] // vacc6x0123 += vb0123 * va6[2]
SMLAL2 v21.4s, v27.8h, v6.h[2] // vacc6x4567 += vb4567 * va6[2]
SMLAL v22.4s, v27.4h, v7.h[2] // vacc7x0123 += vb0123 * va7[2]
SMLAL2 v23.4s, v27.8h, v7.h[2] // vacc7x4567 += vb4567 * va7[2]
CMP x17, -32
B.LO 2f
// Channel 3
LD1 {v28.8b}, [x5], 8
USUBL v28.8h, v28.8b, v25.8b
SMLAL v8.4s, v28.4h, v0.h[3] // vacc0x0123 += vb0123 * va0[3]
SMLAL2 v9.4s, v28.8h, v0.h[3] // vacc0x4567 += vb4567 * va0[3]
SMLAL v10.4s, v28.4h, v1.h[3] // vacc1x0123 += vb0123 * va1[3]
SMLAL2 v11.4s, v28.8h, v1.h[3] // vacc1x4567 += vb4567 * va1[3]
SMLAL v12.4s, v28.4h, v2.h[3] // vacc2x0123 += vb0123 * va2[3]
SMLAL2 v13.4s, v28.8h, v2.h[3] // vacc2x4567 += vb4567 * va2[3]
SMLAL v14.4s, v28.4h, v3.h[3] // vacc3x0123 += vb0123 * va3[3]
SMLAL2 v15.4s, v28.8h, v3.h[3] // vacc3x4567 += vb4567 * va3[3]
SMLAL v16.4s, v28.4h, v4.h[3] // vacc4x0123 += vb0123 * va4[3]
SMLAL2 v17.4s, v28.8h, v4.h[3] // vacc4x4567 += vb4567 * va4[3]
SMLAL v18.4s, v28.4h, v5.h[3] // vacc5x0123 += vb0123 * va5[3]
SMLAL2 v19.4s, v28.8h, v5.h[3] // vacc5x4567 += vb4567 * va5[3]
SMLAL v20.4s, v28.4h, v6.h[3] // vacc6x0123 += vb0123 * va6[3]
SMLAL2 v21.4s, v28.8h, v6.h[3] // vacc6x4567 += vb4567 * va6[3]
SMLAL v22.4s, v28.4h, v7.h[3] // vacc7x0123 += vb0123 * va7[3]
SMLAL2 v23.4s, v28.8h, v7.h[3] // vacc7x4567 += vb4567 * va7[3]
B.LS 2f
// Channel 4
LD1 {v27.8b}, [x5], 8
USUBL v27.8h, v27.8b, v25.8b
SMLAL v8.4s, v27.4h, v0.h[4] // vacc0x0123 += vb0123 * va0[4]
SMLAL2 v9.4s, v27.8h, v0.h[4] // vacc0x4567 += vb4567 * va0[4]
SMLAL v10.4s, v27.4h, v1.h[4] // vacc1x0123 += vb0123 * va1[4]
SMLAL2 v11.4s, v27.8h, v1.h[4] // vacc1x4567 += vb4567 * va1[4]
SMLAL v12.4s, v27.4h, v2.h[4] // vacc2x0123 += vb0123 * va2[4]
SMLAL2 v13.4s, v27.8h, v2.h[4] // vacc2x4567 += vb4567 * va2[4]
SMLAL v14.4s, v27.4h, v3.h[4] // vacc3x0123 += vb0123 * va3[4]
SMLAL2 v15.4s, v27.8h, v3.h[4] // vacc3x4567 += vb4567 * va3[4]
SMLAL v16.4s, v27.4h, v4.h[4] // vacc4x0123 += vb0123 * va4[4]
SMLAL2 v17.4s, v27.8h, v4.h[4] // vacc4x4567 += vb4567 * va4[4]
SMLAL v18.4s, v27.4h, v5.h[4] // vacc5x0123 += vb0123 * va5[4]
SMLAL2 v19.4s, v27.8h, v5.h[4] // vacc5x4567 += vb4567 * va5[4]
SMLAL v20.4s, v27.4h, v6.h[4] // vacc6x0123 += vb0123 * va6[4]
SMLAL2 v21.4s, v27.8h, v6.h[4] // vacc6x4567 += vb4567 * va6[4]
SMLAL v22.4s, v27.4h, v7.h[4] // vacc7x0123 += vb0123 * va7[4]
SMLAL2 v23.4s, v27.8h, v7.h[4] // vacc7x4567 += vb4567 * va7[4]
CMP x17, -16
B.LO 2f
// Channel 5
LD1 {v28.8b}, [x5], 8
USUBL v28.8h, v28.8b, v25.8b
SMLAL v8.4s, v28.4h, v0.h[5] // vacc0x0123 += vb0123 * va0[5]
SMLAL2 v9.4s, v28.8h, v0.h[5] // vacc0x4567 += vb4567 * va0[5]
SMLAL v10.4s, v28.4h, v1.h[5] // vacc1x0123 += vb0123 * va1[5]
SMLAL2 v11.4s, v28.8h, v1.h[5] // vacc1x4567 += vb4567 * va1[5]
SMLAL v12.4s, v28.4h, v2.h[5] // vacc2x0123 += vb0123 * va2[5]
SMLAL2 v13.4s, v28.8h, v2.h[5] // vacc2x4567 += vb4567 * va2[5]
SMLAL v14.4s, v28.4h, v3.h[5] // vacc3x0123 += vb0123 * va3[5]
SMLAL2 v15.4s, v28.8h, v3.h[5] // vacc3x4567 += vb4567 * va3[5]
SMLAL v16.4s, v28.4h, v4.h[5] // vacc4x0123 += vb0123 * va4[5]
SMLAL2 v17.4s, v28.8h, v4.h[5] // vacc4x4567 += vb4567 * va4[5]
SMLAL v18.4s, v28.4h, v5.h[5] // vacc5x0123 += vb0123 * va5[5]
SMLAL2 v19.4s, v28.8h, v5.h[5] // vacc5x4567 += vb4567 * va5[5]
SMLAL v20.4s, v28.4h, v6.h[5] // vacc6x0123 += vb0123 * va6[5]
SMLAL2 v21.4s, v28.8h, v6.h[5] // vacc6x4567 += vb4567 * va6[5]
SMLAL v22.4s, v28.4h, v7.h[5] // vacc7x0123 += vb0123 * va7[5]
SMLAL2 v23.4s, v28.8h, v7.h[5] // vacc7x4567 += vb4567 * va7[5]
B.LS 2f
// Channel 6
LD1 {v27.8b}, [x5], 8
USUBL v27.8h, v27.8b, v25.8b
SMLAL v8.4s, v27.4h, v0.h[6] // vacc0x0123 += vb0123 * va0[6]
SMLAL2 v9.4s, v27.8h, v0.h[6] // vacc0x4567 += vb4567 * va0[6]
SMLAL v10.4s, v27.4h, v1.h[6] // vacc1x0123 += vb0123 * va1[6]
SMLAL2 v11.4s, v27.8h, v1.h[6] // vacc1x4567 += vb4567 * va1[6]
SMLAL v12.4s, v27.4h, v2.h[6] // vacc2x0123 += vb0123 * va2[6]
SMLAL2 v13.4s, v27.8h, v2.h[6] // vacc2x4567 += vb4567 * va2[6]
SMLAL v14.4s, v27.4h, v3.h[6] // vacc3x0123 += vb0123 * va3[6]
SMLAL2 v15.4s, v27.8h, v3.h[6] // vacc3x4567 += vb4567 * va3[6]
SMLAL v16.4s, v27.4h, v4.h[6] // vacc4x0123 += vb0123 * va4[6]
SMLAL2 v17.4s, v27.8h, v4.h[6] // vacc4x4567 += vb4567 * va4[6]
SMLAL v18.4s, v27.4h, v5.h[6] // vacc5x0123 += vb0123 * va5[6]
SMLAL2 v19.4s, v27.8h, v5.h[6] // vacc5x4567 += vb4567 * va5[6]
SMLAL v20.4s, v27.4h, v6.h[6] // vacc6x0123 += vb0123 * va6[6]
SMLAL2 v21.4s, v27.8h, v6.h[6] // vacc6x4567 += vb4567 * va6[6]
SMLAL v22.4s, v27.4h, v7.h[6] // vacc7x0123 += vb0123 * va7[6]
SMLAL2 v23.4s, v27.8h, v7.h[6] // vacc7x4567 += vb4567 * va7[6]
#ifndef IGNORE_CODE_ALIGN_DIRECTIVES
.p2align 4
#endif
2:
SUB x3, x3, 1
CBNZ x3, 3b
// Load zero_point:
// - v29 = vzero_point
LD1R {v29.8h}, [x8], 2
SCVTF v8.4s, v8.4s
SCVTF v9.4s, v9.4s
SCVTF v10.4s, v10.4s
SCVTF v11.4s, v11.4s
SCVTF v12.4s, v12.4s
SCVTF v13.4s, v13.4s
SCVTF v14.4s, v14.4s
SCVTF v15.4s, v15.4s
SCVTF v16.4s, v16.4s
SCVTF v17.4s, v17.4s
SCVTF v18.4s, v18.4s
SCVTF v19.4s, v19.4s
SCVTF v20.4s, v20.4s
SCVTF v21.4s, v21.4s
SCVTF v22.4s, v22.4s
SCVTF v23.4s, v23.4s
FMUL v8.4s, v8.4s, v26.4s
FMUL v9.4s, v9.4s, v30.4s
FMUL v10.4s, v10.4s, v26.4s
FMUL v11.4s, v11.4s, v30.4s
FMUL v12.4s, v12.4s, v26.4s
FMUL v13.4s, v13.4s, v30.4s
FMUL v14.4s, v14.4s, v26.4s
FMUL v15.4s, v15.4s, v30.4s
FMUL v16.4s, v16.4s, v26.4s
FMUL v17.4s, v17.4s, v30.4s
FMUL v18.4s, v18.4s, v26.4s
FMUL v19.4s, v19.4s, v30.4s
FMUL v20.4s, v20.4s, v26.4s
FMUL v21.4s, v21.4s, v30.4s
FMUL v22.4s, v22.4s, v26.4s
FMUL v23.4s, v23.4s, v30.4s
// Load max:
// - v30 = vmax
LD1R {v30.16b}, [x8], 1
// Load min:
// - v31 = vmin
LD1R {v31.16b}, [x8]
FCVTNS v8.4s, v8.4s
FCVTNS v9.4s, v9.4s
FCVTNS v10.4s, v10.4s
FCVTNS v11.4s, v11.4s
FCVTNS v12.4s, v12.4s
FCVTNS v13.4s, v13.4s
FCVTNS v14.4s, v14.4s
FCVTNS v15.4s, v15.4s
FCVTNS v16.4s, v16.4s
FCVTNS v17.4s, v17.4s
FCVTNS v18.4s, v18.4s
FCVTNS v19.4s, v19.4s
FCVTNS v20.4s, v20.4s
FCVTNS v21.4s, v21.4s
FCVTNS v22.4s, v22.4s
FCVTNS v23.4s, v23.4s
SQXTN v8.4h, v8.4s
SQXTN v10.4h, v10.4s
SQXTN v12.4h, v12.4s
SQXTN v14.4h, v14.4s
SQXTN v16.4h, v16.4s
SQXTN v18.4h, v18.4s
SQXTN v20.4h, v20.4s
SQXTN v22.4h, v22.4s
SQXTN2 v8.8h, v9.4s
SQXTN2 v10.8h, v11.4s
SQXTN2 v12.8h, v13.4s
SQXTN2 v14.8h, v15.4s
SQXTN2 v16.8h, v17.4s
SQXTN2 v18.8h, v19.4s
SQXTN2 v20.8h, v21.4s
SQXTN2 v22.8h, v23.4s
SQADD v8.8h, v8.8h, v29.8h
SQADD v10.8h, v10.8h, v29.8h
SQADD v12.8h, v12.8h, v29.8h
SQADD v14.8h, v14.8h, v29.8h
SQADD v16.8h, v16.8h, v29.8h
SQADD v18.8h, v18.8h, v29.8h
SQADD v20.8h, v20.8h, v29.8h
SQADD v22.8h, v22.8h, v29.8h
SQXTUN v8.8b, v8.8h
SQXTUN v12.8b, v12.8h
SQXTUN v16.8b, v16.8h
SQXTUN v20.8b, v20.8h
SQXTUN2 v8.16b, v10.8h
SQXTUN2 v12.16b, v14.8h
SQXTUN2 v16.16b, v18.8h
SQXTUN2 v20.16b, v22.8h
UMIN v8.16b, v8.16b, v30.16b
UMIN v12.16b, v12.16b, v30.16b
UMIN v16.16b, v16.16b, v30.16b
UMIN v20.16b, v20.16b, v30.16b
UMAX v8.16b, v8.16b, v31.16b
UMAX v12.16b, v12.16b, v31.16b
UMAX v16.16b, v16.16b, v31.16b
UMAX v20.16b, v20.16b, v31.16b
// Compute c0-c7
ADD x9, x6, x7
CMP x0, 2
CSEL x9, x6, x9, LO
ADD x10, x9, x7
CSEL x10, x9, x10, LS
ADD x11, x10, x7
CMP x0, 4
CSEL x11, x10, x11, LO
ADD x12, x11, x7
CSEL x12, x11, x12, LS
ADD x13, x12, x7
CMP x0, 6
CSEL x13, x12, x13, LO
ADD x14, x13, x7
CSEL x14, x13, x14, LS
ADD x15, x14, x7
CMP x0, 8
CSEL x15, x14, x15, NE
CMP x1, 8
B.NE 4f
// Store results
ST1 {v8.d}[0], [x6]
ST1 {v8.d}[1], [x9]
ST1 {v12.d}[0], [x10]
ST1 {v12.d}[1], [x11]
ST1 {v16.d}[0], [x12]
ST1 {v16.d}[1], [x13]
ST1 {v20.d}[0], [x14]
ST1 {v20.d}[1], [x15]
LDP d9, d8, [sp, -64]
LDP d11, d10, [sp, -48]
LDP d13, d12, [sp, -32]
LDP d15, d14, [sp, -16]
RET
#ifndef IGNORE_CODE_ALIGN_DIRECTIVES
.p2align 3
#endif
4:
CMP x1, 4
B.LO 5f
ST1 {v8.s}[0], [x6], 4
ST1 {v8.s}[2], [x9], 4
ST1 {v12.s}[0], [x10], 4
ST1 {v12.s}[2], [x11], 4
ST1 {v16.s}[0], [x12], 4
ST1 {v16.s}[2], [x13], 4
ST1 {v20.s}[0], [x14], 4
ST1 {v20.s}[2], [x15], 4
SUB x1, x1, 4
EXT v8.16b, v8.16b, v8.16b, 4
EXT v12.16b, v12.16b, v12.16b, 4
EXT v16.16b, v16.16b, v16.16b, 4
EXT v20.16b, v20.16b, v20.16b, 4
5:
CMP x1, 2
B.LO 6f
ST1 {v8.h}[0], [x6], 2
ST1 {v8.h}[4], [x9], 2
ST1 {v12.h}[0], [x10], 2
ST1 {v12.h}[4], [x11], 2
ST1 {v16.h}[0], [x12], 2
ST1 {v16.h}[4], [x13], 2
ST1 {v20.h}[0], [x14], 2
ST1 {v20.h}[4], [x15], 2
SUB x1, x1, 2
EXT v8.16b, v8.16b, v8.16b, 2
EXT v12.16b, v12.16b, v12.16b, 2
EXT v16.16b, v16.16b, v16.16b, 2
EXT v20.16b, v20.16b, v20.16b, 2
6:
CMP x1, 1
B.LO 7f
ST1 {v8.b}[0], [x6]
ST1 {v8.b}[8], [x9]
ST1 {v12.b}[0], [x10]
ST1 {v12.b}[8], [x11]
ST1 {v16.b}[0], [x12]
ST1 {v16.b}[8], [x13]
ST1 {v20.b}[0], [x14]
ST1 {v20.b}[8], [x15]
7:
LDP d9, d8, [sp, -64]
LDP d11, d10, [sp, -48]
LDP d13, d12, [sp, -32]
LDP d15, d14, [sp, -16]
RET
END_FUNCTION pytorch_q8conv_ukernel_8x8__aarch64_neon
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
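# Because `a` is a pointer array (const uint8_t**), the outer ks loop at
# label 3: above pulls 8 fresh row pointers per pass -- the indirection
# buffer that lets convolution reuse a GEMM-shaped inner loop. Driver
# structure in C (illustrative sketch):
#
#   #include <stddef.h>
#   #include <stdint.h>
#   static void outer_ks_loop(const uint8_t** a, size_t ks) {
#     for (size_t s = 0; s < ks; s++) {    /* one pass of label 3: per tap */
#       const uint8_t* a0 = a[8 * s + 0];  /* LDR x16, [x4], 8 */
#       /* ... a1..a7 likewise, then the kc-deep MAC loop over channels */
#       (void)a0;
#     }
#   }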
# Repo: KAVYANSHTYAGI/pytorch (18,255 bytes)
# File: aten/src/ATen/native/quantized/cpu/qnnpack/src/q8conv/4x8-aarch32-neon.S
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
#include <requantization/runtime-assembly.h>
.syntax unified
# Args passed via 4 registers (16 bytes)
# r0: mr
# r1: nr
# r2: kc
# r3: ks
#
# Args passed via stack.
# TOS
# |-----------|
# |a | 0
# |w | 4
# |c | 8
# |c_stride | 12
# |out ch indx| 16
# |params | 20
# |-----------|
#
# After loading w pointer in ip reg.
# And after pushing r4-r11 and d8-d15 on stack
# |-----------|
# |d8 - d15 | 0
# |r4 - r11 | 64
# |a | 96
# |w | 100
# |c | 104
# |c_stride | 108
# |out ch indx| 112
# |params | 116
# |-----------|
#
# void pytorch_q8conv_ukernel_4x8__aarch32_neon(
# size_t mr,
# size_t nr,
# size_t kc,
# size_t ks,
# const uint8_t** restrict a,
# const void* restrict w,
# uint8_t* restrict c,
# size_t c_stride,
# size_t output_channel_index,
# const union pytorch_qnnp_conv_quantization_params quantization_params[restrict static 1])
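# SUB_ZERO_POINT comes from requantization/runtime-assembly.h; with runtime
# quantization enabled it is essentially VSUBL.U8 (widen each uint8 activation
# to int16 and subtract the activation zero point), otherwise it only widens.
# Per-lane C sketch (illustrative; the build-dependent behavior is an
# assumption based on that header's usage here):
#
#   #include <stdint.h>
#   static inline int16_t sub_zero_point(uint8_t a, uint8_t a_zp) {
#     return (int16_t)a - (int16_t)a_zp; /* widening subtract, no wraparound */
#   }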
BEGIN_FUNCTION pytorch_q8conv_ukernel_4x8__aarch32_neon
.arm
#ifndef __APPLE__
.arch armv7-a
.fpu neon
#endif
# Load w
# - ip = w
LDR ip, [sp, 4]
PUSH {r4, r5, r6, r7, r8, r9, r10, r11}
# Load params:
# - r9 = params
LDR r9, [sp, 52]
VPUSH {d8-d15}
# Load bias0123, bias4567
VLDM ip!, {d16-d19}
# Load a
# - r8 = a
LDR r8, [sp, 96]
# Load output channel index
LDR r5, [sp, 112]
ADD r7, r9, 4
# Load pointer to per channel zero points array
LDR r4, [r9], 8
# Load pointer to per channel requant scale
# add 8 bytes to get to vfmax
LDR r11, [r9], 8
# Load a_zero_point:
# - d14 = a_zero_point
VLD1.8 {d14[]}, [r7]
# Byte offset of output channel index for requant scale.
LSL r6, r5, 2
# Add offset to the base pointer
ADD r5, r4, r5
# Store in r11 pointer from where to load requant scale.
ADD r11, r11, r6
# q10 := vacc1x0123
VMOV.I32 q10, q8
# q11 := vacc1x4567
VMOV.I32 q11, q9
# q12 := vacc2x0123
VMOV.I32 q12, q8
# q13 := vacc2x4567
VMOV.I32 q13, q9
# q14 := vacc3x0123
VMOV.I32 q14, q8
# Load b_zero_point:
# - d15 = b_zero_point
VLD1.8 {d15}, [r5]
# q15 := vacc3x4567
VMOV.I32 q15, q9
.p2align 5
0:
SUBS r10, r2, 8
# Load a0, a1, a2, a3
# - r4 = a0
# - r5 = a1
# - r6 = a2
# - r7 = a3
LDM r8!, {r4-r7}
BLO 2f
1:
# Load va0
# - d1 = va0
VLD1.8 {d1}, [r4]!
# Load va1
# - d3 = va1
VLD1.8 {d3}, [r5]!
# Load vb0-vb7 (channel 0)
# - d9 = vb0-vb7
VLD1.8 {d9}, [ip:64]!
# Load va2
# - d5 = va2
VLD1.8 {d5}, [r6]!
# q0 = va0 = a0
SUB_ZERO_POINT q0, d1, d14
# Load va3
# - d7 = va3
VLD1.8 {d7}, [r7]!
# q1 = va1 = a1
SUB_ZERO_POINT q1, d3, d14
# q4 = b0:7 - vb_zero_point
# - d8 = vb0123 (channel 0)
# - d9 = vb4567 (channel 0)
VSUBL.U8 q4, d9, d15
# q2 = va2 = a2
SUB_ZERO_POINT q2, d5, d14
# q3 = va3 = a3
SUB_ZERO_POINT q3, d7, d14
### Channel 0 ###
# Load b0-b7 (channel 1)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# vacc0x0123 += vb0123 * va0[0]
VMLAL.S16 q8, d8, d0[0]
# vacc0x4567 += vb4567 * va0[0]
VMLAL.S16 q9, d9, d0[0]
# vacc1x0123 += vb0123 * va1[0]
VMLAL.S16 q10, d8, d2[0]
# vacc1x4567 += vb4567 * va1[0]
VMLAL.S16 q11, d9, d2[0]
# vacc2x0123 += vb0123 * va2[0]
VMLAL.S16 q12, d8, d4[0]
# vacc2x4567 += vb4567 * va2[0]
VMLAL.S16 q13, d9, d4[0]
# q5 = b0:7 - vb_zero_point
# - d10 = vb0123 (channel 1)
# - d11 = vb4567 (channel 1)
VSUBL.U8 q5, d11, d15
# vacc3x0123 += vb0123 * va3[0]
VMLAL.S16 q14, d8, d6[0]
# vacc3x4567 += vb4567 * va3[0]
VMLAL.S16 q15, d9, d6[0]
### Channel 1 ###
# Load b0-b7 (channel 2)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# vacc0x0123 += vb0123 * va0[1]
VMLAL.S16 q8, d10, d0[1]
# vacc0x4567 += vb4567 * va0[1]
VMLAL.S16 q9, d11, d0[1]
# vacc1x0123 += vb0123 * va1[1]
VMLAL.S16 q10, d10, d2[1]
# vacc1x4567 += vb4567 * va1[1]
VMLAL.S16 q11, d11, d2[1]
# vacc2x0123 += vb0123 * va2[1]
VMLAL.S16 q12, d10, d4[1]
# vacc2x4567 += vb4567 * va2[1]
VMLAL.S16 q13, d11, d4[1]
# q4 = b0:7 - vb_zero_point
# - d8 = vb0123 (channel 2)
# - d9 = vb4567 (channel 2)
VSUBL.U8 q4, d9, d15
# vacc3x0123 += vb0123 * va3[1]
VMLAL.S16 q14, d10, d6[1]
# vacc3x4567 += vb4567 * va3[1]
VMLAL.S16 q15, d11, d6[1]
### Channel 2 ###
# Load b0-b7 (channel 3)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# vacc0x0123 += vb0123 * va0[2]
VMLAL.S16 q8, d8, d0[2]
# vacc0x4567 += vb4567 * va0[2]
VMLAL.S16 q9, d9, d0[2]
# vacc1x0123 += vb0123 * va1[2]
VMLAL.S16 q10, d8, d2[2]
# vacc1x4567 += vb4567 * va1[2]
VMLAL.S16 q11, d9, d2[2]
# vacc2x0123 += vb0123 * va2[2]
VMLAL.S16 q12, d8, d4[2]
# vacc2x4567 += vb4567 * va2[2]
VMLAL.S16 q13, d9, d4[2]
# q5 = b0:7 - vb_zero_point
# - d10 = vb0123 (channel 3)
# - d11 = vb4567 (channel 3)
VSUBL.U8 q5, d11, d15
# vacc3x0123 += vb0123 * va3[2]
VMLAL.S16 q14, d8, d6[2]
# vacc3x4567 += vb4567 * va3[2]
VMLAL.S16 q15, d9, d6[2]
### Channel 3 ###
# Load b0-b7 (channel 4)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# vacc0x0123 += vb0123 * va0[3]
VMLAL.S16 q8, d10, d0[3]
# vacc0x4567 += vb4567 * va0[3]
VMLAL.S16 q9, d11, d0[3]
# vacc1x0123 += vb0123 * va1[3]
VMLAL.S16 q10, d10, d2[3]
# vacc1x4567 += vb4567 * va1[3]
VMLAL.S16 q11, d11, d2[3]
# vacc2x0123 += vb0123 * va2[3]
VMLAL.S16 q12, d10, d4[3]
# vacc2x4567 += vb4567 * va2[3]
VMLAL.S16 q13, d11, d4[3]
# q4 = b0:7 - vb_zero_point
# - d8 = vb0123 (channel 4)
# - d9 = vb4567 (channel 4)
VSUBL.U8 q4, d9, d15
# vacc3x0123 += vb0123 * va3[3]
VMLAL.S16 q14, d10, d6[3]
# vacc3x4567 += vb4567 * va3[3]
VMLAL.S16 q15, d11, d6[3]
### Channel 4 ###
# Load b0-b7 (channel 5)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# vacc0x0123 += vb0123 * va0[4]
VMLAL.S16 q8, d8, d1[0]
# vacc0x4567 += vb4567 * va0[4]
VMLAL.S16 q9, d9, d1[0]
# vacc1x0123 += vb0123 * va1[4]
VMLAL.S16 q10, d8, d3[0]
# vacc1x4567 += vb4567 * va1[4]
VMLAL.S16 q11, d9, d3[0]
# vacc2x0123 += vb0123 * va2[4]
VMLAL.S16 q12, d8, d5[0]
# vacc2x4567 += vb4567 * va2[4]
VMLAL.S16 q13, d9, d5[0]
# q5 = b0:7 - vb_zero_point
# - d10 = vb0123 (channel 5)
# - d11 = vb4567 (channel 5)
VSUBL.U8 q5, d11, d15
# vacc3x0123 += vb0123 * va3[4]
VMLAL.S16 q14, d8, d7[0]
# vacc3x4567 += vb4567 * va3[4]
VMLAL.S16 q15, d9, d7[0]
### Channel 5 ###
# Load b0-b7 (channel 6)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# vacc0x0123 += vb0123 * va0[5]
VMLAL.S16 q8, d10, d1[1]
# vacc0x4567 += vb4567 * va0[5]
VMLAL.S16 q9, d11, d1[1]
# vacc1x0123 += vb0123 * va1[5]
VMLAL.S16 q10, d10, d3[1]
# vacc1x4567 += vb4567 * va1[5]
VMLAL.S16 q11, d11, d3[1]
# vacc2x0123 += vb0123 * va2[5]
VMLAL.S16 q12, d10, d5[1]
# vacc2x4567 += vb4567 * va2[5]
VMLAL.S16 q13, d11, d5[1]
# q4 = b0:7 - vb_zero_point
# - d8 = vb0123 (channel 6)
# - d9 = vb4567 (channel 6)
VSUBL.U8 q4, d9, d15
# vacc3x0123 += vb0123 * va3[5]
VMLAL.S16 q14, d10, d7[1]
# vacc3x4567 += vb4567 * va3[5]
VMLAL.S16 q15, d11, d7[1]
### Channel 6 ###
# Load b0-b7 (channel 7)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# vacc0x0123 += vb0123 * va0[6]
VMLAL.S16 q8, d8, d1[2]
# vacc0x4567 += vb4567 * va0[6]
VMLAL.S16 q9, d9, d1[2]
# vacc1x0123 += vb0123 * va1[6]
VMLAL.S16 q10, d8, d3[2]
# vacc1x4567 += vb4567 * va1[6]
VMLAL.S16 q11, d9, d3[2]
# vacc2x0123 += vb0123 * va2[6]
VMLAL.S16 q12, d8, d5[2]
# q5 = b0:7 - vb_zero_point
# - d10 = vb0123 (channel 7)
# - d11 = vb4567 (channel 7)
VSUBL.U8 q5, d11, d15
# vacc2x4567 += vb4567 * va2[6]
VMLAL.S16 q13, d9, d5[2]
# vacc3x0123 += vb0123 * va3[6]
VMLAL.S16 q14, d8, d7[2]
# vacc3x4567 += vb4567 * va3[6]
VMLAL.S16 q15, d9, d7[2]
### Channel 7 ###
SUBS r10, r10, 8
# vacc0x0123 += vb0123 * va0[7]
VMLAL.S16 q8, d10, d1[3]
# vacc0x4567 += vb4567 * va0[7]
VMLAL.S16 q9, d11, d1[3]
# vacc1x0123 += vb0123 * va1[7]
VMLAL.S16 q10, d10, d3[3]
# vacc1x4567 += vb4567 * va1[7]
VMLAL.S16 q11, d11, d3[3]
# vacc2x0123 += vb0123 * va2[7]
VMLAL.S16 q12, d10, d5[3]
# vacc2x4567 += vb4567 * va2[7]
VMLAL.S16 q13, d11, d5[3]
# vacc3x0123 += vb0123 * va3[7]
VMLAL.S16 q14, d10, d7[3]
# vacc3x4567 += vb4567 * va3[7]
VMLAL.S16 q15, d11, d7[3]
BHS 1b
2:
CMP r10, -8
BEQ 3f
# Adjust a0, a1, a2, a3
ADD r4, r10
ADD r5, r10
ADD r6, r10
ADD r7, r10
# a_shift = 8 * k - 64
LSL r10, r10, 3
VDUP.32 d13, r10
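# Remainder trick: r10 holds 8*k - 64, a negative bit count. The pointer
# adjustments above rewound each row by 8-k bytes, so the k valid bytes
# sit in the high lanes of each loaded doubleword; VSHL.U64 with this
# negative count shifts them right into the low lanes and zero-fills the
# rest (e.g. k = 3 gives a shift of -40, keeping bytes in lanes 0-2).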
# Load va0
# - d1 = va0
VLD1.8 {d1}, [r4]
# Load va1
# - d3 = va1
VLD1.8 {d3}, [r5]
# Load b0-b7 (channel 0)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# Load a2
# - d5 = a2
VLD1.8 {d5}, [r6]
# q0 = va0 = a0
VSHL.U64 d1, d1, d13
SUB_ZERO_POINT q0, d1, d14
# Load a3
# - d7 = a3
VLD1.8 {d7}, [r7]
# q1 = va1 = a1
VSHL.U64 d3, d3, d13
SUB_ZERO_POINT q1, d3, d14
# q4 = b0:7 - vb_zero_point
# - d8 = vb0123 (channel 0)
# - d9 = vb4567 (channel 0)
VSUBL.U8 q4, d9, d15
# q2 = va2 = a2
VSHL.U64 d5, d5, d13
SUB_ZERO_POINT q2, d5, d14
# q3 = va3 = a3
VSHL.U64 d7, d7, d13
SUB_ZERO_POINT q3, d7, d14
### Channel 0 ###
# vacc0x0123 += vb0123 * va0[0]
VMLAL.S16 q8, d8, d0[0]
# vacc0x4567 += vb4567 * va0[0]
VMLAL.S16 q9, d9, d0[0]
# vacc1x0123 += vb0123 * va1[0]
VMLAL.S16 q10, d8, d2[0]
# vacc1x4567 += vb4567 * va1[0]
VMLAL.S16 q11, d9, d2[0]
# vacc2x0123 += vb0123 * va2[0]
VMLAL.S16 q12, d8, d4[0]
# vacc2x4567 += vb4567 * va2[0]
VMLAL.S16 q13, d9, d4[0]
# vacc3x0123 += vb0123 * va3[0]
VMLAL.S16 q14, d8, d6[0]
# vacc3x4567 += vb4567 * va3[0]
VMLAL.S16 q15, d9, d6[0]
CMP r10, -48
BLO 3f
### Channel 1 ###
# Load b0-b7 (channel 1)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# q5 = b0:7 - vb_zero_point
# - d10 = vb0123 (channel 1)
# - d11 = vb4567 (channel 1)
VSUBL.U8 q5, d11, d15
# vacc0x0123 += vb0123 * va0[1]
VMLAL.S16 q8, d10, d0[1]
# vacc0x4567 += vb4567 * va0[1]
VMLAL.S16 q9, d11, d0[1]
# vacc1x0123 += vb0123 * va1[1]
VMLAL.S16 q10, d10, d2[1]
# vacc1x4567 += vb4567 * va1[1]
VMLAL.S16 q11, d11, d2[1]
# vacc2x0123 += vb0123 * va2[1]
VMLAL.S16 q12, d10, d4[1]
# vacc2x4567 += vb4567 * va2[1]
VMLAL.S16 q13, d11, d4[1]
# vacc3x0123 += vb0123 * va3[1]
VMLAL.S16 q14, d10, d6[1]
# vacc3x4567 += vb4567 * va3[1]
VMLAL.S16 q15, d11, d6[1]
### Channel 2 ###
BLS 3f
# Load b0-b7 (channel 2)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# q4 = b0:7 - vb_zero_point
# - d8 = vb0123 (channel 2)
# - d9 = vb4567 (channel 2)
VSUBL.U8 q4, d9, d15
# vacc0x0123 += vb0123 * va0[2]
VMLAL.S16 q8, d8, d0[2]
# vacc0x4567 += vb4567 * va0[2]
VMLAL.S16 q9, d9, d0[2]
# vacc1x0123 += vb0123 * va1[2]
VMLAL.S16 q10, d8, d2[2]
# vacc1x4567 += vb4567 * va1[2]
VMLAL.S16 q11, d9, d2[2]
# vacc2x0123 += vb0123 * va2[2]
VMLAL.S16 q12, d8, d4[2]
# vacc2x4567 += vb4567 * va2[2]
VMLAL.S16 q13, d9, d4[2]
# vacc3x0123 += vb0123 * va3[2]
VMLAL.S16 q14, d8, d6[2]
# vacc3x4567 += vb4567 * va3[2]
VMLAL.S16 q15, d9, d6[2]
### Channel 3 ###
CMP r10, -32
BLO 3f
# Load b0-b7 (channel 3)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# q5 = b0:7 - vb_zero_point
# - d10 = vb0123 (channel 3)
# - d11 = vb4567 (channel 3)
VSUBL.U8 q5, d11, d15
# vacc0x0123 += vb0123 * va0[3]
VMLAL.S16 q8, d10, d0[3]
# vacc0x4567 += vb4567 * va0[3]
VMLAL.S16 q9, d11, d0[3]
# vacc1x0123 += vb0123 * va1[3]
VMLAL.S16 q10, d10, d2[3]
# vacc1x4567 += vb4567 * va1[3]
VMLAL.S16 q11, d11, d2[3]
# vacc2x0123 += vb0123 * va2[3]
VMLAL.S16 q12, d10, d4[3]
# vacc2x4567 += vb4567 * va2[3]
VMLAL.S16 q13, d11, d4[3]
# vacc3x0123 += vb0123 * va3[3]
VMLAL.S16 q14, d10, d6[3]
# vacc3x4567 += vb4567 * va3[3]
VMLAL.S16 q15, d11, d6[3]
### Channel 4 ###
BLS 3f
# Load b0-b7 (channel 4)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# q4 = b0:7 - vb_zero_point
# - d8 = vb0123 (channel 4)
# - d9 = vb4567 (channel 4)
VSUBL.U8 q4, d9, d15
# vacc0x0123 += vb0123 * va0[4]
VMLAL.S16 q8, d8, d1[0]
# vacc0x4567 += vb4567 * va0[4]
VMLAL.S16 q9, d9, d1[0]
# vacc1x0123 += vb0123 * va1[4]
VMLAL.S16 q10, d8, d3[0]
# vacc1x4567 += vb4567 * va1[4]
VMLAL.S16 q11, d9, d3[0]
# vacc2x0123 += vb0123 * va2[4]
VMLAL.S16 q12, d8, d5[0]
# vacc2x4567 += vb4567 * va2[4]
VMLAL.S16 q13, d9, d5[0]
# vacc3x0123 += vb0123 * va3[4]
VMLAL.S16 q14, d8, d7[0]
# vacc3x4567 += vb4567 * va3[4]
VMLAL.S16 q15, d9, d7[0]
### Channel 5 ###
CMP r10, -16
BLO 3f
# Load b0-b7 (channel 5)
# - d11 = b0-b7
VLD1.8 {d11}, [ip:64]!
# q5 = b0:7 - vb_zero_point
# - d10 = vb0123 (channel 5)
# - d11 = vb4567 (channel 5)
VSUBL.U8 q5, d11, d15
# vacc0x0123 += vb0123 * va0[5]
VMLAL.S16 q8, d10, d1[1]
# vacc0x4567 += vb4567 * va0[5]
VMLAL.S16 q9, d11, d1[1]
# vacc1x0123 += vb0123 * va1[5]
VMLAL.S16 q10, d10, d3[1]
# vacc1x4567 += vb4567 * va1[5]
VMLAL.S16 q11, d11, d3[1]
# vacc2x0123 += vb0123 * va2[5]
VMLAL.S16 q12, d10, d5[1]
# vacc2x4567 += vb4567 * va2[5]
VMLAL.S16 q13, d11, d5[1]
# vacc3x0123 += vb0123 * va3[5]
VMLAL.S16 q14, d10, d7[1]
# vacc3x4567 += vb4567 * va3[5]
VMLAL.S16 q15, d11, d7[1]
### Channel 6 ###
BLS 3f
# Load b0-b7 (channel 6)
# - d9 = b0-b7
VLD1.8 {d9}, [ip:64]!
# q4 = b0:7 - vb_zero_point
# - d8 = vb0123 (channel 6)
# - d9 = vb4567 (channel 6)
VSUBL.U8 q4, d9, d15
# vacc0x0123 += vb0123 * va0[6]
VMLAL.S16 q8, d8, d1[2]
# vacc0x4567 += vb4567 * va0[6]
VMLAL.S16 q9, d9, d1[2]
# vacc1x0123 += vb0123 * va1[6]
VMLAL.S16 q10, d8, d3[2]
# vacc1x4567 += vb4567 * va1[6]
VMLAL.S16 q11, d9, d3[2]
# vacc2x0123 += vb0123 * va2[6]
VMLAL.S16 q12, d8, d5[2]
# vacc2x4567 += vb4567 * va2[6]
VMLAL.S16 q13, d9, d5[2]
# vacc3x0123 += vb0123 * va3[6]
VMLAL.S16 q14, d8, d7[2]
# vacc3x4567 += vb4567 * va3[6]
VMLAL.S16 q15, d9, d7[2]
.p2align 4
3:
SUBS r3, r3, 1
BNE 0b
# Load requantization_scale c0123:
# - q6 = d12:d13 = requantization_scale c0123
VLD1.32 {d12, d13}, [r11]!
# Load vfmax:
# - q5 = d10:d11 = vfmax
VLD1.32 {d10[], d11[]}, [r9]!
# Load requantization_scale c4567:
# - q2 = d4:d5 = requantization_scale c4567
VLD1.32 {d4, d5}, [r11]
# Load vfmin:
# - q4 = d8:d9 = vfmin
VLD1.32 {d8[], d9[]}, [r9]!
# Load vfmagic:
# - q0 = d0:d1 = vfmagic
VLD1.32 {d0[], d1[]}, [r9]!
# Load vimagic:
# - q1 = d2:d3 = vimagic
VLD1.32 {d2[], d3[]}, [r9]!
VCVT.F32.S32 q8, q8
VCVT.F32.S32 q9, q9
VCVT.F32.S32 q10, q10
VCVT.F32.S32 q11, q11
VCVT.F32.S32 q12, q12
VCVT.F32.S32 q13, q13
VCVT.F32.S32 q14, q14
VCVT.F32.S32 q15, q15
VMUL.F32 q8, q8, q6
VMUL.F32 q9, q9, q2
VMUL.F32 q10, q10, q6
VMUL.F32 q11, q11, q2
VMUL.F32 q12, q12, q6
VMUL.F32 q13, q13, q2
VMUL.F32 q14, q14, q6
VMUL.F32 q15, q15, q2
VMIN.F32 q8, q8, q5
VMIN.F32 q9, q9, q5
VMIN.F32 q10, q10, q5
VMIN.F32 q11, q11, q5
VMIN.F32 q12, q12, q5
VMIN.F32 q13, q13, q5
VMIN.F32 q14, q14, q5
VMIN.F32 q15, q15, q5
VMAX.F32 q8, q8, q4
VMAX.F32 q9, q9, q4
VMAX.F32 q10, q10, q4
VMAX.F32 q11, q11, q4
VMAX.F32 q12, q12, q4
VMAX.F32 q13, q13, q4
VMAX.F32 q14, q14, q4
VMAX.F32 q15, q15, q4
VADD.F32 q8, q8, q0
VADD.F32 q9, q9, q0
VADD.F32 q10, q10, q0
VADD.F32 q11, q11, q0
VADD.F32 q12, q12, q0
VADD.F32 q13, q13, q0
VADD.F32 q14, q14, q0
VADD.F32 q15, q15, q0
# Load c, c_stride:
# - r2 = c
# - r3 = c_stride
LDRD r2, r3, [sp, 104]
VSUB.S32 q8, q8, q1
VSUB.S32 q9, q9, q1
VSUB.S32 q10, q10, q1
VSUB.S32 q11, q11, q1
VSUB.S32 q12, q12, q1
VSUB.S32 q13, q13, q1
VSUB.S32 q14, q14, q1
VSUB.S32 q15, q15, q1
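# The VADD.F32/VSUB.S32 pairs above implement float-to-int conversion
# via the "magic number" trick; an illustrative C sketch:
#   int32_t q = fp32_bits(x + vfmagic) - vimagic;
# Adding vfmagic pins the exponent so the low mantissa bits hold the
# integer value; the integer subtract of vimagic then strips the
# exponent bits and applies the output zero point in one step. The
# VMIN/VMAX clamp just before keeps x in the representable range.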
# Compute the four output row pointers from c (r2) and c_stride (r3);
# the conditional moves below alias out-of-range rows when mr < 4.
ADD r4, r2, r3
VQMOVN.S32 d16, q8
VQMOVN.S32 d17, q9
CMP r0, 2
VQMOVN.S32 d18, q10
VQMOVN.S32 d19, q11
MOVLO r4, r2
VQMOVN.S32 d20, q12
VQMOVN.S32 d21, q13
VQMOVN.S32 d22, q14
VQMOVN.S32 d23, q15
ADD r5, r4, r3
VQMOVUN.S16 d16, q8
MOVLS r5, r4
VQMOVUN.S16 d17, q9
VQMOVUN.S16 d18, q10
CMP r0, 4
ADD r3, r5, r3
MOVNE r3, r5
CMP r1, 8
VQMOVUN.S16 d19, q11
BNE 5f
VST1.8 {d16}, [r2]
VST1.8 {d17}, [r4]
VST1.8 {d18}, [r5]
VST1.8 {d19}, [r3]
VPOP {d8-d15}
POP {r4, r5, r6, r7, r8, r9, r10, r11}
BX lr
.p2align 3
5:
# Store the nr remainder: write 4, then 2, then 1 byte(s) per row,
# rotating the vectors with VEXT after each partial store.
CMP r1, 4
BLO 6f
VST1.32 {d16[0]}, [r2]!
VST1.32 {d17[0]}, [r4]!
VST1.32 {d18[0]}, [r5]!
VST1.32 {d19[0]}, [r3]!
SUB r1, 4
VEXT.8 q8, q8, q8, 4
VEXT.8 q9, q9, q9, 4
6:
CMP r1, 2
BLO 7f
VST1.16 {d16[0]}, [r2]!
VST1.16 {d17[0]}, [r4]!
VST1.16 {d18[0]}, [r5]!
VST1.16 {d19[0]}, [r3]!
SUB r1, 2
VEXT.8 q8, q8, q8, 2
VEXT.8 q9, q9, q9, 2
7:
TEQ r1, 0
BEQ 8f
VST1.8 {d16[0]}, [r2]
VST1.8 {d17[0]}, [r4]
VST1.8 {d18[0]}, [r5]
VST1.8 {d19[0]}, [r3]
8:
VPOP {d8-d15}
POP {r4, r5, r6, r7, r8, r9, r10, r11}
BX lr
END_FUNCTION pytorch_q8conv_ukernel_4x8__aarch32_neon
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
KAVYANSHTYAGI/pytorch | aten/src/ATen/native/quantized/cpu/qnnpack/src/q8dwconv/up8x9-aarch32-neon.S
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
#include <requantization/runtime-assembly.h>
.syntax unified
# void pytorch_q8dwconv_ukernel_up8x9__aarch32_neon(
# size_t channels,
# size_t output_width,
# const uint8_t** input,
# const void* weights,
# uint8_t* output,
# size_t input_stride,
# size_t output_increment,
# const union pytorch_qnnp_conv_quantization_params quantization_params[restrict static 1])
BEGIN_FUNCTION pytorch_q8dwconv_ukernel_up8x9__aarch32_neon
.arm
#ifndef __APPLE__
.arch armv7-a
.fpu neon
#endif
# Load params
# - r12 = quantization_params
LDR r12, [sp, 12]
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
VPUSH {d8-d15}
STR r0, [sp, #-8]
STR r3, [sp, #-4]
# Load the address of the zero_point array.
# For depthwise kernels the array has a single element.
LDR r5, [r12], 4
# Load o:
# - lr = o = output
LDR lr, [sp, 100]
# Load kernel zero point:
# - d31 = vkernel_zero_point
VLD1.8 {d31[]}, [r5]
# Load input zero point:
# - d30 = vinput_zero_point
VLD1.8 {d30[]}, [r12]
# Load the address of the requantization_scale array.
# For depthwise kernels the array has a single element.
# pre-index r12 = r12 + 4
LDR r5, [r12, 4]!
# add 8 bytes to get to vfmax
ADD r12, r12, 8
# Load requantization_scale:
# - q14 = d28:d29 = requantization_scale
VLD1.32 {d28[], d29[]}, [r5]
# Load vfmax:
# - q13 = d26:d27 = vfmax
VLD1.32 {d26[], d27[]}, [r12]!
# Load vfmin:
# - q12 = d24:d25 = vfmin
VLD1.32 {d24[], d25[]}, [r12]!
# Load vfmagic:
# - q10 = d20:d21 = vfmagic
VLD1.32 {d20[], d21[]}, [r12]!
# Load vimagic:
# - q11 = d22:d23 = vimagic
# Since q11/d22 gets clobbered in the remainder-channels section,
# that section must reload this value. r12 is overwritten below,
# so spill the vector to the stack now and restore it from there.
VLD1.32 {d22[], d23[]}, [r12]
VSTR d22, [sp, #-16]
VSTR d23, [sp, #-24]
.p2align 3
0:
# Load input stride
# - r3 = input_stride
LDR r3, [sp, 104]
# Load c:
# - r0 = c = channels
LDR r0, [sp, #-8]
# Load i0, i1, i2, i3, i4, i5, i6, i7, i8
# - r4 = i0
# - r5 = i1
# - r6 = i2
# - r7 = i3
# - r8 = i4
# - r9 = i5
# - r10 = i6
# - r11 = i7
# - r12 = i8
LDM r2, {r4, r5, r6, r7, r8, r9, r10, r11, r12}
# Pre-decrement c
SUBS r0, r0, 8
# Increment input by input stride
# - input = r2 := input + input_stride
ADD r2, r2, r3
# Load w:
# - r3 = w = weights
LDR r3, [sp, #-4]
BLO 2f
.p2align 4
1:
VLDM r3!, {d0-d3}
VLD1.8 {d4}, [r4]!
VLD1.8 {d6}, [r3]!
VLD1.8 {d8}, [r5]!
VLD1.8 {d10}, [r3]!
SUB_ZERO_POINT q2, d4, d30
VSUBL.U8 q3, d6, d31
VLD1.8 {d12}, [r6]!
VLD1.8 {d14}, [r3]!
SUB_ZERO_POINT q4, d8, d30
VSUBL.U8 q5, d10, d31
VMLAL.S16 q0, d4, d6
VMLAL.S16 q1, d5, d7
VLD1.8 {d4}, [r7]!
VLD1.8 {d6}, [r3]!
SUB_ZERO_POINT q6, d12, d30
VSUBL.U8 q7, d14, d31
VMLAL.S16 q0, d8, d10
VMLAL.S16 q1, d9, d11
VLD1.8 {d8}, [r8]!
VLD1.8 {d10}, [r3]!
SUB_ZERO_POINT q2, d4, d30
VSUBL.U8 q3, d6, d31
VMLAL.S16 q0, d12, d14
VMLAL.S16 q1, d13, d15
VLD1.8 {d12}, [r9]!
VLD1.8 {d14}, [r3]!
SUB_ZERO_POINT q4, d8, d30
VSUBL.U8 q5, d10, d31
VMLAL.S16 q0, d4, d6
VMLAL.S16 q1, d5, d7
VLD1.8 {d4}, [r10]!
VLD1.8 {d6}, [r3]!
SUB_ZERO_POINT q6, d12, d30
VSUBL.U8 q7, d14, d31
VMLAL.S16 q0, d8, d10
VMLAL.S16 q1, d9, d11
VLD1.8 {d8}, [r11]!
VLD1.8 {d10}, [r3]!
SUB_ZERO_POINT q2, d4, d30
VSUBL.U8 q3, d6, d31
VMLAL.S16 q0, d12, d14
VMLAL.S16 q1, d13, d15
VLD1.8 {d12}, [r12]!
VLD1.8 {d14}, [r3]!
SUB_ZERO_POINT q4, d8, d30
VSUBL.U8 q5, d10, d31
VMLAL.S16 q0, d4, d6
VMLAL.S16 q1, d5, d7
SUB_ZERO_POINT q6, d12, d30
VSUBL.U8 q7, d14, d31
VMLAL.S16 q0, d8, d10
VMLAL.S16 q1, d9, d11
VMLAL.S16 q0, d12, d14
VMLAL.S16 q1, d13, d15
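# Per 8-channel group, the MAC chain above accumulates, in effect
# (illustrative C, names descriptive only):
#   acc[c] = bias[c];
#   for (k = 0; k < 9; k++)
#     acc[c] += (int32_t)(input[k][c] - input_zero_point)
#             * (int32_t)(weight[k][c] - kernel_zero_point);
# with bias and the interleaved weights coming from the VLDM/VLD1 stream.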
VCVT.F32.S32 q0, q0
VCVT.F32.S32 q1, q1
VMUL.F32 q0, q0, q14
VMUL.F32 q1, q1, q14
VMIN.F32 q0, q0, q13
VMIN.F32 q1, q1, q13
VMAX.F32 q0, q0, q12
VMAX.F32 q1, q1, q12
VADD.F32 q0, q0, q10
VADD.F32 q1, q1, q10
VSUB.S32 q0, q0, q11
VSUB.S32 q1, q1, q11
VQMOVN.S32 d0, q0
VQMOVN.S32 d1, q1
VQMOVUN.S16 d0, q0
VST1.8 {d0}, [lr]!
SUBS r0, r0, 8
BHS 1b
2:
CMP r0, -8
BEQ 5f
ADD r4, r4, r0
ADD r5, r5, r0
ADD r6, r6, r0
ADD r7, r7, r0
ADD r8, r8, r0
ADD r9, r9, r0
ADD r10, r10, r0
ADD r11, r11, r0
ADD r12, r12, r0
LSL r0, r0, 3
VDUP.32 d22, r0
VLDM r3!, {d0-d3}
VLD1.8 {d4}, [r4]!
VLD1.8 {d6}, [r3]!
VLD1.8 {d8}, [r5]!
VLD1.8 {d10}, [r3]!
VSHL.U64 d4, d4, d22
VLD1.8 {d12}, [r6]!
VLD1.8 {d14}, [r3]!
SUB_ZERO_POINT q2, d4, d30
VSUBL.U8 q3, d6, d31
VSHL.U64 d8, d8, d22
VLD1.8 {d16}, [r7]!
VLD1.8 {d18}, [r3]!
VSHL.U64 d12, d12, d22
SUB_ZERO_POINT q4, d8, d30
VSUBL.U8 q5, d10, d31
VMLAL.S16 q0, d4, d6
VMLAL.S16 q1, d5, d7
VLD1.8 {d4}, [r8]!
VLD1.8 {d6}, [r3]!
VSHL.U64 d16, d16, d22
SUB_ZERO_POINT q6, d12, d30
VSUBL.U8 q7, d14, d31
VMLAL.S16 q0, d8, d10
VMLAL.S16 q1, d9, d11
VLD1.8 {d8}, [r9]!
VLD1.8 {d10}, [r3]!
VSHL.U64 d4, d4, d22
SUB_ZERO_POINT q8, d16, d30
VSUBL.U8 q9, d18, d31
VMLAL.S16 q0, d12, d14
VMLAL.S16 q1, d13, d15
VLD1.8 {d12}, [r10]!
VLD1.8 {d14}, [r3]!
VSHL.U64 d8, d8, d22
SUB_ZERO_POINT q2, d4, d30
VSUBL.U8 q3, d6, d31
VMLAL.S16 q0, d16, d18
VMLAL.S16 q1, d17, d19
VLD1.8 {d16}, [r11]!
VLD1.8 {d18}, [r3]!
VSHL.U64 d12, d12, d22
SUB_ZERO_POINT q4, d8, d30
VSUBL.U8 q5, d10, d31
VMLAL.S16 q0, d4, d6
VMLAL.S16 q1, d5, d7
VLD1.8 {d4}, [r12]!
VLD1.8 {d6}, [r3]!
VSHL.U64 d16, d16, d22
SUB_ZERO_POINT q6, d12, d30
VSUBL.U8 q7, d14, d31
VMLAL.S16 q0, d8, d10
VMLAL.S16 q1, d9, d11
VSHL.U64 d4, d4, d22
SUB_ZERO_POINT q8, d16, d30
VSUBL.U8 q9, d18, d31
VMLAL.S16 q0, d12, d14
VMLAL.S16 q1, d13, d15
SUB_ZERO_POINT q2, d4, d30
VSUBL.U8 q3, d6, d31
VMLAL.S16 q0, d16, d18
VMLAL.S16 q1, d17, d19
VMLAL.S16 q0, d4, d6
VMLAL.S16 q1, d5, d7
VLDR.64 d22, [sp, #-16]
VLDR.64 d23, [sp, #-24]
VCVT.F32.S32 q0, q0
VCVT.F32.S32 q1, q1
VMUL.F32 q0, q0, q14
VMUL.F32 q1, q1, q14
VMIN.F32 q0, q0, q13
VMIN.F32 q1, q1, q13
VMAX.F32 q0, q0, q12
VMAX.F32 q1, q1, q12
VADD.F32 q0, q0, q10
VADD.F32 q1, q1, q10
VSUB.S32 q0, q0, q11
VSUB.S32 q1, q1, q11
VQMOVN.S32 d0, q0
VQMOVN.S32 d1, q1
VQMOVUN.S16 d0, q0
TST r0, 32
BEQ 3f
VST1.32 {d0[0]}, [lr]!
VEXT.8 d0, d0, d0, 4
3:
TST r0, 16
BEQ 4f
VST1.16 {d0[0]}, [lr]!
VEXT.8 d0, d0, d0, 2
4:
TST r0, 8
BEQ 5f
VST1.8 {d0[0]}, [lr]!
5:
# Load output increment
# - r3 = output_increment
LDR r3, [sp, 108]
# Decrement output width
SUBS r1, r1, 1
# Increment output by output_increment
ADD lr, lr, r3
# If output width is non-zero, process another pixel
BNE 0b
VPOP {d8-d15}
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
END_FUNCTION pytorch_q8dwconv_ukernel_up8x9__aarch32_neon
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
KAVYANSHTYAGI/pytorch | aten/src/ATen/native/quantized/cpu/qnnpack/src/q8dwconv/up8x9-aarch32-neon-per-channel.S
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <qnnpack/assembly.h>
#include <requantization/runtime-assembly.h>
.syntax unified
# void pytorch_q8dwconv_ukernel_up8x9_per_channel__aarch32_neon(
# size_t channels,
# size_t output_width,
# const uint8_t** input,
# const void* weights,
# uint8_t* output,
# size_t input_stride,
# size_t output_increment,
# const union pytorch_qnnp_conv_quantization_params quantization_params[restrict static 1])
BEGIN_FUNCTION pytorch_q8dwconv_ukernel_up8x9_per_channel__aarch32_neon
.arm
#ifndef __APPLE__
.arch armv7-a
.fpu neon
#endif
# Load params
# - r12 = quantization_params
LDR r12, [sp, 12]
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
VPUSH {d8-d15}
STR r0, [sp, #-8]
STR r3, [sp, #-4]
STR r1, [sp, #-12]
STR r2, [sp, #-16]
# Load the address of the kernel zero_point array
# (one element per channel in this per-channel kernel).
LDR r5, [r12], 4
# Push the zero_point array base pointer on the stack.
# We don't have enough registers to keep every base pointer
# live, so some values are spilled and reloaded:
# at sp #-20 we store the updated/working copy pointer,
# at sp #-28 we store the original pointer, which is reloaded
# for each additional output pixel.
STR r5, [sp, #-28]
# Load o:
# - lr = o = output
LDR lr, [sp, 100]
# Load input zero point:
# - d30 = vinput_zero_point
VLD1.8 {d30[]}, [r12]
# Load the address of the requantization_scale array
# (one element per channel in this per-channel kernel).
# pre-index r12 = r12 + 4
LDR r5, [r12, 4]!
# Push the requantization_scales base pointer on the stack.
# At sp #-24 we store the updated/working copy pointer,
# at sp #-32 we store the original pointer, which is reloaded
# for each additional output pixel.
STR r5, [sp, #-32]
# add 8 bytes to get to vfmax
ADD r12, r12, 8
# Load vfmax:
# - q13 = d26:d27 = vfmax
VLD1.32 {d26[], d27[]}, [r12]!
# Load vfmin:
# - q12 = d24:d25 = vfmin
VLD1.32 {d24[], d25[]}, [r12]!
# Load vfmagic:
# - q10 = d20:d21 = vfmagic
VLD1.32 {d20[], d21[]}, [r12]!
# Load vimagic:
# - q11 = d22:d23 = vimagic
# Since q11/d22 gets clobbered in the remainder-channels section,
# that section must reload this value. r12 is overwritten below,
# so spill the vector to the stack now and restore it from there.
VLD1.32 {d22[], d23[]}, [r12]
VSTR d22, [sp, #-40]
VSTR d23, [sp, #-48]
.p2align 3
0:
# Load original zero point base pointer
LDR r4, [sp, #-28]
# Load original requant scale base pointer
LDR r5, [sp, #-32]
# Load indirection pointer from stack
LDR r2, [sp, #-16]
# Load input stride
# - r3 = input_stride
LDR r3, [sp, 104]
# Store original zero point to working copy
STR r4, [sp, #-20]
# Store original requant scale to working copy
STR r5, [sp, #-24]
# Load c:
# - r0 = c = channels
LDR r0, [sp, #-8]
# Load i0, i1, i2, i3, i4, i5, i6, i7, i8
# - r4 = i0
# - r5 = i1
# - r6 = i2
# - r7 = i3
# - r8 = i4
# - r9 = i5
# - r10 = i6
# - r11 = i7
# - r12 = i8
LDM r2, {r4, r5, r6, r7, r8, r9, r10, r11, r12}
# Pre-decrement c
SUBS r0, r0, 8
# Increment input by input stride
# - input = r2 := input + input_stride
ADD r2, r2, r3
STR r2, [sp, #-16]
# Load w:
# - r3 = w = weights
LDR r3, [sp, #-4]
BLO 2f
.p2align 4
1:
VLDM r3!, {d0-d3}
VLD1.8 {d4}, [r4]!
VLD1.8 {d6}, [r3]!
# zero point array base address
LDR r1, [sp, #-20]
# requantization scale array base address
LDR r2, [sp, #-24]
VLD1.8 {d8}, [r5]!
VLD1.8 {d10}, [r3]!
# - d31 = vkernel_zero_point
VLD1.8 {d31}, [r1]!
# - q8 = d16:d17 = requantization_scale_lo
VLD1.32 {d16, d17}, [r2]!
# - q14 = d28:d29 = requantization_scale_hi
VLD1.32 {d28, d29}, [r2]!
STR r1, [sp, #-20]
STR r2, [sp, #-24]
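# Note: q8 holds the per-channel scales for channels 0-3 (accumulated in
# q0) and q14 the scales for channels 4-7 (accumulated in q1); the two
# VMUL.F32s after the MAC chain apply them independently.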
SUB_ZERO_POINT q2, d4, d30
VSUBL.U8 q3, d6, d31
VLD1.8 {d12}, [r6]!
VLD1.8 {d14}, [r3]!
SUB_ZERO_POINT q4, d8, d30
VSUBL.U8 q5, d10, d31
VMLAL.S16 q0, d4, d6
VMLAL.S16 q1, d5, d7
VLD1.8 {d4}, [r7]!
VLD1.8 {d6}, [r3]!
SUB_ZERO_POINT q6, d12, d30
VSUBL.U8 q7, d14, d31
VMLAL.S16 q0, d8, d10
VMLAL.S16 q1, d9, d11
VLD1.8 {d8}, [r8]!
VLD1.8 {d10}, [r3]!
SUB_ZERO_POINT q2, d4, d30
VSUBL.U8 q3, d6, d31
VMLAL.S16 q0, d12, d14
VMLAL.S16 q1, d13, d15
VLD1.8 {d12}, [r9]!
VLD1.8 {d14}, [r3]!
SUB_ZERO_POINT q4, d8, d30
VSUBL.U8 q5, d10, d31
VMLAL.S16 q0, d4, d6
VMLAL.S16 q1, d5, d7
VLD1.8 {d4}, [r10]!
VLD1.8 {d6}, [r3]!
SUB_ZERO_POINT q6, d12, d30
VSUBL.U8 q7, d14, d31
VMLAL.S16 q0, d8, d10
VMLAL.S16 q1, d9, d11
VLD1.8 {d8}, [r11]!
VLD1.8 {d10}, [r3]!
SUB_ZERO_POINT q2, d4, d30
VSUBL.U8 q3, d6, d31
VMLAL.S16 q0, d12, d14
VMLAL.S16 q1, d13, d15
VLD1.8 {d12}, [r12]!
VLD1.8 {d14}, [r3]!
SUB_ZERO_POINT q4, d8, d30
VSUBL.U8 q5, d10, d31
VMLAL.S16 q0, d4, d6
VMLAL.S16 q1, d5, d7
SUB_ZERO_POINT q6, d12, d30
VSUBL.U8 q7, d14, d31
VMLAL.S16 q0, d8, d10
VMLAL.S16 q1, d9, d11
VMLAL.S16 q0, d12, d14
VMLAL.S16 q1, d13, d15
VCVT.F32.S32 q0, q0
VCVT.F32.S32 q1, q1
VMUL.F32 q0, q0, q8
VMUL.F32 q1, q1, q14
VMIN.F32 q0, q0, q13
VMIN.F32 q1, q1, q13
VMAX.F32 q0, q0, q12
VMAX.F32 q1, q1, q12
VADD.F32 q0, q0, q10
VADD.F32 q1, q1, q10
VSUB.S32 q0, q0, q11
VSUB.S32 q1, q1, q11
VQMOVN.S32 d0, q0
VQMOVN.S32 d1, q1
VQMOVUN.S16 d0, q0
VST1.8 {d0}, [lr]!
SUBS r0, r0, 8
BHS 1b
2:
CMP r0, -8
BEQ 5f
# zero point array base address
LDR r1, [sp, #-20]
# requantization scale array base address
LDR r2, [sp, #-24]
ADD r4, r4, r0
ADD r5, r5, r0
ADD r6, r6, r0
ADD r7, r7, r0
ADD r8, r8, r0
ADD r9, r9, r0
ADD r10, r10, r0
ADD r11, r11, r0
ADD r12, r12, r0
# - d31 = vkernel_zero_point
VLD1.8 {d31}, [r1]
LSL r0, r0, 3
VDUP.32 d22, r0
VLDM r3!, {d0-d3}
VLD1.8 {d4}, [r4]!
VLD1.8 {d6}, [r3]!
VLD1.8 {d8}, [r5]!
VLD1.8 {d10}, [r3]!
VSHL.U64 d4, d4, d22
VLD1.8 {d12}, [r6]!
VLD1.8 {d14}, [r3]!
SUB_ZERO_POINT q2, d4, d30
VSUBL.U8 q3, d6, d31
VSHL.U64 d8, d8, d22
VLD1.8 {d16}, [r7]!
VLD1.8 {d18}, [r3]!
VSHL.U64 d12, d12, d22
SUB_ZERO_POINT q4, d8, d30
VSUBL.U8 q5, d10, d31
VMLAL.S16 q0, d4, d6
VMLAL.S16 q1, d5, d7
VLD1.8 {d4}, [r8]!
VLD1.8 {d6}, [r3]!
VSHL.U64 d16, d16, d22
SUB_ZERO_POINT q6, d12, d30
VSUBL.U8 q7, d14, d31
VMLAL.S16 q0, d8, d10
VMLAL.S16 q1, d9, d11
VLD1.8 {d8}, [r9]!
VLD1.8 {d10}, [r3]!
VSHL.U64 d4, d4, d22
SUB_ZERO_POINT q8, d16, d30
VSUBL.U8 q9, d18, d31
VMLAL.S16 q0, d12, d14
VMLAL.S16 q1, d13, d15
VLD1.8 {d12}, [r10]!
VLD1.8 {d14}, [r3]!
VSHL.U64 d8, d8, d22
SUB_ZERO_POINT q2, d4, d30
VSUBL.U8 q3, d6, d31
VMLAL.S16 q0, d16, d18
VMLAL.S16 q1, d17, d19
VLD1.8 {d16}, [r11]!
VLD1.8 {d18}, [r3]!
VSHL.U64 d12, d12, d22
SUB_ZERO_POINT q4, d8, d30
VSUBL.U8 q5, d10, d31
VMLAL.S16 q0, d4, d6
VMLAL.S16 q1, d5, d7
VLD1.8 {d4}, [r12]!
VLD1.8 {d6}, [r3]!
VSHL.U64 d16, d16, d22
SUB_ZERO_POINT q6, d12, d30
VSUBL.U8 q7, d14, d31
VMLAL.S16 q0, d8, d10
VMLAL.S16 q1, d9, d11
VSHL.U64 d4, d4, d22
SUB_ZERO_POINT q8, d16, d30
VSUBL.U8 q9, d18, d31
VMLAL.S16 q0, d12, d14
VMLAL.S16 q1, d13, d15
SUB_ZERO_POINT q2, d4, d30
VSUBL.U8 q3, d6, d31
VMLAL.S16 q0, d16, d18
VMLAL.S16 q1, d17, d19
# - q8 = d16:d17 = requantization_scale_lo
VLD1.32 {d16, d17}, [r2]!
# - q14 = d28:d29 = requantization_scale_hi
VLD1.32 {d28, d29}, [r2]
VMLAL.S16 q0, d4, d6
VMLAL.S16 q1, d5, d7
VLDR.64 d22, [sp, #-40]
VLDR.64 d23, [sp, #-48]
VCVT.F32.S32 q0, q0
VCVT.F32.S32 q1, q1
VMUL.F32 q0, q0, q8
VMUL.F32 q1, q1, q14
VMIN.F32 q0, q0, q13
VMIN.F32 q1, q1, q13
VMAX.F32 q0, q0, q12
VMAX.F32 q1, q1, q12
VADD.F32 q0, q0, q10
VADD.F32 q1, q1, q10
VSUB.S32 q0, q0, q11
VSUB.S32 q1, q1, q11
VQMOVN.S32 d0, q0
VQMOVN.S32 d1, q1
VQMOVUN.S16 d0, q0
TST r0, 32
BEQ 3f
VST1.32 {d0[0]}, [lr]!
VEXT.8 d0, d0, d0, 4
3:
TST r0, 16
BEQ 4f
VST1.16 {d0[0]}, [lr]!
VEXT.8 d0, d0, d0, 2
4:
TST r0, 8
BEQ 5f
VST1.8 {d0[0]}, [lr]!
5:
# Load output_width from stack
LDR r1, [sp, #-12]
# Load output increment
# - r3 = output_increment
LDR r3, [sp, 108]
# Decrement output width
SUBS r1, r1, 1
# store output_width on stack
STR r1, [sp, #-12]
# Increment output by output_increment
ADD lr, lr, r3
# If output width is non-zero, process another pixel
BNE 0b
VPOP {d8-d15}
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
END_FUNCTION pytorch_q8dwconv_ukernel_up8x9_per_channel__aarch32_neon
#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
keith-hall/bat | tests/syntax-tests/highlighted/ARM Assembly/test.S

.data
.balign 4
red: .word 0
green: .word 0
blue: .word 0
.text
.global grayscale
.func grayscale
grayscale:
assign:
    /* some comment */
    ldr ip, addr_red
    str r3, [ip]
    ldr ip, addr_green
    ldmfd r13!, {r3}
    str r3, [ip]
    ldr ip, addr_blue
    ldmfd r13!, {r3}
    str r3, [ip]
    stmfd r13!, {r4-r8}
    ldr ip, addr_red
    ldr r3, [ip]
    ldr ip, addr_green
    ldr r4, [ip]
    ldr ip, addr_blue
    ldr r5, [ip] /* another comment */
/* The loop below computes a fixed-point weighted sum per RGB pixel,
   roughly *out++ = (wr*r + wg*g + wb*b) >> 8, with the weights
   preloaded into r3/r4/r5 from red/green/blue above. */
grayscale_loop:
    ldrb r6, [r1]
    mul r6, r3, r6
    add r1, r1, #1
    ldrb r7, [r1]
    mul r7, r4, r7
    add r1, r1, #1
    ldrb r8, [r1]
    mul r8, r5, r8
    add r1, r1, #1
    add r6, r6, r7
    add r6, r6, r8
    asr r6, r6, #8
    str r6, [r2]
    add r2, r2, #1
    sub r0, r0, #1
    cmp r0, #0
    bne grayscale_loop
    ldmfd r13!, {r4-r8}
    stmfd r13!, {r0-r1}
    bx lr
addr_red: .word red
addr_green: .word green
addr_blue: .word blue

Kennystflr/2425_Maker_S2C | Software/S2C_motorBoard/Core/Startup/startup_stm32g431cbux.s
/**
******************************************************************************
* @file startup_stm32g431xx.s
* @author MCD Application Team
* @brief STM32G431xx devices vector table GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address,
* - Configure the clock system
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M4 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
* @attention
*
* Copyright (c) 2019 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m4
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.equ BootRAM, 0xF1E0F85F
/**
* @brief This is the code that gets called when the processor first
* starts execution following a reset event. Only the absolutely
* necessary set is performed, after which the application
* supplied main() routine is called.
* @param None
* @retval : None
*/
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Call the clock system initialization function.*/
bl SystemInit
/* Copy the data segment initializers from flash to SRAM */
ldr r0, =_sdata
ldr r1, =_edata
ldr r2, =_sidata
movs r3, #0
b LoopCopyDataInit
CopyDataInit:
ldr r4, [r2, r3]
str r4, [r0, r3]
adds r3, r3, #4
LoopCopyDataInit:
adds r4, r0, r3
cmp r4, r1
bcc CopyDataInit
/* Zero fill the bss segment. */
ldr r2, =_sbss
ldr r4, =_ebss
movs r3, #0
b LoopFillZerobss
FillZerobss:
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
cmp r2, r4
bcc FillZerobss
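/* Equivalent C for the two initialization loops above (an illustrative
   sketch; the symbols come from the linker script):
     memcpy(&_sdata, &_sidata, (char *)&_edata - (char *)&_sdata);
     memset(&_sbss, 0, (char *)&_ebss - (char *)&_sbss);
*/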
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex-M4. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word MemManage_Handler
.word BusFault_Handler
.word UsageFault_Handler
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word DebugMon_Handler
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler
.word PVD_PVM_IRQHandler
.word RTC_TAMP_LSECSS_IRQHandler
.word RTC_WKUP_IRQHandler
.word FLASH_IRQHandler
.word RCC_IRQHandler
.word EXTI0_IRQHandler
.word EXTI1_IRQHandler
.word EXTI2_IRQHandler
.word EXTI3_IRQHandler
.word EXTI4_IRQHandler
.word DMA1_Channel1_IRQHandler
.word DMA1_Channel2_IRQHandler
.word DMA1_Channel3_IRQHandler
.word DMA1_Channel4_IRQHandler
.word DMA1_Channel5_IRQHandler
.word DMA1_Channel6_IRQHandler
.word 0
.word ADC1_2_IRQHandler
.word USB_HP_IRQHandler
.word USB_LP_IRQHandler
.word FDCAN1_IT0_IRQHandler
.word FDCAN1_IT1_IRQHandler
.word EXTI9_5_IRQHandler
.word TIM1_BRK_TIM15_IRQHandler
.word TIM1_UP_TIM16_IRQHandler
.word TIM1_TRG_COM_TIM17_IRQHandler
.word TIM1_CC_IRQHandler
.word TIM2_IRQHandler
.word TIM3_IRQHandler
.word TIM4_IRQHandler
.word I2C1_EV_IRQHandler
.word I2C1_ER_IRQHandler
.word I2C2_EV_IRQHandler
.word I2C2_ER_IRQHandler
.word SPI1_IRQHandler
.word SPI2_IRQHandler
.word USART1_IRQHandler
.word USART2_IRQHandler
.word USART3_IRQHandler
.word EXTI15_10_IRQHandler
.word RTC_Alarm_IRQHandler
.word USBWakeUp_IRQHandler
.word TIM8_BRK_IRQHandler
.word TIM8_UP_IRQHandler
.word TIM8_TRG_COM_IRQHandler
.word TIM8_CC_IRQHandler
.word 0
.word 0
.word LPTIM1_IRQHandler
.word 0
.word SPI3_IRQHandler
.word UART4_IRQHandler
.word 0
.word TIM6_DAC_IRQHandler
.word TIM7_IRQHandler
.word DMA2_Channel1_IRQHandler
.word DMA2_Channel2_IRQHandler
.word DMA2_Channel3_IRQHandler
.word DMA2_Channel4_IRQHandler
.word DMA2_Channel5_IRQHandler
.word 0
.word 0
.word UCPD1_IRQHandler
.word COMP1_2_3_IRQHandler
.word COMP4_IRQHandler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word CRS_IRQHandler
.word SAI1_IRQHandler
.word 0
.word 0
.word 0
.word 0
.word FPU_IRQHandler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word RNG_IRQHandler
.word LPUART1_IRQHandler
.word I2C3_EV_IRQHandler
.word I2C3_ER_IRQHandler
.word DMAMUX_OVR_IRQHandler
.word 0
.word 0
.word DMA2_Channel6_IRQHandler
.word 0
.word 0
.word CORDIC_IRQHandler
.word FMAC_IRQHandler
.size g_pfnVectors, .-g_pfnVectors
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak MemManage_Handler
.thumb_set MemManage_Handler,Default_Handler
.weak BusFault_Handler
.thumb_set BusFault_Handler,Default_Handler
.weak UsageFault_Handler
.thumb_set UsageFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak DebugMon_Handler
.thumb_set DebugMon_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_PVM_IRQHandler
.thumb_set PVD_PVM_IRQHandler,Default_Handler
.weak RTC_TAMP_LSECSS_IRQHandler
.thumb_set RTC_TAMP_LSECSS_IRQHandler,Default_Handler
.weak RTC_WKUP_IRQHandler
.thumb_set RTC_WKUP_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_IRQHandler
.thumb_set RCC_IRQHandler,Default_Handler
.weak EXTI0_IRQHandler
.thumb_set EXTI0_IRQHandler,Default_Handler
.weak EXTI1_IRQHandler
.thumb_set EXTI1_IRQHandler,Default_Handler
.weak EXTI2_IRQHandler
.thumb_set EXTI2_IRQHandler,Default_Handler
.weak EXTI3_IRQHandler
.thumb_set EXTI3_IRQHandler,Default_Handler
.weak EXTI4_IRQHandler
.thumb_set EXTI4_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_IRQHandler
.thumb_set DMA1_Channel2_IRQHandler,Default_Handler
.weak DMA1_Channel3_IRQHandler
.thumb_set DMA1_Channel3_IRQHandler,Default_Handler
.weak DMA1_Channel4_IRQHandler
.thumb_set DMA1_Channel4_IRQHandler,Default_Handler
.weak DMA1_Channel5_IRQHandler
.thumb_set DMA1_Channel5_IRQHandler,Default_Handler
.weak DMA1_Channel6_IRQHandler
.thumb_set DMA1_Channel6_IRQHandler,Default_Handler
.weak ADC1_2_IRQHandler
.thumb_set ADC1_2_IRQHandler,Default_Handler
.weak USB_HP_IRQHandler
.thumb_set USB_HP_IRQHandler,Default_Handler
.weak USB_LP_IRQHandler
.thumb_set USB_LP_IRQHandler,Default_Handler
.weak FDCAN1_IT0_IRQHandler
.thumb_set FDCAN1_IT0_IRQHandler,Default_Handler
.weak FDCAN1_IT1_IRQHandler
.thumb_set FDCAN1_IT1_IRQHandler,Default_Handler
.weak EXTI9_5_IRQHandler
.thumb_set EXTI9_5_IRQHandler,Default_Handler
.weak TIM1_BRK_TIM15_IRQHandler
.thumb_set TIM1_BRK_TIM15_IRQHandler,Default_Handler
.weak TIM1_UP_TIM16_IRQHandler
.thumb_set TIM1_UP_TIM16_IRQHandler,Default_Handler
.weak TIM1_TRG_COM_TIM17_IRQHandler
.thumb_set TIM1_TRG_COM_TIM17_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM4_IRQHandler
.thumb_set TIM4_IRQHandler,Default_Handler
.weak I2C1_EV_IRQHandler
.thumb_set I2C1_EV_IRQHandler,Default_Handler
.weak I2C1_ER_IRQHandler
.thumb_set I2C1_ER_IRQHandler,Default_Handler
.weak I2C2_EV_IRQHandler
.thumb_set I2C2_EV_IRQHandler,Default_Handler
.weak I2C2_ER_IRQHandler
.thumb_set I2C2_ER_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_IRQHandler
.thumb_set USART3_IRQHandler,Default_Handler
.weak EXTI15_10_IRQHandler
.thumb_set EXTI15_10_IRQHandler,Default_Handler
.weak RTC_Alarm_IRQHandler
.thumb_set RTC_Alarm_IRQHandler,Default_Handler
.weak USBWakeUp_IRQHandler
.thumb_set USBWakeUp_IRQHandler,Default_Handler
.weak TIM8_BRK_IRQHandler
.thumb_set TIM8_BRK_IRQHandler,Default_Handler
.weak TIM8_UP_IRQHandler
.thumb_set TIM8_UP_IRQHandler,Default_Handler
.weak TIM8_TRG_COM_IRQHandler
.thumb_set TIM8_TRG_COM_IRQHandler,Default_Handler
.weak TIM8_CC_IRQHandler
.thumb_set TIM8_CC_IRQHandler,Default_Handler
.weak LPTIM1_IRQHandler
.thumb_set LPTIM1_IRQHandler,Default_Handler
.weak SPI3_IRQHandler
.thumb_set SPI3_IRQHandler,Default_Handler
.weak UART4_IRQHandler
.thumb_set UART4_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak DMA2_Channel1_IRQHandler
.thumb_set DMA2_Channel1_IRQHandler,Default_Handler
.weak DMA2_Channel2_IRQHandler
.thumb_set DMA2_Channel2_IRQHandler,Default_Handler
.weak DMA2_Channel3_IRQHandler
.thumb_set DMA2_Channel3_IRQHandler,Default_Handler
.weak DMA2_Channel4_IRQHandler
.thumb_set DMA2_Channel4_IRQHandler,Default_Handler
.weak DMA2_Channel5_IRQHandler
.thumb_set DMA2_Channel5_IRQHandler,Default_Handler
.weak UCPD1_IRQHandler
.thumb_set UCPD1_IRQHandler,Default_Handler
.weak COMP1_2_3_IRQHandler
.thumb_set COMP1_2_3_IRQHandler,Default_Handler
.weak COMP4_IRQHandler
.thumb_set COMP4_IRQHandler,Default_Handler
.weak CRS_IRQHandler
.thumb_set CRS_IRQHandler,Default_Handler
.weak SAI1_IRQHandler
.thumb_set SAI1_IRQHandler,Default_Handler
.weak FPU_IRQHandler
.thumb_set FPU_IRQHandler,Default_Handler
.weak RNG_IRQHandler
.thumb_set RNG_IRQHandler,Default_Handler
.weak LPUART1_IRQHandler
.thumb_set LPUART1_IRQHandler,Default_Handler
.weak I2C3_EV_IRQHandler
.thumb_set I2C3_EV_IRQHandler,Default_Handler
.weak I2C3_ER_IRQHandler
.thumb_set I2C3_ER_IRQHandler,Default_Handler
.weak DMAMUX_OVR_IRQHandler
.thumb_set DMAMUX_OVR_IRQHandler,Default_Handler
.weak DMA2_Channel6_IRQHandler
.thumb_set DMA2_Channel6_IRQHandler,Default_Handler
.weak CORDIC_IRQHandler
.thumb_set CORDIC_IRQHandler,Default_Handler
.weak FMAC_IRQHandler
.thumb_set FMAC_IRQHandler,Default_Handler
kevinjoseph1995/sdb-rs | tools/reg_write/asm/reg_write.s
.global main
.section .data
hex_format: .asciz "%#x"
float_format: .asciz "%.2f"
long_float_format: .asciz "%.2Lf"
.section .text
.macro trap
# 62 is the syscall ID for kill
movq $62, %rax
# Set rdi to the PID we just got
movq %r12, %rdi
# The signal ID for SIGTRAP is 5
movq $5, %rsi
syscall
.endm
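# In C terms the macro amounts to: kill(getpid(), SIGTRAP);
# (on x86-64 Linux, syscall 62 is kill and signal 5 is SIGTRAP); the PID
# is expected to be cached in r12 before the first use.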
main:
# Setup the stack frame
push %rbp
movq %rsp, %rbp # Save the base pointer
# Get PID of the current process. 39 is the syscall ID for getpid
movq $39, %rax
syscall
# Store the PID in r12
movq %rax, %r12
trap
leaq hex_format(%rip), %rdi
movq $0, %rax
call printf@plt
movq $0, %rdi
call fflush@plt
trap
# Move the contents of mm0 to rsi
movq %mm0, %rsi
leaq hex_format(%rip), %rdi
movq $0, %rax
call printf@plt
movq $0, %rdi
call fflush@plt
trap
# Print contents of xmm0
leaq float_format(%rip), %rdi
movq $1, %rax
call printf@plt
movq $0, %rdi
call fflush@plt
trap
# Print contents of st0
subq $16, %rsp
fstpt (%rsp) # Store st0 on the stack
leaq long_float_format(%rip), %rdi
movq $0, %rax # Prepare for printf
# Print the long double. Because a long double is 16 bytes and variadic,
# GCC passes it only on the stack; fstpt above already placed it at (%rsp).
call printf@plt
movq $0, %rdi
call fflush@plt
addq $16, %rsp
trap
popq %rbp
# Return value
movq $0, %rax
ret
khbalhandawi/cpython | Modules/_ctypes/libffi_osx/powerpc/ppc-darwin.S
#if defined(__ppc__) || defined(__ppc64__)
/* -----------------------------------------------------------------------
ppc-darwin.S - Copyright (c) 2000 John Hornkvist
Copyright (c) 2004 Free Software Foundation, Inc.
PowerPC Assembly glue.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
``Software''), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
----------------------------------------------------------------------- */
#define LIBFFI_ASM
#include <fficonfig.h>
#include <ffi.h>
#include <ppc-darwin.h>
#include <architecture/ppc/mode_independent_asm.h>
.text
.align 2
.globl _ffi_prep_args
.text
.align 2
.globl _ffi_call_DARWIN
.text
.align 2
_ffi_call_DARWIN:
LFB0:
mr r12,r8 /* We only need r12 until the call,
so it doesn't have to be saved. */
LFB1:
/* Save the old stack pointer as AP. */
mr r8,r1
LCFI0:
#if defined(__ppc64__)
/* Allocate the stack space we need.
r4 (size of input data)
48 bytes (linkage area)
40 bytes (saved registers)
8 bytes (extra FPR)
r4 + 96 bytes total
*/
addi r4,r4,-96 // Add our overhead.
li r0,-32 // Align to 32 bytes.
and r4,r4,r0
#endif
stgux r1,r1,r4 // Grow the stack.
mflr r9
/* Save registers we use. */
#if defined(__ppc64__)
std r27,-40(r8)
#endif
stg r28,MODE_CHOICE(-16,-32)(r8)
stg r29,MODE_CHOICE(-12,-24)(r8)
stg r30,MODE_CHOICE(-8,-16)(r8)
stg r31,MODE_CHOICE(-4,-8)(r8)
stg r9,SF_RETURN(r8) /* return address */
#if !defined(POWERPC_DARWIN) /* TOC unused in OS X */
stg r2,MODE_CHOICE(20,40)(r1)
#endif
LCFI1:
#if defined(__ppc64__)
mr r27,r3 // our extended_cif
#endif
/* Save arguments over call. */
mr r31,r5 /* flags, */
mr r30,r6 /* rvalue, */
mr r29,r7 /* function address, */
mr r28,r8 /* our AP. */
LCFI2:
/* Call ffi_prep_args. */
mr r4,r1
li r9,0
mtctr r12 /* r12 holds address of _ffi_prep_args. */
bctrl
#if !defined(POWERPC_DARWIN) /* TOC unused in OS X */
lg r2,MODE_CHOICE(20,40)(r1)
#endif
/* Now do the call.
Set up cr1 with bits 4-7 of the flags. */
mtcrf 0x40,r31
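/* mtcrf with mask 0x40 copies bits 4-7 of the flags word into cr1, so
   the "bf 6,L2" below can skip the FPR loads when the call site passes
   no floating-point arguments. */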
/* Load all those argument registers.
We have set up a nice stack frame, just load it into registers. */
lg r3,SF_ARG1(r1)
lg r4,SF_ARG2(r1)
lg r5,SF_ARG3(r1)
lg r6,SF_ARG4(r1)
nop
lg r7,SF_ARG5(r1)
lg r8,SF_ARG6(r1)
lg r9,SF_ARG7(r1)
lg r10,SF_ARG8(r1)
/* Load all the FP registers. */
bf 6,L2 /* No floats to load. */
#if defined(__ppc64__)
lfd f1,MODE_CHOICE(-16,-40)-(14*8)(r28)
lfd f2,MODE_CHOICE(-16,-40)-(13*8)(r28)
lfd f3,MODE_CHOICE(-16,-40)-(12*8)(r28)
lfd f4,MODE_CHOICE(-16,-40)-(11*8)(r28)
nop
lfd f5,MODE_CHOICE(-16,-40)-(10*8)(r28)
lfd f6,MODE_CHOICE(-16,-40)-(9*8)(r28)
lfd f7,MODE_CHOICE(-16,-40)-(8*8)(r28)
lfd f8,MODE_CHOICE(-16,-40)-(7*8)(r28)
nop
lfd f9,MODE_CHOICE(-16,-40)-(6*8)(r28)
lfd f10,MODE_CHOICE(-16,-40)-(5*8)(r28)
lfd f11,MODE_CHOICE(-16,-40)-(4*8)(r28)
lfd f12,MODE_CHOICE(-16,-40)-(3*8)(r28)
nop
lfd f13,MODE_CHOICE(-16,-40)-(2*8)(r28)
lfd f14,MODE_CHOICE(-16,-40)-(1*8)(r28)
#elif defined(__ppc__)
lfd f1,MODE_CHOICE(-16,-40)-(13*8)(r28)
lfd f2,MODE_CHOICE(-16,-40)-(12*8)(r28)
lfd f3,MODE_CHOICE(-16,-40)-(11*8)(r28)
lfd f4,MODE_CHOICE(-16,-40)-(10*8)(r28)
nop
lfd f5,MODE_CHOICE(-16,-40)-(9*8)(r28)
lfd f6,MODE_CHOICE(-16,-40)-(8*8)(r28)
lfd f7,MODE_CHOICE(-16,-40)-(7*8)(r28)
lfd f8,MODE_CHOICE(-16,-40)-(6*8)(r28)
nop
lfd f9,MODE_CHOICE(-16,-40)-(5*8)(r28)
lfd f10,MODE_CHOICE(-16,-40)-(4*8)(r28)
lfd f11,MODE_CHOICE(-16,-40)-(3*8)(r28)
lfd f12,MODE_CHOICE(-16,-40)-(2*8)(r28)
nop
lfd f13,MODE_CHOICE(-16,-40)-(1*8)(r28)
#else
#error undefined architecture
#endif
L2:
mr r12,r29 // Put the target address in r12 as specified.
mtctr r12 // Get the address to call into CTR.
nop
nop
bctrl // Make the call.
// Deal with the return value.
#if defined(__ppc64__)
mtcrf 0x3,r31 // flags in cr6 and cr7
bt 27,L(st_return_value)
#elif defined(__ppc__)
mtcrf 0x1,r31 // flags in cr7
#else
#error undefined architecture
#endif
bt 30,L(done_return_value)
bt 29,L(fp_return_value)
stg r3,0(r30)
#if defined(__ppc__)
bf 28,L(done_return_value) // Store the second long if necessary.
stg r4,4(r30)
#endif
// Fall through
L(done_return_value):
lg r1,0(r1) // Restore stack pointer.
// Restore the registers we used.
lg r9,SF_RETURN(r1) // return address
lg r31,MODE_CHOICE(-4,-8)(r1)
mtlr r9
lg r30,MODE_CHOICE(-8,-16)(r1)
lg r29,MODE_CHOICE(-12,-24)(r1)
lg r28,MODE_CHOICE(-16,-32)(r1)
#if defined(__ppc64__)
ld r27,-40(r1)
#endif
blr
#if defined(__ppc64__)
L(st_return_value):
// Grow the stack enough to fit the registers. Leave room for 8 args
// to trample the 1st 8 slots in param area.
stgu r1,-SF_ROUND(280)(r1) // 64 + 104 + 48 + 64
// Store GPRs
std r3,SF_ARG9(r1)
std r4,SF_ARG10(r1)
std r5,SF_ARG11(r1)
std r6,SF_ARG12(r1)
nop
std r7,SF_ARG13(r1)
std r8,SF_ARG14(r1)
std r9,SF_ARG15(r1)
std r10,SF_ARG16(r1)
// Store FPRs
nop
bf 26,L(call_struct_to_ram_form)
stfd f1,SF_ARG17(r1)
stfd f2,SF_ARG18(r1)
stfd f3,SF_ARG19(r1)
stfd f4,SF_ARG20(r1)
nop
stfd f5,SF_ARG21(r1)
stfd f6,SF_ARG22(r1)
stfd f7,SF_ARG23(r1)
stfd f8,SF_ARG24(r1)
nop
stfd f9,SF_ARG25(r1)
stfd f10,SF_ARG26(r1)
stfd f11,SF_ARG27(r1)
stfd f12,SF_ARG28(r1)
nop
stfd f13,SF_ARG29(r1)
L(call_struct_to_ram_form):
ld r3,0(r27) // extended_cif->cif*
ld r3,16(r3) // ffi_cif->rtype*
addi r4,r1,SF_ARG9 // stored GPRs
addi r6,r1,SF_ARG17 // stored FPRs
li r5,0 // GPR size ptr (NULL)
li r7,0 // FPR size ptr (NULL)
li r8,0 // FPR count ptr (NULL)
li r10,0 // struct offset (NULL)
mr r9,r30 // return area
bl Lffi64_struct_to_ram_form$stub
lg r1,0(r1) // Restore stack pointer.
b L(done_return_value)
#endif
L(fp_return_value):
/* Do we have long double to store? */
bf 31,L(fd_return_value)
stfd f1,0(r30)
stfd f2,8(r30)
b L(done_return_value)
L(fd_return_value):
/* Do we have double to store? */
bf 28,L(float_return_value)
stfd f1,0(r30)
b L(done_return_value)
L(float_return_value):
/* We only have a float to store. */
stfs f1,0(r30)
b L(done_return_value)
LFE1:
/* END(_ffi_call_DARWIN) */
/* Provide a null definition of _ffi_call_AIX. */
.text
.align 2
.globl _ffi_call_AIX
.text
.align 2
_ffi_call_AIX:
blr
/* END(_ffi_call_AIX) */
.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms
EH_frame1:
.set L$set$0,LECIE1-LSCIE1
.long L$set$0 ; Length of Common Information Entry
LSCIE1:
.long 0x0 ; CIE Identifier Tag
.byte 0x1 ; CIE Version
.ascii "zR\0" ; CIE Augmentation
.byte 0x1 ; uleb128 0x1; CIE Code Alignment Factor
.byte 0x7c ; sleb128 -4; CIE Data Alignment Factor
.byte 0x41 ; CIE RA Column
.byte 0x1 ; uleb128 0x1; Augmentation size
.byte 0x10 ; FDE Encoding (pcrel)
.byte 0xc ; DW_CFA_def_cfa
.byte 0x1 ; uleb128 0x1
.byte 0x0 ; uleb128 0x0
.align LOG2_GPR_BYTES
LECIE1:
.globl _ffi_call_DARWIN.eh
_ffi_call_DARWIN.eh:
LSFDE1:
.set L$set$1,LEFDE1-LASFDE1
.long L$set$1 ; FDE Length
LASFDE1:
.long LASFDE1-EH_frame1 ; FDE CIE offset
.g_long LFB0-. ; FDE initial location
.set L$set$3,LFE1-LFB0
.g_long L$set$3 ; FDE address range
.byte 0x0 ; uleb128 0x0; Augmentation size
.byte 0x4 ; DW_CFA_advance_loc4
.set L$set$4,LCFI0-LFB1
.long L$set$4
.byte 0xd ; DW_CFA_def_cfa_register
.byte 0x08 ; uleb128 0x08
.byte 0x4 ; DW_CFA_advance_loc4
.set L$set$5,LCFI1-LCFI0
.long L$set$5
.byte 0x11 ; DW_CFA_offset_extended_sf
.byte 0x41 ; uleb128 0x41
.byte 0x7e ; sleb128 -2
.byte 0x9f ; DW_CFA_offset, column 0x1f
.byte 0x1 ; uleb128 0x1
.byte 0x9e ; DW_CFA_offset, column 0x1e
.byte 0x2 ; uleb128 0x2
.byte 0x9d ; DW_CFA_offset, column 0x1d
.byte 0x3 ; uleb128 0x3
.byte 0x9c ; DW_CFA_offset, column 0x1c
.byte 0x4 ; uleb128 0x4
.byte 0x4 ; DW_CFA_advance_loc4
.set L$set$6,LCFI2-LCFI1
.long L$set$6
.byte 0xd ; DW_CFA_def_cfa_register
.byte 0x1c ; uleb128 0x1c
.align LOG2_GPR_BYTES
LEFDE1:
#if defined(__ppc64__)
.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
.align LOG2_GPR_BYTES
Lffi64_struct_to_ram_form$stub:
.indirect_symbol _ffi64_struct_to_ram_form
mflr r0
bcl 20,31,LO$ffi64_struct_to_ram_form
LO$ffi64_struct_to_ram_form:
mflr r11
addis r11,r11,ha16(L_ffi64_struct_to_ram_form$lazy_ptr - LO$ffi64_struct_to_ram_form)
mtlr r0
lgu r12,lo16(L_ffi64_struct_to_ram_form$lazy_ptr - LO$ffi64_struct_to_ram_form)(r11)
mtctr r12
bctr
.lazy_symbol_pointer
L_ffi64_struct_to_ram_form$lazy_ptr:
.indirect_symbol _ffi64_struct_to_ram_form
.g_long dyld_stub_binding_helper
#endif // __ppc64__
#endif // __ppc__ || __ppc64__
khbalhandawi/cpython | Modules/_ctypes/libffi_osx/powerpc/ppc-darwin_closure.S
#if defined(__ppc__)
/* -----------------------------------------------------------------------
ppc-darwin_closure.S - Copyright (c) 2002, 2003, 2004, Free Software Foundation,
Inc. based on ppc_closure.S
PowerPC Assembly glue.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
``Software''), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
----------------------------------------------------------------------- */
#define LIBFFI_ASM
#include <ffi.h>
#include <ppc-ffitarget.h> // for FFI_TRAMPOLINE_SIZE
#include <ppc-darwin.h>
#include <architecture/ppc/mode_independent_asm.h>
.file "ppc-darwin_closure.S"
.text
.align LOG2_GPR_BYTES
.globl _ffi_closure_ASM
.text
.align LOG2_GPR_BYTES
_ffi_closure_ASM:
LFB1:
mflr r0 // Save return address
stg r0,SF_RETURN(r1)
LCFI0:
/* 24/48 bytes (Linkage Area)
32/64 bytes (outgoing parameter area, always reserved)
104 bytes (13*8 from FPR)
16/32 bytes (result)
176/232 total bytes */
/* skip over caller save area and keep stack aligned to 16/32. */
stgu r1,-SF_ROUND(176)(r1)
LCFI1:
/* We want to build up an area for the parameters passed
in registers. (both floating point and integer) */
/* 176/256 bytes (callee stack frame aligned to 16/32)
24/48 bytes (caller linkage area)
200/304 (start of caller parameter area aligned to 4/8)
*/
/* Save GPRs 3 - 10 (aligned to 4/8)
in the parent's outgoing area. */
stg r3,200(r1)
stg r4,204(r1)
stg r5,208(r1)
stg r6,212(r1)
stg r7,216(r1)
stg r8,220(r1)
stg r9,224(r1)
stg r10,228(r1)
/* Save FPRs 1 - 13. (aligned to 8) */
stfd f1,56(r1)
stfd f2,64(r1)
stfd f3,72(r1)
stfd f4,80(r1)
stfd f5,88(r1)
stfd f6,96(r1)
stfd f7,104(r1)
stfd f8,112(r1)
stfd f9,120(r1)
stfd f10,128(r1)
stfd f11,136(r1)
stfd f12,144(r1)
stfd f13,152(r1)
// Set up registers for the routine that actually does the work.
mr r3,r11 // context pointer from the trampoline
addi r4,r1,160 // result storage
addi r5,r1,200 // saved GPRs
addi r6,r1,56 // saved FPRs
bl Lffi_closure_helper_DARWIN$stub
/* Now r3 contains the return type. Use it to look up in a table
so we know how to deal with each type. */
addi r5,r1,160 // Copy result storage pointer.
bl Lget_ret_type0_addr // Get pointer to Lret_type0 into LR.
mflr r4 // Move to r4.
slwi r3,r3,4 // Multiply return type by 16.
add r3,r3,r4 // Add contents of table to table address.
mtctr r3
bctr
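// Dispatch arithmetic: r3 = Lret_type0 + 16*type, so e.g. FFI_TYPE_DOUBLE
// (3) lands at Lret_type0 + 48, which is Lret_type3 below.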
LFE1:
/* Each of the ret_typeX code fragments has to be exactly 16 bytes long
(4 instructions). For cache effectiveness we align to a 16 byte boundary
first. */
.align 4
nop
nop
nop
Lget_ret_type0_addr:
blrl
/* case FFI_TYPE_VOID */
Lret_type0:
b Lfinish
nop
nop
nop
/* case FFI_TYPE_INT */
Lret_type1:
lwz r3,0(r5)
b Lfinish
nop
nop
/* case FFI_TYPE_FLOAT */
Lret_type2:
lfs f1,0(r5)
b Lfinish
nop
nop
/* case FFI_TYPE_DOUBLE */
Lret_type3:
lfd f1,0(r5)
b Lfinish
nop
nop
/* case FFI_TYPE_LONGDOUBLE */
Lret_type4:
lfd f1,0(r5)
lfd f2,8(r5)
b Lfinish
nop
/* case FFI_TYPE_UINT8 */
Lret_type5:
lbz r3,3(r5)
b Lfinish
nop
nop
/* case FFI_TYPE_SINT8 */
Lret_type6:
lbz r3,3(r5)
extsb r3,r3
b Lfinish
nop
/* case FFI_TYPE_UINT16 */
Lret_type7:
lhz r3,2(r5)
b Lfinish
nop
nop
/* case FFI_TYPE_SINT16 */
Lret_type8:
lha r3,2(r5)
b Lfinish
nop
nop
/* case FFI_TYPE_UINT32 */
Lret_type9: // same as Lret_type1
lwz r3,0(r5)
b Lfinish
nop
nop
/* case FFI_TYPE_SINT32 */
Lret_type10: // same as Lret_type1
lwz r3,0(r5)
b Lfinish
nop
nop
/* case FFI_TYPE_UINT64 */
Lret_type11:
lwz r3,0(r5)
lwz r4,4(r5)
b Lfinish
nop
/* case FFI_TYPE_SINT64 */
Lret_type12: // same as Lret_type11
lwz r3,0(r5)
lwz r4,4(r5)
b Lfinish
nop
/* case FFI_TYPE_STRUCT */
Lret_type13:
b Lfinish
nop
nop
nop
/* End 16-byte aligned cases */
/* case FFI_TYPE_POINTER */
// This case assumes that FFI_TYPE_POINTER == FFI_TYPE_LAST. If more types
// are added in future, the following code will need to be updated and
// padded to 16 bytes.
Lret_type14:
lg r3,0(r5)
// fall through
/* case done */
Lfinish:
addi r1,r1,SF_ROUND(176) // Restore stack pointer.
lg r0,SF_RETURN(r1) // Restore return address.
mtlr r0 // Restore link register.
blr
/* END(ffi_closure_ASM) */
.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support
EH_frame1:
.set L$set$0,LECIE1-LSCIE1
.long L$set$0 ; Length of Common Information Entry
LSCIE1:
.long 0x0 ; CIE Identifier Tag
.byte 0x1 ; CIE Version
.ascii "zR\0" ; CIE Augmentation
.byte 0x1 ; uleb128 0x1; CIE Code Alignment Factor
.byte 0x7c ; sleb128 -4; CIE Data Alignment Factor
.byte 0x41 ; CIE RA Column
.byte 0x1 ; uleb128 0x1; Augmentation size
.byte 0x10 ; FDE Encoding (pcrel)
.byte 0xc ; DW_CFA_def_cfa
.byte 0x1 ; uleb128 0x1
.byte 0x0 ; uleb128 0x0
.align LOG2_GPR_BYTES
LECIE1:
.globl _ffi_closure_ASM.eh
_ffi_closure_ASM.eh:
LSFDE1:
.set L$set$1,LEFDE1-LASFDE1
.long L$set$1 ; FDE Length
LASFDE1:
.long LASFDE1-EH_frame1 ; FDE CIE offset
.g_long LFB1-. ; FDE initial location
.set L$set$3,LFE1-LFB1
.g_long L$set$3 ; FDE address range
.byte 0x0 ; uleb128 0x0; Augmentation size
.byte 0x4 ; DW_CFA_advance_loc4
.set L$set$3,LCFI1-LCFI0
.long L$set$3
.byte 0xe ; DW_CFA_def_cfa_offset
.byte 176,1 ; uleb128 176
.byte 0x4 ; DW_CFA_advance_loc4
.set L$set$4,LCFI0-LFB1
.long L$set$4
.byte 0x11 ; DW_CFA_offset_extended_sf
.byte 0x41 ; uleb128 0x41
.byte 0x7e ; sleb128 -2
.align LOG2_GPR_BYTES
LEFDE1:
.data
.align LOG2_GPR_BYTES
LDFCM0:
.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
.align LOG2_GPR_BYTES
Lffi_closure_helper_DARWIN$stub:
.indirect_symbol _ffi_closure_helper_DARWIN
mflr r0
bcl 20,31,LO$ffi_closure_helper_DARWIN
LO$ffi_closure_helper_DARWIN:
mflr r11
addis r11,r11,ha16(L_ffi_closure_helper_DARWIN$lazy_ptr - LO$ffi_closure_helper_DARWIN)
mtlr r0
lgu r12,lo16(L_ffi_closure_helper_DARWIN$lazy_ptr - LO$ffi_closure_helper_DARWIN)(r11)
mtctr r12
bctr
.lazy_symbol_pointer
L_ffi_closure_helper_DARWIN$lazy_ptr:
.indirect_symbol _ffi_closure_helper_DARWIN
.g_long dyld_stub_binding_helper
#endif // __ppc__
|
khbalhandawi/cpython
| 9,914
|
Modules/_ctypes/libffi_osx/powerpc/ppc64-darwin_closure.S
|
#if defined(__ppc64__)
/* -----------------------------------------------------------------------
ppc64-darwin_closure.S - Copyright (c) 2002, 2003, 2004, Free Software Foundation,
Inc. based on ppc_closure.S
PowerPC Assembly glue.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
``Software''), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
----------------------------------------------------------------------- */
#define LIBFFI_ASM
#include <ffi.h>
#include <ppc-ffitarget.h> // for FFI_TRAMPOLINE_SIZE
#include <ppc-darwin.h>
#include <architecture/ppc/mode_independent_asm.h>
.file "ppc64-darwin_closure.S"
.text
.align LOG2_GPR_BYTES
.globl _ffi_closure_ASM
.text
.align LOG2_GPR_BYTES
_ffi_closure_ASM:
LFB1:
mflr r0
stg r0,SF_RETURN(r1) // save return address
// Save GPRs 3 - 10 (aligned to 8) in the parent's outgoing area.
stg r3,SF_ARG1(r1)
stg r4,SF_ARG2(r1)
stg r5,SF_ARG3(r1)
stg r6,SF_ARG4(r1)
stg r7,SF_ARG5(r1)
stg r8,SF_ARG6(r1)
stg r9,SF_ARG7(r1)
stg r10,SF_ARG8(r1)
LCFI0:
/* 48 bytes (Linkage Area)
64 bytes (outgoing parameter area, always reserved)
112 bytes (14*8 for incoming FPR)
? bytes (result)
112 bytes (14*8 for outgoing FPR)
16 bytes (2 saved registers)
352 + ? total bytes
*/
std r31,-8(r1) // Save registers we use.
std r30,-16(r1)
mr r30,r1 // Save the old SP.
mr r31,r11 // Save the ffi_closure around ffi64_data_size.
// Calculate the space we need.
stdu r1,-SF_MINSIZE(r1)
ld r3,FFI_TRAMPOLINE_SIZE(r31) // ffi_closure->cif*
ld r3,16(r3) // ffi_cif->rtype*
bl Lffi64_data_size$stub
ld r1,0(r1)
addi r3,r3,352 // Add our overhead.
neg r3,r3
li r0,-32 // Align to 32 bytes.
and r3,r3,r0
stdux r1,r1,r3 // Grow the stack.
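// Net effect: r3 = -((result size + 352) rounded up to a multiple of 32),
// and stdux stores the old SP at the new r1 (the back chain) while moving
// r1 down by that amount in a single instruction.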
mr r11,r31 // Copy the ffi_closure back.
LCFI1:
// We want to build up an area for the parameters passed
// in registers. (both floating point and integer)
/* 320 bytes (callee stack frame aligned to 32)
48 bytes (caller linkage area)
368 (start of caller parameter area aligned to 8)
*/
// Save FPRs 1 - 14. (aligned to 8)
stfd f1,112(r1)
stfd f2,120(r1)
stfd f3,128(r1)
stfd f4,136(r1)
stfd f5,144(r1)
stfd f6,152(r1)
stfd f7,160(r1)
stfd f8,168(r1)
stfd f9,176(r1)
stfd f10,184(r1)
stfd f11,192(r1)
stfd f12,200(r1)
stfd f13,208(r1)
stfd f14,216(r1)
// Set up registers for the routine that actually does the work.
mr r3,r11 // context pointer from the trampoline
addi r4,r1,224 // result storage
addi r5,r30,SF_ARG1 // saved GPRs
addi r6,r1,112 // saved FPRs
bl Lffi_closure_helper_DARWIN$stub
// Look up the proper starting point in the table
// by using the return type as an offset.
addi r5,r1,224 // Get pointer to results area.
bl Lget_ret_type0_addr // Get pointer to Lret_type0 into LR.
mflr r4 // Move to r4.
slwi r3,r3,4 // Now multiply return type by 16.
add r3,r3,r4 // Add contents of table to table address.
mtctr r3
bctr
LFE1:
// Each of the ret_typeX code fragments has to be exactly 16 bytes long
// (4 instructions). For cache effectiveness we align to a 16 byte
// boundary first.
.align 4
nop
nop
nop
Lget_ret_type0_addr:
blrl
// case FFI_TYPE_VOID
Lret_type0:
b Lfinish
nop
nop
nop
// case FFI_TYPE_INT
Lret_type1:
lwz r3,4(r5)
b Lfinish
nop
nop
// case FFI_TYPE_FLOAT
Lret_type2:
lfs f1,0(r5)
b Lfinish
nop
nop
// case FFI_TYPE_DOUBLE
Lret_type3:
lfd f1,0(r5)
b Lfinish
nop
nop
// case FFI_TYPE_LONGDOUBLE
Lret_type4:
lfd f1,0(r5)
lfd f2,8(r5)
b Lfinish
nop
// case FFI_TYPE_UINT8
Lret_type5:
lbz r3,7(r5)
b Lfinish
nop
nop
// case FFI_TYPE_SINT8
Lret_type6:
lbz r3,7(r5)
extsb r3,r3
b Lfinish
nop
// case FFI_TYPE_UINT16
Lret_type7:
lhz r3,6(r5)
b Lfinish
nop
nop
// case FFI_TYPE_SINT16
Lret_type8:
lha r3,6(r5)
b Lfinish
nop
nop
// case FFI_TYPE_UINT32
Lret_type9: // same as Lret_type1
lwz r3,4(r5)
b Lfinish
nop
nop
// case FFI_TYPE_SINT32
Lret_type10: // same as Lret_type1
lwz r3,4(r5)
b Lfinish
nop
nop
// case FFI_TYPE_UINT64
Lret_type11:
ld r3,0(r5)
b Lfinish
nop
nop
// case FFI_TYPE_SINT64
Lret_type12: // same as Lret_type11
ld r3,0(r5)
b Lfinish
nop
nop
// case FFI_TYPE_STRUCT
Lret_type13:
b Lret_struct
nop
nop
nop
// ** End 16-byte aligned cases **
// case FFI_TYPE_POINTER
// This case assumes that FFI_TYPE_POINTER == FFI_TYPE_LAST. If more types
// are added in future, the following code will need to be updated and
// padded to 16 bytes.
Lret_type14:
lg r3,0(r5)
b Lfinish
// copy struct into registers
Lret_struct:
ld r31,FFI_TRAMPOLINE_SIZE(r31) // ffi_closure->cif*
ld r3,16(r31) // ffi_cif->rtype*
ld r31,24(r31) // ffi_cif->flags
mr r4,r5 // copy struct* to 2nd arg
addi r7,r1,SF_ARG9 // GPR return area
addi r9,r30,-16-(14*8) // FPR return area
li r5,0 // struct offset ptr (NULL)
li r6,0 // FPR used count ptr (NULL)
li r8,0 // GPR return area size ptr (NULL)
li r10,0 // FPR return area size ptr (NULL)
bl Lffi64_struct_to_reg_form$stub
// Load GPRs
ld r3,SF_ARG9(r1)
ld r4,SF_ARG10(r1)
ld r5,SF_ARG11(r1)
ld r6,SF_ARG12(r1)
nop
ld r7,SF_ARG13(r1)
ld r8,SF_ARG14(r1)
ld r9,SF_ARG15(r1)
ld r10,SF_ARG16(r1)
nop
// Load FPRs
mtcrf 0x2,r31
bf 26,Lfinish
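// (CR bit 26 was loaded from ffi_cif->flags by the mtcrf above; it
// presumably flags whether any FPR results are present -- when it is clear,
// the lfd reloads below are skipped.)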
lfd f1,-16-(14*8)(r30)
lfd f2,-16-(13*8)(r30)
lfd f3,-16-(12*8)(r30)
lfd f4,-16-(11*8)(r30)
nop
lfd f5,-16-(10*8)(r30)
lfd f6,-16-(9*8)(r30)
lfd f7,-16-(8*8)(r30)
lfd f8,-16-(7*8)(r30)
nop
lfd f9,-16-(6*8)(r30)
lfd f10,-16-(5*8)(r30)
lfd f11,-16-(4*8)(r30)
lfd f12,-16-(3*8)(r30)
nop
lfd f13,-16-(2*8)(r30)
lfd f14,-16-(1*8)(r30)
// Fall through
// case done
Lfinish:
lg r1,0(r1) // Restore stack pointer.
ld r31,-8(r1) // Restore registers we used.
ld r30,-16(r1)
lg r0,SF_RETURN(r1) // Get return address.
mtlr r0 // Reset link register.
blr
// END(ffi_closure_ASM)
.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support
EH_frame1:
.set L$set$0,LECIE1-LSCIE1
.long L$set$0 ; Length of Common Information Entry
LSCIE1:
.long 0x0 ; CIE Identifier Tag
.byte 0x1 ; CIE Version
.ascii "zR\0" ; CIE Augmentation
.byte 0x1 ; uleb128 0x1; CIE Code Alignment Factor
.byte 0x7c ; sleb128 -4; CIE Data Alignment Factor
.byte 0x41 ; CIE RA Column
.byte 0x1 ; uleb128 0x1; Augmentation size
.byte 0x10 ; FDE Encoding (pcrel)
.byte 0xc ; DW_CFA_def_cfa
.byte 0x1 ; uleb128 0x1
.byte 0x0 ; uleb128 0x0
.align LOG2_GPR_BYTES
LECIE1:
.globl _ffi_closure_ASM.eh
_ffi_closure_ASM.eh:
LSFDE1:
.set L$set$1,LEFDE1-LASFDE1
.long L$set$1 ; FDE Length
LASFDE1:
.long LASFDE1-EH_frame1 ; FDE CIE offset
.g_long LFB1-. ; FDE initial location
.set L$set$3,LFE1-LFB1
.g_long L$set$3 ; FDE address range
.byte 0x0 ; uleb128 0x0; Augmentation size
.byte 0x4 ; DW_CFA_advance_loc4
.set L$set$3,LCFI1-LCFI0
.long L$set$3
.byte 0xe ; DW_CFA_def_cfa_offset
.byte 176,1 ; uleb128 176
.byte 0x4 ; DW_CFA_advance_loc4
.set L$set$4,LCFI0-LFB1
.long L$set$4
.byte 0x11 ; DW_CFA_offset_extended_sf
.byte 0x41 ; uleb128 0x41
.byte 0x7e ; sleb128 -2
.align LOG2_GPR_BYTES
LEFDE1:
.data
.align LOG2_GPR_BYTES
LDFCM0:
.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
.align LOG2_GPR_BYTES
Lffi_closure_helper_DARWIN$stub:
.indirect_symbol _ffi_closure_helper_DARWIN
mflr r0
bcl 20,31,LO$ffi_closure_helper_DARWIN
LO$ffi_closure_helper_DARWIN:
mflr r11
addis r11,r11,ha16(L_ffi_closure_helper_DARWIN$lazy_ptr - LO$ffi_closure_helper_DARWIN)
mtlr r0
lgu r12,lo16(L_ffi_closure_helper_DARWIN$lazy_ptr - LO$ffi_closure_helper_DARWIN)(r11)
mtctr r12
bctr
.lazy_symbol_pointer
L_ffi_closure_helper_DARWIN$lazy_ptr:
.indirect_symbol _ffi_closure_helper_DARWIN
.g_long dyld_stub_binding_helper
.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
.align LOG2_GPR_BYTES
Lffi64_struct_to_reg_form$stub:
.indirect_symbol _ffi64_struct_to_reg_form
mflr r0
bcl 20,31,LO$ffi64_struct_to_reg_form
LO$ffi64_struct_to_reg_form:
mflr r11
addis r11,r11,ha16(L_ffi64_struct_to_reg_form$lazy_ptr - LO$ffi64_struct_to_reg_form)
mtlr r0
lgu r12,lo16(L_ffi64_struct_to_reg_form$lazy_ptr - LO$ffi64_struct_to_reg_form)(r11)
mtctr r12
bctr
.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
.align LOG2_GPR_BYTES
Lffi64_data_size$stub:
.indirect_symbol _ffi64_data_size
mflr r0
bcl 20,31,LO$ffi64_data_size
LO$ffi64_data_size:
mflr r11
addis r11,r11,ha16(L_ffi64_data_size$lazy_ptr - LO$ffi64_data_size)
mtlr r0
lgu r12,lo16(L_ffi64_data_size$lazy_ptr - LO$ffi64_data_size)(r11)
mtctr r12
bctr
.lazy_symbol_pointer
L_ffi64_struct_to_reg_form$lazy_ptr:
.indirect_symbol _ffi64_struct_to_reg_form
.g_long dyld_stub_binding_helper
L_ffi64_data_size$lazy_ptr:
.indirect_symbol _ffi64_data_size
.g_long dyld_stub_binding_helper
#endif // __ppc64__
|
khbalhandawi/cpython
| 8,955
|
Modules/_ctypes/libffi_osx/x86/x86-darwin.S
|
#ifdef __i386__
/* -----------------------------------------------------------------------
darwin.S - Copyright (c) 1996, 1998, 2001, 2002, 2003 Red Hat, Inc.
X86 Foreign Function Interface
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
``Software''), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL CYGNUS SOLUTIONS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
----------------------------------------------------------------------- */
/*
* This file is based on sysv.S and then hacked up by Ronald who hasn't done
* assembly programming in 8 years.
*/
#ifndef __x86_64__
#define LIBFFI_ASM
#include <fficonfig.h>
#include <ffi.h>
#ifdef PyObjC_STRICT_DEBUGGING
/* XXX: Debugging of stack alignment, to be removed */
#define ASSERT_STACK_ALIGNED movdqa -16(%esp), %xmm0
#else
#define ASSERT_STACK_ALIGNED
#endif
.text
.globl _ffi_prep_args
.align 4
.globl _ffi_call_SYSV
_ffi_call_SYSV:
LFB1:
pushl %ebp
LCFI0:
movl %esp,%ebp
LCFI1:
subl $8,%esp
/* Make room for all of the new args. */
movl 16(%ebp),%ecx
subl %ecx,%esp
movl %esp,%eax
/* Place all of the ffi_prep_args in position */
subl $8,%esp
pushl 12(%ebp)
pushl %eax
call *8(%ebp)
/* Return stack to previous state and call the function */
addl $16,%esp
call *28(%ebp)
/* Remove the space we pushed for the args */
movl 16(%ebp),%ecx
addl %ecx,%esp
/* Load %ecx with the return type code */
movl 20(%ebp),%ecx
/* If the return value pointer is NULL, assume no return value. */
cmpl $0,24(%ebp)
jne Lretint
/* Even if there is no space for the return value, we are
obliged to handle floating-point values. */
cmpl $FFI_TYPE_FLOAT,%ecx
jne Lnoretval
fstp %st(0)
jmp Lepilogue
Lretint:
cmpl $FFI_TYPE_INT,%ecx
jne Lretfloat
/* Load %ecx with the pointer to storage for the return value */
movl 24(%ebp),%ecx
movl %eax,0(%ecx)
jmp Lepilogue
Lretfloat:
cmpl $FFI_TYPE_FLOAT,%ecx
jne Lretdouble
/* Load %ecx with the pointer to storage for the return value */
movl 24(%ebp),%ecx
fstps (%ecx)
jmp Lepilogue
Lretdouble:
cmpl $FFI_TYPE_DOUBLE,%ecx
jne Lretlongdouble
/* Load %ecx with the pointer to storage for the return value */
movl 24(%ebp),%ecx
fstpl (%ecx)
jmp Lepilogue
Lretlongdouble:
cmpl $FFI_TYPE_LONGDOUBLE,%ecx
jne Lretint64
/* Load %ecx with the pointer to storage for the return value */
movl 24(%ebp),%ecx
fstpt (%ecx)
jmp Lepilogue
Lretint64:
cmpl $FFI_TYPE_SINT64,%ecx
jne Lretstruct1b
/* Load %ecx with the pointer to storage for the return value */
movl 24(%ebp),%ecx
movl %eax,0(%ecx)
movl %edx,4(%ecx)
jmp Lepilogue
Lretstruct1b:
cmpl $FFI_TYPE_SINT8,%ecx
jne Lretstruct2b
/* Load %ecx with the pointer to storage for the return value */
movl 24(%ebp),%ecx
movb %al,0(%ecx)
jmp Lepilogue
Lretstruct2b:
cmpl $FFI_TYPE_SINT16,%ecx
jne Lretstruct
/* Load %ecx with the pointer to storage for the return value */
movl 24(%ebp),%ecx
movw %ax,0(%ecx)
jmp Lepilogue
Lretstruct:
cmpl $FFI_TYPE_STRUCT,%ecx
jne Lnoretval
/* Nothing to do! */
addl $4,%esp
popl %ebp
ret
Lnoretval:
Lepilogue:
addl $8,%esp
movl %ebp,%esp
popl %ebp
ret
LFE1:
.ffi_call_SYSV_end:
.align 4
FFI_HIDDEN (ffi_closure_SYSV)
.globl _ffi_closure_SYSV
_ffi_closure_SYSV:
LFB2:
pushl %ebp
LCFI2:
movl %esp, %ebp
LCFI3:
subl $56, %esp
leal -40(%ebp), %edx
movl %edx, -12(%ebp) /* resp */
leal 8(%ebp), %edx
movl %edx, 4(%esp) /* args = __builtin_dwarf_cfa () */
leal -12(%ebp), %edx
movl %edx, (%esp) /* &resp */
movl %ebx, 8(%esp)
LCFI7:
call L_ffi_closure_SYSV_inner$stub
movl 8(%esp), %ebx
movl -12(%ebp), %ecx
cmpl $FFI_TYPE_INT, %eax
je Lcls_retint
cmpl $FFI_TYPE_FLOAT, %eax
je Lcls_retfloat
cmpl $FFI_TYPE_DOUBLE, %eax
je Lcls_retdouble
cmpl $FFI_TYPE_LONGDOUBLE, %eax
je Lcls_retldouble
cmpl $FFI_TYPE_SINT64, %eax
je Lcls_retllong
cmpl $FFI_TYPE_UINT8, %eax
je Lcls_retstruct1
cmpl $FFI_TYPE_SINT8, %eax
je Lcls_retstruct1
cmpl $FFI_TYPE_UINT16, %eax
je Lcls_retstruct2
cmpl $FFI_TYPE_SINT16, %eax
je Lcls_retstruct2
cmpl $FFI_TYPE_STRUCT, %eax
je Lcls_retstruct
Lcls_epilogue:
movl %ebp, %esp
popl %ebp
ret
Lcls_retint:
movl (%ecx), %eax
jmp Lcls_epilogue
Lcls_retfloat:
flds (%ecx)
jmp Lcls_epilogue
Lcls_retdouble:
fldl (%ecx)
jmp Lcls_epilogue
Lcls_retldouble:
fldt (%ecx)
jmp Lcls_epilogue
Lcls_retllong:
movl (%ecx), %eax
movl 4(%ecx), %edx
jmp Lcls_epilogue
Lcls_retstruct1:
movsbl (%ecx), %eax
jmp Lcls_epilogue
Lcls_retstruct2:
movswl (%ecx), %eax
jmp Lcls_epilogue
Lcls_retstruct:
lea -8(%ebp),%esp
movl %ebp, %esp
popl %ebp
ret $4
LFE2:
#if !FFI_NO_RAW_API
#define RAW_CLOSURE_CIF_OFFSET ((FFI_TRAMPOLINE_SIZE + 3) & ~3)
#define RAW_CLOSURE_FUN_OFFSET (RAW_CLOSURE_CIF_OFFSET + 4)
#define RAW_CLOSURE_USER_DATA_OFFSET (RAW_CLOSURE_FUN_OFFSET + 4)
#define CIF_FLAGS_OFFSET 20
.align 4
FFI_HIDDEN (ffi_closure_raw_SYSV)
.globl _ffi_closure_raw_SYSV
_ffi_closure_raw_SYSV:
LFB3:
pushl %ebp
LCFI4:
movl %esp, %ebp
LCFI5:
pushl %esi
LCFI6:
subl $36, %esp
movl RAW_CLOSURE_CIF_OFFSET(%eax), %esi /* closure->cif */
movl RAW_CLOSURE_USER_DATA_OFFSET(%eax), %edx /* closure->user_data */
movl %edx, 12(%esp) /* user_data */
leal 8(%ebp), %edx /* __builtin_dwarf_cfa () */
movl %edx, 8(%esp) /* raw_args */
leal -24(%ebp), %edx
movl %edx, 4(%esp) /* &res */
movl %esi, (%esp) /* cif */
call *RAW_CLOSURE_FUN_OFFSET(%eax) /* closure->fun */
movl CIF_FLAGS_OFFSET(%esi), %eax /* rtype */
cmpl $FFI_TYPE_INT, %eax
je Lrcls_retint
cmpl $FFI_TYPE_FLOAT, %eax
je Lrcls_retfloat
cmpl $FFI_TYPE_DOUBLE, %eax
je Lrcls_retdouble
cmpl $FFI_TYPE_LONGDOUBLE, %eax
je Lrcls_retldouble
cmpl $FFI_TYPE_SINT64, %eax
je Lrcls_retllong
Lrcls_epilogue:
addl $36, %esp
popl %esi
popl %ebp
ret
Lrcls_retint:
movl -24(%ebp), %eax
jmp Lrcls_epilogue
Lrcls_retfloat:
flds -24(%ebp)
jmp Lrcls_epilogue
Lrcls_retdouble:
fldl -24(%ebp)
jmp Lrcls_epilogue
Lrcls_retldouble:
fldt -24(%ebp)
jmp Lrcls_epilogue
Lrcls_retllong:
movl -24(%ebp), %eax
movl -20(%ebp), %edx
jmp Lrcls_epilogue
LFE3:
#endif
.section __IMPORT,__jump_table,symbol_stubs,self_modifying_code+pure_instructions,5
L_ffi_closure_SYSV_inner$stub:
.indirect_symbol _ffi_closure_SYSV_inner
hlt ; hlt ; hlt ; hlt ; hlt
.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support
EH_frame1:
.set L$set$0,LECIE1-LSCIE1
.long L$set$0
LSCIE1:
.long 0x0
.byte 0x1
.ascii "zR\0"
.byte 0x1
.byte 0x7c
.byte 0x8
.byte 0x1
.byte 0x10
.byte 0xc
.byte 0x5
.byte 0x4
.byte 0x88
.byte 0x1
.align 2
LECIE1:
.globl _ffi_call_SYSV.eh
_ffi_call_SYSV.eh:
LSFDE1:
.set L$set$1,LEFDE1-LASFDE1
.long L$set$1
LASFDE1:
.long LASFDE1-EH_frame1
.long LFB1-.
.set L$set$2,LFE1-LFB1
.long L$set$2
.byte 0x0
.byte 0x4
.set L$set$3,LCFI0-LFB1
.long L$set$3
.byte 0xe
.byte 0x8
.byte 0x84
.byte 0x2
.byte 0x4
.set L$set$4,LCFI1-LCFI0
.long L$set$4
.byte 0xd
.byte 0x4
.align 2
LEFDE1:
.globl _ffi_closure_SYSV.eh
_ffi_closure_SYSV.eh:
LSFDE2:
.set L$set$5,LEFDE2-LASFDE2
.long L$set$5
LASFDE2:
.long LASFDE2-EH_frame1
.long LFB2-.
.set L$set$6,LFE2-LFB2
.long L$set$6
.byte 0x0
.byte 0x4
.set L$set$7,LCFI2-LFB2
.long L$set$7
.byte 0xe
.byte 0x8
.byte 0x84
.byte 0x2
.byte 0x4
.set L$set$8,LCFI3-LCFI2
.long L$set$8
.byte 0xd
.byte 0x4
.align 2
LEFDE2:
#if !FFI_NO_RAW_API
.globl _ffi_closure_raw_SYSV.eh
_ffi_closure_raw_SYSV.eh:
LSFDE3:
.set L$set$10,LEFDE3-LASFDE3
.long L$set$10
LASFDE3:
.long LASFDE3-EH_frame1
.long LFB3-.
.set L$set$11,LFE3-LFB3
.long L$set$11
.byte 0x0
.byte 0x4
.set L$set$12,LCFI4-LFB3
.long L$set$12
.byte 0xe
.byte 0x8
.byte 0x84
.byte 0x2
.byte 0x4
.set L$set$13,LCFI5-LCFI4
.long L$set$13
.byte 0xd
.byte 0x4
.byte 0x4
.set L$set$14,LCFI6-LCFI5
.long L$set$14
.byte 0x85
.byte 0x3
.align 2
LEFDE3:
#endif
#endif /* ifndef __x86_64__ */
#endif /* defined __i386__ */
|
khbalhandawi/cpython
| 11,660
|
Modules/_ctypes/libffi_osx/x86/darwin64.S
|
/* -----------------------------------------------------------------------
darwin64.S - Copyright (c) 2006 Free Software Foundation, Inc.
derived from unix64.S
x86-64 Foreign Function Interface for Darwin.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
``Software''), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
----------------------------------------------------------------------- */
#ifdef __x86_64__
#define LIBFFI_ASM
#include <fficonfig.h>
#include <ffi.h>
.file "darwin64.S"
.text
/* ffi_call_unix64 (void *args, unsigned long bytes, unsigned flags,
void *raddr, void (*fnaddr)());
Bit o' trickiness here -- ARGS+BYTES is the base of the stack frame
for this function. This has been allocated by ffi_call. We also
deallocate some of the stack that has been alloca'd. */
.align 3
.globl _ffi_call_unix64
_ffi_call_unix64:
LUW0:
movq (%rsp), %r10 /* Load return address. */
movq %rdi, %r12 /* Save a copy of the register area. */
leaq (%rdi, %rsi), %rax /* Find local stack base. */
movq %rdx, (%rax) /* Save flags. */
movq %rcx, 8(%rax) /* Save raddr. */
movq %rbp, 16(%rax) /* Save old frame pointer. */
movq %r10, 24(%rax) /* Relocate return address. */
movq %rax, %rbp /* Finalize local stack frame. */
LUW1:
/* movq %rdi, %r10 // Save a copy of the register area. */
movq %r12, %r10
movq %r8, %r11 /* Save a copy of the target fn. */
movl %r9d, %eax /* Set number of SSE registers. */
/* Load up all argument registers. */
movq (%r10), %rdi
movq 8(%r10), %rsi
movq 16(%r10), %rdx
movq 24(%r10), %rcx
movq 32(%r10), %r8
movq 40(%r10), %r9
testl %eax, %eax
jnz Lload_sse
Lret_from_load_sse:
/* Deallocate the reg arg area. */
leaq 176(%r10), %rsp
/* Call the user function. */
call *%r11
/* Deallocate stack arg area; local stack frame in redzone. */
leaq 24(%rbp), %rsp
movq 0(%rbp), %rcx /* Reload flags. */
movq 8(%rbp), %rdi /* Reload raddr. */
movq 16(%rbp), %rbp /* Reload old frame pointer. */
LUW2:
/* The first byte of the flags contains the FFI_TYPE. */
movzbl %cl, %r10d
leaq Lstore_table(%rip), %r11
movslq (%r11, %r10, 4), %r10
addq %r11, %r10
jmp *%r10
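/* Each Lstore_table entry holds the 32-bit offset of its handler relative
   to Lstore_table itself; movslq + addq turn that into an absolute address,
   which keeps the table position-independent. */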
Lstore_table:
.long Lst_void-Lstore_table /* FFI_TYPE_VOID */
.long Lst_sint32-Lstore_table /* FFI_TYPE_INT */
.long Lst_float-Lstore_table /* FFI_TYPE_FLOAT */
.long Lst_double-Lstore_table /* FFI_TYPE_DOUBLE */
.long Lst_ldouble-Lstore_table /* FFI_TYPE_LONGDOUBLE */
.long Lst_uint8-Lstore_table /* FFI_TYPE_UINT8 */
.long Lst_sint8-Lstore_table /* FFI_TYPE_SINT8 */
.long Lst_uint16-Lstore_table /* FFI_TYPE_UINT16 */
.long Lst_sint16-Lstore_table /* FFI_TYPE_SINT16 */
.long Lst_uint32-Lstore_table /* FFI_TYPE_UINT32 */
.long Lst_sint32-Lstore_table /* FFI_TYPE_SINT32 */
.long Lst_int64-Lstore_table /* FFI_TYPE_UINT64 */
.long Lst_int64-Lstore_table /* FFI_TYPE_SINT64 */
.long Lst_struct-Lstore_table /* FFI_TYPE_STRUCT */
.long Lst_int64-Lstore_table /* FFI_TYPE_POINTER */
.text
.align 3
Lst_void:
ret
.align 3
Lst_uint8:
movzbq %al, %rax
movq %rax, (%rdi)
ret
.align 3
Lst_sint8:
movsbq %al, %rax
movq %rax, (%rdi)
ret
.align 3
Lst_uint16:
movzwq %ax, %rax
movq %rax, (%rdi)
ret /* was missing: without it, execution falls through into Lst_sint16 and sign-extends */
.align 3
Lst_sint16:
movswq %ax, %rax
movq %rax, (%rdi)
ret
.align 3
Lst_uint32:
movl %eax, %eax
movq %rax, (%rdi)
ret /* was missing: without it, execution falls through into Lst_sint32's cltq */
.align 3
Lst_sint32:
cltq
movq %rax, (%rdi)
ret
.align 3
Lst_int64:
movq %rax, (%rdi)
ret
.align 3
Lst_float:
movss %xmm0, (%rdi)
ret
.align 3
Lst_double:
movsd %xmm0, (%rdi)
ret
Lst_ldouble:
fstpt (%rdi)
ret
.align 3
Lst_struct:
leaq -20(%rsp), %rsi /* Scratch area in redzone. */
/* We have to locate the values now, and since we don't want to
write too much data into the user's return value, we spill the
value to a 16 byte scratch area first. Bits 8, 9, and 10
control where the values are located. Only one of the three
bits will be set; see ffi_prep_cif_machdep for the pattern. */
movd %xmm0, %r10
movd %xmm1, %r11
testl $0x100, %ecx
cmovnz %rax, %rdx
cmovnz %r10, %rax
testl $0x200, %ecx
cmovnz %r10, %rdx
testl $0x400, %ecx
cmovnz %r10, %rax
cmovnz %r11, %rdx
movq %rax, (%rsi)
movq %rdx, 8(%rsi)
/* Bits 12-31 contain the true size of the structure. Copy from
the scratch area to the true destination. */
shrl $12, %ecx
rep movsb
ret
/* Many times we can avoid loading any SSE registers at all.
It's not worth an indirect jump to load the exact set of
SSE registers needed; zero or all is a good compromise. */
.align 3
LUW3:
Lload_sse:
movdqa 48(%r10), %xmm0
movdqa 64(%r10), %xmm1
movdqa 80(%r10), %xmm2
movdqa 96(%r10), %xmm3
movdqa 112(%r10), %xmm4
movdqa 128(%r10), %xmm5
movdqa 144(%r10), %xmm6
movdqa 160(%r10), %xmm7
jmp Lret_from_load_sse
LUW4:
.align 3
.globl _ffi_closure_unix64
_ffi_closure_unix64:
LUW5:
/* The carry flag is set by the trampoline iff SSE registers
are used. Don't clobber it before the branch instruction. */
leaq -200(%rsp), %rsp
LUW6:
movq %rdi, (%rsp)
movq %rsi, 8(%rsp)
movq %rdx, 16(%rsp)
movq %rcx, 24(%rsp)
movq %r8, 32(%rsp)
movq %r9, 40(%rsp)
jc Lsave_sse
Lret_from_save_sse:
movq %r10, %rdi
leaq 176(%rsp), %rsi
movq %rsp, %rdx
leaq 208(%rsp), %rcx
call _ffi_closure_unix64_inner
/* Deallocate stack frame early; return value is now in redzone. */
addq $200, %rsp
LUW7:
/* The first byte of the return value contains the FFI_TYPE. */
movzbl %al, %r10d
leaq Lload_table(%rip), %r11
movslq (%r11, %r10, 4), %r10
addq %r11, %r10
jmp *%r10
Lload_table:
.long Lld_void-Lload_table /* FFI_TYPE_VOID */
.long Lld_int32-Lload_table /* FFI_TYPE_INT */
.long Lld_float-Lload_table /* FFI_TYPE_FLOAT */
.long Lld_double-Lload_table /* FFI_TYPE_DOUBLE */
.long Lld_ldouble-Lload_table /* FFI_TYPE_LONGDOUBLE */
.long Lld_int8-Lload_table /* FFI_TYPE_UINT8 */
.long Lld_int8-Lload_table /* FFI_TYPE_SINT8 */
.long Lld_int16-Lload_table /* FFI_TYPE_UINT16 */
.long Lld_int16-Lload_table /* FFI_TYPE_SINT16 */
.long Lld_int32-Lload_table /* FFI_TYPE_UINT32 */
.long Lld_int32-Lload_table /* FFI_TYPE_SINT32 */
.long Lld_int64-Lload_table /* FFI_TYPE_UINT64 */
.long Lld_int64-Lload_table /* FFI_TYPE_SINT64 */
.long Lld_struct-Lload_table /* FFI_TYPE_STRUCT */
.long Lld_int64-Lload_table /* FFI_TYPE_POINTER */
.text
.align 3
Lld_void:
ret
.align 3
Lld_int8:
movzbl -24(%rsp), %eax
ret
.align 3
Lld_int16:
movzwl -24(%rsp), %eax
ret
.align 3
Lld_int32:
movl -24(%rsp), %eax
ret
.align 3
Lld_int64:
movq -24(%rsp), %rax
ret
.align 3
Lld_float:
movss -24(%rsp), %xmm0
ret
.align 3
Lld_double:
movsd -24(%rsp), %xmm0
ret
.align 3
Lld_ldouble:
fldt -24(%rsp)
ret
.align 3
Lld_struct:
/* There are four possibilities here, %rax/%rdx, %xmm0/%rax,
%rax/%xmm0, %xmm0/%xmm1. We collapse two by always loading
both rdx and xmm1 with the second word. For the remaining,
bit 8 set means xmm0 gets the second word, and bit 9 means
that rax gets the second word. */
movq -24(%rsp), %rcx
movq -16(%rsp), %rdx
movq -16(%rsp), %xmm1
testl $0x100, %eax
cmovnz %rdx, %rcx
movd %rcx, %xmm0
testl $0x200, %eax
movq -24(%rsp), %rax
cmovnz %rdx, %rax
ret
/* See the comment above Lload_sse; the same logic applies here. */
.align 3
LUW8:
Lsave_sse:
movdqa %xmm0, 48(%rsp)
movdqa %xmm1, 64(%rsp)
movdqa %xmm2, 80(%rsp)
movdqa %xmm3, 96(%rsp)
movdqa %xmm4, 112(%rsp)
movdqa %xmm5, 128(%rsp)
movdqa %xmm6, 144(%rsp)
movdqa %xmm7, 160(%rsp)
jmp Lret_from_save_sse
LUW9:
.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support
EH_frame1:
.set L$set$0,LECIE1-LSCIE1 /* CIE Length */
.long L$set$0
LSCIE1:
.long 0x0 /* CIE Identifier Tag */
.byte 0x1 /* CIE Version */
.ascii "zR\0" /* CIE Augmentation */
.byte 0x1 /* uleb128 0x1; CIE Code Alignment Factor */
.byte 0x78 /* sleb128 -8; CIE Data Alignment Factor */
.byte 0x10 /* CIE RA Column */
.byte 0x1 /* uleb128 0x1; Augmentation size */
.byte 0x10 /* FDE Encoding (pcrel sdata4) */
.byte 0xc /* DW_CFA_def_cfa, %rsp offset 8 */
.byte 0x7 /* uleb128 0x7 */
.byte 0x8 /* uleb128 0x8 */
.byte 0x90 /* DW_CFA_offset, column 0x10 */
.byte 0x1
.align 3
LECIE1:
.globl _ffi_call_unix64.eh
_ffi_call_unix64.eh:
LSFDE1:
.set L$set$1,LEFDE1-LASFDE1 /* FDE Length */
.long L$set$1
LASFDE1:
.long LASFDE1-EH_frame1 /* FDE CIE offset */
.quad LUW0-. /* FDE initial location */
.set L$set$2,LUW4-LUW0 /* FDE address range */
.quad L$set$2
.byte 0x0 /* Augmentation size */
.byte 0x4 /* DW_CFA_advance_loc4 */
.set L$set$3,LUW1-LUW0
.long L$set$3
/* New stack frame based off rbp. This is an itty bit of unwind
trickery in that the CFA *has* changed. There is no easy way
to describe it correctly on entry to the function. Fortunately,
it doesn't matter too much since at all points we can correctly
unwind back to ffi_call. Note that the location to which we
moved the return address is (the new) CFA-8, so from the
perspective of the unwind info, it hasn't moved. */
.byte 0xc /* DW_CFA_def_cfa, %rbp offset 32 */
.byte 0x6
.byte 0x20
.byte 0x80+6 /* DW_CFA_offset, %rbp offset 2*-8 */
.byte 0x2
.byte 0xa /* DW_CFA_remember_state */
.byte 0x4 /* DW_CFA_advance_loc4 */
.set L$set$4,LUW2-LUW1
.long L$set$4
.byte 0xc /* DW_CFA_def_cfa, %rsp offset 8 */
.byte 0x7
.byte 0x8
.byte 0xc0+6 /* DW_CFA_restore, %rbp */
.byte 0x4 /* DW_CFA_advance_loc4 */
.set L$set$5,LUW3-LUW2
.long L$set$5
.byte 0xb /* DW_CFA_restore_state */
.align 3
LEFDE1:
.globl _ffi_closure_unix64.eh
_ffi_closure_unix64.eh:
LSFDE3:
.set L$set$6,LEFDE3-LASFDE3 /* FDE Length */
.long L$set$6
LASFDE3:
.long LASFDE3-EH_frame1 /* FDE CIE offset */
.quad LUW5-. /* FDE initial location */
.set L$set$7,LUW9-LUW5 /* FDE address range */
.quad L$set$7
.byte 0x0 /* Augmentation size */
.byte 0x4 /* DW_CFA_advance_loc4 */
.set L$set$8,LUW6-LUW5
.long L$set$8
.byte 0xe /* DW_CFA_def_cfa_offset */
.byte 208,1 /* uleb128 208 */
.byte 0xa /* DW_CFA_remember_state */
.byte 0x4 /* DW_CFA_advance_loc4 */
.set L$set$9,LUW7-LUW6
.long L$set$9
.byte 0xe /* DW_CFA_def_cfa_offset */
.byte 0x8
.byte 0x4 /* DW_CFA_advance_loc4 */
.set L$set$10,LUW8-LUW7
.long L$set$10
.byte 0xb /* DW_CFA_restore_state */
.align 3
LEFDE3:
.subsections_via_symbols
#endif /* __x86_64__ */
|
khuonglm/kecc-public
| 2,041
|
hello.S
|
.globl fibonacci
.section .text
.type fibonacci, @function
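# Calling convention used by this kecc-generated code (as observed below):
# the caller passes a slot pointer in a0, the 32-bit argument is read from
# a0-4, and the result is stored back through 0(a0).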
.fibonacci_L3:
j .fibonacci_L1
.fibonacci_L4:
j .fibonacci_L2
fibonacci:
addi sp,sp,-64
sd ra,56(sp)
sd s0,48(sp)
sd a0,40(sp)
addi s0,sp,64
li s10,4
sub s11,a0,s10
lw s10,0(s11)
sw s10,32(sp)
lw s11,32(sp)
li s10,2
slt s11,s11,s10
sb s11,31(sp)
lbu s10,31(sp)
bne s10,zero, .fibonacci_L3
j .fibonacci_L4
.fibonacci_L2:
lw s10,32(sp)
li s11,2
subw s10,s10,s11
sw s10,27(sp)
addi s11,sp,27
lw s10,0(s11)
sw s10,19(sp)
addi a0,sp,23
call fibonacci
lw s11,32(sp)
li s10,1
subw s11,s11,s10
sw s11,15(sp)
addi s10,sp,15
lw s11,0(s10)
sw s11,7(sp)
addi a0,sp,11
call fibonacci
lw s10,23(sp)
lw s11,11(sp)
addw s10,s10,s11
sw s10,3(sp)
ld a0,40(sp)
addi s11,sp,3
lw s10,0(s11)
sw s10,0(a0)
ld ra,56(sp)
ld s0,48(sp)
addi sp,sp,64
ret
.fibonacci_L1:
ld a0,40(sp)
addi s11,sp,32
lw s10,0(s11)
sw s10,0(a0)
ld ra,56(sp)
ld s0,48(sp)
addi sp,sp,64
ret
.globl main
.section .text
.type main, @function
main:
addi sp,sp,-48
sd ra,40(sp)
sd s0,32(sp)
sd a0,24(sp)
addi s0,sp,48
la s11,nonce
lw s10,0(s11)
sw s10,20(sp)
lw s11,20(sp)
li s10,20
remw s11,s11,s10
sw s11,16(sp)
addi s10,sp,16
lw s11,0(s10)
sw s11,8(sp)
addi a0,sp,12
call fibonacci
lw s10,12(sp)
xori a0,s10,0
ld ra,40(sp)
ld s0,32(sp)
addi sp,sp,48
ret
.globl nonce
.section .data
.type nonce, @object
nonce:
.word 0x1b
|
kiritantakechi/os
| 1,640
|
addressos/kernel/src/trap/trap.S
|
.altmacro
.macro SAVE_GP n
sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
ld x\n, \n*8(sp)
.endm
.section .text.trampoline
.globl __alltraps
.globl __restore
.align 2
__alltraps:
csrrw sp, sscratch, sp
# now sp->*TrapContext in user space, sscratch->user stack
# save other general purpose registers
sd x1, 1*8(sp)
# skip sp(x2), we will save it later
sd x3, 3*8(sp)
# skip tp(x4), application does not use it
# save x5~x31
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
# we can use t0/t1/t2 freely, because they have been saved in TrapContext
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*8(sp)
sd t1, 33*8(sp)
# read user stack from sscratch and save it in TrapContext
csrr t2, sscratch
sd t2, 2*8(sp)
# load kernel_satp into t0
ld t0, 34*8(sp)
# load trap_handler into t1
ld t1, 36*8(sp)
# move to kernel_sp
ld sp, 35*8(sp)
# switch to kernel space
csrw satp, t0
sfence.vma
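# kernel_satp, kernel_sp and trap_handler were all read out of the
# TrapContext *before* the satp switch above, because that page lives in the
# user address space; sfence.vma then flushes translations cached under the
# old address space.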
# jump to trap_handler
jr t1
__restore:
# a0: *TrapContext in user space(Constant); a1: user space token
# switch to user space
csrw satp, a1
sfence.vma
csrw sscratch, a0
mv sp, a0
# now sp points to TrapContext in user space, start restoring based on it
# restore sstatus/sepc
ld t0, 32*8(sp)
ld t1, 33*8(sp)
csrw sstatus, t0
csrw sepc, t1
# restore general purpose registers except x0/sp/tp
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
# back to user stack
ld sp, 2*8(sp)
sret
|
kiritantakechi/os
| 1,025
|
batchos/kernel/src/link_app.S
|
.align 3
.section .data
.global _num_app
_num_app:
.quad 5
.quad app_0_start
.quad app_1_start
.quad app_2_start
.quad app_3_start
.quad app_4_start
.quad app_4_end
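# The table holds each app's start address plus the end of the last one, so
# a loader can take app i's image as [entry[i], entry[i+1]); the apps are
# placed back to back in .data below.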
.section .data
.global app_0_start
.global app_0_end
app_0_start:
.incbin "./target/riscv64gc-unknown-none-elf/release/00hello_world.bin"
app_0_end:
.section .data
.global app_1_start
.global app_1_end
app_1_start:
.incbin "./target/riscv64gc-unknown-none-elf/release/01store_fault.bin"
app_1_end:
.section .data
.global app_2_start
.global app_2_end
app_2_start:
.incbin "./target/riscv64gc-unknown-none-elf/release/02power.bin"
app_2_end:
.section .data
.global app_3_start
.global app_3_end
app_3_start:
.incbin "./target/riscv64gc-unknown-none-elf/release/03priv_inst.bin"
app_3_end:
.section .data
.global app_4_start
.global app_4_end
app_4_start:
.incbin "./target/riscv64gc-unknown-none-elf/release/04priv_csr.bin"
app_4_end:
|
kiritantakechi/os
| 1,589
|
batchos/kernel/src/trap/trap.S
|
.altmacro
.macro SAVE_GP n
sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
ld x\n, \n*8(sp)
.endm
.section .text
.globl __alltraps
.globl __restore
.align 2
__alltraps:
csrrw sp, sscratch, sp
# now sp->kernel stack, sscratch->user stack
# allocate a TrapContext on kernel stack
addi sp, sp, -34*8
# save general-purpose registers
sd x1, 1*8(sp)
# skip sp(x2), we will save it later
sd x3, 3*8(sp)
# skip tp(x4), application does not use it
# save x5~x31
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
# we can use t0/t1/t2 freely, because they were saved on kernel stack
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*8(sp)
sd t1, 33*8(sp)
# read user stack from sscratch and save it on the kernel stack
csrr t2, sscratch
sd t2, 2*8(sp)
# set input argument of trap_handler(cx: &mut TrapContext)
mv a0, sp
call trap_handler
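# trap_handler returns its TrapContext pointer in a0, so execution falls
# straight through into __restore with the context address already in a0.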
__restore:
# case1: start running app by __restore
# case2: back to U after handling trap
mv sp, a0
# now sp->kernel stack(after allocated), sscratch->user stack
# restore sstatus/sepc
ld t0, 32*8(sp)
ld t1, 33*8(sp)
ld t2, 2*8(sp)
csrw sstatus, t0
csrw sepc, t1
csrw sscratch, t2
# restore general-purpose registers except sp/tp
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
# release TrapContext on kernel stack
addi sp, sp, 34*8
# now sp->kernel stack, sscratch->user stack
csrrw sp, sscratch, sp
sret
|
kjughx/ruix
| 3,593
|
crates/kernel/src/boot/x86.S
|
.code16
.org 0x7c00
.section .boot, "ax"
.global _entry
.intel_syntax
_entry:
jmp short step1
nop
.att_syntax
OEMIdentifier: .byte 'R', 'U', 'I', 'X', ' ', ' ', ' ', ' '
BytesPerSector: .word 0x200
SectorsPerCluster: .byte 0x80
ReservedSectors: .word 200
FATCopies: .byte 0x02
RootDirEntries: .word 0x40
NumSectors: .word 0x00
MediaType: .byte 0xF8
SectorsPerFat: .word 0x100
SectorsPerTrack: .word 0x20
NumberOfHeads: .word 0x40
HiddenSectors: .long 0x00
SectorsBig: .long 0x773594
// Extended BPB (Dos 4.0)
DriveNumber: .byte 0x80
WinNTBit: .byte 0x00
Signature: .byte 0x29
VolumeID: .long 0xD105
VolumeIDString: .byte 'R', 'U', 'I', 'X', ' ', ' ', ' ', 'B', 'O', 'O', 'T'
SystemIDString: .byte 'F', 'A', 'T', '1', '6', ' ', ' ', ' '
step1:
jmp $0, $step2
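// The far jump forces CS to 0 so the segment:offset addressing below is a
// known 0:step2, regardless of how the BIOS jumped to this sector.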
step2:
cli
mov $0x00, %ax
mov %ax, %ds
mov %ax, %es
mov %ax, %ss
mov %ax, %fs
mov %ax, %gs
mov $0x7c00, %sp
.load_protected:
lgdt gdt_descriptor
mov %cr0, %eax
or $0x1, %eax
mov %eax, %cr0
jmp $0x08, $load32
gdt_start:
gdt_null:
.long 0x0
.long 0x0
// offset 0x8
gdt_code: // CS should point here
.word 0xffff // segment limit first 0-15 bits
.word 0 // Base first 0-15 bits
.byte 0 // Base first 16-23 bits
.byte 0x9a // Access byte
.byte 0b11001111 // High 4 bit flags and low 4 bit flags
.byte 0 // Base 24-31 bits
// offset 0x10
gdt_data: // DS, SS, ES, FS, GS should point here
.word 0xffff // segment limit first 0-15 bits
.word 0 // Base first 0-15 bits
.byte 0 // Base first 16-23 bits
.byte 0x92 // Access byte
.byte 0b11001111 // High 4 bit flags and low 4 bit flags
.byte 0 // Base 24-31 bits
gdt_end:
gdt_descriptor:
.word gdt_end - gdt_start -1
.long gdt_start
.code32
load32:
mov $0x10, %ax
mov %ax, %ds
mov %ax, %es
mov %ax, %ss
mov %ax, %fs
mov %ax, %gs
mov $1, %eax
mov $250, %ecx
mov $0x100000, %edi
call ata_lba_read
ljmp $0x08, $0x100000
ata_lba_read:
mov %eax, %ebx //Back up the LBA
//Send bits 24-27 of the LBA (plus drive-select/LBA-mode bits) to the controller
.intel_syntax
shr eax, 24
.att_syntax
or $0xe0, %eax //Select master drive
mov $0x1F6, %dx
out %al, %dx
//Finished sending the high LBA bits
//Send total #sectors to read
mov %ecx, %eax
mov $0x1F2, %dx
out %al, %dx
//Finished sending #sectors
//Send bits 0-7 of the LBA
mov %ebx, %eax //Restore backed up LBA
mov $0x1F3, %dx
out %al, %dx
//Finished sending bits 0-7
//Send bits 8-15 of the LBA
mov %ebx, %eax //Restore backed up LBA
mov $0x1F4, %dx
.intel_syntax
shr eax, 8
.att_syntax
out %al, %dx
//Finished
//Send bits 16-23 of the LBA
mov $0x1F5, %dx
mov %ebx, %eax //Restore backed up LBA
.intel_syntax
shr eax, 16
.att_syntax
out %al, %dx
//Finished
mov $0x1F7, %dx
mov $0x20, %al
out %al, %dx
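//0x20 = ATA READ SECTORS (PIO). Below we poll the status port until DRQ
//(bit 3) is set before pulling each 256-word sector out of the data port.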
//Read all sectors into memory
.next_sector:
push %ecx
//Check if we can read
.try_again:
mov $0x1F7, %dx
in %dx, %al
test $8, %al
jz .try_again
//We need to read 256 words at a time
mov $256, %ecx
mov $0x1F0, %dx
rep insw //Read ecx words from port dx into es:edi
pop %ecx
loop .next_sector
//Finished reading sectors into memory
ret
.fill 510 - (.- _entry), 1, 0
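// Pad out the 512-byte sector, then end with the mandatory BIOS boot
// signature 0xAA55.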
.word 0xAA55
|
kleineOS/kernel
| 1,719
|
src/kernelvec.s
|
.section .text.trap
.global ktrapvec
# We send the stack pointer in as the first argument to the kernel trap handler.
# This allows us to use that value in order to load the saved registers into a
# struct. We save the registers in the order of their internal names (x0-31),
# and not in the order of their ABI names (e.g. saving t0-6 then a0-7 ...)
# TODO: save floating point registers
ktrapvec:
allocspace:
# 31 registers are stored at offsets 0..240 below, so the original -8*30
# (-240) left t6's slot outside the frame; 8*32 covers all slots and keeps
# the stack 16-byte aligned.
addi sp, sp, -8*32 # -256
save:
sd ra, 0(sp)
sd sp, 8(sp)
sd gp, 16(sp)
sd tp, 24(sp)
sd t0, 32(sp)
sd t1, 40(sp)
sd t2, 48(sp)
sd fp, 56(sp)
sd s1, 64(sp)
sd a0, 72(sp)
sd a1, 80(sp)
sd a2, 88(sp)
sd a3, 96(sp)
sd a4, 104(sp)
sd a5, 112(sp)
sd a6, 120(sp)
sd a7, 128(sp)
sd s2, 136(sp)
sd s3, 144(sp)
sd s4, 152(sp)
sd s5, 160(sp)
sd s6, 168(sp)
sd s7, 176(sp)
sd s8, 184(sp)
sd s9, 192(sp)
sd s10, 200(sp)
sd s11, 208(sp)
sd t3, 216(sp)
sd t4, 224(sp)
sd t5, 232(sp)
sd t6, 240(sp)
calltrap:
mv a0, sp
call kerneltrap
load:
ld ra, 0(sp)
ld sp, 8(sp)
ld gp, 16(sp)
ld tp, 24(sp)
ld t0, 32(sp)
ld t1, 40(sp)
ld t2, 48(sp)
ld fp, 56(sp)
ld s1, 64(sp)
ld a0, 72(sp)
ld a1, 80(sp)
ld a2, 88(sp)
ld a3, 96(sp)
ld a4, 104(sp)
ld a5, 112(sp)
ld a6, 120(sp)
ld a7, 128(sp)
ld s2, 136(sp)
ld s3, 144(sp)
ld s4, 152(sp)
ld s5, 160(sp)
ld s6, 168(sp)
ld s7, 176(sp)
ld s8, 184(sp)
ld s9, 192(sp)
ld s10, 200(sp)
ld s11, 208(sp)
ld t3, 216(sp)
ld t4, 224(sp)
ld t5, 232(sp)
ld t6, 240(sp)
deallocspace:
addi sp, sp, 8*32 # 256, matching the enlarged save area above
ret_to_supervisor:
sret
|
ktheindifferent/react-os-rust
| 1,526
|
kernel/src/smp/ap_trampoline.S
|
.code16
.section .ap_boot, "ax"
.global ap_trampoline_start
.global ap_trampoline_end
ap_trampoline_start:
cli
cld
# Load GDT pointer from boot info structure
lgdt ap_gdt_ptr
# Enable protected mode
mov %cr0, %eax
or $1, %eax
mov %eax, %cr0
# Far jump to 32-bit code
ljmp $0x08, $ap_protected_mode
.code32
ap_protected_mode:
# Set up segments
mov $0x10, %ax
mov %ax, %ds
mov %ax, %es
mov %ax, %fs
mov %ax, %gs
mov %ax, %ss
# Enable PAE
mov %cr4, %eax
or $0x20, %eax
mov %eax, %cr4
# Load CR3 with page table
mov ap_cr3, %eax
mov %eax, %cr3
# Enable long mode
mov $0xC0000080, %ecx
rdmsr
or $0x100, %eax
wrmsr
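# MSR 0xC0000080 is IA32_EFER; bit 8 (0x100) is LME (long mode enable).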
# Enable paging
mov %cr0, %eax
or $0x80000000, %eax
mov %eax, %cr0
# Jump to 64-bit code
ljmp $0x08, $ap_long_mode
.code64
ap_long_mode:
# Set up 64-bit segments
mov $0x10, %ax
mov %ax, %ds
mov %ax, %es
mov %ax, %fs
mov %ax, %gs
mov %ax, %ss
# Load stack pointer
mov ap_stack_top, %rsp
# Load IDT
lidt ap_idt_ptr
# Jump to C entry point
mov ap_entry_point, %rax
jmp *%rax
.align 8
ap_boot_info:
ap_cr3: .quad 0
ap_gdt_ptr: .quad 0
ap_idt_ptr: .quad 0
ap_stack_top: .quad 0
ap_entry_point: .quad 0
ap_cpu_id: .long 0
ap_apic_id: .byte 0
.byte 0
.byte 0
.byte 0
ap_trampoline_end:
|
kuechenrole/UaFESOM
| 1,397
|
fesomCpl/namelist.config.s
|
! This is the namelist file for model general configuration
&modelname
runid='RG47911'
case_initial='warm'
case_forcing='warm'
variable_GammaTS=.false.
GammaT=0.01
/
&timestep
step_per_day= 120!360 ! 960
run_length=5
run_length_unit='y' ! y, m, d, s
/
&clockinit ! the model starts at
timenew=0.0
daynew=1
yearnew=2021
/
&paths
MeshPath='/work/ollie/orichter/mesh/oce0_s/'
OpbndPath=' '
ClimateDataPath= ''
ForcingDataPath=''
TideForcingPath= ''
ResultPath = '/work/ollie/orichter/data/oce0_s/'
/
&initialization
use_prepared_init_ice=.false. !how to init. ice; runid.initial_ice.nc
OceClimaDataName=''
/
&inout
restartflag='last' !restart from which saved record,'last,'#'
output_length=1 !only required for m,d,h,s cases, y takes 1
output_length_unit='m' !output period: y, m, d, h, s
logfile_outfreq=1 !in logfile info. output frequency, # steps
/
&mesh_def
grid_type=2 !1 z-level, 2 sigma, 3 z+sigma
/
&geometry
domain_length=360. ![degree]
cartesian=.false.
fplane=.true.
betaplane=.false.
f_fplane=-1.405e-4 ![1/s]
beta_betaplane=2.0e-11 ![1/s/m]
rotated_grid=.false. !option only valid for coupled model case now
alphaEuler=50. ![degree] Euler angles, convention:
betaEuler=15. ![degree] first around z, then around new x,
gammaEuler=-90. ![degree] then around new z.
/
&calendar
include_fleapyear=.false.
/
|
kuechenrole/UaFESOM
| 9,132
|
uaStandAlone/UaSource/SuiteSparse/MATLAB_Tools/SFMULT/sfmult_atxtyt_k.s
|
.file "sfmult_atxtyt_k.c"
.text
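# Compiler output (GCC 4.1, see .ident at the bottom) from SuiteSparse
# SFMULT's sfmult_atxtyt_k.c: apparently SSE2 kernels for the A'*X' -> Y'
# sparse multiply, specialized for 2, 3, 4 and general k vectors.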
.globl sfmult_AT_XT_YT_2
.type sfmult_AT_XT_YT_2, @function
sfmult_AT_XT_YT_2:
.LFB29:
pushl %ebp
.LCFI0:
movl %esp, %ebp
.LCFI1:
pushl %edi
.LCFI2:
pushl %esi
.LCFI3:
pushl %ebx
.LCFI4:
subl $20, %esp
.LCFI5:
movl 40(%ebp), %esi
movl 36(%ebp), %eax
testl %eax, %eax
jle .L13
movl 60(%ebp), %eax
sall $3, %eax
movl %eax, -16(%ebp)
movl 8(%ebp), %eax
movl %eax, -20(%ebp)
xorl %edi, %edi
movl $0, -24(%ebp)
pxor %xmm6, %xmm6
movl -24(%ebp), %edx
.L4:
movl 16(%ebp), %eax
movl 4(%eax,%edx,4), %edx
movl %edx, -28(%ebp)
subl %edi, %edx
movl %edx, -32(%ebp)
movl $1431655766, %edx
movl -32(%ebp), %eax
imull %edx
movl -32(%ebp), %ecx
sarl $31, %ecx
subl %ecx, %edx
leal (%edx,%edx,2), %edx
movl -32(%ebp), %eax
subl %edx, %eax
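# eax now holds n % 3 (n = remaining entries in this column): 0x55555556 is
# the signed-division-by-3 reciprocal (q = hi32(0x55555556*n) - (n>>31)),
# and leal (%edx,%edx,2) formed 3*q. The remainder decides how many
# iterations to peel before the 3-way unrolled loop at .L12.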
cmpl $1, %eax
je .L6
cmpl $2, %eax
je .L7
movapd %xmm6, %xmm4
movapd %xmm6, %xmm5
.L8:
cmpl %edi, -28(%ebp)
jle .L10
movl 20(%ebp), %eax
leal (%eax,%edi,4), %ecx
movl 24(%ebp), %eax
leal (%eax,%edi,8), %edx
.L12:
movsd (%edx), %xmm1
movsd 8(%edx), %xmm2
movsd 16(%edx), %xmm3
movl (%ecx), %eax
sall $4, %eax
movapd %xmm1, %xmm0
mulsd (%eax,%esi), %xmm0
addsd %xmm0, %xmm5
mulsd 8(%esi,%eax), %xmm1
addsd %xmm1, %xmm4
movl 4(%ecx), %eax
sall $4, %eax
movapd %xmm2, %xmm0
mulsd (%esi,%eax), %xmm0
addsd %xmm0, %xmm5
mulsd 8(%esi,%eax), %xmm2
addsd %xmm2, %xmm4
movl 8(%ecx), %eax
sall $4, %eax
movapd %xmm3, %xmm0
mulsd (%esi,%eax), %xmm0
addsd %xmm0, %xmm5
mulsd 8(%esi,%eax), %xmm3
addsd %xmm3, %xmm4
addl $3, %edi
addl $12, %ecx
addl $24, %edx
cmpl %edi, -28(%ebp)
jg .L12
.L10:
movl -20(%ebp), %edx
movsd %xmm5, (%edx)
movsd %xmm4, 8(%edx)
addl $1, -24(%ebp)
movl -16(%ebp), %eax
addl %eax, %edx
movl %edx, -20(%ebp)
movl -24(%ebp), %edx
cmpl %edx, 36(%ebp)
jne .L4
.L13:
addl $20, %esp
popl %ebx
popl %esi
popl %edi
popl %ebp
ret
.L6:
movapd %xmm6, %xmm1
movapd %xmm6, %xmm2
movl 20(%ebp), %edx
.L9:
movl 24(%ebp), %eax
movsd (%eax,%edi,8), %xmm0
movl (%edx,%edi,4), %eax
sall $4, %eax
movapd %xmm0, %xmm5
mulsd (%esi,%eax), %xmm5
addsd %xmm2, %xmm5
movapd %xmm0, %xmm4
mulsd 8(%esi,%eax), %xmm4
addsd %xmm1, %xmm4
addl $1, %edi
jmp .L8
.L7:
movl 24(%ebp), %edx
movsd (%edx,%edi,8), %xmm0
movl 20(%ebp), %edx
movl (%edx,%edi,4), %eax
sall $4, %eax
movapd %xmm0, %xmm2
mulsd (%esi,%eax), %xmm2
addsd %xmm6, %xmm2
movapd %xmm0, %xmm1
mulsd 8(%esi,%eax), %xmm1
addsd %xmm6, %xmm1
addl $1, %edi
jmp .L9
.LFE29:
.size sfmult_AT_XT_YT_2, .-sfmult_AT_XT_YT_2
.globl sfmult_AT_XT_YT_3
.type sfmult_AT_XT_YT_3, @function
sfmult_AT_XT_YT_3:
.LFB30:
pushl %ebp
.LCFI6:
movl %esp, %ebp
.LCFI7:
pushl %edi
.LCFI8:
pushl %esi
.LCFI9:
pushl %ebx
.LCFI10:
subl $16, %esp
.LCFI11:
movl 40(%ebp), %edi
movl 36(%ebp), %edx
testl %edx, %edx
jle .L27
movl 60(%ebp), %eax
sall $3, %eax
movl %eax, -16(%ebp)
movl 8(%ebp), %eax
movl %eax, -20(%ebp)
xorl %esi, %esi
movl $0, -24(%ebp)
pxor %xmm6, %xmm6
movl -24(%ebp), %edx
.L20:
movl 16(%ebp), %eax
movl 4(%eax,%edx,4), %edx
movl %edx, -28(%ebp)
movl %edx, %eax
subl %esi, %eax
testb $1, %al
jne .L21
movapd %xmm6, %xmm3
movapd %xmm6, %xmm4
movapd %xmm6, %xmm5
.L23:
cmpl %esi, -28(%ebp)
jle .L24
movl 20(%ebp), %eax
leal (%eax,%esi,4), %ecx
movl 24(%ebp), %eax
leal (%eax,%esi,8), %edx
.L26:
movsd (%edx), %xmm1
movsd 8(%edx), %xmm2
movl (%ecx), %eax
sall $5, %eax
movapd %xmm1, %xmm0
mulsd (%eax,%edi), %xmm0
addsd %xmm0, %xmm5
leal (%edi,%eax), %eax
movapd %xmm1, %xmm0
mulsd 8(%eax), %xmm0
addsd %xmm0, %xmm4
mulsd 16(%eax), %xmm1
addsd %xmm1, %xmm3
movl 4(%ecx), %eax
sall $5, %eax
movapd %xmm2, %xmm0
mulsd (%edi,%eax), %xmm0
addsd %xmm0, %xmm5
leal (%edi,%eax), %eax
movapd %xmm2, %xmm0
mulsd 8(%eax), %xmm0
addsd %xmm0, %xmm4
mulsd 16(%eax), %xmm2
addsd %xmm2, %xmm3
addl $2, %esi
addl $8, %ecx
addl $16, %edx
cmpl %esi, -28(%ebp)
jg .L26
.L24:
movl -20(%ebp), %edx
movsd %xmm5, (%edx)
movsd %xmm4, 8(%edx)
movsd %xmm3, 16(%edx)
addl $1, -24(%ebp)
movl -16(%ebp), %eax
addl %eax, %edx
movl %edx, -20(%ebp)
movl -24(%ebp), %edx
cmpl %edx, 36(%ebp)
jne .L20
.L27:
addl $16, %esp
popl %ebx
popl %esi
popl %edi
popl %ebp
ret
.L21:
movl 24(%ebp), %eax
movsd (%eax,%esi,8), %xmm0
movl 20(%ebp), %edx
movl (%edx,%esi,4), %eax
sall $5, %eax
movapd %xmm0, %xmm5
mulsd (%edi,%eax), %xmm5
addsd %xmm6, %xmm5
leal (%edi,%eax), %eax
movapd %xmm0, %xmm4
mulsd 8(%eax), %xmm4
addsd %xmm6, %xmm4
movapd %xmm0, %xmm3
mulsd 16(%eax), %xmm3
addsd %xmm6, %xmm3
addl $1, %esi
jmp .L23
.LFE30:
.size sfmult_AT_XT_YT_3, .-sfmult_AT_XT_YT_3
.globl sfmult_AT_XT_YT_4
.type sfmult_AT_XT_YT_4, @function
sfmult_AT_XT_YT_4:
.LFB31:
pushl %ebp
.LCFI12:
movl %esp, %ebp
.LCFI13:
pushl %edi
.LCFI14:
pushl %esi
.LCFI15:
pushl %ebx
.LCFI16:
subl $20, %esp
.LCFI17:
movl 36(%ebp), %ecx
testl %ecx, %ecx
jle .L39
movl 60(%ebp), %eax
sall $3, %eax
movl %eax, -16(%ebp)
movl 8(%ebp), %eax
movl %eax, -20(%ebp)
movl $0, -28(%ebp)
movl $0, -24(%ebp)
pxor %xmm6, %xmm6
.L33:
movl -24(%ebp), %edx
movl 16(%ebp), %ecx
movl 4(%ecx,%edx,4), %eax
cmpl -28(%ebp), %eax
jle .L43
movl -28(%ebp), %esi
movl 20(%ebp), %edi
leal (%edi,%esi,4), %ecx
movl 24(%ebp), %edi
leal (%edi,%esi,8), %edx
movapd %xmm6, %xmm2
movapd %xmm6, %xmm5
movapd %xmm6, %xmm4
movapd %xmm6, %xmm3
xorl %esi, %esi
subl -28(%ebp), %eax
movl %eax, -32(%ebp)
movl 40(%ebp), %edi
.L37:
movsd (%edx), %xmm1
movl (%ecx), %eax
sall $5, %eax
movapd %xmm1, %xmm0
mulsd (%eax,%edi), %xmm0
addsd %xmm0, %xmm3
addl %edi, %eax
movapd %xmm1, %xmm0
mulsd 8(%eax), %xmm0
addsd %xmm0, %xmm4
movapd %xmm1, %xmm0
mulsd 16(%eax), %xmm0
addsd %xmm0, %xmm5
mulsd 24(%eax), %xmm1
addsd %xmm1, %xmm2
addl $1, %esi
addl $4, %ecx
addl $8, %edx
cmpl -32(%ebp), %esi
jne .L37
addl %esi, -28(%ebp)
.L36:
movl -20(%ebp), %eax
movsd %xmm3, (%eax)
movsd %xmm4, 8(%eax)
movsd %xmm5, 16(%eax)
movsd %xmm2, 24(%eax)
addl $1, -24(%ebp)
movl -16(%ebp), %edx
addl %edx, %eax
movl %eax, -20(%ebp)
movl -24(%ebp), %ecx
cmpl %ecx, 36(%ebp)
jne .L33
.L39:
addl $20, %esp
popl %ebx
popl %esi
popl %edi
popl %ebp
ret
.L43:
movapd %xmm6, %xmm2
movapd %xmm6, %xmm5
movapd %xmm6, %xmm4
movapd %xmm6, %xmm3
jmp .L36
.LFE31:
.size sfmult_AT_XT_YT_4, .-sfmult_AT_XT_YT_4
.globl sfmult_AT_XT_YT_k
.type sfmult_AT_XT_YT_k, @function
sfmult_AT_XT_YT_k:
.LFB32:
pushl %ebp
.LCFI18:
movl %esp, %ebp
.LCFI19:
pushl %edi
.LCFI20:
pushl %esi
.LCFI21:
pushl %ebx
.LCFI22:
subl $36, %esp
.LCFI23:
movl 60(%ebp), %esi
movl 36(%ebp), %edi
testl %edi, %edi
jle .L64
movl %esi, %edi
andl $-2147483645, %edi
js .L69
.L47:
leal 0(,%esi,8), %eax
movl %eax, -44(%ebp)
leal 0(,%edi,8), %edx
movl %edx, -16(%ebp)
movl 8(%ebp), %ecx
addl %edx, %ecx
movl %ecx, -20(%ebp)
movl $0, -40(%ebp)
movl $0, -36(%ebp)
.L48:
movl -36(%ebp), %eax
movl 16(%ebp), %ecx
movl 4(%ecx,%eax,4), %edx
testl %esi, %esi
jle .L49
xorl %eax, %eax
movl 8(%ebp), %ecx
.L51:
movl $0, (%ecx,%eax,8)
movl $0, 4(%ecx,%eax,8)
addl $1, %eax
cmpl %eax, %esi
jne .L51
.L49:
cmpl -40(%ebp), %edx
jle .L52
movl -40(%ebp), %eax
movl 20(%ebp), %ecx
leal (%ecx,%eax,4), %eax
movl %eax, -28(%ebp)
movl -40(%ebp), %ecx
movl 24(%ebp), %eax
leal (%eax,%ecx,8), %ecx
movl %ecx, -24(%ebp)
movl $0, -32(%ebp)
subl -40(%ebp), %edx
movl %edx, -48(%ebp)
.L54:
movl -24(%ebp), %eax
movsd (%eax), %xmm1
movl %esi, %eax
movl -28(%ebp), %edx
imull (%edx), %eax
movl 40(%ebp), %ecx
leal (%ecx,%eax,8), %edx
cmpl $2, %edi
je .L57
cmpl $3, %edi
je .L58
cmpl $1, %edi
je .L70
.L55:
cmpl %edi, %esi
.p2align 4,,5
jle .L59
movl -20(%ebp), %eax
addl -16(%ebp), %edx
movl %edi, %ecx
.L61:
movapd %xmm1, %xmm0
mulsd (%edx), %xmm0
addsd (%eax), %xmm0
movsd %xmm0, (%eax)
movapd %xmm1, %xmm0
mulsd 8(%edx), %xmm0
addsd 8(%eax), %xmm0
movsd %xmm0, 8(%eax)
movapd %xmm1, %xmm0
mulsd 16(%edx), %xmm0
addsd 16(%eax), %xmm0
movsd %xmm0, 16(%eax)
movapd %xmm1, %xmm0
mulsd 24(%edx), %xmm0
addsd 24(%eax), %xmm0
movsd %xmm0, 24(%eax)
addl $4, %ecx
addl $32, %eax
addl $32, %edx
cmpl %ecx, %esi
jg .L61
.L59:
addl $1, -32(%ebp)
addl $4, -28(%ebp)
addl $8, -24(%ebp)
movl -48(%ebp), %edx
cmpl %edx, -32(%ebp)
jne .L54
movl -32(%ebp), %ecx
addl %ecx, -40(%ebp)
.L52:
addl $1, -36(%ebp)
movl -44(%ebp), %eax
addl %eax, -20(%ebp)
movl -36(%ebp), %edx
cmpl %edx, 36(%ebp)
je .L64
addl %eax, 8(%ebp)
jmp .L48
.L58:
movapd %xmm1, %xmm0
mulsd 16(%edx), %xmm0
movl 8(%ebp), %eax
addsd 16(%eax), %xmm0
movsd %xmm0, 16(%eax)
.L57:
movapd %xmm1, %xmm0
mulsd 8(%edx), %xmm0
movl 8(%ebp), %ecx
addsd 8(%ecx), %xmm0
movsd %xmm0, 8(%ecx)
movl %ecx, %eax
.L56:
movapd %xmm1, %xmm0
mulsd (%edx), %xmm0
addsd (%eax), %xmm0
movsd %xmm0, (%eax)
jmp .L55
.L70:
movl 8(%ebp), %eax
jmp .L56
.L64:
addl $36, %esp
popl %ebx
popl %esi
popl %edi
popl %ebp
ret
.L69:
subl $1, %edi
orl $-4, %edi
addl $1, %edi
jmp .L47
.LFE32:
.size sfmult_AT_XT_YT_k, .-sfmult_AT_XT_YT_k
.ident "GCC: (GNU) 4.1.0 (SUSE Linux)"
.section .note.GNU-stack,"",@progbits
|
kuechenrole/UaFESOM
| 9,132
|
uaCpl/UaSource/SuiteSparse/MATLAB_Tools/SFMULT/sfmult_atxtyt_k.s
|
.file "sfmult_atxtyt_k.c"
.text
.globl sfmult_AT_XT_YT_2
.type sfmult_AT_XT_YT_2, @function
sfmult_AT_XT_YT_2:
.LFB29:
pushl %ebp
.LCFI0:
movl %esp, %ebp
.LCFI1:
pushl %edi
.LCFI2:
pushl %esi
.LCFI3:
pushl %ebx
.LCFI4:
subl $20, %esp
.LCFI5:
movl 40(%ebp), %esi
movl 36(%ebp), %eax
testl %eax, %eax
jle .L13
movl 60(%ebp), %eax
sall $3, %eax
movl %eax, -16(%ebp)
movl 8(%ebp), %eax
movl %eax, -20(%ebp)
xorl %edi, %edi
movl $0, -24(%ebp)
pxor %xmm6, %xmm6
movl -24(%ebp), %edx
.L4:
movl 16(%ebp), %eax
movl 4(%eax,%edx,4), %edx
movl %edx, -28(%ebp)
subl %edi, %edx
movl %edx, -32(%ebp)
movl $1431655766, %edx
movl -32(%ebp), %eax
imull %edx
movl -32(%ebp), %ecx
sarl $31, %ecx
subl %ecx, %edx
leal (%edx,%edx,2), %edx
movl -32(%ebp), %eax
subl %edx, %eax
cmpl $1, %eax
je .L6
cmpl $2, %eax
je .L7
movapd %xmm6, %xmm4
movapd %xmm6, %xmm5
.L8:
cmpl %edi, -28(%ebp)
jle .L10
movl 20(%ebp), %eax
leal (%eax,%edi,4), %ecx
movl 24(%ebp), %eax
leal (%eax,%edi,8), %edx
.L12:
movsd (%edx), %xmm1
movsd 8(%edx), %xmm2
movsd 16(%edx), %xmm3
movl (%ecx), %eax
sall $4, %eax
movapd %xmm1, %xmm0
mulsd (%eax,%esi), %xmm0
addsd %xmm0, %xmm5
mulsd 8(%esi,%eax), %xmm1
addsd %xmm1, %xmm4
movl 4(%ecx), %eax
sall $4, %eax
movapd %xmm2, %xmm0
mulsd (%esi,%eax), %xmm0
addsd %xmm0, %xmm5
mulsd 8(%esi,%eax), %xmm2
addsd %xmm2, %xmm4
movl 8(%ecx), %eax
sall $4, %eax
movapd %xmm3, %xmm0
mulsd (%esi,%eax), %xmm0
addsd %xmm0, %xmm5
mulsd 8(%esi,%eax), %xmm3
addsd %xmm3, %xmm4
addl $3, %edi
addl $12, %ecx
addl $24, %edx
cmpl %edi, -28(%ebp)
jg .L12
.L10:
movl -20(%ebp), %edx
movsd %xmm5, (%edx)
movsd %xmm4, 8(%edx)
addl $1, -24(%ebp)
movl -16(%ebp), %eax
addl %eax, %edx
movl %edx, -20(%ebp)
movl -24(%ebp), %edx
cmpl %edx, 36(%ebp)
jne .L4
.L13:
addl $20, %esp
popl %ebx
popl %esi
popl %edi
popl %ebp
ret
.L6:
movapd %xmm6, %xmm1
movapd %xmm6, %xmm2
movl 20(%ebp), %edx
.L9:
movl 24(%ebp), %eax
movsd (%eax,%edi,8), %xmm0
movl (%edx,%edi,4), %eax
sall $4, %eax
movapd %xmm0, %xmm5
mulsd (%esi,%eax), %xmm5
addsd %xmm2, %xmm5
movapd %xmm0, %xmm4
mulsd 8(%esi,%eax), %xmm4
addsd %xmm1, %xmm4
addl $1, %edi
jmp .L8
.L7:
movl 24(%ebp), %edx
movsd (%edx,%edi,8), %xmm0
movl 20(%ebp), %edx
movl (%edx,%edi,4), %eax
sall $4, %eax
movapd %xmm0, %xmm2
mulsd (%esi,%eax), %xmm2
addsd %xmm6, %xmm2
movapd %xmm0, %xmm1
mulsd 8(%esi,%eax), %xmm1
addsd %xmm6, %xmm1
addl $1, %edi
jmp .L9
.LFE29:
.size sfmult_AT_XT_YT_2, .-sfmult_AT_XT_YT_2
.globl sfmult_AT_XT_YT_3
.type sfmult_AT_XT_YT_3, @function
sfmult_AT_XT_YT_3:
.LFB30:
pushl %ebp
.LCFI6:
movl %esp, %ebp
.LCFI7:
pushl %edi
.LCFI8:
pushl %esi
.LCFI9:
pushl %ebx
.LCFI10:
subl $16, %esp
.LCFI11:
movl 40(%ebp), %edi
movl 36(%ebp), %edx
testl %edx, %edx
jle .L27
movl 60(%ebp), %eax
sall $3, %eax
movl %eax, -16(%ebp)
movl 8(%ebp), %eax
movl %eax, -20(%ebp)
xorl %esi, %esi
movl $0, -24(%ebp)
pxor %xmm6, %xmm6
movl -24(%ebp), %edx
.L20:
movl 16(%ebp), %eax
movl 4(%eax,%edx,4), %edx
movl %edx, -28(%ebp)
movl %edx, %eax
subl %esi, %eax
testb $1, %al
jne .L21
movapd %xmm6, %xmm3
movapd %xmm6, %xmm4
movapd %xmm6, %xmm5
.L23:
cmpl %esi, -28(%ebp)
jle .L24
movl 20(%ebp), %eax
leal (%eax,%esi,4), %ecx
movl 24(%ebp), %eax
leal (%eax,%esi,8), %edx
.L26:
movsd (%edx), %xmm1
movsd 8(%edx), %xmm2
movl (%ecx), %eax
sall $5, %eax
movapd %xmm1, %xmm0
mulsd (%eax,%edi), %xmm0
addsd %xmm0, %xmm5
leal (%edi,%eax), %eax
movapd %xmm1, %xmm0
mulsd 8(%eax), %xmm0
addsd %xmm0, %xmm4
mulsd 16(%eax), %xmm1
addsd %xmm1, %xmm3
movl 4(%ecx), %eax
sall $5, %eax
movapd %xmm2, %xmm0
mulsd (%edi,%eax), %xmm0
addsd %xmm0, %xmm5
leal (%edi,%eax), %eax
movapd %xmm2, %xmm0
mulsd 8(%eax), %xmm0
addsd %xmm0, %xmm4
mulsd 16(%eax), %xmm2
addsd %xmm2, %xmm3
addl $2, %esi
addl $8, %ecx
addl $16, %edx
cmpl %esi, -28(%ebp)
jg .L26
.L24:
movl -20(%ebp), %edx
movsd %xmm5, (%edx)
movsd %xmm4, 8(%edx)
movsd %xmm3, 16(%edx)
addl $1, -24(%ebp)
movl -16(%ebp), %eax
addl %eax, %edx
movl %edx, -20(%ebp)
movl -24(%ebp), %edx
cmpl %edx, 36(%ebp)
jne .L20
.L27:
addl $16, %esp
popl %ebx
popl %esi
popl %edi
popl %ebp
ret
.L21:
movl 24(%ebp), %eax
movsd (%eax,%esi,8), %xmm0
movl 20(%ebp), %edx
movl (%edx,%esi,4), %eax
sall $5, %eax
movapd %xmm0, %xmm5
mulsd (%edi,%eax), %xmm5
addsd %xmm6, %xmm5
leal (%edi,%eax), %eax
movapd %xmm0, %xmm4
mulsd 8(%eax), %xmm4
addsd %xmm6, %xmm4
movapd %xmm0, %xmm3
mulsd 16(%eax), %xmm3
addsd %xmm6, %xmm3
addl $1, %esi
jmp .L23
.LFE30:
.size sfmult_AT_XT_YT_3, .-sfmult_AT_XT_YT_3
.globl sfmult_AT_XT_YT_4
.type sfmult_AT_XT_YT_4, @function
sfmult_AT_XT_YT_4:
.LFB31:
pushl %ebp
.LCFI12:
movl %esp, %ebp
.LCFI13:
pushl %edi
.LCFI14:
pushl %esi
.LCFI15:
pushl %ebx
.LCFI16:
subl $20, %esp
.LCFI17:
movl 36(%ebp), %ecx
testl %ecx, %ecx
jle .L39
movl 60(%ebp), %eax
sall $3, %eax
movl %eax, -16(%ebp)
movl 8(%ebp), %eax
movl %eax, -20(%ebp)
movl $0, -28(%ebp)
movl $0, -24(%ebp)
pxor %xmm6, %xmm6
.L33:
movl -24(%ebp), %edx
movl 16(%ebp), %ecx
movl 4(%ecx,%edx,4), %eax
cmpl -28(%ebp), %eax
jle .L43
movl -28(%ebp), %esi
movl 20(%ebp), %edi
leal (%edi,%esi,4), %ecx
movl 24(%ebp), %edi
leal (%edi,%esi,8), %edx
movapd %xmm6, %xmm2
movapd %xmm6, %xmm5
movapd %xmm6, %xmm4
movapd %xmm6, %xmm3
xorl %esi, %esi
subl -28(%ebp), %eax
movl %eax, -32(%ebp)
movl 40(%ebp), %edi
.L37:
movsd (%edx), %xmm1
movl (%ecx), %eax
sall $5, %eax
movapd %xmm1, %xmm0
mulsd (%eax,%edi), %xmm0
addsd %xmm0, %xmm3
addl %edi, %eax
movapd %xmm1, %xmm0
mulsd 8(%eax), %xmm0
addsd %xmm0, %xmm4
movapd %xmm1, %xmm0
mulsd 16(%eax), %xmm0
addsd %xmm0, %xmm5
mulsd 24(%eax), %xmm1
addsd %xmm1, %xmm2
addl $1, %esi
addl $4, %ecx
addl $8, %edx
cmpl -32(%ebp), %esi
jne .L37
addl %esi, -28(%ebp)
.L36:
movl -20(%ebp), %eax
movsd %xmm3, (%eax)
movsd %xmm4, 8(%eax)
movsd %xmm5, 16(%eax)
movsd %xmm2, 24(%eax)
addl $1, -24(%ebp)
movl -16(%ebp), %edx
addl %edx, %eax
movl %eax, -20(%ebp)
movl -24(%ebp), %ecx
cmpl %ecx, 36(%ebp)
jne .L33
.L39:
addl $20, %esp
popl %ebx
popl %esi
popl %edi
popl %ebp
ret
.L43:
movapd %xmm6, %xmm2
movapd %xmm6, %xmm5
movapd %xmm6, %xmm4
movapd %xmm6, %xmm3
jmp .L36
.LFE31:
.size sfmult_AT_XT_YT_4, .-sfmult_AT_XT_YT_4
.globl sfmult_AT_XT_YT_k
.type sfmult_AT_XT_YT_k, @function
sfmult_AT_XT_YT_k:
.LFB32:
pushl %ebp
.LCFI18:
movl %esp, %ebp
.LCFI19:
pushl %edi
.LCFI20:
pushl %esi
.LCFI21:
pushl %ebx
.LCFI22:
subl $36, %esp
.LCFI23:
movl 60(%ebp), %esi
movl 36(%ebp), %edi
testl %edi, %edi
jle .L64
movl %esi, %edi
andl $-2147483645, %edi
js .L69
.L47:
leal 0(,%esi,8), %eax
movl %eax, -44(%ebp)
leal 0(,%edi,8), %edx
movl %edx, -16(%ebp)
movl 8(%ebp), %ecx
addl %edx, %ecx
movl %ecx, -20(%ebp)
movl $0, -40(%ebp)
movl $0, -36(%ebp)
.L48:
movl -36(%ebp), %eax
movl 16(%ebp), %ecx
movl 4(%ecx,%eax,4), %edx
testl %esi, %esi
jle .L49
xorl %eax, %eax
movl 8(%ebp), %ecx
.L51:
movl $0, (%ecx,%eax,8)
movl $0, 4(%ecx,%eax,8)
addl $1, %eax
cmpl %eax, %esi
jne .L51
.L49:
cmpl -40(%ebp), %edx
jle .L52
movl -40(%ebp), %eax
movl 20(%ebp), %ecx
leal (%ecx,%eax,4), %eax
movl %eax, -28(%ebp)
movl -40(%ebp), %ecx
movl 24(%ebp), %eax
leal (%eax,%ecx,8), %ecx
movl %ecx, -24(%ebp)
movl $0, -32(%ebp)
subl -40(%ebp), %edx
movl %edx, -48(%ebp)
.L54:
movl -24(%ebp), %eax
movsd (%eax), %xmm1
movl %esi, %eax
movl -28(%ebp), %edx
imull (%edx), %eax
movl 40(%ebp), %ecx
leal (%ecx,%eax,8), %edx
cmpl $2, %edi
je .L57
cmpl $3, %edi
je .L58
cmpl $1, %edi
je .L70
.L55:
cmpl %edi, %esi
.p2align 4,,5
jle .L59
movl -20(%ebp), %eax
addl -16(%ebp), %edx
movl %edi, %ecx
.L61:
movapd %xmm1, %xmm0
mulsd (%edx), %xmm0
addsd (%eax), %xmm0
movsd %xmm0, (%eax)
movapd %xmm1, %xmm0
mulsd 8(%edx), %xmm0
addsd 8(%eax), %xmm0
movsd %xmm0, 8(%eax)
movapd %xmm1, %xmm0
mulsd 16(%edx), %xmm0
addsd 16(%eax), %xmm0
movsd %xmm0, 16(%eax)
movapd %xmm1, %xmm0
mulsd 24(%edx), %xmm0
addsd 24(%eax), %xmm0
movsd %xmm0, 24(%eax)
addl $4, %ecx
addl $32, %eax
addl $32, %edx
cmpl %ecx, %esi
jg .L61
.L59:
addl $1, -32(%ebp)
addl $4, -28(%ebp)
addl $8, -24(%ebp)
movl -48(%ebp), %edx
cmpl %edx, -32(%ebp)
jne .L54
movl -32(%ebp), %ecx
addl %ecx, -40(%ebp)
.L52:
addl $1, -36(%ebp)
movl -44(%ebp), %eax
addl %eax, -20(%ebp)
movl -36(%ebp), %edx
cmpl %edx, 36(%ebp)
je .L64
addl %eax, 8(%ebp)
jmp .L48
.L58:
movapd %xmm1, %xmm0
mulsd 16(%edx), %xmm0
movl 8(%ebp), %eax
addsd 16(%eax), %xmm0
movsd %xmm0, 16(%eax)
.L57:
movapd %xmm1, %xmm0
mulsd 8(%edx), %xmm0
movl 8(%ebp), %ecx
addsd 8(%ecx), %xmm0
movsd %xmm0, 8(%ecx)
movl %ecx, %eax
.L56:
movapd %xmm1, %xmm0
mulsd (%edx), %xmm0
addsd (%eax), %xmm0
movsd %xmm0, (%eax)
jmp .L55
.L70:
movl 8(%ebp), %eax
jmp .L56
.L64:
addl $36, %esp
popl %ebx
popl %esi
popl %edi
popl %ebp
ret
.L69:
subl $1, %edi
orl $-4, %edi
addl $1, %edi
jmp .L47
.LFE32:
.size sfmult_AT_XT_YT_k, .-sfmult_AT_XT_YT_k
.ident "GCC: (GNU) 4.1.0 (SUSE Linux)"
.section .note.GNU-stack,"",@progbits
|
kumomi-systems/eisen
| 4,411
|
eisen-kernel/src/stub/bootinfo.S
|
## Eisen Operating System
## Copyright (C) 2025 Kumomi Systems
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <https://www.gnu.org/licenses/>.
.code64
.section .bootinfo, "adw"
.global bootinfo_start
.equ SECTION_SIZE, 0x40
.equ SPACE, 0x00
.equ PLACEHOLDER, 0x30
.equ HEADER_VERSION, 0
.equ HEADER_SIZE, 0x200
.equ KERNEL_TYPE_FLAT, 0
.equ KERNEL_TYPE_ELF, 1
.equ KERNEL_TYPE, KERNEL_TYPE_ELF
.equ MAJOR_VERSION, 0
.equ MINOR_VERSION, 1
.equ PATCH_VERSION, 0
.equ VERSION_FLAG_ALPHA, 0b00000001 # Alpha release
.equ VERSION_FLAG_BETA, 0b00000010 # Beta release
.equ VERSION_FLAG_RC, 0b00000100 # Release candidate
.equ VERSION_FLAG_XPER, 0b00001000 # Experimental
.equ VERSION_FLAGS, 0 | VERSION_FLAG_ALPHA
.extern _kentry
.extern _kargs
.extern _ksysinfo
.extern STUB_END
.extern KERNEL_VMA
.extern KERNEL_SIZE
.extern STACK_TOP
bootinfo_start:
## Product ID section
.align SECTION_SIZE
.asciz "Eisen" # Magic start number
.byte HEADER_VERSION # Header version
.byte KERNEL_TYPE # Kernel type
.word HEADER_SIZE # Header size in bytes
.align 0x10, SPACE # Reserved
.space 0x10, PLACEHOLDER # Product UUID (unique to each installation of Eisen)
.byte SECTION_SIZE # Section Size
.align 0x04
.space 0x2, PLACEHOLDER # Year built
.space 0x1, PLACEHOLDER # Month built
.space 0x1, PLACEHOLDER # Day built
.byte MAJOR_VERSION # Major Version
.byte MINOR_VERSION # Minor Version
.byte PATCH_VERSION # Patch Version
.byte VERSION_FLAGS # Version Flags
.align 0x10
.asciz "Hydrogen" # Version name
.balign SECTION_SIZE, SPACE # Reserved
## Kernel section
.align SECTION_SIZE
.word 0x9459 # Eisen UUID (bytes 0 and 1)
.space 6, SPACE # Reserved
.quad _kentry # Address of kernel entry
.quad _kargs # Address of kernel args
.quad _ksysinfo # Address of system information
.quad STUB_END # End of stub / beginning of ELF header
.quad KERNEL_VMA # Kernel Virtual Memory Address
.quad KERNEL_SIZE # Size of the kernel
.quad STACK_TOP # Top of the stack
.balign SECTION_SIZE, SPACE # Reserved
.align SECTION_SIZE
.word 0x5C96 # Eisen UUID (bytes 2 and 3)
.balign SECTION_SIZE, SPACE # Reserved
.align SECTION_SIZE
.word 0xBD12 # Eisen UUID (bytes 4 and 5)
.balign SECTION_SIZE, SPACE # Reserved
.align SECTION_SIZE
.word 0x40E1 # Eisen UUID (bytes 6 and 7)
.balign SECTION_SIZE, SPACE # Reserved
.align SECTION_SIZE
.word 0xA7FB # Eisen UUID (bytes 8 and 9)
.balign SECTION_SIZE, SPACE
.align SECTION_SIZE
.word 0x61C5 # Eisen UUID (bytes 10 and 11)
.balign SECTION_SIZE, SPACE
.align SECTION_SIZE
.word 0x0F3D # Eisen UUID (bytes 12 and 13)
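# Pad so the last UUID word, CRC32 checksum, and end magic land in the header's final 0x10 bytes.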
.space HEADER_SIZE-0x10-(.-bootinfo_start), SPACE
.space 2, SPACE
.word 0xCA9A # Eisen UUID (bytes 14 and 15)
.space 4, 0x00 # CRC32 checksum
.ascii "InfoEnd" # Magic end number
.byte 26 # Magic end number
bootinfo_end:
|
kumomi-systems/eisen
| 1,267
|
eisen-kernel/src/stub/mbrtrap.S
|
## Eisen Operating System
## Copyright (C) 2025 Kumomi Systems
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <https://www.gnu.org/licenses/>.
.code16
.global mbr_trap_start
.section .mbrtrap, "a"
mbr_trap_start:
# Disable the blink attribute (INT 0x10, AX=0x1003, BL=0)
movw $0x1003, %ax
movw $0x0000, %bx
int $0x10
# Print trap message
mov $mbr_trap_message, %si # $ takes the symbol's address; a bare symbol would load the word stored there
movb $0x0E, %ah
movb $0x07, %bl
.printstr:
movb (%si), %al
cmpb $0, %al
je .printstr_end
int $0x10
inc %si
jmp .printstr
.printstr_end:
# Actual trap
jmp .
mbr_trap_message:
.asciz "Eisen must be booted from 64-bit UEFI!"
.space 510-(.-mbr_trap_start)
.word 0xAA55
|
kumomi-systems/eisen
| 2,677
|
eisen-kernel/src/arch/x86_64/int/isr.S
|
## Eisen Operating System
## Copyright (C) 2025 Kumomi Systems
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <https://www.gnu.org/licenses/>.
.code64
.text
.macro pushstate
pushq %rax
pushq %rbx
pushq %rcx
pushq %rdx
pushq %rsi
pushq %rdi
pushq %rbp
pushq %r8
pushq %r9
pushq %r10
pushq %r11
pushq %r12
pushq %r13
pushq %r14
pushq %r15
.endm
.macro popstate
popq %r15
popq %r14
popq %r13
popq %r12
popq %r11
popq %r10
popq %r9
popq %r8
popq %rbp
popq %rdi
popq %rsi
popq %rdx
popq %rcx
popq %rbx
popq %rax
.endm
.altmacro
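# .altmacro enables %-expression macro arguments, used by the .rept at the bottom to expand isr_no_error %x.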
.macro isr_no_error number
.global isr\number
.align 0x10
isr\number:
cli
push $0x00
push $\number
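# Vectors below 0x20 are CPU exceptions; everything from 0x20 up is dispatched as an IRQ.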
.iflt \number-0x20
jmp isr_common
.else
jmp irq_common
.endif
.endm
.macro isr_error number
.global isr\number
.align 0x10
isr\number:
cli
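# The CPU has already pushed an error code for these vectors; the two nops
# stand in for the dummy two-byte push $0x00 of the no-error stubs, keeping
# both stub variants the same length.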
nop
nop
push $\number
jmp isr_common
.endm
.macro isr number
.quad isr\number
.endm
.global ISR_TABLE
.align 0x10
ISR_TABLE:
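# Each stub is aligned and padded to 0x10 bytes, so entry i is simply isr0 + i*0x10.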
.set x, 0
.rept 256
.quad isr0 + x
.set x, x+0x10
.endr
.extern isr_handler
.align 0x10
isr_common:
cld
pushstate
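# pushstate saves 15 GPRs (120 bytes), so the vector number pushed by the
# stub sits at 120(%rsp) and the error code at 128(%rsp).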
xor %rdi, %rdi
movb 120(%rsp), %dil
movq %rsp, %rsi
call isr_handler
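# isr_handler returns the stack pointer of the context to resume, so a task switch can happen here.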
movq %rax, %rsp
popstate
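# Drop the vector number and error code (2 x 8 bytes) before returning.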
add $0x10, %rsp
iretq
.extern irq_handler
.align 0x10
irq_common:
pushstate
xor %rdi, %rdi
movb 120(%rsp), %dil
movq %rsp, %rsi
call irq_handler
popstate
add $0x10, %rsp
sti
iretq
isr_no_error 0
isr_no_error 1
isr_no_error 2
isr_no_error 3
isr_no_error 4
isr_no_error 5
isr_no_error 6
isr_no_error 7
isr_error 8
isr_no_error 9
isr_error 10
isr_error 11
isr_error 12
isr_error 13
isr_error 14
isr_no_error 15
isr_no_error 16
isr_error 17
isr_no_error 18
isr_no_error 19
isr_no_error 20
isr_no_error 21
isr_no_error 22
isr_no_error 23
isr_no_error 24
isr_no_error 25
isr_no_error 26
isr_no_error 27
isr_no_error 28
isr_no_error 29
isr_error 30
isr_no_error 31
.set x, 32
.rept 256-32
isr_no_error %x
.set x, x+1
.endr
|
kurumi8686/FDU2023-introduction-to-computer-system
| 78,257
|
lab2_bomblab/bomblab-handout/bomb.S
|
./bomb: file format elf64-x86-64
Disassembly of section .init:
0000000000401000 <_init>:
401000: f3 0f 1e fa endbr64
401004: 48 83 ec 08 sub $0x8,%rsp
401008: 48 8b 05 e1 4f 00 00 mov 0x4fe1(%rip),%rax # 405ff0 <__gmon_start__>
40100f: 48 85 c0 test %rax,%rax
401012: 74 02 je 401016 <_init+0x16>
401014: ff d0 call *%rax
401016: 48 83 c4 08 add $0x8,%rsp
40101a: c3 ret
Disassembly of section .plt:
0000000000401020 <.plt>:
401020: ff 35 e2 4f 00 00 push 0x4fe2(%rip) # 406008 <_GLOBAL_OFFSET_TABLE_+0x8>
401026: f2 ff 25 e3 4f 00 00 bnd jmp *0x4fe3(%rip) # 406010 <_GLOBAL_OFFSET_TABLE_+0x10>
40102d: 0f 1f 00 nopl (%rax)
401030: f3 0f 1e fa endbr64
401034: 68 00 00 00 00 push $0x0
401039: f2 e9 e1 ff ff ff bnd jmp 401020 <.plt>
40103f: 90 nop
401040: f3 0f 1e fa endbr64
401044: 68 01 00 00 00 push $0x1
401049: f2 e9 d1 ff ff ff bnd jmp 401020 <.plt>
40104f: 90 nop
401050: f3 0f 1e fa endbr64
401054: 68 02 00 00 00 push $0x2
401059: f2 e9 c1 ff ff ff bnd jmp 401020 <.plt>
40105f: 90 nop
401060: f3 0f 1e fa endbr64
401064: 68 03 00 00 00 push $0x3
401069: f2 e9 b1 ff ff ff bnd jmp 401020 <.plt>
40106f: 90 nop
401070: f3 0f 1e fa endbr64
401074: 68 04 00 00 00 push $0x4
401079: f2 e9 a1 ff ff ff bnd jmp 401020 <.plt>
40107f: 90 nop
401080: f3 0f 1e fa endbr64
401084: 68 05 00 00 00 push $0x5
401089: f2 e9 91 ff ff ff bnd jmp 401020 <.plt>
40108f: 90 nop
401090: f3 0f 1e fa endbr64
401094: 68 06 00 00 00 push $0x6
401099: f2 e9 81 ff ff ff bnd jmp 401020 <.plt>
40109f: 90 nop
4010a0: f3 0f 1e fa endbr64
4010a4: 68 07 00 00 00 push $0x7
4010a9: f2 e9 71 ff ff ff bnd jmp 401020 <.plt>
4010af: 90 nop
4010b0: f3 0f 1e fa endbr64
4010b4: 68 08 00 00 00 push $0x8
4010b9: f2 e9 61 ff ff ff bnd jmp 401020 <.plt>
4010bf: 90 nop
4010c0: f3 0f 1e fa endbr64
4010c4: 68 09 00 00 00 push $0x9
4010c9: f2 e9 51 ff ff ff bnd jmp 401020 <.plt>
4010cf: 90 nop
4010d0: f3 0f 1e fa endbr64
4010d4: 68 0a 00 00 00 push $0xa
4010d9: f2 e9 41 ff ff ff bnd jmp 401020 <.plt>
4010df: 90 nop
4010e0: f3 0f 1e fa endbr64
4010e4: 68 0b 00 00 00 push $0xb
4010e9: f2 e9 31 ff ff ff bnd jmp 401020 <.plt>
4010ef: 90 nop
4010f0: f3 0f 1e fa endbr64
4010f4: 68 0c 00 00 00 push $0xc
4010f9: f2 e9 21 ff ff ff bnd jmp 401020 <.plt>
4010ff: 90 nop
401100: f3 0f 1e fa endbr64
401104: 68 0d 00 00 00 push $0xd
401109: f2 e9 11 ff ff ff bnd jmp 401020 <.plt>
40110f: 90 nop
401110: f3 0f 1e fa endbr64
401114: 68 0e 00 00 00 push $0xe
401119: f2 e9 01 ff ff ff bnd jmp 401020 <.plt>
40111f: 90 nop
401120: f3 0f 1e fa endbr64
401124: 68 0f 00 00 00 push $0xf
401129: f2 e9 f1 fe ff ff bnd jmp 401020 <.plt>
40112f: 90 nop
Disassembly of section .plt.sec:
0000000000401130 <printf@plt>:
401130: f3 0f 1e fa endbr64
401134: f2 ff 25 dd 4e 00 00 bnd jmp *0x4edd(%rip) # 406018 <printf@GLIBC_2.2.5>
40113b: 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1)
0000000000401140 <nanosleep@plt>:
401140: f3 0f 1e fa endbr64
401144: f2 ff 25 d5 4e 00 00 bnd jmp *0x4ed5(%rip) # 406020 <nanosleep@GLIBC_2.2.5>
40114b: 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1)
0000000000401150 <__cxa_atexit@plt>:
401150: f3 0f 1e fa endbr64
401154: f2 ff 25 cd 4e 00 00 bnd jmp *0x4ecd(%rip) # 406028 <__cxa_atexit@GLIBC_2.2.5>
40115b: 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1)
0000000000401160 <__isoc99_sscanf@plt>:
401160: f3 0f 1e fa endbr64
401164: f2 ff 25 c5 4e 00 00 bnd jmp *0x4ec5(%rip) # 406030 <__isoc99_sscanf@GLIBC_2.7>
40116b: 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1)
0000000000401170 <fclose@plt>:
401170: f3 0f 1e fa endbr64
401174: f2 ff 25 bd 4e 00 00 bnd jmp *0x4ebd(%rip) # 406038 <fclose@GLIBC_2.2.5>
40117b: 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1)
0000000000401180 <_Znwm@plt>:
401180: f3 0f 1e fa endbr64
401184: f2 ff 25 b5 4e 00 00 bnd jmp *0x4eb5(%rip) # 406040 <_Znwm@GLIBCXX_3.4>
40118b: 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1)
0000000000401190 <fflush@plt>:
401190: f3 0f 1e fa endbr64
401194: f2 ff 25 ad 4e 00 00 bnd jmp *0x4ead(%rip) # 406048 <fflush@GLIBC_2.2.5>
40119b: 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1)
00000000004011a0 <fopen@plt>:
4011a0: f3 0f 1e fa endbr64
4011a4: f2 ff 25 a5 4e 00 00 bnd jmp *0x4ea5(%rip) # 406050 <fopen@GLIBC_2.2.5>
4011ab: 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1)
00000000004011b0 <exit@plt>:
4011b0: f3 0f 1e fa endbr64
4011b4: f2 ff 25 9d 4e 00 00 bnd jmp *0x4e9d(%rip) # 406058 <exit@GLIBC_2.2.5>
4011bb: 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1)
00000000004011c0 <getchar@plt>:
4011c0: f3 0f 1e fa endbr64
4011c4: f2 ff 25 95 4e 00 00 bnd jmp *0x4e95(%rip) # 406060 <getchar@GLIBC_2.2.5>
4011cb: 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1)
00000000004011d0 <putchar@plt>:
4011d0: f3 0f 1e fa endbr64
4011d4: f2 ff 25 8d 4e 00 00 bnd jmp *0x4e8d(%rip) # 406068 <putchar@GLIBC_2.2.5>
4011db: 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1)
00000000004011e0 <strcmp@plt>:
4011e0: f3 0f 1e fa endbr64
4011e4: f2 ff 25 85 4e 00 00 bnd jmp *0x4e85(%rip) # 406070 <strcmp@GLIBC_2.2.5>
4011eb: 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1)
00000000004011f0 <_ZNSt8ios_base4InitC1Ev@plt>:
4011f0: f3 0f 1e fa endbr64
4011f4: f2 ff 25 7d 4e 00 00 bnd jmp *0x4e7d(%rip) # 406078 <_ZNSt8ios_base4InitC1Ev@GLIBCXX_3.4>
4011fb: 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1)
0000000000401200 <puts@plt>:
401200: f3 0f 1e fa endbr64
401204: f2 ff 25 75 4e 00 00 bnd jmp *0x4e75(%rip) # 406080 <puts@GLIBC_2.2.5>
40120b: 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1)
0000000000401210 <feof@plt>:
401210: f3 0f 1e fa endbr64
401214: f2 ff 25 6d 4e 00 00 bnd jmp *0x4e6d(%rip) # 406088 <feof@GLIBC_2.2.5>
40121b: 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1)
0000000000401220 <fgetc@plt>:
401220: f3 0f 1e fa endbr64
401224: f2 ff 25 65 4e 00 00 bnd jmp *0x4e65(%rip) # 406090 <fgetc@GLIBC_2.2.5>
40122b: 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1)
Disassembly of section .text:
0000000000401230 <_start>:
401230: f3 0f 1e fa endbr64
401234: 31 ed xor %ebp,%ebp
401236: 49 89 d1 mov %rdx,%r9
401239: 5e pop %rsi
40123a: 48 89 e2 mov %rsp,%rdx
40123d: 48 83 e4 f0 and $0xfffffffffffffff0,%rsp
401241: 50 push %rax
401242: 54 push %rsp
401243: 49 c7 c0 70 23 40 00 mov $0x402370,%r8
40124a: 48 c7 c1 00 23 40 00 mov $0x402300,%rcx
401251: 48 c7 c7 8f 13 40 00 mov $0x40138f,%rdi
401258: ff 15 8a 4d 00 00 call *0x4d8a(%rip) # 405fe8 <__libc_start_main@GLIBC_2.2.5>
40125e: f4 hlt
40125f: 90 nop
0000000000401260 <_dl_relocate_static_pie>:
401260: f3 0f 1e fa endbr64
401264: c3 ret
401265: 66 2e 0f 1f 84 00 00 cs nopw 0x0(%rax,%rax,1)
40126c: 00 00 00
40126f: 90 nop
0000000000401270 <deregister_tm_clones>:
401270: b8 20 64 40 00 mov $0x406420,%eax
401275: 48 3d 20 64 40 00 cmp $0x406420,%rax
40127b: 74 13 je 401290 <deregister_tm_clones+0x20>
40127d: b8 00 00 00 00 mov $0x0,%eax
401282: 48 85 c0 test %rax,%rax
401285: 74 09 je 401290 <deregister_tm_clones+0x20>
401287: bf 20 64 40 00 mov $0x406420,%edi
40128c: ff e0 jmp *%rax
40128e: 66 90 xchg %ax,%ax
401290: c3 ret
401291: 66 66 2e 0f 1f 84 00 data16 cs nopw 0x0(%rax,%rax,1)
401298: 00 00 00 00
40129c: 0f 1f 40 00 nopl 0x0(%rax)
00000000004012a0 <register_tm_clones>:
4012a0: be 20 64 40 00 mov $0x406420,%esi
4012a5: 48 81 ee 20 64 40 00 sub $0x406420,%rsi
4012ac: 48 89 f0 mov %rsi,%rax
4012af: 48 c1 ee 3f shr $0x3f,%rsi
4012b3: 48 c1 f8 03 sar $0x3,%rax
4012b7: 48 01 c6 add %rax,%rsi
4012ba: 48 d1 fe sar %rsi
4012bd: 74 11 je 4012d0 <register_tm_clones+0x30>
4012bf: b8 00 00 00 00 mov $0x0,%eax
4012c4: 48 85 c0 test %rax,%rax
4012c7: 74 07 je 4012d0 <register_tm_clones+0x30>
4012c9: bf 20 64 40 00 mov $0x406420,%edi
4012ce: ff e0 jmp *%rax
4012d0: c3 ret
4012d1: 66 66 2e 0f 1f 84 00 data16 cs nopw 0x0(%rax,%rax,1)
4012d8: 00 00 00 00
4012dc: 0f 1f 40 00 nopl 0x0(%rax)
00000000004012e0 <__do_global_dtors_aux>:
4012e0: f3 0f 1e fa endbr64
4012e4: 80 3d 3d 51 00 00 00 cmpb $0x0,0x513d(%rip) # 406428 <completed.8061>
4012eb: 75 13 jne 401300 <__do_global_dtors_aux+0x20>
4012ed: 55 push %rbp
4012ee: 48 89 e5 mov %rsp,%rbp
4012f1: e8 7a ff ff ff call 401270 <deregister_tm_clones>
4012f6: c6 05 2b 51 00 00 01 movb $0x1,0x512b(%rip) # 406428 <completed.8061>
4012fd: 5d pop %rbp
4012fe: c3 ret
4012ff: 90 nop
401300: c3 ret
401301: 66 66 2e 0f 1f 84 00 data16 cs nopw 0x0(%rax,%rax,1)
401308: 00 00 00 00
40130c: 0f 1f 40 00 nopl 0x0(%rax)
0000000000401310 <frame_dummy>:
401310: f3 0f 1e fa endbr64
401314: eb 8a jmp 4012a0 <register_tm_clones>
0000000000401316 <read_line>:
401316: f3 0f 1e fa endbr64
40131a: 55 push %rbp
40131b: 48 89 e5 mov %rsp,%rbp
40131e: 48 83 ec 20 sub $0x20,%rsp
401322: 48 89 7d e8 mov %rdi,-0x18(%rbp)
401326: c7 45 fc 00 00 00 00 movl $0x0,-0x4(%rbp)
40132d: 83 7d fc 28 cmpl $0x28,-0x4(%rbp)
401331: 7f 39 jg 40136c <read_line+0x56>
401333: e8 88 fe ff ff call 4011c0 <getchar@plt>
401338: 88 45 fb mov %al,-0x5(%rbp)
40133b: 80 7d fb 0a cmpb $0xa,-0x5(%rbp)
40133f: 75 12 jne 401353 <read_line+0x3d>
401341: 8b 45 fc mov -0x4(%rbp),%eax
401344: 48 63 d0 movslq %eax,%rdx
401347: 48 8b 45 e8 mov -0x18(%rbp),%rax
40134b: 48 01 d0 add %rdx,%rax
40134e: c6 00 00 movb $0x0,(%rax)
401351: eb 3a jmp 40138d <read_line+0x77>
401353: 8b 45 fc mov -0x4(%rbp),%eax
401356: 48 63 d0 movslq %eax,%rdx
401359: 48 8b 45 e8 mov -0x18(%rbp),%rax
40135d: 48 01 c2 add %rax,%rdx
401360: 0f b6 45 fb movzbl -0x5(%rbp),%eax
401364: 88 02 mov %al,(%rdx)
401366: 83 45 fc 01 addl $0x1,-0x4(%rbp)
40136a: eb c1 jmp 40132d <read_line+0x17>
40136c: 8b 45 fc mov -0x4(%rbp),%eax
40136f: 48 63 d0 movslq %eax,%rdx
401372: 48 8b 45 e8 mov -0x18(%rbp),%rax
401376: 48 01 d0 add %rdx,%rax
401379: c6 00 00 movb $0x0,(%rax)
40137c: e8 3f fe ff ff call 4011c0 <getchar@plt>
401381: 83 f8 0a cmp $0xa,%eax
401384: 0f 95 c0 setne %al
401387: 84 c0 test %al,%al
401389: 74 02 je 40138d <read_line+0x77>
40138b: eb ef jmp 40137c <read_line+0x66>
40138d: c9 leave
40138e: c3 ret
000000000040138f <main>:
40138f: f3 0f 1e fa endbr64
401393: 55 push %rbp
401394: 48 89 e5 mov %rsp,%rbp
401397: 48 83 ec 30 sub $0x30,%rsp
40139b: 48 c7 45 f8 00 00 00 movq $0x0,-0x8(%rbp)
4013a2: 00
4013a3: 48 8d 3d 5e 1c 00 00 lea 0x1c5e(%rip),%rdi # 403008 <_IO_stdin_used+0x8>
4013aa: e8 51 fe ff ff call 401200 <puts@plt>
4013af: 48 8d 45 d0 lea -0x30(%rbp),%rax
4013b3: 48 89 c7 mov %rax,%rdi
4013b6: e8 5b ff ff ff call 401316 <read_line>
4013bb: 48 8d 45 d0 lea -0x30(%rbp),%rax
4013bf: 48 89 c7 mov %rax,%rdi
4013c2: e8 7e 01 00 00 call 401545 <phase_1>
4013c7: 48 8d 3d 82 1c 00 00 lea 0x1c82(%rip),%rdi # 403050 <_IO_stdin_used+0x50>
4013ce: e8 2d fe ff ff call 401200 <puts@plt>
4013d3: 48 8d 45 d0 lea -0x30(%rbp),%rax
4013d7: 48 89 c7 mov %rax,%rdi
4013da: e8 37 ff ff ff call 401316 <read_line>
4013df: 48 8d 45 d0 lea -0x30(%rbp),%rax
4013e3: 48 89 c7 mov %rax,%rdi
4013e6: e8 a2 01 00 00 call 40158d <phase_2>
4013eb: 48 8d 3d 87 1c 00 00 lea 0x1c87(%rip),%rdi # 403079 <_IO_stdin_used+0x79>
4013f2: e8 09 fe ff ff call 401200 <puts@plt>
4013f7: 48 8d 45 d0 lea -0x30(%rbp),%rax
4013fb: 48 89 c7 mov %rax,%rdi
4013fe: e8 13 ff ff ff call 401316 <read_line>
401403: 48 8d 45 d0 lea -0x30(%rbp),%rax
401407: 48 89 c7 mov %rax,%rdi
40140a: e8 0c 02 00 00 call 40161b <phase_3>
40140f: 48 8d 3d 80 1c 00 00 lea 0x1c80(%rip),%rdi # 403096 <_IO_stdin_used+0x96>
401416: e8 e5 fd ff ff call 401200 <puts@plt>
40141b: 48 8d 45 d0 lea -0x30(%rbp),%rax
40141f: 48 89 c7 mov %rax,%rdi
401422: e8 ef fe ff ff call 401316 <read_line>
401427: 48 8d 45 d0 lea -0x30(%rbp),%rax
40142b: 48 89 c7 mov %rax,%rdi
40142e: e8 c1 03 00 00 call 4017f4 <phase_4>
401433: 48 8d 3d 6e 1c 00 00 lea 0x1c6e(%rip),%rdi # 4030a8 <_IO_stdin_used+0xa8>
40143a: e8 c1 fd ff ff call 401200 <puts@plt>
40143f: 48 8d 45 d0 lea -0x30(%rbp),%rax
401443: 48 89 c7 mov %rax,%rdi
401446: e8 cb fe ff ff call 401316 <read_line>
40144b: 48 8d 45 d0 lea -0x30(%rbp),%rax
40144f: 48 89 c7 mov %rax,%rdi
401452: e8 3c 04 00 00 call 401893 <phase_5>
401457: 48 8d 3d 6d 1c 00 00 lea 0x1c6d(%rip),%rdi # 4030cb <_IO_stdin_used+0xcb>
40145e: e8 9d fd ff ff call 401200 <puts@plt>
401463: 48 8d 45 d0 lea -0x30(%rbp),%rax
401467: 48 89 c7 mov %rax,%rdi
40146a: e8 a7 fe ff ff call 401316 <read_line>
40146f: 48 8d 45 d0 lea -0x30(%rbp),%rax
401473: 48 89 c7 mov %rax,%rdi
401476: e8 fb 07 00 00 call 401c76 <phase_6>
40147b: 48 8d 3d 66 1c 00 00 lea 0x1c66(%rip),%rdi # 4030e8 <_IO_stdin_used+0xe8>
401482: e8 79 fd ff ff call 401200 <puts@plt>
401487: 48 83 7d f8 00 cmpq $0x0,-0x8(%rbp)
40148c: 74 4d je 4014db <main+0x14c>
40148e: 48 8d 3d 8b 1c 00 00 lea 0x1c8b(%rip),%rdi # 403120 <_IO_stdin_used+0x120>
401495: e8 40 0c 00 00 call 4020da <slow_put>
40149a: 48 8d 3d a6 1c 00 00 lea 0x1ca6(%rip),%rdi # 403147 <_IO_stdin_used+0x147>
4014a1: e8 34 0c 00 00 call 4020da <slow_put>
4014a6: 48 8d 3d a7 1c 00 00 lea 0x1ca7(%rip),%rdi # 403154 <_IO_stdin_used+0x154>
4014ad: e8 8e 0c 00 00 call 402140 <slow_slow_put>
4014b2: 48 8d 45 d0 lea -0x30(%rbp),%rax
4014b6: 48 89 c7 mov %rax,%rdi
4014b9: e8 58 fe ff ff call 401316 <read_line>
4014be: 48 8d 45 d0 lea -0x30(%rbp),%rax
4014c2: 48 89 c7 mov %rax,%rdi
4014c5: e8 eb 08 00 00 call 401db5 <secret_phase>
4014ca: 48 8d 3d 9f 1c 00 00 lea 0x1c9f(%rip),%rdi # 403170 <_IO_stdin_used+0x170>
4014d1: e8 2a fd ff ff call 401200 <puts@plt>
4014d6: e8 44 0d 00 00 call 40221f <true_ending>
4014db: b8 00 00 00 00 mov $0x0,%eax
4014e0: c9 leave
4014e1: c3 ret
00000000004014e2 <string_not_equal>:
4014e2: f3 0f 1e fa endbr64
4014e6: 55 push %rbp
4014e7: 48 89 e5 mov %rsp,%rbp
4014ea: 48 89 7d e8 mov %rdi,-0x18(%rbp)
4014ee: 48 89 75 e0 mov %rsi,-0x20(%rbp)
4014f2: c7 45 fc 00 00 00 00 movl $0x0,-0x4(%rbp)
4014f9: 8b 45 fc mov -0x4(%rbp),%eax
4014fc: 48 63 d0 movslq %eax,%rdx
4014ff: 48 8b 45 e0 mov -0x20(%rbp),%rax
401503: 48 01 d0 add %rdx,%rax
401506: 0f b6 00 movzbl (%rax),%eax
401509: 84 c0 test %al,%al
40150b: 74 31 je 40153e <string_not_equal+0x5c>
40150d: 8b 45 fc mov -0x4(%rbp),%eax
401510: 48 63 d0 movslq %eax,%rdx
401513: 48 8b 45 e8 mov -0x18(%rbp),%rax
401517: 48 01 d0 add %rdx,%rax
40151a: 0f b6 10 movzbl (%rax),%edx
40151d: 8b 45 fc mov -0x4(%rbp),%eax
401520: 48 63 c8 movslq %eax,%rcx
401523: 48 8b 45 e0 mov -0x20(%rbp),%rax
401527: 48 01 c8 add %rcx,%rax
40152a: 0f b6 00 movzbl (%rax),%eax
40152d: 38 c2 cmp %al,%dl
40152f: 74 07 je 401538 <string_not_equal+0x56>
401531: b8 00 00 00 00 mov $0x0,%eax
401536: eb 0b jmp 401543 <string_not_equal+0x61>
401538: 83 45 fc 01 addl $0x1,-0x4(%rbp)
40153c: eb bb jmp 4014f9 <string_not_equal+0x17>
40153e: b8 01 00 00 00 mov $0x1,%eax
401543: 5d pop %rbp
401544: c3 ret
0000000000401545 <phase_1>:
401545: f3 0f 1e fa endbr64
401549: 55 push %rbp
40154a: 48 89 e5 mov %rsp,%rbp
40154d: 48 83 ec 20 sub $0x20,%rsp
401551: 48 89 7d e8 mov %rdi,-0x18(%rbp)
401555: 8b 05 f1 4d 00 00 mov 0x4df1(%rip),%eax # 40634c <phase_1_offset>
40155b: 48 98 cltq
40155d: 48 8d 15 5c 4b 00 00 lea 0x4b5c(%rip),%rdx # 4060c0 <phase_1_str>
401564: 48 01 d0 add %rdx,%rax
401567: 48 89 45 f8 mov %rax,-0x8(%rbp)
40156b: 48 8b 55 f8 mov -0x8(%rbp),%rdx
40156f: 48 8b 45 e8 mov -0x18(%rbp),%rax
401573: 48 89 d6 mov %rdx,%rsi
401576: 48 89 c7 mov %rax,%rdi
401579: e8 64 ff ff ff call 4014e2 <string_not_equal>
40157e: 83 f0 01 xor $0x1,%eax
401581: 84 c0 test %al,%al
401583: 74 05 je 40158a <phase_1+0x45>
401585: e8 be 0a 00 00 call 402048 <explode_bomb>
40158a: 90 nop
40158b: c9 leave
40158c: c3 ret
000000000040158d <phase_2>:
40158d: f3 0f 1e fa endbr64
401591: 55 push %rbp
401592: 48 89 e5 mov %rsp,%rbp
401595: 48 83 ec 30 sub $0x30,%rsp
401599: 48 89 7d d8 mov %rdi,-0x28(%rbp)
40159d: 48 8b 45 d8 mov -0x28(%rbp),%rax
4015a1: 48 8d 35 b8 4d 00 00 lea 0x4db8(%rip),%rsi # 406360 <phase_2_nums>
4015a8: 48 89 c7 mov %rax,%rdi
4015ab: e8 f6 0b 00 00 call 4021a6 <read_six_numbers>
4015b0: 48 8d 05 a9 4d 00 00 lea 0x4da9(%rip),%rax # 406360 <phase_2_nums>
4015b7: 48 89 45 f0 mov %rax,-0x10(%rbp)
4015bb: c7 45 fc 01 00 00 00 movl $0x1,-0x4(%rbp)
4015c2: 83 7d fc 05 cmpl $0x5,-0x4(%rbp)
4015c6: 7f 50 jg 401618 <phase_2+0x8b>
4015c8: 8b 15 ae 4d 00 00 mov 0x4dae(%rip),%edx # 40637c <phase_2_nums+0x1c>
4015ce: 8b 45 fc mov -0x4(%rbp),%eax
4015d1: 48 98 cltq
4015d3: 48 c1 e0 02 shl $0x2,%rax
4015d7: 48 8d 48 fc lea -0x4(%rax),%rcx
4015db: 48 8b 45 f0 mov -0x10(%rbp),%rax
4015df: 48 01 c8 add %rcx,%rax
4015e2: 8b 00 mov (%rax),%eax
4015e4: 0f af d0 imul %eax,%edx
4015e7: 8b 05 93 4d 00 00 mov 0x4d93(%rip),%eax # 406380 <phase_2_nums+0x20>
4015ed: 01 d0 add %edx,%eax
4015ef: 89 45 ec mov %eax,-0x14(%rbp)
4015f2: 8b 45 fc mov -0x4(%rbp),%eax
4015f5: 48 98 cltq
4015f7: 48 8d 14 85 00 00 00 lea 0x0(,%rax,4),%rdx
4015fe: 00
4015ff: 48 8b 45 f0 mov -0x10(%rbp),%rax
401603: 48 01 d0 add %rdx,%rax
401606: 8b 00 mov (%rax),%eax
401608: 39 45 ec cmp %eax,-0x14(%rbp)
40160b: 74 05 je 401612 <phase_2+0x85>
40160d: e8 36 0a 00 00 call 402048 <explode_bomb>
401612: 83 45 fc 01 addl $0x1,-0x4(%rbp)
401616: eb aa jmp 4015c2 <phase_2+0x35>
401618: 90 nop
401619: c9 leave
40161a: c3 ret
000000000040161b <phase_3>:
40161b: f3 0f 1e fa endbr64
40161f: 55 push %rbp
401620: 48 89 e5 mov %rsp,%rbp
401623: 48 83 ec 30 sub $0x30,%rsp
401627: 48 89 7d d8 mov %rdi,-0x28(%rbp)
40162b: 48 8d 75 ef lea -0x11(%rbp),%rsi
40162f: 48 8d 4d f0 lea -0x10(%rbp),%rcx
401633: 48 8d 55 f4 lea -0xc(%rbp),%rdx
401637: 48 8b 45 d8 mov -0x28(%rbp),%rax
40163b: 49 89 f0 mov %rsi,%r8
40163e: 48 8d 35 03 1c 00 00 lea 0x1c03(%rip),%rsi # 403248 <_ZNSt8__detailL19_S_invalid_state_idE+0x8>
401645: 48 89 c7 mov %rax,%rdi
401648: b8 00 00 00 00 mov $0x0,%eax
40164d: e8 0e fb ff ff call 401160 <__isoc99_sscanf@plt>
401652: 89 45 f8 mov %eax,-0x8(%rbp)
401655: 83 7d f8 03 cmpl $0x3,-0x8(%rbp)
401659: 74 05 je 401660 <phase_3+0x45>
40165b: e8 e8 09 00 00 call 402048 <explode_bomb>
401660: 8b 45 f4 mov -0xc(%rbp),%eax
401663: 3d 62 02 00 00 cmp $0x262,%eax
401668: 0f 84 fb 00 00 00 je 401769 <phase_3+0x14e>
40166e: 3d 62 02 00 00 cmp $0x262,%eax
401673: 0f 8f 05 01 00 00 jg 40177e <phase_3+0x163>
401679: 3d e9 00 00 00 cmp $0xe9,%eax
40167e: 0f 84 d0 00 00 00 je 401754 <phase_3+0x139>
401684: 3d e9 00 00 00 cmp $0xe9,%eax
401689: 0f 8f ef 00 00 00 jg 40177e <phase_3+0x163>
40168f: 83 f8 22 cmp $0x22,%eax
401692: 7f 34 jg 4016c8 <phase_3+0xad>
401694: 85 c0 test %eax,%eax
401696: 0f 8e e2 00 00 00 jle 40177e <phase_3+0x163>
40169c: 83 f8 22 cmp $0x22,%eax
40169f: 0f 87 d9 00 00 00 ja 40177e <phase_3+0x163>
4016a5: 89 c0 mov %eax,%eax
4016a7: 48 8d 14 85 00 00 00 lea 0x0(,%rax,4),%rdx
4016ae: 00
4016af: 48 8d 05 9e 1b 00 00 lea 0x1b9e(%rip),%rax # 403254 <_ZNSt8__detailL19_S_invalid_state_idE+0x14>
4016b6: 8b 04 02 mov (%rdx,%rax,1),%eax
4016b9: 48 98 cltq
4016bb: 48 8d 15 92 1b 00 00 lea 0x1b92(%rip),%rdx # 403254 <_ZNSt8__detailL19_S_invalid_state_idE+0x14>
4016c2: 48 01 d0 add %rdx,%rax
4016c5: 3e ff e0 notrack jmp *%rax
4016c8: 83 f8 59 cmp $0x59,%eax
4016cb: 74 72 je 40173f <phase_3+0x124>
4016cd: e9 ac 00 00 00 jmp 40177e <phase_3+0x163>
4016d2: c6 45 ff 6f movb $0x6f,-0x1(%rbp)
4016d6: 8b 45 f0 mov -0x10(%rbp),%eax
4016d9: 83 f8 01 cmp $0x1,%eax
4016dc: 0f 84 a3 00 00 00 je 401785 <phase_3+0x16a>
4016e2: e8 61 09 00 00 call 402048 <explode_bomb>
4016e7: e9 99 00 00 00 jmp 401785 <phase_3+0x16a>
4016ec: c6 45 ff 76 movb $0x76,-0x1(%rbp)
4016f0: 8b 45 f0 mov -0x10(%rbp),%eax
4016f3: 83 f8 03 cmp $0x3,%eax
4016f6: 0f 84 8c 00 00 00 je 401788 <phase_3+0x16d>
4016fc: e8 47 09 00 00 call 402048 <explode_bomb>
401701: e9 82 00 00 00 jmp 401788 <phase_3+0x16d>
401706: c6 45 ff 65 movb $0x65,-0x1(%rbp)
40170a: 8b 45 f0 mov -0x10(%rbp),%eax
40170d: 83 f8 08 cmp $0x8,%eax
401710: 74 79 je 40178b <phase_3+0x170>
401712: e8 31 09 00 00 call 402048 <explode_bomb>
401717: eb 72 jmp 40178b <phase_3+0x170>
401719: c6 45 ff 72 movb $0x72,-0x1(%rbp)
40171d: 8b 45 f0 mov -0x10(%rbp),%eax
401720: 83 f8 15 cmp $0x15,%eax
401723: 74 69 je 40178e <phase_3+0x173>
401725: e8 1e 09 00 00 call 402048 <explode_bomb>
40172a: eb 62 jmp 40178e <phase_3+0x173>
40172c: c6 45 ff 66 movb $0x66,-0x1(%rbp)
401730: 8b 45 f0 mov -0x10(%rbp),%eax
401733: 83 f8 37 cmp $0x37,%eax
401736: 74 59 je 401791 <phase_3+0x176>
401738: e8 0b 09 00 00 call 402048 <explode_bomb>
40173d: eb 52 jmp 401791 <phase_3+0x176>
40173f: c6 45 ff 6c movb $0x6c,-0x1(%rbp)
401743: 8b 45 f0 mov -0x10(%rbp),%eax
401746: 3d 90 00 00 00 cmp $0x90,%eax
40174b: 74 47 je 401794 <phase_3+0x179>
40174d: e8 f6 08 00 00 call 402048 <explode_bomb>
401752: eb 40 jmp 401794 <phase_3+0x179>
401754: c6 45 ff 6f movb $0x6f,-0x1(%rbp)
401758: 8b 45 f0 mov -0x10(%rbp),%eax
40175b: 3d 79 01 00 00 cmp $0x179,%eax
401760: 74 35 je 401797 <phase_3+0x17c>
401762: e8 e1 08 00 00 call 402048 <explode_bomb>
401767: eb 2e jmp 401797 <phase_3+0x17c>
401769: c6 45 ff 77 movb $0x77,-0x1(%rbp)
40176d: 8b 45 f0 mov -0x10(%rbp),%eax
401770: 3d db 03 00 00 cmp $0x3db,%eax
401775: 74 23 je 40179a <phase_3+0x17f>
401777: e8 cc 08 00 00 call 402048 <explode_bomb>
40177c: eb 1c jmp 40179a <phase_3+0x17f>
40177e: e8 c5 08 00 00 call 402048 <explode_bomb>
401783: eb 16 jmp 40179b <phase_3+0x180>
401785: 90 nop
401786: eb 13 jmp 40179b <phase_3+0x180>
401788: 90 nop
401789: eb 10 jmp 40179b <phase_3+0x180>
40178b: 90 nop
40178c: eb 0d jmp 40179b <phase_3+0x180>
40178e: 90 nop
40178f: eb 0a jmp 40179b <phase_3+0x180>
401791: 90 nop
401792: eb 07 jmp 40179b <phase_3+0x180>
401794: 90 nop
401795: eb 04 jmp 40179b <phase_3+0x180>
401797: 90 nop
401798: eb 01 jmp 40179b <phase_3+0x180>
40179a: 90 nop
40179b: 0f b6 45 ef movzbl -0x11(%rbp),%eax
40179f: 38 45 ff cmp %al,-0x1(%rbp)
4017a2: 74 05 je 4017a9 <phase_3+0x18e>
4017a4: e8 9f 08 00 00 call 402048 <explode_bomb>
4017a9: 90 nop
4017aa: c9 leave
4017ab: c3 ret
00000000004017ac <_ZL4hopei>:
4017ac: f3 0f 1e fa endbr64
4017b0: 55 push %rbp
4017b1: 48 89 e5 mov %rsp,%rbp
4017b4: 48 83 ec 20 sub $0x20,%rsp
4017b8: 89 7d ec mov %edi,-0x14(%rbp)
4017bb: 83 7d ec 00 cmpl $0x0,-0x14(%rbp)
4017bf: 75 07 jne 4017c8 <_ZL4hopei+0x1c>
4017c1: b8 01 00 00 00 mov $0x1,%eax
4017c6: eb 2a jmp 4017f2 <_ZL4hopei+0x46>
4017c8: 8b 45 ec mov -0x14(%rbp),%eax
4017cb: d1 f8 sar %eax
4017cd: 89 c7 mov %eax,%edi
4017cf: e8 d8 ff ff ff call 4017ac <_ZL4hopei>
4017d4: 89 45 fc mov %eax,-0x4(%rbp)
4017d7: 8b 45 ec mov -0x14(%rbp),%eax
4017da: 83 e0 01 and $0x1,%eax
4017dd: 85 c0 test %eax,%eax
4017df: 74 0b je 4017ec <_ZL4hopei+0x40>
4017e1: 8b 45 fc mov -0x4(%rbp),%eax
4017e4: 0f af c0 imul %eax,%eax
4017e7: c1 e0 02 shl $0x2,%eax
4017ea: eb 06 jmp 4017f2 <_ZL4hopei+0x46>
4017ec: 8b 45 fc mov -0x4(%rbp),%eax
4017ef: 0f af c0 imul %eax,%eax
4017f2: c9 leave
4017f3: c3 ret
00000000004017f4 <phase_4>:
4017f4: f3 0f 1e fa endbr64
4017f8: 55 push %rbp
4017f9: 48 89 e5 mov %rsp,%rbp
4017fc: 48 83 ec 20 sub $0x20,%rsp
401800: 48 89 7d e8 mov %rdi,-0x18(%rbp)
401804: 48 8d 55 f0 lea -0x10(%rbp),%rdx
401808: 48 8b 45 e8 mov -0x18(%rbp),%rax
40180c: 48 8d 35 cd 1a 00 00 lea 0x1acd(%rip),%rsi # 4032e0 <_ZNSt8__detailL19_S_invalid_state_idE+0xa0>
401813: 48 89 c7 mov %rax,%rdi
401816: b8 00 00 00 00 mov $0x0,%eax
40181b: e8 40 f9 ff ff call 401160 <__isoc99_sscanf@plt>
401820: 83 f8 01 cmp $0x1,%eax
401823: 0f 95 c0 setne %al
401826: 84 c0 test %al,%al
401828: 74 05 je 40182f <phase_4+0x3b>
40182a: e8 19 08 00 00 call 402048 <explode_bomb>
40182f: 48 8b 45 f0 mov -0x10(%rbp),%rax
401833: 48 c1 f8 20 sar $0x20,%rax
401837: 89 45 fc mov %eax,-0x4(%rbp)
40183a: 48 8b 45 f0 mov -0x10(%rbp),%rax
40183e: 89 45 f8 mov %eax,-0x8(%rbp)
401841: 83 7d fc 00 cmpl $0x0,-0x4(%rbp)
401845: 0f 9e c2 setle %dl
401848: 83 7d fc 0e cmpl $0xe,-0x4(%rbp)
40184c: 0f 9f c0 setg %al
40184f: 09 d0 or %edx,%eax
401851: 0f b6 d0 movzbl %al,%edx
401854: 83 7d f8 00 cmpl $0x0,-0x8(%rbp)
401858: 0f 9e c0 setle %al
40185b: 0f b6 c0 movzbl %al,%eax
40185e: 09 c2 or %eax,%edx
401860: 83 7d f8 0e cmpl $0xe,-0x8(%rbp)
401864: 0f 9f c0 setg %al
401867: 0f b6 c0 movzbl %al,%eax
40186a: 09 d0 or %edx,%eax
40186c: 85 c0 test %eax,%eax
40186e: 74 05 je 401875 <phase_4+0x81>
401870: e8 d3 07 00 00 call 402048 <explode_bomb>
401875: 8b 45 fc mov -0x4(%rbp),%eax
401878: 89 c7 mov %eax,%edi
40187a: e8 2d ff ff ff call 4017ac <_ZL4hopei>
40187f: 3d 00 00 00 01 cmp $0x1000000,%eax
401884: 0f 95 c0 setne %al
401887: 84 c0 test %al,%al
401889: 74 05 je 401890 <phase_4+0x9c>
40188b: e8 b8 07 00 00 call 402048 <explode_bomb>
401890: 90 nop
401891: c9 leave
401892: c3 ret
0000000000401893 <phase_5>:
401893: f3 0f 1e fa endbr64
401897: 55 push %rbp
401898: 48 89 e5 mov %rsp,%rbp
40189b: 53 push %rbx
40189c: 48 83 ec 48 sub $0x48,%rsp
4018a0: 48 89 7d b8 mov %rdi,-0x48(%rbp)
4018a4: 48 8d 4d cc lea -0x34(%rbp),%rcx
4018a8: 48 8d 55 d0 lea -0x30(%rbp),%rdx
4018ac: 48 8b 45 b8 mov -0x48(%rbp),%rax
4018b0: 48 8d 35 2e 1a 00 00 lea 0x1a2e(%rip),%rsi # 4032e5 <_ZNSt8__detailL19_S_invalid_state_idE+0xa5>
4018b7: 48 89 c7 mov %rax,%rdi
4018ba: b8 00 00 00 00 mov $0x0,%eax
4018bf: e8 9c f8 ff ff call 401160 <__isoc99_sscanf@plt>
4018c4: 83 f8 02 cmp $0x2,%eax
4018c7: 0f 95 c0 setne %al
4018ca: 84 c0 test %al,%al
4018cc: 74 05 je 4018d3 <phase_5+0x40>
4018ce: e8 75 07 00 00 call 402048 <explode_bomb>
4018d3: 48 8d 45 d0 lea -0x30(%rbp),%rax
4018d7: 48 8d 35 0d 1a 00 00 lea 0x1a0d(%rip),%rsi # 4032eb <_ZNSt8__detailL19_S_invalid_state_idE+0xab>
4018de: 48 89 c7 mov %rax,%rdi
4018e1: e8 fa f8 ff ff call 4011e0 <strcmp@plt>
4018e6: 85 c0 test %eax,%eax
4018e8: 75 1b jne 401905 <phase_5+0x72>
4018ea: bf 10 00 00 00 mov $0x10,%edi
4018ef: e8 8c f8 ff ff call 401180 <_Znwm@plt>
4018f4: 48 89 c3 mov %rax,%rbx
4018f7: 48 89 df mov %rbx,%rdi
4018fa: e8 c9 05 00 00 call 401ec8 <_ZN10worldline1C1Ev>
4018ff: 48 89 5d e8 mov %rbx,-0x18(%rbp)
401903: eb 69 jmp 40196e <phase_5+0xdb>
401905: 48 8d 45 d0 lea -0x30(%rbp),%rax
401909: 48 8d 35 e8 19 00 00 lea 0x19e8(%rip),%rsi # 4032f8 <_ZNSt8__detailL19_S_invalid_state_idE+0xb8>
401910: 48 89 c7 mov %rax,%rdi
401913: e8 c8 f8 ff ff call 4011e0 <strcmp@plt>
401918: 85 c0 test %eax,%eax
40191a: 75 1b jne 401937 <phase_5+0xa4>
40191c: bf 10 00 00 00 mov $0x10,%edi
401921: e8 5a f8 ff ff call 401180 <_Znwm@plt>
401926: 48 89 c3 mov %rax,%rbx
401929: 48 89 df mov %rbx,%rdi
40192c: e8 17 06 00 00 call 401f48 <_ZN10worldline2C1Ev>
401931: 48 89 5d e8 mov %rbx,-0x18(%rbp)
401935: eb 37 jmp 40196e <phase_5+0xdb>
401937: 48 8d 45 d0 lea -0x30(%rbp),%rax
40193b: 48 8d 35 c3 19 00 00 lea 0x19c3(%rip),%rsi # 403305 <_ZNSt8__detailL19_S_invalid_state_idE+0xc5>
401942: 48 89 c7 mov %rax,%rdi
401945: e8 96 f8 ff ff call 4011e0 <strcmp@plt>
40194a: 85 c0 test %eax,%eax
40194c: 75 1b jne 401969 <phase_5+0xd6>
40194e: bf 10 00 00 00 mov $0x10,%edi
401953: e8 28 f8 ff ff call 401180 <_Znwm@plt>
401958: 48 89 c3 mov %rax,%rbx
40195b: 48 89 df mov %rbx,%rdi
40195e: e8 65 06 00 00 call 401fc8 <_ZN10worldline3C1Ev>
401963: 48 89 5d e8 mov %rbx,-0x18(%rbp)
401967: eb 05 jmp 40196e <phase_5+0xdb>
401969: e8 da 06 00 00 call 402048 <explode_bomb>
40196e: 48 8b 45 e8 mov -0x18(%rbp),%rax
401972: 48 8b 00 mov (%rax),%rax
401975: 48 83 c0 10 add $0x10,%rax
401979: 48 8b 08 mov (%rax),%rcx
40197c: 8b 55 cc mov -0x34(%rbp),%edx
40197f: 48 8b 45 e8 mov -0x18(%rbp),%rax
401983: 89 d6 mov %edx,%esi
401985: 48 89 c7 mov %rax,%rdi
401988: ff d1 call *%rcx
40198a: 85 c0 test %eax,%eax
40198c: 74 10 je 40199e <phase_5+0x10b>
40198e: 48 8b 45 e8 mov -0x18(%rbp),%rax
401992: 48 89 c7 mov %rax,%rdi
401995: e8 0c 05 00 00 call 401ea6 <_ZN9worldline18is_phase5_passableEv>
40199a: 85 c0 test %eax,%eax
40199c: 75 07 jne 4019a5 <phase_5+0x112>
40199e: b8 01 00 00 00 mov $0x1,%eax
4019a3: eb 05 jmp 4019aa <phase_5+0x117>
4019a5: b8 00 00 00 00 mov $0x0,%eax
4019aa: 84 c0 test %al,%al
4019ac: 74 05 je 4019b3 <phase_5+0x120>
4019ae: e8 95 06 00 00 call 402048 <explode_bomb>
4019b3: 90 nop
4019b4: 48 83 c4 48 add $0x48,%rsp
4019b8: 5b pop %rbx
4019b9: 5d pop %rbp
4019ba: c3 ret
00000000004019bb <put_val>:
4019bb: f3 0f 1e fa endbr64
4019bf: 55 push %rbp
4019c0: 48 89 e5 mov %rsp,%rbp
4019c3: 48 89 7d f8 mov %rdi,-0x8(%rbp)
4019c7: 89 75 f4 mov %esi,-0xc(%rbp)
4019ca: 48 8b 45 f8 mov -0x8(%rbp),%rax
4019ce: 48 8b 00 mov (%rax),%rax
4019d1: 8b 55 f4 mov -0xc(%rbp),%edx
4019d4: 89 10 mov %edx,(%rax)
4019d6: 48 8b 45 f8 mov -0x8(%rbp),%rax
4019da: 48 8b 00 mov (%rax),%rax
4019dd: 48 8b 50 08 mov 0x8(%rax),%rdx
4019e1: 48 8b 45 f8 mov -0x8(%rbp),%rax
4019e5: 48 89 10 mov %rdx,(%rax)
4019e8: 90 nop
4019e9: 5d pop %rbp
4019ea: c3 ret
00000000004019eb <get_val>:
4019eb: f3 0f 1e fa endbr64
4019ef: 55 push %rbp
4019f0: 48 89 e5 mov %rsp,%rbp
4019f3: 48 89 7d e8 mov %rdi,-0x18(%rbp)
4019f7: 48 8b 45 e8 mov -0x18(%rbp),%rax
4019fb: 48 8b 00 mov (%rax),%rax
4019fe: 8b 00 mov (%rax),%eax
401a00: 89 45 fc mov %eax,-0x4(%rbp)
401a03: 48 8b 45 e8 mov -0x18(%rbp),%rax
401a07: 48 8b 00 mov (%rax),%rax
401a0a: 48 8b 50 08 mov 0x8(%rax),%rdx
401a0e: 48 8b 45 e8 mov -0x18(%rbp),%rax
401a12: 48 89 10 mov %rdx,(%rax)
401a15: 8b 45 fc mov -0x4(%rbp),%eax
401a18: 5d pop %rbp
401a19: c3 ret
0000000000401a1a <build_queue>:
401a1a: f3 0f 1e fa endbr64
401a1e: 55 push %rbp
401a1f: 48 89 e5 mov %rsp,%rbp
401a22: 48 8d 05 d7 49 00 00 lea 0x49d7(%rip),%rax # 406400 <initialNodes+0x40>
401a29: 5d pop %rbp
401a2a: c3 ret
0000000000401a2b <check_answer>:
401a2b: f3 0f 1e fa endbr64
401a2f: 55 push %rbp
401a30: 48 89 e5 mov %rsp,%rbp
401a33: 48 89 7d e8 mov %rdi,-0x18(%rbp)
401a37: c7 45 f8 01 00 00 00 movl $0x1,-0x8(%rbp)
401a3e: c7 45 fc 01 00 00 00 movl $0x1,-0x4(%rbp)
401a45: 83 7d fc 05 cmpl $0x5,-0x4(%rbp)
401a49: 7f 44 jg 401a8f <check_answer+0x64>
401a4b: 8b 45 fc mov -0x4(%rbp),%eax
401a4e: 48 98 cltq
401a50: 48 8d 14 85 00 00 00 lea 0x0(,%rax,4),%rdx
401a57: 00
401a58: 48 8b 45 e8 mov -0x18(%rbp),%rax
401a5c: 48 01 d0 add %rdx,%rax
401a5f: 8b 10 mov (%rax),%edx
401a61: 8b 45 fc mov -0x4(%rbp),%eax
401a64: 48 98 cltq
401a66: 48 c1 e0 02 shl $0x2,%rax
401a6a: 48 8d 48 fc lea -0x4(%rax),%rcx
401a6e: 48 8b 45 e8 mov -0x18(%rbp),%rax
401a72: 48 01 c8 add %rcx,%rax
401a75: 8b 00 mov (%rax),%eax
401a77: 39 c2 cmp %eax,%edx
401a79: 7d 0e jge 401a89 <check_answer+0x5e>
401a7b: c7 45 f8 00 00 00 00 movl $0x0,-0x8(%rbp)
401a82: b8 00 00 00 00 mov $0x0,%eax
401a87: eb 0b jmp 401a94 <check_answer+0x69>
401a89: 83 45 fc 01 addl $0x1,-0x4(%rbp)
401a8d: eb b6 jmp 401a45 <check_answer+0x1a>
401a8f: b8 01 00 00 00 mov $0x1,%eax
401a94: 5d pop %rbp
401a95: c3 ret
0000000000401a96 <build_target>:
401a96: f3 0f 1e fa endbr64
401a9a: 55 push %rbp
401a9b: 48 89 e5 mov %rsp,%rbp
401a9e: 41 57 push %r15
401aa0: 41 56 push %r14
401aa2: 41 55 push %r13
401aa4: 41 54 push %r12
401aa6: 53 push %rbx
401aa7: 48 83 ec 78 sub $0x78,%rsp
401aab: 48 89 bd 68 ff ff ff mov %rdi,-0x98(%rbp)
401ab2: 48 c7 45 90 00 00 00 movq $0x0,-0x70(%rbp)
401ab9: 00
401aba: 48 c7 45 88 00 00 00 movq $0x0,-0x78(%rbp)
401ac1: 00
401ac2: e8 53 ff ff ff call 401a1a <build_queue>
401ac7: 48 89 45 b8 mov %rax,-0x48(%rbp)
401acb: 48 8b 45 b8 mov -0x48(%rbp),%rax
401acf: 48 89 45 90 mov %rax,-0x70(%rbp)
401ad3: 48 8b 45 b8 mov -0x48(%rbp),%rax
401ad7: 48 89 45 88 mov %rax,-0x78(%rbp)
401adb: c7 45 c4 00 00 00 00 movl $0x0,-0x3c(%rbp)
401ae2: 83 7d c4 05 cmpl $0x5,-0x3c(%rbp)
401ae6: 0f 8f 65 01 00 00 jg 401c51 <build_target+0x1bb>
401aec: 48 89 e0 mov %rsp,%rax
401aef: 48 89 c3 mov %rax,%rbx
401af2: 8b 45 c4 mov -0x3c(%rbp),%eax
401af5: 48 98 cltq
401af7: 48 8d 14 85 00 00 00 lea 0x0(,%rax,4),%rdx
401afe: 00
401aff: 48 8b 85 68 ff ff ff mov -0x98(%rbp),%rax
401b06: 48 01 d0 add %rdx,%rax
401b09: 8b 00 mov (%rax),%eax
401b0b: 89 45 b4 mov %eax,-0x4c(%rbp)
401b0e: 8b 45 b4 mov -0x4c(%rbp),%eax
401b11: 48 98 cltq
401b13: 48 83 e8 01 sub $0x1,%rax
401b17: 48 89 45 a8 mov %rax,-0x58(%rbp)
401b1b: 48 89 c2 mov %rax,%rdx
401b1e: 48 83 c2 01 add $0x1,%rdx
401b22: 49 89 d4 mov %rdx,%r12
401b25: 41 bd 00 00 00 00 mov $0x0,%r13d
401b2b: 48 89 c2 mov %rax,%rdx
401b2e: 48 83 c2 01 add $0x1,%rdx
401b32: 49 89 d6 mov %rdx,%r14
401b35: 41 bf 00 00 00 00 mov $0x0,%r15d
401b3b: 48 83 c0 01 add $0x1,%rax
401b3f: 48 8d 14 85 00 00 00 lea 0x0(,%rax,4),%rdx
401b46: 00
401b47: b8 10 00 00 00 mov $0x10,%eax
401b4c: 48 83 e8 01 sub $0x1,%rax
401b50: 48 01 d0 add %rdx,%rax
401b53: b9 10 00 00 00 mov $0x10,%ecx
401b58: ba 00 00 00 00 mov $0x0,%edx
401b5d: 48 f7 f1 div %rcx
401b60: 48 6b c0 10 imul $0x10,%rax,%rax
401b64: 48 89 c2 mov %rax,%rdx
401b67: 48 81 e2 00 f0 ff ff and $0xfffffffffffff000,%rdx
401b6e: 48 89 e6 mov %rsp,%rsi
401b71: 48 29 d6 sub %rdx,%rsi
401b74: 48 89 f2 mov %rsi,%rdx
401b77: 48 39 d4 cmp %rdx,%rsp
401b7a: 74 12 je 401b8e <build_target+0xf8>
401b7c: 48 81 ec 00 10 00 00 sub $0x1000,%rsp
401b83: 48 83 8c 24 f8 0f 00 orq $0x0,0xff8(%rsp)
401b8a: 00 00
401b8c: eb e9 jmp 401b77 <build_target+0xe1>
401b8e: 48 89 c2 mov %rax,%rdx
401b91: 81 e2 ff 0f 00 00 and $0xfff,%edx
401b97: 48 29 d4 sub %rdx,%rsp
401b9a: 48 89 c2 mov %rax,%rdx
401b9d: 81 e2 ff 0f 00 00 and $0xfff,%edx
401ba3: 48 85 d2 test %rdx,%rdx
401ba6: 74 10 je 401bb8 <build_target+0x122>
401ba8: 25 ff 0f 00 00 and $0xfff,%eax
401bad: 48 83 e8 08 sub $0x8,%rax
401bb1: 48 01 e0 add %rsp,%rax
401bb4: 48 83 08 00 orq $0x0,(%rax)
401bb8: 48 89 e0 mov %rsp,%rax
401bbb: 48 83 c0 03 add $0x3,%rax
401bbf: 48 c1 e8 02 shr $0x2,%rax
401bc3: 48 c1 e0 02 shl $0x2,%rax
401bc7: 48 89 45 a0 mov %rax,-0x60(%rbp)
401bcb: c7 45 c8 00 00 00 00 movl $0x0,-0x38(%rbp)
401bd2: 8b 45 c8 mov -0x38(%rbp),%eax
401bd5: 3b 45 b4 cmp -0x4c(%rbp),%eax
401bd8: 7d 1f jge 401bf9 <build_target+0x163>
401bda: 48 8d 45 90 lea -0x70(%rbp),%rax
401bde: 48 89 c7 mov %rax,%rdi
401be1: e8 05 fe ff ff call 4019eb <get_val>
401be6: 48 8b 55 a0 mov -0x60(%rbp),%rdx
401bea: 8b 4d c8 mov -0x38(%rbp),%ecx
401bed: 48 63 c9 movslq %ecx,%rcx
401bf0: 89 04 8a mov %eax,(%rdx,%rcx,4)
401bf3: 83 45 c8 01 addl $0x1,-0x38(%rbp)
401bf7: eb d9 jmp 401bd2 <build_target+0x13c>
401bf9: 8b 45 b4 mov -0x4c(%rbp),%eax
401bfc: 8d 50 ff lea -0x1(%rax),%edx
401bff: 48 8b 45 a0 mov -0x60(%rbp),%rax
401c03: 48 63 d2 movslq %edx,%rdx
401c06: 8b 14 90 mov (%rax,%rdx,4),%edx
401c09: 8b 45 c4 mov -0x3c(%rbp),%eax
401c0c: 48 98 cltq
401c0e: 89 94 85 70 ff ff ff mov %edx,-0x90(%rbp,%rax,4)
401c15: 8b 45 b4 mov -0x4c(%rbp),%eax
401c18: 83 e8 01 sub $0x1,%eax
401c1b: 89 45 cc mov %eax,-0x34(%rbp)
401c1e: 83 7d cc 00 cmpl $0x0,-0x34(%rbp)
401c22: 78 21 js 401c45 <build_target+0x1af>
401c24: 48 8b 45 a0 mov -0x60(%rbp),%rax
401c28: 8b 55 cc mov -0x34(%rbp),%edx
401c2b: 48 63 d2 movslq %edx,%rdx
401c2e: 8b 14 90 mov (%rax,%rdx,4),%edx
401c31: 48 8d 45 88 lea -0x78(%rbp),%rax
401c35: 89 d6 mov %edx,%esi
401c37: 48 89 c7 mov %rax,%rdi
401c3a: e8 7c fd ff ff call 4019bb <put_val>
401c3f: 83 6d cc 01 subl $0x1,-0x34(%rbp)
401c43: eb d9 jmp 401c1e <build_target+0x188>
401c45: 48 89 dc mov %rbx,%rsp
401c48: 83 45 c4 01 addl $0x1,-0x3c(%rbp)
401c4c: e9 91 fe ff ff jmp 401ae2 <build_target+0x4c>
401c51: 48 8d 85 70 ff ff ff lea -0x90(%rbp),%rax
401c58: 48 89 c7 mov %rax,%rdi
401c5b: e8 cb fd ff ff call 401a2b <check_answer>
401c60: 88 45 9f mov %al,-0x61(%rbp)
401c63: 0f b6 45 9f movzbl -0x61(%rbp),%eax
401c67: 48 8d 65 d8 lea -0x28(%rbp),%rsp
401c6b: 5b pop %rbx
401c6c: 41 5c pop %r12
401c6e: 41 5d pop %r13
401c70: 41 5e pop %r14
401c72: 41 5f pop %r15
401c74: 5d pop %rbp
401c75: c3 ret
0000000000401c76 <phase_6>:
401c76: f3 0f 1e fa endbr64
401c7a: 55 push %rbp
401c7b: 48 89 e5 mov %rsp,%rbp
401c7e: 48 83 ec 30 sub $0x30,%rsp
401c82: 48 89 7d d8 mov %rdi,-0x28(%rbp)
401c86: 48 8b 45 d8 mov -0x28(%rbp),%rax
401c8a: 48 8d 35 ff 46 00 00 lea 0x46ff(%rip),%rsi # 406390 <phase_6_nums>
401c91: 48 89 c7 mov %rax,%rdi
401c94: e8 0d 05 00 00 call 4021a6 <read_six_numbers>
401c99: 48 8d 05 f0 46 00 00 lea 0x46f0(%rip),%rax # 406390 <phase_6_nums>
401ca0: 48 89 45 f0 mov %rax,-0x10(%rbp)
401ca4: c7 45 fc 00 00 00 00 movl $0x0,-0x4(%rbp)
401cab: 83 7d fc 05 cmpl $0x5,-0x4(%rbp)
401caf: 7f 40 jg 401cf1 <phase_6+0x7b>
401cb1: 8b 45 fc mov -0x4(%rbp),%eax
401cb4: 48 98 cltq
401cb6: 48 8d 14 85 00 00 00 lea 0x0(,%rax,4),%rdx
401cbd: 00
401cbe: 48 8b 45 f0 mov -0x10(%rbp),%rax
401cc2: 48 01 d0 add %rdx,%rax
401cc5: 8b 00 mov (%rax),%eax
401cc7: 83 f8 06 cmp $0x6,%eax
401cca: 7f 1a jg 401ce6 <phase_6+0x70>
401ccc: 8b 45 fc mov -0x4(%rbp),%eax
401ccf: 48 98 cltq
401cd1: 48 8d 14 85 00 00 00 lea 0x0(,%rax,4),%rdx
401cd8: 00
401cd9: 48 8b 45 f0 mov -0x10(%rbp),%rax
401cdd: 48 01 d0 add %rdx,%rax
401ce0: 8b 00 mov (%rax),%eax
401ce2: 85 c0 test %eax,%eax
401ce4: 79 05 jns 401ceb <phase_6+0x75>
401ce6: e8 5d 03 00 00 call 402048 <explode_bomb>
401ceb: 83 45 fc 01 addl $0x1,-0x4(%rbp)
401cef: eb ba jmp 401cab <phase_6+0x35>
401cf1: 48 8b 45 f0 mov -0x10(%rbp),%rax
401cf5: 48 89 c7 mov %rax,%rdi
401cf8: e8 99 fd ff ff call 401a96 <build_target>
401cfd: 88 45 ef mov %al,-0x11(%rbp)
401d00: 0f b6 45 ef movzbl -0x11(%rbp),%eax
401d04: 83 f0 01 xor $0x1,%eax
401d07: 84 c0 test %al,%al
401d09: 74 05 je 401d10 <phase_6+0x9a>
401d0b: e8 38 03 00 00 call 402048 <explode_bomb>
401d10: 90 nop
401d11: c9 leave
401d12: c3 ret
0000000000401d13 <_Z28print_candidate_line_inorderP4node>:
401d13: f3 0f 1e fa endbr64
401d17: 55 push %rbp
401d18: 48 89 e5 mov %rsp,%rbp
401d1b: 48 83 ec 20 sub $0x20,%rsp
401d1f: 48 89 7d e8 mov %rdi,-0x18(%rbp)
401d23: 48 8b 45 e8 mov -0x18(%rbp),%rax
401d27: 48 89 45 f8 mov %rax,-0x8(%rbp)
401d2b: c7 45 f4 00 00 00 00 movl $0x0,-0xc(%rbp)
401d32: 83 7d f4 05 cmpl $0x5,-0xc(%rbp)
401d36: 7f 2b jg 401d63 <_Z28print_candidate_line_inorderP4node+0x50>
401d38: 48 8b 45 f8 mov -0x8(%rbp),%rax
401d3c: 8b 00 mov (%rax),%eax
401d3e: 89 c6 mov %eax,%esi
401d40: 48 8d 3d c9 15 00 00 lea 0x15c9(%rip),%rdi # 403310 <_ZNSt8__detailL19_S_invalid_state_idE+0xd0>
401d47: b8 00 00 00 00 mov $0x0,%eax
401d4c: e8 df f3 ff ff call 401130 <printf@plt>
401d51: 48 8b 45 f8 mov -0x8(%rbp),%rax
401d55: 48 8b 40 08 mov 0x8(%rax),%rax
401d59: 48 89 45 f8 mov %rax,-0x8(%rbp)
401d5d: 83 45 f4 01 addl $0x1,-0xc(%rbp)
401d61: eb cf jmp 401d32 <_Z28print_candidate_line_inorderP4node+0x1f>
401d63: 90 nop
401d64: c9 leave
401d65: c3 ret
0000000000401d66 <_Z25print_answer_line_inorderPi>:
401d66: f3 0f 1e fa endbr64
401d6a: 55 push %rbp
401d6b: 48 89 e5 mov %rsp,%rbp
401d6e: 48 83 ec 20 sub $0x20,%rsp
401d72: 48 89 7d e8 mov %rdi,-0x18(%rbp)
401d76: c7 45 fc 00 00 00 00 movl $0x0,-0x4(%rbp)
401d7d: 83 7d fc 05 cmpl $0x5,-0x4(%rbp)
401d81: 7f 2f jg 401db2 <_Z25print_answer_line_inorderPi+0x4c>
401d83: 8b 45 fc mov -0x4(%rbp),%eax
401d86: 48 98 cltq
401d88: 48 8d 14 85 00 00 00 lea 0x0(,%rax,4),%rdx
401d8f: 00
401d90: 48 8b 45 e8 mov -0x18(%rbp),%rax
401d94: 48 01 d0 add %rdx,%rax
401d97: 8b 00 mov (%rax),%eax
401d99: 89 c6 mov %eax,%esi
401d9b: 48 8d 3d 6e 15 00 00 lea 0x156e(%rip),%rdi # 403310 <_ZNSt8__detailL19_S_invalid_state_idE+0xd0>
401da2: b8 00 00 00 00 mov $0x0,%eax
401da7: e8 84 f3 ff ff call 401130 <printf@plt>
401dac: 83 45 fc 01 addl $0x1,-0x4(%rbp)
401db0: eb cb jmp 401d7d <_Z25print_answer_line_inorderPi+0x17>
401db2: 90 nop
401db3: c9 leave
401db4: c3 ret
0000000000401db5 <secret_phase>:
401db5: f3 0f 1e fa endbr64
401db9: 55 push %rbp
401dba: 48 89 e5 mov %rsp,%rbp
401dbd: 48 83 ec 20 sub $0x20,%rsp
401dc1: 48 89 7d e8 mov %rdi,-0x18(%rbp)
401dc5: eb ff jmp 401dc6 <secret_phase+0x11>
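# (anti-disassembly: "eb ff" jumps one byte into itself, so the linear sweep
# below misdecodes the real instruction stream until it resyncs at 401ddf)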
401dc7: c0 48 8d 3d rorb $0x3d,-0x73(%rax)
401dcb: 45 15 00 00 e8 2c rex.RB adc $0x2ce80000,%eax
401dd1: f4 hlt
401dd2: ff (bad)
401dd3: ff c7 inc %edi
401dd5: 45 fc rex.RB cld
401dd7: de c0 faddp %st,%st(0)
401dd9: ad lods %ds:(%rsi),%eax
401dda: de 48 8d fimuls -0x73(%rax)
401ddd: 55 push %rbp
401dde: f8 clc
401ddf: 48 8b 45 e8 mov -0x18(%rbp),%rax
401de3: 48 8d 35 2f 15 00 00 lea 0x152f(%rip),%rsi # 403319 <_ZNSt8__detailL19_S_invalid_state_idE+0xd9>
401dea: 48 89 c7 mov %rax,%rdi
401ded: b8 00 00 00 00 mov $0x0,%eax
401df2: e8 69 f3 ff ff call 401160 <__isoc99_sscanf@plt>
401df7: 83 f8 01 cmp $0x1,%eax
401dfa: 0f 95 c0 setne %al
401dfd: 84 c0 test %al,%al
401dff: 74 05 je 401e06 <secret_phase+0x51>
401e01: e8 42 02 00 00 call 402048 <explode_bomb>
401e06: 8b 45 f8 mov -0x8(%rbp),%eax
401e09: 33 45 fc xor -0x4(%rbp),%eax
401e0c: 89 45 f8 mov %eax,-0x8(%rbp)
401e0f: 8b 45 f8 mov -0x8(%rbp),%eax
401e12: 3d 0d f0 ad ba cmp $0xbaadf00d,%eax
401e17: 74 05 je 401e1e <secret_phase+0x69>
401e19: e8 2a 02 00 00 call 402048 <explode_bomb>
401e1e: 90 nop
401e1f: c9 leave
401e20: c3 ret
0000000000401e21 <_Z41__static_initialization_and_destruction_0ii>:
401e21: f3 0f 1e fa endbr64
401e25: 55 push %rbp
401e26: 48 89 e5 mov %rsp,%rbp
401e29: 48 83 ec 10 sub $0x10,%rsp
401e2d: 89 7d fc mov %edi,-0x4(%rbp)
401e30: 89 75 f8 mov %esi,-0x8(%rbp)
401e33: 83 7d fc 01 cmpl $0x1,-0x4(%rbp)
401e37: 75 32 jne 401e6b <_Z41__static_initialization_and_destruction_0ii+0x4a>
401e39: 81 7d f8 ff ff 00 00 cmpl $0xffff,-0x8(%rbp)
401e40: 75 29 jne 401e6b <_Z41__static_initialization_and_destruction_0ii+0x4a>
401e42: 48 8d 3d e0 45 00 00 lea 0x45e0(%rip),%rdi # 406429 <_ZStL8__ioinit>
401e49: e8 a2 f3 ff ff call 4011f0 <_ZNSt8ios_base4InitC1Ev@plt>
401e4e: 48 8d 15 53 42 00 00 lea 0x4253(%rip),%rdx # 4060a8 <__dso_handle>
401e55: 48 8d 35 cd 45 00 00 lea 0x45cd(%rip),%rsi # 406429 <_ZStL8__ioinit>
401e5c: 48 8b 05 95 41 00 00 mov 0x4195(%rip),%rax # 405ff8 <_ZNSt8ios_base4InitD1Ev@GLIBCXX_3.4>
401e63: 48 89 c7 mov %rax,%rdi
401e66: e8 e5 f2 ff ff call 401150 <__cxa_atexit@plt>
401e6b: 90 nop
401e6c: c9 leave
401e6d: c3 ret
0000000000401e6e <_GLOBAL__sub_I_phase_1_str>:
401e6e: f3 0f 1e fa endbr64
401e72: 55 push %rbp
401e73: 48 89 e5 mov %rsp,%rbp
401e76: be ff ff 00 00 mov $0xffff,%esi
401e7b: bf 01 00 00 00 mov $0x1,%edi
401e80: e8 9c ff ff ff call 401e21 <_Z41__static_initialization_and_destruction_0ii>
401e85: 5d pop %rbp
401e86: c3 ret
401e87: 90 nop
0000000000401e88 <_ZN9worldlineC1Ev>:
401e88: f3 0f 1e fa endbr64
401e8c: 55 push %rbp
401e8d: 48 89 e5 mov %rsp,%rbp
401e90: 48 89 7d f8 mov %rdi,-0x8(%rbp)
401e94: 48 8d 15 fd 3e 00 00 lea 0x3efd(%rip),%rdx # 405d98 <__cxa_pure_virtual@CXXABI_1.3>
401e9b: 48 8b 45 f8 mov -0x8(%rbp),%rax
401e9f: 48 89 10 mov %rdx,(%rax)
401ea2: 90 nop
401ea3: 5d pop %rbp
401ea4: c3 ret
401ea5: 90 nop
0000000000401ea6 <_ZN9worldline18is_phase5_passableEv>:
401ea6: f3 0f 1e fa endbr64
401eaa: 55 push %rbp
401eab: 48 89 e5 mov %rsp,%rbp
401eae: 48 89 7d f8 mov %rdi,-0x8(%rbp)
401eb2: 48 8b 45 f8 mov -0x8(%rbp),%rax
401eb6: 48 8b 40 08 mov 0x8(%rax),%rax
401eba: 48 3d 3f 42 0f 00 cmp $0xf423f,%rax
401ec0: 0f 9f c0 setg %al
401ec3: 0f b6 c0 movzbl %al,%eax
401ec6: 5d pop %rbp
401ec7: c3 ret
0000000000401ec8 <_ZN10worldline1C1Ev>:
401ec8: f3 0f 1e fa endbr64
401ecc: 55 push %rbp
401ecd: 48 89 e5 mov %rsp,%rbp
401ed0: 48 83 ec 10 sub $0x10,%rsp
401ed4: 48 89 7d f8 mov %rdi,-0x8(%rbp)
401ed8: 48 8b 45 f8 mov -0x8(%rbp),%rax
401edc: 48 89 c7 mov %rax,%rdi
401edf: e8 a4 ff ff ff call 401e88 <_ZN9worldlineC1Ev>
401ee4: 48 8d 15 85 3e 00 00 lea 0x3e85(%rip),%rdx # 405d70 <_ZTV10worldline1+0x10>
401eeb: 48 8b 45 f8 mov -0x8(%rbp),%rax
401eef: 48 89 10 mov %rdx,(%rax)
401ef2: 48 8b 45 f8 mov -0x8(%rbp),%rax
401ef6: 48 c7 40 08 90 b6 08 movq $0x8b690,0x8(%rax)
401efd: 00
401efe: 90 nop
401eff: c9 leave
401f00: c3 ret
401f01: 90 nop
0000000000401f02 <_ZN10worldline112isWorldPeaceEv>:
401f02: f3 0f 1e fa endbr64
401f06: 55 push %rbp
401f07: 48 89 e5 mov %rsp,%rbp
401f0a: 48 89 7d f8 mov %rdi,-0x8(%rbp)
401f0e: b8 00 00 00 00 mov $0x0,%eax
401f13: 5d pop %rbp
401f14: c3 ret
401f15: 90 nop
0000000000401f16 <_ZN10worldline115isEveryoneEqualEv>:
401f16: f3 0f 1e fa endbr64
401f1a: 55 push %rbp
401f1b: 48 89 e5 mov %rsp,%rbp
401f1e: 48 89 7d f8 mov %rdi,-0x8(%rbp)
401f22: b8 01 00 00 00 mov $0x1,%eax
401f27: 5d pop %rbp
401f28: c3 ret
401f29: 90 nop
0000000000401f2a <_ZN10worldline15dmailEi>:
401f2a: f3 0f 1e fa endbr64
401f2e: 55 push %rbp
401f2f: 48 89 e5 mov %rsp,%rbp
401f32: 48 89 7d f8 mov %rdi,-0x8(%rbp)
401f36: 89 75 f4 mov %esi,-0xc(%rbp)
401f39: 81 7d f4 e2 07 00 00 cmpl $0x7e2,-0xc(%rbp)
401f40: 0f 94 c0 sete %al
401f43: 0f b6 c0 movzbl %al,%eax
401f46: 5d pop %rbp
401f47: c3 ret
0000000000401f48 <_ZN10worldline2C1Ev>:
401f48: f3 0f 1e fa endbr64
401f4c: 55 push %rbp
401f4d: 48 89 e5 mov %rsp,%rbp
401f50: 48 83 ec 10 sub $0x10,%rsp
401f54: 48 89 7d f8 mov %rdi,-0x8(%rbp)
401f58: 48 8b 45 f8 mov -0x8(%rbp),%rax
401f5c: 48 89 c7 mov %rax,%rdi
401f5f: e8 24 ff ff ff call 401e88 <_ZN9worldlineC1Ev>
401f64: 48 8d 15 dd 3d 00 00 lea 0x3ddd(%rip),%rdx # 405d48 <_ZTV10worldline2+0x10>
401f6b: 48 8b 45 f8 mov -0x8(%rbp),%rax
401f6f: 48 89 10 mov %rdx,(%rax)
401f72: 48 8b 45 f8 mov -0x8(%rbp),%rax
401f76: 48 c7 40 08 d2 f8 06 movq $0x6f8d2,0x8(%rax)
401f7d: 00
401f7e: 90 nop
401f7f: c9 leave
401f80: c3 ret
401f81: 90 nop
0000000000401f82 <_ZN10worldline212isWorldPeaceEv>:
401f82: f3 0f 1e fa endbr64
401f86: 55 push %rbp
401f87: 48 89 e5 mov %rsp,%rbp
401f8a: 48 89 7d f8 mov %rdi,-0x8(%rbp)
401f8e: b8 01 00 00 00 mov $0x1,%eax
401f93: 5d pop %rbp
401f94: c3 ret
401f95: 90 nop
0000000000401f96 <_ZN10worldline215isEveryoneEqualEv>:
401f96: f3 0f 1e fa endbr64
401f9a: 55 push %rbp
401f9b: 48 89 e5 mov %rsp,%rbp
401f9e: 48 89 7d f8 mov %rdi,-0x8(%rbp)
401fa2: b8 00 00 00 00 mov $0x0,%eax
401fa7: 5d pop %rbp
401fa8: c3 ret
401fa9: 90 nop
0000000000401faa <_ZN10worldline25dmailEi>:
401faa: f3 0f 1e fa endbr64
401fae: 55 push %rbp
401faf: 48 89 e5 mov %rsp,%rbp
401fb2: 48 89 7d f8 mov %rdi,-0x8(%rbp)
401fb6: 89 75 f4 mov %esi,-0xc(%rbp)
401fb9: 81 7d f4 e5 07 00 00 cmpl $0x7e5,-0xc(%rbp)
401fc0: 0f 94 c0 sete %al
401fc3: 0f b6 c0 movzbl %al,%eax
401fc6: 5d pop %rbp
401fc7: c3 ret
0000000000401fc8 <_ZN10worldline3C1Ev>:
401fc8: f3 0f 1e fa endbr64
401fcc: 55 push %rbp
401fcd: 48 89 e5 mov %rsp,%rbp
401fd0: 48 83 ec 10 sub $0x10,%rsp
401fd4: 48 89 7d f8 mov %rdi,-0x8(%rbp)
401fd8: 48 8b 45 f8 mov -0x8(%rbp),%rax
401fdc: 48 89 c7 mov %rax,%rdi
401fdf: e8 a4 fe ff ff call 401e88 <_ZN9worldlineC1Ev>
401fe4: 48 8d 15 35 3d 00 00 lea 0x3d35(%rip),%rdx # 405d20 <_ZTV10worldline3+0x10>
401feb: 48 8b 45 f8 mov -0x8(%rbp),%rax
401fef: 48 89 10 mov %rdx,(%rax)
401ff2: 48 8b 45 f8 mov -0x8(%rbp),%rax
401ff6: 48 c7 40 08 fd 24 11 movq $0x1124fd,0x8(%rax)
401ffd: 00
401ffe: 90 nop
401fff: c9 leave
402000: c3 ret
402001: 90 nop
0000000000402002 <_ZN10worldline312isWorldPeaceEv>:
402002: f3 0f 1e fa endbr64
402006: 55 push %rbp
402007: 48 89 e5 mov %rsp,%rbp
40200a: 48 89 7d f8 mov %rdi,-0x8(%rbp)
40200e: b8 01 00 00 00 mov $0x1,%eax
402013: 5d pop %rbp
402014: c3 ret
402015: 90 nop
0000000000402016 <_ZN10worldline315isEveryoneEqualEv>:
402016: f3 0f 1e fa endbr64
40201a: 55 push %rbp
40201b: 48 89 e5 mov %rsp,%rbp
40201e: 48 89 7d f8 mov %rdi,-0x8(%rbp)
402022: b8 01 00 00 00 mov $0x1,%eax
402027: 5d pop %rbp
402028: c3 ret
402029: 90 nop
000000000040202a <_ZN10worldline35dmailEi>:
40202a: f3 0f 1e fa endbr64
40202e: 55 push %rbp
40202f: 48 89 e5 mov %rsp,%rbp
402032: 48 89 7d f8 mov %rdi,-0x8(%rbp)
402036: 89 75 f4 mov %esi,-0xc(%rbp)
402039: 81 7d f4 e7 07 00 00 cmpl $0x7e7,-0xc(%rbp)
402040: 0f 94 c0 sete %al
402043: 0f b6 c0 movzbl %al,%eax
402046: 5d pop %rbp
402047: c3 ret
0000000000402048 <explode_bomb>:
402048: f3 0f 1e fa endbr64
40204c: 55 push %rbp
40204d: 48 89 e5 mov %rsp,%rbp
402050: 48 83 ec 10 sub $0x10,%rsp
402054: 48 8d 3d 05 13 00 00 lea 0x1305(%rip),%rdi # 403360 <_ZTS9worldline+0x10>
40205b: e8 a0 f1 ff ff call 401200 <puts@plt>
402060: 48 8d 35 01 13 00 00 lea 0x1301(%rip),%rsi # 403368 <_ZTS9worldline+0x18>
402067: 48 8d 3d fc 12 00 00 lea 0x12fc(%rip),%rdi # 40336a <_ZTS9worldline+0x1a>
40206e: e8 2d f1 ff ff call 4011a0 <fopen@plt>
402073: 48 89 45 f8 mov %rax,-0x8(%rbp)
402077: 48 83 7d f8 00 cmpq $0x0,-0x8(%rbp)
40207c: 0f 94 c0 sete %al
40207f: 84 c0 test %al,%al
402081: 74 0a je 40208d <explode_bomb+0x45>
402083: bf 01 00 00 00 mov $0x1,%edi
402088: e8 23 f1 ff ff call 4011b0 <exit@plt>
40208d: 48 8b 45 f8 mov -0x8(%rbp),%rax
402091: 48 89 c7 mov %rax,%rdi
402094: e8 77 f1 ff ff call 401210 <feof@plt>
402099: 85 c0 test %eax,%eax
40209b: 0f 94 c0 sete %al
40209e: 84 c0 test %al,%al
4020a0: 74 22 je 4020c4 <explode_bomb+0x7c>
4020a2: 48 8b 45 f8 mov -0x8(%rbp),%rax
4020a6: 48 89 c7 mov %rax,%rdi
4020a9: e8 72 f1 ff ff call 401220 <fgetc@plt>
4020ae: 88 45 f7 mov %al,-0x9(%rbp)
4020b1: 80 7d f7 ff cmpb $0xff,-0x9(%rbp)
4020b5: 74 d6 je 40208d <explode_bomb+0x45>
4020b7: 0f be 45 f7 movsbl -0x9(%rbp),%eax
4020bb: 89 c7 mov %eax,%edi
4020bd: e8 0e f1 ff ff call 4011d0 <putchar@plt>
4020c2: eb c9 jmp 40208d <explode_bomb+0x45>
4020c4: 48 8b 45 f8 mov -0x8(%rbp),%rax
4020c8: 48 89 c7 mov %rax,%rdi
4020cb: e8 a0 f0 ff ff call 401170 <fclose@plt>
4020d0: bf 01 00 00 00 mov $0x1,%edi
4020d5: e8 d6 f0 ff ff call 4011b0 <exit@plt>
00000000004020da <slow_put>:
4020da: f3 0f 1e fa endbr64
4020de: 55 push %rbp
4020df: 48 89 e5 mov %rsp,%rbp
4020e2: 48 83 ec 20 sub $0x20,%rsp
4020e6: 48 89 7d e8 mov %rdi,-0x18(%rbp)
4020ea: 48 8b 45 e8 mov -0x18(%rbp),%rax
4020ee: 0f b6 00 movzbl (%rax),%eax
4020f1: 84 c0 test %al,%al
4020f3: 74 48 je 40213d <slow_put+0x63>
4020f5: 48 8b 45 e8 mov -0x18(%rbp),%rax
4020f9: 0f b6 00 movzbl (%rax),%eax
4020fc: 0f be c0 movsbl %al,%eax
4020ff: 89 c7 mov %eax,%edi
402101: e8 ca f0 ff ff call 4011d0 <putchar@plt>
402106: 48 83 45 e8 01 addq $0x1,-0x18(%rbp)
40210b: 48 c7 45 f0 00 00 00 movq $0x0,-0x10(%rbp)
402112: 00
402113: 48 c7 45 f8 80 f0 fa movq $0x2faf080,-0x8(%rbp)
40211a: 02
40211b: 48 8d 45 f0 lea -0x10(%rbp),%rax
40211f: be 00 00 00 00 mov $0x0,%esi
402124: 48 89 c7 mov %rax,%rdi
402127: e8 14 f0 ff ff call 401140 <nanosleep@plt>
40212c: 48 8b 05 ed 42 00 00 mov 0x42ed(%rip),%rax # 406420 <stdout@GLIBC_2.2.5>
402133: 48 89 c7 mov %rax,%rdi
402136: e8 55 f0 ff ff call 401190 <fflush@plt>
40213b: eb ad jmp 4020ea <slow_put+0x10>
40213d: 90 nop
40213e: c9 leave
40213f: c3 ret
0000000000402140 <slow_slow_put>:
402140: f3 0f 1e fa endbr64
402144: 55 push %rbp
402145: 48 89 e5 mov %rsp,%rbp
402148: 48 83 ec 20 sub $0x20,%rsp
40214c: 48 89 7d e8 mov %rdi,-0x18(%rbp)
402150: 48 8b 45 e8 mov -0x18(%rbp),%rax
402154: 0f b6 00 movzbl (%rax),%eax
402157: 84 c0 test %al,%al
402159: 74 48 je 4021a3 <slow_slow_put+0x63>
40215b: 48 8b 45 e8 mov -0x18(%rbp),%rax
40215f: 0f b6 00 movzbl (%rax),%eax
402162: 0f be c0 movsbl %al,%eax
402165: 89 c7 mov %eax,%edi
402167: e8 64 f0 ff ff call 4011d0 <putchar@plt>
40216c: 48 83 45 e8 01 addq $0x1,-0x18(%rbp)
402171: 48 c7 45 f0 00 00 00 movq $0x0,-0x10(%rbp)
402178: 00
402179: 48 c7 45 f8 00 e1 f5 movq $0x5f5e100,-0x8(%rbp)
402180: 05
402181: 48 8d 45 f0 lea -0x10(%rbp),%rax
402185: be 00 00 00 00 mov $0x0,%esi
40218a: 48 89 c7 mov %rax,%rdi
40218d: e8 ae ef ff ff call 401140 <nanosleep@plt>
402192: 48 8b 05 87 42 00 00 mov 0x4287(%rip),%rax # 406420 <stdout@GLIBC_2.2.5>
402199: 48 89 c7 mov %rax,%rdi
40219c: e8 ef ef ff ff call 401190 <fflush@plt>
4021a1: eb ad jmp 402150 <slow_slow_put+0x10>
4021a3: 90 nop
4021a4: c9 leave
4021a5: c3 ret
00000000004021a6 <read_six_numbers>:
4021a6: f3 0f 1e fa endbr64
4021aa: 55 push %rbp
4021ab: 48 89 e5 mov %rsp,%rbp
4021ae: 48 83 ec 20 sub $0x20,%rsp
4021b2: 48 89 7d e8 mov %rdi,-0x18(%rbp)
4021b6: 48 89 75 e0 mov %rsi,-0x20(%rbp)
4021ba: 48 8b 45 e0 mov -0x20(%rbp),%rax
4021be: 48 8d 78 14 lea 0x14(%rax),%rdi
4021c2: 48 8b 45 e0 mov -0x20(%rbp),%rax
4021c6: 48 8d 70 10 lea 0x10(%rax),%rsi
4021ca: 48 8b 45 e0 mov -0x20(%rbp),%rax
4021ce: 4c 8d 48 0c lea 0xc(%rax),%r9
4021d2: 48 8b 45 e0 mov -0x20(%rbp),%rax
4021d6: 4c 8d 40 08 lea 0x8(%rax),%r8
4021da: 48 8b 45 e0 mov -0x20(%rbp),%rax
4021de: 48 8d 48 04 lea 0x4(%rax),%rcx
4021e2: 48 8b 55 e0 mov -0x20(%rbp),%rdx
4021e6: 48 8b 45 e8 mov -0x18(%rbp),%rax
4021ea: 57 push %rdi
4021eb: 56 push %rsi
4021ec: 48 8d 35 80 11 00 00 lea 0x1180(%rip),%rsi # 403373 <_ZTS9worldline+0x23>
4021f3: 48 89 c7 mov %rax,%rdi
4021f6: b8 00 00 00 00 mov $0x0,%eax
4021fb: e8 60 ef ff ff call 401160 <__isoc99_sscanf@plt>
402200: 48 83 c4 10 add $0x10,%rsp
402204: 89 45 fc mov %eax,-0x4(%rbp)
402207: 83 7d fc 06 cmpl $0x6,-0x4(%rbp)
40220b: 75 0a jne 402217 <read_six_numbers+0x71>
40220d: 48 8b 45 e0 mov -0x20(%rbp),%rax
402211: 8b 00 mov (%rax),%eax
402213: 85 c0 test %eax,%eax
402215: 75 05 jne 40221c <read_six_numbers+0x76>
402217: e8 2c fe ff ff call 402048 <explode_bomb>
40221c: 90 nop
40221d: c9 leave
40221e: c3 ret
000000000040221f <true_ending>:
40221f: f3 0f 1e fa endbr64
402223: 55 push %rbp
402224: 48 89 e5 mov %rsp,%rbp
402227: 48 8d 3d 57 11 00 00 lea 0x1157(%rip),%rdi # 403385 <_ZTS9worldline+0x35>
40222e: e8 cd ef ff ff call 401200 <puts@plt>
402233: 48 8d 3d 5e 11 00 00 lea 0x115e(%rip),%rdi # 403398 <_ZTS9worldline+0x48>
40223a: e8 c1 ef ff ff call 401200 <puts@plt>
40223f: 48 8d 3d 7a 11 00 00 lea 0x117a(%rip),%rdi # 4033c0 <_ZTS9worldline+0x70>
402246: e8 b5 ef ff ff call 401200 <puts@plt>
40224b: 48 8d 3d 96 11 00 00 lea 0x1196(%rip),%rdi # 4033e8 <_ZTS9worldline+0x98>
402252: e8 a9 ef ff ff call 401200 <puts@plt>
402257: 48 8d 3d ba 11 00 00 lea 0x11ba(%rip),%rdi # 403418 <_ZTS9worldline+0xc8>
40225e: e8 9d ef ff ff call 401200 <puts@plt>
402263: 48 8d 3d fe 11 00 00 lea 0x11fe(%rip),%rdi # 403468 <_ZTS9worldline+0x118>
40226a: e8 91 ef ff ff call 401200 <puts@plt>
40226f: 48 8d 3d 32 12 00 00 lea 0x1232(%rip),%rdi # 4034a8 <_ZTS9worldline+0x158>
402276: e8 85 ef ff ff call 401200 <puts@plt>
40227b: 48 8d 3d 66 12 00 00 lea 0x1266(%rip),%rdi # 4034e8 <_ZTS9worldline+0x198>
402282: e8 79 ef ff ff call 401200 <puts@plt>
402287: 48 8d 3d da 12 00 00 lea 0x12da(%rip),%rdi # 403568 <_ZTS9worldline+0x218>
40228e: e8 6d ef ff ff call 401200 <puts@plt>
402293: 48 8d 3d 46 13 00 00 lea 0x1346(%rip),%rdi # 4035e0 <_ZTS9worldline+0x290>
40229a: e8 61 ef ff ff call 401200 <puts@plt>
40229f: 48 8d 3d 9a 13 00 00 lea 0x139a(%rip),%rdi # 403640 <_ZTS9worldline+0x2f0>
4022a6: e8 55 ef ff ff call 401200 <puts@plt>
4022ab: 48 8d 3d d6 13 00 00 lea 0x13d6(%rip),%rdi # 403688 <_ZTS9worldline+0x338>
4022b2: e8 49 ef ff ff call 401200 <puts@plt>
4022b7: 48 8d 3d 1a 14 00 00 lea 0x141a(%rip),%rdi # 4036d8 <_ZTS9worldline+0x388>
4022be: e8 3d ef ff ff call 401200 <puts@plt>
4022c3: 48 8d 3d 4d 14 00 00 lea 0x144d(%rip),%rdi # 403717 <_ZTS9worldline+0x3c7>
4022ca: e8 31 ef ff ff call 401200 <puts@plt>
4022cf: 48 8d 3d 62 14 00 00 lea 0x1462(%rip),%rdi # 403738 <_ZTS9worldline+0x3e8>
4022d6: e8 25 ef ff ff call 401200 <puts@plt>
4022db: 48 8d 3d 7b 14 00 00 lea 0x147b(%rip),%rdi # 40375d <_ZTS9worldline+0x40d>
4022e2: e8 19 ef ff ff call 401200 <puts@plt>
4022e7: 48 8d 3d 8a 14 00 00 lea 0x148a(%rip),%rdi # 403778 <_ZTS9worldline+0x428>
4022ee: e8 0d ef ff ff call 401200 <puts@plt>
4022f3: 90 nop
4022f4: 5d pop %rbp
4022f5: c3 ret
4022f6: 66 2e 0f 1f 84 00 00 cs nopw 0x0(%rax,%rax,1)
4022fd: 00 00 00
0000000000402300 <__libc_csu_init>:
402300: f3 0f 1e fa endbr64
402304: 41 57 push %r15
402306: 4c 8d 3d eb 39 00 00 lea 0x39eb(%rip),%r15 # 405cf8 <__frame_dummy_init_array_entry>
40230d: 41 56 push %r14
40230f: 49 89 d6 mov %rdx,%r14
402312: 41 55 push %r13
402314: 49 89 f5 mov %rsi,%r13
402317: 41 54 push %r12
402319: 41 89 fc mov %edi,%r12d
40231c: 55 push %rbp
40231d: 48 8d 2d e4 39 00 00 lea 0x39e4(%rip),%rbp # 405d08 <__do_global_dtors_aux_fini_array_entry>
402324: 53 push %rbx
402325: 4c 29 fd sub %r15,%rbp
402328: 48 83 ec 08 sub $0x8,%rsp
40232c: e8 cf ec ff ff call 401000 <_init>
402331: 48 c1 fd 03 sar $0x3,%rbp
402335: 74 1f je 402356 <__libc_csu_init+0x56>
402337: 31 db xor %ebx,%ebx
402339: 0f 1f 80 00 00 00 00 nopl 0x0(%rax)
402340: 4c 89 f2 mov %r14,%rdx
402343: 4c 89 ee mov %r13,%rsi
402346: 44 89 e7 mov %r12d,%edi
402349: 41 ff 14 df call *(%r15,%rbx,8)
40234d: 48 83 c3 01 add $0x1,%rbx
402351: 48 39 dd cmp %rbx,%rbp
402354: 75 ea jne 402340 <__libc_csu_init+0x40>
402356: 48 83 c4 08 add $0x8,%rsp
40235a: 5b pop %rbx
40235b: 5d pop %rbp
40235c: 41 5c pop %r12
40235e: 41 5d pop %r13
402360: 41 5e pop %r14
402362: 41 5f pop %r15
402364: c3 ret
402365: 66 66 2e 0f 1f 84 00 data16 cs nopw 0x0(%rax,%rax,1)
40236c: 00 00 00 00
0000000000402370 <__libc_csu_fini>:
402370: f3 0f 1e fa endbr64
402374: c3 ret
Disassembly of section .fini:
0000000000402378 <_fini>:
402378: f3 0f 1e fa endbr64
40237c: 48 83 ec 08 sub $0x8,%rsp
402380: 48 83 c4 08 add $0x8,%rsp
402384: c3 ret
|
KyonQi/rustCore
| 3,736
|
os/src/link_app.S
|
.align 3
.section .data
.global _num_app
_num_app:
.quad 16
.quad app_0_start
.quad app_1_start
.quad app_2_start
.quad app_3_start
.quad app_4_start
.quad app_5_start
.quad app_6_start
.quad app_7_start
.quad app_8_start
.quad app_9_start
.quad app_10_start
.quad app_11_start
.quad app_12_start
.quad app_13_start
.quad app_14_start
.quad app_15_start
.quad app_15_end
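# 17 address entries in total: app i's image spans [app_i_start, app_(i+1)_start),
# with the trailing app_15_end closing the last app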
.global _app_names
_app_names:
.string "exit"
.string "fantastic_text"
.string "forkexec"
.string "forktest"
.string "forktest2"
.string "forktest_simple"
.string "forktree"
.string "hello_world"
.string "initproc"
.string "matrix"
.string "sleep"
.string "sleep_simple"
.string "stack_overflow"
.string "user_shell"
.string "usertests"
.string "yeild"
.section .data
.global app_0_start
.global app_0_end
.align 3
app_0_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/exit"
app_0_end:
.section .data
.global app_1_start
.global app_1_end
.align 3
app_1_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/fantastic_text"
app_1_end:
.section .data
.global app_2_start
.global app_2_end
.align 3
app_2_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/forkexec"
app_2_end:
.section .data
.global app_3_start
.global app_3_end
.align 3
app_3_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/forktest"
app_3_end:
.section .data
.global app_4_start
.global app_4_end
.align 3
app_4_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/forktest2"
app_4_end:
.section .data
.global app_5_start
.global app_5_end
.align 3
app_5_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/forktest_simple"
app_5_end:
.section .data
.global app_6_start
.global app_6_end
.align 3
app_6_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/forktree"
app_6_end:
.section .data
.global app_7_start
.global app_7_end
.align 3
app_7_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/hello_world"
app_7_end:
.section .data
.global app_8_start
.global app_8_end
.align 3
app_8_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/initproc"
app_8_end:
.section .data
.global app_9_start
.global app_9_end
.align 3
app_9_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/matrix"
app_9_end:
.section .data
.global app_10_start
.global app_10_end
.align 3
app_10_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/sleep"
app_10_end:
.section .data
.global app_11_start
.global app_11_end
.align 3
app_11_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/sleep_simple"
app_11_end:
.section .data
.global app_12_start
.global app_12_end
.align 3
app_12_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/stack_overflow"
app_12_end:
.section .data
.global app_13_start
.global app_13_end
.align 3
app_13_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/user_shell"
app_13_end:
.section .data
.global app_14_start
.global app_14_end
.align 3
app_14_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/usertests"
app_14_end:
.section .data
.global app_15_start
.global app_15_end
.align 3
app_15_start:
.incbin "../user/target/riscv64gc-unknown-none-elf/release/yeild"
app_15_end:
|
KyonQi/rustCore
| 1,690
|
os/src/trap/trap.S
|
.altmacro
.macro SAVE_GP n
sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
ld x\n, \n*8(sp)
.endm
.section .text.trampoline
.globl __alltraps
.globl __restore
.align 2
__alltraps:
csrrw sp, sscratch, sp
# sp->kernel stack, sscratch->user stack
# now sp->*TrapContext in user space, sscratch->user stack
    # save general purpose registers
sd x1, 1*8(sp)
# skip sp(x2), we will save it later by sscratch
sd x3, 3*8(sp)
# skip tp(x4) since almost no use
# save x5-x31
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
# using t0,t1,t2 to save sstatus, sepc and sscratch
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*8(sp)
sd t1, 33*8(sp)
csrr t2, sscratch
# note that sscratch points to user stack now
sd t2, 2*8(sp)
# load kernel_satp into t0
ld t0, 34*8(sp)
# load trap_handler into t1
ld t1, 36*8(sp)
# move to kernel_sp
ld sp, 35*8(sp)
# switch to kernel space
csrw satp, t0
sfence.vma
# jump to trap_handler
jr t1
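    # note: the handler address must come from the TrapContext (loaded above)
    # and be reached with a register jump: after the satp switch, this
    # trampoline page is the only code mapped at the same virtual address in
    # both address spaces, so a PC-relative call into the kernel would miss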
__restore:
# a0: *TrapContext in user space(Constant); a1: user space token
# switch to user space
csrw satp, a1
sfence.vma
csrw sscratch, a0
mv sp, a0
    # now sp points to TrapContext in user space, start restoring based on it
# restore sstatus/sepc
ld t0, 32*8(sp)
ld t1, 33*8(sp)
csrw sstatus, t0
csrw sepc, t1
# restore general purpose registers except x0/sp/tp
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
# back to user stack
ld sp, 2*8(sp)
sret
|
lab47/lz4decode
| 4,770
|
decode_arm.s
|
// +build gc
// +build !noasm
#include "go_asm.h"
#include "textflag.h"
// Register allocation.
#define dst R0
#define dstorig R1
#define src R2
#define dstend R3
#define srcend R4
#define match R5 // Match address.
#define dictend R6
#define token R7
#define len R8 // Literal and match lengths.
#define offset R7 // Match offset; overlaps with token.
#define tmp1 R9
#define tmp2 R11
#define tmp3 R12
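// R10 (the goroutine pointer g in Go's ARM ABI) and R13 (SP) are left untouched.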
// func decodeBlock(dst, src, dict []byte) int
TEXT ·decodeBlock(SB), NOFRAME+NOSPLIT, $-4-40
MOVW dst_base +0(FP), dst
MOVW dst_len +4(FP), dstend
MOVW src_base +12(FP), src
MOVW src_len +16(FP), srcend
CMP $0, srcend
BEQ shortSrc
ADD dst, dstend
ADD src, srcend
MOVW dst, dstorig
loop:
// Read token. Extract literal length.
MOVBU.P 1(src), token
MOVW token >> 4, len
CMP $15, len
BNE readLitlenDone
readLitlenLoop:
CMP src, srcend
BEQ shortSrc
MOVBU.P 1(src), tmp1
ADD.S tmp1, len
BVS shortDst
CMP $255, tmp1
BEQ readLitlenLoop
readLitlenDone:
CMP $0, len
BEQ copyLiteralDone
// Bounds check dst+len and src+len.
ADD.S dst, len, tmp1
ADD.CC.S src, len, tmp2
BCS shortSrc
CMP dstend, tmp1
//BHI shortDst // Uncomment for distinct error codes.
CMP.LS srcend, tmp2
BHI shortSrc
// Copy literal.
CMP $4, len
BLO copyLiteralFinish
// Copy 0-3 bytes until src is aligned.
TST $1, src
MOVBU.NE.P 1(src), tmp1
MOVB.NE.P tmp1, 1(dst)
SUB.NE $1, len
TST $2, src
MOVHU.NE.P 2(src), tmp2
MOVB.NE.P tmp2, 1(dst)
MOVW.NE tmp2 >> 8, tmp1
MOVB.NE.P tmp1, 1(dst)
SUB.NE $2, len
B copyLiteralLoopCond
copyLiteralLoop:
// Aligned load, unaligned write.
MOVW.P 4(src), tmp1
MOVW tmp1 >> 8, tmp2
MOVB tmp2, 1(dst)
MOVW tmp1 >> 16, tmp3
MOVB tmp3, 2(dst)
MOVW tmp1 >> 24, tmp2
MOVB tmp2, 3(dst)
MOVB.P tmp1, 4(dst)
copyLiteralLoopCond:
// Loop until len-4 < 0.
SUB.S $4, len
BPL copyLiteralLoop
copyLiteralFinish:
// Copy remaining 0-3 bytes.
// At this point, len may be < 0, but len&3 is still accurate.
TST $1, len
MOVB.NE.P 1(src), tmp3
MOVB.NE.P tmp3, 1(dst)
TST $2, len
MOVB.NE.P 2(src), tmp1
MOVB.NE.P tmp1, 2(dst)
MOVB.NE -1(src), tmp2
MOVB.NE tmp2, -1(dst)
copyLiteralDone:
// Initial part of match length.
// This frees up the token register for reuse as offset.
AND $15, token, len
CMP src, srcend
BEQ end
// Read offset.
ADD.S $2, src
BCS shortSrc
CMP srcend, src
BHI shortSrc
MOVBU -2(src), offset
MOVBU -1(src), tmp1
ORR.S tmp1 << 8, offset
BEQ corrupt
// Read rest of match length.
CMP $15, len
BNE readMatchlenDone
readMatchlenLoop:
CMP src, srcend
BEQ shortSrc
MOVBU.P 1(src), tmp1
ADD.S tmp1, len
BVS shortDst
CMP $255, tmp1
BEQ readMatchlenLoop
readMatchlenDone:
// Bounds check dst+len+minMatch.
ADD.S dst, len, tmp1
ADD.CC.S $const_minMatch, tmp1
BCS shortDst
CMP dstend, tmp1
BHI shortDst
RSB dst, offset, match
CMP dstorig, match
BGE copyMatch4
// match < dstorig means the match starts in the dictionary,
// at len(dict) - offset + (dst - dstorig).
MOVW dict_base+24(FP), match
MOVW dict_len +28(FP), dictend
ADD $const_minMatch, len
RSB dst, dstorig, tmp1
RSB dictend, offset, tmp2
ADD.S tmp2, tmp1
BMI shortDict
ADD match, dictend
ADD tmp1, match
copyDict:
MOVBU.P 1(match), tmp1
MOVB.P tmp1, 1(dst)
SUB.S $1, len
CMP.NE match, dictend
BNE copyDict
// If the match extends beyond the dictionary, the rest is at dstorig.
CMP $0, len
BEQ copyMatchDone
MOVW dstorig, match
B copyMatch
// Copy a regular match.
// Since len+minMatch is at least four, we can do a 4× unrolled
// byte copy loop. Using MOVW instead of four byte loads is faster,
// but to remain portable we'd have to align match first, which is
// too expensive. By alternating loads and stores, we also handle
// the case offset < 4.
copyMatch4:
SUB.S $4, len
MOVBU.P 4(match), tmp1
MOVB.P tmp1, 4(dst)
MOVBU -3(match), tmp2
MOVB tmp2, -3(dst)
MOVBU -2(match), tmp3
MOVB tmp3, -2(dst)
MOVBU -1(match), tmp1
MOVB tmp1, -1(dst)
BPL copyMatch4
// Restore len, which is now negative.
ADD.S $4, len
BEQ copyMatchDone
copyMatch:
// Finish with a byte-at-a-time copy.
SUB.S $1, len
MOVBU.P 1(match), tmp2
MOVB.P tmp2, 1(dst)
BNE copyMatch
copyMatchDone:
CMP src, srcend
BNE loop
end:
CMP $0, len
BNE corrupt
SUB dstorig, dst, tmp1
MOVW tmp1, ret+36(FP)
RET
// The error cases have distinct labels so we can put different
// return codes here when debugging, or if the error returns need to
// be changed.
shortDict:
shortDst:
shortSrc:
corrupt:
MOVW $-1, tmp1
MOVW tmp1, ret+36(FP)
RET
|
lab47/lz4decode
| 4,989
|
decode_arm64.s
|
// +build gc
// +build !noasm
// This implementation assumes that strict alignment checking is turned off.
// The Go compiler makes the same assumption.
#include "go_asm.h"
#include "textflag.h"
// Register allocation.
#define dst R0
#define dstorig R1
#define src R2
#define dstend R3
#define dstend16 R4 // dstend - 16
#define srcend R5
#define srcend16 R6 // srcend - 16
#define match R7 // Match address.
#define dict R8
#define dictlen R9
#define dictend R10
#define token R11
#define len R12 // Literal and match lengths.
#define lenRem R13
#define offset R14 // Match offset.
#define tmp1 R15
#define tmp2 R16
#define tmp3 R17
#define tmp4 R19
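// R18 (the platform register) and R28 (the goroutine pointer g in Go's
// ARM64 ABI) are left untouched, which is why tmp4 skips to R19.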
// func decodeBlock(dst, src, dict []byte) int
TEXT ·decodeBlock(SB), NOFRAME+NOSPLIT, $0-80
LDP dst_base+0(FP), (dst, dstend)
ADD dst, dstend
MOVD dst, dstorig
LDP src_base+24(FP), (src, srcend)
CBZ srcend, shortSrc
ADD src, srcend
// dstend16 = max(dstend-16, 0) and similarly for srcend16.
SUBS $16, dstend, dstend16
CSEL LO, ZR, dstend16, dstend16
SUBS $16, srcend, srcend16
CSEL LO, ZR, srcend16, srcend16
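	// SUBS sets the borrow (LO) condition when the subtraction underflows,
	// and CSEL then substitutes zero, implementing the max(x-16, 0) clamp.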
LDP dict_base+48(FP), (dict, dictlen)
ADD dict, dictlen, dictend
loop:
// Read token. Extract literal length.
MOVBU.P 1(src), token
LSR $4, token, len
CMP $15, len
BNE readLitlenDone
readLitlenLoop:
CMP src, srcend
BEQ shortSrc
MOVBU.P 1(src), tmp1
ADDS tmp1, len
BVS shortDst
CMP $255, tmp1
BEQ readLitlenLoop
readLitlenDone:
CBZ len, copyLiteralDone
// Bounds check dst+len and src+len.
ADDS dst, len, tmp1
BCS shortSrc
ADDS src, len, tmp2
BCS shortSrc
CMP dstend, tmp1
BHI shortDst
CMP srcend, tmp2
BHI shortSrc
// Copy literal.
SUBS $16, len
BLO copyLiteralShort
copyLiteralLoop:
LDP.P 16(src), (tmp1, tmp2)
STP.P (tmp1, tmp2), 16(dst)
SUBS $16, len
BPL copyLiteralLoop
// Copy (final part of) literal of length 0-15.
// If we have >=16 bytes left in src and dst, just copy 16 bytes.
copyLiteralShort:
CMP dstend16, dst
CCMP LO, src, srcend16, $0b0010 // 0010 = preserve carry (LO).
BHS copyLiteralShortEnd
AND $15, len
LDP (src), (tmp1, tmp2)
ADD len, src
STP (tmp1, tmp2), (dst)
ADD len, dst
B copyLiteralDone
// Safe but slow copy near the end of src, dst.
copyLiteralShortEnd:
TBZ $3, len, 3(PC)
MOVD.P 8(src), tmp1
MOVD.P tmp1, 8(dst)
TBZ $2, len, 3(PC)
MOVW.P 4(src), tmp2
MOVW.P tmp2, 4(dst)
TBZ $1, len, 3(PC)
MOVH.P 2(src), tmp3
MOVH.P tmp3, 2(dst)
TBZ $0, len, 3(PC)
MOVBU.P 1(src), tmp4
MOVB.P tmp4, 1(dst)
copyLiteralDone:
// Initial part of match length.
AND $15, token, len
CMP src, srcend
BEQ end
// Read offset.
ADDS $2, src
BCS shortSrc
CMP srcend, src
BHI shortSrc
MOVHU -2(src), offset
CBZ offset, corrupt
// Read rest of match length.
CMP $15, len
BNE readMatchlenDone
readMatchlenLoop:
CMP src, srcend
BEQ shortSrc
MOVBU.P 1(src), tmp1
ADDS tmp1, len
BVS shortDst
CMP $255, tmp1
BEQ readMatchlenLoop
readMatchlenDone:
ADD $const_minMatch, len
// Bounds check dst+len.
ADDS dst, len, tmp2
BCS shortDst
CMP dstend, tmp2
BHI shortDst
SUB offset, dst, match
CMP dstorig, match
BHS copyMatchTry8
// match < dstorig means the match starts in the dictionary,
// at len(dict) - offset + (dst - dstorig).
SUB dstorig, dst, tmp1
SUB offset, dictlen, tmp2
ADDS tmp2, tmp1
BMI shortDict
ADD dict, tmp1, match
copyDict:
MOVBU.P 1(match), tmp3
MOVB.P tmp3, 1(dst)
SUBS $1, len
CCMP NE, dictend, match, $0b0100 // 0100 sets the Z (EQ) flag.
BNE copyDict
CBZ len, copyMatchDone
// If the match extends beyond the dictionary, the rest is at dstorig.
// Recompute the offset for the next check.
MOVD dstorig, match
SUB dstorig, dst, offset
copyMatchTry8:
// Copy doublewords if both len and offset are at least eight.
// A 16-at-a-time loop doesn't provide a further speedup.
CMP $8, len
CCMP HS, offset, $8, $0
BLO copyMatchTry4
AND $7, len, lenRem
SUB $8, len
copyMatchLoop8:
MOVD.P 8(match), tmp1
MOVD.P tmp1, 8(dst)
SUBS $8, len
BPL copyMatchLoop8
MOVD (match)(len), tmp2 // match+len == match+lenRem-8.
ADD lenRem, dst
MOVD $0, len
MOVD tmp2, -8(dst)
B copyMatchDone
copyMatchTry4:
// Copy words if both len and offset are at least four.
CMP $4, len
CCMP HS, offset, $4, $0
BLO copyMatchLoop1
MOVWU.P 4(match), tmp2
MOVWU.P tmp2, 4(dst)
SUBS $4, len
BEQ copyMatchDone
copyMatchLoop1:
// Byte-at-a-time copy for small offsets <= 3.
MOVBU.P 1(match), tmp2
MOVB.P tmp2, 1(dst)
SUBS $1, len
BNE copyMatchLoop1
copyMatchDone:
CMP src, srcend
BNE loop
end:
CBNZ len, corrupt
SUB dstorig, dst, tmp1
MOVD tmp1, ret+72(FP)
RET
// The error cases have distinct labels so we can put different
// return codes here when debugging, or if the error returns need to
// be changed.
shortDict:
shortDst:
shortSrc:
corrupt:
MOVD $-1, tmp1
MOVD tmp1, ret+72(FP)
RET
|
lab47/lz4decode
| 8,025
|
decode_amd64.s
|
// +build !appengine
// +build gc
// +build !noasm
#include "go_asm.h"
#include "textflag.h"
// AX scratch
// BX scratch
// CX literal and match lengths
// DX token, match offset
//
// DI &dst
// SI &src
// R8 &dst + len(dst)
// R9 &src + len(src)
// R11 &dst (original base, kept for bounds and dictionary checks)
// R12 short output end
// R13 short input end
// R14 &dict
// R15 len(dict)
// func decodeBlock(dst, src, dict []byte) int
TEXT ·decodeBlock(SB), NOSPLIT, $48-80
MOVQ dst_base+0(FP), DI
MOVQ DI, R11
MOVQ dst_len+8(FP), R8
ADDQ DI, R8
MOVQ src_base+24(FP), SI
MOVQ src_len+32(FP), R9
CMPQ R9, $0
JE err_corrupt
ADDQ SI, R9
MOVQ dict_base+48(FP), R14
MOVQ dict_len+56(FP), R15
// shortcut ends
// short output end
MOVQ R8, R12
SUBQ $32, R12
// short input end
MOVQ R9, R13
SUBQ $16, R13
XORL CX, CX
loop:
// token := uint32(src[si])
MOVBLZX (SI), DX
INCQ SI
// lit_len = token >> 4
// if lit_len > 0
// CX = lit_len
MOVL DX, CX
SHRL $4, CX
// if lit_len != 0xF
CMPL CX, $0xF
JEQ lit_len_loop
CMPQ DI, R12
JAE copy_literal
CMPQ SI, R13
JAE copy_literal
// copy shortcut
// A two-stage shortcut for the most common case:
// 1) If the literal length is 0..14, and there is enough space,
// enter the shortcut and copy 16 bytes on behalf of the literals
// (in the fast mode, only 8 bytes can be safely copied this way).
// 2) Further if the match length is 4..18, copy 18 bytes in a similar
// manner; but we ensure that there's enough space in the output for
// those 18 bytes earlier, upon entering the shortcut (in other words,
// there is a combined check for both stages).
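	// The DI < R12 (= dstend-32) and SI < R13 (= srcend-16) checks above are
	// what make the unconditional 16-byte copy below (and the later 18-byte
	// match copy) safe without further bounds checks.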
// copy literal
MOVOU (SI), X0
MOVOU X0, (DI)
ADDQ CX, DI
ADDQ CX, SI
MOVL DX, CX
ANDL $0xF, CX
// The second stage: prepare for match copying, decode full info.
// If it doesn't work out, the info won't be wasted.
// offset := uint16(data[:2])
MOVWLZX (SI), DX
TESTL DX, DX
JE err_corrupt
ADDQ $2, SI
JC err_short_buf
MOVQ DI, AX
SUBQ DX, AX
JC err_corrupt
CMPQ AX, DI
JA err_short_buf
// if we can't do the second stage then jump straight to read the
// match length, we already have the offset.
CMPL CX, $0xF
JEQ match_len_loop_pre
CMPL DX, $8
JLT match_len_loop_pre
CMPQ AX, R11
JB match_len_loop_pre
// memcpy(op + 0, match + 0, 8);
MOVQ (AX), BX
MOVQ BX, (DI)
// memcpy(op + 8, match + 8, 8);
MOVQ 8(AX), BX
MOVQ BX, 8(DI)
// memcpy(op +16, match +16, 2);
MOVW 16(AX), BX
MOVW BX, 16(DI)
LEAQ const_minMatch(DI)(CX*1), DI
// shortcut complete, load next token
JMP loopcheck
// Read the rest of the literal length:
// do { BX = src[si++]; lit_len += BX } while (BX == 0xFF).
lit_len_loop:
CMPQ SI, R9
JAE err_short_buf
MOVBLZX (SI), BX
INCQ SI
ADDQ BX, CX
CMPB BX, $0xFF
JE lit_len_loop
copy_literal:
// bounds check src and dst
MOVQ SI, AX
ADDQ CX, AX
JC err_short_buf
CMPQ AX, R9
JA err_short_buf
MOVQ DI, BX
ADDQ CX, BX
JC err_short_buf
CMPQ BX, R8
JA err_short_buf
// Copy literals of <=48 bytes through the XMM registers.
CMPQ CX, $48
JGT memmove_lit
// if len(dst[di:]) < 48
MOVQ R8, AX
SUBQ DI, AX
CMPQ AX, $48
JLT memmove_lit
// if len(src[si:]) < 48
MOVQ R9, BX
SUBQ SI, BX
CMPQ BX, $48
JLT memmove_lit
MOVOU (SI), X0
MOVOU 16(SI), X1
MOVOU 32(SI), X2
MOVOU X0, (DI)
MOVOU X1, 16(DI)
MOVOU X2, 32(DI)
ADDQ CX, SI
ADDQ CX, DI
JMP finish_lit_copy
memmove_lit:
// memmove(to, from, len)
MOVQ DI, 0(SP)
MOVQ SI, 8(SP)
MOVQ CX, 16(SP)
// Spill registers. Increment SI, DI now so we don't need to save CX.
ADDQ CX, DI
ADDQ CX, SI
MOVQ DI, 24(SP)
MOVQ SI, 32(SP)
MOVL DX, 40(SP)
CALL runtime·memmove(SB)
// restore registers
MOVQ 24(SP), DI
MOVQ 32(SP), SI
MOVL 40(SP), DX
// recalc initial values
MOVQ dst_base+0(FP), R8
MOVQ R8, R11
ADDQ dst_len+8(FP), R8
MOVQ src_base+24(FP), R9
ADDQ src_len+32(FP), R9
MOVQ dict_base+48(FP), R14
MOVQ dict_len+56(FP), R15
MOVQ R8, R12
SUBQ $32, R12
MOVQ R9, R13
SUBQ $16, R13
finish_lit_copy:
// CX := mLen
// free up DX to use for offset
MOVL DX, CX
ANDL $0xF, CX
CMPQ SI, R9
JAE end
// offset
// si += 2
// DX := int(src[si-2]) | int(src[si-1])<<8
ADDQ $2, SI
JC err_short_buf
CMPQ SI, R9
JA err_short_buf
MOVWQZX -2(SI), DX
// 0 offset is invalid
TESTL DX, DX
JEQ err_corrupt
match_len_loop_pre:
// if mlen != 0xF
CMPB CX, $0xF
JNE copy_match
// do { BX = src[si++]; mlen += BX } while (BX == 0xFF).
match_len_loop:
CMPQ SI, R9
JAE err_short_buf
MOVBLZX (SI), BX
INCQ SI
ADDQ BX, CX
CMPB BX, $0xFF
JE match_len_loop
copy_match:
ADDQ $const_minMatch, CX
// check we have match_len bytes left in dst
// di+match_len < len(dst)
MOVQ DI, AX
ADDQ CX, AX
JC err_short_buf
CMPQ AX, R8
JA err_short_buf
// DX = offset
// CX = match_len
// BX = &dst + (di - offset)
MOVQ DI, BX
SUBQ DX, BX
// check BX is within dst
// if BX < &dst
JC copy_match_from_dict
CMPQ BX, R11
JBE copy_match_from_dict
// if offset + match_len < di
LEAQ (BX)(CX*1), AX
CMPQ DI, AX
JA copy_interior_match
// AX := len(dst[:di])
// MOVQ DI, AX
// SUBQ R11, AX
// copy 16 bytes at a time
// if di-offset < 16 copy 16-(di-offset) bytes to di
// then do the remaining
copy_match_loop:
// for match_len >= 0
// dst[di] = dst[i]
// di++
// i++
MOVB (BX), AX
MOVB AX, (DI)
INCQ DI
INCQ BX
DECQ CX
JNZ copy_match_loop
JMP loopcheck
copy_interior_match:
CMPQ CX, $16
JGT memmove_match
// if len(dst[di:]) < 16
MOVQ R8, AX
SUBQ DI, AX
CMPQ AX, $16
JLT memmove_match
MOVOU (BX), X0
MOVOU X0, (DI)
ADDQ CX, DI
XORL CX, CX
JMP loopcheck
copy_match_from_dict:
// CX = match_len
// BX = &dst + (di - offset)
// AX = offset - di = dict_bytes_available => count of bytes potentially covered by the dictionary
MOVQ R11, AX
SUBQ BX, AX
// BX = len(dict) - dict_bytes_available
MOVQ R15, BX
SUBQ AX, BX
JS err_short_dict
ADDQ R14, BX
// if match_len > dict_bytes_available, match fits entirely within external dictionary : just copy
CMPQ CX, AX
JLT memmove_match
// The match stretches over the dictionary and our block
// 1) copy what comes from the dictionary
// AX = dict_bytes_available = copy_size
// BX = &dict_end - copy_size
// CX = match_len
// memmove(to, from, len)
MOVQ DI, 0(SP)
MOVQ BX, 8(SP)
MOVQ AX, 16(SP)
// store extra stuff we want to recover
// spill
MOVQ DI, 24(SP)
MOVQ SI, 32(SP)
MOVQ CX, 40(SP)
CALL runtime·memmove(SB)
// restore registers
MOVQ 16(SP), AX // copy_size
MOVQ 24(SP), DI
MOVQ 32(SP), SI
MOVQ 40(SP), CX // match_len
// recalc initial values
MOVQ dst_base+0(FP), R8
MOVQ R8, R11 // TODO: make these sensible numbers
ADDQ dst_len+8(FP), R8
MOVQ src_base+24(FP), R9
ADDQ src_len+32(FP), R9
MOVQ dict_base+48(FP), R14
MOVQ dict_len+56(FP), R15
MOVQ R8, R12
SUBQ $32, R12
MOVQ R9, R13
SUBQ $16, R13
// di+=copy_size
ADDQ AX, DI
// 2) copy the rest from the current block
// CX = match_len - copy_size = rest_size
SUBQ AX, CX
MOVQ R11, BX
// check if we have a copy overlap
// AX = &dst + rest_size
MOVQ CX, AX
ADDQ BX, AX
// if &dst + rest_size > di, copy byte by byte
CMPQ AX, DI
JA copy_match_loop
memmove_match:
// memmove(to, from, len)
MOVQ DI, 0(SP)
MOVQ BX, 8(SP)
MOVQ CX, 16(SP)
// Spill registers. Increment DI now so we don't need to save CX.
ADDQ CX, DI
MOVQ DI, 24(SP)
MOVQ SI, 32(SP)
CALL runtime·memmove(SB)
// restore registers
MOVQ 24(SP), DI
MOVQ 32(SP), SI
// recalc initial values
MOVQ dst_base+0(FP), R8
MOVQ R8, R11 // TODO: make these sensible numbers
ADDQ dst_len+8(FP), R8
MOVQ src_base+24(FP), R9
ADDQ src_len+32(FP), R9
MOVQ R8, R12
SUBQ $32, R12
MOVQ R9, R13
SUBQ $16, R13
MOVQ dict_base+48(FP), R14
MOVQ dict_len+56(FP), R15
XORL CX, CX
loopcheck:
// for si < len(src)
CMPQ SI, R9
JB loop
end:
// Remaining length must be zero.
TESTQ CX, CX
JNE err_corrupt
SUBQ R11, DI
MOVQ DI, ret+72(FP)
RET
err_corrupt:
MOVQ $-1, ret+72(FP)
RET
err_short_buf:
MOVQ $-2, ret+72(FP)
RET
err_short_dict:
MOVQ $-3, ret+72(FP)
RET
|
laduiw/learningOS_rcore
| 1,488
|
os/src/trap/trap.S
|
.altmacro
.macro SAVE_GP n
sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
ld x\n, \n*8(sp)
.endm
.section .text
.globl __alltraps
.globl __restore
.align 2
__alltraps:
csrrw sp, sscratch, sp
# now sp->kernel stack, sscratch->user stack
# allocate a TrapContext on kernel stack
addi sp, sp, -34*8
# save general-purpose registers
sd x1, 1*8(sp)
# skip sp(x2), we will save it later
sd x3, 3*8(sp)
# skip tp(x4), application does not use it
# save x5~x31
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
# we can use t0/t1/t2 freely, because they were saved on kernel stack
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*8(sp)
sd t1, 33*8(sp)
# read user stack from sscratch and save it on the kernel stack
csrr t2, sscratch
sd t2, 2*8(sp)
# set input argument of trap_handler(cx: &mut TrapContext)
mv a0, sp
call trap_handler
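    # trap_handler returns with sp still pointing at the TrapContext on the
    # kernel stack, so execution simply falls through into __restore below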
__restore:
# now sp->kernel stack(after allocated), sscratch->user stack
# restore sstatus/sepc
ld t0, 32*8(sp)
ld t1, 33*8(sp)
ld t2, 2*8(sp)
csrw sstatus, t0
csrw sepc, t1
csrw sscratch, t2
    # restore general-purpose registers except sp/tp
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
# release TrapContext on kernel stack
addi sp, sp, 34*8
# now sp->kernel stack, sscratch->user stack
csrrw sp, sscratch, sp
sret
|
lakhwindersinghx/STM32
| 9,725
|
Core/Startup/startup_stm32f103c8tx.s
|
/**
*************** (C) COPYRIGHT 2017 STMicroelectronics ************************
* @file startup_stm32f103xb.s
* @author MCD Application Team
* @brief STM32F103xB Devices vector table for Atollic toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Configure the clock system
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M3 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
* @attention
*
* Copyright (c) 2017-2021 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m3
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.equ BootRAM, 0xF108F85F
/**
* @brief This is the code that gets called when the processor first
* starts execution following a reset event. Only the absolutely
* necessary set is performed, after which the application
* supplied main() routine is called.
* @param None
* @retval : None
*/
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
/* Call the clock system initialization function.*/
bl SystemInit
/* Copy the data segment initializers from flash to SRAM */
ldr r0, =_sdata
ldr r1, =_edata
ldr r2, =_sidata
movs r3, #0
b LoopCopyDataInit
CopyDataInit:
ldr r4, [r2, r3]
str r4, [r0, r3]
adds r3, r3, #4
LoopCopyDataInit:
adds r4, r0, r3
cmp r4, r1
bcc CopyDataInit
/* Zero fill the bss segment. */
ldr r2, =_sbss
ldr r4, =_ebss
movs r3, #0
b LoopFillZerobss
FillZerobss:
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
cmp r2, r4
bcc FillZerobss
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
bx lr
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M3. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word MemManage_Handler
.word BusFault_Handler
.word UsageFault_Handler
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word DebugMon_Handler
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler
.word PVD_IRQHandler
.word TAMPER_IRQHandler
.word RTC_IRQHandler
.word FLASH_IRQHandler
.word RCC_IRQHandler
.word EXTI0_IRQHandler
.word EXTI1_IRQHandler
.word EXTI2_IRQHandler
.word EXTI3_IRQHandler
.word EXTI4_IRQHandler
.word DMA1_Channel1_IRQHandler
.word DMA1_Channel2_IRQHandler
.word DMA1_Channel3_IRQHandler
.word DMA1_Channel4_IRQHandler
.word DMA1_Channel5_IRQHandler
.word DMA1_Channel6_IRQHandler
.word DMA1_Channel7_IRQHandler
.word ADC1_2_IRQHandler
.word USB_HP_CAN1_TX_IRQHandler
.word USB_LP_CAN1_RX0_IRQHandler
.word CAN1_RX1_IRQHandler
.word CAN1_SCE_IRQHandler
.word EXTI9_5_IRQHandler
.word TIM1_BRK_IRQHandler
.word TIM1_UP_IRQHandler
.word TIM1_TRG_COM_IRQHandler
.word TIM1_CC_IRQHandler
.word TIM2_IRQHandler
.word TIM3_IRQHandler
.word TIM4_IRQHandler
.word I2C1_EV_IRQHandler
.word I2C1_ER_IRQHandler
.word I2C2_EV_IRQHandler
.word I2C2_ER_IRQHandler
.word SPI1_IRQHandler
.word SPI2_IRQHandler
.word USART1_IRQHandler
.word USART2_IRQHandler
.word USART3_IRQHandler
.word EXTI15_10_IRQHandler
.word RTC_Alarm_IRQHandler
.word USBWakeUp_IRQHandler
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word BootRAM /* @0x108. This is for boot in RAM mode for
STM32F10x Medium Density devices. */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak MemManage_Handler
.thumb_set MemManage_Handler,Default_Handler
.weak BusFault_Handler
.thumb_set BusFault_Handler,Default_Handler
.weak UsageFault_Handler
.thumb_set UsageFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak DebugMon_Handler
.thumb_set DebugMon_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_IRQHandler
.thumb_set PVD_IRQHandler,Default_Handler
.weak TAMPER_IRQHandler
.thumb_set TAMPER_IRQHandler,Default_Handler
.weak RTC_IRQHandler
.thumb_set RTC_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_IRQHandler
.thumb_set RCC_IRQHandler,Default_Handler
.weak EXTI0_IRQHandler
.thumb_set EXTI0_IRQHandler,Default_Handler
.weak EXTI1_IRQHandler
.thumb_set EXTI1_IRQHandler,Default_Handler
.weak EXTI2_IRQHandler
.thumb_set EXTI2_IRQHandler,Default_Handler
.weak EXTI3_IRQHandler
.thumb_set EXTI3_IRQHandler,Default_Handler
.weak EXTI4_IRQHandler
.thumb_set EXTI4_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_IRQHandler
.thumb_set DMA1_Channel2_IRQHandler,Default_Handler
.weak DMA1_Channel3_IRQHandler
.thumb_set DMA1_Channel3_IRQHandler,Default_Handler
.weak DMA1_Channel4_IRQHandler
.thumb_set DMA1_Channel4_IRQHandler,Default_Handler
.weak DMA1_Channel5_IRQHandler
.thumb_set DMA1_Channel5_IRQHandler,Default_Handler
.weak DMA1_Channel6_IRQHandler
.thumb_set DMA1_Channel6_IRQHandler,Default_Handler
.weak DMA1_Channel7_IRQHandler
.thumb_set DMA1_Channel7_IRQHandler,Default_Handler
.weak ADC1_2_IRQHandler
.thumb_set ADC1_2_IRQHandler,Default_Handler
.weak USB_HP_CAN1_TX_IRQHandler
.thumb_set USB_HP_CAN1_TX_IRQHandler,Default_Handler
.weak USB_LP_CAN1_RX0_IRQHandler
.thumb_set USB_LP_CAN1_RX0_IRQHandler,Default_Handler
.weak CAN1_RX1_IRQHandler
.thumb_set CAN1_RX1_IRQHandler,Default_Handler
.weak CAN1_SCE_IRQHandler
.thumb_set CAN1_SCE_IRQHandler,Default_Handler
.weak EXTI9_5_IRQHandler
.thumb_set EXTI9_5_IRQHandler,Default_Handler
.weak TIM1_BRK_IRQHandler
.thumb_set TIM1_BRK_IRQHandler,Default_Handler
.weak TIM1_UP_IRQHandler
.thumb_set TIM1_UP_IRQHandler,Default_Handler
.weak TIM1_TRG_COM_IRQHandler
.thumb_set TIM1_TRG_COM_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM4_IRQHandler
.thumb_set TIM4_IRQHandler,Default_Handler
.weak I2C1_EV_IRQHandler
.thumb_set I2C1_EV_IRQHandler,Default_Handler
.weak I2C1_ER_IRQHandler
.thumb_set I2C1_ER_IRQHandler,Default_Handler
.weak I2C2_EV_IRQHandler
.thumb_set I2C2_EV_IRQHandler,Default_Handler
.weak I2C2_ER_IRQHandler
.thumb_set I2C2_ER_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_IRQHandler
.thumb_set USART3_IRQHandler,Default_Handler
.weak EXTI15_10_IRQHandler
.thumb_set EXTI15_10_IRQHandler,Default_Handler
.weak RTC_Alarm_IRQHandler
.thumb_set RTC_Alarm_IRQHandler,Default_Handler
.weak USBWakeUp_IRQHandler
.thumb_set USBWakeUp_IRQHandler,Default_Handler
|
lariaus/riscv-emu
| 1,323
|
bitswap/io.s
|
.align 2
.section .text
.equ UART_BASE, 0x10010000
.equ UART_REG_TXFIFO, 0
# `puts` subroutine writes null-terminated string to UART (serial communication port)
# input: a0 register specifies the starting address of a null-terminated string
# clobbers: t0, t1, t2 temporary registers
.globl puts
puts:
li t0, UART_BASE # t0 = UART_BASE
bb1:
lbu t1, (a0) # t1 = load unsigned byte from memory address specified by a0 register
beqz t1, bb3 # break the loop, if loaded byte was null
# wait until UART is ready
bb2:
lw t2, UART_REG_TXFIFO(t0) # t2 = uart[UART_REG_TXFIFO]
    bltz t2, bb2                # bit 31 of txfifo is the "full" flag; t2 stays negative until the UART is ready for transmission
sw t1, UART_REG_TXFIFO(t0) # send byte, uart[UART_REG_TXFIFO] = t1
addi a0, a0, 1 # increment a0 address by 1 byte
j bb1
bb3:
ret
# Writes integer to UART (serial communication port)
# a0: integer value to write
.globl puti
puti:
addi sp,sp,-16
sd ra,8(sp)
sd s0,0(sp)
mv a1,a0
la a0, tmp_buff
call int_to_str
la a0, tmp_buff
call puts
ld ra,8(sp)
ld s0,0(sp)
addi sp,sp,16
jr ra
.section .data
.align 2
tmp_buff:
.zero 32
|
lariaus/riscv-emu
| 1,210
|
fibo/exp_fibo.s
|
.file "exp_fibo.c"
.option nopic
.attribute arch, "rv64i2p1_m2p0_a2p1_f2p2_d2p2_zicsr2p0_zifencei2p0"
.attribute unaligned_access, 0
.attribute stack_align, 16
.text
.align 2
.globl fibo
.type fibo, @function
fibo:
mv a2,a0
li a5,1
ble a0,a5,.L4
li a0,1
li a4,0
.L3:
mv a3,a0
addw a0,a4,a0
addiw a5,a5,1
mv a4,a3
bne a2,a5,.L3
ret
.L4:
ret
.size fibo, .-fibo
.align 2
.globl int_to_str
.type int_to_str, @function
int_to_str:
addi sp,sp,-32
sd ra,24(sp)
sd s0,16(sp)
sd s1,8(sp)
mv s0,a1
li a5,9
bgt a1,a5,.L7
addiw s0,a1,48
sb s0,0(a0)
addi a0,a0,1
.L6:
ld ra,24(sp)
ld s0,16(sp)
ld s1,8(sp)
addi sp,sp,32
jr ra
.L7:
li s1,10
divw a1,a1,s1
call int_to_str
remw s0,s0,s1
addiw s0,s0,48
sb s0,0(a0)
addi a0,a0,1
j .L6
.size int_to_str, .-int_to_str
.align 2
.globl foo
.type foo, @function
foo:
addi sp,sp,-16
sd ra,8(sp)
sd s0,0(sp)
mv a1,a0
lui s0,%hi(.LANCHOR0)
addi a0,s0,%lo(.LANCHOR0)
call int_to_str
addi a0,s0,%lo(.LANCHOR0)
call dumps
ld ra,8(sp)
ld s0,0(sp)
addi sp,sp,16
jr ra
.size foo, .-foo
.globl g_buff
.bss
.align 3
.set .LANCHOR0,. + 0
.type g_buff, @object
.size g_buff, 64
g_buff:
.zero 64
.ident "GCC: (GNU) 12.2.0"
|
lariaus/riscv-emu
| 1,323
|
fibo/io.s
|
.align 2
.section .text
.equ UART_BASE, 0x10010000
.equ UART_REG_TXFIFO, 0
# `puts` subroutine writes null-terminated string to UART (serial communication port)
# input: a0 register specifies the starting address of a null-terminated string
# clobbers: t0, t1, t2 temporary registers
.globl puts
puts:
li t0, UART_BASE # t0 = UART_BASE
bb1:
lbu t1, (a0) # t1 = load unsigned byte from memory address specified by a0 register
beqz t1, bb3 # break the loop, if loaded byte was null
# wait until UART is ready
bb2:
lw t2, UART_REG_TXFIFO(t0) # t2 = uart[UART_REG_TXFIFO]
    bltz t2, bb2                # bit 31 of txfifo is the "full" flag; t2 stays negative until the UART is ready for transmission
sw t1, UART_REG_TXFIFO(t0) # send byte, uart[UART_REG_TXFIFO] = t1
addi a0, a0, 1 # increment a0 address by 1 byte
j bb1
bb3:
ret
# Writes integer to UART (serial communication port)
# a0: integer value to write
.globl puti
puti:
addi sp,sp,-16
sd ra,8(sp)
sd s0,0(sp)
mv a1,a0
la a0, tmp_buff
call int_to_str
la a0, tmp_buff
call puts
ld ra,8(sp)
ld s0,0(sp)
addi sp,sp,16
jr ra
.section .data
.align 2
tmp_buff:
.zero 32
|
lariaus/riscv-emu
| 2,145
|
hello_world/hello.s
|
.align 2
.equ UART_BASE, 0x10010000
.equ UART_REG_TXFIFO, 0
.section .text
# Entry point
.globl _start
_start:
csrr t0, mhartid # read hardware thread id (`hart` stands for `hardware thread`)
bnez t0, _exit # run only on the first hardware thread (hartid == 0), halt all the other threads
la sp, stack_top # setup stack pointer
jal main # Call main function
j _exit # Exit
# Exit function (infinite loop)
.globl _exit
_exit:
j _exit
# Main function
.globl main
main:
la a0, msg # load address of `msg` to a0 argument register
    jal puts                    # jump to `puts` subroutine, return address is stored in ra register
    la a0, msg2                 # load address of `msg2` to a0 argument register
    jal puts                    # jump to `puts` subroutine, return address is stored in ra register
ret
puts: # `puts` subroutine writes null-terminated string to UART (serial communication port)
# input: a0 register specifies the starting address of a null-terminated string
# clobbers: t0, t1, t2 temporary registers
li t0, UART_BASE # t0 = UART_BASE
bb1:
lbu t1, (a0) # t1 = load unsigned byte from memory address specified by a0 register
beqz t1, bb3 # break the loop, if loaded byte was null
# wait until UART is ready
bb2:
lw t2, UART_REG_TXFIFO(t0) # t2 = uart[UART_REG_TXFIFO]
    bltz t2, bb2                # bit 31 of txfifo is the "full" flag; t2 stays negative until the UART is ready for transmission
sw t1, UART_REG_TXFIFO(t0) # send byte, uart[UART_REG_TXFIFO] = t1
addi a0, a0, 1 # increment a0 address by 1 byte
j bb1
bb3:
ret
.section .rodata
msg:
.string "Hello, World !\n\0"
msg2:
.string "This is just the beginning !\n\0"
|
lariaus/riscv-emu
| 1,323
|
hello_clang/io.s
|
.align 2
.section .text
.equ UART_BASE, 0x10010000
.equ UART_REG_TXFIFO, 0
# `puts` subroutine writes null-terminated string to UART (serial communication port)
# input: a0 register specifies the starting address of a null-terminated string
# clobbers: t0, t1, t2 temporary registers
.globl puts
puts:
li t0, UART_BASE # t0 = UART_BASE
bb1:
lbu t1, (a0) # t1 = load unsigned byte from memory address specified by a0 register
beqz t1, bb3 # break the loop, if loaded byte was null
# wait until UART is ready
bb2:
lw t2, UART_REG_TXFIFO(t0) # t2 = uart[UART_REG_TXFIFO]
bltz t2, bb2 # t2 becomes positive once UART is ready for transmission
sw t1, UART_REG_TXFIFO(t0) # send byte, uart[UART_REG_TXFIFO] = t1
addi a0, a0, 1 # increment a0 address by 1 byte
j bb1
bb3:
ret
# Writes integer to UART (serial communication port)
# a0: integer value to write
.globl puti
puti:
addi sp,sp,-16
sd ra,8(sp)
sd s0,0(sp)
mv a1,a0
la a0, tmp_buff
call int_to_str
la a0, tmp_buff
call puts
ld ra,8(sp)
ld s0,0(sp)
addi sp,sp,16
jr ra
.section .data
.align 2
tmp_buff:
.zero 32
|
laurci/x86-machine-code-http-server
| 2,185
|
test/server_asm.S
|
section .data
RESPONSE:
db 'HTTP/1.1 200 OK', 0xd, 0xa
db 'Content-Type: text/html', 0xd, 0xa
db 'Content-Length: 154', 0xd, 0xa, 0xd, 0xa
db '<!DOCTYPE html>'
db '<html>'
db '<head>'
db '<title>Elves and CSS</title>'
db '<style>body { background-color: #f0f0f0; }</style>'
db '</head>'
db '<body>'
db '<h1>Elves and CSS</h1>'
db '</body>'
db '</html>'
RESPONSE_LEN equ $ - RESPONSE
MSG_SERVER_STARTED:
db 'Server started on port 8080', 0xa
MSG_SERVER_STARTED_LEN equ $ - MSG_SERVER_STARTED
SERVER_ADDR:
dw 2 ; sin_family = AF_INET
    dw 0x901f           ; sin_port = htons(8080) (0x1f90 stored little-endian)
dd 0x00000000 ; sin_addr = INADDR_ANY (0.0.0.0)
times 8 db 0x00 ; padding to make it 16 bytes total
; variables
server_sock dd 0x00
client_sock dd 0x00
request_buffer times 1024 db 0x00
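; note: 0x167/0x169/0x16b/0x16c below are the i386 direct socket syscalls
; (socket=359, bind=361, listen=363, accept4=364) added in Linux 4.3,
; invoked here through the legacy int 0x80 interface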
section .text
global _start
_start:
; socket(AF_INET, SOCK_STREAM, 0)
mov eax, 0x167
mov ebx, 2 ; AF_INET
mov ecx, 1 ; SOCK_STREAM
mov edx, 0
int 0x80
; save the server socket
mov [server_sock], eax
; bind(server_sock, SERVER_ADDR, 16)
mov eax, 0x169
mov ebx, [server_sock]
mov ecx, SERVER_ADDR
mov edx, 16
int 0x80
; listen(server_sock, 10)
mov eax, 0x16b
mov ebx, [server_sock]
mov ecx, 10
int 0x80
; write(1, MSG_SERVER_STARTED, MSG_SERVER_STARTED_LEN)
mov eax, 0x4
mov ebx, 1
mov ecx, MSG_SERVER_STARTED
mov edx, MSG_SERVER_STARTED_LEN
int 0x80
handle_conn:
; accept(server_sock, NULL, NULL)
mov eax, 0x16c
mov ebx, [server_sock]
mov ecx, 0
mov edx, 0
mov esi, 0
int 0x80
; save the client socket
mov [client_sock], eax
; read(client_sock, request_buffer, 1024)
mov eax, 0x3
mov ebx, [client_sock]
mov ecx, request_buffer
mov edx, 1024
int 0x80
; write(client_sock, RESPONSE, RESPONSE_LEN)
mov eax, 0x4
mov ebx, [client_sock]
mov ecx, RESPONSE
mov edx, RESPONSE_LEN
int 0x80
; close(client_sock)
mov eax, 0x6
mov ebx, [client_sock]
int 0x80
; loop back to accept another connection
jmp handle_conn
|
ldos-project/asterinas
| 2,603
|
test/apps/fork/fork.S
|
# SPDX-License-Identifier: MPL-2.0
# FIXME: WNOHANG option currently does not work properly without preemption, so we have temporarily
# removed it. Once preemption is supported, the following macro can be uncommented to add the WNOHANG
# option back.
# #define PREEMPTION_ENABLE
.global _start
.section .text
_start:
call print_hello_world
mov $57, %rax # syscall number of fork
syscall
cmp $0, %rax
je _child # child process
jmp _parent # parent process
_parent:
call wait_child
call get_pid
call print_parent_message
call exit
_child:
call get_pid
call print_child_message
call exit
wait_child:
mov %rax, %rdi # child process id
#ifdef PREEMPTION_ENABLE
_loop:
mov $61, %rax # syscall number of wait4
mov $0, %rsi # exit status address
mov $1, %rdx # wait option: WNOHANG
syscall
cmp %rdi, %rax # The return value is the pid of child
jne _loop
ret
#else
mov $61, %rax # syscall number of wait4
mov $0, %rsi # exit status address
mov $0, %rdx # wait option
syscall
ret
#endif
exit:
mov $60, %rax # syscall number of exit
mov $0, %rdi # exit code
syscall
get_pid:
mov $39, %rax
syscall
ret
print_hello_world:
mov $message, %rsi # address of message
mov $message_end, %rdx
sub %rsi, %rdx # calculate message len
jmp _print_message
print_parent_message:
mov $message_parent, %rsi # address of message
mov $message_parent_end, %rdx
sub %rsi, %rdx # calculate message len
jmp _print_message
print_child_message:
mov $message_child, %rsi # address of message
mov $message_child_end, %rdx
sub %rsi, %rdx # calculate message len
jmp _print_message
# never call _print_message directly; the print_* helpers tail-jump to it,
# so its ret returns to their original caller
_print_message:
mov $1, %rax # syscall number of write
mov $1, %rdi # stdout
syscall
ret
.section .rodata
message:
.ascii "Hello, world in fork\n"
message_end:
message_parent:
.ascii "Hello world from parent\n"
message_parent_end:
message_child:
.ascii "Hello world from child\n"
message_child_end:
|
ldos-project/asterinas
| 2,554
|
ostd/libs/linux-bzimage/setup/src/x86/header.S
|
/* SPDX-License-Identifier: MPL-2.0 */
// The compatibility file for the Linux x86 Boot Protocol.
// See https://www.kernel.org/doc/html/v5.6/x86/boot.html for
// more information on the Linux x86 Boot Protocol.
// The bootloader may fill some fields at runtime, which can
// be read by the kernel (via `boot_params.hdr`).
.section ".header", "a"
CODE32_START = 0x100000
# Real-mode setup sectors. We don't use them. Their size is set to one page.
SETUP_SECTS = 7
SETUP_SECTS_SIZE = 0x200 * (SETUP_SECTS + 1)
.org 0x01f1
hdr_start:
setup_sects: .byte SETUP_SECTS
root_flags: .word 1
syssize: .long 0
ram_size: .word 0
vid_mode: .word 0xfffd
root_dev: .word 0
boot_flag: .word 0xAA55
jump: .byte 0xeb
jump_addr: .byte hdr_end - jump_addr
magic: .ascii "HdrS"
.word 0x020f
realmode_swtch: .word 0, 0
start_sys_seg: .word 0
.word 0
type_of_loader: .byte 0
loadflags: .byte (1 << 0) # LOADED_HIGH
setup_move_size: .word 0
code32_start: .long CODE32_START
ramdisk_image: .long 0
ramdisk_size: .long 0
bootsect_kludge: .long 0
heap_end_ptr: .word 65535
ext_loader_ver: .byte 0
ext_loader_type: .byte 0
cmd_line_ptr: .long 0
initrd_addr_max: .long 0x7fffffff
kernel_alignment: .long 0x1000000
relocatable_kernel: .byte 0
min_alignment: .byte 0x10
.if {CFG_TARGET_ARCH_X86_64}
# Note that we don't actually support the legacy 64-bit entry point
# (XLF_KERNEL_64). But we have to specify it, otherwise the boot loader
# will think this kernel does not have 64-bit support.
xloadflags: .word 0b01011 # Bit 0: XLF_KERNEL_64
# Bit 1: XLF_CAN_BE_LOADED_ABOVE_4G
# Bit 3: XLF_EFI_HANDOVER_64
.else
xloadflags: .word 0
.endif
cmdline_size: .long 4096 - 1
hardware_subarch: .long 0
hardware_subarch_data: .quad 0
payload_offset: .long 0 # Not used.
payload_length: .long 0 # Not used.
setup_data: .quad 0
pref_address: .quad CODE32_START
init_size: .long __executable_size
.if {CFG_TARGET_ARCH_X86_64}
handover_offset: .long (entry_efi_handover32 - entry_legacy32)
.else
handover_offset: .long 0
.endif
kernel_info_offset: .long 0
hdr_end:
|
ldos-project/asterinas
| 3,171
|
ostd/libs/linux-bzimage/setup/src/x86/amd64_efi/setup.S
|
/* SPDX-License-Identifier: MPL-2.0 */
// The load address of the setup section is CODE32_START (0x100000).
// See the linker script.
.section ".setup", "ax"
CODE32_START = 0x100000
.code32
.global entry_legacy32
entry_legacy32:
// This is the 32-bit Linux legacy entry point.
// Not supported. However, there doesn't seem to be a way to disable this
// entry point in the header, so provide a dummy implementation here.
hlt
jmp entry_legacy32
.global entry_efi_handover32
entry_efi_handover32:
// This is the 32-bit EFI handover entry point.
// Not supported. This entry point is not enabled in the header, so it
// should not be reachable. We declare the entry point anyway, because
// its offset is needed in the header. We provide a dummy implementation
// just in case.
jmp entry_legacy32
// The 64-bit Linux legacy entry point must be 0x200 bytes after the 32-bit
// one. This is required by the x86 Linux boot protocol.
.skip 0x200 - (. - entry_legacy32)
.code64
entry_legacy64:
// This is the 64-bit Linux legacy entry point.
// Not supported. We need to enable this entry point in the header,
// otherwise the boot loader will think the kernel does not support
// 64-bit.
jmp halt
// The 64-bit EFI handover entry point must be 0x200 bytes after the 32-bit
// one. This is required by the x86 Linux boot protocol.
.skip 0x200 - (. - entry_efi_handover32)
entry_efi_handover64:
// This is the 64-bit EFI handover entry point.
//
// Arguments:
// RDI: void *handle
// RSI: efi_system_table_t *table
// RDX: struct boot_params *bp
jmp efi_common64
.global entry_efi_pe64
entry_efi_pe64:
// This is the 64-bit EFI PE/COFF entry point.
//
// Arguments:
// RCX: void *handle
// RDX: efi_system_table_t *table
mov rdi, rcx
mov rsi, rdx
xor rdx, rdx
jmp efi_common64
efi_common64:
// We can reuse the stack provided by the UEFI firmware until a short time
// after exiting the UEFI boot services. So we don't build our own stack.
//
// But the stack must be 16-byte aligned! So we drop the return address.
add rsp, 8
// Compute the load offset.
lea rcx, [rip + entry_legacy32]
sub rcx, CODE32_START
// Do relocations.
lea rax, [rip + __rela_start]
lea rbx, [rip + __rela_end]
cmp rax, rbx
jae reloc_done
reloc_iter:
// We check the type in the builder, so we should not see an unexpected
// type at runtime. Otherwise, we just stop here.
mov r8d, [rax + 8] // Elf64_Rela::r_type
cmp r8d, 8 // R_X86_64_RELATIVE
jne halt
mov r9, [rax + 16] // Elf64_Rela::r_addend
add r9, rcx
mov r8, [rax] // Elf64_Rela::r_offset
mov [r8 + rcx], r9 // *(r_offset + load_offset) = r_addend + load_offset
add rax, 24 // sizeof(Elf64_Rela)
cmp rax, rbx
jb reloc_iter
reloc_done:
// Call the Rust main routine.
call main_efi_common64
// The main routine should not return. If it does, there is nothing we can
// do but stop the machine.
jmp halt
halt:
hlt
jmp halt
|
ldos-project/asterinas
| 1,684
|
ostd/libs/linux-bzimage/setup/src/x86/legacy_i386/setup.S
|
/* SPDX-License-Identifier: MPL-2.0 */
// The load address of the setup section is CODE32_START (0x100000).
// See the linker script.
.section ".setup", "ax"
.code32
.global entry_legacy32
entry_legacy32:
// This is the 32-bit Linux legacy entry point.
//
// Arguments:
// RSI: struct boot_params *bp
// We don't do relocations, so let's make sure we're at the right address.
// Otherwise, we'd better stop here.
lea esp, [esi + 0x1e8] // scratch: u32 (offset 0x1e4)
call load_address
load_address:
pop eax
cmp eax, offset load_address
jne halt
// Set up the stack.
mov esp, offset __stack_top
// Load the GDT.
push 8 // 32-bit code
mov eax, offset gdt_loaded
push eax
lgdt [gdtr]
retf
gdt_loaded:
mov ax, 16 // 32-bit data
mov ds, ax
mov es, ax
mov fs, ax
mov gs, ax
// Call the Rust main routine.
push esi
call main_legacy32
// The main routine should not return. If it does, there is nothing we can
// do but stop the machine.
jmp halt
// All other types of entry points are not enabled in the header.
// So we don't care about them.
halt:
hlt
jmp halt
// GDT. We define GDT ourselves to ensure that the GDT page will not be
// accidentally overwritten by the allocated memory.
.rodata
.align 16
gdtr:
.word gdt_end - gdt - 1
.long gdt
.align 16
gdt:
.quad 0x0000000000000000 // 0: null descriptor
.quad 0x00cf9a000000ffff // 8: 32-bit code segment
.quad 0x00cf92000000ffff // 16: 32-bit data segment
gdt_end:
// A small stack for the setup code.
.bss
.align 8
__stack_bottom:
.space 0x1000
__stack_top:
|
ldos-project/asterinas
| 4,069
|
ostd/src/arch/riscv/trap/trap.S
|
/* SPDX-License-Identifier: MPL-2.0 OR MIT
*
* The original source code is from [trapframe-rs](https://github.com/rcore-os/trapframe-rs),
* which is released under the following license:
*
* SPDX-License-Identifier: MIT
*
* Copyright (c) 2020 - 2024 Runji Wang
*
* We make the following new changes:
* * Add the `trap_handler_table`.
*
* These changes are released under the following license:
*
* SPDX-License-Identifier: MPL-2.0
*/
# Constants / Macros defined in Rust code:
# XLENB
# LOAD
# STORE
.section .text
.global trap_entry
.balign 4
trap_entry:
# If coming from userspace, preserve the user stack pointer and load
# the kernel stack pointer. If we came from the kernel, sscratch
# will contain 0, and we should continue on the current stack.
csrrw sp, sscratch, sp
bnez sp, trap_from_user
trap_from_kernel:
csrr sp, sscratch
addi sp, sp, -34 * XLENB
# sscratch = previous-sp, sp = kernel-sp
trap_from_user:
# save general registers except sp(x2)
STORE_SP x1, 1
STORE_SP x3, 3
STORE_SP x4, 4
STORE_SP x5, 5
STORE_SP x6, 6
STORE_SP x7, 7
STORE_SP x8, 8
STORE_SP x9, 9
STORE_SP x10, 10
STORE_SP x11, 11
STORE_SP x12, 12
STORE_SP x13, 13
STORE_SP x14, 14
STORE_SP x15, 15
STORE_SP x16, 16
STORE_SP x17, 17
STORE_SP x18, 18
STORE_SP x19, 19
STORE_SP x20, 20
STORE_SP x21, 21
STORE_SP x22, 22
STORE_SP x23, 23
STORE_SP x24, 24
STORE_SP x25, 25
STORE_SP x26, 26
STORE_SP x27, 27
STORE_SP x28, 28
STORE_SP x29, 29
STORE_SP x30, 30
STORE_SP x31, 31
# save sp, sstatus, sepc
csrrw t0, sscratch, x0 # sscratch = 0 (kernel)
csrr t1, sstatus
csrr t2, sepc
STORE_SP t0, 2 # save sp
STORE_SP t1, 32 # save sstatus
STORE_SP t2, 33 # save sepc
li t0, 3 << 13
or t1, t1, t0 # sstatus.FS = Dirty (3)
csrw sstatus, t1
andi t1, t1, 1 << 8 # sstatus.SPP == 1
beqz t1, end_trap_from_user
end_trap_from_kernel:
mv a0, sp # first arg is TrapFrame
la ra, trap_return # set return address
j trap_handler
end_trap_from_user:
# load callee-saved registers
LOAD_SP sp, 0
LOAD_SP s0, 0
LOAD_SP s1, 1
LOAD_SP s2, 2
LOAD_SP s3, 3
LOAD_SP s4, 4
LOAD_SP s5, 5
LOAD_SP s6, 6
LOAD_SP s7, 7
LOAD_SP s8, 8
LOAD_SP s9, 9
LOAD_SP s10, 10
LOAD_SP s11, 11
LOAD_SP ra, 12
# not callee-saved, but is used to store mhartid
LOAD_SP gp, 13
addi sp, sp, 14 * XLENB
ret
.global run_user
run_user:
# save callee-saved registers
addi sp, sp, -14 * XLENB
STORE_SP s0, 0
STORE_SP s1, 1
STORE_SP s2, 2
STORE_SP s3, 3
STORE_SP s4, 4
STORE_SP s5, 5
STORE_SP s6, 6
STORE_SP s7, 7
STORE_SP s8, 8
STORE_SP s9, 9
STORE_SP s10, 10
STORE_SP s11, 11
STORE_SP ra, 12
# not callee-saved, but is used to store mhartid
STORE_SP gp, 13
mv t0, sp
mv sp, a0
STORE_SP t0, 0 # save kernel-sp
csrw sscratch, sp # sscratch = bottom of trap frame
trap_return:
LOAD_SP t0, 32 # t0 = sstatus
LOAD_SP t1, 33 # t1 = sepc
csrw sstatus, t0 # load sstatus
csrw sepc, t1 # load sepc
# restore general registers except sp(x2)
LOAD_SP x1, 1
LOAD_SP x3, 3
LOAD_SP x4, 4
LOAD_SP x5, 5
LOAD_SP x6, 6
LOAD_SP x7, 7
LOAD_SP x8, 8
LOAD_SP x9, 9
LOAD_SP x10, 10
LOAD_SP x11, 11
LOAD_SP x12, 12
LOAD_SP x13, 13
LOAD_SP x14, 14
LOAD_SP x15, 15
LOAD_SP x16, 16
LOAD_SP x17, 17
LOAD_SP x18, 18
LOAD_SP x19, 19
LOAD_SP x20, 20
LOAD_SP x21, 21
LOAD_SP x22, 22
LOAD_SP x23, 23
LOAD_SP x24, 24
LOAD_SP x25, 25
LOAD_SP x26, 26
LOAD_SP x27, 27
LOAD_SP x28, 28
LOAD_SP x29, 29
LOAD_SP x30, 30
LOAD_SP x31, 31
# restore sp last
LOAD_SP x2, 2
# return from supervisor call
sret
|
ldos-project/asterinas
| 1,388
|
ostd/src/arch/riscv/boot/boot.S
|
/* SPDX-License-Identifier: MPL-2.0 */
.section .text.entry
.globl _start
_start:
# Arguments passed from SBI:
# a0 = hart id
# a1 = device tree paddr (not touched)
# 1. enable paging
# setting up 1st pagetable
# entry = (PPN(boot_pagetable_2nd) << 10) | 0x01 # V
la t1, boot_pagetable
li t0, 8 * 511
add t1, t1, t0
la t0, boot_pagetable_2nd
srli t0, t0, 2
ori t0, t0, 0x01
sd t0, 0(t1)
la t0, boot_pagetable
li t1, 9 << 60
srli t0, t0, 12
or t0, t0, t1
csrw satp, t0
sfence.vma
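# Note: satp is MODE (bits 63:60) | ASID | PPN; MODE=9 selects Sv48, and
# the srli above turned boot_pagetable's physical address into its PPN.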
# 2. set sp (BSP only)
lga sp, boot_stack_top
# 3. set gp (CPU-local address)
.extern __cpu_local_start
lga gp, __cpu_local_start
# 4. jump to rust riscv_boot
lga t0, riscv_boot
jr t0
.section .bss.stack
.globl boot_stack_bottom
boot_stack_bottom:
.space 0x40000 # 64 KiB
.globl boot_stack_top
boot_stack_top:
.section .data
.align 12
boot_pagetable:
.quad (0x00000 << 10) | 0xcf # VRWXAD
.zero 8 * 255
.quad (0x00000 << 10) | 0xcf # VRWXAD
.zero 8 * 254
.quad 0 # To-Be-Assign
boot_pagetable_2nd:
# 0x0000_00ff_8000_0000 -> 0x0000_0000_8000_0000
.zero 8 * 508
.quad (0x00000 << 10) | 0xcf # VRWXAD
.quad (0x40000 << 10) | 0xcf # VRWXAD
.quad (0x80000 << 10) | 0xcf # VRWXAD
.quad 0
|
ldos-project/asterinas
| 2,967
|
ostd/src/arch/x86/trap/syscall.S
|
/* SPDX-License-Identifier: MPL-2.0 OR MIT
*
* The original source code is from [trapframe-rs](https://github.com/rcore-os/trapframe-rs),
* which is released under the following license:
*
* SPDX-License-Identifier: MIT
*
* Copyright (c) 2020 - 2024 Runji Wang
*
* We make the following new changes:
* * Skip saving/restoring the fsgsbase registers.
*
* These changes are released under the following license:
*
* SPDX-License-Identifier: MPL-2.0
*/
.code64
.text
# extern "sysv64" fn syscall_return(&mut UserContext)
.global syscall_return
syscall_return:
# disable interrupt
cli
# save callee-saved registers
push r15
push r14
push r13
push r12
push rbp
push rbx
push rdi # keep rsp 16-byte aligned
mov gs:4, rsp # store kernel rsp -> TSS.sp0
mov rsp, rdi # set rsp -> UserContext
# restore user gsbase
swapgs
pop rax
pop rbx
pop rcx
pop rdx
pop rsi
pop rdi
pop rbp
pop r8 # skip rsp
pop r8
pop r9
pop r10
pop r11
pop r12
pop r13
pop r14
pop r15
# rip
# rflags
# fsbase
# gsbase
# trap_num
# error_code
# determine whether to return via sysret or iret
cmp dword ptr [rsp + 4*8], 0x100 # syscall?
je sysret
iret:
# construct trap frame
push {USER_SS} # push ss
push [rsp - 8*8] # push rsp
push [rsp + 3*8] # push rflags
push {USER_CS} # push cs
push [rsp + 4*8] # push rip
iretq
sysret:
pop rcx # rcx = rip
pop r11 # r11 = rflags
mov rsp, [rsp - 11*8] # load rsp
sysretq
# The sysretq instruction does:
# - load cs, ss
# - load rflags <- r11
# - load rip <- rcx
.global syscall_entry
syscall_entry:
# The syscall instruction does:
# - load cs
# - store rflags -> r11
# - mask rflags
# - store rip -> rcx
# - load rip
swapgs # swap in kernel gs
mov gs:12, rsp # store user rsp -> scratch at TSS.sp1
mov rsp, gs:4 # load kernel rsp <- TSS.sp0
pop rsp # load rsp <- UserContext
add rsp, 21*8 # rsp -> error code of UserContext
push 0x100 # push trap_num
sub rsp, 16 # skip fsbase, gsbase
# push general registers
push r11 # push rflags
push rcx # push rip
.global trap_syscall_entry
trap_syscall_entry:
push r15
push r14
push r13
push r12
push r11
push r10
push r9
push r8
push gs:12 # push rsp
push rbp
push rdi
push rsi
push rdx
push rcx
push rbx
push rax
# restore callee-saved registers
mov rsp, gs:4 # load kernel rsp <- TSS.sp0
pop rbx
pop rbx
pop rbp
pop r12
pop r13
pop r14
pop r15
# go back to Rust
ret
|
ldos-project/asterinas
| 3,012
|
ostd/src/arch/x86/trap/trap.S
|
/* SPDX-License-Identifier: MPL-2.0 OR MIT
*
* The original source code is from [trapframe-rs](https://github.com/rcore-os/trapframe-rs),
* which is released under the following license:
*
* SPDX-License-Identifier: MIT
*
* Copyright (c) 2020 - 2024 Runji Wang
*
* We make the following new changes:
* * Add the `trap_handler_table`.
*
* These changes are released under the following license:
*
* SPDX-License-Identifier: MPL-2.0
*/
.code64
.equ NUM_INT, 256
.altmacro
.macro DEF_HANDLER, i
.Ltrap_handler_\i:
.if \i == 8 || (\i >= 10 && \i <= 14) || \i == 17
# error code pushed by CPU
push \i # interrupt vector
jmp trap_common
.else
push 0 # fill in error code in TrapFrame
push \i # interrupt vector
jmp trap_common
.endif
.endm
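# For reference, the vectors where the CPU pushes an error code are
# 8 (#DF), 10 (#TS), 11 (#NP), 12 (#SS), 13 (#GP), 14 (#PF) and 17 (#AC),
# which is exactly the condition tested in DEF_HANDLER above.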
.section .text
_trap_handlers:
.set i, 0
.rept NUM_INT
DEF_HANDLER %i
.set i, i + 1
.endr
.macro DEF_TABLE_ENTRY, i
.quad .Ltrap_handler_\i
.endm
.section .rodata
.global trap_handler_table
trap_handler_table:
.set i, 0
.rept NUM_INT
DEF_TABLE_ENTRY %i
.set i, i + 1
.endr
.section .text
.global trap_common
trap_common:
cld # clear DF before calling/returning to any C function to conform to x86-64 calling convention
push rax
mov ax, [rsp + 4*8] # load cs
and ax, 0x3 # keep the CPL bits of the saved cs; nonzero means we trapped from user mode
jz __from_kernel # continue trap
__from_user:
/*
kernel stack:
- ptr to UserContext
- ss
- rsp
- rflags
- cs
- rip
- error code
- trap num
- rax
*/
swapgs # swap in kernel gs
mov rax, [rsp + 6*8] # rax = user rsp
mov gs:12, rax # store user rsp -> scratch at TSS.sp1
mov rsp, [rsp + 8*8] # load rsp <- UserContext
add rsp, 22*8 # rsp -> top of UserContext
mov rax, gs:4 # rax = kernel stack
# push trap_num, error_code
push [rax - 6*8] # push error_code
push [rax - 7*8] # push trap_num
sub rsp, 16 # skip fsbase, gsbase
# push general registers
push [rax - 3*8] # push rflags
push [rax - 5*8] # push rip
mov rax, [rax - 8*8] # pop rax
jmp trap_syscall_entry
__from_kernel:
/*
kernel stack:
- rflags
- cs
- rip
- error code
- trap num
- rax
*/
pop rax
push 0
push r15
push r14
push r13
push r12
push r11
push r10
push r9
push r8
lea r8, [rsp + 13*8]
push r8 # push rsp
push rbp
push rdi
push rsi
push rdx
push rcx
push rbx
push rax
mov rdi, rsp
call trap_handler
.global trap_return
trap_return:
pop rax
pop rbx
pop rcx
pop rdx
pop rsi
pop rdi
pop rbp
pop r8 # skip rsp
pop r8
pop r9
pop r10
pop r11
pop r12
pop r13
pop r14
pop r15
# skip padding, trap_num, error_code
add rsp, 24
iretq
|
ldos-project/asterinas
| 9,694
|
ostd/src/arch/x86/boot/bsp_boot.S
|
/* SPDX-License-Identifier: MPL-2.0 */
// The boot routine executed by the bootstrap processor.
// The boot header, initial boot setup code, temporary GDT and page tables are
// in the boot section. The boot section is mapped writable since kernel may
// modify the initial page table.
.section ".bsp_boot", "awx"
.code32
// Every entry type goes through the common paging and machine-state setup
// routines, so each entry point pushes a mark of the boot protocol in use
// onto the stack.
ENTRYTYPE_MULTIBOOT = 1
ENTRYTYPE_MULTIBOOT2 = 2
ENTRYTYPE_LINUX_32 = 3
ENTRYTYPE_LINUX_64 = 4
MULTIBOOT_ENTRY_MAGIC = 0x2BADB002
MULTIBOOT2_ENTRY_MAGIC = 0x36D76289
KERNEL_VMA = 0xffffffff80000000
// The Linux 32-bit Boot Protocol entry point.
// Must be located at 0x8001000, ABI immutable!
.code32
.org 0x000
.global __linux32_boot
__linux32_boot:
cli
cld
// Set the kernel call stack.
mov esp, offset boot_stack_top
push 0 // upper 32-bits
push esi // boot_params ptr
push 0 // upper 32-bits
push ENTRYTYPE_LINUX_32
jmp initial_boot_setup
// The Linux 64-bit Boot Protocol entry point.
// Must be located at 0x8001200, ABI immutable!
.code64
.org 0x200
.global __linux64_boot
__linux64_boot:
cli
cld
// Set the kernel call stack.
lea rsp, [rip + boot_stack_top]
push rsi // boot_params ptr from the loader
push ENTRYTYPE_LINUX_64
// Set up the page table and load it.
call page_table_setup_64
lea rdx, [rip + boot_l4pt]
mov cr3, rdx
// Prepare far return. The default operation size of
// far returns is 32 bits even in long mode.
lea edx, [rip + long_mode_in_low_address]
mov rax, (8 << 32)
or rdx, rax
push rdx
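// The pushed qword is the far-return frame: the low 32 bits hold the
// target EIP and the high 32 bits the code selector (8), which the
// 32-bit retf below pops in that order.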
// Switch to our own temporary GDT.
lgdt [boot_gdtr]
retf
// The multiboot & multiboot2 entry point.
.code32
.global __multiboot_boot
__multiboot_boot:
cli
cld
// Set the kernel call stack.
mov esp, offset boot_stack_top
push 0 // Upper 32-bits.
push eax // multiboot magic ptr
push 0 // Upper 32-bits.
push ebx // multiboot info ptr
// Tell the entry type from eax
cmp eax, MULTIBOOT_ENTRY_MAGIC
je magic_is_mb
cmp eax, MULTIBOOT2_ENTRY_MAGIC
je magic_is_mb2
jmp halt // Should not be reachable!
magic_is_mb:
push 0 // Upper 32-bits.
push ENTRYTYPE_MULTIBOOT
jmp initial_boot_setup
magic_is_mb2:
push 0 // Upper 32-bits.
push ENTRYTYPE_MULTIBOOT2
jmp initial_boot_setup
initial_boot_setup:
// Prepare for far return. We use a far return as a fence after setting GDT.
push 24
lea edx, [protected_mode]
push edx
// Switch to our own temporary GDT.
lgdt [boot_gdtr]
retf
protected_mode:
mov ax, 16
mov ds, ax
mov ss, ax
mov es, ax
mov fs, ax
mov gs, ax
// Set up the page table.
call page_table_setup_32
// Enable PAE and PGE.
mov eax, cr4
or eax, 0xa0
mov cr4, eax
// Set the page table address.
lea eax, [boot_l4pt]
mov cr3, eax
// Enable long mode.
mov ecx, 0xc0000080
rdmsr
or eax, 0x0100
wrmsr
// Prepare for far return.
push 8
lea edx, [long_mode_in_low_address]
push edx
// Enable paging.
mov eax, cr0
or eax, 0x80000000
mov cr0, eax
retf
.macro define_page_table_setup bits
.code\bits
page_table_setup_\bits:
// Zero out the page table.
mov al, 0x00
lea edi, [boot_page_table_start]
lea ecx, [boot_page_table_end]
sub ecx, edi
rep stosb
// PTE flags used in this file.
PTE_PRESENT = (1)
PTE_WRITE = (1 << 1)
PTE_HUGE = (1 << 7)
PTE_GLOBAL = (1 << 8)
// L4PT: 0x00000000_00000000 ~ 0x00000000_3fffffff
// 0x00000000_40000000 ~ 0x00000000_7fffffff
// 0x00000000_80000000 ~ 0x00000000_bfffffff
// 0x00000000_c0000000 ~ 0x00000000_ffffffff
lea edi, [boot_l4pt]
lea eax, [boot_l3pt_linear_id + (PTE_PRESENT | PTE_WRITE)]
mov dword ptr [edi], eax
mov dword ptr [edi + 4], 0
// L4PT: 0xffff8000_00000000 ~ 0xffff8000_3fffffff
// 0xffff8000_40000000 ~ 0xffff8000_7fffffff
// 0xffff8000_80000000 ~ 0xffff8000_bfffffff
// 0xffff8000_c0000000 ~ 0xffff8000_ffffffff
lea edi, [boot_l4pt + 0x100 * 8]
lea eax, [boot_l3pt_linear_id + (PTE_PRESENT | PTE_WRITE)]
mov dword ptr [edi], eax
mov dword ptr [edi + 4], 0
// L4PT: 0xffffffff_80000000 ~ 0xffffffff_bfffffff
// 0xffffffff_c0000000 ~ 0xffffffff_ffffffff
lea edi, [boot_l4pt + 0x1ff * 8]
lea eax, [boot_l3pt_kernel + (PTE_PRESENT | PTE_WRITE)]
mov dword ptr [edi], eax
mov dword ptr [edi + 4], 0
// L3PT: 0x00000000_00000000 ~ 0x00000000_3fffffff
lea edi, [boot_l3pt_linear_id]
lea eax, [boot_l2pt_0g_1g + (PTE_PRESENT | PTE_WRITE | PTE_GLOBAL)]
mov dword ptr [edi], eax
mov dword ptr [edi + 4], 0
// L3PT: 0x00000000_40000000 ~ 0x00000000_7fffffff
lea edi, [boot_l3pt_linear_id + 0x1 * 8]
lea eax, [boot_l2pt_1g_2g + (PTE_PRESENT | PTE_WRITE | PTE_GLOBAL)]
mov dword ptr [edi], eax
mov dword ptr [edi + 4], 0
// L3PT: 0x00000000_80000000 ~ 0x00000000_bfffffff
lea edi, [boot_l3pt_linear_id + 0x2 * 8]
lea eax, [boot_l2pt_2g_3g + (PTE_PRESENT | PTE_WRITE | PTE_GLOBAL)]
mov dword ptr [edi], eax
mov dword ptr [edi + 4], 0
// L3PT: 0x00000000_c0000000 ~ 0x00000000_ffffffff
lea edi, [boot_l3pt_linear_id + 0x3 * 8]
lea eax, [boot_l2pt_3g_4g + (PTE_PRESENT | PTE_WRITE | PTE_GLOBAL)]
mov dword ptr [edi], eax
mov dword ptr [edi + 4], 0
// L3PT: 0xffffffff_80000000 ~ 0xffffffff_bfffffff
lea edi, [boot_l3pt_kernel + 0x1fe * 8]
lea eax, [boot_l2pt_0g_1g + (PTE_PRESENT | PTE_WRITE | PTE_GLOBAL)]
mov dword ptr [edi], eax
mov dword ptr [edi + 4], 0
// L3PT: 0xffffffff_c0000000 ~ 0xffffffff_ffffffff
lea edi, [boot_l3pt_kernel + 0x1ff * 8]
lea eax, [boot_l2pt_1g_2g + (PTE_PRESENT | PTE_WRITE | PTE_GLOBAL)]
mov dword ptr [edi], eax
mov dword ptr [edi + 4], 0
// L2PT: map to low 1 GiB * 4 space
lea edi, [boot_l2pt]
mov eax, PTE_PRESENT | PTE_WRITE | PTE_GLOBAL | PTE_HUGE
mov ecx, 512 * 4 // (of entries in PD) * (number of PD)
write_l2pt_entry_\bits:
mov dword ptr [edi], eax
mov dword ptr [edi + 4], 0
add eax, 0x200000 // +2MiB
add edi, 8
loop write_l2pt_entry_\bits
ret
.endm
define_page_table_setup 32
define_page_table_setup 64
// Temporary GDTR/GDT entries. This must be located in the .boot section as its
// address (gdt) must be physical to load.
.align 16
.global boot_gdtr
boot_gdtr:
.word gdt_end - gdt - 1
.quad gdt
.align 16
gdt:
.quad 0 // 0: null descriptor
.quad {KCODE64} // 8: code segment (kernel, 64-bit)
.quad {KDATA} // 16: data segment (kernel)
.quad {KCODE32} // 24: code segment (kernel, 32-bit)
gdt_end:
// The page tables and the stack
.align 4096
.global boot_page_table_start
boot_page_table_start:
boot_l4pt:
.skip 4096
// This L3PT is used for both identity mapping and linear mapping. Four lower
// entries point to `boot_l2pt`s so that it maps to low 4G physical memory.
boot_l3pt_linear_id:
.skip 4096
// This L3PT is used for kernel mapping, which is at highest 2G space. Two
// higher entries point to `boot_l2pt`s so it maps to low 2G physical memory.
boot_l3pt_kernel:
.skip 4096
// These L2PTs are used for identity mapping, linear mapping and kernel mapping.
// They map to low 4G physical memory in 2MB huge pages.
boot_l2pt:
boot_l2pt_0g_1g:
.skip 4096
boot_l2pt_1g_2g:
.skip 4096
boot_l2pt_2g_3g:
.skip 4096
boot_l2pt_3g_4g:
.skip 4096
boot_page_table_end:
.global boot_stack_top
boot_stack_bottom:
.skip 0x40000
boot_stack_top:
.code64
long_mode_in_low_address:
mov ax, 0
mov ds, ax
mov ss, ax
mov es, ax
mov fs, ax
mov gs, ax
// Update RSP/RIP to use the virtual address.
mov rbx, KERNEL_VMA
or rsp, rbx
mov rax, offset long_mode
jmp rax
// From here, we're in the .text section: we no longer use physical address.
.text
.code64
long_mode:
// Clear .bss section.
mov al, 0x00
lea rdi, [rip + __bss]
lea rcx, [rip + __bss_end]
sub rcx, rdi
rep stosb
// Clear RBP to stop the backtrace.
xor rbp, rbp
// Initialize the GS base to the CPU-local start address.
.extern __cpu_local_start
lea rax, [rip + __cpu_local_start]
mov rdx, rax
shr rdx, 32 // EDX:EAX = __cpu_local_start
mov ecx, 0xC0000101 // ECX = GS.base
wrmsr
// Call the corresponding Rust entrypoint according to the boot entrypoint.
pop rax
cmp rax, ENTRYTYPE_MULTIBOOT
je entry_type_multiboot
cmp rax, ENTRYTYPE_MULTIBOOT2
je entry_type_multiboot2
cmp rax, ENTRYTYPE_LINUX_32
je entry_type_linux
cmp rax, ENTRYTYPE_LINUX_64
je entry_type_linux
// Unreachable!
jmp halt
.extern __linux_boot
.extern __multiboot_entry
.extern __multiboot2_entry
entry_type_linux:
pop rdi // boot_params ptr
lea rax, [rip + __linux_boot] // jump into Rust code
call rax
jmp halt
entry_type_multiboot:
pop rsi // the address of multiboot info
pop rdi // multiboot magic
lea rax, [rip + __multiboot_entry] // jump into Rust code
call rax
jmp halt
entry_type_multiboot2:
pop rsi // the address of multiboot info
pop rdi // multiboot magic
lea rax, [rip + __multiboot2_entry] // jump into Rust code
call rax
jmp halt
halt:
cli
hlt
jmp halt
|
ldos-project/asterinas
| 3,820
|
ostd/src/arch/x86/boot/ap_boot.S
|
/* SPDX-License-Identifier: MPL-2.0 */
// The boot routine executed by the application processor.
.global ap_boot_from_real_mode
.global ap_boot_from_long_mode
.section ".ap_boot", "awx"
.align 4096
IA32_EFER_MSR = 0xC0000080
IA32_EFER_BIT_LME = 1 << 8
IA32_EFER_BIT_NXE = 1 << 11
CR0_BIT_PE = 1 << 0
CR0_BIT_PG = 1 << 31
CR4_BIT_PAE = 1 << 5
CR4_BIT_PGE = 1 << 7
.macro setup_64bit_gdt_and_page_table eax
// Use the 64-bit GDT.
.extern boot_gdtr
lgdt [boot_gdtr]
// Set the NX bit support in the EFER MSR.
mov ecx, IA32_EFER_MSR
rdmsr
or eax, IA32_EFER_BIT_NXE
wrmsr
// Enable PAE and PGE.
mov \eax, cr4
or \eax, CR4_BIT_PAE | CR4_BIT_PGE
mov cr4, \eax
// Set the page table. The application processors use
// the same page table as the bootstrap processor's
// boot phase page table.
xor \eax, \eax // clear the upper 32 bits if \eax is 64-bit
mov eax, __boot_page_table_pointer // 32-bit load
mov cr3, \eax
.endm
.code16
ap_boot_from_real_mode:
cli // disable interrupts
cld
jmp ap_real_mode
.code64
ap_boot_from_long_mode:
cli // disable interrupts
cld
setup_64bit_gdt_and_page_table rax
// Some firmware seems to provide per-AP stacks that we can use. However,
// the ACPI specification does not promise that the stack is usable. It is
// better not to rely on such implementation details.
lea rsp, [rip + retf_stack_bottom]
retf // 32-bit far return
.align 8
retf_stack_bottom:
.long ap_long_mode_in_low_address
.long 0x8
retf_stack_top:
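// Note: the 32-bit retf above pops a 32-bit EIP (ap_long_mode_in_low_address)
// and then a 32-bit CS selector (0x8) from this prebuilt frame.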
.code16
ap_real_mode:
xor ax, ax // clear ax
mov ds, ax // clear ds
lgdt [ap_gdtr] // load gdt
mov eax, cr0
or eax, CR0_BIT_PE
mov cr0, eax // enable protected mode
ljmp 0x8, offset ap_protect_mode
// 32-bit AP GDT.
.align 16
ap_gdt:
.quad 0x0000000000000000
ap_gdt_code:
.quad 0x00cf9a000000ffff
ap_gdt_data:
.quad 0x00cf92000000ffff
ap_gdt_end:
.align 16
ap_gdtr:
.word ap_gdt_end - ap_gdt - 1
.quad ap_gdt
.align 4
.code32
ap_protect_mode:
mov ax, 0x10
mov ds, ax
mov ss, ax
setup_64bit_gdt_and_page_table eax
// Enable long mode.
mov ecx, IA32_EFER_MSR
rdmsr
or eax, IA32_EFER_BIT_LME
wrmsr
// Enable paging.
mov eax, cr0
or eax, CR0_BIT_PG
mov cr0, eax
ljmp 0x8, offset ap_long_mode_in_low_address
// This is a pointer to the page table used by the APs.
// The BSP will fill this pointer before kicking the APs.
.global __boot_page_table_pointer
.align 4
__boot_page_table_pointer:
.skip 4
.code64
ap_long_mode_in_low_address:
mov ax, 0
mov ds, ax
mov ss, ax
mov es, ax
mov fs, ax
mov gs, ax
// Update RIP to use the virtual address.
mov rax, offset ap_long_mode
jmp rax
.data
// This is a pointer to be filled by the BSP when boot information
// of all APs are allocated and initialized.
.global __ap_boot_info_array_pointer
.align 8
__ap_boot_info_array_pointer:
.quad 0
__ap_boot_cpu_id_tail:
.quad 1
.text
.code64
ap_long_mode:
mov rdi, 1
lock xadd [__ap_boot_cpu_id_tail], rdi
// The CPU ID is in the RDI.
mov rax, rdi
shl rax, 4 // 16-byte `PerApRawInfo`
mov rbx, [rip + __ap_boot_info_array_pointer]
// Setup the stack.
mov rsp, [rbx + rax - 16] // raw_info[cpu_id - 1].stack_top
// Setup the GS base (the CPU-local address).
mov rax, [rbx + rax - 8] // raw_info[cpu_id - 1].cpu_local
mov rdx, rax
shr rdx, 32 // EDX:EAX = raw_info.cpu_local
mov ecx, 0xC0000101 // ECX = GS.base
wrmsr
// Go to Rust code.
.extern ap_early_entry
xor rbp, rbp
mov rax, offset ap_early_entry
call rax
.extern halt // bsp_boot.S
jmp halt
|
ldos-project/asterinas
| 1,253
|
ostd/src/arch/x86/boot/multiboot2/header.S
|
/* SPDX-License-Identifier: MPL-2.0 */
// This is the GNU Multiboot 2 header.
// Reference: https://www.gnu.org/software/grub/manual/multiboot2/html_node/Index.html//Index
.section ".multiboot2_header", "a"
.code32
// Macros for cleaner code in the header fields.
MB2_MAGIC = 0xE85250D6
MB2_ARCHITECTURE = 0 // 32-bit (protected) mode of i386
MB2_HEADERLEN = header_end - header_start
MB2_CHECKSUM = -(MB2_MAGIC + MB2_ARCHITECTURE + MB2_HEADERLEN)
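// i.e. MB2_CHECKSUM is chosen so that MB2_MAGIC + MB2_ARCHITECTURE +
// MB2_HEADERLEN + MB2_CHECKSUM wraps to zero modulo 2^32, which is the
// invariant the bootloader verifies.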
header_start:
.align 8
.long MB2_MAGIC
.long MB2_ARCHITECTURE
.long MB2_HEADERLEN
.long MB2_CHECKSUM
// Tag: entry address
entry_address_tag_start:
.short 3
.short 1 // Optional
.long entry_address_tag_end - entry_address_tag_start
.extern __multiboot_boot
.long __multiboot_boot // entry_addr
entry_address_tag_end:
// Tag: information request
.align 8
info_request:
.short 1
.short 0 // Required
.long info_request_end - info_request
.long 6 // Memory map request
.long 15 // ACPI (new) request
info_request_end:
// Tag: header end
.align 8
.short 0 // type: tags end
.short 0 // flags
.long 8 // size
header_end:
|
ldq3/ones-riscv
| 2,156
|
kernel/src/intervene/handler.S
|
.altmacro
.macro SAVE_GP n
sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
ld x\n, \n*8(sp)
.endm
.section .text.itext
.globl handler_user
.globl load_user_context
.globl handler_kernel
.globl load_kernel_context
.align 2
handler_user:
csrrw sp, sscratch, sp
_save_context:
# save general
# skip x0, constant 0
sd x1, 1*8(sp)
# skip x2(sp), the sp is in sscratch
sd x3, 3*8(sp)
# skip x4(tp), application does not use it
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
# save csr
csrr t0, sstatus
sd t0, 32*8(sp)
csrr t1, sepc
sd t1, 33*8(sp)
csrr t2, sscratch
sd t2, 2*8(sp)
_switch_to_kernel:
ld t0, 34*8(sp) # load kernel_satp into t0
ld t1, 36*8(sp) # load trap_handler into t1
ld sp, 35*8(sp) # move to kernel_sp
# switch to kernel space
csrw satp, t0
sfence.vma
_distribute:
# distribute and handle the exception
jr t1 # call distribute
load_user_context:
_switch_back:
# call get_kernel_context
# a0: *TrapContext in user space (constant); a1: user-space token
csrw satp, a1
sfence.vma
csrw sscratch, a0
mv sp, a0
# now sp points to TrapContext in user space, start restoring based on it
# restore sstatus/sepc
ld t0, 32*8(sp)
ld t1, 33*8(sp)
csrw sstatus, t0
csrw sepc, t1
# restore general purpose registers except x0/sp/tp
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
# back to user stack
ld sp, 2*8(sp)
# return
sret
.align 2
handler_kernel:
addi sp, sp, -34*8
sd x1, 1*8(sp)
sd x3, 3*8(sp)
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*8(sp)
sd t1, 33*8(sp)
mv a0, sp
csrr t2, sscratch
jalr t2
load_kernel_context:
ld t0, 32*8(sp)
ld t1, 33*8(sp)
csrw sstatus, t0
csrw sepc, t1
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
addi sp, sp, 34*8
# return
sret
|
lehtojo/metallium
| 21,601
|
low/x64.s
|
.intel_syntax noprefix
.global read_msr
read_msr:
mov rcx, rdi
rdmsr
sal rdx, 32
or rax, rdx
ret
.global write_msr
write_msr:
mov rcx, rdi
mov rax, rsi
mov rdx, rsi
shr rdx, 32
wrmsr
ret
.global write_cr0
write_cr0:
mov cr0, rdi
ret
.global write_cr1
write_cr1:
mov cr1, rdi
ret
.global write_cr2
write_cr2:
mov cr2, rdi
ret
.global write_cr3
write_cr3:
mov cr3, rdi
ret
.global write_cr4
write_cr4:
mov cr4, rdi
ret
.global read_cr0
read_cr0:
mov rax, cr0
ret
.global read_cr1
read_cr1:
mov rax, cr1
ret
.global read_cr2
read_cr2:
mov rax, cr2
ret
.global read_cr3
read_cr3:
mov rax, cr3
ret
.global read_cr4
read_cr4:
mov rax, cr4
ret
.global write_gdtr
write_gdtr:
lgdt [rdi]
ret
.global write_fs_base
write_fs_base:
wrfsbase rdi
ret
.global read_fs_base
read_fs_base:
rdfsbase rax
ret
.global flush_tlb_local
flush_tlb_local:
invlpg [rdi]
ret
.global flush_tlb
flush_tlb:
mov rax, cr3
mov cr3, rax
ret
.global interrupts_enable
interrupts_enable:
sti
ret
.global interrupts_disable
interrupts_disable:
cli
ret
.global interrupts_set_idtr
interrupts_set_idtr:
lidt [rdi]
ret
.global ports_read_u8
ports_read_u8:
xor rax, rax
mov rdx, rdi
in al, dx
ret
.global ports_read_u16
ports_read_u16:
xor rax, rax
mov rdx, rdi
in ax, dx
ret
.global ports_read_u32
ports_read_u32:
xor rax, rax
mov rdx, rdi
in eax, dx
ret
.global ports_write_u8
ports_write_u8:
mov rax, rsi
mov rdx, rdi
out dx, al
ret
.global ports_write_u16
ports_write_u16:
mov rax, rsi
mov rdx, rdi
out dx, ax
ret
.global ports_write_u32
ports_write_u32:
mov rax, rsi
mov rdx, rdi
out dx, eax
ret
.global registers_rsp
registers_rsp:
lea rax, [rsp+8]
ret
.global registers_rip
registers_rip:
mov rax, [rsp]
ret
.align 32
.global interrupts_entry
interrupts_entry:
# Todo: Move the cli instruction into the interrupt entry stub before this point, so that interrupts are disabled before anything is pushed to the stack
cli # Disable interrupts
# Save the interrupt stack pointer and load the kernel stack pointer
# Note: Each thread has its own kernel stack for saving the state in kernel easily
mov [gs:0], rsp
mov rsp, [gs:8]
# The interrupt stack holds data (the frame pushed by the CPU) that is not on this new stack; reserve space for it and copy it later
sub rsp, 56
# Save all the registers
push r15
push r14
push r13
push r12
push r11
push r10
push r9
push r8
push rax
push rcx
push rdx
push rbx
push rsp
push rbp
push rsi
push rdi
# Copy the data pushed in the interrupt stack to the allocated region
lea rdi, [rsp+128]
mov rsi, [gs:0]
mov rcx, 7
rep movsq
mov rdi, rsp # Pass the register state
call interrupts_kernel_entry
# If the return address (rip) is in kernel mode, do a kernel switch
mov rdi, [rsp+144]
test rdi, rdi
jl kernel_switch_return
# Copy changes to the interrupt stack
lea rsi, [rsp+128]
mov rdi, [gs:24]
sub rdi, 56 # 7 * 8
mov rcx, 7
rep movsq
pop rdi
pop rsi
pop rbp
add rsp, 8 # Skip restoring rsp
pop rbx
pop rdx
pop rcx
pop rax
pop r8
pop r9
pop r10
pop r11
pop r12
pop r13
pop r14
pop r15
# Switch back to the interrupt stack
mov rsp, [gs:24]
sub rsp, 56
add rsp, 16 # Remove the interrupt number and padding (Added by the interrupt entry)
iretq # Note: Interrupts are enabled by restoring rflags
.global system_call_entry
system_call_entry:
# Interrupts are disabled
# Save the user stack pointer and load the kernel stack pointer
mov [gs:0], rsp
mov rsp, [gs:8]
pushq 0x1b # User ss (0x18 | 3)
pushq [gs:0] # User rsp
push r11 # RFLAGS
pushq 0x23 # User cs (0x20 | 3)
push rcx # User RIP
pushq 0 # Padding
pushq 0x80 # "System call interrupt"
push r15
push r14
push r13
push r12
push r11
push r10
push r9
push r8
push rax
push rcx
push rdx
push rbx
push rsp
push rbp
push rsi
push rdi
mov rdi, rsp
call interrupts_kernel_entry
# If the return address (rip) is in kernel mode, do a kernel switch
mov rdi, [rsp+144]
test rdi, rdi
jl kernel_switch_return
pop rdi
pop rsi
pop rbp
add rsp, 8 # Skip restoring rsp
pop rbx
pop rdx
pop rcx
add rsp, 8 # Skip restoring rax
pop r8
pop r9
pop r10
pop r11
pop r12
pop r13
pop r14
pop r15
add rsp, 16 # Remove the interrupt number and padding
pop rcx
add rsp, 8
pop r11 # Load rflags
pop rsp
sysretq # Enables interrupts
kernel_switch_return:
pop rdi
pop rsi
pop rbp
add rsp, 8 # Skip restoring rsp
pop rbx
pop rdx
pop rcx
pop rax
pop r8
pop r9
pop r10
pop r11
pop r12
pop r13
pop r14
pop r15
# Note: Kernel switches are done using syscall instruction, so we have rcx and r11 to work with
add rsp, 16 # Remove the interrupt number and padding
popq [gs:0] # Save rip temporarily
add rsp, 8 # Do not restore cs as it is correct already
popfq # Load rflags
pop rsp # Load the stack pointer
jmp [gs:0]
.global full_memory_barrier
full_memory_barrier:
lock or dword ptr [rsp], 0 # Note: ORing zero into the return address changes nothing; the LOCK prefix alone acts as a full barrier
mfence
ret
.global wait_for_microseconds
wait_for_microseconds:
xor rax, rax
test rdi, rdi
jnz wait_for_microseconds_L0
ret
wait_for_microseconds_L0:
out 0x80, al
dec rdi
jnz wait_for_microseconds_L0
ret
.global system_call
system_call:
mov rax, rdi
mov rdi, rsi
mov rsi, rdx
mov rdx, rcx
mov r10, r8
mov r8, r9
mov r9, qword ptr [rsp+8]
syscall
ret
.global save_fpu_state_xsave
save_fpu_state_xsave:
# mov rax, rsi
# mov rdx, rsi
# shr rdx, 32
fxsave [rdi]
ret
.global load_fpu_state_xrstor
load_fpu_state_xrstor:
# mov rax, rsi
# mov rdx, rsi
# shr rdx, 32
fxrstor [rdi]
ret
.global uefi_call_wrapper
uefi_call_wrapper:
sub rsp, 40 # Allocate memory for the shadow space and the last argument
# Convert between the following calling conventions:
# Note: RDI has the function pointer
# Arguments: rsi, rdx, rcx, r8, r9
# UEFI calling convention: rcx, rdx, r8, r9, [rsp+32]
mov qword ptr [rsp+32], r9
mov r9, r8
mov r8, rcx
# mov rdx, rdx
mov rcx, rsi
# Call the uefi function pointer
call rdi
add rsp, 40 # Remove the shadow space and the last argument
ret
# Todo: Prettify
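# Rough map of the compiler-generated tiering below: lengths >= 32 go
# through paired 16-byte movups copies (.LBB0_6), the 8..31-byte tail
# through 8-byte moves (.LBB0_10), and any remainder byte by byte (.LBB0_12).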
.global forward_copy
forward_copy:
test rdx, rdx
je .LBB0_13
cmp rdx, 8
jae .LBB0_3
xor eax, eax
jmp .LBB0_12
.LBB0_3:
cmp rdx, 32
jae .LBB0_5
xor eax, eax
jmp .LBB0_9
.LBB0_5:
mov rax, rdx
and rax, -32
xor ecx, ecx
.LBB0_6:
movups xmm0, xmmword ptr [rsi + rcx]
movups xmm1, xmmword ptr [rsi + rcx + 16]
movups xmmword ptr [rdi + rcx], xmm0
movups xmmword ptr [rdi + rcx + 16], xmm1
add rcx, 32
cmp rax, rcx
jne .LBB0_6
cmp rax, rdx
je .LBB0_13
test dl, 24
je .LBB0_12
.LBB0_9:
mov rcx, rax
mov rax, rdx
and rax, -8
.LBB0_10:
mov r8, qword ptr [rsi + rcx]
mov qword ptr [rdi + rcx], r8
add rcx, 8
cmp rax, rcx
jne .LBB0_10
cmp rax, rdx
je .LBB0_13
.LBB0_12:
movzx ecx, byte ptr [rsi + rax]
mov byte ptr [rdi + rax], cl
inc rax
cmp rdx, rax
jne .LBB0_12
.LBB0_13:
ret
.global reverse_copy
reverse_copy:
test rdx, rdx
je .LBB1_11
cmp rdx, 4
jae .LBB1_4
xor eax, eax
.LBB1_3:
mov rcx, rdi
mov r8, rsi
jmp .LBB1_9
.LBB1_4:
cmp rdx, 16
jae .LBB1_12
xor eax, eax
jmp .LBB1_6
.LBB1_12:
mov rax, rdx
and rax, -16
mov rcx, rax
neg rcx
xor r8d, r8d
.LBB1_13:
movups xmm0, xmmword ptr [rsi + r8 - 15]
movups xmmword ptr [rdi + r8 - 15], xmm0
add r8, -16
cmp rcx, r8
jne .LBB1_13
cmp rax, rdx
je .LBB1_11
test dl, 12
je .LBB1_16
.LBB1_6:
mov r9, rax
mov rax, rdx
and rax, -4
mov r10, rax
neg r10
mov rcx, rdi
sub rcx, rax
mov r8, rsi
sub r8, rax
neg r9
.LBB1_7:
mov r11d, dword ptr [rsi + r9 - 3]
mov dword ptr [rdi + r9 - 3], r11d
add r9, -4
cmp r10, r9
jne .LBB1_7
cmp rax, rdx
je .LBB1_11
.LBB1_9:
sub rax, rdx
xor edx, edx
.LBB1_10:
movzx esi, byte ptr [r8 + rdx]
mov byte ptr [rcx + rdx], sil
dec rdx
cmp rax, rdx
jne .LBB1_10
.LBB1_11:
ret
.LBB1_16:
sub rsi, rax
sub rdi, rax
jmp .LBB1_3
.global zero
zero:
test rsi, rsi
je .LBB2_8
cmp rsi, 8
jae .LBB2_3
xor eax, eax
.LBB2_14:
mov rcx, rdi
jmp .LBB2_15
.LBB2_3:
cmp rsi, 32
jae .LBB2_9
xor eax, eax
jmp .LBB2_5
.LBB2_9:
mov rax, rsi
and rax, -32
xor ecx, ecx
xorps xmm0, xmm0
.LBB2_10:
movups xmmword ptr [rdi + rcx], xmm0
movups xmmword ptr [rdi + rcx + 16], xmm0
add rcx, 32
cmp rax, rcx
jne .LBB2_10
cmp rax, rsi
je .LBB2_8
test sil, 24
je .LBB2_13
.LBB2_5:
mov rdx, rax
mov rax, rsi
and rax, -8
lea rcx, [rdi + rax]
.LBB2_6:
mov qword ptr [rdi + rdx], 0
add rdx, 8
cmp rax, rdx
jne .LBB2_6
cmp rax, rsi
je .LBB2_8
.LBB2_15:
sub rsi, rax
xor eax, eax
.LBB2_16:
mov byte ptr [rcx + rax], 0
inc rax
cmp rsi, rax
jne .LBB2_16
.LBB2_8:
ret
.LBB2_13:
add rdi, rax
jmp .LBB2_14
# void color_area(unsigned int* pixels, unsigned int width, unsigned int height, unsigned int stride, unsigned int foreground, unsigned int background) {
# unsigned char fr = (foreground >> 16) & 0xFF;
# unsigned char fg = (foreground >> 8) & 0xFF;
# unsigned char fb = foreground & 0xFF;
#
# unsigned char br = (background >> 16) & 0xFF;
# unsigned char bg = (background >> 8) & 0xFF;
# unsigned char bb = background & 0xFF;
#
# for (unsigned int y = 0; y < height; y++) {
# for (unsigned int i = 0; i < width; i++) {
# unsigned char intensity = pixels[i];
#
# unsigned char rr = ((unsigned short)fr * intensity + (unsigned short)br * (255 - intensity)) / 255;
# unsigned char rg = ((unsigned short)fg * intensity + (unsigned short)bg * (255 - intensity)) / 255;
# unsigned char rb = ((unsigned short)fb * intensity + (unsigned short)bb * (255 - intensity)) / 255;
#
# pixels[i] = (rr << 16) | (rg << 8) | rb;
# }
#
# pixels += stride / sizeof(unsigned int);
# }
# }
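# Worked example of the blend above: with intensity = 128,
# rr = (fr*128 + br*127) / 255, i.e. roughly the midpoint of the
# foreground and background red channels.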
.global color_area
color_area:
push rbp
mov eax, r8d
mov r10d, esi
mov r11d, r9d
shr eax, 16
mov esi, edx
shr r11d, 8
mov rbp, rsp
push r15
push r14
push r13
push r12
push rbx
mov ebx, r8d
shr ebx, 8
and rsp, -16
sub rsp, 56
mov DWORD PTR [rsp-88], edx
mov edx, eax
mov eax, r9d
shr eax, 16
test esi, esi
je .L1
mov esi, ecx
and esi, -4
mov QWORD PTR [rsp-96], rsi
test r10d, r10d
je .L1
lea esi, [r10-1]
mov ecx, r10d
movzx eax, al
xor r12d, r12d
mov DWORD PTR [rsp-100], esi
mov esi, r10d
shr ecx, 2
movd xmm0, eax
and esi, -4
sal rcx, 4
pxor xmm14, xmm14
mov DWORD PTR [rsp-84], eax
mov DWORD PTR [rsp-104], esi
movzx esi, dl
pshufd xmm0, xmm0, 0
movzx ebx, bl
movd xmm7, esi
mov DWORD PTR [rsp-80], esi
movzx r11d, r11b
movzx r8d, r8b
pshufd xmm7, xmm7, 0
mov QWORD PTR [rsp-112], rcx
movdqa xmm12, xmm14
movdqa xmm13, XMMWORD PTR .LC1[rip]
movzx ecx, r9b
movaps XMMWORD PTR [rsp-24], xmm7
psrlq xmm7, 32
movdqa xmm15, XMMWORD PTR .LC0[rip]
movaps XMMWORD PTR [rsp-56], xmm7
pcmpgtd xmm12, xmm13
movdqa xmm7, xmm0
mov DWORD PTR [rsp-76], ecx
psrlq xmm7, 32
movaps XMMWORD PTR [rsp-40], xmm0
movaps XMMWORD PTR [rsp-72], xmm7
.L4:
cmp DWORD PTR [rsp-100], 2
jbe .L24
movd xmm7, ebx
mov rsi, QWORD PTR [rsp-112]
mov rax, rdi
pshufd xmm11, xmm7, 0
movd xmm7, r11d
pshufd xmm10, xmm7, 0
movd xmm7, r8d
lea rdx, [rsi+rdi]
pshufd xmm9, xmm7, 0
movd xmm7, DWORD PTR [rsp-76]
pshufd xmm8, xmm7, 0
movdqa xmm7, xmm11
psrlq xmm7, 32
movaps XMMWORD PTR [rsp+8], xmm7
movdqa xmm7, xmm10
psrlq xmm7, 32
movaps XMMWORD PTR [rsp-8], xmm7
movdqa xmm7, xmm9
psrlq xmm7, 32
movaps XMMWORD PTR [rsp+40], xmm7
movdqa xmm7, xmm8
psrlq xmm7, 32
movaps XMMWORD PTR [rsp+24], xmm7
.L5:
movdqu xmm0, XMMWORD PTR [rax]
movdqa xmm3, XMMWORD PTR [rsp-24]
add rax, 16
movdqa xmm2, XMMWORD PTR [rsp-56]
movdqu xmm1, XMMWORD PTR [rax-16]
pand xmm0, xmm15
movdqa xmm6, XMMWORD PTR [rsp-72]
movdqa xmm7, XMMWORD PTR [rsp-8]
movdqa xmm5, xmm0
pmuludq xmm3, xmm0
pandn xmm1, xmm15
psrlq xmm5, 32
movdqa xmm4, xmm1
pmuludq xmm2, xmm5
psrlq xmm4, 32
pmuludq xmm6, xmm4
pmuludq xmm7, xmm4
pmuludq xmm4, XMMWORD PTR [rsp+24]
pshufd xmm4, xmm4, 8
pshufd xmm3, xmm3, 8
pshufd xmm2, xmm2, 8
punpckldq xmm3, xmm2
movdqa xmm2, XMMWORD PTR [rsp-40]
pshufd xmm6, xmm6, 8
pshufd xmm7, xmm7, 8
pmuludq xmm2, xmm1
pshufd xmm2, xmm2, 8
punpckldq xmm2, xmm6
movdqa xmm6, xmm11
paddd xmm3, xmm2
movdqa xmm2, XMMWORD PTR [rsp+8]
pmuludq xmm6, xmm0
pmuludq xmm0, xmm9
pmuludq xmm2, xmm5
pmuludq xmm5, XMMWORD PTR [rsp+40]
pshufd xmm5, xmm5, 8
pshufd xmm6, xmm6, 8
pshufd xmm0, xmm0, 8
pshufd xmm2, xmm2, 8
punpckldq xmm0, xmm5
movdqa xmm5, xmm12
punpckldq xmm6, xmm2
pmuludq xmm5, xmm3
movdqa xmm2, xmm1
pmuludq xmm1, xmm8
pmuludq xmm2, xmm10
pshufd xmm1, xmm1, 8
pshufd xmm2, xmm2, 8
punpckldq xmm1, xmm4
movdqa xmm4, xmm3
punpckldq xmm2, xmm7
paddd xmm0, xmm1
movdqa xmm1, xmm14
paddd xmm2, xmm6
pcmpgtd xmm1, xmm3
pmuludq xmm4, xmm13
movdqa xmm6, xmm12
pmuludq xmm1, xmm13
paddq xmm1, xmm5
movdqa xmm5, xmm14
psllq xmm1, 32
paddq xmm4, xmm1
movdqa xmm1, xmm3
psrlq xmm1, 32
pcmpgtd xmm5, xmm1
pmuludq xmm6, xmm1
pmuludq xmm1, xmm13
pmuludq xmm5, xmm13
paddq xmm5, xmm6
movdqa xmm6, xmm12
psllq xmm5, 32
paddq xmm1, xmm5
movdqa xmm5, xmm12
shufps xmm4, xmm1, 221
movdqa xmm1, xmm14
pmuludq xmm5, xmm2
pshufd xmm4, xmm4, 216
pcmpgtd xmm1, xmm2
paddd xmm4, xmm3
movdqa xmm3, xmm2
pmuludq xmm3, xmm13
psrad xmm4, 7
pslld xmm4, 16
pand xmm4, XMMWORD PTR .LC2[rip]
pmuludq xmm1, xmm13
paddq xmm1, xmm5
movdqa xmm5, xmm14
psllq xmm1, 32
paddq xmm3, xmm1
movdqa xmm1, xmm2
psrlq xmm1, 32
pcmpgtd xmm5, xmm1
pmuludq xmm6, xmm1
pmuludq xmm1, xmm13
pmuludq xmm5, xmm13
paddq xmm5, xmm6
psllq xmm5, 32
paddq xmm1, xmm5
movdqa xmm5, xmm12
shufps xmm3, xmm1, 221
pshufd xmm3, xmm3, 216
paddd xmm3, xmm2
movdqa xmm2, xmm0
psrad xmm3, 7
pmuludq xmm2, xmm13
movdqa xmm1, xmm3
movdqa xmm3, xmm14
pcmpgtd xmm3, xmm0
pslld xmm1, 8
pand xmm1, XMMWORD PTR .LC3[rip]
por xmm1, xmm4
movdqa xmm4, xmm12
pmuludq xmm4, xmm0
pmuludq xmm3, xmm13
paddq xmm3, xmm4
movdqa xmm4, xmm14
psllq xmm3, 32
paddq xmm2, xmm3
movdqa xmm3, xmm0
psrlq xmm3, 32
pcmpgtd xmm4, xmm3
pmuludq xmm5, xmm3
pmuludq xmm3, xmm13
pmuludq xmm4, xmm13
paddq xmm4, xmm5
psllq xmm4, 32
paddq xmm3, xmm4
shufps xmm2, xmm3, 221
pshufd xmm2, xmm2, 216
paddd xmm2, xmm0
psrad xmm2, 7
pand xmm2, xmm15
por xmm1, xmm2
movups XMMWORD PTR [rax-16], xmm1
cmp rax, rdx
jne .L5
test r10b, 3
je .L6
mov r9d, DWORD PTR [rsp-104]
.L7:
mov eax, r9d
mov ecx, DWORD PTR [rsp-84]
mov esi, 2155905153
lea r14, [rdi+rax*4]
mov eax, DWORD PTR [rsp-80]
mov r13d, DWORD PTR [r14]
movzx edx, r13b
not r13d
movzx r13d, r13b
imul eax, edx
mov r15d, edx
imul ecx, r13d
imul r15d, ebx
imul edx, r8d
add ecx, eax
mov eax, r15d
imul rcx, rsi
mov r15d, r13d
imul r15d, r11d
shr rcx, 39
add eax, r15d
mov r15d, DWORD PTR [rsp-76]
sal ecx, 16
imul rax, rsi
and ecx, 16711680
imul r13d, r15d
shr rax, 39
add edx, r13d
sal eax, 8
imul rdx, rsi
movzx eax, ax
or eax, ecx
shr rdx, 39
movzx edx, dl
or eax, edx
mov DWORD PTR [r14], eax
lea eax, [r9+1]
cmp eax, r10d
jnb .L6
lea r14, [rdi+rax*4]
mov ecx, DWORD PTR [rsp-84]
mov eax, DWORD PTR [rsp-80]
add r9d, 2
mov r13d, DWORD PTR [r14]
movzx edx, r13b
not r13d
movzx r13d, r13b
imul eax, edx
imul ecx, r13d
mov r15d, r13d
imul r15d, r11d
add ecx, eax
mov eax, edx
imul eax, ebx
imul edx, r8d
imul rcx, rsi
add eax, r15d
mov r15d, DWORD PTR [rsp-76]
imul rax, rsi
imul r13d, r15d
shr rcx, 39
sal ecx, 16
shr rax, 39
and ecx, 16711680
add edx, r13d
sal eax, 8
imul rdx, rsi
movzx eax, ax
or eax, ecx
shr rdx, 39
movzx edx, dl
or eax, edx
mov DWORD PTR [r14], eax
cmp r9d, r10d
jnb .L6
lea r13, [rdi+r9*4]
mov r14d, DWORD PTR [rsp-80]
mov eax, DWORD PTR [r13+0]
movzx r9d, al
not eax
movzx eax, al
imul r14d, r9d
mov ecx, r9d
mov edx, eax
imul ecx, ebx
imul edx, r11d
imul r9d, r8d
add ecx, edx
mov edx, DWORD PTR [rsp-84]
imul rcx, rsi
imul edx, eax
imul eax, r15d
shr rcx, 39
add edx, r14d
sal ecx, 8
imul rdx, rsi
add eax, r9d
movzx ecx, cx
imul rax, rsi
shr rdx, 39
sal edx, 16
shr rax, 39
and edx, 16711680
movzx eax, al
or edx, ecx
or edx, eax
mov DWORD PTR [r13+0], edx
.L6:
mov rax, QWORD PTR [rsp-96]
add r12d, 1
add rdi, rax
cmp DWORD PTR [rsp-88], r12d
jne .L4
.L1:
lea rsp, [rbp-40]
pop rbx
pop r12
pop r13
pop r14
pop r15
pop rbp
ret
.L24:
xor r9d, r9d
jmp .L7
.balign 16
.LC0:
.long 255
.long 255
.long 255
.long 255
.LC1:
.long -2139062143
.long -2139062143
.long -2139062143
.long -2139062143
.LC2:
.long 16711680
.long 16711680
.long 16711680
.long 16711680
.LC3:
.long 65535
.long 65535
.long 65535
.long 65535
.align 0x1000
.global interrupts_tables
interrupts_tables:
.zero 0x1000 # interrupt descriptor table descriptor (idtr)
.zero 0x1000 # interrupt descriptor table (idt)
.zero 0x1000 # interrupt stubs
|
lehtojo/metallium-loader
| 4,864
|
low/boot.s
|
.intel_syntax noprefix
.global enter_kernel
enter_kernel:
# Disable interrupts while we mess around
cli
# Save the kernel entry point and uefi data
mov qword ptr [rip+kernel_entry_point], rcx
mov qword ptr [rip+uefi_information], rdx
# Enable: fxsave, fxrstor, unmasked simd floating point exceptions
mov rax, cr4
or rax, 0b11000000000
mov cr4, rax
# ---------- Kernel mapping ----------
# Load the first address of the first paging table layer
mov rsi, cr3
# Copy the entries to the new kernel paging table
lea rdi, [rip+kernel_paging_table]
mov rcx, 0x1000
rep movsb
# Mirror the first entry into entry 0x100 (the 0xffff8000_00000000 kernel mapping base)
lea rdi, [rip+kernel_paging_table]
mov rax, qword ptr [rdi]
mov qword ptr [rdi+(0x100*8)], rax
# Reload the paging table
mov cr3, rdi
# ---------- GDT & TSS ----------
# Load the kernel mapping base
mov rax, 0xffff800000000000
# Use kernel mapping in TSS RSPs and ISTs
add qword ptr [rip+tss64_rsp0], rax
add qword ptr [rip+tss64_ist1], rax
# Insert address of TSS into GDT before loading it
lea rcx, [rip+tss64]
add rcx, rax
mov word ptr [rip+gdt64_tss_address_0], cx
shr rcx, 16
mov byte ptr [rip+gdt64_tss_address_1], cl
shr rcx, 8
mov byte ptr [rip+gdt64_tss_address_2], cl
shr rcx, 8
mov dword ptr [rip+gdt64_tss_address_3], ecx
# Use kernel mapping with GDTR
add qword ptr [rip+gdtr64_address], rax
# Load the 64-bit global descriptor table
lgdt [rip+gdtr64]
# Register TSS from GDT
mov cx, 0x40
ltr cx
# ---------- Stack ----------
# Use kernel mapping with the stack
add rsp, rax
# ---------- Kernel ----------
# First argument
mov rdi, qword ptr [rip+uefi_information]
# Pass the interrupt tables to the kernel
lea rsi, [rip+interrupt_tables]
add rsi, rax
# Pass the interrupt stack pointer
lea rdx, [rip+interrupt_stack_start]
add rdx, rax
# Pass the physical address of GDTR
lea rcx, [rip+gdtr64]
# Pass the interrupt tables pointer also in r8
mov r8, rsi
# Remap uefi data
add qword ptr [rip+uefi_information], rax
push rdi # Padding to keep the frame 16-byte aligned (value unused)
push qword ptr [rip+uefi_information]
push rcx
push rdx
push rsi
push rdi
# Jump to the kernel using the kernel mapping
add rax, qword ptr [rip+kernel_entry_point]
jmp rax
.section .data
kernel_entry_point: .quad 0
uefi_information: .quad 0
.align 16
gdt64_start:
# Null descriptor
.word 0x0000
.word 0x0000
.byte 0x00
.byte 0x00
.byte 0x00
.byte 0x00
# Kernel code segment (selector = 0x8)
gdt64_code:
.word 0x0000
.word 0x0000
.byte 0x00
.byte 0b10011010
.byte 0b00100000
.byte 0x00
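# Decoding the code-segment bytes above: access 0b10011010 = present,
# DPL 0, code segment, readable; flags 0b00100000 sets the L bit (64-bit).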
# Kernel data segment (selector = 0x10)
gdt64_data:
.word 0x0000
.word 0x0000
.byte 0x00
.byte 0b10010010
.byte 0b00000000
.byte 0x00
# User data segment (selector = 0x18)
.word 0x0000
.word 0x0000
.byte 0x00
.byte 0b11110010
.byte 0b00000000
.byte 0x00
# User code segment (selector = 0x20)
.word 0x0000
.word 0x0000
.byte 0x00
.byte 0b11111010
.byte 0b00100000
.byte 0x00
# Padding
.quad 0
# Duplicate: Kernel data segment (selector = 0x30)
gdt64_data_uefi:
.word 0x0000
.word 0x0000
.byte 0x00
.byte 0b10010010
.byte 0b00000000
.byte 0x00
# Duplicate: Kernel code segment (selector = 0x38)
gdt64_code_uefi:
.word 0x0000
.word 0x0000
.byte 0x00
.byte 0b10011010
.byte 0b00100000
.byte 0x00
# TSS segment (selector = 0x40)
gdt64_tss:
.word 0x0068
gdt64_tss_address_0:
.word 0x0000 # TSS address (bits 0-16)
gdt64_tss_address_1:
.byte 0x00 # TSS address (bits 16-24)
.byte 0b10001001 # Present | 64-bit TSS (Available)
.byte 0b00000000
gdt64_tss_address_2:
.byte 0x00 # TSS address (bits 24-32)
gdt64_tss_address_3:
.quad 0x00 # TSS address (bits 32-64)
gdt64_end:
.align 16
gdtr64:
.word gdt64_end - gdt64_start - 1
gdtr64_address:
.quad gdt64_start
# --- TSS (64-bit) ---
.align 16
tss64:
.long 0 # Reserved
tss64_rsp0:
.quad interrupt_stack_start # RSP0
.quad 0 # RSP1
.quad 0 # RSP2
.quad 0 # Reserved
tss64_ist1:
.quad interrupt_stack_start # IST1
.quad 0 # IST2
.quad 0 # IST3
.quad 0 # IST4
.quad 0 # IST5
.quad 0 # IST6
.quad 0 # IST7
.quad 0 # Reserved
.word 0 # Reserved
.word 0x68 # IO permission map (point to the end of this structure)
.zero 0x1000
# --- Configuration ---
.align 0x1000
kernel_paging_table:
.zero 0x1000
.align 0x1000
interrupt_tables:
.zero 0x1000 # interrupt descriptor table descriptor (idtr)
.zero 0x1000 # interrupt descriptor table (idt)
.zero 0x1000 # interrupt entries
.zero 0x25000
interrupt_stack_start:
.zero 0x25000
kernel_stack_start:
|
lemonJumps/pinaplua
| 1,693
|
pinaple/archDependent/win64.S
|
# x86-64 MICROSOFT (ew) version
.intel_syntax noprefix
.global pinADcallWIN
# x64 windows calling convention
# RCX, RDX, R8, R9 for integer
# XMM0, XMM1, XMM2, XMM3 for float
# returns RAX when 64bit
# returns XMM0 when float
# for our call:
# r9d contains argument count
# r8 contains array of sizes
# rdx contains array of values
# rcx contains function pointer
pinADcallWIN:
push rbp
push r11
push r12
push r13
push r14
push r15
mov rbp, rsp
xor r15, r15 # byte count of stack arguments; stays 0 unless _winCall4p pushes extras
cmp r9, 1
je _winCall1
cmp r9, 2
je _winCall2
cmp r9, 3
je _winCall3
jmp _winCall4p
_winCall1:
mov rsp, rdx
movsd xmm0, qword ptr [rsp]
pop r11
mov rsp, rbp
jmp __winCall
_winCall2:
mov rsp, rdx
movsd xmm0, qword ptr [rsp]
pop r11
movsd xmm1, qword ptr [rsp]
pop r12
mov rsp, rbp
jmp __winCall
_winCall3:
mov rsp, rdx
movsd xmm0, qword ptr [rsp]
pop r11
movsd xmm1, qword ptr [rsp]
pop r12
movsd xmm2, qword ptr [rsp]
pop r13
mov rsp, rbp
jmp __winCall
_winCall4p:
mov rsp, rdx
movsd xmm0, qword ptr [rsp]
pop r11
movsd xmm1, qword ptr [rsp]
pop r12
movsd xmm2, qword ptr [rsp]
pop r13
movsd xmm3, qword ptr [rsp]
pop r14
mov rsp, rbp
sub r9d, 4
jz __winCall
add rdx, 24
imul r9, 8
mov r15, r9
__winCallLoop:
push qword ptr [rdx + r9]
sub r9d, 8
jnz __winCallLoop
__winCall:
mov rax, rcx
mov rcx, r11
mov rdx, r12
mov r8, r13
mov r9, r14
sub rsp, 32
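# Note: the Win64 ABI requires the caller to reserve these 32 bytes of
# "shadow space" for the four register arguments before every call.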
call rax
add rsp, r15
add rsp, 32
pop r15
pop r14
pop r13
pop r12
pop r11
pop rbp
ret
|
leoraclet/hacking
| 5,477
|
CTF-2022/Reverse/MVBN/MVBN.s
|
; ModuleID = 'source.c'
source_filename = "source.c"
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-pc-linux-gnu"
@key = dso_local global [33 x i8] c"7 >)\04\09\00\102\1A}5 q\130L75\19\1Cu|* 5>v\15v/2\02", align 16
@.str = private unnamed_addr constant [18 x i8] c"Usage: %s <flag>\0A\00", align 1
@.str.1 = private unnamed_addr constant [31 x i8] c"[-] Bad password... Try again.\00", align 1
@.str.2 = private unnamed_addr constant [46 x i8] c"[+] Well done ! You can validate with flag :)\00", align 1
; Function Attrs: noinline nounwind optnone uwtable
define dso_local i32 @check_password(i8* %0) #0 {
%2 = alloca i32, align 4
%3 = alloca i8*, align 8
%4 = alloca i32, align 4
store i8* %0, i8** %3, align 8
%5 = load i8*, i8** %3, align 8
%6 = call i64 @strlen(i8* %5) #3
%7 = icmp ne i64 %6, 33
br i1 %7, label %8, label %9
8: ; preds = %1
store i32 -1, i32* %2, align 4
br label %40
9: ; preds = %1
store i32 0, i32* %4, align 4
br label %10
10: ; preds = %36, %9
%11 = load i32, i32* %4, align 4
%12 = icmp slt i32 %11, 33
br i1 %12, label %13, label %39
13: ; preds = %10
%14 = load i32, i32* %4, align 4
%15 = srem i32 %14, 4
%16 = add nsw i32 4194304, %15
%17 = sext i32 %16 to i64
%18 = inttoptr i64 %17 to i8*
%19 = load i8, i8* %18, align 1
%20 = zext i8 %19 to i32
%21 = load i8*, i8** %3, align 8
%22 = load i32, i32* %4, align 4
%23 = sext i32 %22 to i64
%24 = getelementptr inbounds i8, i8* %21, i64 %23
%25 = load i8, i8* %24, align 1
%26 = sext i8 %25 to i32
%27 = xor i32 %20, %26
%28 = load i32, i32* %4, align 4
%29 = sext i32 %28 to i64
%30 = getelementptr inbounds [33 x i8], [33 x i8]* @key, i64 0, i64 %29
%31 = load i8, i8* %30, align 1
%32 = zext i8 %31 to i32
%33 = icmp ne i32 %27, %32
br i1 %33, label %34, label %35
34: ; preds = %13
store i32 1, i32* %2, align 4
br label %40
35: ; preds = %13
br label %36
36: ; preds = %35
%37 = load i32, i32* %4, align 4
%38 = add nsw i32 %37, 1
store i32 %38, i32* %4, align 4
br label %10, !llvm.loop !4
39: ; preds = %10
store i32 0, i32* %2, align 4
br label %40
40: ; preds = %39, %34, %8
%41 = load i32, i32* %2, align 4
ret i32 %41
}
; Function Attrs: nounwind readonly willreturn
declare dso_local i64 @strlen(i8*) #1
; Function Attrs: noinline nounwind optnone uwtable
define dso_local i32 @main(i32 %0, i8** %1) #0 {
%3 = alloca i32, align 4
%4 = alloca i32, align 4
%5 = alloca i8**, align 8
store i32 0, i32* %3, align 4
store i32 %0, i32* %4, align 4
store i8** %1, i8*** %5, align 8
%6 = load i32, i32* %4, align 4
%7 = icmp ne i32 %6, 2
br i1 %7, label %8, label %13
8: ; preds = %2
%9 = load i8**, i8*** %5, align 8
%10 = getelementptr inbounds i8*, i8** %9, i64 0
%11 = load i8*, i8** %10, align 8
%12 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @.str, i64 0, i64 0), i8* %11)
store i32 1, i32* %3, align 4
br label %24
13: ; preds = %2
%14 = load i8**, i8*** %5, align 8
%15 = getelementptr inbounds i8*, i8** %14, i64 1
%16 = load i8*, i8** %15, align 8
%17 = call i32 @check_password(i8* %16)
%18 = icmp ne i32 %17, 0
br i1 %18, label %19, label %21
19: ; preds = %13
%20 = call i32 @puts(i8* getelementptr inbounds ([31 x i8], [31 x i8]* @.str.1, i64 0, i64 0))
br label %23
21: ; preds = %13
%22 = call i32 @puts(i8* getelementptr inbounds ([46 x i8], [46 x i8]* @.str.2, i64 0, i64 0))
br label %23
23: ; preds = %21, %19
store i32 0, i32* %3, align 4
br label %24
24: ; preds = %23, %8
%25 = load i32, i32* %3, align 4
ret i32 %25
}
declare dso_local i32 @printf(i8*, ...) #2
declare dso_local i32 @puts(i8*) #2
attributes #0 = { noinline nounwind optnone uwtable "frame-pointer"="all" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" }
attributes #1 = { nounwind readonly willreturn "frame-pointer"="all" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" }
attributes #2 = { "frame-pointer"="all" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" }
attributes #3 = { nounwind readonly willreturn }
!llvm.module.flags = !{!0, !1, !2}
!llvm.ident = !{!3}
!0 = !{i32 1, !"wchar_size", i32 4}
!1 = !{i32 7, !"uwtable", i32 1}
!2 = !{i32 7, !"frame-pointer", i32 2}
!3 = !{!"Ubuntu clang version 13.0.0-2"}
!4 = distinct !{!4, !5}
!5 = !{!"llvm.loop.mustprogress"}
|
levirogalla/rust-stm32-rtos
| 2,233
|
src/asm_utils.s
|
.syntax unified //This lets us use C like comments!
.cpu cortex-m4 //Guess what this does
.thumb //Practically this only matters to the CPU, but it ensures that the correct types of instructions get included
// .global idleThread
// .thumb_func
// idleThread:
// B .
.global SVCall
.thumb_func
SVCall:
tst lr, 4 //TeST bit 2 of LR (4 is 0b100); in EXC_RETURN this bit says whether the thread was using msp or psp
ite eq // IT (If-Then) block: the next two instructions execute conditionally (eq, then ne), picking msp or psp to pass to the handler
mrseq r0, msp
mrsne r0, psp
b SVCall_Handler
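// For example: "ite eq" makes the next instruction execute if EQ and the
// one after it if NE, which is why mrseq/mrsne appear in that order above.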
.global PendSV
.thumb_func
PendSV: // assume the user was running in thread mode, so we can use psp
// save current threads registers to the stack
mrs r0, psp // buffer psp into r0 for the next insturction
stmdb r0!, {r4-r11} // store the registers r4-r11 on the stack, this is needed so they can be restored later, will store lowest register at the lowest address
msr psp, r0 // update the process stack pointer with the new value
// call rust function to run the more complex logic of finding the next task/thread to run
bl PendSV_Handler
// restore next threads state, the scratch registers will be automatically restored by the return instruction
mrs r0, psp
ldmia r0!, {r4-r11} // load the registers r4-r11 from the stack, this is needed so that the registers are restored to their previous values
msr psp, r0 // update the process stack pointer with the new value, this is needed so that the stack pointer points to the correct location
mov lr, #0xFFFFFFFD // load the EXC_RETURN value: return to Thread mode and restore the frame from the PSP
bx lr
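// 0xFFFFFFFD is an EXC_RETURN magic value: on bx lr the core returns to
// Thread mode, unstacks the hardware frame from the PSP, and expects no
// floating-point state in the frame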
.global initial_context_switch
.thumb_func
initial_context_switch:
mov lr, #0xFFFFFFFD //EXC_RETURN: return to Thread mode using the PSP
ldmia r0!, {r4-r11} //Load the registers from the stack
msr psp, r0 //Set the process stack pointer to the value in r0, the first argument
bx lr //Return to the thread mode
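// r0 (the first argument) is assumed to point at a pre-built stack frame laid
// out like the one PendSV saves: r4-r11 followed by the hardware frame
// {r0-r3, r12, lr, pc, xPSR}; popping r4-r11 here leaves psp at the hardware
// frame so the exception return above can unstack it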
|
levontumanyan/c_programming
| 1,140
|
beginner/func.s
|
.section __TEXT,__text,regular,pure_instructions
.build_version macos, 14, 0 sdk_version 14, 2
.globl _funcy ; -- Begin function funcy
.p2align 2
_funcy: ; @funcy
.cfi_startproc
; %bb.0:
sub sp, sp, #16
.cfi_def_cfa_offset 16
str w0, [sp, #12]
ldr w9, [sp, #12]
mov w8, #2
mul w0, w8, w9
add sp, sp, #16
ret
.cfi_endproc
; -- End function
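; _funcy stores its argument, reloads it, and returns w0 = 2 * w0; it
; plausibly corresponds to unoptimized (-O0) output for C along the lines of:
; int funcy(int x) { return 2 * x; }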
.globl _main ; -- Begin function main
.p2align 2
_main: ; @main
.cfi_startproc
; %bb.0:
sub sp, sp, #32
.cfi_def_cfa_offset 32
stp x29, x30, [sp, #16] ; 16-byte Folded Spill
add x29, sp, #16
.cfi_def_cfa w29, 16
.cfi_offset w30, -8
.cfi_offset w29, -16
mov w8, #0
str w8, [sp, #8] ; 4-byte Folded Spill
stur wzr, [x29, #-4]
mov w0, #4
bl _funcy
ldr w0, [sp, #8] ; 4-byte Folded Reload
ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
add sp, sp, #32
ret
.cfi_endproc
; -- End function
.subsections_via_symbols
|
LighTend3r/OwnOS
| 2,341
|
loader.s
|
global loader ; the entry symbol for ELF
MAGIC_NUMBER equ 0x1BADB002 ; define the magic number constant
FLAGS equ 0x0 ; multiboot flags
CHECKSUM equ -MAGIC_NUMBER ; calculate the checksum
; (magic number + checksum + flags should equal 0)
KERNEL_STACK_SIZE equ 4096 ; size of stack in bytes
section .bss
align 4 ; align at 4 bytes
kernel_stack: ; label points to beginning of memory
resb KERNEL_STACK_SIZE ; reserve stack for the kernel
section .text ; start of the text (code) section
align 4 ; the code must be 4 byte aligned
dd MAGIC_NUMBER ; write the magic number to the machine code,
dd FLAGS ; the flags,
dd CHECKSUM ; and the checksum
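; the Multiboot header above must lie within the first 8192 bytes of the
; image and be 32-bit aligned, or GRUB will not find it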
myString db 'Hello, World!', 0 ;
loader: ; the loader label (defined as entry point in linker script)
mov esp, kernel_stack + KERNEL_STACK_SIZE ; point esp to the top of the kernel stack (stacks grow downwards)
; The assembly code
;extern sum_of_three ; the function sum_of_three is defined elsewhere
;push dword 3 ; arg3
;push dword 2 ; arg2
;push dword 1 ; arg1
;call sum_of_three ; call the function, the result will be in eax
;mov eax, 0xCAFEBABE ; place the number 0xCAFEBABE in the register eax
;mov word [0x000B8000], 0x4128 ; Display the letter "A" with green foreground and darkgrey background
;extern fb_write_cell
;
;push dword 2;
;push dword 8;
;push dword 0x41;
;push dword 0;
;call fb_write_cell
;
;extern fb_move_cursor
;
;push 1;
;call fb_move_cursor
; Write "Hello, world!" to the framebuffer
;extern write
;push dword 13
;push dword myString
;call write
;add esp, 8
; Initialize the COM1 serial port (0x3F8)
extern serial_init
push dword 0x3F8
call serial_init
add esp, 4
; Write to COM1 (0x3F8)
extern serial_write
push dword 0x3F8
push dword 13
push dword myString
call serial_write
add esp, 12
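; with cdecl the last push is the first argument, so the call above assumes a
; C prototype along the lines of:
; void serial_write(const char* buf, unsigned int len, unsigned short com);
; (hypothetical reconstruction; the real signature lives in the C sources)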
.loop:
jmp .loop ; loop forever
|
LioranHale/glowight
| 1,854
|
examples/a.s
|
.file "a.c"
.text
.globl amazing_func
.def amazing_func; .scl 2; .type 32; .endef
.seh_proc amazing_func
amazing_func:
pushq %rbp
.seh_pushreg %rbp
movq %rsp, %rbp
.seh_setframe %rbp, 0
subq $16, %rsp
.seh_stackalloc 16
.seh_endprologue
movl %ecx, 16(%rbp)
movl 16(%rbp), %eax
movl %eax, -4(%rbp)
movl -4(%rbp), %edx
movl %edx, %eax
sall $2, %eax
addl %edx, %eax
sall $2, %eax
movl %eax, -4(%rbp)
movl -4(%rbp), %ecx
movl $1717986919, %edx
movl %ecx, %eax
imull %edx
sarl $3, %edx
movl %ecx, %eax
sarl $31, %eax
subl %eax, %edx
movl %edx, %eax
movl %eax, -4(%rbp)
movl $1, %eax
subl -4(%rbp), %eax
movl %eax, -4(%rbp)
movl $1, %eax
subl -4(%rbp), %eax
movl %eax, -4(%rbp)
movl -4(%rbp), %eax
addq $16, %rsp
popq %rbp
ret
.seh_endproc
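# amazing_func computes x = (n*4 + n) * 4 = 20n, divides by 10 via the usual
# magic-number sequence (1717986919 == 0x66666667, a signed divide by 10),
# then applies x = 1 - x twice, which cancels out: it simply returns 2*n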
.globl fib
.def fib; .scl 2; .type 32; .endef
.seh_proc fib
fib:
pushq %rbp
.seh_pushreg %rbp
pushq %rbx
.seh_pushreg %rbx
subq $40, %rsp
.seh_stackalloc 40
leaq 128(%rsp), %rbp
.seh_setframe %rbp, 128
.seh_endprologue
movl %ecx, -64(%rbp)
movl -64(%rbp), %ecx
call amazing_func
movl %eax, -64(%rbp)
cmpl $1, -64(%rbp)
jg .L4
movl -64(%rbp), %eax
jmp .L5
.L4:
movl -64(%rbp), %eax
subl $1, %eax
movl %eax, %ecx
call fib
movl %eax, %ebx
movl -64(%rbp), %eax
subl $2, %eax
movl %eax, %ecx
call fib
addl %ebx, %eax
.L5:
addq $40, %rsp
popq %rbx
popq %rbp
ret
.seh_endproc
.def __main; .scl 2; .type 32; .endef
.globl main
.def main; .scl 2; .type 32; .endef
.seh_proc main
main:
pushq %rbp
.seh_pushreg %rbp
movq %rsp, %rbp
.seh_setframe %rbp, 0
subq $64, %rsp
.seh_stackalloc 64
.seh_endprologue
call __main
movl $10, -12(%rbp)
movl -12(%rbp), %eax
movl %eax, %ecx
call fib
movl %eax, -4(%rbp)
movl $0, %eax
addq $64, %rsp
popq %rbp
ret
.seh_endproc
.ident "GCC: (x86_64-posix-seh-rev0, Built by MinGW-W64 project) 8.1.0"
|
LittleLucifer1/duck_os
| 1,169
|
os/src/entry.S
|
.section .text.entry
.globl _start
_start:
# hart_id is in a0, a0 --> tp
# pc = 0x8020_0000
# 1.According to hart_id, set sp to the top
la sp, boot_stack_top
mv tp, a0
slli t0, tp, 16 # per-hart kernel stack size is 4096 * 16 bytes (64 KiB), so offset = hart_id << 16
sub sp, sp, t0
# 2.set table & satp
# satp: 8 << 60 | boot_pagetable
la t0, boot_pagetable
li t1, 8 << 60
srli t0, t0, 12
or t0, t0, t1
csrw satp, t0
sfence.vma
# 3.reset stack
# the offset is 0xffff_ffff_0000_0000
li t1, 0xffffffff00000000
add sp, sp, t1
# 4. jump to rust_main (rust_main is still a physical address here, so add the offset)
la t0, rust_main
add t0, t0, t1
jr t0
.section .bss.stack
.globl boot_stack_lower
boot_stack_lower:
# total kernel stack: up to 8 harts, each with 15 pages plus one guard page, i.e. 16 pages (64 KiB) per hart
.space 4096 * 16 * 8
.globl boot_stack_top
boot_stack_top:
.section .data
.align 12
boot_pagetable:
# 0x0000_0000_8000_0000 ---> 0x0000_0000_8000_0000
# 0xffff_ffff_8000_0000 ---> 0x0000_0000_8000_0000
.quad 0
.quad 0
.quad (0x80000 << 10) | 0xcf #VRWXAD vpn L2 = 0b10
.zero 8 * 507
.quad (0x80000 << 10) | 0xcf # vpn L2 = 0b1_1111_1110
.quad 0
|
LittleLucifer1/duck_os
| 1,916
|
os/src/process/trap/trap.S
|
.altmacro
.macro SAVE_GP n
sd x\n, \n*8(sp)
.endm
.macro LOAD_GP n
ld x\n, \n*8(sp)
.endm
.section .text
.globl __alltraps
.globl __restore
.align 2 # BASE in stvec must be 4-byte aligned: its low two bits are implicitly zero
# TODO: nested traps are not handled yet; what happens on U -> S -> S?
__alltraps:
# SPP cannot be tested here: no spare register is available at this point.
# Instead treat sp as a signed number: positive means user space, negative means kernel space.
# From user mode: swap sp and sscratch; from kernel mode: no swap needed.
bgtz sp, __user_to_kernel
sd tp, -1*8(sp) # stash tp in what becomes slot 34 of the trap frame
__trap_entry:
# sp -> kernel stack
addi sp, sp, -35*8
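# trap frame layout (inferred from the offsets below), 35 doublewords:
# [0] unused (x0)  [1] x1/ra  [2] user sp (from sscratch)  [3..31] x3..x31
# [32] sstatus  [33] sepc  [34] tp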
# sd x1 -> sp + 8
sd x1, 1*8(sp)
.set n, 3
.rept 29
SAVE_GP %n
.set n, n+1
.endr
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*8(sp)
sd t1, 33*8(sp)
csrr t2, sscratch
sd t2, 2*8(sp)
ld tp, 34*8(sp)
mv a0, sp
call trap_handler
__restore:
# Return path: kernel to user mode, or kernel to kernel.
# sp -> kernel stack, sscratch -> user stack
# restore sstatus/sepc/user_stack_sp
ld t0, 32*8(sp) # t0 <- sstatus
ld t1, 33*8(sp) # t1 <- sepc
ld t2, 2*8(sp) # t2 <- sp (user stack)
sd tp, 34*8(sp)
csrw sstatus, t0
csrw sepc, t1
csrw sscratch, t2
# SPP is bit 8 of sstatus
# t0 is clobbered here but reloaded by the LOAD_GP loop below, so test first, restore after
andi t0, t0, 0x100
bnez t0, __kernel_to_kernel
ld x1, 1*8(sp)
# skip x2 (sp): already saved
.set n, 3
.rept 29
LOAD_GP %n
.set n, n+1
.endr
addi sp, sp, 35*8
csrrw sp, sscratch, sp
# If SPP was set to User when the initial user trap context was prepared, but a
# kernel interrupt fires before the switch happens, restore takes the user path
# even though this is really kernel -> kernel.
# TODO: could this case be detected up front, effectively giving three branches?
# For now follow maturin's approach and keep it safe.
beqz sp, __idle
sret
__kernel_to_kernel:
# Kernel-to-kernel return: restore context; no FP restore and no sp/sscratch swap needed
ld x1, 1*8(sp)
# skip x2 (sp): already saved
.set n, 3
.rept 29
LOAD_GP %n
.set n, n+1
.endr
addi sp, sp, 35*8 # pop the whole 35*8-byte trap frame
sret
__user_to_kernel:
csrrw sp, sscratch, sp
j __trap_entry
__idle:
csrrw sp, sscratch, sp
sret
|
LittleLucifer1/duck_os
| 17,376
|
dependency/riscv/asm.S
|
#include "asm.h"
.section .text.__ebreak
.global __ebreak
__ebreak:
ebreak
ret
.section .text.__wfi
.global __wfi
__wfi:
wfi
ret
.section .text.__sfence_vma_all
.global __sfence_vma_all
__sfence_vma_all:
sfence.vma
ret
.section .text.__sfence_vma
.global __sfence_vma
__sfence_vma:
sfence.vma a0, a1
ret
// RISC-V hypervisor instructions.
// The switch for enabling LLVM support for asm generation.
// #define LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
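// When the toolchain cannot assemble H-extension mnemonics, each function
// below falls back to the hand-encoded machine word of the same instruction
// (e.g. 1656029299 == 0x62B50073, the encoding of hfence.gvma a0, a1).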
.section .text.__hfence_gvma
.global __hfence_gvma
__hfence_gvma:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
hfence.gvma a0, a1
#else
.word 1656029299
#endif
ret
.section .text.__hfence_vvma
.global __hfence_vvma
__hfence_vvma:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
hfence.vvma a0, a1
#else
.word 582287475
#endif
ret
.section .text.__hlv_b
.global __hlv_b
__hlv_b:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
hlv.b a0, a0
#else
.word 1610958195
#endif
ret
.section .text.__hlv_bu
.global __hlv_bu
__hlv_bu:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
hlv.bu a0, a0
#else
.word 1612006771
#endif
ret
.section .text.__hlv_h
.global __hlv_h
__hlv_h:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
hlv.h a0, a0
#else
.word 1678067059
#endif
ret
.section .text.__hlv_hu
.global __hlv_hu
__hlv_hu:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
hlv.hu a0, a0
#else
.word 1679115635
#endif
ret
.section .text.__hlvx_hu
.global __hlvx_hu
__hlvx_hu:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
hlvx.hu a0, a0
#else
.word 1681212787
#endif
ret
.section .text.__hlv_w
.global __hlv_w
__hlv_w:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
hlv.w a0, a0
#else
.word 1745175923
#endif
ret
.section .text.__hlvx_wu
.global __hlvx_wu
__hlvx_wu:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
hlvx.wu a0, a0
#else
.word 1748321651
#endif
ret
.section .text.__hsv_b
.global __hsv_b
__hsv_b:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
hsv.b a0, a1
#else
.word 1656045683
#endif
ret
.section .text.__hsv_h
.global __hsv_h
__hsv_h:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
hsv.h a0, a1
#else
.word 1723154547
#endif
ret
.section .text.__hsv_w
.global __hsv_w
__hsv_w:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
hsv.w a0, a1
#else
.word 1790263411
#endif
ret
.section .text.__hlv_wu
.global __hlv_wu
__hlv_wu:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
hlv.wu a0, a0
#else
.word 1746224499
#endif
ret
.section .text.__hlv_d
.global __hlv_d
__hlv_d:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
hlv.d a0, a0
#else
.word 1812284787
#endif
ret
.section .text.__hsv_d
.global __hsv_d
__hsv_d:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
hsv.d a0, a1
#else
.word 1857372275
#endif
ret
// User Trap Setup
RW(0x000, ustatus) // User status register
RW(0x004, uie) // User interrupt-enable register
RW(0x005, utvec) // User trap handler base address
// User Trap Handling
RW(0x040, uscratch) // Scratch register for user trap handlers
RW(0x041, uepc) // User exception program counter
RW(0x042, ucause) // User trap cause
RW(0x043, utval) // User bad address or instruction
RW(0x044, uip) // User interrupt pending
// User Floating-Point CSRs
RW(0x001, fflags) // Floating-Point Accrued Exceptions
RW(0x002, frm) // Floating-Point Dynamic Rounding Mode
RW(0x003, fcsr) // Floating-Point Control and Status Register (frm + fflags)
// User Counter/Timers
RO( 0xC00, cycle) // Cycle counter for RDCYCLE instruction
RO( 0xC01, time) // Timer for RDTIME instruction
RO( 0xC02, instret) // Instructions-retired counter for RDINSTRET instruction
RO( 0xC03, hpmcounter3) // Performance-monitoring counter
RO( 0xC04, hpmcounter4) // Performance-monitoring counter
RO( 0xC05, hpmcounter5) // Performance-monitoring counter
RO( 0xC06, hpmcounter6) // Performance-monitoring counter
RO( 0xC07, hpmcounter7) // Performance-monitoring counter
RO( 0xC08, hpmcounter8) // Performance-monitoring counter
RO( 0xC09, hpmcounter9) // Performance-monitoring counter
RO( 0xC0A, hpmcounter10) // Performance-monitoring counter
RO( 0xC0B, hpmcounter11) // Performance-monitoring counter
RO( 0xC0C, hpmcounter12) // Performance-monitoring counter
RO( 0xC0D, hpmcounter13) // Performance-monitoring counter
RO( 0xC0E, hpmcounter14) // Performance-monitoring counter
RO( 0xC0F, hpmcounter15) // Performance-monitoring counter
RO( 0xC10, hpmcounter16) // Performance-monitoring counter
RO( 0xC11, hpmcounter17) // Performance-monitoring counter
RO( 0xC12, hpmcounter18) // Performance-monitoring counter
RO( 0xC13, hpmcounter19) // Performance-monitoring counter
RO( 0xC14, hpmcounter20) // Performance-monitoring counter
RO( 0xC15, hpmcounter21) // Performance-monitoring counter
RO( 0xC16, hpmcounter22) // Performance-monitoring counter
RO( 0xC17, hpmcounter23) // Performance-monitoring counter
RO( 0xC18, hpmcounter24) // Performance-monitoring counter
RO( 0xC19, hpmcounter25) // Performance-monitoring counter
RO( 0xC1A, hpmcounter26) // Performance-monitoring counter
RO( 0xC1B, hpmcounter27) // Performance-monitoring counter
RO( 0xC1C, hpmcounter28) // Performance-monitoring counter
RO( 0xC1D, hpmcounter29) // Performance-monitoring counter
RO( 0xC1E, hpmcounter30) // Performance-monitoring counter
RO( 0xC1F, hpmcounter31) // Performance-monitoring counter
RO32(0xC80, cycleh) // Upper 32 bits of cycle, RV32I only
RO32(0xC81, timeh) // Upper 32 bits of time, RV32I only
RO32(0xC82, instreth) // Upper 32 bits of instret, RV32I only
RO32(0xC83, hpmcounter3h) // Upper 32 bits of hpmcounter3, RV32I only
RO32(0xC84, hpmcounter4h)
RO32(0xC85, hpmcounter5h)
RO32(0xC86, hpmcounter6h)
RO32(0xC87, hpmcounter7h)
RO32(0xC88, hpmcounter8h)
RO32(0xC89, hpmcounter9h)
RO32(0xC8A, hpmcounter10h)
RO32(0xC8B, hpmcounter11h)
RO32(0xC8C, hpmcounter12h)
RO32(0xC8D, hpmcounter13h)
RO32(0xC8E, hpmcounter14h)
RO32(0xC8F, hpmcounter15h)
RO32(0xC90, hpmcounter16h)
RO32(0xC91, hpmcounter17h)
RO32(0xC92, hpmcounter18h)
RO32(0xC93, hpmcounter19h)
RO32(0xC94, hpmcounter20h)
RO32(0xC95, hpmcounter21h)
RO32(0xC96, hpmcounter22h)
RO32(0xC97, hpmcounter23h)
RO32(0xC98, hpmcounter24h)
RO32(0xC99, hpmcounter25h)
RO32(0xC9A, hpmcounter26h)
RO32(0xC9B, hpmcounter27h)
RO32(0xC9C, hpmcounter28h)
RO32(0xC9D, hpmcounter29h)
RO32(0xC9E, hpmcounter30h)
RO32(0xC9F, hpmcounter31h)
// Supervisor Trap Setup
RW(0x100, sstatus) // Supervisor status register
RW(0x102, sedeleg) // Supervisor exception delegation register
RW(0x103, sideleg) // Supervisor interrupt delegation register
RW(0x104, sie) // Supervisor interrupt-enable register
RW(0x105, stvec) // Supervisor trap handler base address
RW(0x106, scounteren) // Supervisor counter enable
// Supervisor Trap Handling
RW(0x140, sscratch) // Scratch register for supervisor trap handlers
RW(0x141, sepc) // Supervisor exception program counter
RW(0x142, scause) // Supervisor trap cause
RW(0x143, stval) // Supervisor bad address or instruction
RW(0x144, sip) // Supervisor interrupt pending
// Supervisor Protection and Translation
RW(0x180, satp) // Supervisor address translation and protection
// Machine Information Registers
RO(0xF11, mvendorid) // Vendor ID
RO(0xF12, marchid) // Architecture ID
RO(0xF13, mimpid) // Implementation ID
RO(0xF14, mhartid) // Hardware thread ID
// Machine Trap Setup
RW(0x300, mstatus) // Machine status register
RW(0x301, misa) // ISA and extensions
RW(0x302, medeleg) // Machine exception delegation register
RW(0x303, mideleg) // Machine interrupt delegation register
RW(0x304, mie) // Machine interrupt-enable register
RW(0x305, mtvec) // Machine trap handler base address
RW(0x306, mcounteren) // Machine counter enable
// Machine Trap Handling
RW(0x340, mscratch) // Scratch register for machine trap handlers
RW(0x341, mepc) // Machine exception program counter
RW(0x342, mcause) // Machine trap cause
RW(0x343, mtval) // Machine bad address or instruction
RW(0x344, mip) // Machine interrupt pending
// Machine Protection and Translation
RW( 0x3A0, pmpcfg0) // Physical memory protection configuration
RW32(0x3A1, pmpcfg1) // Physical memory protection configuration, RV32 only
RW( 0x3A2, pmpcfg2) // Physical memory protection configuration
RW32(0x3A3, pmpcfg3) // Physical memory protection configuration, RV32 only
RW( 0x3B0, pmpaddr0) // Physical memory protection address register
RW( 0x3B1, pmpaddr1) // Physical memory protection address register
RW( 0x3B2, pmpaddr2) // Physical memory protection address register
RW( 0x3B3, pmpaddr3) // Physical memory protection address register
RW( 0x3B4, pmpaddr4) // Physical memory protection address register
RW( 0x3B5, pmpaddr5) // Physical memory protection address register
RW( 0x3B6, pmpaddr6) // Physical memory protection address register
RW( 0x3B7, pmpaddr7) // Physical memory protection address register
RW( 0x3B8, pmpaddr8) // Physical memory protection address register
RW( 0x3B9, pmpaddr9) // Physical memory protection address register
RW( 0x3BA, pmpaddr10) // Physical memory protection address register
RW( 0x3BB, pmpaddr11) // Physical memory protection address register
RW( 0x3BC, pmpaddr12) // Physical memory protection address register
RW( 0x3BD, pmpaddr13) // Physical memory protection address register
RW( 0x3BE, pmpaddr14) // Physical memory protection address register
RW( 0x3BF, pmpaddr15) // Physical memory protection address register
// Machine Counter/Timers
RO( 0xB00, mcycle) // Machine cycle counter
RO( 0xB02, minstret) // Machine instructions-retired counter
RO( 0xB03, mhpmcounter3) // Machine performance-monitoring counter
RO( 0xB04, mhpmcounter4) // Machine performance-monitoring counter
RO( 0xB05, mhpmcounter5) // Machine performance-monitoring counter
RO( 0xB06, mhpmcounter6) // Machine performance-monitoring counter
RO( 0xB07, mhpmcounter7) // Machine performance-monitoring counter
RO( 0xB08, mhpmcounter8) // Machine performance-monitoring counter
RO( 0xB09, mhpmcounter9) // Machine performance-monitoring counter
RO( 0xB0A, mhpmcounter10) // Machine performance-monitoring counter
RO( 0xB0B, mhpmcounter11) // Machine performance-monitoring counter
RO( 0xB0C, mhpmcounter12) // Machine performance-monitoring counter
RO( 0xB0D, mhpmcounter13) // Machine performance-monitoring counter
RO( 0xB0E, mhpmcounter14) // Machine performance-monitoring counter
RO( 0xB0F, mhpmcounter15) // Machine performance-monitoring counter
RO( 0xB10, mhpmcounter16) // Machine performance-monitoring counter
RO( 0xB11, mhpmcounter17) // Machine performance-monitoring counter
RO( 0xB12, mhpmcounter18) // Machine performance-monitoring counter
RO( 0xB13, mhpmcounter19) // Machine performance-monitoring counter
RO( 0xB14, mhpmcounter20) // Machine performance-monitoring counter
RO( 0xB15, mhpmcounter21) // Machine performance-monitoring counter
RO( 0xB16, mhpmcounter22) // Machine performance-monitoring counter
RO( 0xB17, mhpmcounter23) // Machine performance-monitoring counter
RO( 0xB18, mhpmcounter24) // Machine performance-monitoring counter
RO( 0xB19, mhpmcounter25) // Machine performance-monitoring counter
RO( 0xB1A, mhpmcounter26) // Machine performance-monitoring counter
RO( 0xB1B, mhpmcounter27) // Machine performance-monitoring counter
RO( 0xB1C, mhpmcounter28) // Machine performance-monitoring counter
RO( 0xB1D, mhpmcounter29) // Machine performance-monitoring counter
RO( 0xB1E, mhpmcounter30) // Machine performance-monitoring counter
RO( 0xB1F, mhpmcounter31) // Machine performance-monitoring counter
RO32(0xB80, mcycleh) // Upper 32 bits of mcycle, RV32I only
RO32(0xB82, minstreth) // Upper 32 bits of minstret, RV32I only
RO32(0xB83, mhpmcounter3h) // Upper 32 bits of mhpmcounter3, RV32I only
RO32(0xB84, mhpmcounter4h)
RO32(0xB85, mhpmcounter5h)
RO32(0xB86, mhpmcounter6h)
RO32(0xB87, mhpmcounter7h)
RO32(0xB88, mhpmcounter8h)
RO32(0xB89, mhpmcounter9h)
RO32(0xB8A, mhpmcounter10h)
RO32(0xB8B, mhpmcounter11h)
RO32(0xB8C, mhpmcounter12h)
RO32(0xB8D, mhpmcounter13h)
RO32(0xB8E, mhpmcounter14h)
RO32(0xB8F, mhpmcounter15h)
RO32(0xB90, mhpmcounter16h)
RO32(0xB91, mhpmcounter17h)
RO32(0xB92, mhpmcounter18h)
RO32(0xB93, mhpmcounter19h)
RO32(0xB94, mhpmcounter20h)
RO32(0xB95, mhpmcounter21h)
RO32(0xB96, mhpmcounter22h)
RO32(0xB97, mhpmcounter23h)
RO32(0xB98, mhpmcounter24h)
RO32(0xB99, mhpmcounter25h)
RO32(0xB9A, mhpmcounter26h)
RO32(0xB9B, mhpmcounter27h)
RO32(0xB9C, mhpmcounter28h)
RO32(0xB9D, mhpmcounter29h)
RO32(0xB9E, mhpmcounter30h)
RO32(0xB9F, mhpmcounter31h)
RW(0x323, mhpmevent3) // Machine performance-monitoring event selector
RW(0x324, mhpmevent4) // Machine performance-monitoring event selector
RW(0x325, mhpmevent5) // Machine performance-monitoring event selector
RW(0x326, mhpmevent6) // Machine performance-monitoring event selector
RW(0x327, mhpmevent7) // Machine performance-monitoring event selector
RW(0x328, mhpmevent8) // Machine performance-monitoring event selector
RW(0x329, mhpmevent9) // Machine performance-monitoring event selector
RW(0x32A, mhpmevent10) // Machine performance-monitoring event selector
RW(0x32B, mhpmevent11) // Machine performance-monitoring event selector
RW(0x32C, mhpmevent12) // Machine performance-monitoring event selector
RW(0x32D, mhpmevent13) // Machine performance-monitoring event selector
RW(0x32E, mhpmevent14) // Machine performance-monitoring event selector
RW(0x32F, mhpmevent15) // Machine performance-monitoring event selector
RW(0x330, mhpmevent16) // Machine performance-monitoring event selector
RW(0x331, mhpmevent17) // Machine performance-monitoring event selector
RW(0x332, mhpmevent18) // Machine performance-monitoring event selector
RW(0x333, mhpmevent19) // Machine performance-monitoring event selector
RW(0x334, mhpmevent20) // Machine performance-monitoring event selector
RW(0x335, mhpmevent21) // Machine performance-monitoring event selector
RW(0x336, mhpmevent22) // Machine performance-monitoring event selector
RW(0x337, mhpmevent23) // Machine performance-monitoring event selector
RW(0x338, mhpmevent24) // Machine performance-monitoring event selector
RW(0x339, mhpmevent25) // Machine performance-monitoring event selector
RW(0x33A, mhpmevent26) // Machine performance-monitoring event selector
RW(0x33B, mhpmevent27) // Machine performance-monitoring event selector
RW(0x33C, mhpmevent28) // Machine performance-monitoring event selector
RW(0x33D, mhpmevent29) // Machine performance-monitoring event selector
RW(0x33E, mhpmevent30) // Machine performance-monitoring event selector
RW(0x33F, mhpmevent31) // Machine performance-monitoring event selector
// Debug/Trace Registers (shared with Debug Mode)
RW(0x7A0, tselect) // Debug/Trace trigger register select
RW(0x7A1, tdata1) // First Debug/Trace trigger data register
RW(0x7A2, tdata2) // Second Debug/Trace trigger data register
RW(0x7A3, tdata3) // Third Debug/Trace trigger data register
// Debug Mode Registers
RW(0x7B0, dcsr) // Debug control and status register
RW(0x7B1, dpc) // Debug PC
RW(0x7B2, dscratch) // Debug scratch register
// Hypervisor Trap Setup
RW(0x600, hstatus) // Hypervisor status register
RW(0x602, hedeleg) // Hypervisor exception delegation register
RW(0x603, hideleg) // Hypervisor interrupt delegation register
RW(0x604, hie) // Hypervisor interrupt-enable register
RW(0x606, hcounteren) // Hypervisor counter enable
RW(0x607, hgeie) // Hypervisor guest external interrupt-enable register
// Hypervisor Trap Handling
RW(0x643, htval) // Hypervisor bad guest physical address
RW(0x644, hip) // Hypervisor interrupt pending
RW(0x645, hvip) // Hypervisor virtual interrupt pending
RW(0x64a, htinst) // Hypervisor trap instruction (transformed)
RW(0xe12, hgeip) // Hypervisor guest external interrupt pending
// Hypervisor Protection and Translation
RO(0x680, hgatp) // Hypervisor guest address translation and protection
// Debug/Trace Registers
RW(0x6a8, hcontext) // Hypervisor-mode context register
// Hypervisor Counter/Timer Virtualization Registers
RW(0x605, htimedelta) // Delta for VS/VU-mode timer
RW32(0x615, htimedeltah) // Upper 32 bits of htimedelta, RV32 only
// Virtual Supervisor Registers
RW(0x200, vsstatus) // Virtual supervisor status register
RW(0x204, vsie) // Virtual supervisor interrupt-enable register
RW(0x205, vstvec) // Virtual supervisor trap handler base address
RW(0x240, vsscratch) // Virtual supervisor scratch register
RW(0x241, vsepc) // Virtual supervisor exception program counter
RW(0x242, vscause) // Virtual supervisor trap cause
RW(0x243, vstval) // Virtual supervisor bad address or instruction
RW(0x244, vsip) // Virtual supervisor interrupt pending
RW(0x280, vsatp) // Virtual supervisor address translation and protection
|
lkitching/rsdb
| 1,468
|
src/support/reg_write.s
|
.global main
.section .data
hex_format: .asciz "%#x"
float_format: .asciz "%.2f"
long_float_format: .asciz "%.2Lf"
.section .text
.macro trap
# kill(pid, SIGTRAP)
movq $62, %rax
movq %r12, %rdi
movq $5, %rsi
syscall
.endm
main:
push %rbp
movq %rsp, %rbp
# get pid and move to r12
movq $39, %rax
syscall
movq %rax, %r12
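# getpid is syscall 39 on x86-64; the pid is kept in callee-saved r12 so the
# trap macro (kill(pid, SIGTRAP), syscall 62) can reuse it across libc calls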
trap
# print contents of rsi
leaq hex_format(%rip), %rdi
movq $0, %rax
call printf@plt
# fflush(NULL)
movq $0, %rdi
call fflush@plt
trap
# print contents of mm0
movq %mm0, %rsi
leaq hex_format(%rip), %rdi
movq $0, %rax
call printf@plt
# fflush(NULL)
movq $0, %rdi
call fflush@plt
trap
# print contents of xmm0
# %al = 1 tells the varargs callee that one vector register (xmm0) carries an argument
leaq float_format(%rip), %rdi
movq $1, %rax
call printf@plt
# fflush(NULL)
movq $0, %rdi
call fflush@plt
trap
# Print contents of st0
# make room for the long double argument and pop st0 into it (fstpt stores the 80-bit value; 16 bytes keeps the stack aligned)
subq $16, %rsp
fstpt (%rsp)
# print
leaq long_float_format(%rip), %rdi
movq $0, %rax
call printf@plt
# fflush(NULL)
movq $0, %rdi
call fflush@plt
# de-allocate float
addq $16, %rsp
popq %rbp
movq $0, %rax
ret
|
lnxrobots/rcj-soccer-open-gen3
| 24,043
|
Undercarriage/Main_stm/Core/Startup/startup_stm32f427vitx.s
|
/**
******************************************************************************
* @file startup_stm32f427xx.s
* @author MCD Application Team
* @brief STM32F427xx Devices vector table for GCC based toolchains.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M4 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
* @attention
*
* Copyright (c) 2017 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m4
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
/* stack used for SystemInit_ExtMemCtl; always internal RAM used */
/**
* @brief This is the code that gets called when the processor first
* starts execution following a reset event. Only the absolutely
* necessary set is performed, after which the application
* supplied main() routine is called.
* @param None
* @retval : None
*/
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr sp, =_estack /* set stack pointer */
/* Call the clock system initialization function.*/
bl SystemInit
/* Copy the data segment initializers from flash to SRAM */
ldr r0, =_sdata
ldr r1, =_edata
ldr r2, =_sidata
movs r3, #0
b LoopCopyDataInit
CopyDataInit:
ldr r4, [r2, r3]
str r4, [r0, r3]
adds r3, r3, #4
LoopCopyDataInit:
adds r4, r0, r3
cmp r4, r1
bcc CopyDataInit
/* Zero fill the bss segment. */
ldr r2, =_sbss
ldr r4, =_ebss
movs r3, #0
b LoopFillZerobss
FillZerobss:
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
cmp r2, r4
bcc FillZerobss
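/* The two loops above are the usual C runtime bring-up, roughly equivalent to:
   memcpy(&_sdata, &_sidata, (size_t)(&_edata - &_sdata));
   memset(&_sbss, 0, (size_t)(&_ebss - &_sbss)); */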
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
bx lr
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
* @param None
* @retval None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M3. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
*******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word MemManage_Handler
.word BusFault_Handler
.word UsageFault_Handler
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word DebugMon_Handler
.word 0
.word PendSV_Handler
.word SysTick_Handler
/* External Interrupts */
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_IRQHandler /* PVD through EXTI Line detection */
.word TAMP_STAMP_IRQHandler /* Tamper and TimeStamps through the EXTI line */
.word RTC_WKUP_IRQHandler /* RTC Wakeup through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_IRQHandler /* RCC */
.word EXTI0_IRQHandler /* EXTI Line0 */
.word EXTI1_IRQHandler /* EXTI Line1 */
.word EXTI2_IRQHandler /* EXTI Line2 */
.word EXTI3_IRQHandler /* EXTI Line3 */
.word EXTI4_IRQHandler /* EXTI Line4 */
.word DMA1_Stream0_IRQHandler /* DMA1 Stream 0 */
.word DMA1_Stream1_IRQHandler /* DMA1 Stream 1 */
.word DMA1_Stream2_IRQHandler /* DMA1 Stream 2 */
.word DMA1_Stream3_IRQHandler /* DMA1 Stream 3 */
.word DMA1_Stream4_IRQHandler /* DMA1 Stream 4 */
.word DMA1_Stream5_IRQHandler /* DMA1 Stream 5 */
.word DMA1_Stream6_IRQHandler /* DMA1 Stream 6 */
.word ADC_IRQHandler /* ADC1, ADC2 and ADC3s */
.word CAN1_TX_IRQHandler /* CAN1 TX */
.word CAN1_RX0_IRQHandler /* CAN1 RX0 */
.word CAN1_RX1_IRQHandler /* CAN1 RX1 */
.word CAN1_SCE_IRQHandler /* CAN1 SCE */
.word EXTI9_5_IRQHandler /* External Line[9:5]s */
.word TIM1_BRK_TIM9_IRQHandler /* TIM1 Break and TIM9 */
.word TIM1_UP_TIM10_IRQHandler /* TIM1 Update and TIM10 */
.word TIM1_TRG_COM_TIM11_IRQHandler /* TIM1 Trigger and Commutation and TIM11 */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM4_IRQHandler /* TIM4 */
.word I2C1_EV_IRQHandler /* I2C1 Event */
.word I2C1_ER_IRQHandler /* I2C1 Error */
.word I2C2_EV_IRQHandler /* I2C2 Event */
.word I2C2_ER_IRQHandler /* I2C2 Error */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_IRQHandler /* USART3 */
.word EXTI15_10_IRQHandler /* External Line[15:10]s */
.word RTC_Alarm_IRQHandler /* RTC Alarm (A and B) through EXTI Line */
.word OTG_FS_WKUP_IRQHandler /* USB OTG FS Wakeup through EXTI line */
.word TIM8_BRK_TIM12_IRQHandler /* TIM8 Break and TIM12 */
.word TIM8_UP_TIM13_IRQHandler /* TIM8 Update and TIM13 */
.word TIM8_TRG_COM_TIM14_IRQHandler /* TIM8 Trigger and Commutation and TIM14 */
.word TIM8_CC_IRQHandler /* TIM8 Capture Compare */
.word DMA1_Stream7_IRQHandler /* DMA1 Stream7 */
.word FMC_IRQHandler /* FMC */
.word SDIO_IRQHandler /* SDIO */
.word TIM5_IRQHandler /* TIM5 */
.word SPI3_IRQHandler /* SPI3 */
.word UART4_IRQHandler /* UART4 */
.word UART5_IRQHandler /* UART5 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC1&2 underrun errors */
.word TIM7_IRQHandler /* TIM7 */
.word DMA2_Stream0_IRQHandler /* DMA2 Stream 0 */
.word DMA2_Stream1_IRQHandler /* DMA2 Stream 1 */
.word DMA2_Stream2_IRQHandler /* DMA2 Stream 2 */
.word DMA2_Stream3_IRQHandler /* DMA2 Stream 3 */
.word DMA2_Stream4_IRQHandler /* DMA2 Stream 4 */
.word ETH_IRQHandler /* Ethernet */
.word ETH_WKUP_IRQHandler /* Ethernet Wakeup through EXTI line */
.word CAN2_TX_IRQHandler /* CAN2 TX */
.word CAN2_RX0_IRQHandler /* CAN2 RX0 */
.word CAN2_RX1_IRQHandler /* CAN2 RX1 */
.word CAN2_SCE_IRQHandler /* CAN2 SCE */
.word OTG_FS_IRQHandler /* USB OTG FS */
.word DMA2_Stream5_IRQHandler /* DMA2 Stream 5 */
.word DMA2_Stream6_IRQHandler /* DMA2 Stream 6 */
.word DMA2_Stream7_IRQHandler /* DMA2 Stream 7 */
.word USART6_IRQHandler /* USART6 */
.word I2C3_EV_IRQHandler /* I2C3 event */
.word I2C3_ER_IRQHandler /* I2C3 error */
.word OTG_HS_EP1_OUT_IRQHandler /* USB OTG HS End Point 1 Out */
.word OTG_HS_EP1_IN_IRQHandler /* USB OTG HS End Point 1 In */
.word OTG_HS_WKUP_IRQHandler /* USB OTG HS Wakeup through EXTI */
.word OTG_HS_IRQHandler /* USB OTG HS */
.word DCMI_IRQHandler /* DCMI */
.word 0 /* Reserved */
.word HASH_RNG_IRQHandler /* Hash and Rng */
.word FPU_IRQHandler /* FPU */
.word UART7_IRQHandler /* UART7 */
.word UART8_IRQHandler /* UART8 */
.word SPI4_IRQHandler /* SPI4 */
.word SPI5_IRQHandler /* SPI5 */
.word SPI6_IRQHandler /* SPI6 */
.word SAI1_IRQHandler /* SAI1 */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word DMA2D_IRQHandler /* DMA2D */
.size g_pfnVectors, .-g_pfnVectors
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak MemManage_Handler
.thumb_set MemManage_Handler,Default_Handler
.weak BusFault_Handler
.thumb_set BusFault_Handler,Default_Handler
.weak UsageFault_Handler
.thumb_set UsageFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak DebugMon_Handler
.thumb_set DebugMon_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_IRQHandler
.thumb_set PVD_IRQHandler,Default_Handler
.weak TAMP_STAMP_IRQHandler
.thumb_set TAMP_STAMP_IRQHandler,Default_Handler
.weak RTC_WKUP_IRQHandler
.thumb_set RTC_WKUP_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_IRQHandler
.thumb_set RCC_IRQHandler,Default_Handler
.weak EXTI0_IRQHandler
.thumb_set EXTI0_IRQHandler,Default_Handler
.weak EXTI1_IRQHandler
.thumb_set EXTI1_IRQHandler,Default_Handler
.weak EXTI2_IRQHandler
.thumb_set EXTI2_IRQHandler,Default_Handler
.weak EXTI3_IRQHandler
.thumb_set EXTI3_IRQHandler,Default_Handler
.weak EXTI4_IRQHandler
.thumb_set EXTI4_IRQHandler,Default_Handler
.weak DMA1_Stream0_IRQHandler
.thumb_set DMA1_Stream0_IRQHandler,Default_Handler
.weak DMA1_Stream1_IRQHandler
.thumb_set DMA1_Stream1_IRQHandler,Default_Handler
.weak DMA1_Stream2_IRQHandler
.thumb_set DMA1_Stream2_IRQHandler,Default_Handler
.weak DMA1_Stream3_IRQHandler
.thumb_set DMA1_Stream3_IRQHandler,Default_Handler
.weak DMA1_Stream4_IRQHandler
.thumb_set DMA1_Stream4_IRQHandler,Default_Handler
.weak DMA1_Stream5_IRQHandler
.thumb_set DMA1_Stream5_IRQHandler,Default_Handler
.weak DMA1_Stream6_IRQHandler
.thumb_set DMA1_Stream6_IRQHandler,Default_Handler
.weak ADC_IRQHandler
.thumb_set ADC_IRQHandler,Default_Handler
.weak CAN1_TX_IRQHandler
.thumb_set CAN1_TX_IRQHandler,Default_Handler
.weak CAN1_RX0_IRQHandler
.thumb_set CAN1_RX0_IRQHandler,Default_Handler
.weak CAN1_RX1_IRQHandler
.thumb_set CAN1_RX1_IRQHandler,Default_Handler
.weak CAN1_SCE_IRQHandler
.thumb_set CAN1_SCE_IRQHandler,Default_Handler
.weak EXTI9_5_IRQHandler
.thumb_set EXTI9_5_IRQHandler,Default_Handler
.weak TIM1_BRK_TIM9_IRQHandler
.thumb_set TIM1_BRK_TIM9_IRQHandler,Default_Handler
.weak TIM1_UP_TIM10_IRQHandler
.thumb_set TIM1_UP_TIM10_IRQHandler,Default_Handler
.weak TIM1_TRG_COM_TIM11_IRQHandler
.thumb_set TIM1_TRG_COM_TIM11_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM4_IRQHandler
.thumb_set TIM4_IRQHandler,Default_Handler
.weak I2C1_EV_IRQHandler
.thumb_set I2C1_EV_IRQHandler,Default_Handler
.weak I2C1_ER_IRQHandler
.thumb_set I2C1_ER_IRQHandler,Default_Handler
.weak I2C2_EV_IRQHandler
.thumb_set I2C2_EV_IRQHandler,Default_Handler
.weak I2C2_ER_IRQHandler
.thumb_set I2C2_ER_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_IRQHandler
.thumb_set USART3_IRQHandler,Default_Handler
.weak EXTI15_10_IRQHandler
.thumb_set EXTI15_10_IRQHandler,Default_Handler
.weak RTC_Alarm_IRQHandler
.thumb_set RTC_Alarm_IRQHandler,Default_Handler
.weak OTG_FS_WKUP_IRQHandler
.thumb_set OTG_FS_WKUP_IRQHandler,Default_Handler
.weak TIM8_BRK_TIM12_IRQHandler
.thumb_set TIM8_BRK_TIM12_IRQHandler,Default_Handler
.weak TIM8_UP_TIM13_IRQHandler
.thumb_set TIM8_UP_TIM13_IRQHandler,Default_Handler
.weak TIM8_TRG_COM_TIM14_IRQHandler
.thumb_set TIM8_TRG_COM_TIM14_IRQHandler,Default_Handler
.weak TIM8_CC_IRQHandler
.thumb_set TIM8_CC_IRQHandler,Default_Handler
.weak DMA1_Stream7_IRQHandler
.thumb_set DMA1_Stream7_IRQHandler,Default_Handler
.weak FMC_IRQHandler
.thumb_set FMC_IRQHandler,Default_Handler
.weak SDIO_IRQHandler
.thumb_set SDIO_IRQHandler,Default_Handler
.weak TIM5_IRQHandler
.thumb_set TIM5_IRQHandler,Default_Handler
.weak SPI3_IRQHandler
.thumb_set SPI3_IRQHandler,Default_Handler
.weak UART4_IRQHandler
.thumb_set UART4_IRQHandler,Default_Handler
.weak UART5_IRQHandler
.thumb_set UART5_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak DMA2_Stream0_IRQHandler
.thumb_set DMA2_Stream0_IRQHandler,Default_Handler
.weak DMA2_Stream1_IRQHandler
.thumb_set DMA2_Stream1_IRQHandler,Default_Handler
.weak DMA2_Stream2_IRQHandler
.thumb_set DMA2_Stream2_IRQHandler,Default_Handler
.weak DMA2_Stream3_IRQHandler
.thumb_set DMA2_Stream3_IRQHandler,Default_Handler
.weak DMA2_Stream4_IRQHandler
.thumb_set DMA2_Stream4_IRQHandler,Default_Handler
.weak ETH_IRQHandler
.thumb_set ETH_IRQHandler,Default_Handler
.weak ETH_WKUP_IRQHandler
.thumb_set ETH_WKUP_IRQHandler,Default_Handler
.weak CAN2_TX_IRQHandler
.thumb_set CAN2_TX_IRQHandler,Default_Handler
.weak CAN2_RX0_IRQHandler
.thumb_set CAN2_RX0_IRQHandler,Default_Handler
.weak CAN2_RX1_IRQHandler
.thumb_set CAN2_RX1_IRQHandler,Default_Handler
.weak CAN2_SCE_IRQHandler
.thumb_set CAN2_SCE_IRQHandler,Default_Handler
.weak OTG_FS_IRQHandler
.thumb_set OTG_FS_IRQHandler,Default_Handler
.weak DMA2_Stream5_IRQHandler
.thumb_set DMA2_Stream5_IRQHandler,Default_Handler
.weak DMA2_Stream6_IRQHandler
.thumb_set DMA2_Stream6_IRQHandler,Default_Handler
.weak DMA2_Stream7_IRQHandler
.thumb_set DMA2_Stream7_IRQHandler,Default_Handler
.weak USART6_IRQHandler
.thumb_set USART6_IRQHandler,Default_Handler
.weak I2C3_EV_IRQHandler
.thumb_set I2C3_EV_IRQHandler,Default_Handler
.weak I2C3_ER_IRQHandler
.thumb_set I2C3_ER_IRQHandler,Default_Handler
.weak OTG_HS_EP1_OUT_IRQHandler
.thumb_set OTG_HS_EP1_OUT_IRQHandler,Default_Handler
.weak OTG_HS_EP1_IN_IRQHandler
.thumb_set OTG_HS_EP1_IN_IRQHandler,Default_Handler
.weak OTG_HS_WKUP_IRQHandler
.thumb_set OTG_HS_WKUP_IRQHandler,Default_Handler
.weak OTG_HS_IRQHandler
.thumb_set OTG_HS_IRQHandler,Default_Handler
.weak DCMI_IRQHandler
.thumb_set DCMI_IRQHandler,Default_Handler
.weak HASH_RNG_IRQHandler
.thumb_set HASH_RNG_IRQHandler,Default_Handler
.weak FPU_IRQHandler
.thumb_set FPU_IRQHandler,Default_Handler
.weak UART7_IRQHandler
.thumb_set UART7_IRQHandler,Default_Handler
.weak UART8_IRQHandler
.thumb_set UART8_IRQHandler,Default_Handler
.weak SPI4_IRQHandler
.thumb_set SPI4_IRQHandler,Default_Handler
.weak SPI5_IRQHandler
.thumb_set SPI5_IRQHandler,Default_Handler
.weak SPI6_IRQHandler
.thumb_set SPI6_IRQHandler,Default_Handler
.weak SAI1_IRQHandler
.thumb_set SAI1_IRQHandler,Default_Handler
.weak DMA2D_IRQHandler
.thumb_set DMA2D_IRQHandler,Default_Handler
|
lnxrobots/rcj-soccer-open-gen3
| 15,535
|
Undercarriage/Bottom_stm/Core/Startup/startup_stm32g474retx.s
|
/**
******************************************************************************
* @file startup_stm32g474xx.s
* @author MCD Application Team
* @brief STM32G474xx devices vector table for GCC based toolchains.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address,
* - Configure the clock system
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M4 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
* @attention
*
* Copyright (c) 2019 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m4
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
.equ BootRAM, 0xF1E0F85F
/**
* @brief This is the code that gets called when the processor first
* starts execution following a reset event. Only the absolutely
* necessary set is performed, after which the application
* supplied main() routine is called.
* @param None
* @retval : None
*/
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Call the clock system initialization function.*/
bl SystemInit
/* Copy the data segment initializers from flash to SRAM */
ldr r0, =_sdata
ldr r1, =_edata
ldr r2, =_sidata
movs r3, #0
b LoopCopyDataInit
CopyDataInit:
ldr r4, [r2, r3]
str r4, [r0, r3]
adds r3, r3, #4
LoopCopyDataInit:
adds r4, r0, r3
cmp r4, r1
bcc CopyDataInit
/* Zero fill the bss segment. */
ldr r2, =_sbss
ldr r4, =_ebss
movs r3, #0
b LoopFillZerobss
FillZerobss:
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
cmp r2, r4
bcc FillZerobss
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex-M4. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word MemManage_Handler
.word BusFault_Handler
.word UsageFault_Handler
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word DebugMon_Handler
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler
.word PVD_PVM_IRQHandler
.word RTC_TAMP_LSECSS_IRQHandler
.word RTC_WKUP_IRQHandler
.word FLASH_IRQHandler
.word RCC_IRQHandler
.word EXTI0_IRQHandler
.word EXTI1_IRQHandler
.word EXTI2_IRQHandler
.word EXTI3_IRQHandler
.word EXTI4_IRQHandler
.word DMA1_Channel1_IRQHandler
.word DMA1_Channel2_IRQHandler
.word DMA1_Channel3_IRQHandler
.word DMA1_Channel4_IRQHandler
.word DMA1_Channel5_IRQHandler
.word DMA1_Channel6_IRQHandler
.word DMA1_Channel7_IRQHandler
.word ADC1_2_IRQHandler
.word USB_HP_IRQHandler
.word USB_LP_IRQHandler
.word FDCAN1_IT0_IRQHandler
.word FDCAN1_IT1_IRQHandler
.word EXTI9_5_IRQHandler
.word TIM1_BRK_TIM15_IRQHandler
.word TIM1_UP_TIM16_IRQHandler
.word TIM1_TRG_COM_TIM17_IRQHandler
.word TIM1_CC_IRQHandler
.word TIM2_IRQHandler
.word TIM3_IRQHandler
.word TIM4_IRQHandler
.word I2C1_EV_IRQHandler
.word I2C1_ER_IRQHandler
.word I2C2_EV_IRQHandler
.word I2C2_ER_IRQHandler
.word SPI1_IRQHandler
.word SPI2_IRQHandler
.word USART1_IRQHandler
.word USART2_IRQHandler
.word USART3_IRQHandler
.word EXTI15_10_IRQHandler
.word RTC_Alarm_IRQHandler
.word USBWakeUp_IRQHandler
.word TIM8_BRK_IRQHandler
.word TIM8_UP_IRQHandler
.word TIM8_TRG_COM_IRQHandler
.word TIM8_CC_IRQHandler
.word ADC3_IRQHandler
.word FMC_IRQHandler
.word LPTIM1_IRQHandler
.word TIM5_IRQHandler
.word SPI3_IRQHandler
.word UART4_IRQHandler
.word UART5_IRQHandler
.word TIM6_DAC_IRQHandler
.word TIM7_DAC_IRQHandler
.word DMA2_Channel1_IRQHandler
.word DMA2_Channel2_IRQHandler
.word DMA2_Channel3_IRQHandler
.word DMA2_Channel4_IRQHandler
.word DMA2_Channel5_IRQHandler
.word ADC4_IRQHandler
.word ADC5_IRQHandler
.word UCPD1_IRQHandler
.word COMP1_2_3_IRQHandler
.word COMP4_5_6_IRQHandler
.word COMP7_IRQHandler
.word HRTIM1_Master_IRQHandler
.word HRTIM1_TIMA_IRQHandler
.word HRTIM1_TIMB_IRQHandler
.word HRTIM1_TIMC_IRQHandler
.word HRTIM1_TIMD_IRQHandler
.word HRTIM1_TIME_IRQHandler
.word HRTIM1_FLT_IRQHandler
.word HRTIM1_TIMF_IRQHandler
.word CRS_IRQHandler
.word SAI1_IRQHandler
.word TIM20_BRK_IRQHandler
.word TIM20_UP_IRQHandler
.word TIM20_TRG_COM_IRQHandler
.word TIM20_CC_IRQHandler
.word FPU_IRQHandler
.word I2C4_EV_IRQHandler
.word I2C4_ER_IRQHandler
.word SPI4_IRQHandler
.word 0
.word FDCAN2_IT0_IRQHandler
.word FDCAN2_IT1_IRQHandler
.word FDCAN3_IT0_IRQHandler
.word FDCAN3_IT1_IRQHandler
.word RNG_IRQHandler
.word LPUART1_IRQHandler
.word I2C3_EV_IRQHandler
.word I2C3_ER_IRQHandler
.word DMAMUX_OVR_IRQHandler
.word QUADSPI_IRQHandler
.word DMA1_Channel8_IRQHandler
.word DMA2_Channel6_IRQHandler
.word DMA2_Channel7_IRQHandler
.word DMA2_Channel8_IRQHandler
.word CORDIC_IRQHandler
.word FMAC_IRQHandler
.size g_pfnVectors, .-g_pfnVectors
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak MemManage_Handler
.thumb_set MemManage_Handler,Default_Handler
.weak BusFault_Handler
.thumb_set BusFault_Handler,Default_Handler
.weak UsageFault_Handler
.thumb_set UsageFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak DebugMon_Handler
.thumb_set DebugMon_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_PVM_IRQHandler
.thumb_set PVD_PVM_IRQHandler,Default_Handler
.weak RTC_TAMP_LSECSS_IRQHandler
.thumb_set RTC_TAMP_LSECSS_IRQHandler,Default_Handler
.weak RTC_WKUP_IRQHandler
.thumb_set RTC_WKUP_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_IRQHandler
.thumb_set RCC_IRQHandler,Default_Handler
.weak EXTI0_IRQHandler
.thumb_set EXTI0_IRQHandler,Default_Handler
.weak EXTI1_IRQHandler
.thumb_set EXTI1_IRQHandler,Default_Handler
.weak EXTI2_IRQHandler
.thumb_set EXTI2_IRQHandler,Default_Handler
.weak EXTI3_IRQHandler
.thumb_set EXTI3_IRQHandler,Default_Handler
.weak EXTI4_IRQHandler
.thumb_set EXTI4_IRQHandler,Default_Handler
.weak DMA1_Channel1_IRQHandler
.thumb_set DMA1_Channel1_IRQHandler,Default_Handler
.weak DMA1_Channel2_IRQHandler
.thumb_set DMA1_Channel2_IRQHandler,Default_Handler
.weak DMA1_Channel3_IRQHandler
.thumb_set DMA1_Channel3_IRQHandler,Default_Handler
.weak DMA1_Channel4_IRQHandler
.thumb_set DMA1_Channel4_IRQHandler,Default_Handler
.weak DMA1_Channel5_IRQHandler
.thumb_set DMA1_Channel5_IRQHandler,Default_Handler
.weak DMA1_Channel6_IRQHandler
.thumb_set DMA1_Channel6_IRQHandler,Default_Handler
.weak DMA1_Channel7_IRQHandler
.thumb_set DMA1_Channel7_IRQHandler,Default_Handler
.weak ADC1_2_IRQHandler
.thumb_set ADC1_2_IRQHandler,Default_Handler
.weak USB_HP_IRQHandler
.thumb_set USB_HP_IRQHandler,Default_Handler
.weak USB_LP_IRQHandler
.thumb_set USB_LP_IRQHandler,Default_Handler
.weak FDCAN1_IT0_IRQHandler
.thumb_set FDCAN1_IT0_IRQHandler,Default_Handler
.weak FDCAN1_IT1_IRQHandler
.thumb_set FDCAN1_IT1_IRQHandler,Default_Handler
.weak EXTI9_5_IRQHandler
.thumb_set EXTI9_5_IRQHandler,Default_Handler
.weak TIM1_BRK_TIM15_IRQHandler
.thumb_set TIM1_BRK_TIM15_IRQHandler,Default_Handler
.weak TIM1_UP_TIM16_IRQHandler
.thumb_set TIM1_UP_TIM16_IRQHandler,Default_Handler
.weak TIM1_TRG_COM_TIM17_IRQHandler
.thumb_set TIM1_TRG_COM_TIM17_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM4_IRQHandler
.thumb_set TIM4_IRQHandler,Default_Handler
.weak I2C1_EV_IRQHandler
.thumb_set I2C1_EV_IRQHandler,Default_Handler
.weak I2C1_ER_IRQHandler
.thumb_set I2C1_ER_IRQHandler,Default_Handler
.weak I2C2_EV_IRQHandler
.thumb_set I2C2_EV_IRQHandler,Default_Handler
.weak I2C2_ER_IRQHandler
.thumb_set I2C2_ER_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_IRQHandler
.thumb_set USART3_IRQHandler,Default_Handler
.weak EXTI15_10_IRQHandler
.thumb_set EXTI15_10_IRQHandler,Default_Handler
.weak RTC_Alarm_IRQHandler
.thumb_set RTC_Alarm_IRQHandler,Default_Handler
.weak USBWakeUp_IRQHandler
.thumb_set USBWakeUp_IRQHandler,Default_Handler
.weak TIM8_BRK_IRQHandler
.thumb_set TIM8_BRK_IRQHandler,Default_Handler
.weak TIM8_UP_IRQHandler
.thumb_set TIM8_UP_IRQHandler,Default_Handler
.weak TIM8_TRG_COM_IRQHandler
.thumb_set TIM8_TRG_COM_IRQHandler,Default_Handler
.weak TIM8_CC_IRQHandler
.thumb_set TIM8_CC_IRQHandler,Default_Handler
.weak ADC3_IRQHandler
.thumb_set ADC3_IRQHandler,Default_Handler
.weak FMC_IRQHandler
.thumb_set FMC_IRQHandler,Default_Handler
.weak LPTIM1_IRQHandler
.thumb_set LPTIM1_IRQHandler,Default_Handler
.weak TIM5_IRQHandler
.thumb_set TIM5_IRQHandler,Default_Handler
.weak SPI3_IRQHandler
.thumb_set SPI3_IRQHandler,Default_Handler
.weak UART4_IRQHandler
.thumb_set UART4_IRQHandler,Default_Handler
.weak UART5_IRQHandler
.thumb_set UART5_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_DAC_IRQHandler
.thumb_set TIM7_DAC_IRQHandler,Default_Handler
.weak DMA2_Channel1_IRQHandler
.thumb_set DMA2_Channel1_IRQHandler,Default_Handler
.weak DMA2_Channel2_IRQHandler
.thumb_set DMA2_Channel2_IRQHandler,Default_Handler
.weak DMA2_Channel3_IRQHandler
.thumb_set DMA2_Channel3_IRQHandler,Default_Handler
.weak DMA2_Channel4_IRQHandler
.thumb_set DMA2_Channel4_IRQHandler,Default_Handler
.weak DMA2_Channel5_IRQHandler
.thumb_set DMA2_Channel5_IRQHandler,Default_Handler
.weak ADC4_IRQHandler
.thumb_set ADC4_IRQHandler,Default_Handler
.weak ADC5_IRQHandler
.thumb_set ADC5_IRQHandler,Default_Handler
.weak UCPD1_IRQHandler
.thumb_set UCPD1_IRQHandler,Default_Handler
.weak COMP1_2_3_IRQHandler
.thumb_set COMP1_2_3_IRQHandler,Default_Handler
.weak COMP4_5_6_IRQHandler
.thumb_set COMP4_5_6_IRQHandler,Default_Handler
.weak COMP7_IRQHandler
.thumb_set COMP7_IRQHandler,Default_Handler
.weak HRTIM1_Master_IRQHandler
.thumb_set HRTIM1_Master_IRQHandler,Default_Handler
.weak HRTIM1_TIMA_IRQHandler
.thumb_set HRTIM1_TIMA_IRQHandler,Default_Handler
.weak HRTIM1_TIMB_IRQHandler
.thumb_set HRTIM1_TIMB_IRQHandler,Default_Handler
.weak HRTIM1_TIMC_IRQHandler
.thumb_set HRTIM1_TIMC_IRQHandler,Default_Handler
.weak HRTIM1_TIMD_IRQHandler
.thumb_set HRTIM1_TIMD_IRQHandler,Default_Handler
.weak HRTIM1_TIME_IRQHandler
.thumb_set HRTIM1_TIME_IRQHandler,Default_Handler
.weak HRTIM1_FLT_IRQHandler
.thumb_set HRTIM1_FLT_IRQHandler,Default_Handler
.weak HRTIM1_TIMF_IRQHandler
.thumb_set HRTIM1_TIMF_IRQHandler,Default_Handler
.weak CRS_IRQHandler
.thumb_set CRS_IRQHandler,Default_Handler
.weak SAI1_IRQHandler
.thumb_set SAI1_IRQHandler,Default_Handler
.weak TIM20_BRK_IRQHandler
.thumb_set TIM20_BRK_IRQHandler,Default_Handler
.weak TIM20_UP_IRQHandler
.thumb_set TIM20_UP_IRQHandler,Default_Handler
.weak TIM20_TRG_COM_IRQHandler
.thumb_set TIM20_TRG_COM_IRQHandler,Default_Handler
.weak TIM20_CC_IRQHandler
.thumb_set TIM20_CC_IRQHandler,Default_Handler
.weak FPU_IRQHandler
.thumb_set FPU_IRQHandler,Default_Handler
.weak I2C4_EV_IRQHandler
.thumb_set I2C4_EV_IRQHandler,Default_Handler
.weak I2C4_ER_IRQHandler
.thumb_set I2C4_ER_IRQHandler,Default_Handler
.weak SPI4_IRQHandler
.thumb_set SPI4_IRQHandler,Default_Handler
.weak FDCAN2_IT0_IRQHandler
.thumb_set FDCAN2_IT0_IRQHandler,Default_Handler
.weak FDCAN2_IT1_IRQHandler
.thumb_set FDCAN2_IT1_IRQHandler,Default_Handler
.weak FDCAN3_IT0_IRQHandler
.thumb_set FDCAN3_IT0_IRQHandler,Default_Handler
.weak FDCAN3_IT1_IRQHandler
.thumb_set FDCAN3_IT1_IRQHandler,Default_Handler
.weak RNG_IRQHandler
.thumb_set RNG_IRQHandler,Default_Handler
.weak LPUART1_IRQHandler
.thumb_set LPUART1_IRQHandler,Default_Handler
.weak I2C3_EV_IRQHandler
.thumb_set I2C3_EV_IRQHandler,Default_Handler
.weak I2C3_ER_IRQHandler
.thumb_set I2C3_ER_IRQHandler,Default_Handler
.weak DMAMUX_OVR_IRQHandler
.thumb_set DMAMUX_OVR_IRQHandler,Default_Handler
.weak QUADSPI_IRQHandler
.thumb_set QUADSPI_IRQHandler,Default_Handler
.weak DMA1_Channel8_IRQHandler
.thumb_set DMA1_Channel8_IRQHandler,Default_Handler
.weak DMA2_Channel6_IRQHandler
.thumb_set DMA2_Channel6_IRQHandler,Default_Handler
.weak DMA2_Channel7_IRQHandler
.thumb_set DMA2_Channel7_IRQHandler,Default_Handler
.weak DMA2_Channel8_IRQHandler
.thumb_set DMA2_Channel8_IRQHandler,Default_Handler
.weak CORDIC_IRQHandler
.thumb_set CORDIC_IRQHandler,Default_Handler
.weak FMAC_IRQHandler
.thumb_set FMAC_IRQHandler,Default_Handler
|
LRenzo0801/ittapi
| 12,448
|
src/ittnotify/ittptmark32.S
|
/*
Copyright (C) 2017-2025 Intel Corporation
SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
*/
// /////////////////////////////////////////////////////////////////////////
////// Intel Processor Trace Marker Functionality
////////////////////////////////////////////////////////////////////////////
.text
.align 16
.globl __itt_pt_mark
.globl __itt_pt_event
.globl __itt_pt_mark_event
.globl __itt_pt_mark_threshold
.globl __itt_pt_byte
.globl __itt_pt_write
/// void __itt_pt_mark(unsigned char index);
__itt_pt_mark:
movzbl 4(%esp), %eax
// and $0xff, %eax
lea __itt_pt_mark_call_table(,%eax,4), %eax
jmp *%eax
.align 4
.long 0, 1, 2, 3 // GUID
.long 0xfadefade
__itt_pt_mark_call_table:
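// Each 4-byte slot below is a plain ret (0xC3) followed by a 3-byte
// ret $index (0xC2 imm16), matching the 0x0000c2c3 pattern in the
// commented-out .fill. The indirect jmp above lands on the plain ret, so the
// call returns immediately; the branch target address that Intel PT records
// in the trace encodes the marker index.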
/// .fill 256,4,(0x0000c2c3 | (( . - __itt_pt_mark_call_table) << 14))
ret
ret $0x0
ret
ret $0x1
ret
ret $0x2
ret
ret $0x3
ret
ret $0x4
ret
ret $0x5
ret
ret $0x6
ret
ret $0x7
ret
ret $0x8
ret
ret $0x9
ret
ret $0xa
ret
ret $0xb
ret
ret $0xc
ret
ret $0xd
ret
ret $0xe
ret
ret $0xf
ret
ret $0x10
ret
ret $0x11
ret
ret $0x12
ret
ret $0x13
ret
ret $0x14
ret
ret $0x15
ret
ret $0x16
ret
ret $0x17
ret
ret $0x18
ret
ret $0x19
ret
ret $0x1a
ret
ret $0x1b
ret
ret $0x1c
ret
ret $0x1d
ret
ret $0x1e
ret
ret $0x1f
ret
ret $0x20
ret
ret $0x21
ret
ret $0x22
ret
ret $0x23
ret
ret $0x24
ret
ret $0x25
ret
ret $0x26
ret
ret $0x27
ret
ret $0x28
ret
ret $0x29
ret
ret $0x2a
ret
ret $0x2b
ret
ret $0x2c
ret
ret $0x2d
ret
ret $0x2e
ret
ret $0x2f
ret
ret $0x30
ret
ret $0x31
ret
ret $0x32
ret
ret $0x33
ret
ret $0x34
ret
ret $0x35
ret
ret $0x36
ret
ret $0x37
ret
ret $0x38
ret
ret $0x39
ret
ret $0x3a
ret
ret $0x3b
ret
ret $0x3c
ret
ret $0x3d
ret
ret $0x3e
ret
ret $0x3f
ret
ret $0x40
ret
ret $0x41
ret
ret $0x42
ret
ret $0x43
ret
ret $0x44
ret
ret $0x45
ret
ret $0x46
ret
ret $0x47
ret
ret $0x48
ret
ret $0x49
ret
ret $0x4a
ret
ret $0x4b
ret
ret $0x4c
ret
ret $0x4d
ret
ret $0x4e
ret
ret $0x4f
ret
ret $0x50
ret
ret $0x51
ret
ret $0x52
ret
ret $0x53
ret
ret $0x54
ret
ret $0x55
ret
ret $0x56
ret
ret $0x57
ret
ret $0x58
ret
ret $0x59
ret
ret $0x5a
ret
ret $0x5b
ret
ret $0x5c
ret
ret $0x5d
ret
ret $0x5e
ret
ret $0x5f
ret
ret $0x60
ret
ret $0x61
ret
ret $0x62
ret
ret $0x63
ret
ret $0x64
ret
ret $0x65
ret
ret $0x66
ret
ret $0x67
ret
ret $0x68
ret
ret $0x69
ret
ret $0x6a
ret
ret $0x6b
ret
ret $0x6c
ret
ret $0x6d
ret
ret $0x6e
ret
ret $0x6f
ret
ret $0x70
ret
ret $0x71
ret
ret $0x72
ret
ret $0x73
ret
ret $0x74
ret
ret $0x75
ret
ret $0x76
ret
ret $0x77
ret
ret $0x78
ret
ret $0x79
ret
ret $0x7a
ret
ret $0x7b
ret
ret $0x7c
ret
ret $0x7d
ret
ret $0x7e
ret
ret $0x7f
ret
ret $0x80
ret
ret $0x81
ret
ret $0x82
ret
ret $0x83
ret
ret $0x84
ret
ret $0x85
ret
ret $0x86
ret
ret $0x87
ret
ret $0x88
ret
ret $0x89
ret
ret $0x8a
ret
ret $0x8b
ret
ret $0x8c
ret
ret $0x8d
ret
ret $0x8e
ret
ret $0x8f
ret
ret $0x90
ret
ret $0x91
ret
ret $0x92
ret
ret $0x93
ret
ret $0x94
ret
ret $0x95
ret
ret $0x96
ret
ret $0x97
ret
ret $0x98
ret
ret $0x99
ret
ret $0x9a
ret
ret $0x9b
ret
ret $0x9c
ret
ret $0x9d
ret
ret $0x9e
ret
ret $0x9f
ret
ret $0xa0
ret
ret $0xa1
ret
ret $0xa2
ret
ret $0xa3
ret
ret $0xa4
ret
ret $0xa5
ret
ret $0xa6
ret
ret $0xa7
ret
ret $0xa8
ret
ret $0xa9
ret
ret $0xaa
ret
ret $0xab
ret
ret $0xac
ret
ret $0xad
ret
ret $0xae
ret
ret $0xaf
ret
ret $0xb0
ret
ret $0xb1
ret
ret $0xb2
ret
ret $0xb3
ret
ret $0xb4
ret
ret $0xb5
ret
ret $0xb6
ret
ret $0xb7
ret
ret $0xb8
ret
ret $0xb9
ret
ret $0xba
ret
ret $0xbb
ret
ret $0xbc
ret
ret $0xbd
ret
ret $0xbe
ret
ret $0xbf
ret
ret $0xc0
ret
ret $0xc1
ret
ret $0xc2
ret
ret $0xc3
ret
ret $0xc4
ret
ret $0xc5
ret
ret $0xc6
ret
ret $0xc7
ret
ret $0xc8
ret
ret $0xc9
ret
ret $0xca
ret
ret $0xcb
ret
ret $0xcc
ret
ret $0xcd
ret
ret $0xce
ret
ret $0xcf
ret
ret $0xd0
ret
ret $0xd1
ret
ret $0xd2
ret
ret $0xd3
ret
ret $0xd4
ret
ret $0xd5
ret
ret $0xd6
ret
ret $0xd7
ret
ret $0xd8
ret
ret $0xd9
ret
ret $0xda
ret
ret $0xdb
ret
ret $0xdc
ret
ret $0xdd
ret
ret $0xde
ret
ret $0xdf
ret
ret $0xe0
ret
ret $0xe1
ret
ret $0xe2
ret
ret $0xe3
ret
ret $0xe4
ret
ret $0xe5
ret
ret $0xe6
ret
ret $0xe7
ret
ret $0xe8
ret
ret $0xe9
ret
ret $0xea
ret
ret $0xeb
ret
ret $0xec
ret
ret $0xed
ret
ret $0xee
ret
ret $0xef
ret
ret $0xf0
ret
ret $0xf1
ret
ret $0xf2
ret
ret $0xf3
ret
ret $0xf4
ret
ret $0xf5
ret
ret $0xf6
ret
ret $0xf7
ret
ret $0xf8
ret
ret $0xf9
ret
ret $0xfa
ret
ret $0xfb
ret
ret $0xfc
ret
ret $0xfd
ret
ret $0xfe
ret
ret $0xff
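// Table layout (inferred from the code above, for reference): each entry is
// a 1-byte `ret` (0xC3) followed by a 3-byte `ret $imm16` (0xC2 imm16),
// 4 bytes per entry, matching the index scaling in
// `lea __itt_pt_mark_call_table(,%eax,4), %eax` and the commented-out
// `.fill 256,4,...` above. E.g. index 0x10 jumps to table + 0x40, the `ret`
// just before `ret $0x10`, so every index yields a distinct return address
// in the Intel PT trace.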
.align 16
__itt_pt_byte:
movl 4(%esp), %ecx
__itt_pt_byte_:
and $0xff, %ecx
lea __itt_pt_byte_call_table(,%ecx,1), %ecx
jmp *%ecx
.align 4
.long 0, 1, 2, 3 // GUID
.long 0xfadedeaf
__itt_pt_byte_call_table:
.fill 256,1,0xc3
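// Here each entry is a single 1-byte `ret` (0xC3), so the index is scaled
// by 1 rather than 4; the byte value is encoded entirely by which of the
// 256 `ret` instructions gets executed.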
.align 16
__itt_pt_event:
push %ecx
mov 8(%esp), %ecx
rdpmc
mov %al,%cl
call __itt_pt_byte_
shr $8,%eax
mov %al,%cl
call __itt_pt_byte_
shr $8,%eax
mov %al,%cl
call __itt_pt_byte_
shr $8,%eax
mov %al,%cl
call __itt_pt_byte_
mov %dl,%cl
call __itt_pt_byte_
shr $8,%edx
mov %dl,%cl
call __itt_pt_byte_
shr $8,%edx
mov %dl,%cl
call __itt_pt_byte_
shr $8,%edx
mov %dl,%cl
call __itt_pt_byte_
pop %ecx
ret
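// For reference: rdpmc returns the counter selected by ECX with the low
// 32 bits in EAX and the high 32 bits in EDX, so the sequence above emits
// the full 64-bit counter value through __itt_pt_byte_, one byte at a
// time, least-significant byte first.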
.align 16
__itt_pt_mark_event:
testl $1,4(%esp)
jnz odd
pushl $0
call __itt_pt_event
add $4,%esp
jmp __itt_pt_mark
odd:
pushl 4(%esp)
call __itt_pt_mark
add $4,%esp
movl $0,4(%esp)
jmp __itt_pt_event
.align 16
__itt_pt_flush:
lea __itt_pt_mark_flush_1,%eax
jmp *%eax
.align 16
nop
__itt_pt_mark_flush_1:
lea __itt_pt_mark_flush_2,%eax
jmp *%eax
.align 16
nop
nop
__itt_pt_mark_flush_2:
lea __itt_pt_mark_flush_3,%eax
jmp *%eax
.align 16
nop
nop
nop
__itt_pt_mark_flush_3:
ret
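// Layout note (for reference): each flush label sits one, two, or three
// nops past a 16-byte boundary and is reached through an indirect `jmp`,
// so this routine produces a short chain of indirect branches to
// deliberately staggered targets.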
.align 16
// int __itt_pt_mark_threshold(unsigned char index, unsigned long long* tmp, int threshold);
__itt_pt_mark_threshold:
// 4(%esp) == index
// 8(%esp) == tmp
// 12(%esp) == threshold
xor %edx,%edx
xor %eax,%eax
testl $1,4(%esp)
jnz mark_end
mark_begin:
mov $((1 << 30) + 1),%ecx
rdpmc
mov 8(%esp), %ecx
mov %eax, (%ecx)
mov %edx,4(%ecx)
jmp __itt_pt_mark
mark_end:
mov $((1 << 30) + 1),%ecx
rdpmc
mov 8(%esp), %ecx
sub (%ecx), %eax
sbb 4(%ecx), %edx
sub 12(%esp), %eax // threshold
jnc found
sbb $0, %edx
jnc found
jmp __itt_pt_mark
found:
call __itt_pt_mark
jmp __itt_pt_flush
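// For reference: loading ECX with (1 << 30) + 1 selects fixed-function
// performance counter 1 for rdpmc (bit 30 chooses the fixed-counter set,
// assuming the usual Intel encoding), and the sub/sbb pair performs a
// 64-bit subtraction of the saved start value held at *tmp.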
// PTWRITE
.align 16
// void __itt_pt_write(unsigned long long value);
.long 0, 1, 2, 3 // GUID
__itt_pt_write:
// ptwrite dword ptr [esp + 4]
.byte 0xF3, 0x0F, 0xAE, 0x64, 0x24, 0x04
ret
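// Byte decode (for reference): F3 0F AE /4 is PTWRITE; the ModRM/SIB/disp8
// bytes 64 24 04 encode the memory operand [esp + 4], i.e. the `value`
// argument on the stack. Raw bytes are used presumably because older
// assemblers do not recognize the PTWRITE mnemonic.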
// Ensure the stack is non-executable
#if defined(__ELF__)
.section .note.GNU-stack,"",@progbits
#endif
|
LRenzo0801/ittapi
| 13,242
|
src/ittnotify/ittptmark64.S
|
/*
Copyright (C) 2017-2025 Intel Corporation
SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
*/
// /////////////////////////////////////////////////////////////////////////
////// Intel Processor Trace Marker Functionality
////////////////////////////////////////////////////////////////////////////
.text
.align 16
.globl __itt_pt_mark
.globl __itt_pt_event
.globl __itt_pt_mark_event
.globl __itt_pt_mark_threshold
.globl __itt_pt_byte
.globl __itt_pt_write
/// void __itt_pt_mark(unsigned char index);
__itt_pt_mark:
__itt_pt_mark_int:
and $0xff, %rdi
call __itt_pt_mark_pic
__itt_pt_mark_pic:
popq %rax
lea (__itt_pt_mark_call_table - __itt_pt_mark_pic) (%rax,%rdi,4), %rdi
jmp *%rdi
.long 0, 1, 2, 3 // GUID
.long 0xfadefade
__itt_pt_mark_call_table:
retq
retq $0x0
retq
retq $0x1
retq
retq $0x2
retq
retq $0x3
retq
retq $0x4
retq
retq $0x5
retq
retq $0x6
retq
retq $0x7
retq
retq $0x8
retq
retq $0x9
retq
retq $0xa
retq
retq $0xb
retq
retq $0xc
retq
retq $0xd
retq
retq $0xe
retq
retq $0xf
retq
retq $0x10
retq
retq $0x11
retq
retq $0x12
retq
retq $0x13
retq
retq $0x14
retq
retq $0x15
retq
retq $0x16
retq
retq $0x17
retq
retq $0x18
retq
retq $0x19
retq
retq $0x1a
retq
retq $0x1b
retq
retq $0x1c
retq
retq $0x1d
retq
retq $0x1e
retq
retq $0x1f
retq
retq $0x20
retq
retq $0x21
retq
retq $0x22
retq
retq $0x23
retq
retq $0x24
retq
retq $0x25
retq
retq $0x26
retq
retq $0x27
retq
retq $0x28
retq
retq $0x29
retq
retq $0x2a
retq
retq $0x2b
retq
retq $0x2c
retq
retq $0x2d
retq
retq $0x2e
retq
retq $0x2f
retq
retq $0x30
retq
retq $0x31
retq
retq $0x32
retq
retq $0x33
retq
retq $0x34
retq
retq $0x35
retq
retq $0x36
retq
retq $0x37
retq
retq $0x38
retq
retq $0x39
retq
retq $0x3a
retq
retq $0x3b
retq
retq $0x3c
retq
retq $0x3d
retq
retq $0x3e
retq
retq $0x3f
retq
retq $0x40
retq
retq $0x41
retq
retq $0x42
retq
retq $0x43
retq
retq $0x44
retq
retq $0x45
retq
retq $0x46
retq
retq $0x47
retq
retq $0x48
retq
retq $0x49
retq
retq $0x4a
retq
retq $0x4b
retq
retq $0x4c
retq
retq $0x4d
retq
retq $0x4e
retq
retq $0x4f
retq
retq $0x50
retq
retq $0x51
retq
retq $0x52
retq
retq $0x53
retq
retq $0x54
retq
retq $0x55
retq
retq $0x56
retq
retq $0x57
retq
retq $0x58
retq
retq $0x59
retq
retq $0x5a
retq
retq $0x5b
retq
retq $0x5c
retq
retq $0x5d
retq
retq $0x5e
retq
retq $0x5f
retq
retq $0x60
retq
retq $0x61
retq
retq $0x62
retq
retq $0x63
retq
retq $0x64
retq
retq $0x65
retq
retq $0x66
retq
retq $0x67
retq
retq $0x68
retq
retq $0x69
retq
retq $0x6a
retq
retq $0x6b
retq
retq $0x6c
retq
retq $0x6d
retq
retq $0x6e
retq
retq $0x6f
retq
retq $0x70
retq
retq $0x71
retq
retq $0x72
retq
retq $0x73
retq
retq $0x74
retq
retq $0x75
retq
retq $0x76
retq
retq $0x77
retq
retq $0x78
retq
retq $0x79
retq
retq $0x7a
retq
retq $0x7b
retq
retq $0x7c
retq
retq $0x7d
retq
retq $0x7e
retq
retq $0x7f
retq
retq $0x80
retq
retq $0x81
retq
retq $0x82
retq
retq $0x83
retq
retq $0x84
retq
retq $0x85
retq
retq $0x86
retq
retq $0x87
retq
retq $0x88
retq
retq $0x89
retq
retq $0x8a
retq
retq $0x8b
retq
retq $0x8c
retq
retq $0x8d
retq
retq $0x8e
retq
retq $0x8f
retq
retq $0x90
retq
retq $0x91
retq
retq $0x92
retq
retq $0x93
retq
retq $0x94
retq
retq $0x95
retq
retq $0x96
retq
retq $0x97
retq
retq $0x98
retq
retq $0x99
retq
retq $0x9a
retq
retq $0x9b
retq
retq $0x9c
retq
retq $0x9d
retq
retq $0x9e
retq
retq $0x9f
retq
retq $0xa0
retq
retq $0xa1
retq
retq $0xa2
retq
retq $0xa3
retq
retq $0xa4
retq
retq $0xa5
retq
retq $0xa6
retq
retq $0xa7
retq
retq $0xa8
retq
retq $0xa9
retq
retq $0xaa
retq
retq $0xab
retq
retq $0xac
retq
retq $0xad
retq
retq $0xae
retq
retq $0xaf
retq
retq $0xb0
retq
retq $0xb1
retq
retq $0xb2
retq
retq $0xb3
retq
retq $0xb4
retq
retq $0xb5
retq
retq $0xb6
retq
retq $0xb7
retq
retq $0xb8
retq
retq $0xb9
retq
retq $0xba
retq
retq $0xbb
retq
retq $0xbc
retq
retq $0xbd
retq
retq $0xbe
retq
retq $0xbf
retq
retq $0xc0
retq
retq $0xc1
retq
retq $0xc2
retq
retq $0xc3
retq
retq $0xc4
retq
retq $0xc5
retq
retq $0xc6
retq
retq $0xc7
retq
retq $0xc8
retq
retq $0xc9
retq
retq $0xca
retq
retq $0xcb
retq
retq $0xcc
retq
retq $0xcd
retq
retq $0xce
retq
retq $0xcf
retq
retq $0xd0
retq
retq $0xd1
retq
retq $0xd2
retq
retq $0xd3
retq
retq $0xd4
retq
retq $0xd5
retq
retq $0xd6
retq
retq $0xd7
retq
retq $0xd8
retq
retq $0xd9
retq
retq $0xda
retq
retq $0xdb
retq
retq $0xdc
retq
retq $0xdd
retq
retq $0xde
retq
retq $0xdf
retq
retq $0xe0
retq
retq $0xe1
retq
retq $0xe2
retq
retq $0xe3
retq
retq $0xe4
retq
retq $0xe5
retq
retq $0xe6
retq
retq $0xe7
retq
retq $0xe8
retq
retq $0xe9
retq
retq $0xea
retq
retq $0xeb
retq
retq $0xec
retq
retq $0xed
retq
retq $0xee
retq
retq $0xef
retq
retq $0xf0
retq
retq $0xf1
retq
retq $0xf2
retq
retq $0xf3
retq
retq $0xf4
retq
retq $0xf5
retq
retq $0xf6
retq
retq $0xf7
retq
retq $0xf8
retq
retq $0xf9
retq
retq $0xfa
retq
retq $0xfb
retq
retq $0xfc
retq
retq $0xfd
retq
retq $0xfe
retq
retq $0xff
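// For reference: the call/pop pairs in __itt_pt_mark above (and
// __itt_pt_byte below) are the classic position-independent-code idiom:
// `call` pushes the address of the next instruction, `popq` retrieves it,
// and the table address is formed relative to it, so no load-time
// relocations are needed.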
.align 16
__itt_pt_byte:
__itt_pt_byte_int:
and $0xff, %rdi
call __itt_pt_byte_pic
__itt_pt_byte_pic:
popq %rcx
lea (__itt_pt_byte_call_table - __itt_pt_byte_pic) (%rcx,%rdi,1), %rdi
jmp *%rdi
.align 4
.long 0, 1, 2, 3 // GUID
.long 0xfadedeaf
__itt_pt_byte_call_table:
.fill 256,1,0xc3
.align 16
__itt_pt_event:
__itt_pt_event_int:
pushq %rcx
mov %rdi,%rcx
rdpmc
xor %rdi, %rdi
mov %al, %dil
call __itt_pt_byte_int
shr $8, %eax
mov %al, %dil
call __itt_pt_byte_int
shr $8, %eax
mov %al, %dil
call __itt_pt_byte_int
shr $8, %eax
mov %al, %dil
call __itt_pt_byte_int
mov %dl, %dil
call __itt_pt_byte_int
shr $8, %edx
mov %dl, %dil
call __itt_pt_byte_int
shr $8, %edx
mov %dl, %dil
call __itt_pt_byte_int
shr $8, %edx
mov %dl, %dil
call __itt_pt_byte_int
popq %rcx
ret
.align 16
__itt_pt_mark_event:
test $1, %rdi
jnz odd
mov %rdi, %rsi
xor %rdi,%rdi
call __itt_pt_event_int
mov %rsi, %rdi
jmp __itt_pt_mark_int
odd:
call __itt_pt_mark_int
xor %rdi,%rdi
jmp __itt_pt_event_int
.align 16
__itt_pt_flush:
call __itt_pt_flush_pic
__itt_pt_flush_pic:
popq %rdx
lea (__itt_pt_mark_flush_1 - __itt_pt_flush_pic) (%rdx), %rax
jmp *%rax
.align 16
nop
__itt_pt_mark_flush_1:
lea (__itt_pt_mark_flush_2 - __itt_pt_flush_pic) (%rdx), %rax
jmp *%rax
.align 16
nop
nop
__itt_pt_mark_flush_2:
lea (__itt_pt_mark_flush_3 - __itt_pt_flush_pic) (%rdx), %rax
jmp *%rax
.align 16
nop
nop
nop
__itt_pt_mark_flush_3:
ret
.align 16
// int __itt_pt_mark_threshold(unsigned char index, unsigned long long* tmp, int threshold);
__itt_pt_mark_threshold:
// rdi == index
// rsi == tmp
// rdx == threshold
mov %rdx, %r8 // r8 = threshold
xor %rdx, %rdx
xor %rax, %rax
test $1, %rdi
jnz mark_end
mark_begin:
mov $((1 << 30) + 1),%rcx
rdpmc
shl $32, %rdx
or %rax, %rdx
mov %rdx, (%rsi)
jmp __itt_pt_mark_int
mark_end:
mov $((1 << 30) + 1),%rcx
rdpmc
shl $32, %rdx
or %rax, %rdx
sub (%rsi), %rdx
cmp %r8, %rdx // threshold
jnc found
jmp __itt_pt_mark_int
found:
call __itt_pt_mark_int
jmp __itt_pt_flush
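// For reference: in 64-bit mode rdpmc zero-extends its results into RAX
// and RDX, so `shl $32, %rdx; or %rax, %rdx` reassembles the full counter
// value in RDX before it is stored to *tmp or compared with the threshold
// in r8.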
// PTWRITE
.align 16
// void __itt_pt_write(unsigned long long value);
.long 0, 1, 2, 3 // GUID
__itt_pt_write:
// ptwrite %rdi (value argument, per the SysV ABI used throughout this file)
.byte 0xF3, 0x48, 0x0F, 0xAE, 0xE7
ret
// Ensure the stack is non-executable
#if defined(__ELF__)
.section .note.GNU-stack,"",@progbits
#endif
|
lte678/Project-Euler
| 3,329
|
9/solution.s
|
# Finds the Pythagorean triplet where a^2 + b^2 = c^2 and a+b+c=1000
# Run with:
# gcc solution.s -no-pie -o solution && ./solution
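# For reference, the well-known answer (not assumed by the code): a=200,
# b=375, c=425, since 200^2 + 375^2 = 40000 + 140625 = 180625 = 425^2 and
# 200 + 375 + 425 = 1000.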
# Mark the stack as non-executable. Preferred by some versions of ld
.section .note.GNU-stack,"",@progbits
.section .rodata
.globl prefix_str
str1:
.string "The answer is c=" # Already zero terminated
str2:
.string " b="
str3:
.string " a="
newline_str:
.string "\n"
nosol_str:
.string "No solution found\n"
.text
.global main
main:
# Solve problem
mov $998, %rcx
c_loop:
mov $1000, %rbx
sub %rcx, %rbx
dec %rbx
b_loop:
mov $1000, %rax
sub %rcx, %rax
sub %rbx, %rax
# Check condition
mul %rax # This clobbers rdx
mov %rax, %rsi
mov %rbx, %rax
mul %rax
add %rax, %rsi
mov %rcx, %rax
mul %rax
cmp %rax, %rsi
je found_solution
dec %rbx
jnz b_loop
dec %rcx
jnz c_loop
push $nosol_str
call print
add $8, %rsp
jmp exit
found_solution:
mov $1000, %rax
sub %rbx, %rax
sub %rcx, %rax
push %rax
push %rbx
push %rcx
# Print result
push $str1
call print
add $8, %rsp
pop %rcx
push %rcx
call print_long
add $8, %rsp
push $str2
call print
add $8, %rsp
pop %rcx
push %rcx
call print_long
add $8, %rsp
push $str3
call print
add $8, %rsp
pop %rcx
push %rcx
call print_long
add $8, %rsp
push $newline_str
call print
add $8, %rsp
jmp exit
print_long:
push %rbp
mov %rsp, %rbp
# Takes an unsigned long as the parameter.
mov 16(%rsp), %rax # Load parameter
sub $8, %rsp # Stack buffer holds up to 7 digits plus a null terminator
# rcx is the string character index
mov %rsp, %rcx
movb $0, (%rsp)
inc %rcx
mov $10, %rbx # We cannot use an immediate for div
print_long_loop:
mov $0, %rdx # Zero the high half of the dividend (rdx:rax)
div %rbx # divide rax by 10, and put the result in rax. rdx contains remainder
add $48, %rdx # Add to ascii code for '0'
mov %dl, (%rcx)
inc %rcx
cmp %rbp, %rcx
je print_long_exit
cmp $0, %rax
jne print_long_loop
print_long_exit:
# %rdx is the incrementing char pointer, while rcx decrements
mov %rsp, %rdx
print_long_rev:
dec %rcx
mov (%rcx), %al
mov (%rdx), %ah
mov %al, (%rdx)
mov %ah, (%rcx)
inc %rdx
cmp %rcx, %rdx
jl print_long_rev
push %rsp
call print
add $8, %rsp
mov %rbp, %rsp
pop %rbp
ret
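# A worked example of print_long (for reference): for 425 the loop writes a
# null byte at (%rsp) and then '5', '2', '4' above it (least significant
# digit first); the swap loop reverses the buffer in place into "425\0"
# before it is handed to print.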
strlen:
# Preserve stack base pointer
push %rbp
mov %rsp, %rbp
mov 16(%rbp), %rbx
mov $0, %rax
strlen_loop:
inc %rax
cmpb $0, -1(%rax, %rbx) # Index into the address %rax + %rbx
jne strlen_loop
dec %rax
mov %rbp, %rsp
pop %rbp
ret
print:
# Preserve stack base pointer
push %rbp
mov %rsp, %rbp
# Get string length
push 16(%rbp) # The only parameter is a pointer to the string. Pass as parameter again
call strlen
add $8, %rsp
# Prepare syscall params
mov %rax, %rdx # Mov string length to rdx
mov $1, %rax
mov $1, %rdi
mov 16(%rbp), %rsi
# sys_write
syscall
mov %rbp, %rsp
pop %rbp
ret
exit:
# sys_exit
mov $60, %rax
mov $0, %rdi
syscall
|
lucyyang01/pint-OS
| 5,272
|
src/threads/start.S
|
#include "threads/loader.h"
#### Kernel startup code.
#### The loader (in loader.S) loads the kernel at physical address
#### 0x20000 (128 kB) and jumps to "start", defined here. This code
#### switches from real mode to 32-bit protected mode and calls
#### main().
/* Flags in control register 0. */
#define CR0_PE 0x00000001 /* Protection Enable. */
#define CR0_EM 0x00000004 /* (Floating-point) Emulation. */
#define CR0_PG 0x80000000 /* Paging. */
#define CR0_WP 0x00010000 /* Write-Protect enable in kernel mode. */
.section .start
# The following code runs in real mode, which is a 16-bit code segment.
.code16
.func start
.globl start
start:
# The loader called into us with CS = 0x2000, SS = 0x0000, ESP = 0xf000,
# but we should initialize the other segment registers.
mov $0x2000, %ax
mov %ax, %ds
mov %ax, %es
# Set string instructions to go upward.
cld
#### Get memory size, via interrupt 15h function 88h (see [IntrList]),
#### which returns AX = (kB of physical memory) - 1024. This only
#### works for memory sizes <= 65 MB, which should be fine for our
#### purposes. We cap memory at 64 MB because that's all we prepare
#### page tables for, below.
movb $0x88, %ah
int $0x15
addl $1024, %eax # Total kB memory
cmp $0x10000, %eax # Cap at 64 MB
jbe 1f
mov $0x10000, %eax
1: shrl $2, %eax # Total 4 kB pages
addr32 movl %eax, init_ram_pages - LOADER_PHYS_BASE - 0x20000
#### Enable A20. Address line 20 is tied low when the machine boots,
#### which prevents addressing memory above 1 MB. This code fixes it.
# Poll status register while busy.
1: inb $0x64, %al
testb $0x2, %al
jnz 1b
# Send command for writing output port.
movb $0xd1, %al
outb %al, $0x64
# Poll status register while busy.
1: inb $0x64, %al
testb $0x2, %al
jnz 1b
# Enable A20 line.
movb $0xdf, %al
outb %al, $0x60
# Poll status register while busy.
1: inb $0x64, %al
testb $0x2, %al
jnz 1b
#### Create temporary page directory and page table and set page
#### directory base register.
# Create page directory at 0xf000 (60 kB) and fill with zeroes.
mov $0xf00, %ax
mov %ax, %es
subl %eax, %eax
subl %edi, %edi
movl $0x400, %ecx
rep stosl
# Add PDEs to point to page tables for the first 64 MB of RAM.
# Also add identical PDEs starting at LOADER_PHYS_BASE.
# See [IA32-v3a] section 3.7.6 "Page-Directory and Page-Table Entries"
# for a description of the bits in %eax.
movl $0x10007, %eax
movl $0x11, %ecx
subl %edi, %edi
1: movl %eax, %es:(%di)
movl %eax, %es:LOADER_PHYS_BASE >> 20(%di)
addw $4, %di
addl $0x1000, %eax
loop 1b
# Set up page tables for a one-to-one linear-to-physical map of the
# first 64 MB of RAM.
# See [IA32-v3a] section 3.7.6 "Page-Directory and Page-Table Entries"
# for a description of the bits in %eax.
movw $0x1000, %ax
movw %ax, %es
movl $0x7, %eax
movl $0x4000, %ecx
subl %edi, %edi
1: movl %eax, %es:(%di)
addw $4, %di
addl $0x1000, %eax
loop 1b
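# Arithmetic check (for reference): 0x4000 PTEs x 4 kB each = 64 MB
# mapped, matching the memory cap applied earlier; the page tables live
# at physical 0x10000, where the first PDE value (0x10007) points.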
# Set page directory base register.
movl $0xf000, %eax
movl %eax, %cr3
#### Switch to protected mode.
# First, disable interrupts. We won't set up the IDT until we get
# into C code, so any interrupt would blow us away.
cli
# Protected mode requires a GDT, so point the GDTR to our GDT.
# We need a data32 prefix to ensure that all 32 bits of the GDT
# descriptor are loaded (default is to load only 24 bits).
# The CPU doesn't need an addr32 prefix but ELF doesn't do 16-bit
# relocations.
data32 addr32 lgdt gdtdesc - LOADER_PHYS_BASE - 0x20000
# Then we turn on the following bits in CR0:
# PE (Protect Enable): this turns on protected mode.
# PG (Paging): turns on paging.
# WP (Write Protect): if unset, ring 0 code ignores
# write-protect bits in page tables (!).
# EM (Emulation): forces floating-point instructions to trap.
# We don't (yet) support floating point.
movl %cr0, %eax
//orl $CR0_PE | CR0_PG | CR0_WP | CR0_EM, %eax
orl $CR0_PE | CR0_PG | CR0_WP, %eax
movl %eax, %cr0
# We're now in protected mode in a 16-bit segment. The CPU still has
# the real-mode code segment cached in %cs's segment descriptor. We
# need to reload %cs, and the easiest way is to use a far jump.
# Because we're not running in a 32-bit segment the data32 prefix is
# needed to jump to a 32-bit offset in the target segment.
data32 ljmp $SEL_KCSEG, $1f
# We're now in protected mode in a 32-bit segment.
# Let the assembler know.
.code32
# Reload all the other segment registers and the stack pointer to
# point into our new GDT.
1: mov $SEL_KDSEG, %ax
mov %ax, %ds
mov %ax, %es
mov %ax, %fs
mov %ax, %gs
mov %ax, %ss
addl $LOADER_PHYS_BASE, %esp
movl $0, %ebp # Null-terminate main()'s backtrace
#### Call main().
call main
# main() shouldn't ever return. If it does, spin.
1: jmp 1b
.endfunc
#### GDT
.align 8
gdt:
.quad 0x0000000000000000 # Null segment. Not used by CPU.
.quad 0x00cf9a000000ffff # System code, base 0, limit 4 GB.
.quad 0x00cf92000000ffff # System data, base 0, limit 4 GB.
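# A rough decode (for reference): 0x00cf9a000000ffff is base 0,
# limit 0xfffff with 4 kB granularity (4 GB), access byte 0x9a =
# present ring-0 code; 0x00cf92000000ffff is the same but access
# byte 0x92 = present ring-0 data.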
gdtdesc:
.word gdtdesc - gdt - 1 # Size of the GDT, minus 1 byte.
.long gdt # Address of the GDT.
#### Physical memory size in 4 kB pages. This is exported to the rest
#### of the kernel.
.globl init_ram_pages
init_ram_pages:
.long 0
|
lucyyang01/pint-OS
| 7,464
|
src/threads/intr-stubs.S
|
#include "threads/loader.h"
.text
/* Main interrupt entry point.
An internal or external interrupt starts in one of the
intrNN_stub routines, which push the `struct intr_frame'
frame_pointer, error_code, and vec_no members on the stack,
then jump here.
We save the rest of the `struct intr_frame' members to the
stack, set up some registers as needed by the kernel, and then
call intr_handler(), which actually handles the interrupt.
We "fall through" to intr_exit to return from the interrupt.
*/
.func intr_entry
intr_entry:
/* Save caller's registers. */
subl $108, %esp
fsave (%esp)
pushl %ds
pushl %es
pushl %fs
pushl %gs
pushal
/* Set up kernel environment. */
cld /* String instructions go upward. */
mov $SEL_KDSEG, %eax /* Initialize segment registers. */
mov %eax, %ds
mov %eax, %es
leal 164(%esp), %ebp /* Set up frame pointer. */
/* Call interrupt handler. */
pushl %esp
.globl intr_handler
call intr_handler
addl $4, %esp
.endfunc
/* Interrupt exit.
Restores the caller's registers, discards extra data on the
stack, and returns to the caller.
This is a separate function because it is called directly when
we launch a new user process (see start_process() in
userprog/process.c). */
.globl intr_exit
.func intr_exit
intr_exit:
/* Restore caller's registers. */
popal
popl %gs
popl %fs
popl %es
popl %ds
frstor (%esp)
addl $108, %esp
/* Discard `struct intr_frame' vec_no, error_code,
frame_pointer members. */
addl $12, %esp
/* Return to caller. */
iret
.endfunc
/* Interrupt stubs.
This defines 256 fragments of code, named `intr00_stub'
through `intrff_stub', each of which is used as the entry
point for the corresponding interrupt vector. It also puts
the address of each of these functions in the correct spot in
`intr_stubs', an array of function pointers.
Most of the stubs do this:
1. Push %ebp on the stack (frame_pointer in `struct intr_frame').
2. Push 0 on the stack (error_code).
3. Push the interrupt number on the stack (vec_no).
The CPU pushes an extra "error code" on the stack for a few
interrupts. Because we want %ebp to be where the error code
is, we follow a different path:
1. Push a duplicate copy of the error code on the stack.
2. Replace the original copy of the error code by %ebp.
3. Push the interrupt number on the stack. */
.data
.globl intr_stubs
intr_stubs:
/* This implements steps 1 and 2, described above, in the common
case where we just push a 0 error code. */
#define zero \
pushl %ebp; \
pushl $0
/* This implements steps 1 and 2, described above, in the case
where the CPU already pushed an error code. */
#define REAL \
pushl (%esp); \
movl %ebp, 4(%esp)
/* Emits a stub for interrupt vector NUMBER.
TYPE is `zero', for the case where we push a 0 error code,
or `REAL', if the CPU pushes an error code for us. */
#define STUB(NUMBER, TYPE) \
.text; \
.func intr##NUMBER##_stub; \
intr##NUMBER##_stub: \
TYPE; \
push $0x##NUMBER; \
jmp intr_entry; \
.endfunc; \
\
.data; \
.long intr##NUMBER##_stub;
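/* For reference, STUB(0d, REAL) expands (roughly) to:
   .text
   intr0d_stub:
   pushl (%esp)
   movl %ebp, 4(%esp)
   push $0x0d
   jmp intr_entry
   .data
   .long intr0d_stub */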
/* All the stubs. */
STUB(00, zero) STUB(01, zero) STUB(02, zero) STUB(03, zero)
STUB(04, zero) STUB(05, zero) STUB(06, zero) STUB(07, zero)
STUB(08, REAL) STUB(09, zero) STUB(0a, REAL) STUB(0b, REAL)
STUB(0c, zero) STUB(0d, REAL) STUB(0e, REAL) STUB(0f, zero)
STUB(10, zero) STUB(11, REAL) STUB(12, zero) STUB(13, zero)
STUB(14, zero) STUB(15, zero) STUB(16, zero) STUB(17, zero)
STUB(18, REAL) STUB(19, zero) STUB(1a, REAL) STUB(1b, REAL)
STUB(1c, zero) STUB(1d, REAL) STUB(1e, REAL) STUB(1f, zero)
STUB(20, zero) STUB(21, zero) STUB(22, zero) STUB(23, zero)
STUB(24, zero) STUB(25, zero) STUB(26, zero) STUB(27, zero)
STUB(28, zero) STUB(29, zero) STUB(2a, zero) STUB(2b, zero)
STUB(2c, zero) STUB(2d, zero) STUB(2e, zero) STUB(2f, zero)
STUB(30, zero) STUB(31, zero) STUB(32, zero) STUB(33, zero)
STUB(34, zero) STUB(35, zero) STUB(36, zero) STUB(37, zero)
STUB(38, zero) STUB(39, zero) STUB(3a, zero) STUB(3b, zero)
STUB(3c, zero) STUB(3d, zero) STUB(3e, zero) STUB(3f, zero)
STUB(40, zero) STUB(41, zero) STUB(42, zero) STUB(43, zero)
STUB(44, zero) STUB(45, zero) STUB(46, zero) STUB(47, zero)
STUB(48, zero) STUB(49, zero) STUB(4a, zero) STUB(4b, zero)
STUB(4c, zero) STUB(4d, zero) STUB(4e, zero) STUB(4f, zero)
STUB(50, zero) STUB(51, zero) STUB(52, zero) STUB(53, zero)
STUB(54, zero) STUB(55, zero) STUB(56, zero) STUB(57, zero)
STUB(58, zero) STUB(59, zero) STUB(5a, zero) STUB(5b, zero)
STUB(5c, zero) STUB(5d, zero) STUB(5e, zero) STUB(5f, zero)
STUB(60, zero) STUB(61, zero) STUB(62, zero) STUB(63, zero)
STUB(64, zero) STUB(65, zero) STUB(66, zero) STUB(67, zero)
STUB(68, zero) STUB(69, zero) STUB(6a, zero) STUB(6b, zero)
STUB(6c, zero) STUB(6d, zero) STUB(6e, zero) STUB(6f, zero)
STUB(70, zero) STUB(71, zero) STUB(72, zero) STUB(73, zero)
STUB(74, zero) STUB(75, zero) STUB(76, zero) STUB(77, zero)
STUB(78, zero) STUB(79, zero) STUB(7a, zero) STUB(7b, zero)
STUB(7c, zero) STUB(7d, zero) STUB(7e, zero) STUB(7f, zero)
STUB(80, zero) STUB(81, zero) STUB(82, zero) STUB(83, zero)
STUB(84, zero) STUB(85, zero) STUB(86, zero) STUB(87, zero)
STUB(88, zero) STUB(89, zero) STUB(8a, zero) STUB(8b, zero)
STUB(8c, zero) STUB(8d, zero) STUB(8e, zero) STUB(8f, zero)
STUB(90, zero) STUB(91, zero) STUB(92, zero) STUB(93, zero)
STUB(94, zero) STUB(95, zero) STUB(96, zero) STUB(97, zero)
STUB(98, zero) STUB(99, zero) STUB(9a, zero) STUB(9b, zero)
STUB(9c, zero) STUB(9d, zero) STUB(9e, zero) STUB(9f, zero)
STUB(a0, zero) STUB(a1, zero) STUB(a2, zero) STUB(a3, zero)
STUB(a4, zero) STUB(a5, zero) STUB(a6, zero) STUB(a7, zero)
STUB(a8, zero) STUB(a9, zero) STUB(aa, zero) STUB(ab, zero)
STUB(ac, zero) STUB(ad, zero) STUB(ae, zero) STUB(af, zero)
STUB(b0, zero) STUB(b1, zero) STUB(b2, zero) STUB(b3, zero)
STUB(b4, zero) STUB(b5, zero) STUB(b6, zero) STUB(b7, zero)
STUB(b8, zero) STUB(b9, zero) STUB(ba, zero) STUB(bb, zero)
STUB(bc, zero) STUB(bd, zero) STUB(be, zero) STUB(bf, zero)
STUB(c0, zero) STUB(c1, zero) STUB(c2, zero) STUB(c3, zero)
STUB(c4, zero) STUB(c5, zero) STUB(c6, zero) STUB(c7, zero)
STUB(c8, zero) STUB(c9, zero) STUB(ca, zero) STUB(cb, zero)
STUB(cc, zero) STUB(cd, zero) STUB(ce, zero) STUB(cf, zero)
STUB(d0, zero) STUB(d1, zero) STUB(d2, zero) STUB(d3, zero)
STUB(d4, zero) STUB(d5, zero) STUB(d6, zero) STUB(d7, zero)
STUB(d8, zero) STUB(d9, zero) STUB(da, zero) STUB(db, zero)
STUB(dc, zero) STUB(dd, zero) STUB(de, zero) STUB(df, zero)
STUB(e0, zero) STUB(e1, zero) STUB(e2, zero) STUB(e3, zero)
STUB(e4, zero) STUB(e5, zero) STUB(e6, zero) STUB(e7, zero)
STUB(e8, zero) STUB(e9, zero) STUB(ea, zero) STUB(eb, zero)
STUB(ec, zero) STUB(ed, zero) STUB(ee, zero) STUB(ef, zero)
STUB(f0, zero) STUB(f1, zero) STUB(f2, zero) STUB(f3, zero)
STUB(f4, zero) STUB(f5, zero) STUB(f6, zero) STUB(f7, zero)
STUB(f8, zero) STUB(f9, zero) STUB(fa, zero) STUB(fb, zero)
STUB(fc, zero) STUB(fd, zero) STUB(fe, zero) STUB(ff, zero)
|
lucyyang01/pint-OS
| 7,224
|
src/threads/loader.S
|
#include "threads/loader.h"
#### Kernel loader.
#### This code should be stored in the first sector of a hard disk.
#### When the BIOS runs, it loads this code at physical address
#### 0x7c00-0x7e00 (512 bytes) and jumps to the beginning of it,
#### in real mode. The loader loads the kernel into memory and jumps
#### to its entry point, which is the start function in start.S.
####
#### The BIOS passes in the drive that the loader was read from as
#### DL, with floppy drives numbered 0x00, 0x01, ... and hard drives
#### numbered 0x80, 0x81, ... We want to support booting a kernel on
#### a different drive from the loader, so we don't take advantage of
#### this.
# Runs in real mode, which is a 16-bit segment.
.code16
# Set up segment registers.
# Set stack to grow downward from 60 kB (after boot, the kernel
# continues to use this stack for its initial thread).
sub %ax, %ax
mov %ax, %ds
mov %ax, %ss
mov $0xf000, %esp
# Configure serial port so we can report progress without connected VGA.
# See [IntrList] for details.
sub %dx, %dx # Serial port 0.
mov $0xe3, %al # 9600 bps, N-8-1.
# AH is already 0 (Initialize Port).
int $0x14 # Destroys AX.
call puts
.string "PiLo"
#### Read the partition table on each system hard disk and scan for a
#### partition of type 0x20, which is the type that we use for a
#### Pintos kernel.
####
#### Read [Partitions] for a description of the partition table format
#### that we parse.
####
#### We print out status messages to show the disk and partition being
#### scanned, e.g. hda1234 as we scan four partitions on the first
#### hard disk.
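#### For reference, the partition table entry fields used below (16 bytes
#### per entry, four entries starting at byte 446 of the MBR): offset 0 is
#### the boot flag (0x80 = bootable), offset 4 the partition type, offset
#### 8 the first sector (LBA), and offset 12 the number of sectors.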
mov $0x80, %dl # Hard disk 0.
read_mbr:
sub %ebx, %ebx # Sector 0.
mov $0x2000, %ax # Use 0x20000 for buffer.
mov %ax, %es
call read_sector
jc no_such_drive
# Print hd[a-z].
call puts
.string " hd"
mov %dl, %al
add $'a' - 0x80, %al
call putc
# Check for MBR signature--if not present, it's not a
# partitioned hard disk.
cmpw $0xaa55, %es:510
jne next_drive
mov $446, %si # Offset of partition table entry 1.
mov $'1', %al
check_partition:
# Is it an unused partition?
cmpl $0, %es:(%si)
je next_partition
# Print [1-4].
call putc
# Is it a Pintos kernel partition?
cmpb $0x20, %es:4(%si)
jne next_partition
# Is it a bootable partition?
cmpb $0x80, %es:(%si)
je load_kernel
next_partition:
# No match for this partition, go on to the next one.
add $16, %si # Offset to next partition table entry.
inc %al
cmp $510, %si
jb check_partition
next_drive:
# No match on this drive, go on to the next one.
inc %dl
jnc read_mbr
no_such_drive:
no_boot_partition:
# Didn't find a Pintos kernel partition anywhere, give up.
call puts
.string "\rNot found\r"
# Notify BIOS that boot failed. See [IntrList].
int $0x18
#### We found a kernel. The kernel's drive is in DL. The partition
#### table entry for the kernel's partition is at ES:SI. Our job now
#### is to read the kernel from disk and jump to its start address.
load_kernel:
call puts
.string "\rLoading"
# Figure out number of sectors to read. A Pintos kernel is
# just an ELF format object, which doesn't have an
# easy-to-read field to identify its own size (see [ELF1]).
# But we limit Pintos kernels to 512 kB for other reasons, so
# it's easy enough to just read the entire contents of the
# partition or 512 kB from disk, whichever is smaller.
mov %es:12(%si), %ecx # ECX = number of sectors
cmp $1024, %ecx # Cap size at 512 kB
jbe 1f
mov $1024, %cx
1:
mov %es:8(%si), %ebx # EBX = first sector
mov $0x2000, %ax # Start load address: 0x20000
next_sector:
# Read one sector into memory.
mov %ax, %es # ES:0000 -> load address
call read_sector
jc read_failed
# Print '.' as progress indicator once every 16 sectors == 8 kB.
test $15, %bl
jnz 1f
call puts
.string "."
1:
# Advance memory pointer and disk sector.
add $0x20, %ax
inc %bx
loop next_sector
call puts
.string "\r"
#### Transfer control to the kernel that we loaded. We read the start
#### address out of the ELF header (see [ELF1]) and convert it from a
#### 32-bit linear address into a 16:16 segment:offset address for
#### real mode, then jump to the converted address. The 80x86 doesn't
#### have an instruction to jump to an absolute segment:offset kept in
#### registers, so in fact we store the address in a temporary memory
#### location, then jump indirectly through that location. To save 4
#### bytes in the loader, we reuse 4 bytes of the loader's code for
#### this temporary pointer.
mov $0x2000, %ax
mov %ax, %es
mov %es:0x18, %dx
mov %dx, start
movw $0x2000, start + 2
ljmp *start
read_failed:
start:
# Disk sector read failed.
call puts
1: .string "\rBad read\r"
# Notify BIOS that boot failed. See [IntrList].
int $0x18
#### Print string subroutine. To save space in the loader, this
#### subroutine takes its null-terminated string argument from the
#### code stream just after the call, and then returns to the byte
#### just after the terminating null. This subroutine preserves all
#### general-purpose registers.
puts: xchg %si, %ss:(%esp)
push %ax
next_char:
mov %cs:(%si), %al
inc %si
test %al, %al
jz 1f
call putc
jmp next_char
1: pop %ax
xchg %si, %ss:(%esp)
ret
#### Character output subroutine. Prints the character in AL to the
#### VGA display and serial port 0, using BIOS services (see
#### [IntrList]). Preserves all general-purpose registers.
####
#### If called upon to output a carriage return, this subroutine
#### automatically supplies the following line feed.
putc: pusha
1: sub %bh, %bh # Page 0.
mov $0x0e, %ah # Teletype output service.
int $0x10
mov $0x01, %ah # Serial port output service.
sub %dx, %dx # Serial port 0.
2: int $0x14 # Destroys AH.
test $0x80, %ah # Output timed out?
jz 3f
movw $0x9090, 2b # Turn "int $0x14" above into NOPs.
3:
cmp $'\r', %al
jne popa_ret
mov $'\n', %al
jmp 1b
#### Sector read subroutine. Takes a drive number in DL (0x80 = hard
#### disk 0, 0x81 = hard disk 1, ...) and a sector number in EBX, and
#### reads the specified sector into memory at ES:0000. Returns with
#### carry set on error, clear otherwise. Preserves all
#### general-purpose registers.
read_sector:
pusha
sub %ax, %ax
push %ax # LBA sector number [48:63]
push %ax # LBA sector number [32:47]
push %ebx # LBA sector number [0:31]
push %es # Buffer segment
push %ax # Buffer offset (always 0)
push $1 # Number of sectors to read
push $16 # Packet size
mov $0x42, %ah # Extended read
mov %sp, %si # DS:SI -> packet
int $0x13 # Error code in CF
popa # Pop 16 bytes, preserve flags
popa_ret:
popa
ret # Error code still in CF
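#### For reference, the pushes in read_sector build the 16-byte INT 13h
#### "disk address packet" on the stack, from low to high address: packet
#### size (16), sector count (1), buffer offset:segment (ES:0000), and the
#### 64-bit LBA sector number, with DS:SI left pointing at it for int $0x13.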
#### Command-line arguments and their count.
#### This is written by the `pintos' utility and read by the kernel.
#### The loader itself does not do anything with the command line.
.org LOADER_ARG_CNT - LOADER_BASE
.fill LOADER_ARG_CNT_LEN, 1, 0
.org LOADER_ARGS - LOADER_BASE
.fill LOADER_ARGS_LEN, 1, 0
#### Partition table.
.org LOADER_PARTS - LOADER_BASE
.fill LOADER_PARTS_LEN, 1, 0
#### Boot-sector signature for BIOS inspection.
.org LOADER_SIG - LOADER_BASE
.word 0xaa55
|
lucyyang01/pint-OS
| 1,748
|
src/threads/switch.S
|
#include "threads/switch.h"
#### struct thread *switch_threads (struct thread *cur, struct thread *next);
####
#### Switches from CUR, which must be the running thread, to NEXT,
#### which must also be running switch_threads(), returning CUR in
#### NEXT's context.
####
#### This function works by assuming that the thread we're switching
#### into is also running switch_threads(). Thus, all it has to do is
#### preserve a few registers on the stack, then switch stacks and
#### restore the registers. As part of switching stacks we record the
#### current stack pointer in CUR's thread structure.
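#### For reference, the frame this builds (which must match the one set up
#### by thread_create()): four saved registers (%ebx, %ebp, %esi, %edi)
#### with a 108-byte fsave area below them on the stack, and the resulting
#### %esp recorded at cur->stack (offset thread_stack_ofs).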
.globl switch_threads
.func switch_threads
switch_threads:
# Save caller's register state.
#
# Note that the SVR4 ABI allows us to destroy %eax, %ecx, %edx,
# but requires us to preserve %ebx, %ebp, %esi, %edi. See
# [SysV-ABI-386] pages 3-11 and 3-12 for details.
#
# This stack frame must match the one set up by thread_create()
# in size.
pushl %ebx
pushl %ebp
pushl %esi
pushl %edi
subl $108, %esp
fsave (%esp)
# Get offsetof (struct thread, stack).
.globl thread_stack_ofs
mov thread_stack_ofs, %edx
# Save current stack pointer to old thread's stack, if any.
movl SWITCH_CUR(%esp), %eax
movl %esp, (%eax,%edx,1)
# Restore stack pointer from new thread's stack.
movl SWITCH_NEXT(%esp), %ecx
movl (%ecx,%edx,1), %esp
# Restore caller's register state.
frstor (%esp)
addl $108, %esp
popl %edi
popl %esi
popl %ebp
popl %ebx
ret
.endfunc
.globl switch_entry
.func switch_entry
switch_entry:
# Discard switch_threads() arguments.
addl $8, %esp
# Call thread_switch_tail(prev).
pushl %eax
.globl thread_switch_tail
call thread_switch_tail
addl $4, %esp
# Start thread proper.
ret
.endfunc
|
LylioveinTheSilly/risc-v
| 2,005
|
assembly/main.s
|
# Given are three numbers:
#
# s0 - dividend
# s1 - divisor
# s2 - digit number
#
# Dividing s0 by s1 yields a number b (s0 / s1 = b).
# s2 indicates the n-th digit after the *first non-zero* digit
# of the result, which is the answer to the exercise.
#
# Example:
# s0: 100
# s1: 70
# s2: 4
#
# s0 / s1 = 1.4285714285714285
#                ^
#                s2
#
# Write a program that finds that digit. The digit should
# end up in register a0.
.globl main
main:
li s0, 1
li s1, 7
li s2, 17
li s3, 0 # Flag that tracks whether we have found the first non-zero digit
1: blt s0, s1, 2f # For the algorithm to work correctly, the divisor must be greater than the dividend
mv a0, s1
li a1, 10
call mul # If it is not, multiply the divisor by 10 in a loop until it becomes greater than the dividend
mv s1, a0
j 1b
2: mv a0, s0
mv a1, s1
loop:
beq s2, zero, end # Did we find our digit in the previous iteration?
# No, keep doing long division! :D
call div # How many times does s1 fit into s0?
mv s0, a0 # Store the result
mv a0, a1 # Move the division remainder into a0 and multiply it by 10
li a1, 10
call mul
mv a1, s1 # Move the divisor into a1 (to prepare for the next division)
sltu t0, zero, s3 # Have we already seen the first non-zero digit?
sltu t1, zero, s0 # Did this division produce a non-zero digit?
or t0, t0, t1 # Is either of the conditions above true?
li t1, 1
bne t0, t1, loop # If neither condition holds, keep looping
addi s2, s2, -1 # True! Count the digit counter down
li s3, 1 # Set the flag that we have already found a non-zero digit
j loop
end:
mv a0, s0
ebreak
mul:
mv t0, a0
li a0, 0
1: beq a1, zero, 2f
add a0, a0, t0
addi a1, a1, -1
j 1b
2: ret
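# Note: `div` is not defined in this file; it is assumed to be provided
# elsewhere with the convention implied above: quotient returned in a0 and
# remainder in a1, mirroring how `mul` takes its operands in a0/a1 and
# returns the product in a0.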
|
LylioveinTheSilly/risc-v
| 2,007
|
assembly/programs/digit.s
|
# Given are three numbers:
#
# s0 - dividend
# s1 - divisor
# s2 - digit number
#
# Dividing s0 by s1 yields a number b (s0 / s1 = b).
# s2 indicates the n-th digit after the *first non-zero* digit
# of the result, which is the answer to the exercise.
#
# Example:
# s0: 100
# s1: 70
# s2: 4
#
# s0 / s1 = 1.4285714285714285
#                ^
#                s2
#
# Write a program that finds that digit. The digit should
# end up in register a0.
.globl main
main:
li s0, 100
li s1, 70
li s2, 70
li s3, 0 # Flag that tracks whether we have found the first non-zero digit
1: blt s0, s1, 2f # For the algorithm to work correctly, the divisor must be greater than the dividend
mv a0, s1
li a1, 10
call mul # If it is not, multiply the divisor by 10 in a loop until it becomes greater than the dividend
mv s1, a0
j 1b
2: mv a0, s0
mv a1, s1
loop:
beq s2, zero, end # Did we find our digit in the previous iteration?
# No, keep doing long division! :D
call div # How many times does s1 fit into s0?
mv s0, a0 # Store the result
mv a0, a1 # Move the division remainder into a0 and multiply it by 10
li a1, 10
call mul
mv a1, s1 # Move the divisor into a1 (to prepare for the next division)
sltu t0, zero, s3 # Have we already seen the first non-zero digit?
sltu t1, zero, s0 # Did this division produce a non-zero digit?
or t0, t0, t1 # Is either of the conditions above true?
li t1, 1
bne t0, t1, loop # If neither condition holds, keep looping
addi s2, s2, -1 # True! Count the digit counter down
li s3, 1 # Set the flag that we have already found a non-zero digit
j loop
end:
mv a0, s0
ebreak
mul:
mv t0, a0
li a0, 0
1: beq a1, zero, 2f
add a0, a0, t0
addi a1, a1, -1
j 1b
2: ret
|
lyrakisk/vines
| 6,228
|
examples/color_test/color_test.s
|
; full screen color tester
; Brad Smith, 2015
;
; displays a single color on the full screen
; allows emphasis / greyscale toggle
;
.feature force_range
.macpack longbranch
; iNES header
.segment "HEADER"
INES_MAPPER = 0
INES_MIRROR = 0 ; 0 = horizontal mirroring, 1 = vertical mirroring
INES_SRAM = 0 ; 1 = battery backed SRAM at $6000-7FFF
.byte 'N', 'E', 'S', $1A ; ID
.byte $02 ; 16k PRG bank count
.byte $01 ; 4k CHR bank count
.byte INES_MIRROR | (INES_SRAM << 1) | ((INES_MAPPER & $f) << 4)
.byte (INES_MAPPER & %11110000)
.byte $0, $0, $0, $0, $0, $0, $0, $0 ; padding
; CHR ROM
.segment "TILES"
.incbin "test.chr"
.incbin "test.chr"
; Vectors, defined in CODE segment.
.segment "VECTORS"
.word nmi
.word reset
.word irq
; zero page variables
.segment "ZEROPAGE"
ppu_emphasis: .res 1
color: .res 1
temp: .res 1
gamepad: .res 1
gamepad_last: .res 1
.segment "OAM"
.assert ((* & $FF) = 0),error,"oam not aligned to page"
oam: .res 256
; RAM variables
.segment "BSS"
; CODE
.segment "CODE"
palette:
.byte $00,$16,$2D,$30
CX = 16
CY = 23
oam_fill:
.byte CY+(0*10), $B8, 0, CX+(2*8) ; 0
.byte CY+(0*10), $BA, 0, CX+(3*8) ; 1
.byte CY+(1*10), 'C', 0, CX+(0*8) ; 2
.byte CY+(1*10), '0', 0, CX+(2*8) ; 3 color h
.byte CY+(1*10), '0', 0, CX+(3*8) ; 4 color l
.byte CY+(2*10), $B9, 0, CX+(2*8) ; 5
.byte CY+(2*10), $BB, 0, CX+(3*8) ; 6
.byte CY+(4*10), $BA, 0, CX+(2*8) ; 7
.byte CY+(4*10), $B8, 0, CX+(3*8) ; 8
.byte CY+(4*10), $BB, 0, CX+(4*8) ; 9
.byte CY+(4*10), '+', 0, CX+(5*8) ; 10
.byte CY+(4*10), $B1, 0, CX+(6*8) ; 11
.byte CY+(5*10), 'E', 0, CX+(0*8) ; 12
.byte CY+(5*10), '0', 0, CX+(2*8) ; 13 emph b
.byte CY+(5*10), '0', 0, CX+(3*8) ; 14 emph g
.byte CY+(5*10), '0', 0, CX+(4*8) ; 15 emph r
.byte CY+(7*10), $B0, 0, CX+(2*8) ; 16
.byte CY+(8*10), 'S', 0, CX+(0*8) ; 17
.byte CY+(8*10), '0', 0, CX+(2*8) ; 18 sat
.byte CY+(10*10),'H', 0, CX+(0*8) ; 19
.byte CY+(10*10),$B2, 0, CX+(2*8) ; 20
.byte CY+(10*10),$B3, 0, CX+(3*8) ; 21
.byte CY+(10*10),$B4, 0, CX+(4*8) ; 22
; fill remainder with $FF
.repeat 256
.if ((* - oam_fill) < 256)
.byte $FF
.endif
.endrepeat
oam_tile_ch = oam + ( 3*4)+1
oam_tile_cl = oam + ( 4*4)+1
oam_tile_b = oam + (13*4)+1
oam_tile_g = oam + (14*4)+1
oam_tile_r = oam + (15*4)+1
oam_tile_s = oam + (18*4)+1
.macro PPU_LATCH addr
lda $2002
lda #>addr
sta $2006
lda #<addr
sta $2006
.endmacro
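; For reference, `PPU_LATCH $3F00` expands to: read $2002 (which resets the
; PPU address latch), then write the high byte and the low byte of the
; address to $2006.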
main:
; setup sprites
ldx #0
:
lda oam_fill, X
sta oam, X
inx
bne :-
; setup default palettes
PPU_LATCH $3F00
ldy #16
:
ldx #0
:
lda palette, X
sta $2007
inx
cpx #4
bcc :-
dey
bne :--
; setup nametable
PPU_LATCH $2000
ldy #16
:
ldx #0
:
lda #0
sta $2007
inx
bne :-
dey
bne :--
; setup variables that don't start as 0
lda #%00011110
sta ppu_emphasis
; start NMI
lda #$80
sta $2000
; enter infinite loop
main_loop:
jmp main_loop
PAD_A = $01
PAD_B = $02
PAD_SELECT = $04
PAD_START = $08
PAD_U = $10
PAD_D = $20
PAD_L = $40
PAD_R = $80
gamepad_poll:
lda #1
sta $4016
lda #0
sta $4016
ldx #8
:
pha
lda $4016
and #%00000011
cmp #%00000001
pla
ror
dex
bne :-
sta gamepad
lda gamepad
rts
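; A note on the read loop above (for reference): `and #%00000011` keeps the
; standard controller bit and the Famicom expansion bit, `cmp #%00000001`
; sets carry if either is 1, and `ror` rotates that carry into bit 7 of A;
; after eight reads the first bit read (the A button) has rotated down to
; bit 0, matching PAD_A below.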
nmi:
; update sprites
lda #0
sta $2003
lda #>oam
sta $4014
; set background color
PPU_LATCH $3F00
lda color
sta $2007
; set foreground color
cmp #$20
bcc @dark_background
@light_background:
lda ppu_emphasis
and #%00000001
bne :+
; if not in greyscale mode, E/F columns are all black
lda color
and #$0F
cmp #$0E
bcs @dark_background
:
lda #$0F
jmp @background_chosen
@dark_background:
lda #$30
@background_chosen:
pha
PPU_LATCH $3F13
pla
sta $2007
; set scroll
lda #0
sta $2005
sta $2005
; set emphasis state
lda ppu_emphasis
sta $2001
; respond to gamepad
jsr gamepad_poll
jsr gamepad_poll
lda gamepad_last
jne @gamepad_end ; wait for all buttons released
lda gamepad
and #(PAD_U | PAD_D | PAD_L | PAD_R | PAD_B)
cmp #PAD_U
bne :+
lda color
clc
adc #$F0
and #$3F
sta color
:
lda gamepad
and #(PAD_U | PAD_D | PAD_L | PAD_R | PAD_B)
cmp #PAD_D
bne :+
lda color
clc
adc #$10
and #$3F
sta color
:
lda gamepad
and #(PAD_U | PAD_D | PAD_L | PAD_R | PAD_B)
cmp #PAD_L
bne :+
lda color
and #$30
sta temp
lda color
sec
sbc #1
and #$0F
ora temp
sta color
:
lda gamepad
and #(PAD_U | PAD_D | PAD_L | PAD_R | PAD_B)
cmp #PAD_R
bne :+
lda color
and #$30
sta temp
lda color
clc
adc #1
and #$0F
ora temp
sta color
:
lda gamepad
and #PAD_A
beq :+
lda ppu_emphasis
eor #%00000001
sta ppu_emphasis
:
lda gamepad
and #(PAD_U | PAD_D | PAD_L | PAD_R | PAD_B)
cmp #(PAD_L | PAD_B)
bne :+
lda ppu_emphasis
eor #%10000000
sta ppu_emphasis
:
lda gamepad
and #(PAD_U | PAD_D | PAD_L | PAD_R | PAD_B)
cmp #(PAD_U | PAD_B)
bne :+
lda ppu_emphasis
eor #%01000000
sta ppu_emphasis
:
lda gamepad
and #(PAD_U | PAD_D | PAD_L | PAD_R | PAD_B)
cmp #(PAD_R | PAD_B)
bne :+
lda ppu_emphasis
eor #%00100000
sta ppu_emphasis
:
lda gamepad
and #(PAD_U | PAD_D | PAD_L | PAD_R | PAD_B)
cmp #(PAD_D | PAD_B)
bne :+
lda ppu_emphasis
and #%00011111
sta ppu_emphasis
:
lda gamepad
cmp #PAD_SELECT
bne :+
lda ppu_emphasis
eor #%00010100
sta ppu_emphasis
:
@gamepad_end:
lda gamepad
and #(PAD_U | PAD_D | PAD_L | PAD_R | PAD_A | PAD_SELECT)
sta gamepad_last
; redraw sprites
lda color
lsr
lsr
lsr
lsr
ora #$A0
sta oam_tile_ch ; color high
lda color
and #$0F
ora #$A0
sta oam_tile_cl ; color low
lda ppu_emphasis
rol
rol
and #1
ora #$A0
sta oam_tile_b ; emphasis B
lda ppu_emphasis
rol
rol
rol
and #1
ora #$A0
sta oam_tile_g ; emphasis G
lda ppu_emphasis
rol
rol
rol
rol
and #1
ora #$A0
sta oam_tile_r ; emphasis R
lda ppu_emphasis
and #1
ora #$A0
sta oam_tile_s ; saturate
; wait until next frame
rti
irq:
rti
reset:
sei
cld
ldx #$40
stx $4017
ldx #$ff
txs
ldx #$00
stx $2000
stx $2001
stx $4010
bit $2002
:
bit $2002
bpl :-
lda #$00
tax
:
sta $0000, X
sta $0100, X
sta $0200, X
sta $0300, X
sta $0400, X
sta $0500, X
sta $0600, X
sta $0700, X
inx
bne :-
:
bit $2002
bpl :-
jmp main
|
LZx-space/lex
| 4,416
|
src/asm/boot.S
|
# boot.S
# bootloader
# Disable generation of compressed instructions.
.option norvc
# Define a .text.init section. The .text.init is put at the
# starting address so that the entry _start is put at the RISC-V
# address 0x8000_0000.
.section .text.init
# Execution starts here.
.global _start
_start:
# Disable linker instruction relaxation for the `la` instruction below.
# This disallows the assembler from assuming that `gp` is already initialized.
# This causes the value stored in `gp` to be calculated from `pc`.
# The job of the global pointer is to give the linker the ability to address
# memory relative to GP instead of as an absolute address.
.option push
.option norelax
la gp, _global_pointer
.option pop
# SATP should be zero, but let's make sure. Each HART has its own
# SATP register.
csrw satp, zero
# Any hardware threads (hart) that are not bootstrapping
# need to wait for an IPI
csrr t0, mhartid
bnez t0, 3f
# Set all bytes in the BSS section to zero.
la a0, _bss_start
la a1, _bss_end
bgeu a0, a1, 2f
1:
sd zero, (a0)
addi a0, a0, 8
bltu a0, a1, 1b
2:
# The stack grows from bottom to top, so we put the stack pointer
# to the very end of the stack range.
la sp, _stack_end
# Setting `mstatus` register:
# 0b11 << 11: Previous privilege mode is 3 (MPP=3 [Machine]).
# 1 << 13: Floating-point unit status FS=01 (Initial).
li t0, 0b11 << 11 | (1 << 13)
csrw mstatus, t0
# Do not allow interrupts while running kernel_main
csrw mie, zero
# Machine's exception program counter (MEPC) is set to `kernel_main`.
la t1, kernel_main
csrw mepc, t1
# Set the return address so that kernel_main, when it returns, lands at 2: below.
la ra, 2f
# We use mret here so that the mstatus register is properly updated.
mret
2:
# We set the return address (ra above) to this label. When kernel_main() is finished
# in Rust, it will return here.
# Setting `mstatus` register:
# 0b00 << 11 : Previous privilege mode is 0 (MPP=00 [User]).
# 1 << 7 : Previous machine interrupt-enable bit is 1 (MPIE=1 [Enabled]).
# 1 << 5 : Previous supervisor interrupt-enable bit is 1 (SPIE=1 [Enabled]).
# 1 << 13 : Floating-point unit status FS=01 (Initial).
# We set the "previous" bits because the mret will write the current bits
# with the previous bits.
li t0, (0b00 << 11) | (1 << 7) | (1 << 5) | (1 << 13)
csrw mstatus, t0
# Machine's trap vector base address is set to `m_trap_vector`, for
# "machine" trap vector.
la t2, m_trap_vector
csrw mtvec, t2
# Jump to first process. We put the MPP = 00 for user mode, so after
# mret, we will jump to the first process' address in user mode.
la ra, 4f
mret
3:
# Parked harts go here. We need to set these
# to only awaken if it receives a software interrupt,
# which we're going to call the SIPI (Software Intra-Processor Interrupt).
# We call the SIPI by writing the software interrupt into the Core Local Interruptor (CLINT)
# Which is calculated by: base_address + hart * 4
# where base address is 0x0200_0000 (MMIO CLINT base address)
# We only use additional harts to run user-space programs, although this may
# change.
# We divide up the stack so the harts aren't clobbering one another.
la sp, _stack_end
li t0, 0x10000
csrr a0, mhartid
mul t0, t0, a0
sub sp, sp, t0
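# Worked example (for reference): with 0x10000 bytes of stack per hart,
# hart 2 gets sp = _stack_end - 0x20000.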
# The parked harts will be put into machine mode with interrupts enabled.
li t0, 0b11 << 11 | (1 << 7) | (1 << 13)
csrw mstatus, t0
# Allow for MSIP (Software interrupt). We will write the MSIP from hart #0 to
# awaken these parked harts.
li t3, (1 << 3)
csrw mie, t3
# ------------------------------
# Machine's exception program counter (MEPC) is set to the Rust initialization
# code and waiting loop.
# la t1, kinit_hart
# csrw mepc, t1
# ------------------------------
# Machine's trap vector base address is set to `m_trap_vector`, for
# "machine" trap vector. The Rust initialization routines will give each
# hart its own trap frame. We can use the same trap function and distinguish
# between each hart by looking at the trap frame.
la t2, m_trap_vector
csrw mtvec, t2
# Whenever our hart is done initializing, we want it to return to the waiting
# loop, which is just below mret.
la ra, 4f
# We use mret here so that the mstatus register is properly updated.
mret
4:
# wfi = wait for interrupt. This is a hint that the hart can power down
# whatever it doesn't need until an interrupt arrives. However, the RISC-V
# specification allows wfi to do nothing at all. Anyway, with QEMU, this
# will save some CPU!
wfi
j 4b
|
macabu/advent-of-code
| 7,707
|
2015/01/part1.s
|
.global _start
.section .text
_start:
la t0, input # c = &input[0]
li t6, 0 # counter = 0
loop:
lb t1, 0(t0) # t1 = *c
beq t1, zero, exit # if (t1 == '\0') goto exit
addi t0, t0, 1 # c += 1
li t2, 40 # open = '('
beq t1, t2, inc # if (t1 == open) goto inc
addi t6, t6, -1 # else counter -= 1
j loop # goto loop
inc:
addi t6, t6, 1 # counter += 1
j loop # goto loop
exit:
ebreak # breakpoint and check t6
# exit(0)
li a0, 0
li a7, 93
ecall
.section .data
input: .asciz "()(((()))(()()()((((()(((())(()(()((((((()(()(((())))((()(((()))((())(()((()()()()(((())(((((((())))()()(()(()(())(((((()()()((())(((((()()))))()(())(((())(())((((((())())))(()())))()))))()())()())((()()((()()()()(()((((((((()()())((()()(((((()(((())((())(()))()((((()((((((((())()((()())(())((()))())((((()())(((((((((((()()(((((()(()))())(((()(()))())((()(()())())())(()(((())(())())()()(()(()((()))((()))))((((()(((()))))((((()(()(()())())()(((()((((())((((()(((()()(())()()()())((()((((((()((()()))()((()))()(()()((())))(((()(((()))((()((()(()))(((()()(()(()()()))))()()(((()(((())())))))((()(((())()(()(())((()())))((((())))(()(()(()())()((()())))(((()((()(())()()((()((())(()()((())(())()))()))((()(())()))())(((((((()(()()(()(())())))))))(()((((((())((((())((())())(()()))))()(())(()())()())((())(()))))(()))(()((()))()(()((((((()()()()((((((((()(()(())((()()(()()))(())()())()((())))()))()())(((()))(())()(())()))()((()((()(()()())(())()()()((())())))((()()(()()((()(())()()())(((()(()()))))(())))(()(()())()))()()))))))()))))((((((())))())))(()(())())(()())))))(()))()))))))()((()))))()))))(()(()((()())())(()()))))(((())()))())())())(((()(()()))(())()(())(())((((((()()))))((()(()))))))(()))())(((()()(()))()())()()()())))))))))))))(())(()))(()))((()(())(()())(())())(()())(())()()(()())))()()()))(())())()))())())(())((())))))))(())))(())))))()))))((())(()(((()))))(()))()((()(())))(()())(((((()))()())()()))))()))))()))())(()(()()()))()))))))((()))))))))))()((()))((()(())((())()()(()()))()(()))))()()(()))()))(((())))(())()((())(())(()())()())())))))))())))()((())))()))(()))()()))(((((((()))())(()()))(()()(()))()(()((()())()))))))(((()()()())))(())()))()())(()()))()()))))))))(())))()))()()))))))()))()())))()(())(())))))()(())()()(()()))))())((()))))()))))(()(((((()))))))))())))())()(())()()))))(())))())()()())()()())()(()))))()))()))))))))())))((()))()))()))())))()())()()())))())))(()((())()((()))())))))())()(())((())))))))))))())()())(())())())(()))(()))()))())(()(())())()())()()(()))))(()(())))))))(())))())(())))))))())()()(())())())))(())))))()))()(()())()(()))())())))))()()(()))()))))())))))))))()))))()))))))())()())()()))))()())))())))))))))))()()))))()()(((()))()()(())()))))((()))))(()))(())())))(())()))))))(()))()))))(())())))))()))(()())))))))))))))())))))))))()((()())(()())))))))((()))))(())(())))()(()())())))())())(()()()())))()))))))())))))())()()())))))))))))()()(()))))()())()))((()())(()))))()(()))))))))))()())())(((())(()))))())()))()))()))))))()))))))(()))))()))))()(())))(())))(()))())()()(()()))()))(()()))))))))()))(()))())(()()(()(()())()()))()))))))))(())))))((()()(()))())())))))()))())(()())()()))())))()(()()()()))((())())))())()(()()))()))))))))(()))(())))()))))(()(()())(()))))()())())()))()()))())))))))))))())()))))))()))))))))())))))()))))())(()())))(())()))())())))))()()(()()())(()())))()()))(((()))(()()()))))()))))()))))((())))()((((((()()))))))())))))))))))(((()))))))))))))(())())))))())(()))))))(()))((()))())))()(()((()))()))()))))))))))())()))()(()()))))())))())(())()(()))()))())(()))()))))(()()))()()(())))))()))(())(()(()()))(()()())))))(((()))))))()))))))))))))(())(()))))()())())()()((()()))())))))(()))))())))))))()()()))))))))())))()(((()()))(())))))(((())())))))((()))()(()))(()))))(()())))(()))())))))()))))(())(())))()((()))(())())))()()))()))))))))()))(()()()(()()()(()))())(())()())(((()))(())))))))))(((()())))()()))))))))()(())(()))()((((())(())(()())))()))(((())()()()))((()))(()))())())))())))(()))())()())())(()(())())()()()(())))())(())))(())))(())()))()
))(()((()))))))))())(()))))))())(()()))()()))()(()(()())))()()(()((()((((((()))(())))()()()))())()))((()()(()))())((()(()(()))(()()))))()())))()))()())))))))()()((()())(())))()))(()))(())(()))())(()(())))()()))))))(((()(((()()))()(()(())())((()()))()))()))()))()(()()()(()))((()())()(())))()()))(((())()()())(())()((()()()()(()(())(()()))()(((((()())))((())))))(()()()))))(((()(())))()))((()((()(())()(()((())))((()())()(()))(((()())()()(()))(())(((()((()())()((())()())(((()()))((()((())(()))(()())(()()()))((()))(())(()((()()())((()))(())))(())(())(())))(()())))(((((()(()(((((()())((((()(()())(())(()()(((())((()(((()()(((()()((((((())))())(()((((((()(()))()))()()((()((()))))()(()()(()((()()))))))(((((()(((((())()()()(())())))))))()))((()()(())))(())(()()()())))))(()((((())))))))()()(((()(()(()(()(()())()()()(((((((((()()())()(()))((()()()()()(((((((()())()((())()))((((((()(()(()(()())(((()(((((((()(((())(((((((((())(())())()))((()(()))(((()()())(())(()(()()(((()(())()))())))(())((((((())(()()())()()(((()(((())(()(((())(((((((()(((((((((()))(())(()(()(()))))((()))()(())())())((()(()((()()))((()()((()(())(())(()((())(((())(((()()()((((((()()(())((((())()))))(())((()(()((())))(((((()(()()())())((())())))((())((()((()()((((((())(((()()(()())())(()(()))(()(()))())())()(((((((()(((()(())()()((())((()(()()((()(()()(((((((((((())((())((((((())((()((((()(()((((()(((((((())()((()))))())()((()((((()(()(((()((()())))(())())(((()(((())((((((()(((((((((()()(())))(()(((((()((((()())))((()((()((()(()()(((())((((((((((((()(((())(()(((((()))(()()(()()()()()()((())(((((((())(((((())))))())()(()()(()(()(((()()(((((())(()((()((()(((()()((()((((())()))()((((())(())))()())(((())(())(()()((()(((()()((((((((((()()(()())())(((((((((())((((()))()()((((())(()((((()(((())())(((((((((((()((((())))(())(()(((()(((()((())(((((()((()()(()(()()((((((()((((()((()(()((()(()((((((()))))()()(((((()((()(()(())()))(())(((((((()((((()())(()((()((()(()))())))(())((()))))(((((((()()()())(()))(()()((()())()((()((()()()(()(()()))(()())(())(((((()(((((((((((()((()(((()(((((((()()((((((()(((((()(()((()(((((())((((((()))((((())((()()((())(((())()(((((()()(((((()((()(()(((((((()(((((()((()((()((())(())((())(()))()()))(()()(()(()()(((((((()(((()(((())()(((((()((((((()())((((())()((()((()(()()())(()))((((()()((((((()((()(()(()((((()((()((())((((((()(()(())((((((()((((((((((()((())()))()(()(()(((((()()()))((())))()(()((((((((((((((()(((()((((()((())((()((()(((()()(()(((()((())(()()())))()(()(()(((((()()(()(()((((()(((((())()(()(()))(((((()()(((()()(())((((((((((((((())((())(((((((((((())()()()(())()(()(()(((((((((())(((()))(()()())(()((((()(())(((((()())(())((((((((())()((((()((((((())(()((()(())(((()((((()))(((((((((()()))((((()(())()()()(())(()((())((()()))()(((())(((((())((((((()()))(((((((((()((((((())))(((((((()((()(()(())))())(()(()))()(((((()())(()))()(()(())(((()))))())()())))(((((()))())()((()(()))))((()()()((((((()))()()((((((((())((()(()(((()(()((())((()())(()((((())(()(((()()()(()(()()))())())((((((((((())())((()))()((())(())(())))())()(()()(())))())(()))(((()(()()(((()(((())))()(((()(())()((((((())()))()))()((((((()(()(((((()())))()))))())()()(((()(((((())((()()(()((()((()(()(()(())))(()()()()((()(())(((()((()))((((()))())(())))())(()))()()()())()))(((()()())()((())))(())(()()()()(()())((()(()()((((())))((()((()(())((()(()((())()(()()(((()())()()())((()))((())(((()()(())))()()))(((()((())()(((((()())(())((())()())())((((((()(()(((((()))(()("
.equ input_len, . - input
|
macabu/advent-of-code
| 7,890
|
2015/01/part2.s
|
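# Advent of Code 2015, day 1, part 2 (RISC-V): walk the parentheses string,
# counting +1 for '(' and -1 for ')', and stop at the first character that
# takes the counter to -1; its 1-based position is left in t4 at the ebreak.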
.global _start
.section .text
_start:
la t0, input # ptr = &input[0]
li t6, 0 # counter = 0
li t4, 0 # index = 0
li t3, -1 # basement = -1
loop:
beq t6, t3, exit # if (counter == basement) goto exit
lb t1, 0(t0) # c = *ptr
beq t1, zero, exit # if (c == '\0') goto exit
addi t0, t0, 1 # ptr += 1
addi t4, t4, 1 # index += 1
li t2, 40 # open = '('
beq t1, t2, inc # if (c == open) goto inc
addi t6, t6, -1 # else counter -= 1
j loop # goto loop
inc:
addi t6, t6, 1 # counter += 1
j loop # goto loop
exit:
ebreak # breakpoint and check t4
# exit(0)
li a0, 0
li a7, 93
ecall
.section .data
input: .asciz "()(((()))(()()()((((()(((())(()(()((((((()(()(((())))((()(((()))((())(()((()()()()(((())(((((((())))()()(()(()(())(((((()()()((())(((((()()))))()(())(((())(())((((((())())))(()())))()))))()())()())((()()((()()()()(()((((((((()()())((()()(((((()(((())((())(()))()((((()((((((((())()((()())(())((()))())((((()())(((((((((((()()(((((()(()))())(((()(()))())((()(()())())())(()(((())(())())()()(()(()((()))((()))))((((()(((()))))((((()(()(()())())()(((()((((())((((()(((()()(())()()()())((()((((((()((()()))()((()))()(()()((())))(((()(((()))((()((()(()))(((()()(()(()()()))))()()(((()(((())())))))((()(((())()(()(())((()())))((((())))(()(()(()())()((()())))(((()((()(())()()((()((())(()()((())(())()))()))((()(())()))())(((((((()(()()(()(())())))))))(()((((((())((((())((())())(()()))))()(())(()())()())((())(()))))(()))(()((()))()(()((((((()()()()((((((((()(()(())((()()(()()))(())()())()((())))()))()())(((()))(())()(())()))()((()((()(()()())(())()()()((())())))((()()(()()((()(())()()())(((()(()()))))(())))(()(()())()))()()))))))()))))((((((())))())))(()(())())(()())))))(()))()))))))()((()))))()))))(()(()((()())())(()()))))(((())()))())())())(((()(()()))(())()(())(())((((((()()))))((()(()))))))(()))())(((()()(()))()())()()()())))))))))))))(())(()))(()))((()(())(()())(())())(()())(())()()(()())))()()()))(())())()))())())(())((())))))))(())))(())))))()))))((())(()(((()))))(()))()((()(())))(()())(((((()))()())()()))))()))))()))())(()(()()()))()))))))((()))))))))))()((()))((()(())((())()()(()()))()(()))))()()(()))()))(((())))(())()((())(())(()())()())())))))))())))()((())))()))(()))()()))(((((((()))())(()()))(()()(()))()(()((()())()))))))(((()()()())))(())()))()())(()()))()()))))))))(())))()))()()))))))()))()())))()(())(())))))()(())()()(()()))))())((()))))()))))(()(((((()))))))))())))())()(())()()))))(())))())()()())()()())()(()))))()))()))))))))())))((()))()))()))())))()())()()())))())))(()((())()((()))())))))())()(())((())))))))))))())()())(())())())(()))(()))()))())(()(())())()())()()(()))))(()(())))))))(())))())(())))))))())()()(())())())))(())))))()))()(()())()(()))())())))))()()(()))()))))())))))))))()))))()))))))())()())()()))))()())))())))))))))))()()))))()()(((()))()()(())()))))((()))))(()))(())())))(())()))))))(()))()))))(())())))))()))(()())))))))))))))())))))))))()((()())(()())))))))((()))))(())(())))()(()())())))())())(()()()())))()))))))())))))())()()())))))))))))()()(()))))()())()))((()())(()))))()(()))))))))))()())())(((())(()))))())()))()))()))))))()))))))(()))))()))))()(())))(())))(()))())()()(()()))()))(()()))))))))()))(()))())(()()(()(()())()()))()))))))))(())))))((()()(()))())())))))()))())(()())()()))())))()(()()()()))((())())))())()(()()))()))))))))(()))(())))()))))(()(()())(()))))()())())()))()()))())))))))))))())()))))))()))))))))())))))()))))())(()())))(())()))())())))))()()(()()())(()())))()()))(((()))(()()()))))()))))()))))((())))()((((((()()))))))())))))))))))(((()))))))))))))(())())))))())(()))))))(()))((()))())))()(()((()))()))()))))))))))())()))()(()()))))())))())(())()(()))()))())(()))()))))(()()))()()(())))))()))(())(()(()()))(()()())))))(((()))))))()))))))))))))(())(()))))()())())()()((()()))())))))(()))))())))))))()()()))))))))())))()(((()()))(())))))(((())())))))((()))()(()))(()))))(()())))(()))())))))()))))(())(())))()((()))(())())))()()))()))))))))()))(()()()(()()()(()))())(())()())(((()))(())))))))))(((()())))()()))))))))()(())(()))()((((())(())(()())))()))(((())()()()))((()))(()))())())))())))(()))())()())())(()(())())()()()(())))())(())))(())))(())()))()
))(()((()))))))))())(()))))))())(()()))()()))()(()(()())))()()(()((()((((((()))(())))()()()))())()))((()()(()))())((()(()(()))(()()))))()())))()))()())))))))()()((()())(())))()))(()))(())(()))())(()(())))()()))))))(((()(((()()))()(()(())())((()()))()))()))()))()(()()()(()))((()())()(())))()()))(((())()()())(())()((()()()()(()(())(()()))()(((((()())))((())))))(()()()))))(((()(())))()))((()((()(())()(()((())))((()())()(()))(((()())()()(()))(())(((()((()())()((())()())(((()()))((()((())(()))(()())(()()()))((()))(())(()((()()())((()))(())))(())(())(())))(()())))(((((()(()(((((()())((((()(()())(())(()()(((())((()(((()()(((()()((((((())))())(()((((((()(()))()))()()((()((()))))()(()()(()((()()))))))(((((()(((((())()()()(())())))))))()))((()()(())))(())(()()()())))))(()((((())))))))()()(((()(()(()(()(()())()()()(((((((((()()())()(()))((()()()()()(((((((()())()((())()))((((((()(()(()(()())(((()(((((((()(((())(((((((((())(())())()))((()(()))(((()()())(())(()(()()(((()(())()))())))(())((((((())(()()())()()(((()(((())(()(((())(((((((()(((((((((()))(())(()(()(()))))((()))()(())())())((()(()((()()))((()()((()(())(())(()((())(((())(((()()()((((((()()(())((((())()))))(())((()(()((())))(((((()(()()())())((())())))((())((()((()()((((((())(((()()(()())())(()(()))(()(()))())())()(((((((()(((()(())()()((())((()(()()((()(()()(((((((((((())((())((((((())((()((((()(()((((()(((((((())()((()))))())()((()((((()(()(((()((()())))(())())(((()(((())((((((()(((((((((()()(())))(()(((((()((((()())))((()((()((()(()()(((())((((((((((((()(((())(()(((((()))(()()(()()()()()()((())(((((((())(((((())))))())()(()()(()(()(((()()(((((())(()((()((()(((()()((()((((())()))()((((())(())))()())(((())(())(()()((()(((()()((((((((((()()(()())())(((((((((())((((()))()()((((())(()((((()(((())())(((((((((((()((((())))(())(()(((()(((()((())(((((()((()()(()(()()((((((()((((()((()(()((()(()((((((()))))()()(((((()((()(()(())()))(())(((((((()((((()())(()((()((()(()))())))(())((()))))(((((((()()()())(()))(()()((()())()((()((()()()(()(()()))(()())(())(((((()(((((((((((()((()(((()(((((((()()((((((()(((((()(()((()(((((())((((((()))((((())((()()((())(((())()(((((()()(((((()((()(()(((((((()(((((()((()((()((())(())((())(()))()()))(()()(()(()()(((((((()(((()(((())()(((((()((((((()())((((())()((()((()(()()())(()))((((()()((((((()((()(()(()((((()((()((())((((((()(()(())((((((()((((((((((()((())()))()(()(()(((((()()()))((())))()(()((((((((((((((()(((()((((()((())((()((()(((()()(()(((()((())(()()())))()(()(()(((((()()(()(()((((()(((((())()(()(()))(((((()()(((()()(())((((((((((((((())((())(((((((((((())()()()(())()(()(()(((((((((())(((()))(()()())(()((((()(())(((((()())(())((((((((())()((((()((((((())(()((()(())(((()((((()))(((((((((()()))((((()(())()()()(())(()((())((()()))()(((())(((((())((((((()()))(((((((((()((((((())))(((((((()((()(()(())))())(()(()))()(((((()())(()))()(()(())(((()))))())()())))(((((()))())()((()(()))))((()()()((((((()))()()((((((((())((()(()(((()(()((())((()())(()((((())(()(((()()()(()(()()))())())((((((((((())())((()))()((())(())(())))())()(()()(())))())(()))(((()(()()(((()(((())))()(((()(())()((((((())()))()))()((((((()(()(((((()())))()))))())()()(((()(((((())((()()(()((()((()(()(()(())))(()()()()((()(())(((()((()))((((()))())(())))())(()))()()()())()))(((()()())()((())))(())(()()()()(()())((()(()()((((())))((()((()(())((()(()((())()(()()(((()())()()())((()))((())(((()()(())))()()))(((()((())()(((((()())(())((())()())())((((((()(()(((((()))(()("
.equ input_len, . - input
|
mammuen/space-invaders
| 17,921
|
Startup/startup_stm32f302r8tx.s
|
/**
******************************************************************************
* @file startup_stm32f302r8tx.s
* @author Auto-generated by STM32CubeIDE
* @brief STM32F302R8Tx device vector table for GCC toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
******************************************************************************
* @attention
*
* <h2><center>© Copyright (c) 2020 STMicroelectronics.
* All rights reserved.</center></h2>
*
* This software component is licensed by ST under BSD 3-Clause license,
* the "License"; You may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
* opensource.org/licenses/BSD-3-Clause
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m4
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
/**
* @brief This is the code that gets called when the processor first
* starts execution following a reset event. Only the absolutely
* necessary set is performed, after which the application
* supplied main() routine is called.
* @param None
* @retval : None
*/
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr r0, =_estack
mov sp, r0 /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
ldr r0, =_sdata
ldr r1, =_edata
ldr r2, =_sidata
movs r3, #0
b LoopCopyDataInit
CopyDataInit:
ldr r4, [r2, r3]
str r4, [r0, r3]
adds r3, r3, #4
LoopCopyDataInit:
adds r4, r0, r3
cmp r4, r1
bcc CopyDataInit
/* Zero fill the bss segment. */
ldr r2, =_sbss
ldr r4, =_ebss
movs r3, #0
b LoopFillZerobss
FillZerobss:
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
cmp r2, r4
bcc FillZerobss
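/* For reference, the two loops above are roughly equivalent to:
   memcpy(&_sdata, &_sidata, (char *)&_edata - (char *)&_sdata);
   memset(&_sbss, 0, (char *)&_ebss - (char *)&_sbss); */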
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
LoopForever:
b LoopForever
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
*
* @param None
* @retval : None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The STM32F302R8Tx vector table. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word MemManage_Handler
.word BusFault_Handler
.word UsageFault_Handler
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word DebugMon_Handler
.word 0
.word PendSV_Handler
.word SysTick_Handler
.word WWDG_IRQHandler /* Window Watchdog interrupt */
.word PVD_IRQHandler /* PVD through EXTI line detection interrupt */
.word TAMP_STAMP_IRQHandler /* Tamper and TimeStamp interrupts */
.word RTC_WKUP_IRQHandler /* RTC Wakeup interrupt through the EXTI line */
.word FLASH_IRQHandler /* Flash global interrupt */
.word RCC_IRQHandler /* RCC global interrupt */
.word EXTI0_IRQHandler /* EXTI Line0 interrupt */
.word EXTI1_IRQHandler /* EXTI Line1 interrupt */
.word EXTI2_TSC_IRQHandler /* EXTI Line2 and Touch sensing interrupts */
.word EXTI3_IRQHandler /* EXTI Line3 interrupt */
.word EXTI4_IRQHandler /* EXTI Line4 interrupt */
.word DMA1_CH1_IRQHandler /* DMA1 channel 1 interrupt */
.word DMA1_CH2_IRQHandler /* DMA1 channel 2 interrupt */
.word DMA1_CH3_IRQHandler /* DMA1 channel 3 interrupt */
.word DMA1_CH4_IRQHandler /* DMA1 channel 4 interrupt */
.word DMA1_CH5_IRQHandler /* DMA1 channel 5 interrupt */
.word DMA1_CH6_IRQHandler /* DMA1 channel 6 interrupt */
.word DMA1_CH7_IRQHandler /* DMA1 channel 7 interrupt */
.word ADC1_2_IRQHandler /* ADC1 and ADC2 global interrupt */
.word USB_HP_CAN_TX_IRQHandler /* USB High Priority/CAN_TX interrupts */
.word USB_LP_CAN_RX0_IRQHandler /* USB Low Priority/CAN_RX0 interrupts */
.word CAN_RX1_IRQHandler /* CAN_RX1 interrupt */
.word CAN_SCE_IRQHandler /* CAN_SCE interrupt */
.word EXTI9_5_IRQHandler /* EXTI Line5 to Line9 interrupts */
.word TIM1_BRK_TIM15_IRQHandler /* TIM1 Break/TIM15 global interrupts */
.word TIM1_UP_TIM16_IRQHandler /* TIM1 Update/TIM16 global interrupts */
.word TIM1_TRG_COM_TIM17_IRQHandler /* TIM1 trigger and commutation/TIM17 interrupts */
.word TIM1_CC_IRQHandler /* TIM1 capture compare interrupt */
.word TIM2_IRQHandler /* TIM2 global interrupt */
.word TIM3_IRQHandler /* TIM3 global interrupt */
.word TIM4_IRQHandler /* TIM4 global interrupt */
.word I2C1_EV_EXTI23_IRQHandler /* I2C1 event interrupt and EXTI Line23 interrupt */
.word I2C1_ER_IRQHandler /* I2C1 error interrupt */
.word I2C2_EV_EXTI24_IRQHandler /* I2C2 event interrupt & EXTI Line24 interrupt */
.word I2C2_ER_IRQHandler /* I2C2 error interrupt */
.word SPI1_IRQHandler /* SPI1 global interrupt */
.word SPI2_IRQHandler /* SPI2 global interrupt */
.word USART1_EXTI25_IRQHandler /* USART1 global interrupt and EXTI Line 25 interrupt */
/* Renamed IRQ vector table entry from USART2_EXTI26_IRQHandler to USART2_IRQHandler. */
.word USART2_IRQHandler /* USART2 global interrupt and EXTI Line 26 interrupt */
.word USART3_EXTI28_IRQHandler /* USART3 global interrupt and EXTI Line 28 interrupt */
.word EXTI15_10_IRQHandler /* EXTI Line15 to Line10 interrupts */
.word RTCAlarm_IRQHandler /* RTC alarm interrupt */
.word USB_WKUP_IRQHandler /* USB wakeup from Suspend */
.word TIM8_BRK_IRQHandler /* TIM8 break interrupt */
.word TIM8_UP_IRQHandler /* TIM8 update interrupt */
.word TIM8_TRG_COM_IRQHandler /* TIM8 Trigger and commutation interrupts */
.word TIM8_CC_IRQHandler /* TIM8 capture compare interrupt */
.word ADC3_IRQHandler /* ADC3 global interrupt */
.word FMC_IRQHandler /* FMC global interrupt */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word SPI3_IRQHandler /* SPI3 global interrupt */
.word UART4_EXTI34_IRQHandler /* UART4 global and EXTI Line 34 interrupts */
.word UART5_EXTI35_IRQHandler /* UART5 global and EXTI Line 35 interrupts */
.word TIM6_DACUNDER_IRQHandler /* TIM6 global and DAC12 underrun interrupts */
.word TIM7_IRQHandler /* TIM7 global interrupt */
.word DMA2_CH1_IRQHandler /* DMA2 channel1 global interrupt */
.word DMA2_CH2_IRQHandler /* DMA2 channel2 global interrupt */
.word DMA2_CH3_IRQHandler /* DMA2 channel3 global interrupt */
.word DMA2_CH4_IRQHandler /* DMA2 channel4 global interrupt */
.word DMA2_CH5_IRQHandler /* DMA2 channel5 global interrupt */
.word ADC4_IRQHandler /* ADC4 global interrupt */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word USB_HP_IRQHandler /* USB High priority interrupt */
.word USB_LP_IRQHandler /* USB Low priority interrupt */
.word USB_WKUP_EXTI_IRQHandler /* USB wakeup from Suspend and EXTI Line 18 */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word FPU_IRQHandler /* Floating point unit interrupt */
.size g_pfnVectors, .-g_pfnVectors
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak MemManage_Handler
.thumb_set MemManage_Handler,Default_Handler
.weak BusFault_Handler
.thumb_set BusFault_Handler,Default_Handler
.weak UsageFault_Handler
.thumb_set UsageFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak DebugMon_Handler
.thumb_set DebugMon_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_IRQHandler
.thumb_set PVD_IRQHandler,Default_Handler
.weak TAMP_STAMP_IRQHandler
.thumb_set TAMP_STAMP_IRQHandler,Default_Handler
.weak RTC_WKUP_IRQHandler
.thumb_set RTC_WKUP_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_IRQHandler
.thumb_set RCC_IRQHandler,Default_Handler
.weak EXTI0_IRQHandler
.thumb_set EXTI0_IRQHandler,Default_Handler
.weak EXTI1_IRQHandler
.thumb_set EXTI1_IRQHandler,Default_Handler
.weak EXTI2_TSC_IRQHandler
.thumb_set EXTI2_TSC_IRQHandler,Default_Handler
.weak EXTI3_IRQHandler
.thumb_set EXTI3_IRQHandler,Default_Handler
.weak EXTI4_IRQHandler
.thumb_set EXTI4_IRQHandler,Default_Handler
.weak DMA1_CH1_IRQHandler
.thumb_set DMA1_CH1_IRQHandler,Default_Handler
.weak DMA1_CH2_IRQHandler
.thumb_set DMA1_CH2_IRQHandler,Default_Handler
.weak DMA1_CH3_IRQHandler
.thumb_set DMA1_CH3_IRQHandler,Default_Handler
.weak DMA1_CH4_IRQHandler
.thumb_set DMA1_CH4_IRQHandler,Default_Handler
.weak DMA1_CH5_IRQHandler
.thumb_set DMA1_CH5_IRQHandler,Default_Handler
.weak DMA1_CH6_IRQHandler
.thumb_set DMA1_CH6_IRQHandler,Default_Handler
.weak DMA1_CH7_IRQHandler
.thumb_set DMA1_CH7_IRQHandler,Default_Handler
.weak ADC1_2_IRQHandler
.thumb_set ADC1_2_IRQHandler,Default_Handler
.weak USB_HP_CAN_TX_IRQHandler
.thumb_set USB_HP_CAN_TX_IRQHandler,Default_Handler
.weak USB_LP_CAN_RX0_IRQHandler
.thumb_set USB_LP_CAN_RX0_IRQHandler,Default_Handler
.weak CAN_RX1_IRQHandler
.thumb_set CAN_RX1_IRQHandler,Default_Handler
.weak CAN_SCE_IRQHandler
.thumb_set CAN_SCE_IRQHandler,Default_Handler
.weak EXTI9_5_IRQHandler
.thumb_set EXTI9_5_IRQHandler,Default_Handler
.weak TIM1_BRK_TIM15_IRQHandler
.thumb_set TIM1_BRK_TIM15_IRQHandler,Default_Handler
.weak TIM1_UP_TIM16_IRQHandler
.thumb_set TIM1_UP_TIM16_IRQHandler,Default_Handler
.weak TIM1_TRG_COM_TIM17_IRQHandler
.thumb_set TIM1_TRG_COM_TIM17_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM4_IRQHandler
.thumb_set TIM4_IRQHandler,Default_Handler
.weak I2C1_EV_EXTI23_IRQHandler
.thumb_set I2C1_EV_EXTI23_IRQHandler,Default_Handler
.weak I2C1_ER_IRQHandler
.thumb_set I2C1_ER_IRQHandler,Default_Handler
.weak I2C2_EV_EXTI24_IRQHandler
.thumb_set I2C2_EV_EXTI24_IRQHandler,Default_Handler
.weak I2C2_ER_IRQHandler
.thumb_set I2C2_ER_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_EXTI25_IRQHandler
.thumb_set USART1_EXTI25_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_EXTI28_IRQHandler
.thumb_set USART3_EXTI28_IRQHandler,Default_Handler
.weak EXTI15_10_IRQHandler
.thumb_set EXTI15_10_IRQHandler,Default_Handler
.weak RTCAlarm_IRQHandler
.thumb_set RTCAlarm_IRQHandler,Default_Handler
.weak USB_WKUP_IRQHandler
.thumb_set USB_WKUP_IRQHandler,Default_Handler
.weak TIM8_BRK_IRQHandler
.thumb_set TIM8_BRK_IRQHandler,Default_Handler
.weak TIM8_UP_IRQHandler
.thumb_set TIM8_UP_IRQHandler,Default_Handler
.weak TIM8_TRG_COM_IRQHandler
.thumb_set TIM8_TRG_COM_IRQHandler,Default_Handler
.weak TIM8_CC_IRQHandler
.thumb_set TIM8_CC_IRQHandler,Default_Handler
.weak ADC3_IRQHandler
.thumb_set ADC3_IRQHandler,Default_Handler
.weak FMC_IRQHandler
.thumb_set FMC_IRQHandler,Default_Handler
.weak SPI3_IRQHandler
.thumb_set SPI3_IRQHandler,Default_Handler
.weak UART4_EXTI34_IRQHandler
.thumb_set UART4_EXTI34_IRQHandler,Default_Handler
.weak UART5_EXTI35_IRQHandler
.thumb_set UART5_EXTI35_IRQHandler,Default_Handler
.weak TIM6_DACUNDER_IRQHandler
.thumb_set TIM6_DACUNDER_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak DMA2_CH1_IRQHandler
.thumb_set DMA2_CH1_IRQHandler,Default_Handler
.weak DMA2_CH2_IRQHandler
.thumb_set DMA2_CH2_IRQHandler,Default_Handler
.weak DMA2_CH3_IRQHandler
.thumb_set DMA2_CH3_IRQHandler,Default_Handler
.weak DMA2_CH4_IRQHandler
.thumb_set DMA2_CH4_IRQHandler,Default_Handler
.weak DMA2_CH5_IRQHandler
.thumb_set DMA2_CH5_IRQHandler,Default_Handler
.weak ADC4_IRQHandler
.thumb_set ADC4_IRQHandler,Default_Handler
.weak USB_HP_IRQHandler
.thumb_set USB_HP_IRQHandler,Default_Handler
.weak USB_LP_IRQHandler
.thumb_set USB_LP_IRQHandler,Default_Handler
.weak USB_WKUP_EXTI_IRQHandler
.thumb_set USB_WKUP_EXTI_IRQHandler,Default_Handler
.weak FPU_IRQHandler
.thumb_set FPU_IRQHandler,Default_Handler
.weak SystemInit
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
manalejandro/rustkernel
| 2,493
|
kernel/src/arch/x86_64/exceptions.s
|
# SPDX-License-Identifier: GPL-2.0
# Exception handler stubs for x86_64
.section .text
# Macro for exception handlers without error code
.macro EXCEPTION_STUB name, vector
.global \name
\name:
push $0 # Push dummy error code
push $\vector # Push vector number
jmp exception_common
.endm
# Macro for exception handlers with error code
.macro EXCEPTION_STUB_ERR name, vector
.global \name
\name:
push $\vector # Push vector number (error code already on stack)
jmp exception_common
.endm
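# Vectors 8, 10-14 and 17 push a hardware error code; the rest get a dummy 0
# so exception_common always sees the same frame layout.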
# Exception handlers
EXCEPTION_STUB divide_error_handler, 0
EXCEPTION_STUB debug_handler, 1
EXCEPTION_STUB nmi_handler, 2
EXCEPTION_STUB breakpoint_handler, 3
EXCEPTION_STUB overflow_handler, 4
EXCEPTION_STUB bound_range_exceeded_handler, 5
EXCEPTION_STUB invalid_opcode_handler, 6
EXCEPTION_STUB device_not_available_handler, 7
EXCEPTION_STUB_ERR double_fault_handler, 8
EXCEPTION_STUB_ERR invalid_tss_handler, 10
EXCEPTION_STUB_ERR segment_not_present_handler, 11
EXCEPTION_STUB_ERR stack_segment_fault_handler, 12
EXCEPTION_STUB_ERR general_protection_fault_handler, 13
EXCEPTION_STUB_ERR page_fault_handler, 14
EXCEPTION_STUB x87_fpu_error_handler, 16
EXCEPTION_STUB_ERR alignment_check_handler, 17
EXCEPTION_STUB machine_check_handler, 18
EXCEPTION_STUB simd_exception_handler, 19
# Common exception handler
exception_common:
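# Frame seen by exception_handler (via %rdi), lowest address first:
# gs, fs, es, ds (each in a 64-bit slot), r15..r8, rdi, rsi, rbp, rbx,
# rdx, rcx, rax, vector number, error code, then the CPU-pushed iretq
# frame (rip, cs, rflags, rsp, ss).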
# Save all registers
push %rax
push %rcx
push %rdx
push %rbx
push %rbp
push %rsi
push %rdi
push %r8
push %r9
push %r10
push %r11
push %r12
push %r13
push %r14
push %r15
# Save segment registers
mov %ds, %ax
push %rax
mov %es, %ax
push %rax
mov %fs, %ax
push %rax
mov %gs, %ax
push %rax
# Load kernel data segment
mov $0x10, %ax
mov %ax, %ds
mov %ax, %es
mov %ax, %fs
mov %ax, %gs
# Call exception handler
mov %rsp, %rdi # Pass stack pointer
call exception_handler
# Restore segment registers
pop %rax
mov %ax, %gs
pop %rax
mov %ax, %fs
pop %rax
mov %ax, %es
pop %rax
mov %ax, %ds
# Restore all registers
pop %r15
pop %r14
pop %r13
pop %r12
pop %r11
pop %r10
pop %r9
pop %r8
pop %rdi
pop %rsi
pop %rbp
pop %rbx
pop %rdx
pop %rcx
pop %rax
# Remove vector number and error code
add $16, %rsp
# Return from interrupt
iretq
|
manalejandro/rustkernel
| 5,655
|
kernel/src/arch/x86_64/boot.s
|
# SPDX-License-Identifier: GPL-2.0
# Rust Kernel boot entry point for x86_64
.section .multiboot_header
header_start:
# Multiboot2 header
.long 0xe85250d6 # magic number
.long 0 # architecture (i386)
.long header_end - header_start # header length
# checksum
.long -(0xe85250d6 + 0 + (header_end - header_start))
# end tag
.word 0 # type
.word 0 # flags
.long 8 # size
header_end:
.section .bss
# Multiboot information storage
multiboot_magic_store:
.skip 4
multiboot_info_store:
.skip 4
# Stack for the kernel
.global stack_bottom
.global stack_top
stack_bottom:
.skip 16384 # 16 KiB stack
stack_top:
# Bootstrap page tables
.align 4096
.global boot_page_directory_ptr_table
boot_page_directory_ptr_table:
.skip 4096
.global boot_page_directory_table
boot_page_directory_table:
.skip 4096
.global boot_page_table
boot_page_table:
.skip 4096
.section .rodata
gdt64:
.quad 0 # null descriptor
.set gdt64.code, . - gdt64
.quad (1<<44) | (1<<47) | (1<<41) | (1<<43) | (1<<53) # code segment
.set gdt64.data, . - gdt64
.quad (1<<44) | (1<<47) | (1<<41) # data segment
.set gdt64.pointer, . - gdt64
.word . - gdt64 - 1 # length
.quad gdt64 # address
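# Descriptor bit legend: 41 = readable/writable, 43 = executable,
# 44 = code/data (S) type, 47 = present, 53 = 64-bit code (L bit).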
.section .text
.global _start
.code32
_start:
# Set up stack
movl $stack_top, %esp
movl %esp, %ebp
# Save multiboot information before we lose it
movl %eax, multiboot_magic_store
movl %ebx, multiboot_info_store
# Check for multiboot
cmpl $0x36d76289, %eax
jne no_multiboot
# Check for CPUID
call check_cpuid
test %eax, %eax
jz no_cpuid
# Check for long mode
call check_long_mode
test %eax, %eax
jz no_long_mode
# Set up page tables for long mode
call setup_page_tables
# Enable PAE
movl %cr4, %eax
orl $1 << 5, %eax # Set PAE bit
movl %eax, %cr4
# Load page table
movl $boot_page_directory_ptr_table, %eax
movl %eax, %cr3
# Enable long mode
movl $0xC0000080, %ecx # EFER MSR
rdmsr
orl $1 << 8, %eax # Set LM bit
wrmsr
# Enable paging
movl %cr0, %eax
orl $1 << 31, %eax # Set PG bit
movl %eax, %cr0
# Load GDT
lgdt gdt64.pointer
# Far jump to 64-bit code
ljmp $gdt64.code, $start64
check_cpuid:
# Try to flip the ID bit (bit 21) in FLAGS
pushfl
popl %eax
movl %eax, %ecx
xorl $1 << 21, %eax
pushl %eax
popfl
pushfl
popl %eax
pushl %ecx
popfl
cmpl %ecx, %eax
setne %al # CPUID is supported iff the ID bit actually flipped
movzbl %al, %eax
ret
check_long_mode:
# Check if extended processor info is available
movl $0x80000000, %eax
cpuid
cmpl $0x80000001, %eax
jb .no_long_mode
# Check if long mode is available
movl $0x80000001, %eax
cpuid
testl $1 << 29, %edx # LM bit
setnz %al # long mode is supported iff bit 29 is set
movzbl %al, %eax
ret
.no_long_mode:
xorl %eax, %eax
ret
setup_page_tables:
# Long mode requires four-level paging, so CR3 must point at a PML4.
# Chain: boot_page_directory_ptr_table (used as the PML4)
#        -> boot_page_directory_table (used as the PDPT)
#        -> boot_page_table (used as a page directory of 2 MiB pages),
# identity-mapping the first 1 GiB.
# PML4 entry
movl $boot_page_directory_table, %eax
orl $0b11, %eax # present + writable
movl %eax, boot_page_directory_ptr_table
# PDPT entry
movl $boot_page_table, %eax
orl $0b11, %eax # present + writable
movl %eax, boot_page_directory_table
# Page directory entries (identity map first 1 GiB as 2 MiB pages)
movl $boot_page_table, %edi
movl $0, %ebx
movl $512, %ecx
.map_page_table:
movl %ebx, %eax
shll $21, %eax # multiply by 2 MiB (large-page size)
orl $0b10000011, %eax # present + writable + page size (2 MiB)
movl %eax, (%edi)
addl $8, %edi
incl %ebx
loop .map_page_table
ret
.code64
start64:
# Set up segment registers
movw $gdt64.data, %ax
movw %ax, %ds
movw %ax, %es
movw %ax, %fs
movw %ax, %gs
movw %ax, %ss
# Set up stack
movq $stack_top, %rsp
# Clear the screen
call clear_screen
# Print boot message
movq $boot_msg, %rsi
call print_string
# Get multiboot parameters from saved locations
movl multiboot_magic_store, %edi # multiboot magic -> first argument
movl multiboot_info_store, %esi # multiboot info -> second argument
# Call Rust kernel main with multiboot parameters
call kernel_main_multiboot
# If we get here, halt
halt:
cli
hlt
jmp halt
# Clear VGA text buffer
clear_screen:
movq $0xb8000, %rdi
movw $0x0f20, %ax # White on black space
movl $2000, %ecx # 80*25 characters
rep stosw
ret
# Print string to VGA buffer
# RSI = string pointer
print_string:
movq $0xb8000, %rdi
movb $0x0f, %ah # White on black
.print_loop:
lodsb
testb %al, %al
jz .print_done
stosw
jmp .print_loop
.print_done:
ret
no_multiboot:
movl $no_multiboot_msg, %esi
call print_string_32
jmp halt32
no_cpuid:
movl $no_cpuid_msg, %esi
call print_string_32
jmp halt32
no_long_mode:
movl $no_long_mode_msg, %esi
call print_string_32
jmp halt32
# 32-bit string printing
print_string_32:
movl $0xb8000, %edi
movb $0x4f, %ah # White on red
.print_loop_32:
lodsb
testb %al, %al
jz .print_done_32
stosw
jmp .print_loop_32
.print_done_32:
ret
halt32:
cli
hlt
jmp halt32
.section .rodata
boot_msg:
.asciz "Rust Kernel booting..."
no_multiboot_msg:
.asciz "ERROR: Not loaded by multiboot bootloader"
no_cpuid_msg:
.asciz "ERROR: CPUID not supported"
no_long_mode_msg:
.asciz "ERROR: Long mode not supported"
|
ManchesterStingerMotorsports/Auxillary-Systems
| 22,858
|
Core/Startup/startup_stm32f407vgtx.s
|
/**
******************************************************************************
* @file startup_stm32f407xx.s
* @author MCD Application Team
* @brief STM32F407xx Devices vector table for GCC based toolchains.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M4 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
* @attention
*
* Copyright (c) 2017 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m4
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
/* stack used for SystemInit_ExtMemCtl; always internal RAM used */
/**
* @brief This is the code that gets called when the processor first
* starts execution following a reset event. Only the absolutely
* necessary set is performed, after which the application
* supplied main() routine is called.
* @param None
* @retval : None
*/
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr sp, =_estack /* set stack pointer */
/* Call the clock system initialization function.*/
bl SystemInit
/* Copy the data segment initializers from flash to SRAM */
ldr r0, =_sdata
ldr r1, =_edata
ldr r2, =_sidata
movs r3, #0
b LoopCopyDataInit
CopyDataInit:
ldr r4, [r2, r3]
str r4, [r0, r3]
adds r3, r3, #4
LoopCopyDataInit:
adds r4, r0, r3
cmp r4, r1
bcc CopyDataInit
/* Zero fill the bss segment. */
ldr r2, =_sbss
ldr r4, =_ebss
movs r3, #0
b LoopFillZerobss
FillZerobss:
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
cmp r2, r4
bcc FillZerobss
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
bx lr
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
* @param None
* @retval None
*/
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex-M4. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
*******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word MemManage_Handler
.word BusFault_Handler
.word UsageFault_Handler
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word DebugMon_Handler
.word 0
.word PendSV_Handler
.word SysTick_Handler
/* External Interrupts */
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_IRQHandler /* PVD through EXTI Line detection */
.word TAMP_STAMP_IRQHandler /* Tamper and TimeStamps through the EXTI line */
.word RTC_WKUP_IRQHandler /* RTC Wakeup through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_IRQHandler /* RCC */
.word EXTI0_IRQHandler /* EXTI Line0 */
.word EXTI1_IRQHandler /* EXTI Line1 */
.word EXTI2_IRQHandler /* EXTI Line2 */
.word EXTI3_IRQHandler /* EXTI Line3 */
.word EXTI4_IRQHandler /* EXTI Line4 */
.word DMA1_Stream0_IRQHandler /* DMA1 Stream 0 */
.word DMA1_Stream1_IRQHandler /* DMA1 Stream 1 */
.word DMA1_Stream2_IRQHandler /* DMA1 Stream 2 */
.word DMA1_Stream3_IRQHandler /* DMA1 Stream 3 */
.word DMA1_Stream4_IRQHandler /* DMA1 Stream 4 */
.word DMA1_Stream5_IRQHandler /* DMA1 Stream 5 */
.word DMA1_Stream6_IRQHandler /* DMA1 Stream 6 */
.word ADC_IRQHandler /* ADC1, ADC2 and ADC3s */
.word CAN1_TX_IRQHandler /* CAN1 TX */
.word CAN1_RX0_IRQHandler /* CAN1 RX0 */
.word CAN1_RX1_IRQHandler /* CAN1 RX1 */
.word CAN1_SCE_IRQHandler /* CAN1 SCE */
.word EXTI9_5_IRQHandler /* External Line[9:5]s */
.word TIM1_BRK_TIM9_IRQHandler /* TIM1 Break and TIM9 */
.word TIM1_UP_TIM10_IRQHandler /* TIM1 Update and TIM10 */
.word TIM1_TRG_COM_TIM11_IRQHandler /* TIM1 Trigger and Commutation and TIM11 */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM4_IRQHandler /* TIM4 */
.word I2C1_EV_IRQHandler /* I2C1 Event */
.word I2C1_ER_IRQHandler /* I2C1 Error */
.word I2C2_EV_IRQHandler /* I2C2 Event */
.word I2C2_ER_IRQHandler /* I2C2 Error */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_IRQHandler /* USART3 */
.word EXTI15_10_IRQHandler /* External Line[15:10]s */
.word RTC_Alarm_IRQHandler /* RTC Alarm (A and B) through EXTI Line */
.word OTG_FS_WKUP_IRQHandler /* USB OTG FS Wakeup through EXTI line */
.word TIM8_BRK_TIM12_IRQHandler /* TIM8 Break and TIM12 */
.word TIM8_UP_TIM13_IRQHandler /* TIM8 Update and TIM13 */
.word TIM8_TRG_COM_TIM14_IRQHandler /* TIM8 Trigger and Commutation and TIM14 */
.word TIM8_CC_IRQHandler /* TIM8 Capture Compare */
.word DMA1_Stream7_IRQHandler /* DMA1 Stream7 */
.word FSMC_IRQHandler /* FSMC */
.word SDIO_IRQHandler /* SDIO */
.word TIM5_IRQHandler /* TIM5 */
.word SPI3_IRQHandler /* SPI3 */
.word UART4_IRQHandler /* UART4 */
.word UART5_IRQHandler /* UART5 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC1&2 underrun errors */
.word TIM7_IRQHandler /* TIM7 */
.word DMA2_Stream0_IRQHandler /* DMA2 Stream 0 */
.word DMA2_Stream1_IRQHandler /* DMA2 Stream 1 */
.word DMA2_Stream2_IRQHandler /* DMA2 Stream 2 */
.word DMA2_Stream3_IRQHandler /* DMA2 Stream 3 */
.word DMA2_Stream4_IRQHandler /* DMA2 Stream 4 */
.word ETH_IRQHandler /* Ethernet */
.word ETH_WKUP_IRQHandler /* Ethernet Wakeup through EXTI line */
.word CAN2_TX_IRQHandler /* CAN2 TX */
.word CAN2_RX0_IRQHandler /* CAN2 RX0 */
.word CAN2_RX1_IRQHandler /* CAN2 RX1 */
.word CAN2_SCE_IRQHandler /* CAN2 SCE */
.word OTG_FS_IRQHandler /* USB OTG FS */
.word DMA2_Stream5_IRQHandler /* DMA2 Stream 5 */
.word DMA2_Stream6_IRQHandler /* DMA2 Stream 6 */
.word DMA2_Stream7_IRQHandler /* DMA2 Stream 7 */
.word USART6_IRQHandler /* USART6 */
.word I2C3_EV_IRQHandler /* I2C3 event */
.word I2C3_ER_IRQHandler /* I2C3 error */
.word OTG_HS_EP1_OUT_IRQHandler /* USB OTG HS End Point 1 Out */
.word OTG_HS_EP1_IN_IRQHandler /* USB OTG HS End Point 1 In */
.word OTG_HS_WKUP_IRQHandler /* USB OTG HS Wakeup through EXTI */
.word OTG_HS_IRQHandler /* USB OTG HS */
.word DCMI_IRQHandler /* DCMI */
.word 0 /* CRYP crypto */
.word HASH_RNG_IRQHandler /* Hash and Rng */
.word FPU_IRQHandler /* FPU */
.size g_pfnVectors, .-g_pfnVectors
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak MemManage_Handler
.thumb_set MemManage_Handler,Default_Handler
.weak BusFault_Handler
.thumb_set BusFault_Handler,Default_Handler
.weak UsageFault_Handler
.thumb_set UsageFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak DebugMon_Handler
.thumb_set DebugMon_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_IRQHandler
.thumb_set PVD_IRQHandler,Default_Handler
.weak TAMP_STAMP_IRQHandler
.thumb_set TAMP_STAMP_IRQHandler,Default_Handler
.weak RTC_WKUP_IRQHandler
.thumb_set RTC_WKUP_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_IRQHandler
.thumb_set RCC_IRQHandler,Default_Handler
.weak EXTI0_IRQHandler
.thumb_set EXTI0_IRQHandler,Default_Handler
.weak EXTI1_IRQHandler
.thumb_set EXTI1_IRQHandler,Default_Handler
.weak EXTI2_IRQHandler
.thumb_set EXTI2_IRQHandler,Default_Handler
.weak EXTI3_IRQHandler
.thumb_set EXTI3_IRQHandler,Default_Handler
.weak EXTI4_IRQHandler
.thumb_set EXTI4_IRQHandler,Default_Handler
.weak DMA1_Stream0_IRQHandler
.thumb_set DMA1_Stream0_IRQHandler,Default_Handler
.weak DMA1_Stream1_IRQHandler
.thumb_set DMA1_Stream1_IRQHandler,Default_Handler
.weak DMA1_Stream2_IRQHandler
.thumb_set DMA1_Stream2_IRQHandler,Default_Handler
.weak DMA1_Stream3_IRQHandler
.thumb_set DMA1_Stream3_IRQHandler,Default_Handler
.weak DMA1_Stream4_IRQHandler
.thumb_set DMA1_Stream4_IRQHandler,Default_Handler
.weak DMA1_Stream5_IRQHandler
.thumb_set DMA1_Stream5_IRQHandler,Default_Handler
.weak DMA1_Stream6_IRQHandler
.thumb_set DMA1_Stream6_IRQHandler,Default_Handler
.weak ADC_IRQHandler
.thumb_set ADC_IRQHandler,Default_Handler
.weak CAN1_TX_IRQHandler
.thumb_set CAN1_TX_IRQHandler,Default_Handler
.weak CAN1_RX0_IRQHandler
.thumb_set CAN1_RX0_IRQHandler,Default_Handler
.weak CAN1_RX1_IRQHandler
.thumb_set CAN1_RX1_IRQHandler,Default_Handler
.weak CAN1_SCE_IRQHandler
.thumb_set CAN1_SCE_IRQHandler,Default_Handler
.weak EXTI9_5_IRQHandler
.thumb_set EXTI9_5_IRQHandler,Default_Handler
.weak TIM1_BRK_TIM9_IRQHandler
.thumb_set TIM1_BRK_TIM9_IRQHandler,Default_Handler
.weak TIM1_UP_TIM10_IRQHandler
.thumb_set TIM1_UP_TIM10_IRQHandler,Default_Handler
.weak TIM1_TRG_COM_TIM11_IRQHandler
.thumb_set TIM1_TRG_COM_TIM11_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM4_IRQHandler
.thumb_set TIM4_IRQHandler,Default_Handler
.weak I2C1_EV_IRQHandler
.thumb_set I2C1_EV_IRQHandler,Default_Handler
.weak I2C1_ER_IRQHandler
.thumb_set I2C1_ER_IRQHandler,Default_Handler
.weak I2C2_EV_IRQHandler
.thumb_set I2C2_EV_IRQHandler,Default_Handler
.weak I2C2_ER_IRQHandler
.thumb_set I2C2_ER_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_IRQHandler
.thumb_set USART3_IRQHandler,Default_Handler
.weak EXTI15_10_IRQHandler
.thumb_set EXTI15_10_IRQHandler,Default_Handler
.weak RTC_Alarm_IRQHandler
.thumb_set RTC_Alarm_IRQHandler,Default_Handler
.weak OTG_FS_WKUP_IRQHandler
.thumb_set OTG_FS_WKUP_IRQHandler,Default_Handler
.weak TIM8_BRK_TIM12_IRQHandler
.thumb_set TIM8_BRK_TIM12_IRQHandler,Default_Handler
.weak TIM8_UP_TIM13_IRQHandler
.thumb_set TIM8_UP_TIM13_IRQHandler,Default_Handler
.weak TIM8_TRG_COM_TIM14_IRQHandler
.thumb_set TIM8_TRG_COM_TIM14_IRQHandler,Default_Handler
.weak TIM8_CC_IRQHandler
.thumb_set TIM8_CC_IRQHandler,Default_Handler
.weak DMA1_Stream7_IRQHandler
.thumb_set DMA1_Stream7_IRQHandler,Default_Handler
.weak FSMC_IRQHandler
.thumb_set FSMC_IRQHandler,Default_Handler
.weak SDIO_IRQHandler
.thumb_set SDIO_IRQHandler,Default_Handler
.weak TIM5_IRQHandler
.thumb_set TIM5_IRQHandler,Default_Handler
.weak SPI3_IRQHandler
.thumb_set SPI3_IRQHandler,Default_Handler
.weak UART4_IRQHandler
.thumb_set UART4_IRQHandler,Default_Handler
.weak UART5_IRQHandler
.thumb_set UART5_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak DMA2_Stream0_IRQHandler
.thumb_set DMA2_Stream0_IRQHandler,Default_Handler
.weak DMA2_Stream1_IRQHandler
.thumb_set DMA2_Stream1_IRQHandler,Default_Handler
.weak DMA2_Stream2_IRQHandler
.thumb_set DMA2_Stream2_IRQHandler,Default_Handler
.weak DMA2_Stream3_IRQHandler
.thumb_set DMA2_Stream3_IRQHandler,Default_Handler
.weak DMA2_Stream4_IRQHandler
.thumb_set DMA2_Stream4_IRQHandler,Default_Handler
.weak ETH_IRQHandler
.thumb_set ETH_IRQHandler,Default_Handler
.weak ETH_WKUP_IRQHandler
.thumb_set ETH_WKUP_IRQHandler,Default_Handler
.weak CAN2_TX_IRQHandler
.thumb_set CAN2_TX_IRQHandler,Default_Handler
.weak CAN2_RX0_IRQHandler
.thumb_set CAN2_RX0_IRQHandler,Default_Handler
.weak CAN2_RX1_IRQHandler
.thumb_set CAN2_RX1_IRQHandler,Default_Handler
.weak CAN2_SCE_IRQHandler
.thumb_set CAN2_SCE_IRQHandler,Default_Handler
.weak OTG_FS_IRQHandler
.thumb_set OTG_FS_IRQHandler,Default_Handler
.weak DMA2_Stream5_IRQHandler
.thumb_set DMA2_Stream5_IRQHandler,Default_Handler
.weak DMA2_Stream6_IRQHandler
.thumb_set DMA2_Stream6_IRQHandler,Default_Handler
.weak DMA2_Stream7_IRQHandler
.thumb_set DMA2_Stream7_IRQHandler,Default_Handler
.weak USART6_IRQHandler
.thumb_set USART6_IRQHandler,Default_Handler
.weak I2C3_EV_IRQHandler
.thumb_set I2C3_EV_IRQHandler,Default_Handler
.weak I2C3_ER_IRQHandler
.thumb_set I2C3_ER_IRQHandler,Default_Handler
.weak OTG_HS_EP1_OUT_IRQHandler
.thumb_set OTG_HS_EP1_OUT_IRQHandler,Default_Handler
.weak OTG_HS_EP1_IN_IRQHandler
.thumb_set OTG_HS_EP1_IN_IRQHandler,Default_Handler
.weak OTG_HS_WKUP_IRQHandler
.thumb_set OTG_HS_WKUP_IRQHandler,Default_Handler
.weak OTG_HS_IRQHandler
.thumb_set OTG_HS_IRQHandler,Default_Handler
.weak DCMI_IRQHandler
.thumb_set DCMI_IRQHandler,Default_Handler
.weak HASH_RNG_IRQHandler
.thumb_set HASH_RNG_IRQHandler,Default_Handler
.weak FPU_IRQHandler
.thumb_set FPU_IRQHandler,Default_Handler
|
mari-cult/mmm-os
| 2,393
|
src/boot.s
|
.section .text
.global _start
.type _start, @function
_start:
# Enable FP/SIMD access at EL1 (CPACR_EL1.FPEN = 0b11)
mov x0, #0x300000
msr CPACR_EL1, x0
isb sy
ldr x30, =stack_top
mov sp, x30
# Mask all interrupts
msr DAIFSet, #0b1111
# 1. Load the interrupt vector address into VBAR_EL1
ldr x0, =evt
msr VBAR_EL1, x0
isb sy
# 2. Enable GIC
# x0: GIC DIST base address: https://github.com/qemu/qemu/blob/master/hw/arm/virt.c#L166
ldr x0, =0x08000000
bl init_gic_distributor
mov x0, #0x0
msr ICC_CTLR_EL1, x0
isb sy
# x0: GIC REDIST base address: https://github.com/qemu/qemu/blob/master/hw/arm/virt.c#L174
ldr x0, =0x080A0000
bl init_gic_redistributor
# 3. Enable system register access ICC_SRE_EL1
mrs x0, ICC_SRE_EL1
orr x0, x0, #1
msr ICC_SRE_EL1, x0
isb sy
# 4. Set priority mask
mov x0, #0xff
bl set_priority_mask
# 5. Enable Group 1 ints
bl enable_grp0_ints
bl enable_grp1_ints
# 6. Set a priority level for the timer
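# (PPI 30 is the EL1 physical timer interrupt on the QEMU virt board. With
#  affinity routing enabled its priority lives in the redistributor's
#  SGI/PPI frame, hence the base-address switch below; otherwise it is
#  programmed through the distributor.)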
mov w1, #30
mov w2, #0x80
ldr x0, =AFFINITY_ENABLED
ldr x0, [x0]
cbz x0, not_enabled
ldr x0, =0x080A0000
bl set_int_priority
b next
not_enabled:
ldr x0, =0x08000000
bl set_int_priority
next:
# 7. Route the interrupt through group 1
ldr x0, =0x080A0000
mov w1, #30
bl set_int_grp
# 8. Enable the interrupt
ldr x0, =0x080A0000
mov w1, #30
bl enable_int
# mrs x0, CNTFRQ_EL0
# msr CNTP_TVAL_EL0, x0
# mov x0, #0x1
# msr CNTP_CTL_EL0, x0
ldr x0, =0x08000000
mov w1, #33
bl set_spi_group
ldr x0, =0x08000000
mov w1, #33
mov w2, #0xA0
bl set_spi_priority
ldr x0, =0x08000000
mov w1, #33
bl set_spi_trigger
ldr x0, =0x08000000
mov w1, #33
ldr x2, =0x80000000
bl set_spi_routing
ldr x0, =0x08000000
mov w1, #33
bl enable_spi
# Unmask IRQs (clear the I bit in DAIF)
msr DAIFClr, #0b0010
isb sy
# In the near future these parameters will be provided by the machine:
# w0: UART base address: https://github.com/qemu/qemu/blob/master/hw/arm/virt.c#L175
# w1: UART clock frequency: https://github.com/qemu/qemu/blob/master/hw/arm/virt.c#L323
# w2: UART baud rate
mov w0, #0x09000000
mov w1, #0x3600
movk w1, #0x16e, LSL #16 # w1 = 24000000 (24 MHz UART clock)
mov w2, #23
bl init_uart
bl configure_uart
# IRQs were already unmasked via DAIFClr above; read back the current
# interrupt priority mask
mrs x9, ICC_PMR_EL1
svc #0
bl kmain
b .
|
marvin-hansen/iggy-streaming-system
| 40,218
|
thirdparty/crates/ring-0.17.9/pregenerated/chacha-armv8-ios64.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__)
#include <ring-core/arm_arch.h>
.section __TEXT,__const
.align 5
Lsigma:
.quad 0x3320646e61707865,0x6b20657479622d32 // endian-neutral
Lone:
.long 1,0,0,0
.byte 67,104,97,67,104,97,50,48,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.text
.globl _ChaCha20_ctr32_nohw
.private_extern _ChaCha20_ctr32_nohw
.align 5
_ChaCha20_ctr32_nohw:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-96]!
add x29,sp,#0
adrp x5,Lsigma@PAGE
add x5,x5,Lsigma@PAGEOFF
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub sp,sp,#64
ldp x22,x23,[x5] // load sigma
ldp x24,x25,[x3] // load key
ldp x26,x27,[x3,#16]
ldp x28,x30,[x4] // load counter
#ifdef __AARCH64EB__
ror x24,x24,#32
ror x25,x25,#32
ror x26,x26,#32
ror x27,x27,#32
ror x28,x28,#32
ror x30,x30,#32
#endif
Loop_outer:
mov w5,w22 // unpack key block
lsr x6,x22,#32
mov w7,w23
lsr x8,x23,#32
mov w9,w24
lsr x10,x24,#32
mov w11,w25
lsr x12,x25,#32
mov w13,w26
lsr x14,x26,#32
mov w15,w27
lsr x16,x27,#32
mov w17,w28
lsr x19,x28,#32
mov w20,w30
lsr x21,x30,#32
mov x4,#10
subs x2,x2,#64
Loop:
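// Each pass of Loop is one ChaCha double round over the state in w5..w21:
// the ror amounts 16, 20, 24 and 25 are right-rotations equivalent to the
// standard quarter-round left-rotations by 16, 12, 8 and 7.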
sub x4,x4,#1
add w5,w5,w9
add w6,w6,w10
add w7,w7,w11
add w8,w8,w12
eor w17,w17,w5
eor w19,w19,w6
eor w20,w20,w7
eor w21,w21,w8
ror w17,w17,#16
ror w19,w19,#16
ror w20,w20,#16
ror w21,w21,#16
add w13,w13,w17
add w14,w14,w19
add w15,w15,w20
add w16,w16,w21
eor w9,w9,w13
eor w10,w10,w14
eor w11,w11,w15
eor w12,w12,w16
ror w9,w9,#20
ror w10,w10,#20
ror w11,w11,#20
ror w12,w12,#20
add w5,w5,w9
add w6,w6,w10
add w7,w7,w11
add w8,w8,w12
eor w17,w17,w5
eor w19,w19,w6
eor w20,w20,w7
eor w21,w21,w8
ror w17,w17,#24
ror w19,w19,#24
ror w20,w20,#24
ror w21,w21,#24
add w13,w13,w17
add w14,w14,w19
add w15,w15,w20
add w16,w16,w21
eor w9,w9,w13
eor w10,w10,w14
eor w11,w11,w15
eor w12,w12,w16
ror w9,w9,#25
ror w10,w10,#25
ror w11,w11,#25
ror w12,w12,#25
add w5,w5,w10
add w6,w6,w11
add w7,w7,w12
add w8,w8,w9
eor w21,w21,w5
eor w17,w17,w6
eor w19,w19,w7
eor w20,w20,w8
ror w21,w21,#16
ror w17,w17,#16
ror w19,w19,#16
ror w20,w20,#16
add w15,w15,w21
add w16,w16,w17
add w13,w13,w19
add w14,w14,w20
eor w10,w10,w15
eor w11,w11,w16
eor w12,w12,w13
eor w9,w9,w14
ror w10,w10,#20
ror w11,w11,#20
ror w12,w12,#20
ror w9,w9,#20
add w5,w5,w10
add w6,w6,w11
add w7,w7,w12
add w8,w8,w9
eor w21,w21,w5
eor w17,w17,w6
eor w19,w19,w7
eor w20,w20,w8
ror w21,w21,#24
ror w17,w17,#24
ror w19,w19,#24
ror w20,w20,#24
add w15,w15,w21
add w16,w16,w17
add w13,w13,w19
add w14,w14,w20
eor w10,w10,w15
eor w11,w11,w16
eor w12,w12,w13
eor w9,w9,w14
ror w10,w10,#25
ror w11,w11,#25
ror w12,w12,#25
ror w9,w9,#25
cbnz x4,Loop
add w5,w5,w22 // accumulate key block
add x6,x6,x22,lsr#32
add w7,w7,w23
add x8,x8,x23,lsr#32
add w9,w9,w24
add x10,x10,x24,lsr#32
add w11,w11,w25
add x12,x12,x25,lsr#32
add w13,w13,w26
add x14,x14,x26,lsr#32
add w15,w15,w27
add x16,x16,x27,lsr#32
add w17,w17,w28
add x19,x19,x28,lsr#32
add w20,w20,w30
add x21,x21,x30,lsr#32
b.lo Ltail
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
ldp x6,x8,[x1,#0] // load input
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
ldp x10,x12,[x1,#16]
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
ldp x14,x16,[x1,#32]
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor x15,x15,x16
eor x17,x17,x19
eor x20,x20,x21
stp x5,x7,[x0,#0] // store output
add x28,x28,#1 // increment counter
stp x9,x11,[x0,#16]
stp x13,x15,[x0,#32]
stp x17,x20,[x0,#48]
add x0,x0,#64
b.hi Loop_outer
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
.align 4
Ltail:
add x2,x2,#64
Less_than_64:
sub x0,x0,#1
add x1,x1,x2
add x0,x0,x2
add x4,sp,x2
neg x2,x2
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
stp x5,x7,[sp,#0]
stp x9,x11,[sp,#16]
stp x13,x15,[sp,#32]
stp x17,x20,[sp,#48]
Loop_tail:
ldrb w10,[x1,x2]
ldrb w11,[x4,x2]
add x2,x2,#1
eor w10,w10,w11
strb w10,[x0,x2]
cbnz x2,Loop_tail
stp xzr,xzr,[sp,#0]
stp xzr,xzr,[sp,#16]
stp xzr,xzr,[sp,#32]
stp xzr,xzr,[sp,#48]
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
.globl _ChaCha20_ctr32_neon
.private_extern _ChaCha20_ctr32_neon
.align 5
_ChaCha20_ctr32_neon:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-96]!
add x29,sp,#0
adrp x5,Lsigma@PAGE
add x5,x5,Lsigma@PAGEOFF
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
cmp x2,#512
b.hs L512_or_more_neon
sub sp,sp,#64
ldp x22,x23,[x5] // load sigma
ld1 {v24.4s},[x5],#16
ldp x24,x25,[x3] // load key
ldp x26,x27,[x3,#16]
ld1 {v25.4s,v26.4s},[x3]
ldp x28,x30,[x4] // load counter
ld1 {v27.4s},[x4]
ld1 {v31.4s},[x5]
#ifdef __AARCH64EB__
rev64 v24.4s,v24.4s
ror x24,x24,#32
ror x25,x25,#32
ror x26,x26,#32
ror x27,x27,#32
ror x28,x28,#32
ror x30,x30,#32
#endif
add v27.4s,v27.4s,v31.4s // += 1
add v28.4s,v27.4s,v31.4s
add v29.4s,v28.4s,v31.4s
shl v31.4s,v31.4s,#2 // 1 -> 4
Loop_outer_neon:
mov w5,w22 // unpack key block
lsr x6,x22,#32
mov v0.16b,v24.16b
mov w7,w23
lsr x8,x23,#32
mov v4.16b,v24.16b
mov w9,w24
lsr x10,x24,#32
mov v16.16b,v24.16b
mov w11,w25
mov v1.16b,v25.16b
lsr x12,x25,#32
mov v5.16b,v25.16b
mov w13,w26
mov v17.16b,v25.16b
lsr x14,x26,#32
mov v3.16b,v27.16b
mov w15,w27
mov v7.16b,v28.16b
lsr x16,x27,#32
mov v19.16b,v29.16b
mov w17,w28
mov v2.16b,v26.16b
lsr x19,x28,#32
mov v6.16b,v26.16b
mov w20,w30
mov v18.16b,v26.16b
lsr x21,x30,#32
mov x4,#10
subs x2,x2,#256
Loop_neon:
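// The NEON path computes three blocks in vector registers (v0-v3, v4-v7,
// v16-v19) interleaved with a fourth block in the scalar registers; vector
// rotates are synthesized with ushr+sli pairs (or rev32 for the 16-bit
// case) since NEON has no 32-bit rotate instruction.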
sub x4,x4,#1
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v16.4s,v16.4s,v17.4s
add w7,w7,w11
eor v3.16b,v3.16b,v0.16b
add w8,w8,w12
eor v7.16b,v7.16b,v4.16b
eor w17,w17,w5
eor v19.16b,v19.16b,v16.16b
eor w19,w19,w6
rev32 v3.8h,v3.8h
eor w20,w20,w7
rev32 v7.8h,v7.8h
eor w21,w21,w8
rev32 v19.8h,v19.8h
ror w17,w17,#16
add v2.4s,v2.4s,v3.4s
ror w19,w19,#16
add v6.4s,v6.4s,v7.4s
ror w20,w20,#16
add v18.4s,v18.4s,v19.4s
ror w21,w21,#16
eor v20.16b,v1.16b,v2.16b
add w13,w13,w17
eor v21.16b,v5.16b,v6.16b
add w14,w14,w19
eor v22.16b,v17.16b,v18.16b
add w15,w15,w20
ushr v1.4s,v20.4s,#20
add w16,w16,w21
ushr v5.4s,v21.4s,#20
eor w9,w9,w13
ushr v17.4s,v22.4s,#20
eor w10,w10,w14
sli v1.4s,v20.4s,#12
eor w11,w11,w15
sli v5.4s,v21.4s,#12
eor w12,w12,w16
sli v17.4s,v22.4s,#12
ror w9,w9,#20
add v0.4s,v0.4s,v1.4s
ror w10,w10,#20
add v4.4s,v4.4s,v5.4s
ror w11,w11,#20
add v16.4s,v16.4s,v17.4s
ror w12,w12,#20
eor v20.16b,v3.16b,v0.16b
add w5,w5,w9
eor v21.16b,v7.16b,v4.16b
add w6,w6,w10
eor v22.16b,v19.16b,v16.16b
add w7,w7,w11
ushr v3.4s,v20.4s,#24
add w8,w8,w12
ushr v7.4s,v21.4s,#24
eor w17,w17,w5
ushr v19.4s,v22.4s,#24
eor w19,w19,w6
sli v3.4s,v20.4s,#8
eor w20,w20,w7
sli v7.4s,v21.4s,#8
eor w21,w21,w8
sli v19.4s,v22.4s,#8
ror w17,w17,#24
add v2.4s,v2.4s,v3.4s
ror w19,w19,#24
add v6.4s,v6.4s,v7.4s
ror w20,w20,#24
add v18.4s,v18.4s,v19.4s
ror w21,w21,#24
eor v20.16b,v1.16b,v2.16b
add w13,w13,w17
eor v21.16b,v5.16b,v6.16b
add w14,w14,w19
eor v22.16b,v17.16b,v18.16b
add w15,w15,w20
ushr v1.4s,v20.4s,#25
add w16,w16,w21
ushr v5.4s,v21.4s,#25
eor w9,w9,w13
ushr v17.4s,v22.4s,#25
eor w10,w10,w14
sli v1.4s,v20.4s,#7
eor w11,w11,w15
sli v5.4s,v21.4s,#7
eor w12,w12,w16
sli v17.4s,v22.4s,#7
ror w9,w9,#25
ext v2.16b,v2.16b,v2.16b,#8
ror w10,w10,#25
ext v6.16b,v6.16b,v6.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v3.16b,v3.16b,v3.16b,#12
ext v7.16b,v7.16b,v7.16b,#12
ext v19.16b,v19.16b,v19.16b,#12
ext v1.16b,v1.16b,v1.16b,#4
ext v5.16b,v5.16b,v5.16b,#4
ext v17.16b,v17.16b,v17.16b,#4
add v0.4s,v0.4s,v1.4s
add w5,w5,w10
add v4.4s,v4.4s,v5.4s
add w6,w6,w11
add v16.4s,v16.4s,v17.4s
add w7,w7,w12
eor v3.16b,v3.16b,v0.16b
add w8,w8,w9
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w5
eor v19.16b,v19.16b,v16.16b
eor w17,w17,w6
rev32 v3.8h,v3.8h
eor w19,w19,w7
rev32 v7.8h,v7.8h
eor w20,w20,w8
rev32 v19.8h,v19.8h
ror w21,w21,#16
add v2.4s,v2.4s,v3.4s
ror w17,w17,#16
add v6.4s,v6.4s,v7.4s
ror w19,w19,#16
add v18.4s,v18.4s,v19.4s
ror w20,w20,#16
eor v20.16b,v1.16b,v2.16b
add w15,w15,w21
eor v21.16b,v5.16b,v6.16b
add w16,w16,w17
eor v22.16b,v17.16b,v18.16b
add w13,w13,w19
ushr v1.4s,v20.4s,#20
add w14,w14,w20
ushr v5.4s,v21.4s,#20
eor w10,w10,w15
ushr v17.4s,v22.4s,#20
eor w11,w11,w16
sli v1.4s,v20.4s,#12
eor w12,w12,w13
sli v5.4s,v21.4s,#12
eor w9,w9,w14
sli v17.4s,v22.4s,#12
ror w10,w10,#20
add v0.4s,v0.4s,v1.4s
ror w11,w11,#20
add v4.4s,v4.4s,v5.4s
ror w12,w12,#20
add v16.4s,v16.4s,v17.4s
ror w9,w9,#20
eor v20.16b,v3.16b,v0.16b
add w5,w5,w10
eor v21.16b,v7.16b,v4.16b
add w6,w6,w11
eor v22.16b,v19.16b,v16.16b
add w7,w7,w12
ushr v3.4s,v20.4s,#24
add w8,w8,w9
ushr v7.4s,v21.4s,#24
eor w21,w21,w5
ushr v19.4s,v22.4s,#24
eor w17,w17,w6
sli v3.4s,v20.4s,#8
eor w19,w19,w7
sli v7.4s,v21.4s,#8
eor w20,w20,w8
sli v19.4s,v22.4s,#8
ror w21,w21,#24
add v2.4s,v2.4s,v3.4s
ror w17,w17,#24
add v6.4s,v6.4s,v7.4s
ror w19,w19,#24
add v18.4s,v18.4s,v19.4s
ror w20,w20,#24
eor v20.16b,v1.16b,v2.16b
add w15,w15,w21
eor v21.16b,v5.16b,v6.16b
add w16,w16,w17
eor v22.16b,v17.16b,v18.16b
add w13,w13,w19
ushr v1.4s,v20.4s,#25
add w14,w14,w20
ushr v5.4s,v21.4s,#25
eor w10,w10,w15
ushr v17.4s,v22.4s,#25
eor w11,w11,w16
sli v1.4s,v20.4s,#7
eor w12,w12,w13
sli v5.4s,v21.4s,#7
eor w9,w9,w14
sli v17.4s,v22.4s,#7
ror w10,w10,#25
ext v2.16b,v2.16b,v2.16b,#8
ror w11,w11,#25
ext v6.16b,v6.16b,v6.16b,#8
ror w12,w12,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#4
ext v7.16b,v7.16b,v7.16b,#4
ext v19.16b,v19.16b,v19.16b,#4
ext v1.16b,v1.16b,v1.16b,#12
ext v5.16b,v5.16b,v5.16b,#12
ext v17.16b,v17.16b,v17.16b,#12
cbnz x4,Loop_neon
add w5,w5,w22 // accumulate key block
add v0.4s,v0.4s,v24.4s
add x6,x6,x22,lsr#32
add v4.4s,v4.4s,v24.4s
add w7,w7,w23
add v16.4s,v16.4s,v24.4s
add x8,x8,x23,lsr#32
add v2.4s,v2.4s,v26.4s
add w9,w9,w24
add v6.4s,v6.4s,v26.4s
add x10,x10,x24,lsr#32
add v18.4s,v18.4s,v26.4s
add w11,w11,w25
add v3.4s,v3.4s,v27.4s
add x12,x12,x25,lsr#32
add w13,w13,w26
add v7.4s,v7.4s,v28.4s
add x14,x14,x26,lsr#32
add w15,w15,w27
add v19.4s,v19.4s,v29.4s
add x16,x16,x27,lsr#32
add w17,w17,w28
add v1.4s,v1.4s,v25.4s
add x19,x19,x28,lsr#32
add w20,w20,w30
add v5.4s,v5.4s,v25.4s
add x21,x21,x30,lsr#32
add v17.4s,v17.4s,v25.4s
b.lo Ltail_neon
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
ldp x6,x8,[x1,#0] // load input
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
ldp x10,x12,[x1,#16]
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
ldp x14,x16,[x1,#32]
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor v0.16b,v0.16b,v20.16b
eor x15,x15,x16
eor v1.16b,v1.16b,v21.16b
eor x17,x17,x19
eor v2.16b,v2.16b,v22.16b
eor x20,x20,x21
eor v3.16b,v3.16b,v23.16b
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
stp x5,x7,[x0,#0] // store output
add x28,x28,#4 // increment counter
stp x9,x11,[x0,#16]
add v27.4s,v27.4s,v31.4s // += 4
stp x13,x15,[x0,#32]
add v28.4s,v28.4s,v31.4s
stp x17,x20,[x0,#48]
add v29.4s,v29.4s,v31.4s
add x0,x0,#64
st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64
ld1 {v0.16b,v1.16b,v2.16b,v3.16b},[x1],#64
eor v4.16b,v4.16b,v20.16b
eor v5.16b,v5.16b,v21.16b
eor v6.16b,v6.16b,v22.16b
eor v7.16b,v7.16b,v23.16b
st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64
eor v16.16b,v16.16b,v0.16b
eor v17.16b,v17.16b,v1.16b
eor v18.16b,v18.16b,v2.16b
eor v19.16b,v19.16b,v3.16b
st1 {v16.16b,v17.16b,v18.16b,v19.16b},[x0],#64
b.hi Loop_outer_neon
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
Ltail_neon:
add x2,x2,#256
cmp x2,#64
b.lo Less_than_64
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
ldp x6,x8,[x1,#0] // load input
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
ldp x10,x12,[x1,#16]
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
ldp x14,x16,[x1,#32]
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor x15,x15,x16
eor x17,x17,x19
eor x20,x20,x21
stp x5,x7,[x0,#0] // store output
add x28,x28,#4 // increment counter
stp x9,x11,[x0,#16]
stp x13,x15,[x0,#32]
stp x17,x20,[x0,#48]
add x0,x0,#64
b.eq Ldone_neon
sub x2,x2,#64
cmp x2,#64
b.lo Less_than_128
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
eor v0.16b,v0.16b,v20.16b
eor v1.16b,v1.16b,v21.16b
eor v2.16b,v2.16b,v22.16b
eor v3.16b,v3.16b,v23.16b
st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64
b.eq Ldone_neon
sub x2,x2,#64
cmp x2,#64
b.lo Less_than_192
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
eor v4.16b,v4.16b,v20.16b
eor v5.16b,v5.16b,v21.16b
eor v6.16b,v6.16b,v22.16b
eor v7.16b,v7.16b,v23.16b
st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64
b.eq Ldone_neon
sub x2,x2,#64
st1 {v16.16b,v17.16b,v18.16b,v19.16b},[sp]
b Last_neon
Less_than_128:
st1 {v0.16b,v1.16b,v2.16b,v3.16b},[sp]
b Last_neon
Less_than_192:
st1 {v4.16b,v5.16b,v6.16b,v7.16b},[sp]
b Last_neon
.align 4
Last_neon:
sub x0,x0,#1
add x1,x1,x2
add x0,x0,x2
add x4,sp,x2
neg x2,x2
Loop_tail_neon:
ldrb w10,[x1,x2]
ldrb w11,[x4,x2]
add x2,x2,#1
eor w10,w10,w11
strb w10,[x0,x2]
cbnz x2,Loop_tail_neon
stp xzr,xzr,[sp,#0]
stp xzr,xzr,[sp,#16]
stp xzr,xzr,[sp,#32]
stp xzr,xzr,[sp,#48]
Ldone_neon:
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
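// The round loops above synthesize ChaCha20's rotates from shift pairs,
// since NEON has no vector-rotate instruction: rotate-left by 16 is a free
// rev32 on .8h lanes, rotates by 12/8/7 are ushr #(32-r) followed by
// sli #r, and ext re-skews the rows between the column and diagonal
// half-rounds. For reference, a C sketch (illustrative only, not part of
// this file) of the scalar quarter round those pairs vectorize:
//
//   static inline uint32_t rotl32(uint32_t x, int r) {
//       return (x << r) | (x >> (32 - r));
//   }
//   static void quarter_round(uint32_t *a, uint32_t *b,
//                             uint32_t *c, uint32_t *d) {
//       *a += *b; *d ^= *a; *d = rotl32(*d, 16); /* rev32 .8h           */
//       *c += *d; *b ^= *c; *b = rotl32(*b, 12); /* ushr #20 + sli #12  */
//       *a += *b; *d ^= *a; *d = rotl32(*d, 8);  /* ushr #24 + sli #8   */
//       *c += *d; *b ^= *c; *b = rotl32(*b, 7);  /* ushr #25 + sli #7   */
//   }
//
// The interleaved add/eor/ror instructions on w-registers run the same
// quarter round on one extra block in general-purpose registers.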
.align 5
ChaCha20_512_neon:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-96]!
add x29,sp,#0
adrp x5,Lsigma@PAGE
add x5,x5,Lsigma@PAGEOFF
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
L512_or_more_neon:
sub sp,sp,#128+64
ldp x22,x23,[x5] // load sigma
ld1 {v24.4s},[x5],#16
ldp x24,x25,[x3] // load key
ldp x26,x27,[x3,#16]
ld1 {v25.4s,v26.4s},[x3]
ldp x28,x30,[x4] // load counter
ld1 {v27.4s},[x4]
ld1 {v31.4s},[x5]
#ifdef __AARCH64EB__
rev64 v24.4s,v24.4s
ror x24,x24,#32
ror x25,x25,#32
ror x26,x26,#32
ror x27,x27,#32
ror x28,x28,#32
ror x30,x30,#32
#endif
add v27.4s,v27.4s,v31.4s // += 1
stp q24,q25,[sp,#0] // off-load key block, invariant part
add v27.4s,v27.4s,v31.4s // not a typo: NEON counter rows start at +2 (blocks +0/+1 run in general-purpose registers)
str q26,[sp,#32]
add v28.4s,v27.4s,v31.4s
add v29.4s,v28.4s,v31.4s
add v30.4s,v29.4s,v31.4s
shl v31.4s,v31.4s,#2 // 1 -> 4
stp d8,d9,[sp,#128+0] // meet ABI requirements
stp d10,d11,[sp,#128+16]
stp d12,d13,[sp,#128+32]
stp d14,d15,[sp,#128+48]
sub x2,x2,#512 // not a typo: pre-bias the length so the final sub-512-byte chunk falls through to the smaller paths
Loop_outer_512_neon:
mov v0.16b,v24.16b
mov v4.16b,v24.16b
mov v8.16b,v24.16b
mov v12.16b,v24.16b
mov v16.16b,v24.16b
mov v20.16b,v24.16b
mov v1.16b,v25.16b
mov w5,w22 // unpack key block
mov v5.16b,v25.16b
lsr x6,x22,#32
mov v9.16b,v25.16b
mov w7,w23
mov v13.16b,v25.16b
lsr x8,x23,#32
mov v17.16b,v25.16b
mov w9,w24
mov v21.16b,v25.16b
lsr x10,x24,#32
mov v3.16b,v27.16b
mov w11,w25
mov v7.16b,v28.16b
lsr x12,x25,#32
mov v11.16b,v29.16b
mov w13,w26
mov v15.16b,v30.16b
lsr x14,x26,#32
mov v2.16b,v26.16b
mov w15,w27
mov v6.16b,v26.16b
lsr x16,x27,#32
add v19.4s,v3.4s,v31.4s // +4
mov w17,w28
add v23.4s,v7.4s,v31.4s // +4
lsr x19,x28,#32
mov v10.16b,v26.16b
mov w20,w30
mov v14.16b,v26.16b
lsr x21,x30,#32
mov v18.16b,v26.16b
stp q27,q28,[sp,#48] // off-load key block, variable part
mov v22.16b,v26.16b
str q29,[sp,#80]
mov x4,#5
subs x2,x2,#512
Loop_upper_neon:
sub x4,x4,#1
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v8.4s,v8.4s,v9.4s
add w7,w7,w11
add v12.4s,v12.4s,v13.4s
add w8,w8,w12
add v16.4s,v16.4s,v17.4s
eor w17,w17,w5
add v20.4s,v20.4s,v21.4s
eor w19,w19,w6
eor v3.16b,v3.16b,v0.16b
eor w20,w20,w7
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w8
eor v11.16b,v11.16b,v8.16b
ror w17,w17,#16
eor v15.16b,v15.16b,v12.16b
ror w19,w19,#16
eor v19.16b,v19.16b,v16.16b
ror w20,w20,#16
eor v23.16b,v23.16b,v20.16b
ror w21,w21,#16
rev32 v3.8h,v3.8h
add w13,w13,w17
rev32 v7.8h,v7.8h
add w14,w14,w19
rev32 v11.8h,v11.8h
add w15,w15,w20
rev32 v15.8h,v15.8h
add w16,w16,w21
rev32 v19.8h,v19.8h
eor w9,w9,w13
rev32 v23.8h,v23.8h
eor w10,w10,w14
add v2.4s,v2.4s,v3.4s
eor w11,w11,w15
add v6.4s,v6.4s,v7.4s
eor w12,w12,w16
add v10.4s,v10.4s,v11.4s
ror w9,w9,#20
add v14.4s,v14.4s,v15.4s
ror w10,w10,#20
add v18.4s,v18.4s,v19.4s
ror w11,w11,#20
add v22.4s,v22.4s,v23.4s
ror w12,w12,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w9
eor v25.16b,v5.16b,v6.16b
add w6,w6,w10
eor v26.16b,v9.16b,v10.16b
add w7,w7,w11
eor v27.16b,v13.16b,v14.16b
add w8,w8,w12
eor v28.16b,v17.16b,v18.16b
eor w17,w17,w5
eor v29.16b,v21.16b,v22.16b
eor w19,w19,w6
ushr v1.4s,v24.4s,#20
eor w20,w20,w7
ushr v5.4s,v25.4s,#20
eor w21,w21,w8
ushr v9.4s,v26.4s,#20
ror w17,w17,#24
ushr v13.4s,v27.4s,#20
ror w19,w19,#24
ushr v17.4s,v28.4s,#20
ror w20,w20,#24
ushr v21.4s,v29.4s,#20
ror w21,w21,#24
sli v1.4s,v24.4s,#12
add w13,w13,w17
sli v5.4s,v25.4s,#12
add w14,w14,w19
sli v9.4s,v26.4s,#12
add w15,w15,w20
sli v13.4s,v27.4s,#12
add w16,w16,w21
sli v17.4s,v28.4s,#12
eor w9,w9,w13
sli v21.4s,v29.4s,#12
eor w10,w10,w14
add v0.4s,v0.4s,v1.4s
eor w11,w11,w15
add v4.4s,v4.4s,v5.4s
eor w12,w12,w16
add v8.4s,v8.4s,v9.4s
ror w9,w9,#25
add v12.4s,v12.4s,v13.4s
ror w10,w10,#25
add v16.4s,v16.4s,v17.4s
ror w11,w11,#25
add v20.4s,v20.4s,v21.4s
ror w12,w12,#25
eor v24.16b,v3.16b,v0.16b
add w5,w5,w10
eor v25.16b,v7.16b,v4.16b
add w6,w6,w11
eor v26.16b,v11.16b,v8.16b
add w7,w7,w12
eor v27.16b,v15.16b,v12.16b
add w8,w8,w9
eor v28.16b,v19.16b,v16.16b
eor w21,w21,w5
eor v29.16b,v23.16b,v20.16b
eor w17,w17,w6
ushr v3.4s,v24.4s,#24
eor w19,w19,w7
ushr v7.4s,v25.4s,#24
eor w20,w20,w8
ushr v11.4s,v26.4s,#24
ror w21,w21,#16
ushr v15.4s,v27.4s,#24
ror w17,w17,#16
ushr v19.4s,v28.4s,#24
ror w19,w19,#16
ushr v23.4s,v29.4s,#24
ror w20,w20,#16
sli v3.4s,v24.4s,#8
add w15,w15,w21
sli v7.4s,v25.4s,#8
add w16,w16,w17
sli v11.4s,v26.4s,#8
add w13,w13,w19
sli v15.4s,v27.4s,#8
add w14,w14,w20
sli v19.4s,v28.4s,#8
eor w10,w10,w15
sli v23.4s,v29.4s,#8
eor w11,w11,w16
add v2.4s,v2.4s,v3.4s
eor w12,w12,w13
add v6.4s,v6.4s,v7.4s
eor w9,w9,w14
add v10.4s,v10.4s,v11.4s
ror w10,w10,#20
add v14.4s,v14.4s,v15.4s
ror w11,w11,#20
add v18.4s,v18.4s,v19.4s
ror w12,w12,#20
add v22.4s,v22.4s,v23.4s
ror w9,w9,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w10
eor v25.16b,v5.16b,v6.16b
add w6,w6,w11
eor v26.16b,v9.16b,v10.16b
add w7,w7,w12
eor v27.16b,v13.16b,v14.16b
add w8,w8,w9
eor v28.16b,v17.16b,v18.16b
eor w21,w21,w5
eor v29.16b,v21.16b,v22.16b
eor w17,w17,w6
ushr v1.4s,v24.4s,#25
eor w19,w19,w7
ushr v5.4s,v25.4s,#25
eor w20,w20,w8
ushr v9.4s,v26.4s,#25
ror w21,w21,#24
ushr v13.4s,v27.4s,#25
ror w17,w17,#24
ushr v17.4s,v28.4s,#25
ror w19,w19,#24
ushr v21.4s,v29.4s,#25
ror w20,w20,#24
sli v1.4s,v24.4s,#7
add w15,w15,w21
sli v5.4s,v25.4s,#7
add w16,w16,w17
sli v9.4s,v26.4s,#7
add w13,w13,w19
sli v13.4s,v27.4s,#7
add w14,w14,w20
sli v17.4s,v28.4s,#7
eor w10,w10,w15
sli v21.4s,v29.4s,#7
eor w11,w11,w16
ext v2.16b,v2.16b,v2.16b,#8
eor w12,w12,w13
ext v6.16b,v6.16b,v6.16b,#8
eor w9,w9,w14
ext v10.16b,v10.16b,v10.16b,#8
ror w10,w10,#25
ext v14.16b,v14.16b,v14.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v22.16b,v22.16b,v22.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#12
ext v7.16b,v7.16b,v7.16b,#12
ext v11.16b,v11.16b,v11.16b,#12
ext v15.16b,v15.16b,v15.16b,#12
ext v19.16b,v19.16b,v19.16b,#12
ext v23.16b,v23.16b,v23.16b,#12
ext v1.16b,v1.16b,v1.16b,#4
ext v5.16b,v5.16b,v5.16b,#4
ext v9.16b,v9.16b,v9.16b,#4
ext v13.16b,v13.16b,v13.16b,#4
ext v17.16b,v17.16b,v17.16b,#4
ext v21.16b,v21.16b,v21.16b,#4
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v8.4s,v8.4s,v9.4s
add w7,w7,w11
add v12.4s,v12.4s,v13.4s
add w8,w8,w12
add v16.4s,v16.4s,v17.4s
eor w17,w17,w5
add v20.4s,v20.4s,v21.4s
eor w19,w19,w6
eor v3.16b,v3.16b,v0.16b
eor w20,w20,w7
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w8
eor v11.16b,v11.16b,v8.16b
ror w17,w17,#16
eor v15.16b,v15.16b,v12.16b
ror w19,w19,#16
eor v19.16b,v19.16b,v16.16b
ror w20,w20,#16
eor v23.16b,v23.16b,v20.16b
ror w21,w21,#16
rev32 v3.8h,v3.8h
add w13,w13,w17
rev32 v7.8h,v7.8h
add w14,w14,w19
rev32 v11.8h,v11.8h
add w15,w15,w20
rev32 v15.8h,v15.8h
add w16,w16,w21
rev32 v19.8h,v19.8h
eor w9,w9,w13
rev32 v23.8h,v23.8h
eor w10,w10,w14
add v2.4s,v2.4s,v3.4s
eor w11,w11,w15
add v6.4s,v6.4s,v7.4s
eor w12,w12,w16
add v10.4s,v10.4s,v11.4s
ror w9,w9,#20
add v14.4s,v14.4s,v15.4s
ror w10,w10,#20
add v18.4s,v18.4s,v19.4s
ror w11,w11,#20
add v22.4s,v22.4s,v23.4s
ror w12,w12,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w9
eor v25.16b,v5.16b,v6.16b
add w6,w6,w10
eor v26.16b,v9.16b,v10.16b
add w7,w7,w11
eor v27.16b,v13.16b,v14.16b
add w8,w8,w12
eor v28.16b,v17.16b,v18.16b
eor w17,w17,w5
eor v29.16b,v21.16b,v22.16b
eor w19,w19,w6
ushr v1.4s,v24.4s,#20
eor w20,w20,w7
ushr v5.4s,v25.4s,#20
eor w21,w21,w8
ushr v9.4s,v26.4s,#20
ror w17,w17,#24
ushr v13.4s,v27.4s,#20
ror w19,w19,#24
ushr v17.4s,v28.4s,#20
ror w20,w20,#24
ushr v21.4s,v29.4s,#20
ror w21,w21,#24
sli v1.4s,v24.4s,#12
add w13,w13,w17
sli v5.4s,v25.4s,#12
add w14,w14,w19
sli v9.4s,v26.4s,#12
add w15,w15,w20
sli v13.4s,v27.4s,#12
add w16,w16,w21
sli v17.4s,v28.4s,#12
eor w9,w9,w13
sli v21.4s,v29.4s,#12
eor w10,w10,w14
add v0.4s,v0.4s,v1.4s
eor w11,w11,w15
add v4.4s,v4.4s,v5.4s
eor w12,w12,w16
add v8.4s,v8.4s,v9.4s
ror w9,w9,#25
add v12.4s,v12.4s,v13.4s
ror w10,w10,#25
add v16.4s,v16.4s,v17.4s
ror w11,w11,#25
add v20.4s,v20.4s,v21.4s
ror w12,w12,#25
eor v24.16b,v3.16b,v0.16b
add w5,w5,w10
eor v25.16b,v7.16b,v4.16b
add w6,w6,w11
eor v26.16b,v11.16b,v8.16b
add w7,w7,w12
eor v27.16b,v15.16b,v12.16b
add w8,w8,w9
eor v28.16b,v19.16b,v16.16b
eor w21,w21,w5
eor v29.16b,v23.16b,v20.16b
eor w17,w17,w6
ushr v3.4s,v24.4s,#24
eor w19,w19,w7
ushr v7.4s,v25.4s,#24
eor w20,w20,w8
ushr v11.4s,v26.4s,#24
ror w21,w21,#16
ushr v15.4s,v27.4s,#24
ror w17,w17,#16
ushr v19.4s,v28.4s,#24
ror w19,w19,#16
ushr v23.4s,v29.4s,#24
ror w20,w20,#16
sli v3.4s,v24.4s,#8
add w15,w15,w21
sli v7.4s,v25.4s,#8
add w16,w16,w17
sli v11.4s,v26.4s,#8
add w13,w13,w19
sli v15.4s,v27.4s,#8
add w14,w14,w20
sli v19.4s,v28.4s,#8
eor w10,w10,w15
sli v23.4s,v29.4s,#8
eor w11,w11,w16
add v2.4s,v2.4s,v3.4s
eor w12,w12,w13
add v6.4s,v6.4s,v7.4s
eor w9,w9,w14
add v10.4s,v10.4s,v11.4s
ror w10,w10,#20
add v14.4s,v14.4s,v15.4s
ror w11,w11,#20
add v18.4s,v18.4s,v19.4s
ror w12,w12,#20
add v22.4s,v22.4s,v23.4s
ror w9,w9,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w10
eor v25.16b,v5.16b,v6.16b
add w6,w6,w11
eor v26.16b,v9.16b,v10.16b
add w7,w7,w12
eor v27.16b,v13.16b,v14.16b
add w8,w8,w9
eor v28.16b,v17.16b,v18.16b
eor w21,w21,w5
eor v29.16b,v21.16b,v22.16b
eor w17,w17,w6
ushr v1.4s,v24.4s,#25
eor w19,w19,w7
ushr v5.4s,v25.4s,#25
eor w20,w20,w8
ushr v9.4s,v26.4s,#25
ror w21,w21,#24
ushr v13.4s,v27.4s,#25
ror w17,w17,#24
ushr v17.4s,v28.4s,#25
ror w19,w19,#24
ushr v21.4s,v29.4s,#25
ror w20,w20,#24
sli v1.4s,v24.4s,#7
add w15,w15,w21
sli v5.4s,v25.4s,#7
add w16,w16,w17
sli v9.4s,v26.4s,#7
add w13,w13,w19
sli v13.4s,v27.4s,#7
add w14,w14,w20
sli v17.4s,v28.4s,#7
eor w10,w10,w15
sli v21.4s,v29.4s,#7
eor w11,w11,w16
ext v2.16b,v2.16b,v2.16b,#8
eor w12,w12,w13
ext v6.16b,v6.16b,v6.16b,#8
eor w9,w9,w14
ext v10.16b,v10.16b,v10.16b,#8
ror w10,w10,#25
ext v14.16b,v14.16b,v14.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v22.16b,v22.16b,v22.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#4
ext v7.16b,v7.16b,v7.16b,#4
ext v11.16b,v11.16b,v11.16b,#4
ext v15.16b,v15.16b,v15.16b,#4
ext v19.16b,v19.16b,v19.16b,#4
ext v23.16b,v23.16b,v23.16b,#4
ext v1.16b,v1.16b,v1.16b,#12
ext v5.16b,v5.16b,v5.16b,#12
ext v9.16b,v9.16b,v9.16b,#12
ext v13.16b,v13.16b,v13.16b,#12
ext v17.16b,v17.16b,v17.16b,#12
ext v21.16b,v21.16b,v21.16b,#12
cbnz x4,Loop_upper_neon
add w5,w5,w22 // accumulate key block
add x6,x6,x22,lsr#32
add w7,w7,w23
add x8,x8,x23,lsr#32
add w9,w9,w24
add x10,x10,x24,lsr#32
add w11,w11,w25
add x12,x12,x25,lsr#32
add w13,w13,w26
add x14,x14,x26,lsr#32
add w15,w15,w27
add x16,x16,x27,lsr#32
add w17,w17,w28
add x19,x19,x28,lsr#32
add w20,w20,w30
add x21,x21,x30,lsr#32
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
ldp x6,x8,[x1,#0] // load input
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
ldp x10,x12,[x1,#16]
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
ldp x14,x16,[x1,#32]
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor x15,x15,x16
eor x17,x17,x19
eor x20,x20,x21
stp x5,x7,[x0,#0] // store output
add x28,x28,#1 // increment counter
mov w5,w22 // unpack key block
lsr x6,x22,#32
stp x9,x11,[x0,#16]
mov w7,w23
lsr x8,x23,#32
stp x13,x15,[x0,#32]
mov w9,w24
lsr x10,x24,#32
stp x17,x20,[x0,#48]
add x0,x0,#64
mov w11,w25
lsr x12,x25,#32
mov w13,w26
lsr x14,x26,#32
mov w15,w27
lsr x16,x27,#32
mov w17,w28
lsr x19,x28,#32
mov w20,w30
lsr x21,x30,#32
mov x4,#5
Loop_lower_neon:
sub x4,x4,#1
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v8.4s,v8.4s,v9.4s
add w7,w7,w11
add v12.4s,v12.4s,v13.4s
add w8,w8,w12
add v16.4s,v16.4s,v17.4s
eor w17,w17,w5
add v20.4s,v20.4s,v21.4s
eor w19,w19,w6
eor v3.16b,v3.16b,v0.16b
eor w20,w20,w7
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w8
eor v11.16b,v11.16b,v8.16b
ror w17,w17,#16
eor v15.16b,v15.16b,v12.16b
ror w19,w19,#16
eor v19.16b,v19.16b,v16.16b
ror w20,w20,#16
eor v23.16b,v23.16b,v20.16b
ror w21,w21,#16
rev32 v3.8h,v3.8h
add w13,w13,w17
rev32 v7.8h,v7.8h
add w14,w14,w19
rev32 v11.8h,v11.8h
add w15,w15,w20
rev32 v15.8h,v15.8h
add w16,w16,w21
rev32 v19.8h,v19.8h
eor w9,w9,w13
rev32 v23.8h,v23.8h
eor w10,w10,w14
add v2.4s,v2.4s,v3.4s
eor w11,w11,w15
add v6.4s,v6.4s,v7.4s
eor w12,w12,w16
add v10.4s,v10.4s,v11.4s
ror w9,w9,#20
add v14.4s,v14.4s,v15.4s
ror w10,w10,#20
add v18.4s,v18.4s,v19.4s
ror w11,w11,#20
add v22.4s,v22.4s,v23.4s
ror w12,w12,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w9
eor v25.16b,v5.16b,v6.16b
add w6,w6,w10
eor v26.16b,v9.16b,v10.16b
add w7,w7,w11
eor v27.16b,v13.16b,v14.16b
add w8,w8,w12
eor v28.16b,v17.16b,v18.16b
eor w17,w17,w5
eor v29.16b,v21.16b,v22.16b
eor w19,w19,w6
ushr v1.4s,v24.4s,#20
eor w20,w20,w7
ushr v5.4s,v25.4s,#20
eor w21,w21,w8
ushr v9.4s,v26.4s,#20
ror w17,w17,#24
ushr v13.4s,v27.4s,#20
ror w19,w19,#24
ushr v17.4s,v28.4s,#20
ror w20,w20,#24
ushr v21.4s,v29.4s,#20
ror w21,w21,#24
sli v1.4s,v24.4s,#12
add w13,w13,w17
sli v5.4s,v25.4s,#12
add w14,w14,w19
sli v9.4s,v26.4s,#12
add w15,w15,w20
sli v13.4s,v27.4s,#12
add w16,w16,w21
sli v17.4s,v28.4s,#12
eor w9,w9,w13
sli v21.4s,v29.4s,#12
eor w10,w10,w14
add v0.4s,v0.4s,v1.4s
eor w11,w11,w15
add v4.4s,v4.4s,v5.4s
eor w12,w12,w16
add v8.4s,v8.4s,v9.4s
ror w9,w9,#25
add v12.4s,v12.4s,v13.4s
ror w10,w10,#25
add v16.4s,v16.4s,v17.4s
ror w11,w11,#25
add v20.4s,v20.4s,v21.4s
ror w12,w12,#25
eor v24.16b,v3.16b,v0.16b
add w5,w5,w10
eor v25.16b,v7.16b,v4.16b
add w6,w6,w11
eor v26.16b,v11.16b,v8.16b
add w7,w7,w12
eor v27.16b,v15.16b,v12.16b
add w8,w8,w9
eor v28.16b,v19.16b,v16.16b
eor w21,w21,w5
eor v29.16b,v23.16b,v20.16b
eor w17,w17,w6
ushr v3.4s,v24.4s,#24
eor w19,w19,w7
ushr v7.4s,v25.4s,#24
eor w20,w20,w8
ushr v11.4s,v26.4s,#24
ror w21,w21,#16
ushr v15.4s,v27.4s,#24
ror w17,w17,#16
ushr v19.4s,v28.4s,#24
ror w19,w19,#16
ushr v23.4s,v29.4s,#24
ror w20,w20,#16
sli v3.4s,v24.4s,#8
add w15,w15,w21
sli v7.4s,v25.4s,#8
add w16,w16,w17
sli v11.4s,v26.4s,#8
add w13,w13,w19
sli v15.4s,v27.4s,#8
add w14,w14,w20
sli v19.4s,v28.4s,#8
eor w10,w10,w15
sli v23.4s,v29.4s,#8
eor w11,w11,w16
add v2.4s,v2.4s,v3.4s
eor w12,w12,w13
add v6.4s,v6.4s,v7.4s
eor w9,w9,w14
add v10.4s,v10.4s,v11.4s
ror w10,w10,#20
add v14.4s,v14.4s,v15.4s
ror w11,w11,#20
add v18.4s,v18.4s,v19.4s
ror w12,w12,#20
add v22.4s,v22.4s,v23.4s
ror w9,w9,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w10
eor v25.16b,v5.16b,v6.16b
add w6,w6,w11
eor v26.16b,v9.16b,v10.16b
add w7,w7,w12
eor v27.16b,v13.16b,v14.16b
add w8,w8,w9
eor v28.16b,v17.16b,v18.16b
eor w21,w21,w5
eor v29.16b,v21.16b,v22.16b
eor w17,w17,w6
ushr v1.4s,v24.4s,#25
eor w19,w19,w7
ushr v5.4s,v25.4s,#25
eor w20,w20,w8
ushr v9.4s,v26.4s,#25
ror w21,w21,#24
ushr v13.4s,v27.4s,#25
ror w17,w17,#24
ushr v17.4s,v28.4s,#25
ror w19,w19,#24
ushr v21.4s,v29.4s,#25
ror w20,w20,#24
sli v1.4s,v24.4s,#7
add w15,w15,w21
sli v5.4s,v25.4s,#7
add w16,w16,w17
sli v9.4s,v26.4s,#7
add w13,w13,w19
sli v13.4s,v27.4s,#7
add w14,w14,w20
sli v17.4s,v28.4s,#7
eor w10,w10,w15
sli v21.4s,v29.4s,#7
eor w11,w11,w16
ext v2.16b,v2.16b,v2.16b,#8
eor w12,w12,w13
ext v6.16b,v6.16b,v6.16b,#8
eor w9,w9,w14
ext v10.16b,v10.16b,v10.16b,#8
ror w10,w10,#25
ext v14.16b,v14.16b,v14.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v22.16b,v22.16b,v22.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#12
ext v7.16b,v7.16b,v7.16b,#12
ext v11.16b,v11.16b,v11.16b,#12
ext v15.16b,v15.16b,v15.16b,#12
ext v19.16b,v19.16b,v19.16b,#12
ext v23.16b,v23.16b,v23.16b,#12
ext v1.16b,v1.16b,v1.16b,#4
ext v5.16b,v5.16b,v5.16b,#4
ext v9.16b,v9.16b,v9.16b,#4
ext v13.16b,v13.16b,v13.16b,#4
ext v17.16b,v17.16b,v17.16b,#4
ext v21.16b,v21.16b,v21.16b,#4
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v8.4s,v8.4s,v9.4s
add w7,w7,w11
add v12.4s,v12.4s,v13.4s
add w8,w8,w12
add v16.4s,v16.4s,v17.4s
eor w17,w17,w5
add v20.4s,v20.4s,v21.4s
eor w19,w19,w6
eor v3.16b,v3.16b,v0.16b
eor w20,w20,w7
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w8
eor v11.16b,v11.16b,v8.16b
ror w17,w17,#16
eor v15.16b,v15.16b,v12.16b
ror w19,w19,#16
eor v19.16b,v19.16b,v16.16b
ror w20,w20,#16
eor v23.16b,v23.16b,v20.16b
ror w21,w21,#16
rev32 v3.8h,v3.8h
add w13,w13,w17
rev32 v7.8h,v7.8h
add w14,w14,w19
rev32 v11.8h,v11.8h
add w15,w15,w20
rev32 v15.8h,v15.8h
add w16,w16,w21
rev32 v19.8h,v19.8h
eor w9,w9,w13
rev32 v23.8h,v23.8h
eor w10,w10,w14
add v2.4s,v2.4s,v3.4s
eor w11,w11,w15
add v6.4s,v6.4s,v7.4s
eor w12,w12,w16
add v10.4s,v10.4s,v11.4s
ror w9,w9,#20
add v14.4s,v14.4s,v15.4s
ror w10,w10,#20
add v18.4s,v18.4s,v19.4s
ror w11,w11,#20
add v22.4s,v22.4s,v23.4s
ror w12,w12,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w9
eor v25.16b,v5.16b,v6.16b
add w6,w6,w10
eor v26.16b,v9.16b,v10.16b
add w7,w7,w11
eor v27.16b,v13.16b,v14.16b
add w8,w8,w12
eor v28.16b,v17.16b,v18.16b
eor w17,w17,w5
eor v29.16b,v21.16b,v22.16b
eor w19,w19,w6
ushr v1.4s,v24.4s,#20
eor w20,w20,w7
ushr v5.4s,v25.4s,#20
eor w21,w21,w8
ushr v9.4s,v26.4s,#20
ror w17,w17,#24
ushr v13.4s,v27.4s,#20
ror w19,w19,#24
ushr v17.4s,v28.4s,#20
ror w20,w20,#24
ushr v21.4s,v29.4s,#20
ror w21,w21,#24
sli v1.4s,v24.4s,#12
add w13,w13,w17
sli v5.4s,v25.4s,#12
add w14,w14,w19
sli v9.4s,v26.4s,#12
add w15,w15,w20
sli v13.4s,v27.4s,#12
add w16,w16,w21
sli v17.4s,v28.4s,#12
eor w9,w9,w13
sli v21.4s,v29.4s,#12
eor w10,w10,w14
add v0.4s,v0.4s,v1.4s
eor w11,w11,w15
add v4.4s,v4.4s,v5.4s
eor w12,w12,w16
add v8.4s,v8.4s,v9.4s
ror w9,w9,#25
add v12.4s,v12.4s,v13.4s
ror w10,w10,#25
add v16.4s,v16.4s,v17.4s
ror w11,w11,#25
add v20.4s,v20.4s,v21.4s
ror w12,w12,#25
eor v24.16b,v3.16b,v0.16b
add w5,w5,w10
eor v25.16b,v7.16b,v4.16b
add w6,w6,w11
eor v26.16b,v11.16b,v8.16b
add w7,w7,w12
eor v27.16b,v15.16b,v12.16b
add w8,w8,w9
eor v28.16b,v19.16b,v16.16b
eor w21,w21,w5
eor v29.16b,v23.16b,v20.16b
eor w17,w17,w6
ushr v3.4s,v24.4s,#24
eor w19,w19,w7
ushr v7.4s,v25.4s,#24
eor w20,w20,w8
ushr v11.4s,v26.4s,#24
ror w21,w21,#16
ushr v15.4s,v27.4s,#24
ror w17,w17,#16
ushr v19.4s,v28.4s,#24
ror w19,w19,#16
ushr v23.4s,v29.4s,#24
ror w20,w20,#16
sli v3.4s,v24.4s,#8
add w15,w15,w21
sli v7.4s,v25.4s,#8
add w16,w16,w17
sli v11.4s,v26.4s,#8
add w13,w13,w19
sli v15.4s,v27.4s,#8
add w14,w14,w20
sli v19.4s,v28.4s,#8
eor w10,w10,w15
sli v23.4s,v29.4s,#8
eor w11,w11,w16
add v2.4s,v2.4s,v3.4s
eor w12,w12,w13
add v6.4s,v6.4s,v7.4s
eor w9,w9,w14
add v10.4s,v10.4s,v11.4s
ror w10,w10,#20
add v14.4s,v14.4s,v15.4s
ror w11,w11,#20
add v18.4s,v18.4s,v19.4s
ror w12,w12,#20
add v22.4s,v22.4s,v23.4s
ror w9,w9,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w10
eor v25.16b,v5.16b,v6.16b
add w6,w6,w11
eor v26.16b,v9.16b,v10.16b
add w7,w7,w12
eor v27.16b,v13.16b,v14.16b
add w8,w8,w9
eor v28.16b,v17.16b,v18.16b
eor w21,w21,w5
eor v29.16b,v21.16b,v22.16b
eor w17,w17,w6
ushr v1.4s,v24.4s,#25
eor w19,w19,w7
ushr v5.4s,v25.4s,#25
eor w20,w20,w8
ushr v9.4s,v26.4s,#25
ror w21,w21,#24
ushr v13.4s,v27.4s,#25
ror w17,w17,#24
ushr v17.4s,v28.4s,#25
ror w19,w19,#24
ushr v21.4s,v29.4s,#25
ror w20,w20,#24
sli v1.4s,v24.4s,#7
add w15,w15,w21
sli v5.4s,v25.4s,#7
add w16,w16,w17
sli v9.4s,v26.4s,#7
add w13,w13,w19
sli v13.4s,v27.4s,#7
add w14,w14,w20
sli v17.4s,v28.4s,#7
eor w10,w10,w15
sli v21.4s,v29.4s,#7
eor w11,w11,w16
ext v2.16b,v2.16b,v2.16b,#8
eor w12,w12,w13
ext v6.16b,v6.16b,v6.16b,#8
eor w9,w9,w14
ext v10.16b,v10.16b,v10.16b,#8
ror w10,w10,#25
ext v14.16b,v14.16b,v14.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v22.16b,v22.16b,v22.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#4
ext v7.16b,v7.16b,v7.16b,#4
ext v11.16b,v11.16b,v11.16b,#4
ext v15.16b,v15.16b,v15.16b,#4
ext v19.16b,v19.16b,v19.16b,#4
ext v23.16b,v23.16b,v23.16b,#4
ext v1.16b,v1.16b,v1.16b,#12
ext v5.16b,v5.16b,v5.16b,#12
ext v9.16b,v9.16b,v9.16b,#12
ext v13.16b,v13.16b,v13.16b,#12
ext v17.16b,v17.16b,v17.16b,#12
ext v21.16b,v21.16b,v21.16b,#12
cbnz x4,Loop_lower_neon
add w5,w5,w22 // accumulate key block
ldp q24,q25,[sp,#0]
add x6,x6,x22,lsr#32
ldp q26,q27,[sp,#32]
add w7,w7,w23
ldp q28,q29,[sp,#64]
add x8,x8,x23,lsr#32
add v0.4s,v0.4s,v24.4s
add w9,w9,w24
add v4.4s,v4.4s,v24.4s
add x10,x10,x24,lsr#32
add v8.4s,v8.4s,v24.4s
add w11,w11,w25
add v12.4s,v12.4s,v24.4s
add x12,x12,x25,lsr#32
add v16.4s,v16.4s,v24.4s
add w13,w13,w26
add v20.4s,v20.4s,v24.4s
add x14,x14,x26,lsr#32
add v2.4s,v2.4s,v26.4s
add w15,w15,w27
add v6.4s,v6.4s,v26.4s
add x16,x16,x27,lsr#32
add v10.4s,v10.4s,v26.4s
add w17,w17,w28
add v14.4s,v14.4s,v26.4s
add x19,x19,x28,lsr#32
add v18.4s,v18.4s,v26.4s
add w20,w20,w30
add v22.4s,v22.4s,v26.4s
add x21,x21,x30,lsr#32
add v19.4s,v19.4s,v31.4s // +4
add x5,x5,x6,lsl#32 // pack
add v23.4s,v23.4s,v31.4s // +4
add x7,x7,x8,lsl#32
add v3.4s,v3.4s,v27.4s
ldp x6,x8,[x1,#0] // load input
add v7.4s,v7.4s,v28.4s
add x9,x9,x10,lsl#32
add v11.4s,v11.4s,v29.4s
add x11,x11,x12,lsl#32
add v15.4s,v15.4s,v30.4s
ldp x10,x12,[x1,#16]
add v19.4s,v19.4s,v27.4s
add x13,x13,x14,lsl#32
add v23.4s,v23.4s,v28.4s
add x15,x15,x16,lsl#32
add v1.4s,v1.4s,v25.4s
ldp x14,x16,[x1,#32]
add v5.4s,v5.4s,v25.4s
add x17,x17,x19,lsl#32
add v9.4s,v9.4s,v25.4s
add x20,x20,x21,lsl#32
add v13.4s,v13.4s,v25.4s
ldp x19,x21,[x1,#48]
add v17.4s,v17.4s,v25.4s
add x1,x1,#64
add v21.4s,v21.4s,v25.4s
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
ld1 {v24.16b,v25.16b,v26.16b,v27.16b},[x1],#64
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor v0.16b,v0.16b,v24.16b
eor x15,x15,x16
eor v1.16b,v1.16b,v25.16b
eor x17,x17,x19
eor v2.16b,v2.16b,v26.16b
eor x20,x20,x21
eor v3.16b,v3.16b,v27.16b
ld1 {v24.16b,v25.16b,v26.16b,v27.16b},[x1],#64
stp x5,x7,[x0,#0] // store output
add x28,x28,#7 // increment counter
stp x9,x11,[x0,#16]
stp x13,x15,[x0,#32]
stp x17,x20,[x0,#48]
add x0,x0,#64
st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64
ld1 {v0.16b,v1.16b,v2.16b,v3.16b},[x1],#64
eor v4.16b,v4.16b,v24.16b
eor v5.16b,v5.16b,v25.16b
eor v6.16b,v6.16b,v26.16b
eor v7.16b,v7.16b,v27.16b
st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64
ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64
eor v8.16b,v8.16b,v0.16b
ldp q24,q25,[sp,#0]
eor v9.16b,v9.16b,v1.16b
ldp q26,q27,[sp,#32]
eor v10.16b,v10.16b,v2.16b
eor v11.16b,v11.16b,v3.16b
st1 {v8.16b,v9.16b,v10.16b,v11.16b},[x0],#64
ld1 {v8.16b,v9.16b,v10.16b,v11.16b},[x1],#64
eor v12.16b,v12.16b,v4.16b
eor v13.16b,v13.16b,v5.16b
eor v14.16b,v14.16b,v6.16b
eor v15.16b,v15.16b,v7.16b
st1 {v12.16b,v13.16b,v14.16b,v15.16b},[x0],#64
ld1 {v12.16b,v13.16b,v14.16b,v15.16b},[x1],#64
eor v16.16b,v16.16b,v8.16b
eor v17.16b,v17.16b,v9.16b
eor v18.16b,v18.16b,v10.16b
eor v19.16b,v19.16b,v11.16b
st1 {v16.16b,v17.16b,v18.16b,v19.16b},[x0],#64
shl v0.4s,v31.4s,#1 // 4 -> 8
eor v20.16b,v20.16b,v12.16b
eor v21.16b,v21.16b,v13.16b
eor v22.16b,v22.16b,v14.16b
eor v23.16b,v23.16b,v15.16b
st1 {v20.16b,v21.16b,v22.16b,v23.16b},[x0],#64
add v27.4s,v27.4s,v0.4s // += 8
add v28.4s,v28.4s,v0.4s
add v29.4s,v29.4s,v0.4s
add v30.4s,v30.4s,v0.4s
b.hs Loop_outer_512_neon
adds x2,x2,#512
ushr v0.4s,v31.4s,#2 // 4 -> 1
ldp d8,d9,[sp,#128+0] // meet ABI requirements
ldp d10,d11,[sp,#128+16]
ldp d12,d13,[sp,#128+32]
ldp d14,d15,[sp,#128+48]
stp q24,q31,[sp,#0] // wipe off-load area
stp q24,q31,[sp,#32]
stp q24,q31,[sp,#64]
b.eq Ldone_512_neon
cmp x2,#192
sub v27.4s,v27.4s,v0.4s // -= 1
sub v28.4s,v28.4s,v0.4s
sub v29.4s,v29.4s,v0.4s
add sp,sp,#128
b.hs Loop_outer_neon
eor v25.16b,v25.16b,v25.16b
eor v26.16b,v26.16b,v26.16b
eor v27.16b,v27.16b,v27.16b
eor v28.16b,v28.16b,v28.16b
eor v29.16b,v29.16b,v29.16b
eor v30.16b,v30.16b,v30.16b
b Loop_outer
Ldone_512_neon:
ldp x19,x20,[x29,#16]
add sp,sp,#128+64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
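// ChaCha20_512_neon above processes eight 64-byte blocks per outer
// iteration: six in NEON state (v0-v23) and two in general-purpose
// registers, which is why x28 is bumped by 1 after the upper half-loop
// and by 7 after the lower one, while the counter rows v27-v30 advance
// by 8 through the doubled v31. A hedged C sketch of that bookkeeping
// (names and layout are illustrative, not the kernel's):
//
//   enum { BLOCKS_PER_ITER = 8 };          /* 6 NEON + 2 scalar blocks    */
//   static void init_counters(uint32_t base, uint32_t ctr[BLOCKS_PER_ITER]) {
//       for (int i = 0; i < BLOCKS_PER_ITER; i++)
//           ctr[i] = base + (uint32_t)i;   /* scalar: +0/+1, NEON: +2..+7 */
//   }
//   static void advance_counters(uint32_t ctr[BLOCKS_PER_ITER]) {
//       for (int i = 0; i < BLOCKS_PER_ITER; i++)
//           ctr[i] += BLOCKS_PER_ITER;     /* mirrors "v27..v30 += 8"     */
//   }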
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
// ---- marvin-hansen/iggy-streaming-system (18,316 bytes) ----
// ---- thirdparty/crates/ring-0.17.9/pregenerated/aesni-gcm-x86_64-macosx.S ----
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__)
.text
.p2align 5
_aesni_ctr32_ghash_6x:
vmovdqu 32(%r11),%xmm2
subq $6,%rdx
vpxor %xmm4,%xmm4,%xmm4
vmovdqu 0-128(%rcx),%xmm15
vpaddb %xmm2,%xmm1,%xmm10
vpaddb %xmm2,%xmm10,%xmm11
vpaddb %xmm2,%xmm11,%xmm12
vpaddb %xmm2,%xmm12,%xmm13
vpaddb %xmm2,%xmm13,%xmm14
vpxor %xmm15,%xmm1,%xmm9
vmovdqu %xmm4,16+8(%rsp)
jmp L$oop6x
.p2align 5
L$oop6x:
addl $100663296,%ebx
jc L$handle_ctr32
vmovdqu 0-32(%r9),%xmm3
vpaddb %xmm2,%xmm14,%xmm1
vpxor %xmm15,%xmm10,%xmm10
vpxor %xmm15,%xmm11,%xmm11
L$resume_ctr32:
vmovdqu %xmm1,(%r8)
vpclmulqdq $0x10,%xmm3,%xmm7,%xmm5
vpxor %xmm15,%xmm12,%xmm12
vmovups 16-128(%rcx),%xmm2
vpclmulqdq $0x01,%xmm3,%xmm7,%xmm6
xorq %r12,%r12
cmpq %r14,%r15
vaesenc %xmm2,%xmm9,%xmm9
vmovdqu 48+8(%rsp),%xmm0
vpxor %xmm15,%xmm13,%xmm13
vpclmulqdq $0x00,%xmm3,%xmm7,%xmm1
vaesenc %xmm2,%xmm10,%xmm10
vpxor %xmm15,%xmm14,%xmm14
setnc %r12b
vpclmulqdq $0x11,%xmm3,%xmm7,%xmm7
vaesenc %xmm2,%xmm11,%xmm11
vmovdqu 16-32(%r9),%xmm3
negq %r12
vaesenc %xmm2,%xmm12,%xmm12
vpxor %xmm5,%xmm6,%xmm6
vpclmulqdq $0x00,%xmm3,%xmm0,%xmm5
vpxor %xmm4,%xmm8,%xmm8
vaesenc %xmm2,%xmm13,%xmm13
vpxor %xmm5,%xmm1,%xmm4
andq $0x60,%r12
vmovups 32-128(%rcx),%xmm15
vpclmulqdq $0x10,%xmm3,%xmm0,%xmm1
vaesenc %xmm2,%xmm14,%xmm14
vpclmulqdq $0x01,%xmm3,%xmm0,%xmm2
leaq (%r14,%r12,1),%r14
vaesenc %xmm15,%xmm9,%xmm9
vpxor 16+8(%rsp),%xmm8,%xmm8
vpclmulqdq $0x11,%xmm3,%xmm0,%xmm3
vmovdqu 64+8(%rsp),%xmm0
vaesenc %xmm15,%xmm10,%xmm10
movbeq 88(%r14),%r13
vaesenc %xmm15,%xmm11,%xmm11
movbeq 80(%r14),%r12
vaesenc %xmm15,%xmm12,%xmm12
movq %r13,32+8(%rsp)
vaesenc %xmm15,%xmm13,%xmm13
movq %r12,40+8(%rsp)
vmovdqu 48-32(%r9),%xmm5
vaesenc %xmm15,%xmm14,%xmm14
vmovups 48-128(%rcx),%xmm15
vpxor %xmm1,%xmm6,%xmm6
vpclmulqdq $0x00,%xmm5,%xmm0,%xmm1
vaesenc %xmm15,%xmm9,%xmm9
vpxor %xmm2,%xmm6,%xmm6
vpclmulqdq $0x10,%xmm5,%xmm0,%xmm2
vaesenc %xmm15,%xmm10,%xmm10
vpxor %xmm3,%xmm7,%xmm7
vpclmulqdq $0x01,%xmm5,%xmm0,%xmm3
vaesenc %xmm15,%xmm11,%xmm11
vpclmulqdq $0x11,%xmm5,%xmm0,%xmm5
vmovdqu 80+8(%rsp),%xmm0
vaesenc %xmm15,%xmm12,%xmm12
vaesenc %xmm15,%xmm13,%xmm13
vpxor %xmm1,%xmm4,%xmm4
vmovdqu 64-32(%r9),%xmm1
vaesenc %xmm15,%xmm14,%xmm14
vmovups 64-128(%rcx),%xmm15
vpxor %xmm2,%xmm6,%xmm6
vpclmulqdq $0x00,%xmm1,%xmm0,%xmm2
vaesenc %xmm15,%xmm9,%xmm9
vpxor %xmm3,%xmm6,%xmm6
vpclmulqdq $0x10,%xmm1,%xmm0,%xmm3
vaesenc %xmm15,%xmm10,%xmm10
movbeq 72(%r14),%r13
vpxor %xmm5,%xmm7,%xmm7
vpclmulqdq $0x01,%xmm1,%xmm0,%xmm5
vaesenc %xmm15,%xmm11,%xmm11
movbeq 64(%r14),%r12
vpclmulqdq $0x11,%xmm1,%xmm0,%xmm1
vmovdqu 96+8(%rsp),%xmm0
vaesenc %xmm15,%xmm12,%xmm12
movq %r13,48+8(%rsp)
vaesenc %xmm15,%xmm13,%xmm13
movq %r12,56+8(%rsp)
vpxor %xmm2,%xmm4,%xmm4
vmovdqu 96-32(%r9),%xmm2
vaesenc %xmm15,%xmm14,%xmm14
vmovups 80-128(%rcx),%xmm15
vpxor %xmm3,%xmm6,%xmm6
vpclmulqdq $0x00,%xmm2,%xmm0,%xmm3
vaesenc %xmm15,%xmm9,%xmm9
vpxor %xmm5,%xmm6,%xmm6
vpclmulqdq $0x10,%xmm2,%xmm0,%xmm5
vaesenc %xmm15,%xmm10,%xmm10
movbeq 56(%r14),%r13
vpxor %xmm1,%xmm7,%xmm7
vpclmulqdq $0x01,%xmm2,%xmm0,%xmm1
vpxor 112+8(%rsp),%xmm8,%xmm8
vaesenc %xmm15,%xmm11,%xmm11
movbeq 48(%r14),%r12
vpclmulqdq $0x11,%xmm2,%xmm0,%xmm2
vaesenc %xmm15,%xmm12,%xmm12
movq %r13,64+8(%rsp)
vaesenc %xmm15,%xmm13,%xmm13
movq %r12,72+8(%rsp)
vpxor %xmm3,%xmm4,%xmm4
vmovdqu 112-32(%r9),%xmm3
vaesenc %xmm15,%xmm14,%xmm14
vmovups 96-128(%rcx),%xmm15
vpxor %xmm5,%xmm6,%xmm6
vpclmulqdq $0x10,%xmm3,%xmm8,%xmm5
vaesenc %xmm15,%xmm9,%xmm9
vpxor %xmm1,%xmm6,%xmm6
vpclmulqdq $0x01,%xmm3,%xmm8,%xmm1
vaesenc %xmm15,%xmm10,%xmm10
movbeq 40(%r14),%r13
vpxor %xmm2,%xmm7,%xmm7
vpclmulqdq $0x00,%xmm3,%xmm8,%xmm2
vaesenc %xmm15,%xmm11,%xmm11
movbeq 32(%r14),%r12
vpclmulqdq $0x11,%xmm3,%xmm8,%xmm8
vaesenc %xmm15,%xmm12,%xmm12
movq %r13,80+8(%rsp)
vaesenc %xmm15,%xmm13,%xmm13
movq %r12,88+8(%rsp)
vpxor %xmm5,%xmm6,%xmm6
vaesenc %xmm15,%xmm14,%xmm14
vpxor %xmm1,%xmm6,%xmm6
vmovups 112-128(%rcx),%xmm15
vpslldq $8,%xmm6,%xmm5
vpxor %xmm2,%xmm4,%xmm4
vmovdqu 16(%r11),%xmm3
vaesenc %xmm15,%xmm9,%xmm9
vpxor %xmm8,%xmm7,%xmm7
vaesenc %xmm15,%xmm10,%xmm10
vpxor %xmm5,%xmm4,%xmm4
movbeq 24(%r14),%r13
vaesenc %xmm15,%xmm11,%xmm11
movbeq 16(%r14),%r12
vpalignr $8,%xmm4,%xmm4,%xmm0
vpclmulqdq $0x10,%xmm3,%xmm4,%xmm4
movq %r13,96+8(%rsp)
vaesenc %xmm15,%xmm12,%xmm12
movq %r12,104+8(%rsp)
vaesenc %xmm15,%xmm13,%xmm13
vmovups 128-128(%rcx),%xmm1
vaesenc %xmm15,%xmm14,%xmm14
vaesenc %xmm1,%xmm9,%xmm9
vmovups 144-128(%rcx),%xmm15
vaesenc %xmm1,%xmm10,%xmm10
vpsrldq $8,%xmm6,%xmm6
vaesenc %xmm1,%xmm11,%xmm11
vpxor %xmm6,%xmm7,%xmm7
vaesenc %xmm1,%xmm12,%xmm12
vpxor %xmm0,%xmm4,%xmm4
movbeq 8(%r14),%r13
vaesenc %xmm1,%xmm13,%xmm13
movbeq 0(%r14),%r12
vaesenc %xmm1,%xmm14,%xmm14
vmovups 160-128(%rcx),%xmm1
cmpl $11,%r10d
jb L$enc_tail
vaesenc %xmm15,%xmm9,%xmm9
vaesenc %xmm15,%xmm10,%xmm10
vaesenc %xmm15,%xmm11,%xmm11
vaesenc %xmm15,%xmm12,%xmm12
vaesenc %xmm15,%xmm13,%xmm13
vaesenc %xmm15,%xmm14,%xmm14
vaesenc %xmm1,%xmm9,%xmm9
vaesenc %xmm1,%xmm10,%xmm10
vaesenc %xmm1,%xmm11,%xmm11
vaesenc %xmm1,%xmm12,%xmm12
vaesenc %xmm1,%xmm13,%xmm13
vmovups 176-128(%rcx),%xmm15
vaesenc %xmm1,%xmm14,%xmm14
vmovups 192-128(%rcx),%xmm1
vaesenc %xmm15,%xmm9,%xmm9
vaesenc %xmm15,%xmm10,%xmm10
vaesenc %xmm15,%xmm11,%xmm11
vaesenc %xmm15,%xmm12,%xmm12
vaesenc %xmm15,%xmm13,%xmm13
vaesenc %xmm15,%xmm14,%xmm14
vaesenc %xmm1,%xmm9,%xmm9
vaesenc %xmm1,%xmm10,%xmm10
vaesenc %xmm1,%xmm11,%xmm11
vaesenc %xmm1,%xmm12,%xmm12
vaesenc %xmm1,%xmm13,%xmm13
vmovups 208-128(%rcx),%xmm15
vaesenc %xmm1,%xmm14,%xmm14
vmovups 224-128(%rcx),%xmm1
jmp L$enc_tail
.p2align 5
L$handle_ctr32:
vmovdqu (%r11),%xmm0
vpshufb %xmm0,%xmm1,%xmm6
vmovdqu 48(%r11),%xmm5
vpaddd 64(%r11),%xmm6,%xmm10
vpaddd %xmm5,%xmm6,%xmm11
vmovdqu 0-32(%r9),%xmm3
vpaddd %xmm5,%xmm10,%xmm12
vpshufb %xmm0,%xmm10,%xmm10
vpaddd %xmm5,%xmm11,%xmm13
vpshufb %xmm0,%xmm11,%xmm11
vpxor %xmm15,%xmm10,%xmm10
vpaddd %xmm5,%xmm12,%xmm14
vpshufb %xmm0,%xmm12,%xmm12
vpxor %xmm15,%xmm11,%xmm11
vpaddd %xmm5,%xmm13,%xmm1
vpshufb %xmm0,%xmm13,%xmm13
vpshufb %xmm0,%xmm14,%xmm14
vpshufb %xmm0,%xmm1,%xmm1
jmp L$resume_ctr32
.p2align 5
L$enc_tail:
vaesenc %xmm15,%xmm9,%xmm9
vmovdqu %xmm7,16+8(%rsp)
vpalignr $8,%xmm4,%xmm4,%xmm8
vaesenc %xmm15,%xmm10,%xmm10
vpclmulqdq $0x10,%xmm3,%xmm4,%xmm4
vpxor 0(%rdi),%xmm1,%xmm2
vaesenc %xmm15,%xmm11,%xmm11
vpxor 16(%rdi),%xmm1,%xmm0
vaesenc %xmm15,%xmm12,%xmm12
vpxor 32(%rdi),%xmm1,%xmm5
vaesenc %xmm15,%xmm13,%xmm13
vpxor 48(%rdi),%xmm1,%xmm6
vaesenc %xmm15,%xmm14,%xmm14
vpxor 64(%rdi),%xmm1,%xmm7
vpxor 80(%rdi),%xmm1,%xmm3
vmovdqu (%r8),%xmm1
vaesenclast %xmm2,%xmm9,%xmm9
vmovdqu 32(%r11),%xmm2
vaesenclast %xmm0,%xmm10,%xmm10
vpaddb %xmm2,%xmm1,%xmm0
movq %r13,112+8(%rsp)
leaq 96(%rdi),%rdi
prefetcht0 512(%rdi)
prefetcht0 576(%rdi)
vaesenclast %xmm5,%xmm11,%xmm11
vpaddb %xmm2,%xmm0,%xmm5
movq %r12,120+8(%rsp)
leaq 96(%rsi),%rsi
vmovdqu 0-128(%rcx),%xmm15
vaesenclast %xmm6,%xmm12,%xmm12
vpaddb %xmm2,%xmm5,%xmm6
vaesenclast %xmm7,%xmm13,%xmm13
vpaddb %xmm2,%xmm6,%xmm7
vaesenclast %xmm3,%xmm14,%xmm14
vpaddb %xmm2,%xmm7,%xmm3
addq $0x60,%rax
subq $0x6,%rdx
jc L$6x_done
vmovups %xmm9,-96(%rsi)
vpxor %xmm15,%xmm1,%xmm9
vmovups %xmm10,-80(%rsi)
vmovdqa %xmm0,%xmm10
vmovups %xmm11,-64(%rsi)
vmovdqa %xmm5,%xmm11
vmovups %xmm12,-48(%rsi)
vmovdqa %xmm6,%xmm12
vmovups %xmm13,-32(%rsi)
vmovdqa %xmm7,%xmm13
vmovups %xmm14,-16(%rsi)
vmovdqa %xmm3,%xmm14
vmovdqu 32+8(%rsp),%xmm7
jmp L$oop6x
L$6x_done:
vpxor 16+8(%rsp),%xmm8,%xmm8
vpxor %xmm4,%xmm8,%xmm8
ret
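// The addl $100663296,%ebx at the top of L$oop6x is a counter-overflow
// probe: 100663296 is 0x06000000, i.e. 6<<24. The IV's final big-endian
// word is loaded little-endian, parking the counter's least significant
// byte in bits 24-31 of %ebx, so adding 6<<24 carries exactly when six
// byte-wise vpaddb increments would wrap that byte, and the code diverts
// to L$handle_ctr32. A C sketch of the check (illustrative only):
//
//   static int needs_slow_ctr_path(uint32_t ctr_word_le, uint32_t *probe) {
//       /* low byte of the big-endian counter sits in bits 24..31 here */
//       uint64_t sum = (uint64_t)ctr_word_le + (6u << 24);
//       *probe = (uint32_t)sum;   /* the kernel keeps this in %ebx     */
//       return sum > 0xffffffffu; /* carry set => take L$handle_ctr32  */
//   }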
.globl _aesni_gcm_decrypt
.private_extern _aesni_gcm_decrypt
.p2align 5
_aesni_gcm_decrypt:
_CET_ENDBR
xorq %rax,%rax
cmpq $0x60,%rdx
jb L$gcm_dec_abort
pushq %rbp
movq %rsp,%rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
vzeroupper
movq 16(%rbp),%r12
vmovdqu (%r8),%xmm1
addq $-128,%rsp
movl 12(%r8),%ebx
leaq L$bswap_mask(%rip),%r11
leaq -128(%rcx),%r14
movq $0xf80,%r15
vmovdqu (%r12),%xmm8
andq $-128,%rsp
vmovdqu (%r11),%xmm0
leaq 128(%rcx),%rcx
leaq 32(%r9),%r9
movl 240-128(%rcx),%r10d
vpshufb %xmm0,%xmm8,%xmm8
andq %r15,%r14
andq %rsp,%r15
subq %r14,%r15
jc L$dec_no_key_aliasing
cmpq $768,%r15
jnc L$dec_no_key_aliasing
subq %r15,%rsp
L$dec_no_key_aliasing:
vmovdqu 80(%rdi),%xmm7
movq %rdi,%r14
vmovdqu 64(%rdi),%xmm4
leaq -192(%rdi,%rdx,1),%r15
vmovdqu 48(%rdi),%xmm5
shrq $4,%rdx
xorq %rax,%rax
vmovdqu 32(%rdi),%xmm6
vpshufb %xmm0,%xmm7,%xmm7
vmovdqu 16(%rdi),%xmm2
vpshufb %xmm0,%xmm4,%xmm4
vmovdqu (%rdi),%xmm3
vpshufb %xmm0,%xmm5,%xmm5
vmovdqu %xmm4,48(%rsp)
vpshufb %xmm0,%xmm6,%xmm6
vmovdqu %xmm5,64(%rsp)
vpshufb %xmm0,%xmm2,%xmm2
vmovdqu %xmm6,80(%rsp)
vpshufb %xmm0,%xmm3,%xmm3
vmovdqu %xmm2,96(%rsp)
vmovdqu %xmm3,112(%rsp)
call _aesni_ctr32_ghash_6x
movq 16(%rbp),%r12
vmovups %xmm9,-96(%rsi)
vmovups %xmm10,-80(%rsi)
vmovups %xmm11,-64(%rsi)
vmovups %xmm12,-48(%rsi)
vmovups %xmm13,-32(%rsi)
vmovups %xmm14,-16(%rsi)
vpshufb (%r11),%xmm8,%xmm8
vmovdqu %xmm8,(%r12)
vzeroupper
leaq -40(%rbp),%rsp
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbx
popq %rbp
L$gcm_dec_abort:
ret
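// The $0xf80 arithmetic after the prologue (above, and again under
// L$enc_no_key_aliasing in the encrypt path) keeps the stack scratch
// frame from landing within 768 bytes of the expanded key schedule in
// the same 0xf80-byte stride, avoiding alias stalls while both are
// streamed. A hedged C model of the L$dec_no_key_aliasing test:
//
//   static uintptr_t adjust_stack(uintptr_t rsp, uintptr_t key) {
//       uintptr_t k = (key - 128) & 0xf80;  /* stride class of the key    */
//       uintptr_t s = rsp & 0xf80;          /* stride class of the frame  */
//       if (s >= k && s - k < 768)          /* too close: slide the frame */
//           rsp -= s - k;
//       return rsp;
//   }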
.p2align 5
_aesni_ctr32_6x:
vmovdqu 0-128(%rcx),%xmm4
vmovdqu 32(%r11),%xmm2
leaq -1(%r10),%r13
vmovups 16-128(%rcx),%xmm15
leaq 32-128(%rcx),%r12
vpxor %xmm4,%xmm1,%xmm9
addl $100663296,%ebx
jc L$handle_ctr32_2
vpaddb %xmm2,%xmm1,%xmm10
vpaddb %xmm2,%xmm10,%xmm11
vpxor %xmm4,%xmm10,%xmm10
vpaddb %xmm2,%xmm11,%xmm12
vpxor %xmm4,%xmm11,%xmm11
vpaddb %xmm2,%xmm12,%xmm13
vpxor %xmm4,%xmm12,%xmm12
vpaddb %xmm2,%xmm13,%xmm14
vpxor %xmm4,%xmm13,%xmm13
vpaddb %xmm2,%xmm14,%xmm1
vpxor %xmm4,%xmm14,%xmm14
jmp L$oop_ctr32
.p2align 4
L$oop_ctr32:
vaesenc %xmm15,%xmm9,%xmm9
vaesenc %xmm15,%xmm10,%xmm10
vaesenc %xmm15,%xmm11,%xmm11
vaesenc %xmm15,%xmm12,%xmm12
vaesenc %xmm15,%xmm13,%xmm13
vaesenc %xmm15,%xmm14,%xmm14
vmovups (%r12),%xmm15
leaq 16(%r12),%r12
decl %r13d
jnz L$oop_ctr32
vmovdqu (%r12),%xmm3
vaesenc %xmm15,%xmm9,%xmm9
vpxor 0(%rdi),%xmm3,%xmm4
vaesenc %xmm15,%xmm10,%xmm10
vpxor 16(%rdi),%xmm3,%xmm5
vaesenc %xmm15,%xmm11,%xmm11
vpxor 32(%rdi),%xmm3,%xmm6
vaesenc %xmm15,%xmm12,%xmm12
vpxor 48(%rdi),%xmm3,%xmm8
vaesenc %xmm15,%xmm13,%xmm13
vpxor 64(%rdi),%xmm3,%xmm2
vaesenc %xmm15,%xmm14,%xmm14
vpxor 80(%rdi),%xmm3,%xmm3
leaq 96(%rdi),%rdi
vaesenclast %xmm4,%xmm9,%xmm9
vaesenclast %xmm5,%xmm10,%xmm10
vaesenclast %xmm6,%xmm11,%xmm11
vaesenclast %xmm8,%xmm12,%xmm12
vaesenclast %xmm2,%xmm13,%xmm13
vaesenclast %xmm3,%xmm14,%xmm14
vmovups %xmm9,0(%rsi)
vmovups %xmm10,16(%rsi)
vmovups %xmm11,32(%rsi)
vmovups %xmm12,48(%rsi)
vmovups %xmm13,64(%rsi)
vmovups %xmm14,80(%rsi)
leaq 96(%rsi),%rsi
ret
.p2align 5
L$handle_ctr32_2:
vpshufb %xmm0,%xmm1,%xmm6
vmovdqu 48(%r11),%xmm5
vpaddd 64(%r11),%xmm6,%xmm10
vpaddd %xmm5,%xmm6,%xmm11
vpaddd %xmm5,%xmm10,%xmm12
vpshufb %xmm0,%xmm10,%xmm10
vpaddd %xmm5,%xmm11,%xmm13
vpshufb %xmm0,%xmm11,%xmm11
vpxor %xmm4,%xmm10,%xmm10
vpaddd %xmm5,%xmm12,%xmm14
vpshufb %xmm0,%xmm12,%xmm12
vpxor %xmm4,%xmm11,%xmm11
vpaddd %xmm5,%xmm13,%xmm1
vpshufb %xmm0,%xmm13,%xmm13
vpxor %xmm4,%xmm12,%xmm12
vpshufb %xmm0,%xmm14,%xmm14
vpxor %xmm4,%xmm13,%xmm13
vpshufb %xmm0,%xmm1,%xmm1
vpxor %xmm4,%xmm14,%xmm14
jmp L$oop_ctr32
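// When the 6<<24 probe carries, L$handle_ctr32/L$handle_ctr32_2 take the
// exact path instead: byte-swap the counter block to little-endian with
// vpshufb, do genuine 32-bit adds of +1..+6 via the constants at
// 48(%r11)/64(%r11), and swap each result back. A hedged C model:
//
//   static uint32_t bswap32(uint32_t v) {
//       return (v >> 24) | ((v >> 8) & 0xff00u) |
//              ((v << 8) & 0xff0000u) | (v << 24);
//   }
//   static void ctr32_slow_inc(uint32_t ctr_be, uint32_t out_be[6]) {
//       uint32_t ctr = bswap32(ctr_be);       /* vpshufb bswap mask   */
//       for (uint32_t i = 0; i < 6; i++)
//           out_be[i] = bswap32(ctr + 1 + i); /* vpaddd, vpshufb back */
//   }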
.globl _aesni_gcm_encrypt
.private_extern _aesni_gcm_encrypt
.p2align 5
_aesni_gcm_encrypt:
_CET_ENDBR
#ifdef BORINGSSL_DISPATCH_TEST
movb $1,_BORINGSSL_function_hit+2(%rip)
#endif
xorq %rax,%rax
cmpq $288,%rdx
jb L$gcm_enc_abort
pushq %rbp
movq %rsp,%rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
vzeroupper
vmovdqu (%r8),%xmm1
addq $-128,%rsp
movl 12(%r8),%ebx
leaq L$bswap_mask(%rip),%r11
leaq -128(%rcx),%r14
movq $0xf80,%r15
leaq 128(%rcx),%rcx
vmovdqu (%r11),%xmm0
andq $-128,%rsp
movl 240-128(%rcx),%r10d
andq %r15,%r14
andq %rsp,%r15
subq %r14,%r15
jc L$enc_no_key_aliasing
cmpq $768,%r15
jnc L$enc_no_key_aliasing
subq %r15,%rsp
L$enc_no_key_aliasing:
movq %rsi,%r14
leaq -192(%rsi,%rdx,1),%r15
shrq $4,%rdx
call _aesni_ctr32_6x
vpshufb %xmm0,%xmm9,%xmm8
vpshufb %xmm0,%xmm10,%xmm2
vmovdqu %xmm8,112(%rsp)
vpshufb %xmm0,%xmm11,%xmm4
vmovdqu %xmm2,96(%rsp)
vpshufb %xmm0,%xmm12,%xmm5
vmovdqu %xmm4,80(%rsp)
vpshufb %xmm0,%xmm13,%xmm6
vmovdqu %xmm5,64(%rsp)
vpshufb %xmm0,%xmm14,%xmm7
vmovdqu %xmm6,48(%rsp)
call _aesni_ctr32_6x
movq 16(%rbp),%r12
leaq 32(%r9),%r9
vmovdqu (%r12),%xmm8
subq $12,%rdx
movq $192,%rax
vpshufb %xmm0,%xmm8,%xmm8
call _aesni_ctr32_ghash_6x
vmovdqu 32(%rsp),%xmm7
vmovdqu (%r11),%xmm0
vmovdqu 0-32(%r9),%xmm3
vpunpckhqdq %xmm7,%xmm7,%xmm1
vmovdqu 32-32(%r9),%xmm15
vmovups %xmm9,-96(%rsi)
vpshufb %xmm0,%xmm9,%xmm9
vpxor %xmm7,%xmm1,%xmm1
vmovups %xmm10,-80(%rsi)
vpshufb %xmm0,%xmm10,%xmm10
vmovups %xmm11,-64(%rsi)
vpshufb %xmm0,%xmm11,%xmm11
vmovups %xmm12,-48(%rsi)
vpshufb %xmm0,%xmm12,%xmm12
vmovups %xmm13,-32(%rsi)
vpshufb %xmm0,%xmm13,%xmm13
vmovups %xmm14,-16(%rsi)
vpshufb %xmm0,%xmm14,%xmm14
vmovdqu %xmm9,16(%rsp)
vmovdqu 48(%rsp),%xmm6
vmovdqu 16-32(%r9),%xmm0
vpunpckhqdq %xmm6,%xmm6,%xmm2
vpclmulqdq $0x00,%xmm3,%xmm7,%xmm5
vpxor %xmm6,%xmm2,%xmm2
vpclmulqdq $0x11,%xmm3,%xmm7,%xmm7
vpclmulqdq $0x00,%xmm15,%xmm1,%xmm1
vmovdqu 64(%rsp),%xmm9
vpclmulqdq $0x00,%xmm0,%xmm6,%xmm4
vmovdqu 48-32(%r9),%xmm3
vpxor %xmm5,%xmm4,%xmm4
vpunpckhqdq %xmm9,%xmm9,%xmm5
vpclmulqdq $0x11,%xmm0,%xmm6,%xmm6
vpxor %xmm9,%xmm5,%xmm5
vpxor %xmm7,%xmm6,%xmm6
vpclmulqdq $0x10,%xmm15,%xmm2,%xmm2
vmovdqu 80-32(%r9),%xmm15
vpxor %xmm1,%xmm2,%xmm2
vmovdqu 80(%rsp),%xmm1
vpclmulqdq $0x00,%xmm3,%xmm9,%xmm7
vmovdqu 64-32(%r9),%xmm0
vpxor %xmm4,%xmm7,%xmm7
vpunpckhqdq %xmm1,%xmm1,%xmm4
vpclmulqdq $0x11,%xmm3,%xmm9,%xmm9
vpxor %xmm1,%xmm4,%xmm4
vpxor %xmm6,%xmm9,%xmm9
vpclmulqdq $0x00,%xmm15,%xmm5,%xmm5
vpxor %xmm2,%xmm5,%xmm5
vmovdqu 96(%rsp),%xmm2
vpclmulqdq $0x00,%xmm0,%xmm1,%xmm6
vmovdqu 96-32(%r9),%xmm3
vpxor %xmm7,%xmm6,%xmm6
vpunpckhqdq %xmm2,%xmm2,%xmm7
vpclmulqdq $0x11,%xmm0,%xmm1,%xmm1
vpxor %xmm2,%xmm7,%xmm7
vpxor %xmm9,%xmm1,%xmm1
vpclmulqdq $0x10,%xmm15,%xmm4,%xmm4
vmovdqu 128-32(%r9),%xmm15
vpxor %xmm5,%xmm4,%xmm4
vpxor 112(%rsp),%xmm8,%xmm8
vpclmulqdq $0x00,%xmm3,%xmm2,%xmm5
vmovdqu 112-32(%r9),%xmm0
vpunpckhqdq %xmm8,%xmm8,%xmm9
vpxor %xmm6,%xmm5,%xmm5
vpclmulqdq $0x11,%xmm3,%xmm2,%xmm2
vpxor %xmm8,%xmm9,%xmm9
vpxor %xmm1,%xmm2,%xmm2
vpclmulqdq $0x00,%xmm15,%xmm7,%xmm7
vpxor %xmm4,%xmm7,%xmm4
vpclmulqdq $0x00,%xmm0,%xmm8,%xmm6
vmovdqu 0-32(%r9),%xmm3
vpunpckhqdq %xmm14,%xmm14,%xmm1
vpclmulqdq $0x11,%xmm0,%xmm8,%xmm8
vpxor %xmm14,%xmm1,%xmm1
vpxor %xmm5,%xmm6,%xmm5
vpclmulqdq $0x10,%xmm15,%xmm9,%xmm9
vmovdqu 32-32(%r9),%xmm15
vpxor %xmm2,%xmm8,%xmm7
vpxor %xmm4,%xmm9,%xmm6
vmovdqu 16-32(%r9),%xmm0
vpxor %xmm5,%xmm7,%xmm9
vpclmulqdq $0x00,%xmm3,%xmm14,%xmm4
vpxor %xmm9,%xmm6,%xmm6
vpunpckhqdq %xmm13,%xmm13,%xmm2
vpclmulqdq $0x11,%xmm3,%xmm14,%xmm14
vpxor %xmm13,%xmm2,%xmm2
vpslldq $8,%xmm6,%xmm9
vpclmulqdq $0x00,%xmm15,%xmm1,%xmm1
vpxor %xmm9,%xmm5,%xmm8
vpsrldq $8,%xmm6,%xmm6
vpxor %xmm6,%xmm7,%xmm7
vpclmulqdq $0x00,%xmm0,%xmm13,%xmm5
vmovdqu 48-32(%r9),%xmm3
vpxor %xmm4,%xmm5,%xmm5
vpunpckhqdq %xmm12,%xmm12,%xmm9
vpclmulqdq $0x11,%xmm0,%xmm13,%xmm13
vpxor %xmm12,%xmm9,%xmm9
vpxor %xmm14,%xmm13,%xmm13
vpalignr $8,%xmm8,%xmm8,%xmm14
vpclmulqdq $0x10,%xmm15,%xmm2,%xmm2
vmovdqu 80-32(%r9),%xmm15
vpxor %xmm1,%xmm2,%xmm2
vpclmulqdq $0x00,%xmm3,%xmm12,%xmm4
vmovdqu 64-32(%r9),%xmm0
vpxor %xmm5,%xmm4,%xmm4
vpunpckhqdq %xmm11,%xmm11,%xmm1
vpclmulqdq $0x11,%xmm3,%xmm12,%xmm12
vpxor %xmm11,%xmm1,%xmm1
vpxor %xmm13,%xmm12,%xmm12
vxorps 16(%rsp),%xmm7,%xmm7
vpclmulqdq $0x00,%xmm15,%xmm9,%xmm9
vpxor %xmm2,%xmm9,%xmm9
vpclmulqdq $0x10,16(%r11),%xmm8,%xmm8
vxorps %xmm14,%xmm8,%xmm8
vpclmulqdq $0x00,%xmm0,%xmm11,%xmm5
vmovdqu 96-32(%r9),%xmm3
vpxor %xmm4,%xmm5,%xmm5
vpunpckhqdq %xmm10,%xmm10,%xmm2
vpclmulqdq $0x11,%xmm0,%xmm11,%xmm11
vpxor %xmm10,%xmm2,%xmm2
vpalignr $8,%xmm8,%xmm8,%xmm14
vpxor %xmm12,%xmm11,%xmm11
vpclmulqdq $0x10,%xmm15,%xmm1,%xmm1
vmovdqu 128-32(%r9),%xmm15
vpxor %xmm9,%xmm1,%xmm1
vxorps %xmm7,%xmm14,%xmm14
vpclmulqdq $0x10,16(%r11),%xmm8,%xmm8
vxorps %xmm14,%xmm8,%xmm8
vpclmulqdq $0x00,%xmm3,%xmm10,%xmm4
vmovdqu 112-32(%r9),%xmm0
vpxor %xmm5,%xmm4,%xmm4
vpunpckhqdq %xmm8,%xmm8,%xmm9
vpclmulqdq $0x11,%xmm3,%xmm10,%xmm10
vpxor %xmm8,%xmm9,%xmm9
vpxor %xmm11,%xmm10,%xmm10
vpclmulqdq $0x00,%xmm15,%xmm2,%xmm2
vpxor %xmm1,%xmm2,%xmm2
vpclmulqdq $0x00,%xmm0,%xmm8,%xmm5
vpclmulqdq $0x11,%xmm0,%xmm8,%xmm7
vpxor %xmm4,%xmm5,%xmm5
vpclmulqdq $0x10,%xmm15,%xmm9,%xmm6
vpxor %xmm10,%xmm7,%xmm7
vpxor %xmm2,%xmm6,%xmm6
vpxor %xmm5,%xmm7,%xmm4
vpxor %xmm4,%xmm6,%xmm6
vpslldq $8,%xmm6,%xmm1
vmovdqu 16(%r11),%xmm3
vpsrldq $8,%xmm6,%xmm6
vpxor %xmm1,%xmm5,%xmm8
vpxor %xmm6,%xmm7,%xmm7
vpalignr $8,%xmm8,%xmm8,%xmm2
vpclmulqdq $0x10,%xmm3,%xmm8,%xmm8
vpxor %xmm2,%xmm8,%xmm8
vpalignr $8,%xmm8,%xmm8,%xmm2
vpclmulqdq $0x10,%xmm3,%xmm8,%xmm8
vpxor %xmm7,%xmm2,%xmm2
vpxor %xmm2,%xmm8,%xmm8
movq 16(%rbp),%r12
vpshufb (%r11),%xmm8,%xmm8
vmovdqu %xmm8,(%r12)
vzeroupper
leaq -40(%rbp),%rsp
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbx
popq %rbp
L$gcm_enc_abort:
ret
.section __DATA,__const
.p2align 6
L$bswap_mask:
.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
L$poly:
.byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2
L$one_msb:
.byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1
L$two_lsb:
.byte 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
L$one_lsb:
.byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
.byte 65,69,83,45,78,73,32,71,67,77,32,109,111,100,117,108,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.p2align 6
.text
#endif
// ---- marvin-hansen/iggy-streaming-system (10,896 bytes) ----
// ---- thirdparty/crates/ring-0.17.9/pregenerated/ghash-neon-armv8-win64.S ----
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
#include <ring-core/arm_arch.h>
.text
.globl gcm_init_neon
.def gcm_init_neon
.type 32
.endef
.align 4
gcm_init_neon:
AARCH64_VALID_CALL_TARGET
// This function is adapted from gcm_init_v8. xC2 is t3.
ld1 {v17.2d}, [x1] // load H
movi v19.16b, #0xe1
shl v19.2d, v19.2d, #57 // 0xc2.0
ext v3.16b, v17.16b, v17.16b, #8
ushr v18.2d, v19.2d, #63
dup v17.4s, v17.s[1]
ext v16.16b, v18.16b, v19.16b, #8 // t0=0xc2....01
ushr v18.2d, v3.2d, #63
sshr v17.4s, v17.4s, #31 // broadcast carry bit
and v18.16b, v18.16b, v16.16b
shl v3.2d, v3.2d, #1
ext v18.16b, v18.16b, v18.16b, #8
and v16.16b, v16.16b, v17.16b
orr v3.16b, v3.16b, v18.16b // H<<<=1
eor v5.16b, v3.16b, v16.16b // twisted H
st1 {v5.2d}, [x0] // store Htable[0]
ret
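// gcm_init_neon derives the "twisted" hash key: H is doubled in the
// bit-reflected GF(2^128) representation and the carry out of the top
// bit is folded back with the 0xc2...01 polynomial constant, saving a
// shift in every later multiply. An illustrative C sketch over two
// 64-bit limbs (the limb layout is an assumption, not the exact
// register layout above):
//
//   typedef struct { uint64_t lo, hi; } u128;
//   static u128 twist_h(u128 h) {
//       uint64_t carry = (uint64_t)0 - (h.hi >> 63); /* broadcast top bit */
//       u128 t;
//       t.hi = (h.hi << 1) | (h.lo >> 63);
//       t.lo = (h.lo << 1);
//       t.lo ^= carry & 1;                   /* ...01 half of the constant */
//       t.hi ^= carry & 0xc200000000000000u; /* 0xc2 half                  */
//       return t;
//   }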
.globl gcm_gmult_neon
.def gcm_gmult_neon
.type 32
.endef
.align 4
gcm_gmult_neon:
AARCH64_VALID_CALL_TARGET
ld1 {v3.16b}, [x0] // load Xi
ld1 {v5.1d}, [x1], #8 // load twisted H
ld1 {v6.1d}, [x1]
adrp x9, Lmasks // load constants
add x9, x9, :lo12:Lmasks
ld1 {v24.2d, v25.2d}, [x9]
rev64 v3.16b, v3.16b // byteswap Xi
ext v3.16b, v3.16b, v3.16b, #8
eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing
mov x3, #16
b Lgmult_neon
.globl gcm_ghash_neon
.def gcm_ghash_neon
.type 32
.endef
.align 4
gcm_ghash_neon:
AARCH64_VALID_CALL_TARGET
ld1 {v0.16b}, [x0] // load Xi
ld1 {v5.1d}, [x1], #8 // load twisted H
ld1 {v6.1d}, [x1]
adrp x9, Lmasks // load constants
add x9, x9, :lo12:Lmasks
ld1 {v24.2d, v25.2d}, [x9]
rev64 v0.16b, v0.16b // byteswap Xi
ext v0.16b, v0.16b, v0.16b, #8
eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing
Loop_neon:
ld1 {v3.16b}, [x2], #16 // load inp
rev64 v3.16b, v3.16b // byteswap inp
ext v3.16b, v3.16b, v3.16b, #8
eor v3.16b, v3.16b, v0.16b // inp ^= Xi
Lgmult_neon:
// Split the input into v3 and v4. (The upper halves are unused,
// so it is okay to leave them alone.)
ins v4.d[0], v3.d[1]
ext v16.8b, v5.8b, v5.8b, #1 // A1
pmull v16.8h, v16.8b, v3.8b // F = A1*B
ext v0.8b, v3.8b, v3.8b, #1 // B1
pmull v0.8h, v5.8b, v0.8b // E = A*B1
ext v17.8b, v5.8b, v5.8b, #2 // A2
pmull v17.8h, v17.8b, v3.8b // H = A2*B
ext v19.8b, v3.8b, v3.8b, #2 // B2
pmull v19.8h, v5.8b, v19.8b // G = A*B2
ext v18.8b, v5.8b, v5.8b, #3 // A3
eor v16.16b, v16.16b, v0.16b // L = E + F
pmull v18.8h, v18.8b, v3.8b // J = A3*B
ext v0.8b, v3.8b, v3.8b, #3 // B3
eor v17.16b, v17.16b, v19.16b // M = G + H
pmull v0.8h, v5.8b, v0.8b // I = A*B3
// Here we diverge from the 32-bit version. It computes the following
// (instructions reordered for clarity):
//
// veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L)
// vand $t0#hi, $t0#hi, $k48
// veor $t0#lo, $t0#lo, $t0#hi
//
// veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M)
// vand $t1#hi, $t1#hi, $k32
// veor $t1#lo, $t1#lo, $t1#hi
//
// veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N)
// vand $t2#hi, $t2#hi, $k16
// veor $t2#lo, $t2#lo, $t2#hi
//
// veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K)
// vmov.i64 $t3#hi, #0
//
// $kN is a mask with the bottom N bits set. AArch64 cannot compute on
// upper halves of SIMD registers, so we must split each half into
// separate registers. To compensate, we pair computations up and
// parallelize.
ext v19.8b, v3.8b, v3.8b, #4 // B4
eor v18.16b, v18.16b, v0.16b // N = I + J
pmull v19.8h, v5.8b, v19.8b // K = A*B4
// This can probably be scheduled more efficiently. For now, we just
// pair up independent instructions.
zip1 v20.2d, v16.2d, v17.2d
zip1 v22.2d, v18.2d, v19.2d
zip2 v21.2d, v16.2d, v17.2d
zip2 v23.2d, v18.2d, v19.2d
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
and v21.16b, v21.16b, v24.16b
and v23.16b, v23.16b, v25.16b
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
zip1 v16.2d, v20.2d, v21.2d
zip1 v18.2d, v22.2d, v23.2d
zip2 v17.2d, v20.2d, v21.2d
zip2 v19.2d, v22.2d, v23.2d
ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8
ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16
pmull v0.8h, v5.8b, v3.8b // D = A*B
ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32
ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24
eor v16.16b, v16.16b, v17.16b
eor v18.16b, v18.16b, v19.16b
eor v0.16b, v0.16b, v16.16b
eor v0.16b, v0.16b, v18.16b
eor v3.8b, v3.8b, v4.8b // Karatsuba pre-processing
ext v16.8b, v7.8b, v7.8b, #1 // A1
pmull v16.8h, v16.8b, v3.8b // F = A1*B
ext v1.8b, v3.8b, v3.8b, #1 // B1
pmull v1.8h, v7.8b, v1.8b // E = A*B1
ext v17.8b, v7.8b, v7.8b, #2 // A2
pmull v17.8h, v17.8b, v3.8b // H = A2*B
ext v19.8b, v3.8b, v3.8b, #2 // B2
pmull v19.8h, v7.8b, v19.8b // G = A*B2
ext v18.8b, v7.8b, v7.8b, #3 // A3
eor v16.16b, v16.16b, v1.16b // L = E + F
pmull v18.8h, v18.8b, v3.8b // J = A3*B
ext v1.8b, v3.8b, v3.8b, #3 // B3
eor v17.16b, v17.16b, v19.16b // M = G + H
pmull v1.8h, v7.8b, v1.8b // I = A*B3
// Here we diverge from the 32-bit version. It computes the following
// (instructions reordered for clarity):
//
// veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L)
// vand $t0#hi, $t0#hi, $k48
// veor $t0#lo, $t0#lo, $t0#hi
//
// veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M)
// vand $t1#hi, $t1#hi, $k32
// veor $t1#lo, $t1#lo, $t1#hi
//
// veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N)
// vand $t2#hi, $t2#hi, $k16
// veor $t2#lo, $t2#lo, $t2#hi
//
// veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K)
// vmov.i64 $t3#hi, #0
//
// $kN is a mask with the bottom N bits set. AArch64 cannot compute on
// upper halves of SIMD registers, so we must split each half into
// separate registers. To compensate, we pair computations up and
// parallelize.
ext v19.8b, v3.8b, v3.8b, #4 // B4
eor v18.16b, v18.16b, v1.16b // N = I + J
pmull v19.8h, v7.8b, v19.8b // K = A*B4
// This can probably be scheduled more efficiently. For now, we just
// pair up independent instructions.
zip1 v20.2d, v16.2d, v17.2d
zip1 v22.2d, v18.2d, v19.2d
zip2 v21.2d, v16.2d, v17.2d
zip2 v23.2d, v18.2d, v19.2d
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
and v21.16b, v21.16b, v24.16b
and v23.16b, v23.16b, v25.16b
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
zip1 v16.2d, v20.2d, v21.2d
zip1 v18.2d, v22.2d, v23.2d
zip2 v17.2d, v20.2d, v21.2d
zip2 v19.2d, v22.2d, v23.2d
ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8
ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16
pmull v1.8h, v7.8b, v3.8b // D = A*B
ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32
ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24
eor v16.16b, v16.16b, v17.16b
eor v18.16b, v18.16b, v19.16b
eor v1.16b, v1.16b, v16.16b
eor v1.16b, v1.16b, v18.16b
ext v16.8b, v6.8b, v6.8b, #1 // A1
pmull v16.8h, v16.8b, v4.8b // F = A1*B
ext v2.8b, v4.8b, v4.8b, #1 // B1
pmull v2.8h, v6.8b, v2.8b // E = A*B1
ext v17.8b, v6.8b, v6.8b, #2 // A2
pmull v17.8h, v17.8b, v4.8b // H = A2*B
ext v19.8b, v4.8b, v4.8b, #2 // B2
pmull v19.8h, v6.8b, v19.8b // G = A*B2
ext v18.8b, v6.8b, v6.8b, #3 // A3
eor v16.16b, v16.16b, v2.16b // L = E + F
pmull v18.8h, v18.8b, v4.8b // J = A3*B
ext v2.8b, v4.8b, v4.8b, #3 // B3
eor v17.16b, v17.16b, v19.16b // M = G + H
pmull v2.8h, v6.8b, v2.8b // I = A*B3
// Here we diverge from the 32-bit version. It computes the following
// (instructions reordered for clarity):
//
// veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L)
// vand $t0#hi, $t0#hi, $k48
// veor $t0#lo, $t0#lo, $t0#hi
//
// veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M)
// vand $t1#hi, $t1#hi, $k32
// veor $t1#lo, $t1#lo, $t1#hi
//
// veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N)
// vand $t2#hi, $t2#hi, $k16
// veor $t2#lo, $t2#lo, $t2#hi
//
// veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K)
// vmov.i64 $t3#hi, #0
//
// $kN is a mask with the bottom N bits set. AArch64 cannot compute on
// upper halves of SIMD registers, so we must split each half into
// separate registers. To compensate, we pair computations up and
// parallelize.
ext v19.8b, v4.8b, v4.8b, #4 // B4
eor v18.16b, v18.16b, v2.16b // N = I + J
pmull v19.8h, v6.8b, v19.8b // K = A*B4
// This can probably be scheduled more efficiently. For now, we just
// pair up independent instructions.
zip1 v20.2d, v16.2d, v17.2d
zip1 v22.2d, v18.2d, v19.2d
zip2 v21.2d, v16.2d, v17.2d
zip2 v23.2d, v18.2d, v19.2d
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
and v21.16b, v21.16b, v24.16b
and v23.16b, v23.16b, v25.16b
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
zip1 v16.2d, v20.2d, v21.2d
zip1 v18.2d, v22.2d, v23.2d
zip2 v17.2d, v20.2d, v21.2d
zip2 v19.2d, v22.2d, v23.2d
ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8
ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16
pmull v2.8h, v6.8b, v4.8b // D = A*B
ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32
ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24
eor v16.16b, v16.16b, v17.16b
eor v18.16b, v18.16b, v19.16b
eor v2.16b, v2.16b, v16.16b
eor v2.16b, v2.16b, v18.16b
ext v16.16b, v0.16b, v2.16b, #8
eor v1.16b, v1.16b, v0.16b // Karatsuba post-processing
eor v1.16b, v1.16b, v2.16b
eor v1.16b, v1.16b, v16.16b // Xm overlaps Xh.lo and Xl.hi
ins v0.d[1], v1.d[0] // Xh|Xl - 256-bit result
// This is a no-op due to the ins instruction below.
// ins v2.d[0], v1.d[1]
// equivalent of reduction_avx from ghash-x86_64.pl
shl v17.2d, v0.2d, #57 // 1st phase
shl v18.2d, v0.2d, #62
eor v18.16b, v18.16b, v17.16b //
shl v17.2d, v0.2d, #63
eor v18.16b, v18.16b, v17.16b //
// Note Xm contains {Xl.d[1], Xh.d[0]}.
eor v18.16b, v18.16b, v1.16b
ins v0.d[1], v18.d[0] // Xl.d[1] ^= t2.d[0]
ins v2.d[0], v18.d[1] // Xh.d[0] ^= t2.d[1]
ushr v18.2d, v0.2d, #1 // 2nd phase
eor v2.16b, v2.16b,v0.16b
eor v0.16b, v0.16b,v18.16b //
ushr v18.2d, v18.2d, #6
ushr v0.2d, v0.2d, #1 //
eor v0.16b, v0.16b, v2.16b //
eor v0.16b, v0.16b, v18.16b //
subs x3, x3, #16
bne Loop_neon
rev64 v0.16b, v0.16b // byteswap Xi and write
ext v0.16b, v0.16b, v0.16b, #8
st1 {v0.16b}, [x0]
ret
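// The tail of Lgmult_neon (shl #57/#62/#63 followed by the ushr #1/#2/#7
// ladder, flagged above as "equivalent of reduction_avx") is the
// two-phase reduction of the 256-bit product modulo the bit-reflected
// GHASH polynomial x^128 + x^7 + x^2 + x + 1. A per-lane C sketch of the
// shift pattern (the Xl/Xm/Xh lane plumbing is simplified here):
//
//   static uint64_t ghash_phase1(uint64_t x) {  /* fold low bits upward */
//       return (x << 57) ^ (x << 62) ^ (x << 63);
//   }
//   static uint64_t ghash_phase2(uint64_t x) {  /* fold back down */
//       return x ^ (x >> 1) ^ (x >> 2) ^ (x >> 7);
//   }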
.section .rodata
.align 4
Lmasks:
.quad 0x0000ffffffffffff // k48
.quad 0x00000000ffffffff // k32
.quad 0x000000000000ffff // k16
.quad 0x0000000000000000 // k0
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,100,101,114,105,118,101,100,32,102,114,111,109,32,65,82,77,118,52,32,118,101,114,115,105,111,110,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
// ---- marvin-hansen/iggy-streaming-system (190,538 bytes) ----
// ---- thirdparty/crates/ring-0.17.9/pregenerated/chacha20_poly1305_x86_64-macosx.S ----
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__)
.section __DATA,__const
.p2align 6
chacha20_poly1305_constants:
L$chacha20_consts:
.byte 'e','x','p','a','n','d',' ','3','2','-','b','y','t','e',' ','k'
.byte 'e','x','p','a','n','d',' ','3','2','-','b','y','t','e',' ','k'
L$rol8:
.byte 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14
.byte 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14
L$rol16:
.byte 2,3,0,1, 6,7,4,5, 10,11,8,9, 14,15,12,13
.byte 2,3,0,1, 6,7,4,5, 10,11,8,9, 14,15,12,13
L$avx2_init:
.long 0,0,0,0
L$sse_inc:
.long 1,0,0,0
L$avx2_inc:
.long 2,0,0,0,2,0,0,0
L$clamp:
.quad 0x0FFFFFFC0FFFFFFF, 0x0FFFFFFC0FFFFFFC
.quad 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF
.p2align 4
L$and_masks:
.byte 0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00
.byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff
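// L$and_masks[i] keeps the low i+1 bytes of a 16-byte block; used to
// mask the final partial block before it is absorbed into Poly1305.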
.text
.p2align 6
poly_hash_ad_internal:
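// Absorb the additional data into the Poly1305 state.
// In: %rcx = AD pointer, %r8 = AD length, clamped key r at 0(%rbp)/8(%rbp).
// Out: accumulator h in %r10:%r11:%r12 (three 64-bit limbs, h2 small).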
xorq %r10,%r10
xorq %r11,%r11
xorq %r12,%r12
cmpq $13,%r8
jne L$hash_ad_loop
L$poly_fast_tls_ad:
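// Fast path for the 13-byte TLS record header: RFC 8439 zero-pads AD to
// a whole 16-byte block, so the pad bit is 2^128 (%r12 = 1) and one
// multiply absorbs it.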
movq (%rcx),%r10
movq 5(%rcx),%r11
shrq $24,%r11
movq $1,%r12
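// h = (h + block) * r mod 2^130-5: schoolbook 2x2-limb multiply, then
// fold the bits above 2^130 back in as carry*5 (= carry + 4*carry).
// This sequence is macro-expanded verbatim throughout the file.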
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
ret
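// General path: absorb whole 16-byte AD blocks, then the padded tail.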
L$hash_ad_loop:
cmpq $16,%r8
jb L$hash_ad_tail
addq 0+0(%rcx),%r10
adcq 8+0(%rcx),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rcx),%rcx
subq $16,%r8
jmp L$hash_ad_loop
L$hash_ad_tail:
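// Trailing 1-15 AD bytes: assemble them into %r13:%r14 from the end
// backwards, zero-padded high, then absorb as one block.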
cmpq $0,%r8
je L$hash_ad_done
xorq %r13,%r13
xorq %r14,%r14
xorq %r15,%r15
addq %r8,%rcx
L$hash_ad_tail_loop:
shldq $8,%r13,%r14
shlq $8,%r13
movzbq -1(%rcx),%r15
xorq %r15,%r13
decq %rcx
decq %r8
jne L$hash_ad_tail_loop
addq %r13,%r10
adcq %r14,%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
L$hash_ad_done:
ret
.globl _chacha20_poly1305_open_nohw
.private_extern _chacha20_poly1305_open_nohw
.p2align 6
_chacha20_poly1305_open_nohw:
_CET_ENDBR
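// SSE open (decrypt) path: Poly1305 runs over the ciphertext, so every
// block is hashed before it is XORed with keystream. The frame keeps
// r/s at 0/16(%rbp), the AD and ciphertext lengths at 32(%rbp), key
// state at 48/64(%rbp), a spill slot at 80(%rbp) and block counters at
// 96..144(%rbp).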
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %r9
subq $288 + 0 + 32,%rsp
leaq 32(%rsp),%rbp
andq $-32,%rbp
movq %rdx,%rbx
movq %r8,0+0+32(%rbp)
movq %rbx,8+0+32(%rbp)
cmpq $128,%rbx
jbe L$open_sse_128
movdqa L$chacha20_consts(%rip),%xmm0
movdqu 0(%r9),%xmm4
movdqu 16(%r9),%xmm8
movdqu 32(%r9),%xmm12
movdqa %xmm12,%xmm7
movdqa %xmm4,0+48(%rbp)
movdqa %xmm8,0+64(%rbp)
movdqa %xmm12,0+96(%rbp)
movq $10,%r10
L$open_sse_init_rounds:
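// One ChaCha20 double round (column + diagonal). pshufb against
// L$rol16/L$rol8 gives the 16/8-bit rotates, pslld/psrld pairs give the
// 12/7-bit rotates, and the hand-encoded .byte sequences in this file
// are pshufb (102,..,15,56,0,..) and palignr (102,..,15,58,15,..)
// instructions.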
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
decq %r10
jne L$open_sse_init_rounds
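// Poly1305 key from ChaCha20 block 0: r = first 16 keystream bytes,
// clamped; s = next 16 bytes. Stored at 0(%rbp) and 16(%rbp).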
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
pand L$clamp(%rip),%xmm0
movdqa %xmm0,0+0(%rbp)
movdqa %xmm4,0+16(%rbp)
movq %r8,%r8
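// (no-op move: the AD length is already in %r8; likely an artifact of
// the generator's register-renaming macros)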
call poly_hash_ad_internal
L$open_sse_main_loop:
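// Bulk loop: 256 bytes (four ChaCha20 blocks across %xmm0-%xmm15) per
// iteration, with Poly1305 block multiplies interleaved into the double
// rounds; %r8 walks the ciphertext in %rsi for hashing.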
cmpq $256,%rbx
jb L$open_sse_tail
movdqa L$chacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa %xmm0,%xmm2
movdqa %xmm4,%xmm6
movdqa %xmm8,%xmm10
movdqa %xmm0,%xmm3
movdqa %xmm4,%xmm7
movdqa %xmm8,%xmm11
movdqa 0+96(%rbp),%xmm15
paddd L$sse_inc(%rip),%xmm15
movdqa %xmm15,%xmm14
paddd L$sse_inc(%rip),%xmm14
movdqa %xmm14,%xmm13
paddd L$sse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movdqa %xmm14,0+128(%rbp)
movdqa %xmm15,0+144(%rbp)
movq $4,%rcx
movq %rsi,%r8
L$open_sse_main_loop_rounds:
movdqa %xmm8,0+80(%rbp)
movdqa L$rol16(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
leaq 16(%r8),%r8
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm4
pxor %xmm8,%xmm4
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movdqa L$rol8(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm4
pxor %xmm8,%xmm4
movdqa 0+80(%rbp),%xmm8
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
.byte 102,15,58,15,255,4
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,12
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
movdqa %xmm8,0+80(%rbp)
movdqa L$rol16(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm4
pxor %xmm8,%xmm4
movdqa L$rol8(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm4
pxor %xmm8,%xmm4
movdqa 0+80(%rbp),%xmm8
.byte 102,15,58,15,255,12
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,4
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
decq %rcx
jge L$open_sse_main_loop_rounds
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%r8),%r8
cmpq $-6,%rcx
jg L$open_sse_main_loop_rounds
paddd L$chacha20_consts(%rip),%xmm3
paddd 0+48(%rbp),%xmm7
paddd 0+64(%rbp),%xmm11
paddd 0+144(%rbp),%xmm15
paddd L$chacha20_consts(%rip),%xmm2
paddd 0+48(%rbp),%xmm6
paddd 0+64(%rbp),%xmm10
paddd 0+128(%rbp),%xmm14
paddd L$chacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqa %xmm12,0+80(%rbp)
movdqu 0 + 0(%rsi),%xmm12
pxor %xmm3,%xmm12
movdqu %xmm12,0 + 0(%rdi)
movdqu 16 + 0(%rsi),%xmm12
pxor %xmm7,%xmm12
movdqu %xmm12,16 + 0(%rdi)
movdqu 32 + 0(%rsi),%xmm12
pxor %xmm11,%xmm12
movdqu %xmm12,32 + 0(%rdi)
movdqu 48 + 0(%rsi),%xmm12
pxor %xmm15,%xmm12
movdqu %xmm12,48 + 0(%rdi)
movdqu 0 + 64(%rsi),%xmm3
movdqu 16 + 64(%rsi),%xmm7
movdqu 32 + 64(%rsi),%xmm11
movdqu 48 + 64(%rsi),%xmm15
pxor %xmm3,%xmm2
pxor %xmm7,%xmm6
pxor %xmm11,%xmm10
pxor %xmm14,%xmm15
movdqu %xmm2,0 + 64(%rdi)
movdqu %xmm6,16 + 64(%rdi)
movdqu %xmm10,32 + 64(%rdi)
movdqu %xmm15,48 + 64(%rdi)
movdqu 0 + 128(%rsi),%xmm3
movdqu 16 + 128(%rsi),%xmm7
movdqu 32 + 128(%rsi),%xmm11
movdqu 48 + 128(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 128(%rdi)
movdqu %xmm5,16 + 128(%rdi)
movdqu %xmm9,32 + 128(%rdi)
movdqu %xmm15,48 + 128(%rdi)
movdqu 0 + 192(%rsi),%xmm3
movdqu 16 + 192(%rsi),%xmm7
movdqu 32 + 192(%rsi),%xmm11
movdqu 48 + 192(%rsi),%xmm15
pxor %xmm3,%xmm0
pxor %xmm7,%xmm4
pxor %xmm11,%xmm8
pxor 0+80(%rbp),%xmm15
movdqu %xmm0,0 + 192(%rdi)
movdqu %xmm4,16 + 192(%rdi)
movdqu %xmm8,32 + 192(%rdi)
movdqu %xmm15,48 + 192(%rdi)
leaq 256(%rsi),%rsi
leaq 256(%rdi),%rdi
subq $256,%rbx
jmp L$open_sse_main_loop
L$open_sse_tail:
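// 1..255 bytes remain: dispatch to a 1/2/3/4-block tail, still hashing
// ciphertext alongside the rounds.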
testq %rbx,%rbx
jz L$open_sse_finalize
cmpq $192,%rbx
ja L$open_sse_tail_256
cmpq $128,%rbx
ja L$open_sse_tail_192
cmpq $64,%rbx
ja L$open_sse_tail_128
movdqa L$chacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa 0+96(%rbp),%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
xorq %r8,%r8
movq %rbx,%rcx
cmpq $16,%rcx
jb L$open_sse_tail_64_rounds
L$open_sse_tail_64_rounds_and_x1hash:
addq 0+0(%rsi,%r8,1),%r10
adcq 8+0(%rsi,%r8,1),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
subq $16,%rcx
L$open_sse_tail_64_rounds:
addq $16,%r8
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
cmpq $16,%rcx
jae L$open_sse_tail_64_rounds_and_x1hash
cmpq $160,%r8
jne L$open_sse_tail_64_rounds
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
jmp L$open_sse_tail_64_dec_loop
L$open_sse_tail_128:
movdqa L$chacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa 0+96(%rbp),%xmm13
paddd L$sse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movq %rbx,%rcx
andq $-16,%rcx
xorq %r8,%r8
L$open_sse_tail_128_rounds_and_x1hash:
addq 0+0(%rsi,%r8,1),%r10
adcq 8+0(%rsi,%r8,1),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
L$open_sse_tail_128_rounds:
addq $16,%r8
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
cmpq %rcx,%r8
jb L$open_sse_tail_128_rounds_and_x1hash
cmpq $160,%r8
jne L$open_sse_tail_128_rounds
paddd L$chacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqu 0 + 0(%rsi),%xmm3
movdqu 16 + 0(%rsi),%xmm7
movdqu 32 + 0(%rsi),%xmm11
movdqu 48 + 0(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 0(%rdi)
movdqu %xmm5,16 + 0(%rdi)
movdqu %xmm9,32 + 0(%rdi)
movdqu %xmm15,48 + 0(%rdi)
subq $64,%rbx
leaq 64(%rsi),%rsi
leaq 64(%rdi),%rdi
jmp L$open_sse_tail_64_dec_loop
L$open_sse_tail_192:
movdqa L$chacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa %xmm0,%xmm2
movdqa %xmm4,%xmm6
movdqa %xmm8,%xmm10
movdqa 0+96(%rbp),%xmm14
paddd L$sse_inc(%rip),%xmm14
movdqa %xmm14,%xmm13
paddd L$sse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movdqa %xmm14,0+128(%rbp)
movq %rbx,%rcx
movq $160,%r8
cmpq $160,%rcx
cmovgq %r8,%rcx
andq $-16,%rcx
xorq %r8,%r8
L$open_sse_tail_192_rounds_and_x1hash:
addq 0+0(%rsi,%r8,1),%r10
adcq 8+0(%rsi,%r8,1),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
L$open_sse_tail_192_rounds:
addq $16,%r8
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
cmpq %rcx,%r8
jb L$open_sse_tail_192_rounds_and_x1hash
cmpq $160,%r8
jne L$open_sse_tail_192_rounds
cmpq $176,%rbx
jb L$open_sse_tail_192_finish
addq 0+160(%rsi),%r10
adcq 8+160(%rsi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
cmpq $192,%rbx
jb L$open_sse_tail_192_finish
addq 0+176(%rsi),%r10
adcq 8+176(%rsi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
L$open_sse_tail_192_finish:
paddd L$chacha20_consts(%rip),%xmm2
paddd 0+48(%rbp),%xmm6
paddd 0+64(%rbp),%xmm10
paddd 0+128(%rbp),%xmm14
paddd L$chacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqu 0 + 0(%rsi),%xmm3
movdqu 16 + 0(%rsi),%xmm7
movdqu 32 + 0(%rsi),%xmm11
movdqu 48 + 0(%rsi),%xmm15
pxor %xmm3,%xmm2
pxor %xmm7,%xmm6
pxor %xmm11,%xmm10
pxor %xmm14,%xmm15
movdqu %xmm2,0 + 0(%rdi)
movdqu %xmm6,16 + 0(%rdi)
movdqu %xmm10,32 + 0(%rdi)
movdqu %xmm15,48 + 0(%rdi)
movdqu 0 + 64(%rsi),%xmm3
movdqu 16 + 64(%rsi),%xmm7
movdqu 32 + 64(%rsi),%xmm11
movdqu 48 + 64(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 64(%rdi)
movdqu %xmm5,16 + 64(%rdi)
movdqu %xmm9,32 + 64(%rdi)
movdqu %xmm15,48 + 64(%rdi)
subq $128,%rbx
leaq 128(%rsi),%rsi
leaq 128(%rdi),%rdi
jmp L$open_sse_tail_64_dec_loop
L$open_sse_tail_256:
movdqa L$chacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa %xmm0,%xmm2
movdqa %xmm4,%xmm6
movdqa %xmm8,%xmm10
movdqa %xmm0,%xmm3
movdqa %xmm4,%xmm7
movdqa %xmm8,%xmm11
movdqa 0+96(%rbp),%xmm15
paddd L$sse_inc(%rip),%xmm15
movdqa %xmm15,%xmm14
paddd L$sse_inc(%rip),%xmm14
movdqa %xmm14,%xmm13
paddd L$sse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movdqa %xmm14,0+128(%rbp)
movdqa %xmm15,0+144(%rbp)
xorq %r8,%r8
L$open_sse_tail_256_rounds_and_x1hash:
addq 0+0(%rsi,%r8,1),%r10
adcq 8+0(%rsi,%r8,1),%r11
adcq $1,%r12
movdqa %xmm11,0+80(%rbp)
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm11
pslld $12,%xmm11
psrld $20,%xmm4
pxor %xmm11,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm11
pslld $7,%xmm11
psrld $25,%xmm4
pxor %xmm11,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm11
pslld $12,%xmm11
psrld $20,%xmm5
pxor %xmm11,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm11
pslld $7,%xmm11
psrld $25,%xmm5
pxor %xmm11,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm11
pslld $12,%xmm11
psrld $20,%xmm6
pxor %xmm11,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm11
pslld $7,%xmm11
psrld $25,%xmm6
pxor %xmm11,%xmm6
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
movdqa 0+80(%rbp),%xmm11
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movdqa %xmm9,0+80(%rbp)
paddd %xmm7,%xmm3
pxor %xmm3,%xmm15
pshufb L$rol16(%rip),%xmm15
paddd %xmm15,%xmm11
pxor %xmm11,%xmm7
movdqa %xmm7,%xmm9
pslld $12,%xmm9
psrld $20,%xmm7
pxor %xmm9,%xmm7
paddd %xmm7,%xmm3
pxor %xmm3,%xmm15
pshufb L$rol8(%rip),%xmm15
paddd %xmm15,%xmm11
pxor %xmm11,%xmm7
movdqa %xmm7,%xmm9
pslld $7,%xmm9
psrld $25,%xmm7
pxor %xmm9,%xmm7
.byte 102,15,58,15,255,4
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,12
movdqa 0+80(%rbp),%xmm9
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
movdqa %xmm11,0+80(%rbp)
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm11
pslld $12,%xmm11
psrld $20,%xmm4
pxor %xmm11,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm11
pslld $7,%xmm11
psrld $25,%xmm4
pxor %xmm11,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm11
pslld $12,%xmm11
psrld $20,%xmm5
pxor %xmm11,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm11
pslld $7,%xmm11
psrld $25,%xmm5
pxor %xmm11,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm11
pslld $12,%xmm11
psrld $20,%xmm6
pxor %xmm11,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm11
pslld $7,%xmm11
psrld $25,%xmm6
pxor %xmm11,%xmm6
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
movdqa 0+80(%rbp),%xmm11
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
movdqa %xmm9,0+80(%rbp)
paddd %xmm7,%xmm3
pxor %xmm3,%xmm15
pshufb L$rol16(%rip),%xmm15
paddd %xmm15,%xmm11
pxor %xmm11,%xmm7
movdqa %xmm7,%xmm9
pslld $12,%xmm9
psrld $20,%xmm7
pxor %xmm9,%xmm7
paddd %xmm7,%xmm3
pxor %xmm3,%xmm15
pshufb L$rol8(%rip),%xmm15
paddd %xmm15,%xmm11
pxor %xmm11,%xmm7
movdqa %xmm7,%xmm9
pslld $7,%xmm9
psrld $25,%xmm7
pxor %xmm9,%xmm7
.byte 102,15,58,15,255,12
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,4
movdqa 0+80(%rbp),%xmm9
addq $16,%r8
cmpq $160,%r8
jb L$open_sse_tail_256_rounds_and_x1hash
movq %rbx,%rcx
andq $-16,%rcx
L$open_sse_tail_256_hash:
addq 0+0(%rsi,%r8,1),%r10
adcq 8+0(%rsi,%r8,1),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
addq $16,%r8
cmpq %rcx,%r8
jb L$open_sse_tail_256_hash
paddd L$chacha20_consts(%rip),%xmm3
paddd 0+48(%rbp),%xmm7
paddd 0+64(%rbp),%xmm11
paddd 0+144(%rbp),%xmm15
paddd L$chacha20_consts(%rip),%xmm2
paddd 0+48(%rbp),%xmm6
paddd 0+64(%rbp),%xmm10
paddd 0+128(%rbp),%xmm14
paddd L$chacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqa %xmm12,0+80(%rbp)
movdqu 0 + 0(%rsi),%xmm12
pxor %xmm3,%xmm12
movdqu %xmm12,0 + 0(%rdi)
movdqu 16 + 0(%rsi),%xmm12
pxor %xmm7,%xmm12
movdqu %xmm12,16 + 0(%rdi)
movdqu 32 + 0(%rsi),%xmm12
pxor %xmm11,%xmm12
movdqu %xmm12,32 + 0(%rdi)
movdqu 48 + 0(%rsi),%xmm12
pxor %xmm15,%xmm12
movdqu %xmm12,48 + 0(%rdi)
movdqu 0 + 64(%rsi),%xmm3
movdqu 16 + 64(%rsi),%xmm7
movdqu 32 + 64(%rsi),%xmm11
movdqu 48 + 64(%rsi),%xmm15
pxor %xmm3,%xmm2
pxor %xmm7,%xmm6
pxor %xmm11,%xmm10
pxor %xmm14,%xmm15
movdqu %xmm2,0 + 64(%rdi)
movdqu %xmm6,16 + 64(%rdi)
movdqu %xmm10,32 + 64(%rdi)
movdqu %xmm15,48 + 64(%rdi)
movdqu 0 + 128(%rsi),%xmm3
movdqu 16 + 128(%rsi),%xmm7
movdqu 32 + 128(%rsi),%xmm11
movdqu 48 + 128(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 128(%rdi)
movdqu %xmm5,16 + 128(%rdi)
movdqu %xmm9,32 + 128(%rdi)
movdqu %xmm15,48 + 128(%rdi)
movdqa 0+80(%rbp),%xmm12
subq $192,%rbx
leaq 192(%rsi),%rsi
leaq 192(%rdi),%rdi
L$open_sse_tail_64_dec_loop:
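// Drain whole 16-byte pieces of the remaining keystream; registers
// shift down (%xmm0 <- %xmm4 <- %xmm8 <- %xmm12), covering up to 64 bytes.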
cmpq $16,%rbx
jb L$open_sse_tail_16_init
subq $16,%rbx
movdqu (%rsi),%xmm3
pxor %xmm3,%xmm0
movdqu %xmm0,(%rdi)
leaq 16(%rsi),%rsi
leaq 16(%rdi),%rdi
movdqa %xmm4,%xmm0
movdqa %xmm8,%xmm4
movdqa %xmm12,%xmm8
jmp L$open_sse_tail_64_dec_loop
L$open_sse_tail_16_init:
movdqa %xmm0,%xmm1
L$open_sse_tail_16:
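// Final 1-15 bytes: gather them back-to-front into %xmm3, hash the
// zero-padded ciphertext block (captured in %r13:%r14 before the XOR),
// then write plaintext out a byte at a time.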
testq %rbx,%rbx
jz L$open_sse_finalize
pxor %xmm3,%xmm3
leaq -1(%rsi,%rbx,1),%rsi
movq %rbx,%r8
L$open_sse_tail_16_compose:
pslldq $1,%xmm3
pinsrb $0,(%rsi),%xmm3
subq $1,%rsi
subq $1,%r8
jnz L$open_sse_tail_16_compose
.byte 102,73,15,126,221
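// (the .byte sequence above encodes movq %xmm3,%r13)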
pextrq $1,%xmm3,%r14
pxor %xmm1,%xmm3
L$open_sse_tail_16_extract:
pextrb $0,%xmm3,(%rdi)
psrldq $1,%xmm3
addq $1,%rdi
subq $1,%rbx
jne L$open_sse_tail_16_extract
addq %r13,%r10
adcq %r14,%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
L$open_sse_finalize:
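// Tag: absorb the AD/ciphertext length block from 32(%rbp), reduce h
// fully mod 2^130-5 (the subq/sbbq/cmovc trio is the conditional
// subtraction of the prime), add s from 16(%rbp), and store the 16-byte
// tag through the pointer pushed at entry (%r9).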
addq 0+0+32(%rbp),%r10
adcq 8+0+32(%rbp),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
movq %r10,%r13
movq %r11,%r14
movq %r12,%r15
subq $-5,%r10
sbbq $-1,%r11
sbbq $3,%r12
cmovcq %r13,%r10
cmovcq %r14,%r11
cmovcq %r15,%r12
addq 0+0+16(%rbp),%r10
adcq 8+0+16(%rbp),%r11
addq $288 + 0 + 32,%rsp
popq %r9
movq %r10,(%r9)
movq %r11,8(%r9)
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbx
popq %rbp
ret
L$open_sse_128:
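// Short inputs (<= 128 bytes): three ChaCha20 blocks kept entirely in
// registers; the counter+0 block yields the Poly1305 key, counter+1/+2
// supply keystream.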
movdqu L$chacha20_consts(%rip),%xmm0
movdqa %xmm0,%xmm1
movdqa %xmm0,%xmm2
movdqu 0(%r9),%xmm4
movdqa %xmm4,%xmm5
movdqa %xmm4,%xmm6
movdqu 16(%r9),%xmm8
movdqa %xmm8,%xmm9
movdqa %xmm8,%xmm10
movdqu 32(%r9),%xmm12
movdqa %xmm12,%xmm13
paddd L$sse_inc(%rip),%xmm13
movdqa %xmm13,%xmm14
paddd L$sse_inc(%rip),%xmm14
movdqa %xmm4,%xmm7
movdqa %xmm8,%xmm11
movdqa %xmm13,%xmm15
movq $10,%r10
L$open_sse_128_rounds:
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
decq %r10
jnz L$open_sse_128_rounds
paddd L$chacha20_consts(%rip),%xmm0
paddd L$chacha20_consts(%rip),%xmm1
paddd L$chacha20_consts(%rip),%xmm2
paddd %xmm7,%xmm4
paddd %xmm7,%xmm5
paddd %xmm7,%xmm6
paddd %xmm11,%xmm9
paddd %xmm11,%xmm10
paddd %xmm15,%xmm13
paddd L$sse_inc(%rip),%xmm15
paddd %xmm15,%xmm14
pand L$clamp(%rip),%xmm0
movdqa %xmm0,0+0(%rbp)
movdqa %xmm4,0+16(%rbp)
movq %r8,%r8
call poly_hash_ad_internal
L$open_sse_128_xor_hash:
cmpq $16,%rbx
jb L$open_sse_tail_16
subq $16,%rbx
addq 0+0(%rsi),%r10
adcq 8+0(%rsi),%r11
adcq $1,%r12
movdqu 0(%rsi),%xmm3
pxor %xmm3,%xmm1
movdqu %xmm1,0(%rdi)
leaq 16(%rsi),%rsi
leaq 16(%rdi),%rdi
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
movdqa %xmm5,%xmm1
movdqa %xmm9,%xmm5
movdqa %xmm13,%xmm9
movdqa %xmm2,%xmm13
movdqa %xmm6,%xmm2
movdqa %xmm10,%xmm6
movdqa %xmm14,%xmm10
jmp L$open_sse_128_xor_hash
.globl _chacha20_poly1305_seal_nohw
.private_extern _chacha20_poly1305_seal_nohw
.p2align 6
_chacha20_poly1305_seal_nohw:
_CET_ENDBR
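// SSE seal (encrypt) path: mirror of the open path, but Poly1305 hashes
// the ciphertext just written, so the hash reads advance through %rdi
// rather than %rsi.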
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %r9
subq $288 + 0 + 32,%rsp
leaq 32(%rsp),%rbp
andq $-32,%rbp
movq 56(%r9),%rbx
addq %rdx,%rbx
movq %r8,0+0+32(%rbp)
movq %rbx,8+0+32(%rbp)
movq %rdx,%rbx
cmpq $128,%rbx
jbe L$seal_sse_128
movdqa L$chacha20_consts(%rip),%xmm0
movdqu 0(%r9),%xmm4
movdqu 16(%r9),%xmm8
movdqu 32(%r9),%xmm12
movdqa %xmm0,%xmm1
movdqa %xmm0,%xmm2
movdqa %xmm0,%xmm3
movdqa %xmm4,%xmm5
movdqa %xmm4,%xmm6
movdqa %xmm4,%xmm7
movdqa %xmm8,%xmm9
movdqa %xmm8,%xmm10
movdqa %xmm8,%xmm11
movdqa %xmm12,%xmm15
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,%xmm14
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,%xmm13
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm4,0+48(%rbp)
movdqa %xmm8,0+64(%rbp)
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movdqa %xmm14,0+128(%rbp)
movdqa %xmm15,0+144(%rbp)
movq $10,%r10
L$seal_sse_init_rounds:
movdqa %xmm8,0+80(%rbp)
movdqa L$rol16(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm4
pxor %xmm8,%xmm4
movdqa L$rol8(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm4
pxor %xmm8,%xmm4
movdqa 0+80(%rbp),%xmm8
.byte 102,15,58,15,255,4
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,12
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
movdqa %xmm8,0+80(%rbp)
movdqa L$rol16(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm4
pxor %xmm8,%xmm4
movdqa L$rol8(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm4
pxor %xmm8,%xmm4
movdqa 0+80(%rbp),%xmm8
.byte 102,15,58,15,255,12
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,4
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
decq %r10
jnz L$seal_sse_init_rounds
paddd L$chacha20_consts(%rip),%xmm3
paddd 0+48(%rbp),%xmm7
paddd 0+64(%rbp),%xmm11
paddd 0+144(%rbp),%xmm15
paddd L$chacha20_consts(%rip),%xmm2
paddd 0+48(%rbp),%xmm6
paddd 0+64(%rbp),%xmm10
paddd 0+128(%rbp),%xmm14
paddd L$chacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
pand L$clamp(%rip),%xmm3
movdqa %xmm3,0+0(%rbp)
movdqa %xmm7,0+16(%rbp)
movq %r8,%r8
call poly_hash_ad_internal
movdqu 0 + 0(%rsi),%xmm3
movdqu 16 + 0(%rsi),%xmm7
movdqu 32 + 0(%rsi),%xmm11
movdqu 48 + 0(%rsi),%xmm15
pxor %xmm3,%xmm2
pxor %xmm7,%xmm6
pxor %xmm11,%xmm10
pxor %xmm14,%xmm15
movdqu %xmm2,0 + 0(%rdi)
movdqu %xmm6,16 + 0(%rdi)
movdqu %xmm10,32 + 0(%rdi)
movdqu %xmm15,48 + 0(%rdi)
movdqu 0 + 64(%rsi),%xmm3
movdqu 16 + 64(%rsi),%xmm7
movdqu 32 + 64(%rsi),%xmm11
movdqu 48 + 64(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 64(%rdi)
movdqu %xmm5,16 + 64(%rdi)
movdqu %xmm9,32 + 64(%rdi)
movdqu %xmm15,48 + 64(%rdi)
cmpq $192,%rbx
ja L$seal_sse_main_init
movq $128,%rcx
subq $128,%rbx
leaq 128(%rsi),%rsi
jmp L$seal_sse_128_tail_hash
L$seal_sse_main_init:
movdqu 0 + 128(%rsi),%xmm3
movdqu 16 + 128(%rsi),%xmm7
movdqu 32 + 128(%rsi),%xmm11
movdqu 48 + 128(%rsi),%xmm15
pxor %xmm3,%xmm0
pxor %xmm7,%xmm4
pxor %xmm11,%xmm8
pxor %xmm12,%xmm15
movdqu %xmm0,0 + 128(%rdi)
movdqu %xmm4,16 + 128(%rdi)
movdqu %xmm8,32 + 128(%rdi)
movdqu %xmm15,48 + 128(%rdi)
movq $192,%rcx
subq $192,%rbx
leaq 192(%rsi),%rsi
movq $2,%rcx
movq $8,%r8
cmpq $64,%rbx
jbe L$seal_sse_tail_64
cmpq $128,%rbx
jbe L$seal_sse_tail_128
cmpq $192,%rbx
jbe L$seal_sse_tail_192
L$seal_sse_main_loop:
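// Bulk seal loop: generate and XOR 256 bytes, hashing the ciphertext
// produced on the previous pass; %rcx and %r8 schedule how many hash
// steps ride along with the rounds.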
movdqa L$chacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa %xmm0,%xmm2
movdqa %xmm4,%xmm6
movdqa %xmm8,%xmm10
movdqa %xmm0,%xmm3
movdqa %xmm4,%xmm7
movdqa %xmm8,%xmm11
movdqa 0+96(%rbp),%xmm15
paddd L$sse_inc(%rip),%xmm15
movdqa %xmm15,%xmm14
paddd L$sse_inc(%rip),%xmm14
movdqa %xmm14,%xmm13
paddd L$sse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movdqa %xmm14,0+128(%rbp)
movdqa %xmm15,0+144(%rbp)
.p2align 5
L$seal_sse_main_rounds:
movdqa %xmm8,0+80(%rbp)
movdqa L$rol16(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm4
pxor %xmm8,%xmm4
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movdqa L$rol8(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm4
pxor %xmm8,%xmm4
movdqa 0+80(%rbp),%xmm8
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
.byte 102,15,58,15,255,4
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,12
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
movdqa %xmm8,0+80(%rbp)
movdqa L$rol16(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $20,%xmm8
pslld $32-20,%xmm4
pxor %xmm8,%xmm4
movdqa L$rol8(%rip),%xmm8
paddd %xmm7,%xmm3
paddd %xmm6,%xmm2
paddd %xmm5,%xmm1
paddd %xmm4,%xmm0
pxor %xmm3,%xmm15
pxor %xmm2,%xmm14
pxor %xmm1,%xmm13
pxor %xmm0,%xmm12
.byte 102,69,15,56,0,248
.byte 102,69,15,56,0,240
.byte 102,69,15,56,0,232
.byte 102,69,15,56,0,224
movdqa 0+80(%rbp),%xmm8
paddd %xmm15,%xmm11
paddd %xmm14,%xmm10
paddd %xmm13,%xmm9
paddd %xmm12,%xmm8
pxor %xmm11,%xmm7
pxor %xmm10,%xmm6
pxor %xmm9,%xmm5
pxor %xmm8,%xmm4
movdqa %xmm8,0+80(%rbp)
movdqa %xmm7,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm7
pxor %xmm8,%xmm7
movdqa %xmm6,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm6
pxor %xmm8,%xmm6
movdqa %xmm5,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm5
pxor %xmm8,%xmm5
movdqa %xmm4,%xmm8
psrld $25,%xmm8
pslld $32-25,%xmm4
pxor %xmm8,%xmm4
movdqa 0+80(%rbp),%xmm8
.byte 102,15,58,15,255,12
.byte 102,69,15,58,15,219,8
.byte 102,69,15,58,15,255,4
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
leaq 16(%rdi),%rdi
decq %r8
jge L$seal_sse_main_rounds
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
decq %rcx
jg L$seal_sse_main_rounds
paddd L$chacha20_consts(%rip),%xmm3
paddd 0+48(%rbp),%xmm7
paddd 0+64(%rbp),%xmm11
paddd 0+144(%rbp),%xmm15
paddd L$chacha20_consts(%rip),%xmm2
paddd 0+48(%rbp),%xmm6
paddd 0+64(%rbp),%xmm10
paddd 0+128(%rbp),%xmm14
paddd L$chacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqa %xmm14,0+80(%rbp)
movdqu 0 + 0(%rsi),%xmm14
pxor %xmm3,%xmm14
movdqu %xmm14,0 + 0(%rdi)
movdqu 16 + 0(%rsi),%xmm14
pxor %xmm7,%xmm14
movdqu %xmm14,16 + 0(%rdi)
movdqu 32 + 0(%rsi),%xmm14
pxor %xmm11,%xmm14
movdqu %xmm14,32 + 0(%rdi)
movdqu 48 + 0(%rsi),%xmm14
pxor %xmm15,%xmm14
movdqu %xmm14,48 + 0(%rdi)
movdqa 0+80(%rbp),%xmm14
movdqu 0 + 64(%rsi),%xmm3
movdqu 16 + 64(%rsi),%xmm7
movdqu 32 + 64(%rsi),%xmm11
movdqu 48 + 64(%rsi),%xmm15
pxor %xmm3,%xmm2
pxor %xmm7,%xmm6
pxor %xmm11,%xmm10
pxor %xmm14,%xmm15
movdqu %xmm2,0 + 64(%rdi)
movdqu %xmm6,16 + 64(%rdi)
movdqu %xmm10,32 + 64(%rdi)
movdqu %xmm15,48 + 64(%rdi)
movdqu 0 + 128(%rsi),%xmm3
movdqu 16 + 128(%rsi),%xmm7
movdqu 32 + 128(%rsi),%xmm11
movdqu 48 + 128(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 128(%rdi)
movdqu %xmm5,16 + 128(%rdi)
movdqu %xmm9,32 + 128(%rdi)
movdqu %xmm15,48 + 128(%rdi)
cmpq $256,%rbx
ja L$seal_sse_main_loop_xor
movq $192,%rcx
subq $192,%rbx
leaq 192(%rsi),%rsi
jmp L$seal_sse_128_tail_hash
L$seal_sse_main_loop_xor:
movdqu 0 + 192(%rsi),%xmm3
movdqu 16 + 192(%rsi),%xmm7
movdqu 32 + 192(%rsi),%xmm11
movdqu 48 + 192(%rsi),%xmm15
pxor %xmm3,%xmm0
pxor %xmm7,%xmm4
pxor %xmm11,%xmm8
pxor %xmm12,%xmm15
movdqu %xmm0,0 + 192(%rdi)
movdqu %xmm4,16 + 192(%rdi)
movdqu %xmm8,32 + 192(%rdi)
movdqu %xmm15,48 + 192(%rdi)
leaq 256(%rsi),%rsi
subq $256,%rbx
movq $6,%rcx
movq $4,%r8
cmpq $192,%rbx
jg L$seal_sse_main_loop
movq %rbx,%rcx
testq %rbx,%rbx
je L$seal_sse_128_tail_hash
movq $6,%rcx
cmpq $128,%rbx
ja L$seal_sse_tail_192
cmpq $64,%rbx
ja L$seal_sse_tail_128
L$seal_sse_tail_64:
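// Seal tails (<= 192 bytes left): the _x2hash/_x1hash loops absorb one
// or two pending ciphertext blocks per double round while the tail
// keystream is computed.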
movdqa L$chacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa 0+96(%rbp),%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
L$seal_sse_tail_64_rounds_and_x2hash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
L$seal_sse_tail_64_rounds_and_x1hash:
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
decq %rcx
jg L$seal_sse_tail_64_rounds_and_x2hash
decq %r8
jge L$seal_sse_tail_64_rounds_and_x1hash
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
jmp L$seal_sse_128_tail_xor
L$seal_sse_tail_128:
movdqa L$chacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa 0+96(%rbp),%xmm13
paddd L$sse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
L$seal_sse_tail_128_rounds_and_x2hash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
L$seal_sse_tail_128_rounds_and_x1hash:
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
leaq 16(%rdi),%rdi
decq %rcx
jg L$seal_sse_tail_128_rounds_and_x2hash
decq %r8
jge L$seal_sse_tail_128_rounds_and_x1hash
paddd L$chacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqu 0 + 0(%rsi),%xmm3
movdqu 16 + 0(%rsi),%xmm7
movdqu 32 + 0(%rsi),%xmm11
movdqu 48 + 0(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 0(%rdi)
movdqu %xmm5,16 + 0(%rdi)
movdqu %xmm9,32 + 0(%rdi)
movdqu %xmm15,48 + 0(%rdi)
movq $64,%rcx
subq $64,%rbx
leaq 64(%rsi),%rsi
jmp L$seal_sse_128_tail_hash
L$seal_sse_tail_192:
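# Seal tail of 129..192 bytes: same structure with three parallel blocks
# (third state copy in xmm2/6/10/14).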
movdqa L$chacha20_consts(%rip),%xmm0
movdqa 0+48(%rbp),%xmm4
movdqa 0+64(%rbp),%xmm8
movdqa %xmm0,%xmm1
movdqa %xmm4,%xmm5
movdqa %xmm8,%xmm9
movdqa %xmm0,%xmm2
movdqa %xmm4,%xmm6
movdqa %xmm8,%xmm10
movdqa 0+96(%rbp),%xmm14
paddd L$sse_inc(%rip),%xmm14
movdqa %xmm14,%xmm13
paddd L$sse_inc(%rip),%xmm13
movdqa %xmm13,%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,0+96(%rbp)
movdqa %xmm13,0+112(%rbp)
movdqa %xmm14,0+128(%rbp)
L$seal_sse_tail_192_rounds_and_x2hash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
L$seal_sse_tail_192_rounds_and_x1hash:
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
leaq 16(%rdi),%rdi
decq %rcx
jg L$seal_sse_tail_192_rounds_and_x2hash
decq %r8
jge L$seal_sse_tail_192_rounds_and_x1hash
paddd L$chacha20_consts(%rip),%xmm2
paddd 0+48(%rbp),%xmm6
paddd 0+64(%rbp),%xmm10
paddd 0+128(%rbp),%xmm14
paddd L$chacha20_consts(%rip),%xmm1
paddd 0+48(%rbp),%xmm5
paddd 0+64(%rbp),%xmm9
paddd 0+112(%rbp),%xmm13
paddd L$chacha20_consts(%rip),%xmm0
paddd 0+48(%rbp),%xmm4
paddd 0+64(%rbp),%xmm8
paddd 0+96(%rbp),%xmm12
movdqu 0 + 0(%rsi),%xmm3
movdqu 16 + 0(%rsi),%xmm7
movdqu 32 + 0(%rsi),%xmm11
movdqu 48 + 0(%rsi),%xmm15
pxor %xmm3,%xmm2
pxor %xmm7,%xmm6
pxor %xmm11,%xmm10
pxor %xmm14,%xmm15
movdqu %xmm2,0 + 0(%rdi)
movdqu %xmm6,16 + 0(%rdi)
movdqu %xmm10,32 + 0(%rdi)
movdqu %xmm15,48 + 0(%rdi)
movdqu 0 + 64(%rsi),%xmm3
movdqu 16 + 64(%rsi),%xmm7
movdqu 32 + 64(%rsi),%xmm11
movdqu 48 + 64(%rsi),%xmm15
pxor %xmm3,%xmm1
pxor %xmm7,%xmm5
pxor %xmm11,%xmm9
pxor %xmm13,%xmm15
movdqu %xmm1,0 + 64(%rdi)
movdqu %xmm5,16 + 64(%rdi)
movdqu %xmm9,32 + 64(%rdi)
movdqu %xmm15,48 + 64(%rdi)
movq $128,%rcx
subq $128,%rbx
leaq 128(%rsi),%rsi
L$seal_sse_128_tail_hash:
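# Hash whatever whole 16-byte ciphertext blocks remain (%rcx holds the byte
# count still to be hashed) before xoring out the rest of the keystream.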
cmpq $16,%rcx
jb L$seal_sse_128_tail_xor
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
subq $16,%rcx
leaq 16(%rdi),%rdi
jmp L$seal_sse_128_tail_hash
L$seal_sse_128_tail_xor:
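# Xor and write keystream 16 bytes at a time, hashing each ciphertext block
# as it is produced; consumed keystream registers are rotated out
# (xmm0 <- xmm4 <- xmm8 <- xmm12 <- xmm1 <- ...).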
cmpq $16,%rbx
jb L$seal_sse_tail_16
subq $16,%rbx
movdqu 0(%rsi),%xmm3
pxor %xmm3,%xmm0
movdqu %xmm0,0(%rdi)
addq 0(%rdi),%r10
adcq 8(%rdi),%r11
adcq $1,%r12
leaq 16(%rsi),%rsi
leaq 16(%rdi),%rdi
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
movdqa %xmm4,%xmm0
movdqa %xmm8,%xmm4
movdqa %xmm12,%xmm8
movdqa %xmm1,%xmm12
movdqa %xmm5,%xmm1
movdqa %xmm9,%xmm5
movdqa %xmm13,%xmm9
jmp L$seal_sse_128_tail_xor
L$seal_sse_tail_16:
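# Fewer than 16 bytes of plaintext remain: collect them back to front into
# xmm15 one byte at a time (pslldq/pinsrb), xor with the last keystream block
# in xmm0, and emit the ciphertext bytewise. The block is not hashed yet: it
# may first be topped up with bytes from the caller's extra_in buffer below.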
testq %rbx,%rbx
jz L$process_blocks_of_extra_in
movq %rbx,%r8
movq %rbx,%rcx
leaq -1(%rsi,%rbx,1),%rsi
pxor %xmm15,%xmm15
L$seal_sse_tail_16_compose:
pslldq $1,%xmm15
pinsrb $0,(%rsi),%xmm15
leaq -1(%rsi),%rsi
decq %rcx
jne L$seal_sse_tail_16_compose
pxor %xmm0,%xmm15
movq %rbx,%rcx
movdqu %xmm15,%xmm0
L$seal_sse_tail_16_extract:
pextrb $0,%xmm0,(%rdi)
psrldq $1,%xmm0
addq $1,%rdi
subq $1,%rcx
jnz L$seal_sse_tail_16_extract
movq 288 + 0 + 32(%rsp),%r9
movq 56(%r9),%r14
movq 48(%r9),%r13
testq %r14,%r14
jz L$process_partial_block
movq $16,%r15
subq %rbx,%r15
cmpq %r15,%r14
jge L$load_extra_in
movq %r14,%r15
L$load_extra_in:
leaq -1(%r13,%r15,1),%rsi
addq %r15,%r13
subq %r15,%r14
movq %r13,48(%r9)
movq %r14,56(%r9)
addq %r15,%r8
pxor %xmm11,%xmm11
L$load_extra_load_loop:
pslldq $1,%xmm11
pinsrb $0,(%rsi),%xmm11
leaq -1(%rsi),%rsi
subq $1,%r15
jnz L$load_extra_load_loop
movq %rbx,%r15
L$load_extra_shift_loop:
pslldq $1,%xmm11
subq $1,%r15
jnz L$load_extra_shift_loop
leaq L$and_masks(%rip),%r15
shlq $4,%rbx
pand -16(%r15,%rbx,1),%xmm15
por %xmm11,%xmm15
.byte 102,77,15,126,253
pextrq $1,%xmm15,%r14
addq %r13,%r10
adcq %r14,%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
L$process_blocks_of_extra_in:
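# Absorb the extra_in buffer: additional ciphertext, produced outside this
# call, whose pointer and length appear to live at offsets 48 and 56 of the
# saved data-union pointer and which must still be folded into the tag.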
movq 288 + 0 + 32(%rsp),%r9
movq 48(%r9),%rsi
movq 56(%r9),%r8
movq %r8,%rcx
shrq $4,%r8
L$process_extra_hash_loop:
jz L$process_extra_in_trailer
addq 0+0(%rsi),%r10
adcq 8+0(%rsi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rsi),%rsi
subq $1,%r8
jmp L$process_extra_hash_loop
L$process_extra_in_trailer:
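# Hash the sub-16-byte remainder of extra_in, loaded back to front like the
# main tail above.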
andq $15,%rcx
movq %rcx,%rbx
jz L$do_length_block
leaq -1(%rsi,%rcx,1),%rsi
L$process_extra_in_trailer_load:
pslldq $1,%xmm15
pinsrb $0,(%rsi),%xmm15
leaq -1(%rsi),%rsi
subq $1,%rcx
jnz L$process_extra_in_trailer_load
L$process_partial_block:
leaq L$and_masks(%rip),%r15
shlq $4,%rbx
pand -16(%r15,%rbx,1),%xmm15
.byte 102,77,15,126,253
pextrq $1,%xmm15,%r14
addq %r13,%r10
adcq %r14,%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
L$do_length_block:
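# Hash the final length block (ad length and ciphertext length, stored at
# 32(%rbp) by the prologue) to finish the Poly1305 evaluation.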
addq 0+0+32(%rbp),%r10
adcq 8+0+32(%rbp),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
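# Final freeze and tag: reduce h fully mod 2^130 - 5 by conditionally
# subtracting the modulus (the sub/sbb chain computes h - p; the cmovc chain
# keeps the original h if that borrows), then add the s half of the key from
# 16(%rbp) and store the 16-byte tag through the pointer saved on the stack.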
movq %r10,%r13
movq %r11,%r14
movq %r12,%r15
subq $-5,%r10
sbbq $-1,%r11
sbbq $3,%r12
cmovcq %r13,%r10
cmovcq %r14,%r11
cmovcq %r15,%r12
addq 0+0+16(%rbp),%r10
adcq 8+0+16(%rbp),%r11
addq $288 + 0 + 32,%rsp
popq %r9
movq %r10,(%r9)
movq %r11,8(%r9)
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbx
popq %rbp
ret
L$seal_sse_128:
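# Short-input seal (at most 128 data bytes): three ChaCha20 blocks held
# entirely in registers. The counter-0 block (xmm2/xmm6 after feed-forward)
# becomes the Poly1305 key - r is clamped and stored at 0(%rbp), s at
# 16(%rbp) - and the other two blocks supply keystream.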
movdqu L$chacha20_consts(%rip),%xmm0
movdqa %xmm0,%xmm1
movdqa %xmm0,%xmm2
movdqu 0(%r9),%xmm4
movdqa %xmm4,%xmm5
movdqa %xmm4,%xmm6
movdqu 16(%r9),%xmm8
movdqa %xmm8,%xmm9
movdqa %xmm8,%xmm10
movdqu 32(%r9),%xmm14
movdqa %xmm14,%xmm12
paddd L$sse_inc(%rip),%xmm12
movdqa %xmm12,%xmm13
paddd L$sse_inc(%rip),%xmm13
movdqa %xmm4,%xmm7
movdqa %xmm8,%xmm11
movdqa %xmm12,%xmm15
movq $10,%r10
L$seal_sse_128_rounds:
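# Each iteration is one ChaCha20 double round; per quarter round:
#   a += b; d ^= a; d <<<= 16
#   c += d; b ^= c; b <<<= 12
#   a += b; d ^= a; d <<<=  8
#   c += d; b ^= c; b <<<=  7
# with rotations done as pshufb (16, 8) or shift/shift/xor pairs (12, 7).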
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,4
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,12
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,4
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,12
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,4
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,12
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol16(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $12,%xmm3
psrld $20,%xmm4
pxor %xmm3,%xmm4
paddd %xmm4,%xmm0
pxor %xmm0,%xmm12
pshufb L$rol8(%rip),%xmm12
paddd %xmm12,%xmm8
pxor %xmm8,%xmm4
movdqa %xmm4,%xmm3
pslld $7,%xmm3
psrld $25,%xmm4
pxor %xmm3,%xmm4
.byte 102,15,58,15,228,12
.byte 102,69,15,58,15,192,8
.byte 102,69,15,58,15,228,4
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol16(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $12,%xmm3
psrld $20,%xmm5
pxor %xmm3,%xmm5
paddd %xmm5,%xmm1
pxor %xmm1,%xmm13
pshufb L$rol8(%rip),%xmm13
paddd %xmm13,%xmm9
pxor %xmm9,%xmm5
movdqa %xmm5,%xmm3
pslld $7,%xmm3
psrld $25,%xmm5
pxor %xmm3,%xmm5
.byte 102,15,58,15,237,12
.byte 102,69,15,58,15,201,8
.byte 102,69,15,58,15,237,4
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol16(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $12,%xmm3
psrld $20,%xmm6
pxor %xmm3,%xmm6
paddd %xmm6,%xmm2
pxor %xmm2,%xmm14
pshufb L$rol8(%rip),%xmm14
paddd %xmm14,%xmm10
pxor %xmm10,%xmm6
movdqa %xmm6,%xmm3
pslld $7,%xmm3
psrld $25,%xmm6
pxor %xmm3,%xmm6
.byte 102,15,58,15,246,12
.byte 102,69,15,58,15,210,8
.byte 102,69,15,58,15,246,4
decq %r10
jnz L$seal_sse_128_rounds
paddd L$chacha20_consts(%rip),%xmm0
paddd L$chacha20_consts(%rip),%xmm1
paddd L$chacha20_consts(%rip),%xmm2
paddd %xmm7,%xmm4
paddd %xmm7,%xmm5
paddd %xmm7,%xmm6
paddd %xmm11,%xmm8
paddd %xmm11,%xmm9
paddd %xmm15,%xmm12
paddd L$sse_inc(%rip),%xmm15
paddd %xmm15,%xmm13
pand L$clamp(%rip),%xmm2
movdqa %xmm2,0+0(%rbp)
movdqa %xmm6,0+16(%rbp)
movq %r8,%r8
call poly_hash_ad_internal
jmp L$seal_sse_128_tail_xor
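# AVX2 open (decrypt) path. Arguments appear to follow the SysV convention
# used elsewhere in this file: %rdi out, %rsi in, %rdx in_len, %rcx ad,
# %r8 ad_len, %r9 data union (key/nonce in, tag out).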
.globl _chacha20_poly1305_open_avx2
.private_extern _chacha20_poly1305_open_avx2
.p2align 6
_chacha20_poly1305_open_avx2:
_CET_ENDBR
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %r9
subq $288 + 0 + 32,%rsp
leaq 32(%rsp),%rbp
andq $-32,%rbp
movq %rdx,%rbx
movq %r8,0+0+32(%rbp)
movq %rbx,8+0+32(%rbp)
vzeroupper
vmovdqa L$chacha20_consts(%rip),%ymm0
vbroadcasti128 0(%r9),%ymm4
vbroadcasti128 16(%r9),%ymm8
vbroadcasti128 32(%r9),%ymm12
vpaddd L$avx2_init(%rip),%ymm12,%ymm12
cmpq $192,%rbx
jbe L$open_avx2_192
cmpq $320,%rbx
jbe L$open_avx2_320
vmovdqa %ymm4,0+64(%rbp)
vmovdqa %ymm8,0+96(%rbp)
vmovdqa %ymm12,0+160(%rbp)
movq $10,%r10
L$open_avx2_init_rounds:
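# One ChaCha20 double round per iteration (10 total); the two 128-bit lanes
# compute blocks 0 and 1. Block 0 becomes the clamped Poly1305 key, block 1
# the first 64 bytes of keystream.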
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
decq %r10
jne L$open_avx2_init_rounds
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vpand L$clamp(%rip),%ymm3,%ymm3
vmovdqa %ymm3,0+0(%rbp)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm4
movq %r8,%r8
call poly_hash_ad_internal
xorq %rcx,%rcx
L$open_avx2_init_hash:
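# Hash the first 64 bytes of ciphertext (the keystream for them is already
# held in ymm0/ymm4) before decrypting them.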
addq 0+0(%rsi,%rcx,1),%r10
adcq 8+0(%rsi,%rcx,1),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
addq $16,%rcx
cmpq $64,%rcx
jne L$open_avx2_init_hash
vpxor 0(%rsi),%ymm0,%ymm0
vpxor 32(%rsi),%ymm4,%ymm4
vmovdqu %ymm0,0(%rdi)
vmovdqu %ymm4,32(%rdi)
leaq 64(%rsi),%rsi
leaq 64(%rdi),%rdi
subq $64,%rbx
L$open_avx2_main_loop:
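# Main decrypt loop, 512 bytes per iteration: four two-block AVX2 states
# (counters staged at 160..256(%rbp)) run 10 double rounds with the Poly1305
# multiply interleaved between vector ops (mulx form); 48 bytes of
# ciphertext are hashed per double round and the last 32 after the rounds.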
cmpq $512,%rbx
jb L$open_avx2_main_loop_done
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa %ymm0,%ymm3
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm15
vpaddd %ymm15,%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm15,0+256(%rbp)
vmovdqa %ymm14,0+224(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm12,0+160(%rbp)
xorq %rcx,%rcx
L$open_avx2_main_loop_rounds:
addq 0+0(%rsi,%rcx,1),%r10
adcq 8+0(%rsi,%rcx,1),%r11
adcq $1,%r12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
addq %rax,%r15
adcq %rdx,%r9
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
addq 0+16(%rsi,%rcx,1),%r10
adcq 8+16(%rsi,%rcx,1),%r11
adcq $1,%r12
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $4,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $12,%ymm15,%ymm15,%ymm15
vpalignr $4,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm14,%ymm14,%ymm14
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpalignr $4,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $4,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
addq %rax,%r15
adcq %rdx,%r9
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
addq 0+32(%rsi,%rcx,1),%r10
adcq 8+32(%rsi,%rcx,1),%r11
adcq $1,%r12
leaq 48(%rcx),%rcx
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
addq %rax,%r15
adcq %rdx,%r9
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $12,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $4,%ymm15,%ymm15,%ymm15
vpalignr $12,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $12,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $12,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpalignr $4,%ymm12,%ymm12,%ymm12
cmpq $60*8,%rcx
jne L$open_avx2_main_loop_rounds
vpaddd L$chacha20_consts(%rip),%ymm3,%ymm3
vpaddd 0+64(%rbp),%ymm7,%ymm7
vpaddd 0+96(%rbp),%ymm11,%ymm11
vpaddd 0+256(%rbp),%ymm15,%ymm15
vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vmovdqa %ymm0,0+128(%rbp)
addq 0+60*8(%rsi),%r10
adcq 8+60*8(%rsi),%r11
adcq $1,%r12
vperm2i128 $0x02,%ymm3,%ymm7,%ymm0
vperm2i128 $0x13,%ymm3,%ymm7,%ymm7
vperm2i128 $0x02,%ymm11,%ymm15,%ymm3
vperm2i128 $0x13,%ymm11,%ymm15,%ymm11
vpxor 0+0(%rsi),%ymm0,%ymm0
vpxor 32+0(%rsi),%ymm3,%ymm3
vpxor 64+0(%rsi),%ymm7,%ymm7
vpxor 96+0(%rsi),%ymm11,%ymm11
vmovdqu %ymm0,0+0(%rdi)
vmovdqu %ymm3,32+0(%rdi)
vmovdqu %ymm7,64+0(%rdi)
vmovdqu %ymm11,96+0(%rdi)
vmovdqa 0+128(%rbp),%ymm0
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vperm2i128 $0x02,%ymm2,%ymm6,%ymm3
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+128(%rsi),%ymm3,%ymm3
vpxor 32+128(%rsi),%ymm2,%ymm2
vpxor 64+128(%rsi),%ymm6,%ymm6
vpxor 96+128(%rsi),%ymm10,%ymm10
vmovdqu %ymm3,0+128(%rdi)
vmovdqu %ymm2,32+128(%rdi)
vmovdqu %ymm6,64+128(%rdi)
vmovdqu %ymm10,96+128(%rdi)
addq 0+60*8+16(%rsi),%r10
adcq 8+60*8+16(%rsi),%r11
adcq $1,%r12
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+256(%rsi),%ymm3,%ymm3
vpxor 32+256(%rsi),%ymm1,%ymm1
vpxor 64+256(%rsi),%ymm5,%ymm5
vpxor 96+256(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+256(%rdi)
vmovdqu %ymm1,32+256(%rdi)
vmovdqu %ymm5,64+256(%rdi)
vmovdqu %ymm9,96+256(%rdi)
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vperm2i128 $0x13,%ymm0,%ymm4,%ymm4
vperm2i128 $0x02,%ymm8,%ymm12,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm8
vpxor 0+384(%rsi),%ymm3,%ymm3
vpxor 32+384(%rsi),%ymm0,%ymm0
vpxor 64+384(%rsi),%ymm4,%ymm4
vpxor 96+384(%rsi),%ymm8,%ymm8
vmovdqu %ymm3,0+384(%rdi)
vmovdqu %ymm0,32+384(%rdi)
vmovdqu %ymm4,64+384(%rdi)
vmovdqu %ymm8,96+384(%rdi)
leaq 512(%rsi),%rsi
leaq 512(%rdi),%rdi
subq $512,%rbx
jmp L$open_avx2_main_loop
L$open_avx2_main_loop_done:
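# Under 512 bytes left: pick a tail path sized to the remaining data
# (1, 2, 3 or 4 states' worth of keystream).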
testq %rbx,%rbx
vzeroupper
je L$open_sse_finalize
cmpq $384,%rbx
ja L$open_avx2_tail_512
cmpq $256,%rbx
ja L$open_avx2_tail_384
cmpq $128,%rbx
ja L$open_avx2_tail_256
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm12
vmovdqa %ymm12,0+160(%rbp)
xorq %r8,%r8
movq %rbx,%rcx
andq $-16,%rcx
testq %rcx,%rcx
je L$open_avx2_tail_128_rounds
L$open_avx2_tail_128_rounds_and_x1hash:
addq 0+0(%rsi,%r8,1),%r10
adcq 8+0(%rsi,%r8,1),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
L$open_avx2_tail_128_rounds:
addq $16,%r8
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
cmpq %rcx,%r8
jb L$open_avx2_tail_128_rounds_and_x1hash
cmpq $160,%r8
jne L$open_avx2_tail_128_rounds
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
jmp L$open_avx2_tail_128_xor
L$open_avx2_tail_256:
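# 129..256 byte tail: two two-block states; the first (len-128)/16 double
# rounds (capped at 10) also hash one ciphertext block each, and any blocks
# left over are hashed in the loop after the rounds.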
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
movq %rbx,0+128(%rbp)
movq %rbx,%rcx
subq $128,%rcx
shrq $4,%rcx
movq $10,%r8
cmpq $10,%rcx
cmovgq %r8,%rcx
movq %rsi,%rbx
xorq %r8,%r8
L$open_avx2_tail_256_rounds_and_x1hash:
addq 0+0(%rbx),%r10
adcq 8+0(%rbx),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rbx),%rbx
L$open_avx2_tail_256_rounds:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
incq %r8
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm6,%ymm6,%ymm6
cmpq %rcx,%r8
jb L$open_avx2_tail_256_rounds_and_x1hash
cmpq $10,%r8
jne L$open_avx2_tail_256_rounds
movq %rbx,%r8
subq %rsi,%rbx
movq %rbx,%rcx
movq 0+128(%rbp),%rbx
L$open_avx2_tail_256_hash:
addq $16,%rcx
cmpq %rbx,%rcx
jg L$open_avx2_tail_256_done
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%r8),%r8
jmp L$open_avx2_tail_256_hash
L$open_avx2_tail_256_done:
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+0(%rsi),%ymm3,%ymm3
vpxor 32+0(%rsi),%ymm1,%ymm1
vpxor 64+0(%rsi),%ymm5,%ymm5
vpxor 96+0(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+0(%rdi)
vmovdqu %ymm1,32+0(%rdi)
vmovdqu %ymm5,64+0(%rdi)
vmovdqu %ymm9,96+0(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
leaq 128(%rsi),%rsi
leaq 128(%rdi),%rdi
subq $128,%rbx
jmp L$open_avx2_tail_128_xor
L$open_avx2_tail_384:
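# 257..384 byte tail: three two-block states, hashing two ciphertext blocks
# per double round for the first (len-256)/16 + 6 rounds (capped at 10),
# with the remainder hashed after the rounds.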
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm14,0+224(%rbp)
movq %rbx,0+128(%rbp)
movq %rbx,%rcx
subq $256,%rcx
shrq $4,%rcx
addq $6,%rcx
movq $10,%r8
cmpq $10,%rcx
cmovgq %r8,%rcx
movq %rsi,%rbx
xorq %r8,%r8
L$open_avx2_tail_384_rounds_and_x2hash:
addq 0+0(%rbx),%r10
adcq 8+0(%rbx),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rbx),%rbx
L$open_avx2_tail_384_rounds_and_x1hash:
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm6,%ymm6,%ymm6
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
addq 0+0(%rbx),%r10
adcq 8+0(%rbx),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rbx),%rbx
incq %r8
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm6,%ymm6,%ymm6
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
cmpq %rcx,%r8
jb L$open_avx2_tail_384_rounds_and_x2hash
cmpq $10,%r8
jne L$open_avx2_tail_384_rounds_and_x1hash
movq %rbx,%r8
subq %rsi,%rbx
movq %rbx,%rcx
movq 0+128(%rbp),%rbx
L$open_avx2_384_tail_hash:
addq $16,%rcx
cmpq %rbx,%rcx
jg L$open_avx2_384_tail_done
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%r8),%r8
jmp L$open_avx2_384_tail_hash
L$open_avx2_384_tail_done:
vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x02,%ymm2,%ymm6,%ymm3
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+0(%rsi),%ymm3,%ymm3
vpxor 32+0(%rsi),%ymm2,%ymm2
vpxor 64+0(%rsi),%ymm6,%ymm6
vpxor 96+0(%rsi),%ymm10,%ymm10
vmovdqu %ymm3,0+0(%rdi)
vmovdqu %ymm2,32+0(%rdi)
vmovdqu %ymm6,64+0(%rdi)
vmovdqu %ymm10,96+0(%rdi)
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+128(%rsi),%ymm3,%ymm3
vpxor 32+128(%rsi),%ymm1,%ymm1
vpxor 64+128(%rsi),%ymm5,%ymm5
vpxor 96+128(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+128(%rdi)
vmovdqu %ymm1,32+128(%rdi)
vmovdqu %ymm5,64+128(%rdi)
vmovdqu %ymm9,96+128(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
leaq 256(%rsi),%rsi
leaq 256(%rdi),%rdi
subq $256,%rbx
jmp L$open_avx2_tail_128_xor
L$open_avx2_tail_512:
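# 385..512 byte tail: the full four-state round function, hashing three
# ciphertext blocks per double round for the first four rounds and two
# thereafter; anything left is hashed in L$open_avx2_tail_512_hash.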
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa %ymm0,%ymm3
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm15
vpaddd %ymm15,%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm15,0+256(%rbp)
vmovdqa %ymm14,0+224(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm12,0+160(%rbp)
xorq %rcx,%rcx
movq %rsi,%r8
L$open_avx2_tail_512_rounds_and_x2hash:
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%r8),%r8
L$open_avx2_tail_512_rounds_and_x1hash:
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $4,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $12,%ymm15,%ymm15,%ymm15
vpalignr $4,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $4,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $4,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
addq 0+16(%r8),%r10
adcq 8+16(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%r8),%r8
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $12,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $4,%ymm15,%ymm15,%ymm15
vpalignr $12,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $12,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $12,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm12,%ymm12,%ymm12
incq %rcx
cmpq $4,%rcx
jl L$open_avx2_tail_512_rounds_and_x2hash
cmpq $10,%rcx
jne L$open_avx2_tail_512_rounds_and_x1hash
movq %rbx,%rcx
subq $384,%rcx
andq $-16,%rcx
L$open_avx2_tail_512_hash:
testq %rcx,%rcx
je L$open_avx2_tail_512_done
addq 0+0(%r8),%r10
adcq 8+0(%r8),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%r8),%r8
subq $16,%rcx
jmp L$open_avx2_tail_512_hash
L$open_avx2_tail_512_done:
vpaddd L$chacha20_consts(%rip),%ymm3,%ymm3
vpaddd 0+64(%rbp),%ymm7,%ymm7
vpaddd 0+96(%rbp),%ymm11,%ymm11
vpaddd 0+256(%rbp),%ymm15,%ymm15
vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vmovdqa %ymm0,0+128(%rbp)
vperm2i128 $0x02,%ymm3,%ymm7,%ymm0
vperm2i128 $0x13,%ymm3,%ymm7,%ymm7
vperm2i128 $0x02,%ymm11,%ymm15,%ymm3
vperm2i128 $0x13,%ymm11,%ymm15,%ymm11
vpxor 0+0(%rsi),%ymm0,%ymm0
vpxor 32+0(%rsi),%ymm3,%ymm3
vpxor 64+0(%rsi),%ymm7,%ymm7
vpxor 96+0(%rsi),%ymm11,%ymm11
vmovdqu %ymm0,0+0(%rdi)
vmovdqu %ymm3,32+0(%rdi)
vmovdqu %ymm7,64+0(%rdi)
vmovdqu %ymm11,96+0(%rdi)
vmovdqa 0+128(%rbp),%ymm0
vperm2i128 $0x02,%ymm2,%ymm6,%ymm3
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+128(%rsi),%ymm3,%ymm3
vpxor 32+128(%rsi),%ymm2,%ymm2
vpxor 64+128(%rsi),%ymm6,%ymm6
vpxor 96+128(%rsi),%ymm10,%ymm10
vmovdqu %ymm3,0+128(%rdi)
vmovdqu %ymm2,32+128(%rdi)
vmovdqu %ymm6,64+128(%rdi)
vmovdqu %ymm10,96+128(%rdi)
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+256(%rsi),%ymm3,%ymm3
vpxor 32+256(%rsi),%ymm1,%ymm1
vpxor 64+256(%rsi),%ymm5,%ymm5
vpxor 96+256(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+256(%rdi)
vmovdqu %ymm1,32+256(%rdi)
vmovdqu %ymm5,64+256(%rdi)
vmovdqu %ymm9,96+256(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
leaq 384(%rsi),%rsi
leaq 384(%rdi),%rdi
subq $384,%rbx
L$open_avx2_tail_128_xor:
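# Common keystream drain: xor 32 bytes at a time, shifting the next
# keystream register into ymm0 (ymm0 <- ymm4 <- ymm8 <- ymm12) after each
# store, then fall through to the sub-32-byte handling.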
cmpq $32,%rbx
jb L$open_avx2_tail_32_xor
subq $32,%rbx
vpxor (%rsi),%ymm0,%ymm0
vmovdqu %ymm0,(%rdi)
leaq 32(%rsi),%rsi
leaq 32(%rdi),%rdi
vmovdqa %ymm4,%ymm0
vmovdqa %ymm8,%ymm4
vmovdqa %ymm12,%ymm8
jmp L$open_avx2_tail_128_xor
L$open_avx2_tail_32_xor:
cmpq $16,%rbx
vmovdqa %xmm0,%xmm1
jb L$open_avx2_exit
subq $16,%rbx
vpxor (%rsi),%xmm0,%xmm1
vmovdqu %xmm1,(%rdi)
leaq 16(%rsi),%rsi
leaq 16(%rdi),%rdi
vperm2i128 $0x11,%ymm0,%ymm0,%ymm0
vmovdqa %xmm0,%xmm1
L$open_avx2_exit:
vzeroupper
jmp L$open_sse_tail_16
L$open_avx2_192:
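# Total length at most 192 bytes: run two ymm states (blocks 0..3 across the
# lanes) fully in registers; block 0 supplies the Poly1305 key, the rest is
# keystream for the short path below.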
vmovdqa %ymm0,%ymm1
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm5
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm9
vmovdqa %ymm8,%ymm10
vpaddd L$avx2_inc(%rip),%ymm12,%ymm13
vmovdqa %ymm12,%ymm11
vmovdqa %ymm13,%ymm15
movq $10,%r10
L$open_avx2_192_rounds:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
decq %r10
jne L$open_avx2_192_rounds
vpaddd %ymm2,%ymm0,%ymm0
vpaddd %ymm2,%ymm1,%ymm1
vpaddd %ymm6,%ymm4,%ymm4
vpaddd %ymm6,%ymm5,%ymm5
vpaddd %ymm10,%ymm8,%ymm8
vpaddd %ymm10,%ymm9,%ymm9
vpaddd %ymm11,%ymm12,%ymm12
vpaddd %ymm15,%ymm13,%ymm13
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vpand L$clamp(%rip),%ymm3,%ymm3
vmovdqa %ymm3,0+0(%rbp)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm4
vperm2i128 $0x02,%ymm1,%ymm5,%ymm8
vperm2i128 $0x02,%ymm9,%ymm13,%ymm12
vperm2i128 $0x13,%ymm1,%ymm5,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm5
L$open_avx2_short:
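# Shared short-input finish: hash the AD (the mov %r8,%r8 below is a no-op
# left by the generator - the ad length is already in place for
# poly_hash_ad_internal), then alternately hash two ciphertext blocks and
# xor 32 bytes of keystream per iteration.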
movq %r8,%r8
call poly_hash_ad_internal
L$open_avx2_short_hash_and_xor_loop:
cmpq $32,%rbx
jb L$open_avx2_short_tail_32
subq $32,%rbx
addq 0+0(%rsi),%r10
adcq 8+0(%rsi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
addq 0+16(%rsi),%r10
adcq 8+16(%rsi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpxor (%rsi),%ymm0,%ymm0
vmovdqu %ymm0,(%rdi)
leaq 32(%rsi),%rsi
leaq 32(%rdi),%rdi
vmovdqa %ymm4,%ymm0
vmovdqa %ymm8,%ymm4
vmovdqa %ymm12,%ymm8
vmovdqa %ymm1,%ymm12
vmovdqa %ymm5,%ymm1
vmovdqa %ymm9,%ymm5
vmovdqa %ymm13,%ymm9
vmovdqa %ymm2,%ymm13
vmovdqa %ymm6,%ymm2
jmp L$open_avx2_short_hash_and_xor_loop
L$open_avx2_short_tail_32:
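# One last full 16-byte block, if present: hash it, XOR with the low half of
# ymm0, and leave the following keystream block in %xmm1 for L$open_sse_tail_16.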
cmpq $16,%rbx
vmovdqa %xmm0,%xmm1
jb L$open_avx2_short_tail_32_exit
subq $16,%rbx
addq 0+0(%rsi),%r10
adcq 8+0(%rsi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpxor (%rsi),%xmm0,%xmm3
vmovdqu %xmm3,(%rdi)
leaq 16(%rsi),%rsi
leaq 16(%rdi),%rdi
vextracti128 $1,%ymm0,%xmm1
L$open_avx2_short_tail_32_exit:
vzeroupper
jmp L$open_sse_tail_16
L$open_avx2_320:
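# Open path for messages of at most 320 bytes: three interleaved 2-block
# states, with counters saved at 160/192/224(%rbp) for the feed-forward.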
vmovdqa %ymm0,%ymm1
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm5
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm9
vmovdqa %ymm8,%ymm10
vpaddd L$avx2_inc(%rip),%ymm12,%ymm13
vpaddd L$avx2_inc(%rip),%ymm13,%ymm14
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm14,0+224(%rbp)
movq $10,%r10
L$open_avx2_320_rounds:
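# 10 ChaCha20 double rounds across the three states.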
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm6,%ymm6,%ymm6
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm6,%ymm6,%ymm6
decq %r10
jne L$open_avx2_320_rounds
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2
vpaddd %ymm7,%ymm4,%ymm4
vpaddd %ymm7,%ymm5,%ymm5
vpaddd %ymm7,%ymm6,%ymm6
vpaddd %ymm11,%ymm8,%ymm8
vpaddd %ymm11,%ymm9,%ymm9
vpaddd %ymm11,%ymm10,%ymm10
vpaddd 0+160(%rbp),%ymm12,%ymm12
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd 0+224(%rbp),%ymm14,%ymm14
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vpand L$clamp(%rip),%ymm3,%ymm3
vmovdqa %ymm3,0+0(%rbp)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm4
vperm2i128 $0x02,%ymm1,%ymm5,%ymm8
vperm2i128 $0x02,%ymm9,%ymm13,%ymm12
vperm2i128 $0x13,%ymm1,%ymm5,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm5
vperm2i128 $0x02,%ymm2,%ymm6,%ymm9
vperm2i128 $0x02,%ymm10,%ymm14,%ymm13
vperm2i128 $0x13,%ymm2,%ymm6,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm6
jmp L$open_avx2_short
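# AVX2 seal: %rdi = out, %rsi = in, %rdx = in_len, %rcx = ad, %r8 = ad_len,
# %r9 = key/nonce/extra-input block. The prologue saves the callee-saved GPRs
# (plus %r9), aligns %rbp to 32 bytes for ymm spills, and records ad_len and
# the total ciphertext length (in_len plus the extra length at 56(%r9)) at
# 32(%rbp) for the final Poly1305 length block.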
.globl _chacha20_poly1305_seal_avx2
.private_extern _chacha20_poly1305_seal_avx2
.p2align 6
_chacha20_poly1305_seal_avx2:
_CET_ENDBR
pushq %rbp
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
pushq %r9
subq $288 + 0 + 32,%rsp
leaq 32(%rsp),%rbp
andq $-32,%rbp
movq 56(%r9),%rbx
addq %rdx,%rbx
movq %r8,0+0+32(%rbp)
movq %rbx,8+0+32(%rbp)
movq %rdx,%rbx
vzeroupper
vmovdqa L$chacha20_consts(%rip),%ymm0
vbroadcasti128 0(%r9),%ymm4
vbroadcasti128 16(%r9),%ymm8
vbroadcasti128 32(%r9),%ymm12
vpaddd L$avx2_init(%rip),%ymm12,%ymm12
cmpq $192,%rbx
jbe L$seal_avx2_192
cmpq $320,%rbx
jbe L$seal_avx2_320
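# More than 320 bytes: build four 2-block states (counters spilled at
# 160..256(%rbp)) and generate the first 512-byte chunk up front, since
# sealing can only hash ciphertext after it has been produced.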
vmovdqa %ymm0,%ymm1
vmovdqa %ymm0,%ymm2
vmovdqa %ymm0,%ymm3
vmovdqa %ymm4,%ymm5
vmovdqa %ymm4,%ymm6
vmovdqa %ymm4,%ymm7
vmovdqa %ymm4,0+64(%rbp)
vmovdqa %ymm8,%ymm9
vmovdqa %ymm8,%ymm10
vmovdqa %ymm8,%ymm11
vmovdqa %ymm8,0+96(%rbp)
vmovdqa %ymm12,%ymm15
vpaddd L$avx2_inc(%rip),%ymm15,%ymm14
vpaddd L$avx2_inc(%rip),%ymm14,%ymm13
vpaddd L$avx2_inc(%rip),%ymm13,%ymm12
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm14,0+224(%rbp)
vmovdqa %ymm15,0+256(%rbp)
movq $10,%r10
L$seal_avx2_init_rounds:
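# 10 double rounds over all four states; %ymm8 is spilled to 128(%rbp) each
# stage so it can double as the scratch register for the rotation constants.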
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $4,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $12,%ymm15,%ymm15,%ymm15
vpalignr $4,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $4,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $4,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $12,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $4,%ymm15,%ymm15,%ymm15
vpalignr $12,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $12,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $12,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm12,%ymm12,%ymm12
decq %r10
jnz L$seal_avx2_init_rounds
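# Feed-forward: add the saved inputs back in. The ymm3/7/11/15 state carries
# the lowest counters, so it supplies the first 64 bytes of keystream, 32 of
# which are clamped into the Poly1305 key below.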
vpaddd L$chacha20_consts(%rip),%ymm3,%ymm3
vpaddd 0+64(%rbp),%ymm7,%ymm7
vpaddd 0+96(%rbp),%ymm11,%ymm11
vpaddd 0+256(%rbp),%ymm15,%ymm15
vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x13,%ymm11,%ymm15,%ymm11
vperm2i128 $0x02,%ymm3,%ymm7,%ymm15
vperm2i128 $0x13,%ymm3,%ymm7,%ymm3
vpand L$clamp(%rip),%ymm15,%ymm15
vmovdqa %ymm15,0+0(%rbp)
movq %r8,%r8                # no-op: the AD-length argument and the hash-loop counter alias %r8
call poly_hash_ad_internal
vpxor 0(%rsi),%ymm3,%ymm3
vpxor 32(%rsi),%ymm11,%ymm11
vmovdqu %ymm3,0(%rdi)
vmovdqu %ymm11,32(%rdi)
vperm2i128 $0x02,%ymm2,%ymm6,%ymm15
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+64(%rsi),%ymm15,%ymm15
vpxor 32+64(%rsi),%ymm2,%ymm2
vpxor 64+64(%rsi),%ymm6,%ymm6
vpxor 96+64(%rsi),%ymm10,%ymm10
vmovdqu %ymm15,0+64(%rdi)
vmovdqu %ymm2,32+64(%rdi)
vmovdqu %ymm6,64+64(%rdi)
vmovdqu %ymm10,96+64(%rdi)
vperm2i128 $0x02,%ymm1,%ymm5,%ymm15
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+192(%rsi),%ymm15,%ymm15
vpxor 32+192(%rsi),%ymm1,%ymm1
vpxor 64+192(%rsi),%ymm5,%ymm5
vpxor 96+192(%rsi),%ymm9,%ymm9
vmovdqu %ymm15,0+192(%rdi)
vmovdqu %ymm1,32+192(%rdi)
vmovdqu %ymm5,64+192(%rdi)
vmovdqu %ymm9,96+192(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm15
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm15,%ymm8
leaq 320(%rsi),%rsi
subq $320,%rbx
movq $320,%rcx
cmpq $128,%rbx
jbe L$seal_avx2_short_hash_remainder
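# More than 128 bytes remain: flush the last 128 bytes of this chunk's
# keystream too, leaving 448 bytes of ciphertext written but not yet hashed.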
vpxor 0(%rsi),%ymm0,%ymm0
vpxor 32(%rsi),%ymm4,%ymm4
vpxor 64(%rsi),%ymm8,%ymm8
vpxor 96(%rsi),%ymm12,%ymm12
vmovdqu %ymm0,320(%rdi)
vmovdqu %ymm4,352(%rdi)
vmovdqu %ymm8,384(%rdi)
vmovdqu %ymm12,416(%rdi)
leaq 128(%rsi),%rsi
subq $128,%rbx
movq $8,%rcx
movq $2,%r8
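# Tail counters: ten double rounds remain; %rcx of them hash 48 bytes of
# pending ciphertext each and the final %r8 passes hash 32 bytes each
# (8*48 + 2*32 = 448 bytes).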
cmpq $128,%rbx
jbe L$seal_avx2_tail_128
cmpq $256,%rbx
jbe L$seal_avx2_tail_256
cmpq $384,%rbx
jbe L$seal_avx2_tail_384
cmpq $512,%rbx
jbe L$seal_avx2_tail_512
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa %ymm0,%ymm3
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm15
vpaddd %ymm15,%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm15,0+256(%rbp)
vmovdqa %ymm14,0+224(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $4,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $12,%ymm15,%ymm15,%ymm15
vpalignr $4,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $4,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $4,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $12,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $4,%ymm15,%ymm15,%ymm15
vpalignr $12,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $12,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $12,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
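# First trip through the main loop: the double round above is already in
# flight, so run %rcx = 9 further iterations and enter mid-body; %rdi is
# biased by -16 because the entry point skips the body's first Poly1305 block.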
subq $16,%rdi
movq $9,%rcx
jmp L$seal_avx2_main_loop_rounds_entry
.p2align 5
L$seal_avx2_main_loop:
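# Steady state: each pass generates 512 bytes of keystream (four 2-block
# states) while Poly1305-hashing the previous 512 bytes of ciphertext, then
# XORs and stores the next chunk.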
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa %ymm0,%ymm3
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm15
vpaddd %ymm15,%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm15,0+256(%rbp)
vmovdqa %ymm14,0+224(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm12,0+160(%rbp)
movq $10,%rcx
.p2align 5
L$seal_avx2_main_loop_rounds:
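# One ChaCha20 double round interleaved with three 16-byte Poly1305 blocks;
# the multiply uses mulx (BMI2) with the usual mod 2^130 - 5 folding.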
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
addq %rax,%r15
adcq %rdx,%r9
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
L$seal_avx2_main_loop_rounds_entry:
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $4,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $12,%ymm15,%ymm15,%ymm15
vpalignr $4,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm14,%ymm14,%ymm14
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpalignr $4,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $4,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
addq %rax,%r15
adcq %rdx,%r9
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
addq 0+32(%rdi),%r10
adcq 8+32(%rdi),%r11
adcq $1,%r12
leaq 48(%rdi),%rdi
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
addq %rax,%r15
adcq %rdx,%r9
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $12,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $4,%ymm15,%ymm15,%ymm15
vpalignr $12,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $12,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $12,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpalignr $4,%ymm12,%ymm12,%ymm12
decq %rcx
jne L$seal_avx2_main_loop_rounds
vpaddd L$chacha20_consts(%rip),%ymm3,%ymm3
vpaddd 0+64(%rbp),%ymm7,%ymm7
vpaddd 0+96(%rbp),%ymm11,%ymm11
vpaddd 0+256(%rbp),%ymm15,%ymm15
vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vmovdqa %ymm0,0+128(%rbp)
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%rdi),%rdi
vperm2i128 $0x02,%ymm3,%ymm7,%ymm0
vperm2i128 $0x13,%ymm3,%ymm7,%ymm7
vperm2i128 $0x02,%ymm11,%ymm15,%ymm3
vperm2i128 $0x13,%ymm11,%ymm15,%ymm11
vpxor 0+0(%rsi),%ymm0,%ymm0
vpxor 32+0(%rsi),%ymm3,%ymm3
vpxor 64+0(%rsi),%ymm7,%ymm7
vpxor 96+0(%rsi),%ymm11,%ymm11
vmovdqu %ymm0,0+0(%rdi)
vmovdqu %ymm3,32+0(%rdi)
vmovdqu %ymm7,64+0(%rdi)
vmovdqu %ymm11,96+0(%rdi)
vmovdqa 0+128(%rbp),%ymm0
vperm2i128 $0x02,%ymm2,%ymm6,%ymm3
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+128(%rsi),%ymm3,%ymm3
vpxor 32+128(%rsi),%ymm2,%ymm2
vpxor 64+128(%rsi),%ymm6,%ymm6
vpxor 96+128(%rsi),%ymm10,%ymm10
vmovdqu %ymm3,0+128(%rdi)
vmovdqu %ymm2,32+128(%rdi)
vmovdqu %ymm6,64+128(%rdi)
vmovdqu %ymm10,96+128(%rdi)
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+256(%rsi),%ymm3,%ymm3
vpxor 32+256(%rsi),%ymm1,%ymm1
vpxor 64+256(%rsi),%ymm5,%ymm5
vpxor 96+256(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+256(%rdi)
vmovdqu %ymm1,32+256(%rdi)
vmovdqu %ymm5,64+256(%rdi)
vmovdqu %ymm9,96+256(%rdi)
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vperm2i128 $0x13,%ymm0,%ymm4,%ymm4
vperm2i128 $0x02,%ymm8,%ymm12,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm8
vpxor 0+384(%rsi),%ymm3,%ymm3
vpxor 32+384(%rsi),%ymm0,%ymm0
vpxor 64+384(%rsi),%ymm4,%ymm4
vpxor 96+384(%rsi),%ymm8,%ymm8
vmovdqu %ymm3,0+384(%rdi)
vmovdqu %ymm0,32+384(%rdi)
vmovdqu %ymm4,64+384(%rdi)
vmovdqu %ymm8,96+384(%rdi)
leaq 512(%rsi),%rsi
subq $512,%rbx
cmpq $512,%rbx
jg L$seal_avx2_main_loop
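# No full 512-byte chunk left: hash the final 32 bytes of the previous chunk,
# reset the tail counters (%rcx = 10 rounds, %r8 = 0), and dispatch on the
# remaining 0-512 bytes.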
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%rdi),%rdi
movq $10,%rcx
xorq %r8,%r8
cmpq $384,%rbx
ja L$seal_avx2_tail_512
cmpq $256,%rbx
ja L$seal_avx2_tail_384
cmpq $128,%rbx
ja L$seal_avx2_tail_256
L$seal_avx2_tail_128:
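# At most 128 bytes remain: one 2-block state; the round/hash loops below
# absorb the ciphertext still pending from the previous chunk.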
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm12
vmovdqa %ymm12,0+160(%rbp)
L$seal_avx2_tail_128_rounds_and_3xhash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
L$seal_avx2_tail_128_rounds_and_2xhash:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%rdi),%rdi
decq %rcx
jg L$seal_avx2_tail_128_rounds_and_3xhash
decq %r8
jge L$seal_avx2_tail_128_rounds_and_2xhash
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
jmp L$seal_avx2_short_loop
L$seal_avx2_tail_256:
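# At most 256 bytes remain: two interleaved states.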
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
L$seal_avx2_tail_256_rounds_and_3xhash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
L$seal_avx2_tail_256_rounds_and_2xhash:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%rdi),%rdi
decq %rcx
jg L$seal_avx2_tail_256_rounds_and_3xhash
decq %r8
jge L$seal_avx2_tail_256_rounds_and_2xhash
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+0(%rsi),%ymm3,%ymm3
vpxor 32+0(%rsi),%ymm1,%ymm1
vpxor 64+0(%rsi),%ymm5,%ymm5
vpxor 96+0(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+0(%rdi)
vmovdqu %ymm1,32+0(%rdi)
vmovdqu %ymm5,64+0(%rdi)
vmovdqu %ymm9,96+0(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
movq $128,%rcx
leaq 128(%rsi),%rsi
subq $128,%rbx
jmp L$seal_avx2_short_hash_remainder
L$seal_avx2_tail_384:
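# At most 384 bytes remain: three interleaved states.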
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm14,0+224(%rbp)
L$seal_avx2_tail_384_rounds_and_3xhash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
L$seal_avx2_tail_384_rounds_and_2xhash:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm6,%ymm6,%ymm6
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm6,%ymm6,%ymm6
leaq 32(%rdi),%rdi
decq %rcx
jg L$seal_avx2_tail_384_rounds_and_3xhash
decq %r8
jge L$seal_avx2_tail_384_rounds_and_2xhash
vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vperm2i128 $0x02,%ymm2,%ymm6,%ymm3
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+0(%rsi),%ymm3,%ymm3
vpxor 32+0(%rsi),%ymm2,%ymm2
vpxor 64+0(%rsi),%ymm6,%ymm6
vpxor 96+0(%rsi),%ymm10,%ymm10
vmovdqu %ymm3,0+0(%rdi)
vmovdqu %ymm2,32+0(%rdi)
vmovdqu %ymm6,64+0(%rdi)
vmovdqu %ymm10,96+0(%rdi)
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+128(%rsi),%ymm3,%ymm3
vpxor 32+128(%rsi),%ymm1,%ymm1
vpxor 64+128(%rsi),%ymm5,%ymm5
vpxor 96+128(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+128(%rdi)
vmovdqu %ymm1,32+128(%rdi)
vmovdqu %ymm5,64+128(%rdi)
vmovdqu %ymm9,96+128(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
movq $256,%rcx
leaq 256(%rsi),%rsi
subq $256,%rbx
jmp L$seal_avx2_short_hash_remainder
L$seal_avx2_tail_512:
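# Between 385 and 512 bytes remain: all four states again.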
vmovdqa L$chacha20_consts(%rip),%ymm0
vmovdqa 0+64(%rbp),%ymm4
vmovdqa 0+96(%rbp),%ymm8
vmovdqa %ymm0,%ymm1
vmovdqa %ymm4,%ymm5
vmovdqa %ymm8,%ymm9
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm10
vmovdqa %ymm0,%ymm3
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa L$avx2_inc(%rip),%ymm12
vpaddd 0+160(%rbp),%ymm12,%ymm15
vpaddd %ymm15,%ymm12,%ymm14
vpaddd %ymm14,%ymm12,%ymm13
vpaddd %ymm13,%ymm12,%ymm12
vmovdqa %ymm15,0+256(%rbp)
vmovdqa %ymm14,0+224(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm12,0+160(%rbp)
L$seal_avx2_tail_512_rounds_and_3xhash:
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
L$seal_avx2_tail_512_rounds_and_2xhash:
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $4,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $12,%ymm15,%ymm15,%ymm15
vpalignr $4,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $4,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $4,%ymm4,%ymm4,%ymm4
addq %rax,%r15
adcq %rdx,%r9
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm12,%ymm12,%ymm12
vmovdqa %ymm8,0+128(%rbp)
vmovdqa L$rol16(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $20,%ymm7,%ymm8
vpslld $32-20,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $20,%ymm6,%ymm8
vpslld $32-20,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $20,%ymm5,%ymm8
vpslld $32-20,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $20,%ymm4,%ymm8
vpslld $32-20,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa L$rol8(%rip),%ymm8
vpaddd %ymm7,%ymm3,%ymm3
vpaddd %ymm6,%ymm2,%ymm2
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
vpaddd %ymm5,%ymm1,%ymm1
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm3,%ymm15,%ymm15
vpxor %ymm2,%ymm14,%ymm14
vpxor %ymm1,%ymm13,%ymm13
vpxor %ymm0,%ymm12,%ymm12
vpshufb %ymm8,%ymm15,%ymm15
vpshufb %ymm8,%ymm14,%ymm14
vpshufb %ymm8,%ymm13,%ymm13
vpshufb %ymm8,%ymm12,%ymm12
vpaddd %ymm15,%ymm11,%ymm11
vpaddd %ymm14,%ymm10,%ymm10
vpaddd %ymm13,%ymm9,%ymm9
vpaddd 0+128(%rbp),%ymm12,%ymm8
vpxor %ymm11,%ymm7,%ymm7
vpxor %ymm10,%ymm6,%ymm6
vpxor %ymm9,%ymm5,%ymm5
vpxor %ymm8,%ymm4,%ymm4
vmovdqa %ymm8,0+128(%rbp)
vpsrld $25,%ymm7,%ymm8
movq 0+0+0(%rbp),%rdx
movq %rdx,%r15
mulxq %r10,%r13,%r14
mulxq %r11,%rax,%rdx
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
vpslld $32-25,%ymm7,%ymm7
vpxor %ymm8,%ymm7,%ymm7
vpsrld $25,%ymm6,%ymm8
vpslld $32-25,%ymm6,%ymm6
vpxor %ymm8,%ymm6,%ymm6
vpsrld $25,%ymm5,%ymm8
vpslld $32-25,%ymm5,%ymm5
vpxor %ymm8,%ymm5,%ymm5
vpsrld $25,%ymm4,%ymm8
vpslld $32-25,%ymm4,%ymm4
vpxor %ymm8,%ymm4,%ymm4
vmovdqa 0+128(%rbp),%ymm8
vpalignr $12,%ymm7,%ymm7,%ymm7
vpalignr $8,%ymm11,%ymm11,%ymm11
vpalignr $4,%ymm15,%ymm15,%ymm15
vpalignr $12,%ymm6,%ymm6,%ymm6
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $12,%ymm5,%ymm5,%ymm5
vpalignr $8,%ymm9,%ymm9,%ymm9
movq 8+0+0(%rbp),%rdx
mulxq %r10,%r10,%rax
addq %r10,%r14
mulxq %r11,%r11,%r9
adcq %r11,%r15
adcq $0,%r9
imulq %r12,%rdx
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $12,%ymm4,%ymm4,%ymm4
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm12,%ymm12,%ymm12
addq %rax,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%rdi),%rdi
decq %rcx
jg L$seal_avx2_tail_512_rounds_and_3xhash
decq %r8
jge L$seal_avx2_tail_512_rounds_and_2xhash
vpaddd L$chacha20_consts(%rip),%ymm3,%ymm3
vpaddd 0+64(%rbp),%ymm7,%ymm7
vpaddd 0+96(%rbp),%ymm11,%ymm11
vpaddd 0+256(%rbp),%ymm15,%ymm15
vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2
vpaddd 0+64(%rbp),%ymm6,%ymm6
vpaddd 0+96(%rbp),%ymm10,%ymm10
vpaddd 0+224(%rbp),%ymm14,%ymm14
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd 0+64(%rbp),%ymm5,%ymm5
vpaddd 0+96(%rbp),%ymm9,%ymm9
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd 0+64(%rbp),%ymm4,%ymm4
vpaddd 0+96(%rbp),%ymm8,%ymm8
vpaddd 0+160(%rbp),%ymm12,%ymm12
vmovdqa %ymm0,0+128(%rbp)
vperm2i128 $0x02,%ymm3,%ymm7,%ymm0
vperm2i128 $0x13,%ymm3,%ymm7,%ymm7
vperm2i128 $0x02,%ymm11,%ymm15,%ymm3
vperm2i128 $0x13,%ymm11,%ymm15,%ymm11
vpxor 0+0(%rsi),%ymm0,%ymm0
vpxor 32+0(%rsi),%ymm3,%ymm3
vpxor 64+0(%rsi),%ymm7,%ymm7
vpxor 96+0(%rsi),%ymm11,%ymm11
vmovdqu %ymm0,0+0(%rdi)
vmovdqu %ymm3,32+0(%rdi)
vmovdqu %ymm7,64+0(%rdi)
vmovdqu %ymm11,96+0(%rdi)
vmovdqa 0+128(%rbp),%ymm0
vperm2i128 $0x02,%ymm2,%ymm6,%ymm3
vperm2i128 $0x13,%ymm2,%ymm6,%ymm6
vperm2i128 $0x02,%ymm10,%ymm14,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm10
vpxor 0+128(%rsi),%ymm3,%ymm3
vpxor 32+128(%rsi),%ymm2,%ymm2
vpxor 64+128(%rsi),%ymm6,%ymm6
vpxor 96+128(%rsi),%ymm10,%ymm10
vmovdqu %ymm3,0+128(%rdi)
vmovdqu %ymm2,32+128(%rdi)
vmovdqu %ymm6,64+128(%rdi)
vmovdqu %ymm10,96+128(%rdi)
vperm2i128 $0x02,%ymm1,%ymm5,%ymm3
vperm2i128 $0x13,%ymm1,%ymm5,%ymm5
vperm2i128 $0x02,%ymm9,%ymm13,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9
vpxor 0+256(%rsi),%ymm3,%ymm3
vpxor 32+256(%rsi),%ymm1,%ymm1
vpxor 64+256(%rsi),%ymm5,%ymm5
vpxor 96+256(%rsi),%ymm9,%ymm9
vmovdqu %ymm3,0+256(%rdi)
vmovdqu %ymm1,32+256(%rdi)
vmovdqu %ymm5,64+256(%rdi)
vmovdqu %ymm9,96+256(%rdi)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm3
vperm2i128 $0x02,%ymm0,%ymm4,%ymm0
vperm2i128 $0x02,%ymm8,%ymm12,%ymm4
vperm2i128 $0x13,%ymm8,%ymm12,%ymm12
vmovdqa %ymm3,%ymm8
movq $384,%rcx
leaq 384(%rsi),%rsi
subq $384,%rbx
jmp L$seal_avx2_short_hash_remainder
L$seal_avx2_320:
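# Seal path for messages of at most 320 bytes: three interleaved 2-block
# states give 384 bytes of keystream.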
vmovdqa %ymm0,%ymm1
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm5
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm9
vmovdqa %ymm8,%ymm10
vpaddd L$avx2_inc(%rip),%ymm12,%ymm13
vpaddd L$avx2_inc(%rip),%ymm13,%ymm14
vmovdqa %ymm4,%ymm7
vmovdqa %ymm8,%ymm11
vmovdqa %ymm12,0+160(%rbp)
vmovdqa %ymm13,0+192(%rbp)
vmovdqa %ymm14,0+224(%rbp)
movq $10,%r10
L$seal_avx2_320_rounds:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $12,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $4,%ymm6,%ymm6,%ymm6
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol16(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpsrld $20,%ymm6,%ymm3
vpslld $12,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpaddd %ymm6,%ymm2,%ymm2
vpxor %ymm2,%ymm14,%ymm14
vpshufb L$rol8(%rip),%ymm14,%ymm14
vpaddd %ymm14,%ymm10,%ymm10
vpxor %ymm10,%ymm6,%ymm6
vpslld $7,%ymm6,%ymm3
vpsrld $25,%ymm6,%ymm6
vpxor %ymm3,%ymm6,%ymm6
vpalignr $4,%ymm14,%ymm14,%ymm14
vpalignr $8,%ymm10,%ymm10,%ymm10
vpalignr $12,%ymm6,%ymm6,%ymm6
decq %r10
jne L$seal_avx2_320_rounds
vpaddd L$chacha20_consts(%rip),%ymm0,%ymm0
vpaddd L$chacha20_consts(%rip),%ymm1,%ymm1
vpaddd L$chacha20_consts(%rip),%ymm2,%ymm2
vpaddd %ymm7,%ymm4,%ymm4
vpaddd %ymm7,%ymm5,%ymm5
vpaddd %ymm7,%ymm6,%ymm6
vpaddd %ymm11,%ymm8,%ymm8
vpaddd %ymm11,%ymm9,%ymm9
vpaddd %ymm11,%ymm10,%ymm10
vpaddd 0+160(%rbp),%ymm12,%ymm12
vpaddd 0+192(%rbp),%ymm13,%ymm13
vpaddd 0+224(%rbp),%ymm14,%ymm14
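// The first 32 bytes of keystream become the Poly1305 key pair (r, s):
// clamp r with L$clamp and stash the pair at 0(%rbp).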
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vpand L$clamp(%rip),%ymm3,%ymm3
vmovdqa %ymm3,0+0(%rbp)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm4
vperm2i128 $0x02,%ymm1,%ymm5,%ymm8
vperm2i128 $0x02,%ymm9,%ymm13,%ymm12
vperm2i128 $0x13,%ymm1,%ymm5,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm5
vperm2i128 $0x02,%ymm2,%ymm6,%ymm9
vperm2i128 $0x02,%ymm10,%ymm14,%ymm13
vperm2i128 $0x13,%ymm2,%ymm6,%ymm2
vperm2i128 $0x13,%ymm10,%ymm14,%ymm6
jmp L$seal_avx2_short
L$seal_avx2_192:
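// Smaller short-input path: two interleaved ChaCha20 states; the initial
// counters are kept in ymm11/ymm15 for the feed-forward after the rounds.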
vmovdqa %ymm0,%ymm1
vmovdqa %ymm0,%ymm2
vmovdqa %ymm4,%ymm5
vmovdqa %ymm4,%ymm6
vmovdqa %ymm8,%ymm9
vmovdqa %ymm8,%ymm10
vpaddd L$avx2_inc(%rip),%ymm12,%ymm13
vmovdqa %ymm12,%ymm11
vmovdqa %ymm13,%ymm15
movq $10,%r10
L$seal_avx2_192_rounds:
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $12,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $4,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $12,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $4,%ymm5,%ymm5,%ymm5
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol16(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpsrld $20,%ymm4,%ymm3
vpslld $12,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpaddd %ymm4,%ymm0,%ymm0
vpxor %ymm0,%ymm12,%ymm12
vpshufb L$rol8(%rip),%ymm12,%ymm12
vpaddd %ymm12,%ymm8,%ymm8
vpxor %ymm8,%ymm4,%ymm4
vpslld $7,%ymm4,%ymm3
vpsrld $25,%ymm4,%ymm4
vpxor %ymm3,%ymm4,%ymm4
vpalignr $4,%ymm12,%ymm12,%ymm12
vpalignr $8,%ymm8,%ymm8,%ymm8
vpalignr $12,%ymm4,%ymm4,%ymm4
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol16(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpsrld $20,%ymm5,%ymm3
vpslld $12,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpaddd %ymm5,%ymm1,%ymm1
vpxor %ymm1,%ymm13,%ymm13
vpshufb L$rol8(%rip),%ymm13,%ymm13
vpaddd %ymm13,%ymm9,%ymm9
vpxor %ymm9,%ymm5,%ymm5
vpslld $7,%ymm5,%ymm3
vpsrld $25,%ymm5,%ymm5
vpxor %ymm3,%ymm5,%ymm5
vpalignr $4,%ymm13,%ymm13,%ymm13
vpalignr $8,%ymm9,%ymm9,%ymm9
vpalignr $12,%ymm5,%ymm5,%ymm5
decq %r10
jne L$seal_avx2_192_rounds
vpaddd %ymm2,%ymm0,%ymm0
vpaddd %ymm2,%ymm1,%ymm1
vpaddd %ymm6,%ymm4,%ymm4
vpaddd %ymm6,%ymm5,%ymm5
vpaddd %ymm10,%ymm8,%ymm8
vpaddd %ymm10,%ymm9,%ymm9
vpaddd %ymm11,%ymm12,%ymm12
vpaddd %ymm15,%ymm13,%ymm13
vperm2i128 $0x02,%ymm0,%ymm4,%ymm3
vpand L$clamp(%rip),%ymm3,%ymm3
vmovdqa %ymm3,0+0(%rbp)
vperm2i128 $0x13,%ymm0,%ymm4,%ymm0
vperm2i128 $0x13,%ymm8,%ymm12,%ymm4
vperm2i128 $0x02,%ymm1,%ymm5,%ymm8
vperm2i128 $0x02,%ymm9,%ymm13,%ymm12
vperm2i128 $0x13,%ymm1,%ymm5,%ymm1
vperm2i128 $0x13,%ymm9,%ymm13,%ymm5
L$seal_avx2_short:
movq %r8,%r8
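// (The movq above is a no-op, src == dst; likely an artifact of the
// register mapping in the perlasm source.)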
call poly_hash_ad_internal
xorq %rcx,%rcx
L$seal_avx2_short_hash_remainder:
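// Absorb one 16-byte block into the Poly1305 accumulator h = r10:r11:r12:
// add the block (plus the 2^128 bit via adcq $1), multiply by r kept at
// 0(%rbp)/8(%rbp), then reduce modulo 2^130-5 with the
// and $3 / and $-4 / shrd $2 folding below.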
cmpq $16,%rcx
jb L$seal_avx2_short_loop
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
subq $16,%rcx
addq $16,%rdi
jmp L$seal_avx2_short_hash_remainder
L$seal_avx2_short_loop:
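// Encrypt 32 bytes with the current keystream register ymm0, hash the two
// resulting 16-byte ciphertext blocks, then rotate the queued keystream
// registers down (ymm4 -> ymm0, ymm8 -> ymm4, ...) and repeat.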
cmpq $32,%rbx
jb L$seal_avx2_short_tail
subq $32,%rbx
vpxor (%rsi),%ymm0,%ymm0
vmovdqu %ymm0,(%rdi)
leaq 32(%rsi),%rsi
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
addq 0+16(%rdi),%r10
adcq 8+16(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 32(%rdi),%rdi
vmovdqa %ymm4,%ymm0
vmovdqa %ymm8,%ymm4
vmovdqa %ymm12,%ymm8
vmovdqa %ymm1,%ymm12
vmovdqa %ymm5,%ymm1
vmovdqa %ymm9,%ymm5
vmovdqa %ymm13,%ymm9
vmovdqa %ymm2,%ymm13
vmovdqa %ymm6,%ymm2
jmp L$seal_avx2_short_loop
L$seal_avx2_short_tail:
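// At most 31 bytes remain: handle one 16-byte block with the low lane of
// ymm0, then fall through to the SSE tail with the high lane in xmm0 for
// the final partial block.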
cmpq $16,%rbx
jb L$seal_avx2_exit
subq $16,%rbx
vpxor (%rsi),%xmm0,%xmm3
vmovdqu %xmm3,(%rdi)
leaq 16(%rsi),%rsi
addq 0+0(%rdi),%r10
adcq 8+0(%rdi),%r11
adcq $1,%r12
movq 0+0+0(%rbp),%rax
movq %rax,%r15
mulq %r10
movq %rax,%r13
movq %rdx,%r14
movq 0+0+0(%rbp),%rax
mulq %r11
imulq %r12,%r15
addq %rax,%r14
adcq %rdx,%r15
movq 8+0+0(%rbp),%rax
movq %rax,%r9
mulq %r10
addq %rax,%r14
adcq $0,%rdx
movq %rdx,%r10
movq 8+0+0(%rbp),%rax
mulq %r11
addq %rax,%r15
adcq $0,%rdx
imulq %r12,%r9
addq %r10,%r15
adcq %rdx,%r9
movq %r13,%r10
movq %r14,%r11
movq %r15,%r12
andq $3,%r12
movq %r15,%r13
andq $-4,%r13
movq %r9,%r14
shrdq $2,%r9,%r15
shrq $2,%r9
addq %r13,%r15
adcq %r14,%r9
addq %r15,%r10
adcq %r9,%r11
adcq $0,%r12
leaq 16(%rdi),%rdi
vextracti128 $1,%ymm0,%xmm0
L$seal_avx2_exit:
vzeroupper
jmp L$seal_sse_tail_16
#endif
// File: thirdparty/crates/ring-0.17.9/pregenerated/ghash-neon-armv8-ios64.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__)
#include <ring-core/arm_arch.h>
.text
.globl _gcm_init_neon
.private_extern _gcm_init_neon
.align 4
_gcm_init_neon:
AARCH64_VALID_CALL_TARGET
// This function is adapted from gcm_init_v8. xC2 is t3.
ld1 {v17.2d}, [x1] // load H
movi v19.16b, #0xe1
shl v19.2d, v19.2d, #57 // 0xc2.0
ext v3.16b, v17.16b, v17.16b, #8
ushr v18.2d, v19.2d, #63
dup v17.4s, v17.s[1]
ext v16.16b, v18.16b, v19.16b, #8 // t0=0xc2....01
ushr v18.2d, v3.2d, #63
sshr v17.4s, v17.4s, #31 // broadcast carry bit
and v18.16b, v18.16b, v16.16b
shl v3.2d, v3.2d, #1
ext v18.16b, v18.16b, v18.16b, #8
and v16.16b, v16.16b, v17.16b
orr v3.16b, v3.16b, v18.16b // H<<<=1
eor v5.16b, v3.16b, v16.16b // twisted H
st1 {v5.2d}, [x0] // store Htable[0]
ret
.globl _gcm_gmult_neon
.private_extern _gcm_gmult_neon
.align 4
_gcm_gmult_neon:
AARCH64_VALID_CALL_TARGET
ld1 {v3.16b}, [x0] // load Xi
ld1 {v5.1d}, [x1], #8 // load twisted H
ld1 {v6.1d}, [x1]
adrp x9, Lmasks@PAGE // load constants
add x9, x9, Lmasks@PAGEOFF
ld1 {v24.2d, v25.2d}, [x9]
rev64 v3.16b, v3.16b // byteswap Xi
ext v3.16b, v3.16b, v3.16b, #8
eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing
mov x3, #16
b Lgmult_neon
.globl _gcm_ghash_neon
.private_extern _gcm_ghash_neon
.align 4
_gcm_ghash_neon:
AARCH64_VALID_CALL_TARGET
ld1 {v0.16b}, [x0] // load Xi
ld1 {v5.1d}, [x1], #8 // load twisted H
ld1 {v6.1d}, [x1]
adrp x9, Lmasks@PAGE // load constants
add x9, x9, Lmasks@PAGEOFF
ld1 {v24.2d, v25.2d}, [x9]
rev64 v0.16b, v0.16b // byteswap Xi
ext v0.16b, v0.16b, v0.16b, #8
eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing
Loop_neon:
ld1 {v3.16b}, [x2], #16 // load inp
rev64 v3.16b, v3.16b // byteswap inp
ext v3.16b, v3.16b, v3.16b, #8
eor v3.16b, v3.16b, v0.16b // inp ^= Xi
Lgmult_neon:
// Split the input into v3 and v4. (The upper halves are unused,
// so it is okay to leave them alone.)
ins v4.d[0], v3.d[1]
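// Base Advanced SIMD has no 64x64 PMULL (that needs the Crypto extension),
// so the 64x64 carry-less multiply below is synthesized from 8x8 pmull:
// byte-rotated copies of A and B (A1*B, A*B1, A2*B, ...) are combined and
// realigned to build the full 128-bit product.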
ext v16.8b, v5.8b, v5.8b, #1 // A1
pmull v16.8h, v16.8b, v3.8b // F = A1*B
ext v0.8b, v3.8b, v3.8b, #1 // B1
pmull v0.8h, v5.8b, v0.8b // E = A*B1
ext v17.8b, v5.8b, v5.8b, #2 // A2
pmull v17.8h, v17.8b, v3.8b // H = A2*B
ext v19.8b, v3.8b, v3.8b, #2 // B2
pmull v19.8h, v5.8b, v19.8b // G = A*B2
ext v18.8b, v5.8b, v5.8b, #3 // A3
eor v16.16b, v16.16b, v0.16b // L = E + F
pmull v18.8h, v18.8b, v3.8b // J = A3*B
ext v0.8b, v3.8b, v3.8b, #3 // B3
eor v17.16b, v17.16b, v19.16b // M = G + H
pmull v0.8h, v5.8b, v0.8b // I = A*B3
// Here we diverge from the 32-bit version. It computes the following
// (instructions reordered for clarity):
//
// veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L)
// vand $t0#hi, $t0#hi, $k48
// veor $t0#lo, $t0#lo, $t0#hi
//
// veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M)
// vand $t1#hi, $t1#hi, $k32
// veor $t1#lo, $t1#lo, $t1#hi
//
// veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N)
// vand $t2#hi, $t2#hi, $k16
// veor $t2#lo, $t2#lo, $t2#hi
//
// veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K)
// vmov.i64 $t3#hi, #0
//
// $kN is a mask with the bottom N bits set. AArch64 cannot compute on
// upper halves of SIMD registers, so we must split each half into
// separate registers. To compensate, we pair computations up and
// parallelize.
ext v19.8b, v3.8b, v3.8b, #4 // B4
eor v18.16b, v18.16b, v0.16b // N = I + J
pmull v19.8h, v5.8b, v19.8b // K = A*B4
// This can probably be scheduled more efficiently. For now, we just
// pair up independent instructions.
zip1 v20.2d, v16.2d, v17.2d
zip1 v22.2d, v18.2d, v19.2d
zip2 v21.2d, v16.2d, v17.2d
zip2 v23.2d, v18.2d, v19.2d
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
and v21.16b, v21.16b, v24.16b
and v23.16b, v23.16b, v25.16b
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
zip1 v16.2d, v20.2d, v21.2d
zip1 v18.2d, v22.2d, v23.2d
zip2 v17.2d, v20.2d, v21.2d
zip2 v19.2d, v22.2d, v23.2d
ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8
ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16
pmull v0.8h, v5.8b, v3.8b // D = A*B
ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32
ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24
eor v16.16b, v16.16b, v17.16b
eor v18.16b, v18.16b, v19.16b
eor v0.16b, v0.16b, v16.16b
eor v0.16b, v0.16b, v18.16b
eor v3.8b, v3.8b, v4.8b // Karatsuba pre-processing
ext v16.8b, v7.8b, v7.8b, #1 // A1
pmull v16.8h, v16.8b, v3.8b // F = A1*B
ext v1.8b, v3.8b, v3.8b, #1 // B1
pmull v1.8h, v7.8b, v1.8b // E = A*B1
ext v17.8b, v7.8b, v7.8b, #2 // A2
pmull v17.8h, v17.8b, v3.8b // H = A2*B
ext v19.8b, v3.8b, v3.8b, #2 // B2
pmull v19.8h, v7.8b, v19.8b // G = A*B2
ext v18.8b, v7.8b, v7.8b, #3 // A3
eor v16.16b, v16.16b, v1.16b // L = E + F
pmull v18.8h, v18.8b, v3.8b // J = A3*B
ext v1.8b, v3.8b, v3.8b, #3 // B3
eor v17.16b, v17.16b, v19.16b // M = G + H
pmull v1.8h, v7.8b, v1.8b // I = A*B3
// Here we diverge from the 32-bit version. It computes the following
// (instructions reordered for clarity):
//
// veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L)
// vand $t0#hi, $t0#hi, $k48
// veor $t0#lo, $t0#lo, $t0#hi
//
// veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M)
// vand $t1#hi, $t1#hi, $k32
// veor $t1#lo, $t1#lo, $t1#hi
//
// veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N)
// vand $t2#hi, $t2#hi, $k16
// veor $t2#lo, $t2#lo, $t2#hi
//
// veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K)
// vmov.i64 $t3#hi, #0
//
// $kN is a mask with the bottom N bits set. AArch64 cannot compute on
// upper halves of SIMD registers, so we must split each half into
// separate registers. To compensate, we pair computations up and
// parallelize.
ext v19.8b, v3.8b, v3.8b, #4 // B4
eor v18.16b, v18.16b, v1.16b // N = I + J
pmull v19.8h, v7.8b, v19.8b // K = A*B4
// This can probably be scheduled more efficiently. For now, we just
// pair up independent instructions.
zip1 v20.2d, v16.2d, v17.2d
zip1 v22.2d, v18.2d, v19.2d
zip2 v21.2d, v16.2d, v17.2d
zip2 v23.2d, v18.2d, v19.2d
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
and v21.16b, v21.16b, v24.16b
and v23.16b, v23.16b, v25.16b
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
zip1 v16.2d, v20.2d, v21.2d
zip1 v18.2d, v22.2d, v23.2d
zip2 v17.2d, v20.2d, v21.2d
zip2 v19.2d, v22.2d, v23.2d
ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8
ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16
pmull v1.8h, v7.8b, v3.8b // D = A*B
ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32
ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24
eor v16.16b, v16.16b, v17.16b
eor v18.16b, v18.16b, v19.16b
eor v1.16b, v1.16b, v16.16b
eor v1.16b, v1.16b, v18.16b
ext v16.8b, v6.8b, v6.8b, #1 // A1
pmull v16.8h, v16.8b, v4.8b // F = A1*B
ext v2.8b, v4.8b, v4.8b, #1 // B1
pmull v2.8h, v6.8b, v2.8b // E = A*B1
ext v17.8b, v6.8b, v6.8b, #2 // A2
pmull v17.8h, v17.8b, v4.8b // H = A2*B
ext v19.8b, v4.8b, v4.8b, #2 // B2
pmull v19.8h, v6.8b, v19.8b // G = A*B2
ext v18.8b, v6.8b, v6.8b, #3 // A3
eor v16.16b, v16.16b, v2.16b // L = E + F
pmull v18.8h, v18.8b, v4.8b // J = A3*B
ext v2.8b, v4.8b, v4.8b, #3 // B3
eor v17.16b, v17.16b, v19.16b // M = G + H
pmull v2.8h, v6.8b, v2.8b // I = A*B3
// Here we diverge from the 32-bit version. It computes the following
// (instructions reordered for clarity):
//
// veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L)
// vand $t0#hi, $t0#hi, $k48
// veor $t0#lo, $t0#lo, $t0#hi
//
// veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M)
// vand $t1#hi, $t1#hi, $k32
// veor $t1#lo, $t1#lo, $t1#hi
//
// veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N)
// vand $t2#hi, $t2#hi, $k16
// veor $t2#lo, $t2#lo, $t2#hi
//
// veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K)
// vmov.i64 $t3#hi, #0
//
// $kN is a mask with the bottom N bits set. AArch64 cannot compute on
// upper halves of SIMD registers, so we must split each half into
// separate registers. To compensate, we pair computations up and
// parallelize.
ext v19.8b, v4.8b, v4.8b, #4 // B4
eor v18.16b, v18.16b, v2.16b // N = I + J
pmull v19.8h, v6.8b, v19.8b // K = A*B4
// This can probably be scheduled more efficiently. For now, we just
// pair up independent instructions.
zip1 v20.2d, v16.2d, v17.2d
zip1 v22.2d, v18.2d, v19.2d
zip2 v21.2d, v16.2d, v17.2d
zip2 v23.2d, v18.2d, v19.2d
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
and v21.16b, v21.16b, v24.16b
and v23.16b, v23.16b, v25.16b
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
zip1 v16.2d, v20.2d, v21.2d
zip1 v18.2d, v22.2d, v23.2d
zip2 v17.2d, v20.2d, v21.2d
zip2 v19.2d, v22.2d, v23.2d
ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8
ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16
pmull v2.8h, v6.8b, v4.8b // D = A*B
ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32
ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24
eor v16.16b, v16.16b, v17.16b
eor v18.16b, v18.16b, v19.16b
eor v2.16b, v2.16b, v16.16b
eor v2.16b, v2.16b, v18.16b
ext v16.16b, v0.16b, v2.16b, #8
eor v1.16b, v1.16b, v0.16b // Karatsuba post-processing
eor v1.16b, v1.16b, v2.16b
eor v1.16b, v1.16b, v16.16b // Xm overlaps Xh.lo and Xl.hi
ins v0.d[1], v1.d[0] // Xh|Xl - 256-bit result
// This is a no-op due to the ins instruction below.
// ins v2.d[0], v1.d[1]
// equivalent of reduction_avx from ghash-x86_64.pl
shl v17.2d, v0.2d, #57 // 1st phase
shl v18.2d, v0.2d, #62
eor v18.16b, v18.16b, v17.16b //
shl v17.2d, v0.2d, #63
eor v18.16b, v18.16b, v17.16b //
// Note Xm contains {Xl.d[1], Xh.d[0]}.
eor v18.16b, v18.16b, v1.16b
ins v0.d[1], v18.d[0] // Xl.d[1] ^= t2.d[0]
ins v2.d[0], v18.d[1] // Xh.d[0] ^= t2.d[1]
ushr v18.2d, v0.2d, #1 // 2nd phase
eor v2.16b, v2.16b,v0.16b
eor v0.16b, v0.16b,v18.16b //
ushr v18.2d, v18.2d, #6
ushr v0.2d, v0.2d, #1 //
eor v0.16b, v0.16b, v2.16b //
eor v0.16b, v0.16b, v18.16b //
subs x3, x3, #16
bne Loop_neon
rev64 v0.16b, v0.16b // byteswap Xi and write
ext v0.16b, v0.16b, v0.16b, #8
st1 {v0.16b}, [x0]
ret
.section __TEXT,__const
.align 4
Lmasks:
.quad 0x0000ffffffffffff // k48
.quad 0x00000000ffffffff // k32
.quad 0x000000000000ffff // k16
.quad 0x0000000000000000 // k0
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,100,101,114,105,118,101,100,32,102,114,111,109,32,65,82,77,118,52,32,118,101,114,115,105,111,110,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
// File: thirdparty/crates/ring-0.17.9/pregenerated/bsaes-armv7-linux32.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__)
@ Copyright 2012-2016 The OpenSSL Project Authors. All Rights Reserved.
@
@ Licensed under the OpenSSL license (the "License"). You may not use
@ this file except in compliance with the License. You can obtain a copy
@ in the file LICENSE in the source distribution or at
@ https://www.openssl.org/source/license.html
@ ====================================================================
@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
@ project. The module is, however, dual licensed under OpenSSL and
@ CRYPTOGAMS licenses depending on where you obtain it. For further
@ details see http://www.openssl.org/~appro/cryptogams/.
@
@ Specific modes and adaptation for Linux kernel by Ard Biesheuvel
@ of Linaro. Permission to use under GPL terms is granted.
@ ====================================================================
@ Bit-sliced AES for ARM NEON
@
@ February 2012.
@
@ This implementation is a direct adaptation of the bsaes-x86_64 module
@ for ARM NEON, except that this module is endian-neutral [in the sense
@ that it can be compiled for either endianness], courtesy of vld1.8's
@ neutrality. The initial version doesn't implement an interface to
@ OpenSSL, only low-level primitives and unsupported entry points, just
@ enough to collect performance results, which for the Cortex-A8 core are:
@
@ encrypt 19.5 cycles per byte processed with 128-bit key
@ decrypt 22.1 cycles per byte processed with 128-bit key
@ key conv. 440 cycles per 128-bit key/0.18 of 8x block
@
@ Snapdragon S4 encrypts a byte in 17.6 cycles and decrypts one in 19.7,
@ which is [much] worse than anticipated (for further details see
@ http://www.openssl.org/~appro/Snapdragon-S4.html).
@
@ Cortex-A15 manages in 14.2/16.1 cycles [when integer-only code
@ manages in 20.0 cycles].
@
@ When comparing to x86_64 results, keep in mind that the NEON unit is
@ [mostly] single-issue and thus can't [fully] benefit from
@ instruction-level parallelism. And when comparing to aes-armv4
@ results, keep in mind the key schedule conversion overhead (see
@ bsaes-x86_64.pl for further details)...
@
@ <appro@openssl.org>
@ April-August 2013
@ Add CBC, CTR and XTS subroutines and adapt for kernel use; courtesy of Ard.
#ifndef __KERNEL__
# include <ring-core/arm_arch.h>
# define VFP_ABI_PUSH vstmdb sp!,{d8-d15}
# define VFP_ABI_POP vldmia sp!,{d8-d15}
# define VFP_ABI_FRAME 0x40
#else
# define VFP_ABI_PUSH
# define VFP_ABI_POP
# define VFP_ABI_FRAME 0
# define BSAES_ASM_EXTENDED_KEY
# define __ARM_MAX_ARCH__ 7
#endif
#ifdef __thumb__
# define adrl adr
#endif
#if __ARM_MAX_ARCH__>=7
.arch armv7-a
.fpu neon
.text
.syntax unified @ ARMv7-capable assembler is expected to handle this
#if defined(__thumb2__) && !defined(__APPLE__)
.thumb
#else
.code 32
# undef __thumb2__
#endif
.type _bsaes_const,%object
.align 6
_bsaes_const:
.LM0ISR:@ InvShiftRows constants
.quad 0x0a0e0206070b0f03, 0x0004080c0d010509
.LISR:
.quad 0x0504070602010003, 0x0f0e0d0c080b0a09
.LISRM0:
.quad 0x01040b0e0205080f, 0x0306090c00070a0d
.LM0SR:@ ShiftRows constants
.quad 0x0a0e02060f03070b, 0x0004080c05090d01
.LSR:
.quad 0x0504070600030201, 0x0f0e0d0c0a09080b
.LSRM0:
.quad 0x0304090e00050a0f, 0x01060b0c0207080d
.LM0:
.quad 0x02060a0e03070b0f, 0x0004080c0105090d
.LREVM0SR:
.quad 0x090d01050c000408, 0x03070b0f060a0e02
.byte 66,105,116,45,115,108,105,99,101,100,32,65,69,83,32,102,111,114,32,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 6
.size _bsaes_const,.-_bsaes_const
.type _bsaes_encrypt8,%function
.align 4
_bsaes_encrypt8:
adr r6,.
vldmia r4!, {q9} @ round 0 key
#if defined(__thumb2__) || defined(__APPLE__)
adr r6,.LM0SR
#else
sub r6,r6,#_bsaes_encrypt8-.LM0SR
#endif
vldmia r6!, {q8} @ .LM0SR
_bsaes_encrypt8_alt:
veor q10, q0, q9 @ xor with round0 key
veor q11, q1, q9
vtbl.8 d0, {q10}, d16
vtbl.8 d1, {q10}, d17
veor q12, q2, q9
vtbl.8 d2, {q11}, d16
vtbl.8 d3, {q11}, d17
veor q13, q3, q9
vtbl.8 d4, {q12}, d16
vtbl.8 d5, {q12}, d17
veor q14, q4, q9
vtbl.8 d6, {q13}, d16
vtbl.8 d7, {q13}, d17
veor q15, q5, q9
vtbl.8 d8, {q14}, d16
vtbl.8 d9, {q14}, d17
veor q10, q6, q9
vtbl.8 d10, {q15}, d16
vtbl.8 d11, {q15}, d17
veor q11, q7, q9
vtbl.8 d12, {q10}, d16
vtbl.8 d13, {q10}, d17
vtbl.8 d14, {q11}, d16
vtbl.8 d15, {q11}, d17
_bsaes_encrypt8_bitslice:
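@ Bit-slice the eight blocks: the shift/AND/XOR ladders below, with masks
@ 0x55/0x33/0x0f and shifts of 1, 2 and 4, swap bit groups between
@ register pairs, i.e. an 8x8 bit-matrix transpose across q0-q7.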
vmov.i8 q8,#0x55 @ compose .LBS0
vmov.i8 q9,#0x33 @ compose .LBS1
vshr.u64 q10, q6, #1
vshr.u64 q11, q4, #1
veor q10, q10, q7
veor q11, q11, q5
vand q10, q10, q8
vand q11, q11, q8
veor q7, q7, q10
vshl.u64 q10, q10, #1
veor q5, q5, q11
vshl.u64 q11, q11, #1
veor q6, q6, q10
veor q4, q4, q11
vshr.u64 q10, q2, #1
vshr.u64 q11, q0, #1
veor q10, q10, q3
veor q11, q11, q1
vand q10, q10, q8
vand q11, q11, q8
veor q3, q3, q10
vshl.u64 q10, q10, #1
veor q1, q1, q11
vshl.u64 q11, q11, #1
veor q2, q2, q10
veor q0, q0, q11
vmov.i8 q8,#0x0f @ compose .LBS2
vshr.u64 q10, q5, #2
vshr.u64 q11, q4, #2
veor q10, q10, q7
veor q11, q11, q6
vand q10, q10, q9
vand q11, q11, q9
veor q7, q7, q10
vshl.u64 q10, q10, #2
veor q6, q6, q11
vshl.u64 q11, q11, #2
veor q5, q5, q10
veor q4, q4, q11
vshr.u64 q10, q1, #2
vshr.u64 q11, q0, #2
veor q10, q10, q3
veor q11, q11, q2
vand q10, q10, q9
vand q11, q11, q9
veor q3, q3, q10
vshl.u64 q10, q10, #2
veor q2, q2, q11
vshl.u64 q11, q11, #2
veor q1, q1, q10
veor q0, q0, q11
vshr.u64 q10, q3, #4
vshr.u64 q11, q2, #4
veor q10, q10, q7
veor q11, q11, q6
vand q10, q10, q8
vand q11, q11, q8
veor q7, q7, q10
vshl.u64 q10, q10, #4
veor q6, q6, q11
vshl.u64 q11, q11, #4
veor q3, q3, q10
veor q2, q2, q11
vshr.u64 q10, q1, #4
vshr.u64 q11, q0, #4
veor q10, q10, q5
veor q11, q11, q4
vand q10, q10, q8
vand q11, q11, q8
veor q5, q5, q10
vshl.u64 q10, q10, #4
veor q4, q4, q11
vshl.u64 q11, q11, #4
veor q1, q1, q10
veor q0, q0, q11
sub r5,r5,#1
b .Lenc_sbox
.align 4
.Lenc_loop:
vldmia r4!, {q8,q9,q10,q11}
veor q8, q8, q0
veor q9, q9, q1
vtbl.8 d0, {q8}, d24
vtbl.8 d1, {q8}, d25
vldmia r4!, {q8}
veor q10, q10, q2
vtbl.8 d2, {q9}, d24
vtbl.8 d3, {q9}, d25
vldmia r4!, {q9}
veor q11, q11, q3
vtbl.8 d4, {q10}, d24
vtbl.8 d5, {q10}, d25
vldmia r4!, {q10}
vtbl.8 d6, {q11}, d24
vtbl.8 d7, {q11}, d25
vldmia r4!, {q11}
veor q8, q8, q4
veor q9, q9, q5
vtbl.8 d8, {q8}, d24
vtbl.8 d9, {q8}, d25
veor q10, q10, q6
vtbl.8 d10, {q9}, d24
vtbl.8 d11, {q9}, d25
veor q11, q11, q7
vtbl.8 d12, {q10}, d24
vtbl.8 d13, {q10}, d25
vtbl.8 d14, {q11}, d24
vtbl.8 d15, {q11}, d25
.Lenc_sbox:
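@ Bitsliced AES S-box: the XOR/AND/OR network below evaluates the S-box
@ as a boolean circuit on all 128 bytes of the eight blocks at once.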
veor q2, q2, q1
veor q5, q5, q6
veor q3, q3, q0
veor q6, q6, q2
veor q5, q5, q0
veor q6, q6, q3
veor q3, q3, q7
veor q7, q7, q5
veor q3, q3, q4
veor q4, q4, q5
veor q2, q2, q7
veor q3, q3, q1
veor q1, q1, q5
veor q11, q7, q4
veor q10, q1, q2
veor q9, q5, q3
veor q13, q2, q4
vmov q8, q10
veor q12, q6, q0
vorr q10, q10, q9
veor q15, q11, q8
vand q14, q11, q12
vorr q11, q11, q12
veor q12, q12, q9
vand q8, q8, q9
veor q9, q3, q0
vand q15, q15, q12
vand q13, q13, q9
veor q9, q7, q1
veor q12, q5, q6
veor q11, q11, q13
veor q10, q10, q13
vand q13, q9, q12
vorr q9, q9, q12
veor q11, q11, q15
veor q8, q8, q13
veor q10, q10, q14
veor q9, q9, q15
veor q8, q8, q14
vand q12, q2, q3
veor q9, q9, q14
vand q13, q4, q0
vand q14, q1, q5
vorr q15, q7, q6
veor q11, q11, q12
veor q9, q9, q14
veor q8, q8, q15
veor q10, q10, q13
@ Inv_GF16 0, 1, 2, 3, s0, s1, s2, s3
@ new smaller inversion
vand q14, q11, q9
vmov q12, q8
veor q13, q10, q14
veor q15, q8, q14
veor q14, q8, q14 @ q14=q15
vbsl q13, q9, q8
vbsl q15, q11, q10
veor q11, q11, q10
vbsl q12, q13, q14
vbsl q8, q14, q13
vand q14, q12, q15
veor q9, q9, q8
veor q14, q14, q11
veor q12, q6, q0
veor q8, q5, q3
veor q10, q15, q14
vand q10, q10, q6
veor q6, q6, q5
vand q11, q5, q15
vand q6, q6, q14
veor q5, q11, q10
veor q6, q6, q11
veor q15, q15, q13
veor q14, q14, q9
veor q11, q15, q14
veor q10, q13, q9
vand q11, q11, q12
vand q10, q10, q0
veor q12, q12, q8
veor q0, q0, q3
vand q8, q8, q15
vand q3, q3, q13
vand q12, q12, q14
vand q0, q0, q9
veor q8, q8, q12
veor q0, q0, q3
veor q12, q12, q11
veor q3, q3, q10
veor q6, q6, q12
veor q0, q0, q12
veor q5, q5, q8
veor q3, q3, q8
veor q12, q7, q4
veor q8, q1, q2
veor q11, q15, q14
veor q10, q13, q9
vand q11, q11, q12
vand q10, q10, q4
veor q12, q12, q8
veor q4, q4, q2
vand q8, q8, q15
vand q2, q2, q13
vand q12, q12, q14
vand q4, q4, q9
veor q8, q8, q12
veor q4, q4, q2
veor q12, q12, q11
veor q2, q2, q10
veor q15, q15, q13
veor q14, q14, q9
veor q10, q15, q14
vand q10, q10, q7
veor q7, q7, q1
vand q11, q1, q15
vand q7, q7, q14
veor q1, q11, q10
veor q7, q7, q11
veor q7, q7, q12
veor q4, q4, q12
veor q1, q1, q8
veor q2, q2, q8
veor q7, q7, q0
veor q1, q1, q6
veor q6, q6, q0
veor q4, q4, q7
veor q0, q0, q1
veor q1, q1, q5
veor q5, q5, q2
veor q2, q2, q3
veor q3, q3, q5
veor q4, q4, q5
veor q6, q6, q3
subs r5,r5,#1
bcc .Lenc_done
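@ MixColumns in bitsliced form: each vext.8 #12 is a 32-bit rotate of one
@ slice, and the XOR ladder combines x, (x <<< 32) and
@ ((x ^ (x <<< 32)) <<< 64), as the inline comments below note.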
vext.8 q8, q0, q0, #12 @ x0 <<< 32
vext.8 q9, q1, q1, #12
veor q0, q0, q8 @ x0 ^ (x0 <<< 32)
vext.8 q10, q4, q4, #12
veor q1, q1, q9
vext.8 q11, q6, q6, #12
veor q4, q4, q10
vext.8 q12, q3, q3, #12
veor q6, q6, q11
vext.8 q13, q7, q7, #12
veor q3, q3, q12
vext.8 q14, q2, q2, #12
veor q7, q7, q13
vext.8 q15, q5, q5, #12
veor q2, q2, q14
veor q9, q9, q0
veor q5, q5, q15
vext.8 q0, q0, q0, #8 @ (x0 ^ (x0 <<< 32)) <<< 64)
veor q10, q10, q1
veor q8, q8, q5
veor q9, q9, q5
vext.8 q1, q1, q1, #8
veor q13, q13, q3
veor q0, q0, q8
veor q14, q14, q7
veor q1, q1, q9
vext.8 q8, q3, q3, #8
veor q12, q12, q6
vext.8 q9, q7, q7, #8
veor q15, q15, q2
vext.8 q3, q6, q6, #8
veor q11, q11, q4
vext.8 q7, q5, q5, #8
veor q12, q12, q5
vext.8 q6, q2, q2, #8
veor q11, q11, q5
vext.8 q2, q4, q4, #8
veor q5, q9, q13
veor q4, q8, q12
veor q3, q3, q11
veor q7, q7, q15
veor q6, q6, q14
@ vmov q4, q8
veor q2, q2, q10
@ vmov q5, q9
vldmia r6, {q12} @ .LSR
ite eq @ Thumb2 thing, sanity check in ARM
addeq r6,r6,#0x10
bne .Lenc_loop
vldmia r6, {q12} @ .LSRM0
b .Lenc_loop
.align 4
.Lenc_done:
vmov.i8 q8,#0x55 @ compose .LBS0
vmov.i8 q9,#0x33 @ compose .LBS1
vshr.u64 q10, q2, #1
vshr.u64 q11, q3, #1
veor q10, q10, q5
veor q11, q11, q7
vand q10, q10, q8
vand q11, q11, q8
veor q5, q5, q10
vshl.u64 q10, q10, #1
veor q7, q7, q11
vshl.u64 q11, q11, #1
veor q2, q2, q10
veor q3, q3, q11
vshr.u64 q10, q4, #1
vshr.u64 q11, q0, #1
veor q10, q10, q6
veor q11, q11, q1
vand q10, q10, q8
vand q11, q11, q8
veor q6, q6, q10
vshl.u64 q10, q10, #1
veor q1, q1, q11
vshl.u64 q11, q11, #1
veor q4, q4, q10
veor q0, q0, q11
vmov.i8 q8,#0x0f @ compose .LBS2
vshr.u64 q10, q7, #2
vshr.u64 q11, q3, #2
veor q10, q10, q5
veor q11, q11, q2
vand q10, q10, q9
vand q11, q11, q9
veor q5, q5, q10
vshl.u64 q10, q10, #2
veor q2, q2, q11
vshl.u64 q11, q11, #2
veor q7, q7, q10
veor q3, q3, q11
vshr.u64 q10, q1, #2
vshr.u64 q11, q0, #2
veor q10, q10, q6
veor q11, q11, q4
vand q10, q10, q9
vand q11, q11, q9
veor q6, q6, q10
vshl.u64 q10, q10, #2
veor q4, q4, q11
vshl.u64 q11, q11, #2
veor q1, q1, q10
veor q0, q0, q11
vshr.u64 q10, q6, #4
vshr.u64 q11, q4, #4
veor q10, q10, q5
veor q11, q11, q2
vand q10, q10, q8
vand q11, q11, q8
veor q5, q5, q10
vshl.u64 q10, q10, #4
veor q2, q2, q11
vshl.u64 q11, q11, #4
veor q6, q6, q10
veor q4, q4, q11
vshr.u64 q10, q1, #4
vshr.u64 q11, q0, #4
veor q10, q10, q7
veor q11, q11, q3
vand q10, q10, q8
vand q11, q11, q8
veor q7, q7, q10
vshl.u64 q10, q10, #4
veor q3, q3, q11
vshl.u64 q11, q11, #4
veor q1, q1, q10
veor q0, q0, q11
vldmia r4, {q8} @ last round key
veor q4, q4, q8
veor q6, q6, q8
veor q3, q3, q8
veor q7, q7, q8
veor q2, q2, q8
veor q5, q5, q8
veor q0, q0, q8
veor q1, q1, q8
bx lr
.size _bsaes_encrypt8,.-_bsaes_encrypt8
.type _bsaes_key_convert,%function
.align 4
_bsaes_key_convert:
adr r6,.
vld1.8 {q7}, [r4]! @ load round 0 key
#if defined(__thumb2__) || defined(__APPLE__)
adr r6,.LM0
#else
sub r6,r6,#_bsaes_key_convert-.LM0
#endif
vld1.8 {q15}, [r4]! @ load round 1 key
vmov.i8 q8, #0x01 @ bit masks
vmov.i8 q9, #0x02
vmov.i8 q10, #0x04
vmov.i8 q11, #0x08
vmov.i8 q12, #0x10
vmov.i8 q13, #0x20
vldmia r6, {q14} @ .LM0
#ifdef __ARMEL__
vrev32.8 q7, q7
vrev32.8 q15, q15
#endif
sub r5,r5,#1
vstmia r12!, {q7} @ save round 0 key
b .Lkey_loop
.align 4
.Lkey_loop:
vtbl.8 d14,{q15},d28
vtbl.8 d15,{q15},d29
vmov.i8 q6, #0x40
vmov.i8 q15, #0x80
vtst.8 q0, q7, q8
vtst.8 q1, q7, q9
vtst.8 q2, q7, q10
vtst.8 q3, q7, q11
vtst.8 q4, q7, q12
vtst.8 q5, q7, q13
vtst.8 q6, q7, q6
vtst.8 q7, q7, q15
vld1.8 {q15}, [r4]! @ load next round key
vmvn q0, q0 @ "pnot"
vmvn q1, q1
vmvn q5, q5
vmvn q6, q6
#ifdef __ARMEL__
vrev32.8 q15, q15
#endif
subs r5,r5,#1
vstmia r12!,{q0,q1,q2,q3,q4,q5,q6,q7} @ write bit-sliced round key
bne .Lkey_loop
vmov.i8 q7,#0x63 @ compose .L63
@ don't save last round key
bx lr
.size _bsaes_key_convert,.-_bsaes_key_convert
.globl bsaes_ctr32_encrypt_blocks
.hidden bsaes_ctr32_encrypt_blocks
.type bsaes_ctr32_encrypt_blocks,%function
.align 5
bsaes_ctr32_encrypt_blocks:
@ In OpenSSL, short inputs fall back to aes_nohw_* here. We patch this
@ out to retain a constant-time implementation.
mov ip, sp
stmdb sp!, {r4,r5,r6,r7,r8,r9,r10, lr}
VFP_ABI_PUSH
ldr r8, [ip] @ ctr is 1st arg on the stack
sub sp, sp, #0x10 @ scratch space to carry over the ctr
mov r9, sp @ save sp
ldr r10, [r3, #240] @ get # of rounds
#ifndef BSAES_ASM_EXTENDED_KEY
@ allocate the key schedule on the stack
sub r12, sp, r10, lsl#7 @ 128 bytes per inner round key
add r12, #96 @ size of bit-sliced key schedule
@ populate the key schedule
mov r4, r3 @ pass key
mov r5, r10 @ pass # of rounds
mov sp, r12 @ sp is sp
bl _bsaes_key_convert
veor q7,q7,q15 @ fix up last round key
vstmia r12, {q7} @ save last round key
vld1.8 {q0}, [r8] @ load counter
#ifdef __APPLE__
mov r8, #:lower16:(.LREVM0SR-.LM0)
add r8, r6, r8
#else
add r8, r6, #.LREVM0SR-.LM0 @ borrow r8
#endif
vldmia sp, {q4} @ load round0 key
#else
ldr r12, [r3, #244]
eors r12, #1
beq 0f
@ populate the key schedule
str r12, [r3, #244]
mov r4, r3 @ pass key
mov r5, r10 @ pass # of rounds
add r12, r3, #248 @ pass key schedule
bl _bsaes_key_convert
veor q7,q7,q15 @ fix up last round key
vstmia r12, {q7} @ save last round key
.align 2
0: add r12, r3, #248
vld1.8 {q0}, [r8] @ load counter
adrl r8, .LREVM0SR @ borrow r8
vldmia r12, {q4} @ load round0 key
sub sp, #0x10 @ place for adjusted round0 key
#endif
vmov.i32 q8,#1 @ compose 1<<96
veor q9,q9,q9
vrev32.8 q0,q0
vext.8 q8,q9,q8,#4
vrev32.8 q4,q4
vadd.u32 q9,q8,q8 @ compose 2<<96
vstmia sp, {q4} @ save adjusted round0 key
b .Lctr_enc_loop
.align 4
.Lctr_enc_loop:
vadd.u32 q10, q8, q9 @ compose 3<<96
vadd.u32 q1, q0, q8 @ +1
vadd.u32 q2, q0, q9 @ +2
vadd.u32 q3, q0, q10 @ +3
vadd.u32 q4, q1, q10
vadd.u32 q5, q2, q10
vadd.u32 q6, q3, q10
vadd.u32 q7, q4, q10
vadd.u32 q10, q5, q10 @ next counter
@ Borrow prologue from _bsaes_encrypt8 to use the opportunity
@ to flip byte order in 32-bit counter
vldmia sp, {q9} @ load round0 key
#ifndef BSAES_ASM_EXTENDED_KEY
add r4, sp, #0x10 @ pass next round key
#else
add r4, r3, #264
#endif
vldmia r8, {q8} @ .LREVM0SR
mov r5, r10 @ pass rounds
vstmia r9, {q10} @ save next counter
#ifdef __APPLE__
mov r6, #:lower16:(.LREVM0SR-.LSR)
sub r6, r8, r6
#else
sub r6, r8, #.LREVM0SR-.LSR @ pass constants
#endif
bl _bsaes_encrypt8_alt
subs r2, r2, #8
blo .Lctr_enc_loop_done
vld1.8 {q8,q9}, [r0]! @ load input
vld1.8 {q10,q11}, [r0]!
veor q0, q8
veor q1, q9
vld1.8 {q12,q13}, [r0]!
veor q4, q10
veor q6, q11
vld1.8 {q14,q15}, [r0]!
veor q3, q12
vst1.8 {q0,q1}, [r1]! @ write output
veor q7, q13
veor q2, q14
vst1.8 {q4}, [r1]!
veor q5, q15
vst1.8 {q6}, [r1]!
vmov.i32 q8, #1 @ compose 1<<96
vst1.8 {q3}, [r1]!
veor q9, q9, q9
vst1.8 {q7}, [r1]!
vext.8 q8, q9, q8, #4
vst1.8 {q2}, [r1]!
vadd.u32 q9,q8,q8 @ compose 2<<96
vst1.8 {q5}, [r1]!
vldmia r9, {q0} @ load counter
bne .Lctr_enc_loop
b .Lctr_enc_done
.align 4
.Lctr_enc_loop_done:
add r2, r2, #8
vld1.8 {q8}, [r0]! @ load input
veor q0, q8
vst1.8 {q0}, [r1]! @ write output
cmp r2, #2
blo .Lctr_enc_done
vld1.8 {q9}, [r0]!
veor q1, q9
vst1.8 {q1}, [r1]!
beq .Lctr_enc_done
vld1.8 {q10}, [r0]!
veor q4, q10
vst1.8 {q4}, [r1]!
cmp r2, #4
blo .Lctr_enc_done
vld1.8 {q11}, [r0]!
veor q6, q11
vst1.8 {q6}, [r1]!
beq .Lctr_enc_done
vld1.8 {q12}, [r0]!
veor q3, q12
vst1.8 {q3}, [r1]!
cmp r2, #6
blo .Lctr_enc_done
vld1.8 {q13}, [r0]!
veor q7, q13
vst1.8 {q7}, [r1]!
beq .Lctr_enc_done
vld1.8 {q14}, [r0]
veor q2, q14
vst1.8 {q2}, [r1]!
.Lctr_enc_done:
vmov.i32 q0, #0
vmov.i32 q1, #0
#ifndef BSAES_ASM_EXTENDED_KEY
.Lctr_enc_bzero:@ wipe key schedule [if any]
vstmia sp!, {q0,q1}
cmp sp, r9
bne .Lctr_enc_bzero
#else
vstmia sp, {q0,q1}
#endif
mov sp, r9
add sp, #0x10 @ add sp,r9,#0x10 is no good for thumb
VFP_ABI_POP
ldmia sp!, {r4,r5,r6,r7,r8,r9,r10, pc} @ return
@ OpenSSL contains aes_nohw_* fallback code here. We patch this
@ out to retain a constant-time implementation.
.size bsaes_ctr32_encrypt_blocks,.-bsaes_ctr32_encrypt_blocks
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__)
// File: thirdparty/crates/ring-0.17.9/pregenerated/sha256-x86_64-elf.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__)
.text
.globl sha256_block_data_order_nohw
.hidden sha256_block_data_order_nohw
.type sha256_block_data_order_nohw,@function
.align 16
sha256_block_data_order_nohw:
.cfi_startproc
_CET_ENDBR
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
shlq $4,%rdx
subq $64+32,%rsp
leaq (%rsi,%rdx,4),%rdx
andq $-64,%rsp
movq %rdi,64+0(%rsp)
movq %rsi,64+8(%rsp)
movq %rdx,64+16(%rsp)
movq %rax,88(%rsp)
.cfi_escape 0x0f,0x06,0x77,0xd8,0x00,0x06,0x23,0x08
.Lprologue:
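// Scalar path setup: the context pointer (rdi) and the input/end pointers
// are spilled to the stack frame; during the rounds %edi and %r15d serve
// as scratch for the Ch/Maj logic while %rbp walks the K256 constants and
// %rsp[0..63] holds the message schedule.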
movl 0(%rdi),%eax
movl 4(%rdi),%ebx
movl 8(%rdi),%ecx
movl 12(%rdi),%edx
movl 16(%rdi),%r8d
movl 20(%rdi),%r9d
movl 24(%rdi),%r10d
movl 28(%rdi),%r11d
jmp .Lloop
.align 16
.Lloop:
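// One fully unrolled SHA-256 round per instruction group below: the
// rorl/xorl chains compute Sigma1(e) and Sigma0(a), the andl/xorl pairs
// compute Ch(e,f,g) and Maj(a,b,c), and (%rbp) supplies the round constant.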
movl %ebx,%edi
leaq K256(%rip),%rbp
xorl %ecx,%edi
movl 0(%rsi),%r12d
movl %r8d,%r13d
movl %eax,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r9d,%r15d
xorl %r8d,%r13d
rorl $9,%r14d
xorl %r10d,%r15d
movl %r12d,0(%rsp)
xorl %eax,%r14d
andl %r8d,%r15d
rorl $5,%r13d
addl %r11d,%r12d
xorl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r15d
addl (%rbp),%r12d
xorl %eax,%r14d
xorl %ebx,%r15d
rorl $6,%r13d
movl %ebx,%r11d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r11d
addl %r12d,%edx
addl %r12d,%r11d
leaq 4(%rbp),%rbp
addl %r14d,%r11d
movl 4(%rsi),%r12d
movl %edx,%r13d
movl %r11d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r8d,%edi
xorl %edx,%r13d
rorl $9,%r14d
xorl %r9d,%edi
movl %r12d,4(%rsp)
xorl %r11d,%r14d
andl %edx,%edi
rorl $5,%r13d
addl %r10d,%r12d
xorl %r9d,%edi
rorl $11,%r14d
xorl %edx,%r13d
addl %edi,%r12d
movl %r11d,%edi
addl (%rbp),%r12d
xorl %r11d,%r14d
xorl %eax,%edi
rorl $6,%r13d
movl %eax,%r10d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r10d
addl %r12d,%ecx
addl %r12d,%r10d
leaq 4(%rbp),%rbp
addl %r14d,%r10d
movl 8(%rsi),%r12d
movl %ecx,%r13d
movl %r10d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %edx,%r15d
xorl %ecx,%r13d
rorl $9,%r14d
xorl %r8d,%r15d
movl %r12d,8(%rsp)
xorl %r10d,%r14d
andl %ecx,%r15d
rorl $5,%r13d
addl %r9d,%r12d
xorl %r8d,%r15d
rorl $11,%r14d
xorl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r15d
addl (%rbp),%r12d
xorl %r10d,%r14d
xorl %r11d,%r15d
rorl $6,%r13d
movl %r11d,%r9d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r9d
addl %r12d,%ebx
addl %r12d,%r9d
leaq 4(%rbp),%rbp
addl %r14d,%r9d
movl 12(%rsi),%r12d
movl %ebx,%r13d
movl %r9d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %ecx,%edi
xorl %ebx,%r13d
rorl $9,%r14d
xorl %edx,%edi
movl %r12d,12(%rsp)
xorl %r9d,%r14d
andl %ebx,%edi
rorl $5,%r13d
addl %r8d,%r12d
xorl %edx,%edi
rorl $11,%r14d
xorl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%edi
addl (%rbp),%r12d
xorl %r9d,%r14d
xorl %r10d,%edi
rorl $6,%r13d
movl %r10d,%r8d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r8d
addl %r12d,%eax
addl %r12d,%r8d
leaq 20(%rbp),%rbp
addl %r14d,%r8d
movl 16(%rsi),%r12d
movl %eax,%r13d
movl %r8d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %ebx,%r15d
xorl %eax,%r13d
rorl $9,%r14d
xorl %ecx,%r15d
movl %r12d,16(%rsp)
xorl %r8d,%r14d
andl %eax,%r15d
rorl $5,%r13d
addl %edx,%r12d
xorl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r15d
addl (%rbp),%r12d
xorl %r8d,%r14d
xorl %r9d,%r15d
rorl $6,%r13d
movl %r9d,%edx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%edx
addl %r12d,%r11d
addl %r12d,%edx
leaq 4(%rbp),%rbp
addl %r14d,%edx
movl 20(%rsi),%r12d
movl %r11d,%r13d
movl %edx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %eax,%edi
xorl %r11d,%r13d
rorl $9,%r14d
xorl %ebx,%edi
movl %r12d,20(%rsp)
xorl %edx,%r14d
andl %r11d,%edi
rorl $5,%r13d
addl %ecx,%r12d
xorl %ebx,%edi
rorl $11,%r14d
xorl %r11d,%r13d
addl %edi,%r12d
movl %edx,%edi
addl (%rbp),%r12d
xorl %edx,%r14d
xorl %r8d,%edi
rorl $6,%r13d
movl %r8d,%ecx
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%ecx
addl %r12d,%r10d
addl %r12d,%ecx
leaq 4(%rbp),%rbp
addl %r14d,%ecx
movl 24(%rsi),%r12d
movl %r10d,%r13d
movl %ecx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r11d,%r15d
xorl %r10d,%r13d
rorl $9,%r14d
xorl %eax,%r15d
movl %r12d,24(%rsp)
xorl %ecx,%r14d
andl %r10d,%r15d
rorl $5,%r13d
addl %ebx,%r12d
xorl %eax,%r15d
rorl $11,%r14d
xorl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r15d
addl (%rbp),%r12d
xorl %ecx,%r14d
xorl %edx,%r15d
rorl $6,%r13d
movl %edx,%ebx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%ebx
addl %r12d,%r9d
addl %r12d,%ebx
leaq 4(%rbp),%rbp
addl %r14d,%ebx
movl 28(%rsi),%r12d
movl %r9d,%r13d
movl %ebx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r10d,%edi
xorl %r9d,%r13d
rorl $9,%r14d
xorl %r11d,%edi
movl %r12d,28(%rsp)
xorl %ebx,%r14d
andl %r9d,%edi
rorl $5,%r13d
addl %eax,%r12d
xorl %r11d,%edi
rorl $11,%r14d
xorl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%edi
addl (%rbp),%r12d
xorl %ebx,%r14d
xorl %ecx,%edi
rorl $6,%r13d
movl %ecx,%eax
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%eax
addl %r12d,%r8d
addl %r12d,%eax
leaq 20(%rbp),%rbp
addl %r14d,%eax
movl 32(%rsi),%r12d
movl %r8d,%r13d
movl %eax,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r9d,%r15d
xorl %r8d,%r13d
rorl $9,%r14d
xorl %r10d,%r15d
movl %r12d,32(%rsp)
xorl %eax,%r14d
andl %r8d,%r15d
rorl $5,%r13d
addl %r11d,%r12d
xorl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r15d
addl (%rbp),%r12d
xorl %eax,%r14d
xorl %ebx,%r15d
rorl $6,%r13d
movl %ebx,%r11d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r11d
addl %r12d,%edx
addl %r12d,%r11d
leaq 4(%rbp),%rbp
addl %r14d,%r11d
movl 36(%rsi),%r12d
movl %edx,%r13d
movl %r11d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r8d,%edi
xorl %edx,%r13d
rorl $9,%r14d
xorl %r9d,%edi
movl %r12d,36(%rsp)
xorl %r11d,%r14d
andl %edx,%edi
rorl $5,%r13d
addl %r10d,%r12d
xorl %r9d,%edi
rorl $11,%r14d
xorl %edx,%r13d
addl %edi,%r12d
movl %r11d,%edi
addl (%rbp),%r12d
xorl %r11d,%r14d
xorl %eax,%edi
rorl $6,%r13d
movl %eax,%r10d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r10d
addl %r12d,%ecx
addl %r12d,%r10d
leaq 4(%rbp),%rbp
addl %r14d,%r10d
movl 40(%rsi),%r12d
movl %ecx,%r13d
movl %r10d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %edx,%r15d
xorl %ecx,%r13d
rorl $9,%r14d
xorl %r8d,%r15d
movl %r12d,40(%rsp)
xorl %r10d,%r14d
andl %ecx,%r15d
rorl $5,%r13d
addl %r9d,%r12d
xorl %r8d,%r15d
rorl $11,%r14d
xorl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r15d
addl (%rbp),%r12d
xorl %r10d,%r14d
xorl %r11d,%r15d
rorl $6,%r13d
movl %r11d,%r9d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r9d
addl %r12d,%ebx
addl %r12d,%r9d
leaq 4(%rbp),%rbp
addl %r14d,%r9d
movl 44(%rsi),%r12d
movl %ebx,%r13d
movl %r9d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %ecx,%edi
xorl %ebx,%r13d
rorl $9,%r14d
xorl %edx,%edi
movl %r12d,44(%rsp)
xorl %r9d,%r14d
andl %ebx,%edi
rorl $5,%r13d
addl %r8d,%r12d
xorl %edx,%edi
rorl $11,%r14d
xorl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%edi
addl (%rbp),%r12d
xorl %r9d,%r14d
xorl %r10d,%edi
rorl $6,%r13d
movl %r10d,%r8d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r8d
addl %r12d,%eax
addl %r12d,%r8d
leaq 20(%rbp),%rbp
addl %r14d,%r8d
movl 48(%rsi),%r12d
movl %eax,%r13d
movl %r8d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %ebx,%r15d
xorl %eax,%r13d
rorl $9,%r14d
xorl %ecx,%r15d
movl %r12d,48(%rsp)
xorl %r8d,%r14d
andl %eax,%r15d
rorl $5,%r13d
addl %edx,%r12d
xorl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r15d
addl (%rbp),%r12d
xorl %r8d,%r14d
xorl %r9d,%r15d
rorl $6,%r13d
movl %r9d,%edx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%edx
addl %r12d,%r11d
addl %r12d,%edx
leaq 4(%rbp),%rbp
addl %r14d,%edx
movl 52(%rsi),%r12d
movl %r11d,%r13d
movl %edx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %eax,%edi
xorl %r11d,%r13d
rorl $9,%r14d
xorl %ebx,%edi
movl %r12d,52(%rsp)
xorl %edx,%r14d
andl %r11d,%edi
rorl $5,%r13d
addl %ecx,%r12d
xorl %ebx,%edi
rorl $11,%r14d
xorl %r11d,%r13d
addl %edi,%r12d
movl %edx,%edi
addl (%rbp),%r12d
xorl %edx,%r14d
xorl %r8d,%edi
rorl $6,%r13d
movl %r8d,%ecx
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%ecx
addl %r12d,%r10d
addl %r12d,%ecx
leaq 4(%rbp),%rbp
addl %r14d,%ecx
movl 56(%rsi),%r12d
movl %r10d,%r13d
movl %ecx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r11d,%r15d
xorl %r10d,%r13d
rorl $9,%r14d
xorl %eax,%r15d
movl %r12d,56(%rsp)
xorl %ecx,%r14d
andl %r10d,%r15d
rorl $5,%r13d
addl %ebx,%r12d
xorl %eax,%r15d
rorl $11,%r14d
xorl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r15d
addl (%rbp),%r12d
xorl %ecx,%r14d
xorl %edx,%r15d
rorl $6,%r13d
movl %edx,%ebx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%ebx
addl %r12d,%r9d
addl %r12d,%ebx
leaq 4(%rbp),%rbp
addl %r14d,%ebx
movl 60(%rsi),%r12d
movl %r9d,%r13d
movl %ebx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r10d,%edi
xorl %r9d,%r13d
rorl $9,%r14d
xorl %r11d,%edi
movl %r12d,60(%rsp)
xorl %ebx,%r14d
andl %r9d,%edi
rorl $5,%r13d
addl %eax,%r12d
xorl %r11d,%edi
rorl $11,%r14d
xorl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%edi
addl (%rbp),%r12d
xorl %ebx,%r14d
xorl %ecx,%edi
rorl $6,%r13d
movl %ecx,%eax
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%eax
addl %r12d,%r8d
addl %r12d,%eax
leaq 20(%rbp),%rbp
jmp .Lrounds_16_xx
.align 16
.Lrounds_16_xx:
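// Rounds 16..63: extend the message schedule in place on the stack,
// W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16], then apply
// the same round function as above.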
movl 4(%rsp),%r13d
movl 56(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%eax
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 36(%rsp),%r12d
addl 0(%rsp),%r12d
movl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r14d
rorl $14,%r13d
movl %r9d,%r15d
xorl %r8d,%r13d
rorl $9,%r14d
xorl %r10d,%r15d
movl %r12d,0(%rsp)
xorl %eax,%r14d
andl %r8d,%r15d
rorl $5,%r13d
addl %r11d,%r12d
xorl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r15d
addl (%rbp),%r12d
xorl %eax,%r14d
xorl %ebx,%r15d
rorl $6,%r13d
movl %ebx,%r11d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r11d
addl %r12d,%edx
addl %r12d,%r11d
leaq 4(%rbp),%rbp
movl 8(%rsp),%r13d
movl 60(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r11d
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 40(%rsp),%r12d
addl 4(%rsp),%r12d
movl %edx,%r13d
addl %edi,%r12d
movl %r11d,%r14d
rorl $14,%r13d
movl %r8d,%edi
xorl %edx,%r13d
rorl $9,%r14d
xorl %r9d,%edi
movl %r12d,4(%rsp)
xorl %r11d,%r14d
andl %edx,%edi
rorl $5,%r13d
addl %r10d,%r12d
xorl %r9d,%edi
rorl $11,%r14d
xorl %edx,%r13d
addl %edi,%r12d
movl %r11d,%edi
addl (%rbp),%r12d
xorl %r11d,%r14d
xorl %eax,%edi
rorl $6,%r13d
movl %eax,%r10d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r10d
addl %r12d,%ecx
addl %r12d,%r10d
leaq 4(%rbp),%rbp
movl 12(%rsp),%r13d
movl 0(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r10d
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 44(%rsp),%r12d
addl 8(%rsp),%r12d
movl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r14d
rorl $14,%r13d
movl %edx,%r15d
xorl %ecx,%r13d
rorl $9,%r14d
xorl %r8d,%r15d
movl %r12d,8(%rsp)
xorl %r10d,%r14d
andl %ecx,%r15d
rorl $5,%r13d
addl %r9d,%r12d
xorl %r8d,%r15d
rorl $11,%r14d
xorl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r15d
addl (%rbp),%r12d
xorl %r10d,%r14d
xorl %r11d,%r15d
rorl $6,%r13d
movl %r11d,%r9d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r9d
addl %r12d,%ebx
addl %r12d,%r9d
leaq 4(%rbp),%rbp
movl 16(%rsp),%r13d
movl 4(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r9d
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 48(%rsp),%r12d
addl 12(%rsp),%r12d
movl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%r14d
rorl $14,%r13d
movl %ecx,%edi
xorl %ebx,%r13d
rorl $9,%r14d
xorl %edx,%edi
movl %r12d,12(%rsp)
xorl %r9d,%r14d
andl %ebx,%edi
rorl $5,%r13d
addl %r8d,%r12d
xorl %edx,%edi
rorl $11,%r14d
xorl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%edi
addl (%rbp),%r12d
xorl %r9d,%r14d
xorl %r10d,%edi
rorl $6,%r13d
movl %r10d,%r8d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r8d
addl %r12d,%eax
addl %r12d,%r8d
leaq 20(%rbp),%rbp
movl 20(%rsp),%r13d
movl 8(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r8d
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 52(%rsp),%r12d
addl 16(%rsp),%r12d
movl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r14d
rorl $14,%r13d
movl %ebx,%r15d
xorl %eax,%r13d
rorl $9,%r14d
xorl %ecx,%r15d
movl %r12d,16(%rsp)
xorl %r8d,%r14d
andl %eax,%r15d
rorl $5,%r13d
addl %edx,%r12d
xorl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r15d
addl (%rbp),%r12d
xorl %r8d,%r14d
xorl %r9d,%r15d
rorl $6,%r13d
movl %r9d,%edx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%edx
addl %r12d,%r11d
addl %r12d,%edx
leaq 4(%rbp),%rbp
movl 24(%rsp),%r13d
movl 12(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%edx
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 56(%rsp),%r12d
addl 20(%rsp),%r12d
movl %r11d,%r13d
addl %edi,%r12d
movl %edx,%r14d
rorl $14,%r13d
movl %eax,%edi
xorl %r11d,%r13d
rorl $9,%r14d
xorl %ebx,%edi
movl %r12d,20(%rsp)
xorl %edx,%r14d
andl %r11d,%edi
rorl $5,%r13d
addl %ecx,%r12d
xorl %ebx,%edi
rorl $11,%r14d
xorl %r11d,%r13d
addl %edi,%r12d
movl %edx,%edi
addl (%rbp),%r12d
xorl %edx,%r14d
xorl %r8d,%edi
rorl $6,%r13d
movl %r8d,%ecx
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%ecx
addl %r12d,%r10d
addl %r12d,%ecx
leaq 4(%rbp),%rbp
movl 28(%rsp),%r13d
movl 16(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%ecx
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 60(%rsp),%r12d
addl 24(%rsp),%r12d
movl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r14d
rorl $14,%r13d
movl %r11d,%r15d
xorl %r10d,%r13d
rorl $9,%r14d
xorl %eax,%r15d
movl %r12d,24(%rsp)
xorl %ecx,%r14d
andl %r10d,%r15d
rorl $5,%r13d
addl %ebx,%r12d
xorl %eax,%r15d
rorl $11,%r14d
xorl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r15d
addl (%rbp),%r12d
xorl %ecx,%r14d
xorl %edx,%r15d
rorl $6,%r13d
movl %edx,%ebx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%ebx
addl %r12d,%r9d
addl %r12d,%ebx
leaq 4(%rbp),%rbp
movl 32(%rsp),%r13d
movl 20(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%ebx
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 0(%rsp),%r12d
addl 28(%rsp),%r12d
movl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%r14d
rorl $14,%r13d
movl %r10d,%edi
xorl %r9d,%r13d
rorl $9,%r14d
xorl %r11d,%edi
movl %r12d,28(%rsp)
xorl %ebx,%r14d
andl %r9d,%edi
rorl $5,%r13d
addl %eax,%r12d
xorl %r11d,%edi
rorl $11,%r14d
xorl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%edi
addl (%rbp),%r12d
xorl %ebx,%r14d
xorl %ecx,%edi
rorl $6,%r13d
movl %ecx,%eax
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%eax
addl %r12d,%r8d
addl %r12d,%eax
leaq 20(%rbp),%rbp
movl 36(%rsp),%r13d
movl 24(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%eax
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 4(%rsp),%r12d
addl 32(%rsp),%r12d
movl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r14d
rorl $14,%r13d
movl %r9d,%r15d
xorl %r8d,%r13d
rorl $9,%r14d
xorl %r10d,%r15d
movl %r12d,32(%rsp)
xorl %eax,%r14d
andl %r8d,%r15d
rorl $5,%r13d
addl %r11d,%r12d
xorl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r13d
addl %r15d,%r12d
movl %eax,%r15d
addl (%rbp),%r12d
xorl %eax,%r14d
xorl %ebx,%r15d
rorl $6,%r13d
movl %ebx,%r11d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r11d
addl %r12d,%edx
addl %r12d,%r11d
leaq 4(%rbp),%rbp
movl 40(%rsp),%r13d
movl 28(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r11d
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 8(%rsp),%r12d
addl 36(%rsp),%r12d
movl %edx,%r13d
addl %edi,%r12d
movl %r11d,%r14d
rorl $14,%r13d
movl %r8d,%edi
xorl %edx,%r13d
rorl $9,%r14d
xorl %r9d,%edi
movl %r12d,36(%rsp)
xorl %r11d,%r14d
andl %edx,%edi
rorl $5,%r13d
addl %r10d,%r12d
xorl %r9d,%edi
rorl $11,%r14d
xorl %edx,%r13d
addl %edi,%r12d
movl %r11d,%edi
addl (%rbp),%r12d
xorl %r11d,%r14d
xorl %eax,%edi
rorl $6,%r13d
movl %eax,%r10d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r10d
addl %r12d,%ecx
addl %r12d,%r10d
leaq 4(%rbp),%rbp
movl 44(%rsp),%r13d
movl 32(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r10d
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 12(%rsp),%r12d
addl 40(%rsp),%r12d
movl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r14d
rorl $14,%r13d
movl %edx,%r15d
xorl %ecx,%r13d
rorl $9,%r14d
xorl %r8d,%r15d
movl %r12d,40(%rsp)
xorl %r10d,%r14d
andl %ecx,%r15d
rorl $5,%r13d
addl %r9d,%r12d
xorl %r8d,%r15d
rorl $11,%r14d
xorl %ecx,%r13d
addl %r15d,%r12d
movl %r10d,%r15d
addl (%rbp),%r12d
xorl %r10d,%r14d
xorl %r11d,%r15d
rorl $6,%r13d
movl %r11d,%r9d
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%r9d
addl %r12d,%ebx
addl %r12d,%r9d
leaq 4(%rbp),%rbp
movl 48(%rsp),%r13d
movl 36(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r9d
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 16(%rsp),%r12d
addl 44(%rsp),%r12d
movl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%r14d
rorl $14,%r13d
movl %ecx,%edi
xorl %ebx,%r13d
rorl $9,%r14d
xorl %edx,%edi
movl %r12d,44(%rsp)
xorl %r9d,%r14d
andl %ebx,%edi
rorl $5,%r13d
addl %r8d,%r12d
xorl %edx,%edi
rorl $11,%r14d
xorl %ebx,%r13d
addl %edi,%r12d
movl %r9d,%edi
addl (%rbp),%r12d
xorl %r9d,%r14d
xorl %r10d,%edi
rorl $6,%r13d
movl %r10d,%r8d
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%r8d
addl %r12d,%eax
addl %r12d,%r8d
leaq 20(%rbp),%rbp
movl 52(%rsp),%r13d
movl 40(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%r8d
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 20(%rsp),%r12d
addl 48(%rsp),%r12d
movl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r14d
rorl $14,%r13d
movl %ebx,%r15d
xorl %eax,%r13d
rorl $9,%r14d
xorl %ecx,%r15d
movl %r12d,48(%rsp)
xorl %r8d,%r14d
andl %eax,%r15d
rorl $5,%r13d
addl %edx,%r12d
xorl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r13d
addl %r15d,%r12d
movl %r8d,%r15d
addl (%rbp),%r12d
xorl %r8d,%r14d
xorl %r9d,%r15d
rorl $6,%r13d
movl %r9d,%edx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%edx
addl %r12d,%r11d
addl %r12d,%edx
leaq 4(%rbp),%rbp
movl 56(%rsp),%r13d
movl 44(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%edx
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 24(%rsp),%r12d
addl 52(%rsp),%r12d
movl %r11d,%r13d
addl %edi,%r12d
movl %edx,%r14d
rorl $14,%r13d
movl %eax,%edi
xorl %r11d,%r13d
rorl $9,%r14d
xorl %ebx,%edi
movl %r12d,52(%rsp)
xorl %edx,%r14d
andl %r11d,%edi
rorl $5,%r13d
addl %ecx,%r12d
xorl %ebx,%edi
rorl $11,%r14d
xorl %r11d,%r13d
addl %edi,%r12d
movl %edx,%edi
addl (%rbp),%r12d
xorl %edx,%r14d
xorl %r8d,%edi
rorl $6,%r13d
movl %r8d,%ecx
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%ecx
addl %r12d,%r10d
addl %r12d,%ecx
leaq 4(%rbp),%rbp
movl 60(%rsp),%r13d
movl 48(%rsp),%r15d
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%ecx
movl %r15d,%r14d
rorl $2,%r15d
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
xorl %r13d,%r12d
xorl %r14d,%r15d
addl 28(%rsp),%r12d
addl 56(%rsp),%r12d
movl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r14d
rorl $14,%r13d
movl %r11d,%r15d
xorl %r10d,%r13d
rorl $9,%r14d
xorl %eax,%r15d
movl %r12d,56(%rsp)
xorl %ecx,%r14d
andl %r10d,%r15d
rorl $5,%r13d
addl %ebx,%r12d
xorl %eax,%r15d
rorl $11,%r14d
xorl %r10d,%r13d
addl %r15d,%r12d
movl %ecx,%r15d
addl (%rbp),%r12d
xorl %ecx,%r14d
xorl %edx,%r15d
rorl $6,%r13d
movl %edx,%ebx
andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
xorl %edi,%ebx
addl %r12d,%r9d
addl %r12d,%ebx
leaq 4(%rbp),%rbp
movl 0(%rsp),%r13d
movl 52(%rsp),%edi
movl %r13d,%r12d
rorl $11,%r13d
addl %r14d,%ebx
movl %edi,%r14d
rorl $2,%edi
xorl %r12d,%r13d
shrl $3,%r12d
rorl $7,%r13d
xorl %r14d,%edi
shrl $10,%r14d
rorl $17,%edi
xorl %r13d,%r12d
xorl %r14d,%edi
addl 32(%rsp),%r12d
addl 60(%rsp),%r12d
movl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%r14d
rorl $14,%r13d
movl %r10d,%edi
xorl %r9d,%r13d
rorl $9,%r14d
xorl %r11d,%edi
movl %r12d,60(%rsp)
xorl %ebx,%r14d
andl %r9d,%edi
rorl $5,%r13d
addl %eax,%r12d
xorl %r11d,%edi
rorl $11,%r14d
xorl %r9d,%r13d
addl %edi,%r12d
movl %ebx,%edi
addl (%rbp),%r12d
xorl %ebx,%r14d
xorl %ecx,%edi
rorl $6,%r13d
movl %ecx,%eax
andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
xorl %r15d,%eax
addl %r12d,%r8d
addl %r12d,%eax
leaq 20(%rbp),%rbp
cmpb $0,3(%rbp)
jnz .Lrounds_16_xx
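// Round-constant table exhausted: reload the context pointer from the
// frame, fold the working variables back into the hash state, and loop
// while input blocks remain.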
movq 64+0(%rsp),%rdi
addl %r14d,%eax
leaq 64(%rsi),%rsi
addl 0(%rdi),%eax
addl 4(%rdi),%ebx
addl 8(%rdi),%ecx
addl 12(%rdi),%edx
addl 16(%rdi),%r8d
addl 20(%rdi),%r9d
addl 24(%rdi),%r10d
addl 28(%rdi),%r11d
cmpq 64+16(%rsp),%rsi
movl %eax,0(%rdi)
movl %ebx,4(%rdi)
movl %ecx,8(%rdi)
movl %edx,12(%rdi)
movl %r8d,16(%rdi)
movl %r9d,20(%rdi)
movl %r10d,24(%rdi)
movl %r11d,28(%rdi)
jb .Lloop
movq 88(%rsp),%rsi
.cfi_def_cfa %rsi,8
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lepilogue:
ret
.cfi_endproc
.size sha256_block_data_order_nohw,.-sha256_block_data_order_nohw
.section .rodata
.align 64
.type K256,@object
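// Each 16-byte row of round constants is emitted twice; the scalar rounds
// skip the duplicate with leaq 20(%rbp) while the SIMD paths address rows
// at a 32-byte stride. The doubled rows after the constants are pshufb
// masks: the byte-swap mask at K256+512 and the two sigma shuffle masks
// at K256+512+32/+64 (loaded into %xmm8/%xmm9 by the AVX path).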
K256:
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
.long 0x03020100,0x0b0a0908,0xffffffff,0xffffffff
.long 0x03020100,0x0b0a0908,0xffffffff,0xffffffff
.long 0xffffffff,0xffffffff,0x03020100,0x0b0a0908
.long 0xffffffff,0xffffffff,0x03020100,0x0b0a0908
.byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.text
.globl sha256_block_data_order_hw
.hidden sha256_block_data_order_hw
.type sha256_block_data_order_hw,@function
.align 64
sha256_block_data_order_hw:
.cfi_startproc
_CET_ENDBR
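// SHA-NI path. The .byte sequences below hand-encode sha256rnds2
// (0f 38 cb), sha256msg1 (0f 38 cc) and sha256msg2 (0f 38 cd), plus
// palignr (66 0f 3a 0f) and pshufb (66 0f 38 00), for assemblers that
// lack SHA extension support.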
leaq K256+128(%rip),%rcx
movdqu (%rdi),%xmm1
movdqu 16(%rdi),%xmm2
movdqa 512-128(%rcx),%xmm7
pshufd $0x1b,%xmm1,%xmm0
pshufd $0xb1,%xmm1,%xmm1
pshufd $0x1b,%xmm2,%xmm2
movdqa %xmm7,%xmm8
.byte 102,15,58,15,202,8
punpcklqdq %xmm0,%xmm2
jmp .Loop_shaext
.align 16
.Loop_shaext:
movdqu (%rsi),%xmm3
movdqu 16(%rsi),%xmm4
movdqu 32(%rsi),%xmm5
.byte 102,15,56,0,223
movdqu 48(%rsi),%xmm6
movdqa 0-128(%rcx),%xmm0
paddd %xmm3,%xmm0
.byte 102,15,56,0,231
movdqa %xmm2,%xmm10
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
nop
movdqa %xmm1,%xmm9
.byte 15,56,203,202
movdqa 32-128(%rcx),%xmm0
paddd %xmm4,%xmm0
.byte 102,15,56,0,239
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
leaq 64(%rsi),%rsi
.byte 15,56,204,220
.byte 15,56,203,202
movdqa 64-128(%rcx),%xmm0
paddd %xmm5,%xmm0
.byte 102,15,56,0,247
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm6,%xmm7
.byte 102,15,58,15,253,4
nop
paddd %xmm7,%xmm3
.byte 15,56,204,229
.byte 15,56,203,202
movdqa 96-128(%rcx),%xmm0
paddd %xmm6,%xmm0
.byte 15,56,205,222
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm3,%xmm7
.byte 102,15,58,15,254,4
nop
paddd %xmm7,%xmm4
.byte 15,56,204,238
.byte 15,56,203,202
movdqa 128-128(%rcx),%xmm0
paddd %xmm3,%xmm0
.byte 15,56,205,227
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm4,%xmm7
.byte 102,15,58,15,251,4
nop
paddd %xmm7,%xmm5
.byte 15,56,204,243
.byte 15,56,203,202
movdqa 160-128(%rcx),%xmm0
paddd %xmm4,%xmm0
.byte 15,56,205,236
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm5,%xmm7
.byte 102,15,58,15,252,4
nop
paddd %xmm7,%xmm6
.byte 15,56,204,220
.byte 15,56,203,202
movdqa 192-128(%rcx),%xmm0
paddd %xmm5,%xmm0
.byte 15,56,205,245
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm6,%xmm7
.byte 102,15,58,15,253,4
nop
paddd %xmm7,%xmm3
.byte 15,56,204,229
.byte 15,56,203,202
movdqa 224-128(%rcx),%xmm0
paddd %xmm6,%xmm0
.byte 15,56,205,222
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm3,%xmm7
.byte 102,15,58,15,254,4
nop
paddd %xmm7,%xmm4
.byte 15,56,204,238
.byte 15,56,203,202
movdqa 256-128(%rcx),%xmm0
paddd %xmm3,%xmm0
.byte 15,56,205,227
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm4,%xmm7
.byte 102,15,58,15,251,4
nop
paddd %xmm7,%xmm5
.byte 15,56,204,243
.byte 15,56,203,202
movdqa 288-128(%rcx),%xmm0
paddd %xmm4,%xmm0
.byte 15,56,205,236
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm5,%xmm7
.byte 102,15,58,15,252,4
nop
paddd %xmm7,%xmm6
.byte 15,56,204,220
.byte 15,56,203,202
movdqa 320-128(%rcx),%xmm0
paddd %xmm5,%xmm0
.byte 15,56,205,245
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm6,%xmm7
.byte 102,15,58,15,253,4
nop
paddd %xmm7,%xmm3
.byte 15,56,204,229
.byte 15,56,203,202
movdqa 352-128(%rcx),%xmm0
paddd %xmm6,%xmm0
.byte 15,56,205,222
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm3,%xmm7
.byte 102,15,58,15,254,4
nop
paddd %xmm7,%xmm4
.byte 15,56,204,238
.byte 15,56,203,202
movdqa 384-128(%rcx),%xmm0
paddd %xmm3,%xmm0
.byte 15,56,205,227
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm4,%xmm7
.byte 102,15,58,15,251,4
nop
paddd %xmm7,%xmm5
.byte 15,56,204,243
.byte 15,56,203,202
movdqa 416-128(%rcx),%xmm0
paddd %xmm4,%xmm0
.byte 15,56,205,236
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
movdqa %xmm5,%xmm7
.byte 102,15,58,15,252,4
.byte 15,56,203,202
paddd %xmm7,%xmm6
movdqa 448-128(%rcx),%xmm0
paddd %xmm5,%xmm0
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
.byte 15,56,205,245
movdqa %xmm8,%xmm7
.byte 15,56,203,202
movdqa 480-128(%rcx),%xmm0
paddd %xmm6,%xmm0
nop
.byte 15,56,203,209
pshufd $0x0e,%xmm0,%xmm0
decq %rdx
nop
.byte 15,56,203,202
paddd %xmm10,%xmm2
paddd %xmm9,%xmm1
jnz .Loop_shaext
pshufd $0xb1,%xmm2,%xmm2
pshufd $0x1b,%xmm1,%xmm7
pshufd $0xb1,%xmm1,%xmm1
punpckhqdq %xmm2,%xmm1
.byte 102,15,58,15,215,8
movdqu %xmm1,(%rdi)
movdqu %xmm2,16(%rdi)
ret
.cfi_endproc
.size sha256_block_data_order_hw,.-sha256_block_data_order_hw
.globl sha256_block_data_order_ssse3
.hidden sha256_block_data_order_ssse3
.type sha256_block_data_order_ssse3,@function
.align 64
sha256_block_data_order_ssse3:
.cfi_startproc
_CET_ENDBR
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
shlq $4,%rdx
subq $96,%rsp
leaq (%rsi,%rdx,4),%rdx
andq $-64,%rsp
movq %rdi,64+0(%rsp)
movq %rsi,64+8(%rsp)
movq %rdx,64+16(%rsp)
movq %rax,88(%rsp)
.cfi_escape 0x0f,0x06,0x77,0xd8,0x00,0x06,0x23,0x08
.Lprologue_ssse3:
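// Frame layout: 0..63(%rsp) holds four prepared 16-byte message-schedule
// rows; 64+0/64+8/64+16(%rsp) save the ctx, input and input-end pointers;
// 88(%rsp) keeps the caller's stack pointer for the epilogue.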
movl 0(%rdi),%eax
movl 4(%rdi),%ebx
movl 8(%rdi),%ecx
movl 12(%rdi),%edx
movl 16(%rdi),%r8d
movl 20(%rdi),%r9d
movl 24(%rdi),%r10d
movl 28(%rdi),%r11d
jmp .Lloop_ssse3
.align 16
.Lloop_ssse3:
movdqa K256+512(%rip),%xmm7
movdqu 0(%rsi),%xmm0
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
.byte 102,15,56,0,199
movdqu 48(%rsi),%xmm3
leaq K256(%rip),%rbp
.byte 102,15,56,0,207
movdqa 0(%rbp),%xmm4
movdqa 32(%rbp),%xmm5
.byte 102,15,56,0,215
paddd %xmm0,%xmm4
movdqa 64(%rbp),%xmm6
.byte 102,15,56,0,223
movdqa 96(%rbp),%xmm7
paddd %xmm1,%xmm5
paddd %xmm2,%xmm6
paddd %xmm3,%xmm7
movdqa %xmm4,0(%rsp)
movl %eax,%r14d
movdqa %xmm5,16(%rsp)
movl %ebx,%edi
movdqa %xmm6,32(%rsp)
xorl %ecx,%edi
movdqa %xmm7,48(%rsp)
movl %r8d,%r13d
jmp .Lssse3_00_47
.align 16
.Lssse3_00_47:
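// Each pass expands four message-schedule rows with SSSE3, interleaved
// instruction-by-instruction with sixteen scalar rounds; the cmpb on the
// sentinel byte at 131(%rbp) below exits once the round-constant table
// has been consumed.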
subq $-128,%rbp
rorl $14,%r13d
movdqa %xmm1,%xmm4
movl %r14d,%eax
movl %r9d,%r12d
movdqa %xmm3,%xmm7
rorl $9,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
rorl $5,%r13d
xorl %eax,%r14d
.byte 102,15,58,15,224,4
andl %r8d,%r12d
xorl %r8d,%r13d
.byte 102,15,58,15,250,4
addl 0(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
rorl $11,%r14d
movdqa %xmm4,%xmm5
xorl %ebx,%r15d
addl %r12d,%r11d
movdqa %xmm4,%xmm6
rorl $6,%r13d
andl %r15d,%edi
psrld $3,%xmm4
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
paddd %xmm7,%xmm0
rorl $2,%r14d
addl %r11d,%edx
psrld $7,%xmm6
addl %edi,%r11d
movl %edx,%r13d
pshufd $250,%xmm3,%xmm7
addl %r11d,%r14d
rorl $14,%r13d
pslld $14,%xmm5
movl %r14d,%r11d
movl %r8d,%r12d
pxor %xmm6,%xmm4
rorl $9,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
rorl $5,%r13d
psrld $11,%xmm6
xorl %r11d,%r14d
pxor %xmm5,%xmm4
andl %edx,%r12d
xorl %edx,%r13d
pslld $11,%xmm5
addl 4(%rsp),%r10d
movl %r11d,%edi
pxor %xmm6,%xmm4
xorl %r9d,%r12d
rorl $11,%r14d
movdqa %xmm7,%xmm6
xorl %eax,%edi
addl %r12d,%r10d
pxor %xmm5,%xmm4
rorl $6,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
psrld $10,%xmm7
addl %r13d,%r10d
xorl %eax,%r15d
paddd %xmm4,%xmm0
rorl $2,%r14d
addl %r10d,%ecx
psrlq $17,%xmm6
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
pxor %xmm6,%xmm7
rorl $14,%r13d
movl %r14d,%r10d
movl %edx,%r12d
rorl $9,%r14d
psrlq $2,%xmm6
xorl %ecx,%r13d
xorl %r8d,%r12d
pxor %xmm6,%xmm7
rorl $5,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
pshufd $128,%xmm7,%xmm7
xorl %ecx,%r13d
addl 8(%rsp),%r9d
movl %r10d,%r15d
psrldq $8,%xmm7
xorl %r8d,%r12d
rorl $11,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
rorl $6,%r13d
paddd %xmm7,%xmm0
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
pshufd $80,%xmm0,%xmm7
xorl %r11d,%edi
rorl $2,%r14d
addl %r9d,%ebx
movdqa %xmm7,%xmm6
addl %edi,%r9d
movl %ebx,%r13d
psrld $10,%xmm7
addl %r9d,%r14d
rorl $14,%r13d
psrlq $17,%xmm6
movl %r14d,%r9d
movl %ecx,%r12d
pxor %xmm6,%xmm7
rorl $9,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
rorl $5,%r13d
xorl %r9d,%r14d
psrlq $2,%xmm6
andl %ebx,%r12d
xorl %ebx,%r13d
addl 12(%rsp),%r8d
pxor %xmm6,%xmm7
movl %r9d,%edi
xorl %edx,%r12d
rorl $11,%r14d
pshufd $8,%xmm7,%xmm7
xorl %r10d,%edi
addl %r12d,%r8d
movdqa 0(%rbp),%xmm6
rorl $6,%r13d
andl %edi,%r15d
pslldq $8,%xmm7
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
paddd %xmm7,%xmm0
rorl $2,%r14d
addl %r8d,%eax
addl %r15d,%r8d
paddd %xmm0,%xmm6
movl %eax,%r13d
addl %r8d,%r14d
movdqa %xmm6,0(%rsp)
rorl $14,%r13d
movdqa %xmm2,%xmm4
movl %r14d,%r8d
movl %ebx,%r12d
movdqa %xmm0,%xmm7
rorl $9,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
rorl $5,%r13d
xorl %r8d,%r14d
.byte 102,15,58,15,225,4
andl %eax,%r12d
xorl %eax,%r13d
.byte 102,15,58,15,251,4
addl 16(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
rorl $11,%r14d
movdqa %xmm4,%xmm5
xorl %r9d,%r15d
addl %r12d,%edx
movdqa %xmm4,%xmm6
rorl $6,%r13d
andl %r15d,%edi
psrld $3,%xmm4
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
paddd %xmm7,%xmm1
rorl $2,%r14d
addl %edx,%r11d
psrld $7,%xmm6
addl %edi,%edx
movl %r11d,%r13d
pshufd $250,%xmm0,%xmm7
addl %edx,%r14d
rorl $14,%r13d
pslld $14,%xmm5
movl %r14d,%edx
movl %eax,%r12d
pxor %xmm6,%xmm4
rorl $9,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
rorl $5,%r13d
psrld $11,%xmm6
xorl %edx,%r14d
pxor %xmm5,%xmm4
andl %r11d,%r12d
xorl %r11d,%r13d
pslld $11,%xmm5
addl 20(%rsp),%ecx
movl %edx,%edi
pxor %xmm6,%xmm4
xorl %ebx,%r12d
rorl $11,%r14d
movdqa %xmm7,%xmm6
xorl %r8d,%edi
addl %r12d,%ecx
pxor %xmm5,%xmm4
rorl $6,%r13d
andl %edi,%r15d
xorl %edx,%r14d
psrld $10,%xmm7
addl %r13d,%ecx
xorl %r8d,%r15d
paddd %xmm4,%xmm1
rorl $2,%r14d
addl %ecx,%r10d
psrlq $17,%xmm6
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
pxor %xmm6,%xmm7
rorl $14,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
rorl $9,%r14d
psrlq $2,%xmm6
xorl %r10d,%r13d
xorl %eax,%r12d
pxor %xmm6,%xmm7
rorl $5,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
pshufd $128,%xmm7,%xmm7
xorl %r10d,%r13d
addl 24(%rsp),%ebx
movl %ecx,%r15d
psrldq $8,%xmm7
xorl %eax,%r12d
rorl $11,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
rorl $6,%r13d
paddd %xmm7,%xmm1
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
pshufd $80,%xmm1,%xmm7
xorl %edx,%edi
rorl $2,%r14d
addl %ebx,%r9d
movdqa %xmm7,%xmm6
addl %edi,%ebx
movl %r9d,%r13d
psrld $10,%xmm7
addl %ebx,%r14d
rorl $14,%r13d
psrlq $17,%xmm6
movl %r14d,%ebx
movl %r10d,%r12d
pxor %xmm6,%xmm7
rorl $9,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
rorl $5,%r13d
xorl %ebx,%r14d
psrlq $2,%xmm6
andl %r9d,%r12d
xorl %r9d,%r13d
addl 28(%rsp),%eax
pxor %xmm6,%xmm7
movl %ebx,%edi
xorl %r11d,%r12d
rorl $11,%r14d
pshufd $8,%xmm7,%xmm7
xorl %ecx,%edi
addl %r12d,%eax
movdqa 32(%rbp),%xmm6
rorl $6,%r13d
andl %edi,%r15d
pslldq $8,%xmm7
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
paddd %xmm7,%xmm1
rorl $2,%r14d
addl %eax,%r8d
addl %r15d,%eax
paddd %xmm1,%xmm6
movl %r8d,%r13d
addl %eax,%r14d
movdqa %xmm6,16(%rsp)
rorl $14,%r13d
movdqa %xmm3,%xmm4
movl %r14d,%eax
movl %r9d,%r12d
movdqa %xmm1,%xmm7
rorl $9,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
rorl $5,%r13d
xorl %eax,%r14d
.byte 102,15,58,15,226,4
andl %r8d,%r12d
xorl %r8d,%r13d
.byte 102,15,58,15,248,4
addl 32(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
rorl $11,%r14d
movdqa %xmm4,%xmm5
xorl %ebx,%r15d
addl %r12d,%r11d
movdqa %xmm4,%xmm6
rorl $6,%r13d
andl %r15d,%edi
psrld $3,%xmm4
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
paddd %xmm7,%xmm2
rorl $2,%r14d
addl %r11d,%edx
psrld $7,%xmm6
addl %edi,%r11d
movl %edx,%r13d
pshufd $250,%xmm1,%xmm7
addl %r11d,%r14d
rorl $14,%r13d
pslld $14,%xmm5
movl %r14d,%r11d
movl %r8d,%r12d
pxor %xmm6,%xmm4
rorl $9,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
rorl $5,%r13d
psrld $11,%xmm6
xorl %r11d,%r14d
pxor %xmm5,%xmm4
andl %edx,%r12d
xorl %edx,%r13d
pslld $11,%xmm5
addl 36(%rsp),%r10d
movl %r11d,%edi
pxor %xmm6,%xmm4
xorl %r9d,%r12d
rorl $11,%r14d
movdqa %xmm7,%xmm6
xorl %eax,%edi
addl %r12d,%r10d
pxor %xmm5,%xmm4
rorl $6,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
psrld $10,%xmm7
addl %r13d,%r10d
xorl %eax,%r15d
paddd %xmm4,%xmm2
rorl $2,%r14d
addl %r10d,%ecx
psrlq $17,%xmm6
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
pxor %xmm6,%xmm7
rorl $14,%r13d
movl %r14d,%r10d
movl %edx,%r12d
rorl $9,%r14d
psrlq $2,%xmm6
xorl %ecx,%r13d
xorl %r8d,%r12d
pxor %xmm6,%xmm7
rorl $5,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
pshufd $128,%xmm7,%xmm7
xorl %ecx,%r13d
addl 40(%rsp),%r9d
movl %r10d,%r15d
psrldq $8,%xmm7
xorl %r8d,%r12d
rorl $11,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
rorl $6,%r13d
paddd %xmm7,%xmm2
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
pshufd $80,%xmm2,%xmm7
xorl %r11d,%edi
rorl $2,%r14d
addl %r9d,%ebx
movdqa %xmm7,%xmm6
addl %edi,%r9d
movl %ebx,%r13d
psrld $10,%xmm7
addl %r9d,%r14d
rorl $14,%r13d
psrlq $17,%xmm6
movl %r14d,%r9d
movl %ecx,%r12d
pxor %xmm6,%xmm7
rorl $9,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
rorl $5,%r13d
xorl %r9d,%r14d
psrlq $2,%xmm6
andl %ebx,%r12d
xorl %ebx,%r13d
addl 44(%rsp),%r8d
pxor %xmm6,%xmm7
movl %r9d,%edi
xorl %edx,%r12d
rorl $11,%r14d
pshufd $8,%xmm7,%xmm7
xorl %r10d,%edi
addl %r12d,%r8d
movdqa 64(%rbp),%xmm6
rorl $6,%r13d
andl %edi,%r15d
pslldq $8,%xmm7
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
paddd %xmm7,%xmm2
rorl $2,%r14d
addl %r8d,%eax
addl %r15d,%r8d
paddd %xmm2,%xmm6
movl %eax,%r13d
addl %r8d,%r14d
movdqa %xmm6,32(%rsp)
rorl $14,%r13d
movdqa %xmm0,%xmm4
movl %r14d,%r8d
movl %ebx,%r12d
movdqa %xmm2,%xmm7
rorl $9,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
rorl $5,%r13d
xorl %r8d,%r14d
.byte 102,15,58,15,227,4
andl %eax,%r12d
xorl %eax,%r13d
.byte 102,15,58,15,249,4
addl 48(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
rorl $11,%r14d
movdqa %xmm4,%xmm5
xorl %r9d,%r15d
addl %r12d,%edx
movdqa %xmm4,%xmm6
rorl $6,%r13d
andl %r15d,%edi
psrld $3,%xmm4
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
paddd %xmm7,%xmm3
rorl $2,%r14d
addl %edx,%r11d
psrld $7,%xmm6
addl %edi,%edx
movl %r11d,%r13d
pshufd $250,%xmm2,%xmm7
addl %edx,%r14d
rorl $14,%r13d
pslld $14,%xmm5
movl %r14d,%edx
movl %eax,%r12d
pxor %xmm6,%xmm4
rorl $9,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
rorl $5,%r13d
psrld $11,%xmm6
xorl %edx,%r14d
pxor %xmm5,%xmm4
andl %r11d,%r12d
xorl %r11d,%r13d
pslld $11,%xmm5
addl 52(%rsp),%ecx
movl %edx,%edi
pxor %xmm6,%xmm4
xorl %ebx,%r12d
rorl $11,%r14d
movdqa %xmm7,%xmm6
xorl %r8d,%edi
addl %r12d,%ecx
pxor %xmm5,%xmm4
rorl $6,%r13d
andl %edi,%r15d
xorl %edx,%r14d
psrld $10,%xmm7
addl %r13d,%ecx
xorl %r8d,%r15d
paddd %xmm4,%xmm3
rorl $2,%r14d
addl %ecx,%r10d
psrlq $17,%xmm6
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
pxor %xmm6,%xmm7
rorl $14,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
rorl $9,%r14d
psrlq $2,%xmm6
xorl %r10d,%r13d
xorl %eax,%r12d
pxor %xmm6,%xmm7
rorl $5,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
pshufd $128,%xmm7,%xmm7
xorl %r10d,%r13d
addl 56(%rsp),%ebx
movl %ecx,%r15d
psrldq $8,%xmm7
xorl %eax,%r12d
rorl $11,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
rorl $6,%r13d
paddd %xmm7,%xmm3
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
pshufd $80,%xmm3,%xmm7
xorl %edx,%edi
rorl $2,%r14d
addl %ebx,%r9d
movdqa %xmm7,%xmm6
addl %edi,%ebx
movl %r9d,%r13d
psrld $10,%xmm7
addl %ebx,%r14d
rorl $14,%r13d
psrlq $17,%xmm6
movl %r14d,%ebx
movl %r10d,%r12d
pxor %xmm6,%xmm7
rorl $9,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
rorl $5,%r13d
xorl %ebx,%r14d
psrlq $2,%xmm6
andl %r9d,%r12d
xorl %r9d,%r13d
addl 60(%rsp),%eax
pxor %xmm6,%xmm7
movl %ebx,%edi
xorl %r11d,%r12d
rorl $11,%r14d
pshufd $8,%xmm7,%xmm7
xorl %ecx,%edi
addl %r12d,%eax
movdqa 96(%rbp),%xmm6
rorl $6,%r13d
andl %edi,%r15d
pslldq $8,%xmm7
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
paddd %xmm7,%xmm3
rorl $2,%r14d
addl %eax,%r8d
addl %r15d,%eax
paddd %xmm3,%xmm6
movl %r8d,%r13d
addl %eax,%r14d
movdqa %xmm6,48(%rsp)
cmpb $0,131(%rbp)
jne .Lssse3_00_47
rorl $14,%r13d
movl %r14d,%eax
movl %r9d,%r12d
rorl $9,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
rorl $5,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
xorl %r8d,%r13d
addl 0(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
rorl $11,%r14d
xorl %ebx,%r15d
addl %r12d,%r11d
rorl $6,%r13d
andl %r15d,%edi
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
rorl $2,%r14d
addl %r11d,%edx
addl %edi,%r11d
movl %edx,%r13d
addl %r11d,%r14d
rorl $14,%r13d
movl %r14d,%r11d
movl %r8d,%r12d
rorl $9,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
rorl $5,%r13d
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
addl 4(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
rorl $11,%r14d
xorl %eax,%edi
addl %r12d,%r10d
rorl $6,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
addl %r13d,%r10d
xorl %eax,%r15d
rorl $2,%r14d
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
rorl $14,%r13d
movl %r14d,%r10d
movl %edx,%r12d
rorl $9,%r14d
xorl %ecx,%r13d
xorl %r8d,%r12d
rorl $5,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
xorl %ecx,%r13d
addl 8(%rsp),%r9d
movl %r10d,%r15d
xorl %r8d,%r12d
rorl $11,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
rorl $6,%r13d
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
xorl %r11d,%edi
rorl $2,%r14d
addl %r9d,%ebx
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
rorl $14,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
rorl $9,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
rorl $5,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
xorl %ebx,%r13d
addl 12(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
rorl $11,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
rorl $6,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
rorl $2,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
rorl $14,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
rorl $9,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
rorl $5,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
xorl %eax,%r13d
addl 16(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
rorl $11,%r14d
xorl %r9d,%r15d
addl %r12d,%edx
rorl $6,%r13d
andl %r15d,%edi
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
rorl $2,%r14d
addl %edx,%r11d
addl %edi,%edx
movl %r11d,%r13d
addl %edx,%r14d
rorl $14,%r13d
movl %r14d,%edx
movl %eax,%r12d
rorl $9,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
rorl $5,%r13d
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
addl 20(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
rorl $11,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
rorl $6,%r13d
andl %edi,%r15d
xorl %edx,%r14d
addl %r13d,%ecx
xorl %r8d,%r15d
rorl $2,%r14d
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
rorl $14,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
rorl $9,%r14d
xorl %r10d,%r13d
xorl %eax,%r12d
rorl $5,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
xorl %r10d,%r13d
addl 24(%rsp),%ebx
movl %ecx,%r15d
xorl %eax,%r12d
rorl $11,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
rorl $6,%r13d
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
xorl %edx,%edi
rorl $2,%r14d
addl %ebx,%r9d
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
rorl $14,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
rorl $9,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
rorl $5,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
xorl %r9d,%r13d
addl 28(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
rorl $11,%r14d
xorl %ecx,%edi
addl %r12d,%eax
rorl $6,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
rorl $2,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
rorl $14,%r13d
movl %r14d,%eax
movl %r9d,%r12d
rorl $9,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
rorl $5,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
xorl %r8d,%r13d
addl 32(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
rorl $11,%r14d
xorl %ebx,%r15d
addl %r12d,%r11d
rorl $6,%r13d
andl %r15d,%edi
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
rorl $2,%r14d
addl %r11d,%edx
addl %edi,%r11d
movl %edx,%r13d
addl %r11d,%r14d
rorl $14,%r13d
movl %r14d,%r11d
movl %r8d,%r12d
rorl $9,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
rorl $5,%r13d
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
addl 36(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
rorl $11,%r14d
xorl %eax,%edi
addl %r12d,%r10d
rorl $6,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
addl %r13d,%r10d
xorl %eax,%r15d
rorl $2,%r14d
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
rorl $14,%r13d
movl %r14d,%r10d
movl %edx,%r12d
rorl $9,%r14d
xorl %ecx,%r13d
xorl %r8d,%r12d
rorl $5,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
xorl %ecx,%r13d
addl 40(%rsp),%r9d
movl %r10d,%r15d
xorl %r8d,%r12d
rorl $11,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
rorl $6,%r13d
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
xorl %r11d,%edi
rorl $2,%r14d
addl %r9d,%ebx
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
rorl $14,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
rorl $9,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
rorl $5,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
xorl %ebx,%r13d
addl 44(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
rorl $11,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
rorl $6,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
rorl $2,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
rorl $14,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
rorl $9,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
rorl $5,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
xorl %eax,%r13d
addl 48(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
rorl $11,%r14d
xorl %r9d,%r15d
addl %r12d,%edx
rorl $6,%r13d
andl %r15d,%edi
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
rorl $2,%r14d
addl %edx,%r11d
addl %edi,%edx
movl %r11d,%r13d
addl %edx,%r14d
rorl $14,%r13d
movl %r14d,%edx
movl %eax,%r12d
rorl $9,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
rorl $5,%r13d
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
addl 52(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
rorl $11,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
rorl $6,%r13d
andl %edi,%r15d
xorl %edx,%r14d
addl %r13d,%ecx
xorl %r8d,%r15d
rorl $2,%r14d
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
rorl $14,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
rorl $9,%r14d
xorl %r10d,%r13d
xorl %eax,%r12d
rorl $5,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
xorl %r10d,%r13d
addl 56(%rsp),%ebx
movl %ecx,%r15d
xorl %eax,%r12d
rorl $11,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
rorl $6,%r13d
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
xorl %edx,%edi
rorl $2,%r14d
addl %ebx,%r9d
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
rorl $14,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
rorl $9,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
rorl $5,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
xorl %r9d,%r13d
addl 60(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
rorl $11,%r14d
xorl %ecx,%edi
addl %r12d,%eax
rorl $6,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
rorl $2,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
movq 64+0(%rsp),%rdi
movl %r14d,%eax
addl 0(%rdi),%eax
leaq 64(%rsi),%rsi
addl 4(%rdi),%ebx
addl 8(%rdi),%ecx
addl 12(%rdi),%edx
addl 16(%rdi),%r8d
addl 20(%rdi),%r9d
addl 24(%rdi),%r10d
addl 28(%rdi),%r11d
cmpq 64+16(%rsp),%rsi
movl %eax,0(%rdi)
movl %ebx,4(%rdi)
movl %ecx,8(%rdi)
movl %edx,12(%rdi)
movl %r8d,16(%rdi)
movl %r9d,20(%rdi)
movl %r10d,24(%rdi)
movl %r11d,28(%rdi)
jb .Lloop_ssse3
movq 88(%rsp),%rsi
.cfi_def_cfa %rsi,8
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lepilogue_ssse3:
ret
.cfi_endproc
.size sha256_block_data_order_ssse3,.-sha256_block_data_order_ssse3
.globl sha256_block_data_order_avx
.hidden sha256_block_data_order_avx
.type sha256_block_data_order_avx,@function
.align 64
sha256_block_data_order_avx:
.cfi_startproc
_CET_ENDBR
movq %rsp,%rax
.cfi_def_cfa_register %rax
pushq %rbx
.cfi_offset %rbx,-16
pushq %rbp
.cfi_offset %rbp,-24
pushq %r12
.cfi_offset %r12,-32
pushq %r13
.cfi_offset %r13,-40
pushq %r14
.cfi_offset %r14,-48
pushq %r15
.cfi_offset %r15,-56
shlq $4,%rdx
subq $96,%rsp
leaq (%rsi,%rdx,4),%rdx
andq $-64,%rsp
movq %rdi,64+0(%rsp)
movq %rsi,64+8(%rsp)
movq %rdx,64+16(%rsp)
movq %rax,88(%rsp)
.cfi_escape 0x0f,0x06,0x77,0xd8,0x00,0x06,0x23,0x08
.Lprologue_avx:
vzeroupper
movl 0(%rdi),%eax
movl 4(%rdi),%ebx
movl 8(%rdi),%ecx
movl 12(%rdi),%edx
movl 16(%rdi),%r8d
movl 20(%rdi),%r9d
movl 24(%rdi),%r10d
movl 28(%rdi),%r11d
vmovdqa K256+512+32(%rip),%xmm8
vmovdqa K256+512+64(%rip),%xmm9
jmp .Lloop_avx
.align 16
.Lloop_avx:
vmovdqa K256+512(%rip),%xmm7
vmovdqu 0(%rsi),%xmm0
vmovdqu 16(%rsi),%xmm1
vmovdqu 32(%rsi),%xmm2
vmovdqu 48(%rsi),%xmm3
vpshufb %xmm7,%xmm0,%xmm0
leaq K256(%rip),%rbp
vpshufb %xmm7,%xmm1,%xmm1
vpshufb %xmm7,%xmm2,%xmm2
vpaddd 0(%rbp),%xmm0,%xmm4
vpshufb %xmm7,%xmm3,%xmm3
vpaddd 32(%rbp),%xmm1,%xmm5
vpaddd 64(%rbp),%xmm2,%xmm6
vpaddd 96(%rbp),%xmm3,%xmm7
vmovdqa %xmm4,0(%rsp)
movl %eax,%r14d
vmovdqa %xmm5,16(%rsp)
movl %ebx,%edi
vmovdqa %xmm6,32(%rsp)
xorl %ecx,%edi
vmovdqa %xmm7,48(%rsp)
movl %r8d,%r13d
jmp .Lavx_00_47
.align 16
.Lavx_00_47:
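// Same schedule/round interleave as the SSSE3 path, using non-destructive
// AVX forms; %xmm8/%xmm9 were preloaded with the sigma pshufb masks from
// the tail of K256.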
subq $-128,%rbp
vpalignr $4,%xmm0,%xmm1,%xmm4
shrdl $14,%r13d,%r13d
movl %r14d,%eax
movl %r9d,%r12d
vpalignr $4,%xmm2,%xmm3,%xmm7
shrdl $9,%r14d,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
vpsrld $7,%xmm4,%xmm6
shrdl $5,%r13d,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
vpaddd %xmm7,%xmm0,%xmm0
xorl %r8d,%r13d
addl 0(%rsp),%r11d
movl %eax,%r15d
vpsrld $3,%xmm4,%xmm7
xorl %r10d,%r12d
shrdl $11,%r14d,%r14d
xorl %ebx,%r15d
vpslld $14,%xmm4,%xmm5
addl %r12d,%r11d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
vpxor %xmm6,%xmm7,%xmm4
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
vpshufd $250,%xmm3,%xmm7
shrdl $2,%r14d,%r14d
addl %r11d,%edx
addl %edi,%r11d
vpsrld $11,%xmm6,%xmm6
movl %edx,%r13d
addl %r11d,%r14d
shrdl $14,%r13d,%r13d
vpxor %xmm5,%xmm4,%xmm4
movl %r14d,%r11d
movl %r8d,%r12d
shrdl $9,%r14d,%r14d
vpslld $11,%xmm5,%xmm5
xorl %edx,%r13d
xorl %r9d,%r12d
shrdl $5,%r13d,%r13d
vpxor %xmm6,%xmm4,%xmm4
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
vpsrld $10,%xmm7,%xmm6
addl 4(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
vpxor %xmm5,%xmm4,%xmm4
shrdl $11,%r14d,%r14d
xorl %eax,%edi
addl %r12d,%r10d
vpsrlq $17,%xmm7,%xmm7
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
vpaddd %xmm4,%xmm0,%xmm0
addl %r13d,%r10d
xorl %eax,%r15d
shrdl $2,%r14d,%r14d
vpxor %xmm7,%xmm6,%xmm6
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
vpsrlq $2,%xmm7,%xmm7
addl %r10d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r10d
vpxor %xmm7,%xmm6,%xmm6
movl %edx,%r12d
shrdl $9,%r14d,%r14d
xorl %ecx,%r13d
vpshufb %xmm8,%xmm6,%xmm6
xorl %r8d,%r12d
shrdl $5,%r13d,%r13d
xorl %r10d,%r14d
vpaddd %xmm6,%xmm0,%xmm0
andl %ecx,%r12d
xorl %ecx,%r13d
addl 8(%rsp),%r9d
vpshufd $80,%xmm0,%xmm7
movl %r10d,%r15d
xorl %r8d,%r12d
shrdl $11,%r14d,%r14d
vpsrld $10,%xmm7,%xmm6
xorl %r11d,%r15d
addl %r12d,%r9d
shrdl $6,%r13d,%r13d
vpsrlq $17,%xmm7,%xmm7
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
vpxor %xmm7,%xmm6,%xmm6
xorl %r11d,%edi
shrdl $2,%r14d,%r14d
addl %r9d,%ebx
vpsrlq $2,%xmm7,%xmm7
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
vpxor %xmm7,%xmm6,%xmm6
shrdl $14,%r13d,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
vpshufb %xmm9,%xmm6,%xmm6
shrdl $9,%r14d,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
vpaddd %xmm6,%xmm0,%xmm0
shrdl $5,%r13d,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
vpaddd 0(%rbp),%xmm0,%xmm6
xorl %ebx,%r13d
addl 12(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
shrdl $11,%r14d,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
shrdl $2,%r14d,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
vmovdqa %xmm6,0(%rsp)
vpalignr $4,%xmm1,%xmm2,%xmm4
shrdl $14,%r13d,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
vpalignr $4,%xmm3,%xmm0,%xmm7
shrdl $9,%r14d,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
vpsrld $7,%xmm4,%xmm6
shrdl $5,%r13d,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
vpaddd %xmm7,%xmm1,%xmm1
xorl %eax,%r13d
addl 16(%rsp),%edx
movl %r8d,%r15d
vpsrld $3,%xmm4,%xmm7
xorl %ecx,%r12d
shrdl $11,%r14d,%r14d
xorl %r9d,%r15d
vpslld $14,%xmm4,%xmm5
addl %r12d,%edx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
vpxor %xmm6,%xmm7,%xmm4
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
vpshufd $250,%xmm0,%xmm7
shrdl $2,%r14d,%r14d
addl %edx,%r11d
addl %edi,%edx
vpsrld $11,%xmm6,%xmm6
movl %r11d,%r13d
addl %edx,%r14d
shrdl $14,%r13d,%r13d
vpxor %xmm5,%xmm4,%xmm4
movl %r14d,%edx
movl %eax,%r12d
shrdl $9,%r14d,%r14d
vpslld $11,%xmm5,%xmm5
xorl %r11d,%r13d
xorl %ebx,%r12d
shrdl $5,%r13d,%r13d
vpxor %xmm6,%xmm4,%xmm4
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
vpsrld $10,%xmm7,%xmm6
addl 20(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
vpxor %xmm5,%xmm4,%xmm4
shrdl $11,%r14d,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
vpsrlq $17,%xmm7,%xmm7
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %edx,%r14d
vpaddd %xmm4,%xmm1,%xmm1
addl %r13d,%ecx
xorl %r8d,%r15d
shrdl $2,%r14d,%r14d
vpxor %xmm7,%xmm6,%xmm6
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
vpsrlq $2,%xmm7,%xmm7
addl %ecx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ecx
vpxor %xmm7,%xmm6,%xmm6
movl %r11d,%r12d
shrdl $9,%r14d,%r14d
xorl %r10d,%r13d
vpshufb %xmm8,%xmm6,%xmm6
xorl %eax,%r12d
shrdl $5,%r13d,%r13d
xorl %ecx,%r14d
vpaddd %xmm6,%xmm1,%xmm1
andl %r10d,%r12d
xorl %r10d,%r13d
addl 24(%rsp),%ebx
vpshufd $80,%xmm1,%xmm7
movl %ecx,%r15d
xorl %eax,%r12d
shrdl $11,%r14d,%r14d
vpsrld $10,%xmm7,%xmm6
xorl %edx,%r15d
addl %r12d,%ebx
shrdl $6,%r13d,%r13d
vpsrlq $17,%xmm7,%xmm7
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
vpxor %xmm7,%xmm6,%xmm6
xorl %edx,%edi
shrdl $2,%r14d,%r14d
addl %ebx,%r9d
vpsrlq $2,%xmm7,%xmm7
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
vpxor %xmm7,%xmm6,%xmm6
shrdl $14,%r13d,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
vpshufb %xmm9,%xmm6,%xmm6
shrdl $9,%r14d,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
vpaddd %xmm6,%xmm1,%xmm1
shrdl $5,%r13d,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
vpaddd 32(%rbp),%xmm1,%xmm6
xorl %r9d,%r13d
addl 28(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
shrdl $11,%r14d,%r14d
xorl %ecx,%edi
addl %r12d,%eax
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
shrdl $2,%r14d,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
vmovdqa %xmm6,16(%rsp)
vpalignr $4,%xmm2,%xmm3,%xmm4
shrdl $14,%r13d,%r13d
movl %r14d,%eax
movl %r9d,%r12d
vpalignr $4,%xmm0,%xmm1,%xmm7
shrdl $9,%r14d,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
vpsrld $7,%xmm4,%xmm6
shrdl $5,%r13d,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
vpaddd %xmm7,%xmm2,%xmm2
xorl %r8d,%r13d
addl 32(%rsp),%r11d
movl %eax,%r15d
vpsrld $3,%xmm4,%xmm7
xorl %r10d,%r12d
shrdl $11,%r14d,%r14d
xorl %ebx,%r15d
vpslld $14,%xmm4,%xmm5
addl %r12d,%r11d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
vpxor %xmm6,%xmm7,%xmm4
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
vpshufd $250,%xmm1,%xmm7
shrdl $2,%r14d,%r14d
addl %r11d,%edx
addl %edi,%r11d
vpsrld $11,%xmm6,%xmm6
movl %edx,%r13d
addl %r11d,%r14d
shrdl $14,%r13d,%r13d
vpxor %xmm5,%xmm4,%xmm4
movl %r14d,%r11d
movl %r8d,%r12d
shrdl $9,%r14d,%r14d
vpslld $11,%xmm5,%xmm5
xorl %edx,%r13d
xorl %r9d,%r12d
shrdl $5,%r13d,%r13d
vpxor %xmm6,%xmm4,%xmm4
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
vpsrld $10,%xmm7,%xmm6
addl 36(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
vpxor %xmm5,%xmm4,%xmm4
shrdl $11,%r14d,%r14d
xorl %eax,%edi
addl %r12d,%r10d
vpsrlq $17,%xmm7,%xmm7
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
vpaddd %xmm4,%xmm2,%xmm2
addl %r13d,%r10d
xorl %eax,%r15d
shrdl $2,%r14d,%r14d
vpxor %xmm7,%xmm6,%xmm6
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
vpsrlq $2,%xmm7,%xmm7
addl %r10d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r10d
vpxor %xmm7,%xmm6,%xmm6
movl %edx,%r12d
shrdl $9,%r14d,%r14d
xorl %ecx,%r13d
vpshufb %xmm8,%xmm6,%xmm6
xorl %r8d,%r12d
shrdl $5,%r13d,%r13d
xorl %r10d,%r14d
vpaddd %xmm6,%xmm2,%xmm2
andl %ecx,%r12d
xorl %ecx,%r13d
addl 40(%rsp),%r9d
vpshufd $80,%xmm2,%xmm7
movl %r10d,%r15d
xorl %r8d,%r12d
shrdl $11,%r14d,%r14d
vpsrld $10,%xmm7,%xmm6
xorl %r11d,%r15d
addl %r12d,%r9d
shrdl $6,%r13d,%r13d
vpsrlq $17,%xmm7,%xmm7
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
vpxor %xmm7,%xmm6,%xmm6
xorl %r11d,%edi
shrdl $2,%r14d,%r14d
addl %r9d,%ebx
vpsrlq $2,%xmm7,%xmm7
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
vpxor %xmm7,%xmm6,%xmm6
shrdl $14,%r13d,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
vpshufb %xmm9,%xmm6,%xmm6
shrdl $9,%r14d,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
vpaddd %xmm6,%xmm2,%xmm2
shrdl $5,%r13d,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
vpaddd 64(%rbp),%xmm2,%xmm6
xorl %ebx,%r13d
addl 44(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
shrdl $11,%r14d,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
shrdl $2,%r14d,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
vmovdqa %xmm6,32(%rsp)
vpalignr $4,%xmm3,%xmm0,%xmm4
shrdl $14,%r13d,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
vpalignr $4,%xmm1,%xmm2,%xmm7
shrdl $9,%r14d,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
vpsrld $7,%xmm4,%xmm6
shrdl $5,%r13d,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
vpaddd %xmm7,%xmm3,%xmm3
xorl %eax,%r13d
addl 48(%rsp),%edx
movl %r8d,%r15d
vpsrld $3,%xmm4,%xmm7
xorl %ecx,%r12d
shrdl $11,%r14d,%r14d
xorl %r9d,%r15d
vpslld $14,%xmm4,%xmm5
addl %r12d,%edx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
vpxor %xmm6,%xmm7,%xmm4
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
vpshufd $250,%xmm2,%xmm7
shrdl $2,%r14d,%r14d
addl %edx,%r11d
addl %edi,%edx
vpsrld $11,%xmm6,%xmm6
movl %r11d,%r13d
addl %edx,%r14d
shrdl $14,%r13d,%r13d
vpxor %xmm5,%xmm4,%xmm4
movl %r14d,%edx
movl %eax,%r12d
shrdl $9,%r14d,%r14d
vpslld $11,%xmm5,%xmm5
xorl %r11d,%r13d
xorl %ebx,%r12d
shrdl $5,%r13d,%r13d
vpxor %xmm6,%xmm4,%xmm4
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
vpsrld $10,%xmm7,%xmm6
addl 52(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
vpxor %xmm5,%xmm4,%xmm4
shrdl $11,%r14d,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
vpsrlq $17,%xmm7,%xmm7
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %edx,%r14d
vpaddd %xmm4,%xmm3,%xmm3
addl %r13d,%ecx
xorl %r8d,%r15d
shrdl $2,%r14d,%r14d
vpxor %xmm7,%xmm6,%xmm6
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
vpsrlq $2,%xmm7,%xmm7
addl %ecx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ecx
vpxor %xmm7,%xmm6,%xmm6
movl %r11d,%r12d
shrdl $9,%r14d,%r14d
xorl %r10d,%r13d
vpshufb %xmm8,%xmm6,%xmm6
xorl %eax,%r12d
shrdl $5,%r13d,%r13d
xorl %ecx,%r14d
vpaddd %xmm6,%xmm3,%xmm3
andl %r10d,%r12d
xorl %r10d,%r13d
addl 56(%rsp),%ebx
vpshufd $80,%xmm3,%xmm7
movl %ecx,%r15d
xorl %eax,%r12d
shrdl $11,%r14d,%r14d
vpsrld $10,%xmm7,%xmm6
xorl %edx,%r15d
addl %r12d,%ebx
shrdl $6,%r13d,%r13d
vpsrlq $17,%xmm7,%xmm7
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
vpxor %xmm7,%xmm6,%xmm6
xorl %edx,%edi
shrdl $2,%r14d,%r14d
addl %ebx,%r9d
vpsrlq $2,%xmm7,%xmm7
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
vpxor %xmm7,%xmm6,%xmm6
shrdl $14,%r13d,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
vpshufb %xmm9,%xmm6,%xmm6
shrdl $9,%r14d,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
vpaddd %xmm6,%xmm3,%xmm3
shrdl $5,%r13d,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
vpaddd 96(%rbp),%xmm3,%xmm6
xorl %r9d,%r13d
addl 60(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
shrdl $11,%r14d,%r14d
xorl %ecx,%edi
addl %r12d,%eax
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
shrdl $2,%r14d,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
vmovdqa %xmm6,48(%rsp)
cmpb $0,131(%rbp)
jne .Lavx_00_47
shrdl $14,%r13d,%r13d
movl %r14d,%eax
movl %r9d,%r12d
shrdl $9,%r14d,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
shrdl $5,%r13d,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
xorl %r8d,%r13d
addl 0(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
shrdl $11,%r14d,%r14d
xorl %ebx,%r15d
addl %r12d,%r11d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
shrdl $2,%r14d,%r14d
addl %r11d,%edx
addl %edi,%r11d
movl %edx,%r13d
addl %r11d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r11d
movl %r8d,%r12d
shrdl $9,%r14d,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
shrdl $5,%r13d,%r13d
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
addl 4(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
shrdl $11,%r14d,%r14d
xorl %eax,%edi
addl %r12d,%r10d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
addl %r13d,%r10d
xorl %eax,%r15d
shrdl $2,%r14d,%r14d
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r10d
movl %edx,%r12d
shrdl $9,%r14d,%r14d
xorl %ecx,%r13d
xorl %r8d,%r12d
shrdl $5,%r13d,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
xorl %ecx,%r13d
addl 8(%rsp),%r9d
movl %r10d,%r15d
xorl %r8d,%r12d
shrdl $11,%r14d,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
xorl %r11d,%edi
shrdl $2,%r14d,%r14d
addl %r9d,%ebx
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
shrdl $9,%r14d,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
shrdl $5,%r13d,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
xorl %ebx,%r13d
addl 12(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
shrdl $11,%r14d,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
shrdl $2,%r14d,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
shrdl $9,%r14d,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
shrdl $5,%r13d,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
xorl %eax,%r13d
addl 16(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
shrdl $11,%r14d,%r14d
xorl %r9d,%r15d
addl %r12d,%edx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
shrdl $2,%r14d,%r14d
addl %edx,%r11d
addl %edi,%edx
movl %r11d,%r13d
addl %edx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%edx
movl %eax,%r12d
shrdl $9,%r14d,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
shrdl $5,%r13d,%r13d
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
addl 20(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
shrdl $11,%r14d,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %edx,%r14d
addl %r13d,%ecx
xorl %r8d,%r15d
shrdl $2,%r14d,%r14d
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
shrdl $9,%r14d,%r14d
xorl %r10d,%r13d
xorl %eax,%r12d
shrdl $5,%r13d,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
xorl %r10d,%r13d
addl 24(%rsp),%ebx
movl %ecx,%r15d
xorl %eax,%r12d
shrdl $11,%r14d,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
xorl %edx,%edi
shrdl $2,%r14d,%r14d
addl %ebx,%r9d
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
shrdl $9,%r14d,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
shrdl $5,%r13d,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
xorl %r9d,%r13d
addl 28(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
shrdl $11,%r14d,%r14d
xorl %ecx,%edi
addl %r12d,%eax
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
shrdl $2,%r14d,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%eax
movl %r9d,%r12d
shrdl $9,%r14d,%r14d
xorl %r8d,%r13d
xorl %r10d,%r12d
shrdl $5,%r13d,%r13d
xorl %eax,%r14d
andl %r8d,%r12d
xorl %r8d,%r13d
addl 32(%rsp),%r11d
movl %eax,%r15d
xorl %r10d,%r12d
shrdl $11,%r14d,%r14d
xorl %ebx,%r15d
addl %r12d,%r11d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %eax,%r14d
addl %r13d,%r11d
xorl %ebx,%edi
shrdl $2,%r14d,%r14d
addl %r11d,%edx
addl %edi,%r11d
movl %edx,%r13d
addl %r11d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r11d
movl %r8d,%r12d
shrdl $9,%r14d,%r14d
xorl %edx,%r13d
xorl %r9d,%r12d
shrdl $5,%r13d,%r13d
xorl %r11d,%r14d
andl %edx,%r12d
xorl %edx,%r13d
addl 36(%rsp),%r10d
movl %r11d,%edi
xorl %r9d,%r12d
shrdl $11,%r14d,%r14d
xorl %eax,%edi
addl %r12d,%r10d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r11d,%r14d
addl %r13d,%r10d
xorl %eax,%r15d
shrdl $2,%r14d,%r14d
addl %r10d,%ecx
addl %r15d,%r10d
movl %ecx,%r13d
addl %r10d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r10d
movl %edx,%r12d
shrdl $9,%r14d,%r14d
xorl %ecx,%r13d
xorl %r8d,%r12d
shrdl $5,%r13d,%r13d
xorl %r10d,%r14d
andl %ecx,%r12d
xorl %ecx,%r13d
addl 40(%rsp),%r9d
movl %r10d,%r15d
xorl %r8d,%r12d
shrdl $11,%r14d,%r14d
xorl %r11d,%r15d
addl %r12d,%r9d
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %r10d,%r14d
addl %r13d,%r9d
xorl %r11d,%edi
shrdl $2,%r14d,%r14d
addl %r9d,%ebx
addl %edi,%r9d
movl %ebx,%r13d
addl %r9d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r9d
movl %ecx,%r12d
shrdl $9,%r14d,%r14d
xorl %ebx,%r13d
xorl %edx,%r12d
shrdl $5,%r13d,%r13d
xorl %r9d,%r14d
andl %ebx,%r12d
xorl %ebx,%r13d
addl 44(%rsp),%r8d
movl %r9d,%edi
xorl %edx,%r12d
shrdl $11,%r14d,%r14d
xorl %r10d,%edi
addl %r12d,%r8d
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %r9d,%r14d
addl %r13d,%r8d
xorl %r10d,%r15d
shrdl $2,%r14d,%r14d
addl %r8d,%eax
addl %r15d,%r8d
movl %eax,%r13d
addl %r8d,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%r8d
movl %ebx,%r12d
shrdl $9,%r14d,%r14d
xorl %eax,%r13d
xorl %ecx,%r12d
shrdl $5,%r13d,%r13d
xorl %r8d,%r14d
andl %eax,%r12d
xorl %eax,%r13d
addl 48(%rsp),%edx
movl %r8d,%r15d
xorl %ecx,%r12d
shrdl $11,%r14d,%r14d
xorl %r9d,%r15d
addl %r12d,%edx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %r8d,%r14d
addl %r13d,%edx
xorl %r9d,%edi
shrdl $2,%r14d,%r14d
addl %edx,%r11d
addl %edi,%edx
movl %r11d,%r13d
addl %edx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%edx
movl %eax,%r12d
shrdl $9,%r14d,%r14d
xorl %r11d,%r13d
xorl %ebx,%r12d
shrdl $5,%r13d,%r13d
xorl %edx,%r14d
andl %r11d,%r12d
xorl %r11d,%r13d
addl 52(%rsp),%ecx
movl %edx,%edi
xorl %ebx,%r12d
shrdl $11,%r14d,%r14d
xorl %r8d,%edi
addl %r12d,%ecx
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %edx,%r14d
addl %r13d,%ecx
xorl %r8d,%r15d
shrdl $2,%r14d,%r14d
addl %ecx,%r10d
addl %r15d,%ecx
movl %r10d,%r13d
addl %ecx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ecx
movl %r11d,%r12d
shrdl $9,%r14d,%r14d
xorl %r10d,%r13d
xorl %eax,%r12d
shrdl $5,%r13d,%r13d
xorl %ecx,%r14d
andl %r10d,%r12d
xorl %r10d,%r13d
addl 56(%rsp),%ebx
movl %ecx,%r15d
xorl %eax,%r12d
shrdl $11,%r14d,%r14d
xorl %edx,%r15d
addl %r12d,%ebx
shrdl $6,%r13d,%r13d
andl %r15d,%edi
xorl %ecx,%r14d
addl %r13d,%ebx
xorl %edx,%edi
shrdl $2,%r14d,%r14d
addl %ebx,%r9d
addl %edi,%ebx
movl %r9d,%r13d
addl %ebx,%r14d
shrdl $14,%r13d,%r13d
movl %r14d,%ebx
movl %r10d,%r12d
shrdl $9,%r14d,%r14d
xorl %r9d,%r13d
xorl %r11d,%r12d
shrdl $5,%r13d,%r13d
xorl %ebx,%r14d
andl %r9d,%r12d
xorl %r9d,%r13d
addl 60(%rsp),%eax
movl %ebx,%edi
xorl %r11d,%r12d
shrdl $11,%r14d,%r14d
xorl %ecx,%edi
addl %r12d,%eax
shrdl $6,%r13d,%r13d
andl %edi,%r15d
xorl %ebx,%r14d
addl %r13d,%eax
xorl %ecx,%r15d
shrdl $2,%r14d,%r14d
addl %eax,%r8d
addl %r15d,%eax
movl %r8d,%r13d
addl %eax,%r14d
movq 64+0(%rsp),%rdi
movl %r14d,%eax
addl 0(%rdi),%eax
leaq 64(%rsi),%rsi
addl 4(%rdi),%ebx
addl 8(%rdi),%ecx
addl 12(%rdi),%edx
addl 16(%rdi),%r8d
addl 20(%rdi),%r9d
addl 24(%rdi),%r10d
addl 28(%rdi),%r11d
cmpq 64+16(%rsp),%rsi
movl %eax,0(%rdi)
movl %ebx,4(%rdi)
movl %ecx,8(%rdi)
movl %edx,12(%rdi)
movl %r8d,16(%rdi)
movl %r9d,20(%rdi)
movl %r10d,24(%rdi)
movl %r11d,28(%rdi)
jb .Lloop_avx
movq 88(%rsp),%rsi
.cfi_def_cfa %rsi,8
vzeroupper
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbp
.cfi_restore %rbp
movq -8(%rsi),%rbx
.cfi_restore %rbx
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lepilogue_avx:
ret
.cfi_endproc
.size sha256_block_data_order_avx,.-sha256_block_data_order_avx
#endif
// ---- marvin-hansen/iggy-streaming-system :: thirdparty/crates/ring-0.17.9/pregenerated/ghashv8-armx-linux64.S ----
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__)
#include <ring-core/arm_arch.h>
#if __ARM_MAX_ARCH__>=7
.text
.arch armv8-a+crypto
.globl gcm_init_clmul
.hidden gcm_init_clmul
.type gcm_init_clmul,%function
.align 4
gcm_init_clmul:
AARCH64_VALID_CALL_TARGET
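// x0 = Htable out, x1 = 128-bit hash key H. H is shifted left one bit
// modulo the GHASH polynomial to form the "twisted" representation used
// by the pmull-based multiply; H^2..H^4 and the Karatsuba-folded halves
// are then precomputed and cached in Htable.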
ld1 {v17.2d},[x1] //load input H
movi v19.16b,#0xe1
shl v19.2d,v19.2d,#57 //0xc2.0
ext v3.16b,v17.16b,v17.16b,#8
ushr v18.2d,v19.2d,#63
dup v17.4s,v17.s[1]
ext v16.16b,v18.16b,v19.16b,#8 //t0=0xc2....01
ushr v18.2d,v3.2d,#63
sshr v17.4s,v17.4s,#31 //broadcast carry bit
and v18.16b,v18.16b,v16.16b
shl v3.2d,v3.2d,#1
ext v18.16b,v18.16b,v18.16b,#8
and v16.16b,v16.16b,v17.16b
orr v3.16b,v3.16b,v18.16b //H<<<=1
eor v20.16b,v3.16b,v16.16b //twisted H
st1 {v20.2d},[x0],#16 //store Htable[0]
//calculate H^2
ext v16.16b,v20.16b,v20.16b,#8 //Karatsuba pre-processing
pmull v0.1q,v20.1d,v20.1d
eor v16.16b,v16.16b,v20.16b
pmull2 v2.1q,v20.2d,v20.2d
pmull v1.1q,v16.1d,v16.1d
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v22.16b,v0.16b,v18.16b
ext v17.16b,v22.16b,v22.16b,#8 //Karatsuba pre-processing
eor v17.16b,v17.16b,v22.16b
ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed
st1 {v21.2d,v22.2d},[x0],#32 //store Htable[1..2]
//calculate H^3 and H^4
pmull v0.1q,v20.1d, v22.1d
pmull v5.1q,v22.1d,v22.1d
pmull2 v2.1q,v20.2d, v22.2d
pmull2 v7.1q,v22.2d,v22.2d
pmull v1.1q,v16.1d,v17.1d
pmull v6.1q,v17.1d,v17.1d
ext v16.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
ext v17.16b,v5.16b,v7.16b,#8
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v16.16b
eor v4.16b,v5.16b,v7.16b
eor v6.16b,v6.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase
eor v6.16b,v6.16b,v4.16b
pmull v4.1q,v5.1d,v19.1d
ins v2.d[0],v1.d[1]
ins v7.d[0],v6.d[1]
ins v1.d[1],v0.d[0]
ins v6.d[1],v5.d[0]
eor v0.16b,v1.16b,v18.16b
eor v5.16b,v6.16b,v4.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase
ext v4.16b,v5.16b,v5.16b,#8
pmull v0.1q,v0.1d,v19.1d
pmull v5.1q,v5.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v4.16b,v4.16b,v7.16b
eor v20.16b, v0.16b,v18.16b //H^3
eor v22.16b,v5.16b,v4.16b //H^4
ext v16.16b,v20.16b, v20.16b,#8 //Karatsuba pre-processing
ext v17.16b,v22.16b,v22.16b,#8
eor v16.16b,v16.16b,v20.16b
eor v17.16b,v17.16b,v22.16b
ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed
st1 {v20.2d,v21.2d,v22.2d},[x0] //store Htable[3..5]
ret
.size gcm_init_clmul,.-gcm_init_clmul
.globl gcm_gmult_clmul
.hidden gcm_gmult_clmul
.type gcm_gmult_clmul,%function
.align 4
gcm_gmult_clmul:
AARCH64_VALID_CALL_TARGET
ld1 {v17.2d},[x0] //load Xi
movi v19.16b,#0xe1
ld1 {v20.2d,v21.2d},[x1] //load twisted H, ...
shl v19.2d,v19.2d,#57
#ifndef __AARCH64EB__
rev64 v17.16b,v17.16b
#endif
ext v3.16b,v17.16b,v17.16b,#8
pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo
eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing
pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi
pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi)
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v0.16b,v0.16b,v18.16b
#ifndef __AARCH64EB__
rev64 v0.16b,v0.16b
#endif
ext v0.16b,v0.16b,v0.16b,#8
st1 {v0.2d},[x0] //write out Xi
ret
.size gcm_gmult_clmul,.-gcm_gmult_clmul
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__)
// ---- marvin-hansen/iggy-streaming-system :: thirdparty/crates/ring-0.17.9/pregenerated/x86-mont-elf.S ----
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
.globl bn_mul_mont
.hidden bn_mul_mont
.type bn_mul_mont,@function
.align 16
bn_mul_mont:
.L_bn_mul_mont_begin:
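// Montgomery multiplication via MMX: pmuludq forms 32x32->64-bit partial
// products accumulated in %mm2/%mm3. The prologue carves out an aligned
// scratch area below the stack, page-walks it in, and stashes copies of
// rp/ap/bp/np and the n0 constant at 4..20(%esp), with the caller's %esp
// saved at 24(%esp).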
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
xorl %eax,%eax
movl 40(%esp),%edi
leal 20(%esp),%esi
leal 24(%esp),%edx
addl $2,%edi
negl %edi
leal -32(%esp,%edi,4),%ebp
negl %edi
movl %ebp,%eax
subl %edx,%eax
andl $2047,%eax
subl %eax,%ebp
xorl %ebp,%edx
andl $2048,%edx
xorl $2048,%edx
subl %edx,%ebp
andl $-64,%ebp
movl %esp,%eax
subl %ebp,%eax
andl $-4096,%eax
movl %esp,%edx
leal (%ebp,%eax,1),%esp
movl (%esp),%eax
cmpl %ebp,%esp
ja .L000page_walk
jmp .L001page_walk_done
.align 16
.L000page_walk:
leal -4096(%esp),%esp
movl (%esp),%eax
cmpl %ebp,%esp
ja .L000page_walk
.L001page_walk_done:
movl (%esi),%eax
movl 4(%esi),%ebx
movl 8(%esi),%ecx
movl 12(%esi),%ebp
movl 16(%esi),%esi
movl (%esi),%esi
movl %eax,4(%esp)
movl %ebx,8(%esp)
movl %ecx,12(%esp)
movl %ebp,16(%esp)
movl %esi,20(%esp)
leal -3(%edi),%ebx
movl %edx,24(%esp)
movl $-1,%eax
movd %eax,%mm7
movl 8(%esp),%esi
movl 12(%esp),%edi
movl 16(%esp),%ebp
xorl %edx,%edx
xorl %ecx,%ecx
movd (%edi),%mm4
movd (%esi),%mm5
movd (%ebp),%mm3
pmuludq %mm4,%mm5
movq %mm5,%mm2
movq %mm5,%mm0
pand %mm7,%mm0
pmuludq 20(%esp),%mm5
pmuludq %mm5,%mm3
paddq %mm0,%mm3
movd 4(%ebp),%mm1
movd 4(%esi),%mm0
psrlq $32,%mm2
psrlq $32,%mm3
incl %ecx
.align 16
.L0021st:
pmuludq %mm4,%mm0
pmuludq %mm5,%mm1
paddq %mm0,%mm2
paddq %mm1,%mm3
movq %mm2,%mm0
pand %mm7,%mm0
movd 4(%ebp,%ecx,4),%mm1
paddq %mm0,%mm3
movd 4(%esi,%ecx,4),%mm0
psrlq $32,%mm2
movd %mm3,28(%esp,%ecx,4)
psrlq $32,%mm3
leal 1(%ecx),%ecx
cmpl %ebx,%ecx
jl .L0021st
pmuludq %mm4,%mm0
pmuludq %mm5,%mm1
paddq %mm0,%mm2
paddq %mm1,%mm3
movq %mm2,%mm0
pand %mm7,%mm0
paddq %mm0,%mm3
movd %mm3,28(%esp,%ecx,4)
psrlq $32,%mm2
psrlq $32,%mm3
paddq %mm2,%mm3
movq %mm3,32(%esp,%ebx,4)
incl %edx
.L003outer:
xorl %ecx,%ecx
movd (%edi,%edx,4),%mm4
movd (%esi),%mm5
movd 32(%esp),%mm6
movd (%ebp),%mm3
pmuludq %mm4,%mm5
paddq %mm6,%mm5
movq %mm5,%mm0
movq %mm5,%mm2
pand %mm7,%mm0
pmuludq 20(%esp),%mm5
pmuludq %mm5,%mm3
paddq %mm0,%mm3
movd 36(%esp),%mm6
movd 4(%ebp),%mm1
movd 4(%esi),%mm0
psrlq $32,%mm2
psrlq $32,%mm3
paddq %mm6,%mm2
incl %ecx
decl %ebx
.L004inner:
pmuludq %mm4,%mm0
pmuludq %mm5,%mm1
paddq %mm0,%mm2
paddq %mm1,%mm3
movq %mm2,%mm0
movd 36(%esp,%ecx,4),%mm6
pand %mm7,%mm0
movd 4(%ebp,%ecx,4),%mm1
paddq %mm0,%mm3
movd 4(%esi,%ecx,4),%mm0
psrlq $32,%mm2
movd %mm3,28(%esp,%ecx,4)
psrlq $32,%mm3
paddq %mm6,%mm2
decl %ebx
leal 1(%ecx),%ecx
jnz .L004inner
movl %ecx,%ebx
pmuludq %mm4,%mm0
pmuludq %mm5,%mm1
paddq %mm0,%mm2
paddq %mm1,%mm3
movq %mm2,%mm0
pand %mm7,%mm0
paddq %mm0,%mm3
movd %mm3,28(%esp,%ecx,4)
psrlq $32,%mm2
psrlq $32,%mm3
movd 36(%esp,%ebx,4),%mm6
paddq %mm2,%mm3
paddq %mm6,%mm3
movq %mm3,32(%esp,%ebx,4)
leal 1(%edx),%edx
cmpl %ebx,%edx
jle .L003outer
emms
jmp .L005common_tail
.align 16
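// Final conditional subtraction: subtract the modulus from the result
// with borrow, then use the borrow-derived masks to select between the
// difference and the original sum so the copy-out is branch-free.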
.L005common_tail:
movl 16(%esp),%ebp
movl 4(%esp),%edi
leal 32(%esp),%esi
movl (%esi),%eax
movl %ebx,%ecx
xorl %edx,%edx
.align 16
.L006sub:
sbbl (%ebp,%edx,4),%eax
movl %eax,(%edi,%edx,4)
decl %ecx
movl 4(%esi,%edx,4),%eax
leal 1(%edx),%edx
jge .L006sub
sbbl $0,%eax
movl $-1,%edx
xorl %eax,%edx
jmp .L007copy
.align 16
.L007copy:
movl 32(%esp,%ebx,4),%esi
movl (%edi,%ebx,4),%ebp
movl %ecx,32(%esp,%ebx,4)
andl %eax,%esi
andl %edx,%ebp
orl %esi,%ebp
movl %ebp,(%edi,%ebx,4)
decl %ebx
jge .L007copy
movl 24(%esp),%esp
movl $1,%eax
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size bn_mul_mont,.-.L_bn_mul_mont_begin
.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105
.byte 112,108,105,99,97,116,105,111,110,32,102,111,114,32,120,56
.byte 54,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121
.byte 32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46
.byte 111,114,103,62,0
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
// ---- marvin-hansen/iggy-streaming-system :: thirdparty/crates/ring-0.17.9/pregenerated/sha512-armv4-linux32.S ----
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__)
@ Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved.
@
@ Licensed under the OpenSSL license (the "License"). You may not use
@ this file except in compliance with the License. You can obtain a copy
@ in the file LICENSE in the source distribution or at
@ https://www.openssl.org/source/license.html
@ ====================================================================
@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
@ project. The module is, however, dual licensed under OpenSSL and
@ CRYPTOGAMS licenses depending on where you obtain it. For further
@ details see http://www.openssl.org/~appro/cryptogams/.
@
@ Permission to use under GPL terms is granted.
@ ====================================================================
@ SHA512 block procedure for ARMv4. September 2007.
@ This code is ~4.5 (four and a half) times faster than code generated
@ by gcc 3.4 and it spends ~72 clock cycles per byte [on single-issue
@ Xscale PXA250 core].
@
@ July 2010.
@
@ Rescheduling for the dual-issue pipeline resulted in a 6% improvement
@ on the Cortex A8 core and ~40 cycles per processed byte.
@ February 2011.
@
@ Profiler-assisted and platform-specific optimization resulted in a 7%
@ improvement on the Cortex A8 core and ~38 cycles per byte.
@ March 2011.
@
@ Add NEON implementation. On Cortex A8 it was measured to process
@ one byte in 23.3 cycles or ~60% faster than integer-only code.
@ August 2012.
@
@ Improve NEON performance by 12% on Snapdragon S4. In absolute
@ terms it's 22.6 cycles per byte, which is disappointing result.
@ Technical writers asserted that 3-way S4 pipeline can sustain
@ multiple NEON instructions per cycle, but dual NEON issue could
@ not be observed, see http://www.openssl.org/~appro/Snapdragon-S4.html
@ for further details. On side note Cortex-A15 processes one byte in
@ 16 cycles.
@ Byte order [in]dependence. =========================================
@
@ Originally caller was expected to maintain specific *dword* order in
@ h[0-7], namely with most significant dword at *lower* address, which
@ was reflected in below two parameters as 0 and 4. Now caller is
@ expected to maintain native byte order for whole 64-bit values.
#ifndef __KERNEL__
# include <ring-core/arm_arch.h>
# define VFP_ABI_PUSH vstmdb sp!,{d8-d15}
# define VFP_ABI_POP vldmia sp!,{d8-d15}
#else
# define __ARM_MAX_ARCH__ 7
# define VFP_ABI_PUSH
# define VFP_ABI_POP
#endif
@ Silence ARMv8 deprecated IT instruction warnings. This file is used by both
@ ARMv7 and ARMv8 processors and does not use ARMv8 instructions.
.arch armv7-a
#ifdef __ARMEL__
# define LO 0
# define HI 4
# define WORD64(hi0,lo0,hi1,lo1) .word lo0,hi0, lo1,hi1
#else
# define HI 0
# define LO 4
# define WORD64(hi0,lo0,hi1,lo1) .word hi0,lo0, hi1,lo1
#endif
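@ Added note (not in the generated source): with the definitions above,
@ ldr rX,[p,#LO] always fetches the low 32 bits of a 64-bit word and
@ ldr rY,[p,#HI] the high 32 bits, on either endianness. For example, on
@ little-endian WORD64(0x428a2f98,0xd728ae22,...) emits
@ .word 0xd728ae22,0x428a2f98,... so the low dword sits at offset 0.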
.text
#if defined(__thumb2__)
.syntax unified
.thumb
# define adrl adr
#else
.code 32
#endif
.type K512,%object
.align 5
K512:
WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd)
WORD64(0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc)
WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019)
WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118)
WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe)
WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2)
WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1)
WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694)
WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3)
WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65)
WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483)
WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5)
WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210)
WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4)
WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725)
WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70)
WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926)
WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df)
WORD64(0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8)
WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b)
WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001)
WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30)
WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910)
WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8)
WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53)
WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8)
WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb)
WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3)
WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60)
WORD64(0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec)
WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9)
WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b)
WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207)
WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178)
WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6)
WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b)
WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493)
WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c)
WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a)
WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
.size K512,.-K512
.globl sha512_block_data_order_nohw
.hidden sha512_block_data_order_nohw
.type sha512_block_data_order_nohw,%function
sha512_block_data_order_nohw:
add r2,r1,r2,lsl#7 @ len to point at the end of inp
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
adr r14,K512
sub sp,sp,#9*8
ldr r7,[r0,#32+LO]
ldr r8,[r0,#32+HI]
ldr r9, [r0,#48+LO]
ldr r10, [r0,#48+HI]
ldr r11, [r0,#56+LO]
ldr r12, [r0,#56+HI]
.Loop:
str r9, [sp,#48+0]
str r10, [sp,#48+4]
str r11, [sp,#56+0]
str r12, [sp,#56+4]
ldr r5,[r0,#0+LO]
ldr r6,[r0,#0+HI]
ldr r3,[r0,#8+LO]
ldr r4,[r0,#8+HI]
ldr r9, [r0,#16+LO]
ldr r10, [r0,#16+HI]
ldr r11, [r0,#24+LO]
ldr r12, [r0,#24+HI]
str r3,[sp,#8+0]
str r4,[sp,#8+4]
str r9, [sp,#16+0]
str r10, [sp,#16+4]
str r11, [sp,#24+0]
str r12, [sp,#24+4]
ldr r3,[r0,#40+LO]
ldr r4,[r0,#40+HI]
str r3,[sp,#40+0]
str r4,[sp,#40+4]
.L00_15:
#if __ARM_ARCH<7
ldrb r3,[r1,#7]
ldrb r9, [r1,#6]
ldrb r10, [r1,#5]
ldrb r11, [r1,#4]
ldrb r4,[r1,#3]
ldrb r12, [r1,#2]
orr r3,r3,r9,lsl#8
ldrb r9, [r1,#1]
orr r3,r3,r10,lsl#16
ldrb r10, [r1],#8
orr r3,r3,r11,lsl#24
orr r4,r4,r12,lsl#8
orr r4,r4,r9,lsl#16
orr r4,r4,r10,lsl#24
#else
ldr r3,[r1,#4]
ldr r4,[r1],#8
#ifdef __ARMEL__
rev r3,r3
rev r4,r4
#endif
#endif
@ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
@ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
@ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
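@ Added note (illustrative, not in the generated source): in C this is
@   Sigma1(x) = ror64(x,14) ^ ror64(x,18) ^ ror64(x,41)
@ and the LO/HI lines above spell each 64-bit rotate out in 32-bit
@ halves, e.g. the low word of ror64(x,14) is (lo >> 14) | (hi << 18),
@ which is the first LO term.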
mov r9,r7,lsr#14
str r3,[sp,#64+0]
mov r10,r8,lsr#14
str r4,[sp,#64+4]
eor r9,r9,r8,lsl#18
ldr r11,[sp,#56+0] @ h.lo
eor r10,r10,r7,lsl#18
ldr r12,[sp,#56+4] @ h.hi
eor r9,r9,r7,lsr#18
eor r10,r10,r8,lsr#18
eor r9,r9,r8,lsl#14
eor r10,r10,r7,lsl#14
eor r9,r9,r8,lsr#9
eor r10,r10,r7,lsr#9
eor r9,r9,r7,lsl#23
eor r10,r10,r8,lsl#23 @ Sigma1(e)
adds r3,r3,r9
ldr r9,[sp,#40+0] @ f.lo
adc r4,r4,r10 @ T += Sigma1(e)
ldr r10,[sp,#40+4] @ f.hi
adds r3,r3,r11
ldr r11,[sp,#48+0] @ g.lo
adc r4,r4,r12 @ T += h
ldr r12,[sp,#48+4] @ g.hi
eor r9,r9,r11
str r7,[sp,#32+0]
eor r10,r10,r12
str r8,[sp,#32+4]
and r9,r9,r7
str r5,[sp,#0+0]
and r10,r10,r8
str r6,[sp,#0+4]
eor r9,r9,r11
ldr r11,[r14,#LO] @ K[i].lo
eor r10,r10,r12 @ Ch(e,f,g)
ldr r12,[r14,#HI] @ K[i].hi
adds r3,r3,r9
ldr r7,[sp,#24+0] @ d.lo
adc r4,r4,r10 @ T += Ch(e,f,g)
ldr r8,[sp,#24+4] @ d.hi
adds r3,r3,r11
and r9,r11,#0xff
adc r4,r4,r12 @ T += K[i]
adds r7,r7,r3
ldr r11,[sp,#8+0] @ b.lo
adc r8,r8,r4 @ d += T
teq r9,#148
ldr r12,[sp,#16+0] @ c.lo
#if __ARM_ARCH>=7
it eq @ Thumb2 thing, sanity check in ARM
#endif
orreq r14,r14,#1
@ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
@ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
@ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
mov r9,r5,lsr#28
mov r10,r6,lsr#28
eor r9,r9,r6,lsl#4
eor r10,r10,r5,lsl#4
eor r9,r9,r6,lsr#2
eor r10,r10,r5,lsr#2
eor r9,r9,r5,lsl#30
eor r10,r10,r6,lsl#30
eor r9,r9,r6,lsr#7
eor r10,r10,r5,lsr#7
eor r9,r9,r5,lsl#25
eor r10,r10,r6,lsl#25 @ Sigma0(a)
adds r3,r3,r9
and r9,r5,r11
adc r4,r4,r10 @ T += Sigma0(a)
ldr r10,[sp,#8+4] @ b.hi
orr r5,r5,r11
ldr r11,[sp,#16+4] @ c.hi
and r5,r5,r12
and r12,r6,r10
orr r6,r6,r10
orr r5,r5,r9 @ Maj(a,b,c).lo
and r6,r6,r11
adds r5,r5,r3
orr r6,r6,r12 @ Maj(a,b,c).hi
sub sp,sp,#8
adc r6,r6,r4 @ h += T
tst r14,#1
add r14,r14,#8
tst r14,#1
beq .L00_15
ldr r9,[sp,#184+0]
ldr r10,[sp,#184+4]
bic r14,r14,#1
.L16_79:
@ sigma0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7))
@ LO lo>>1^hi<<31 ^ lo>>8^hi<<24 ^ lo>>7^hi<<25
@ HI hi>>1^lo<<31 ^ hi>>8^lo<<24 ^ hi>>7
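@ Added note (illustrative, not in the generated source): rounds 16..79
@ extend the message schedule as
@   W[i] = sigma1(W[i-2]) + W[i-7] + sigma0(W[i-15]) + W[i-16]
@ using sigma0 above and sigma1 below, each again decomposed into 32-bit
@ shifts of the lo/hi halves.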
mov r3,r9,lsr#1
ldr r11,[sp,#80+0]
mov r4,r10,lsr#1
ldr r12,[sp,#80+4]
eor r3,r3,r10,lsl#31
eor r4,r4,r9,lsl#31
eor r3,r3,r9,lsr#8
eor r4,r4,r10,lsr#8
eor r3,r3,r10,lsl#24
eor r4,r4,r9,lsl#24
eor r3,r3,r9,lsr#7
eor r4,r4,r10,lsr#7
eor r3,r3,r10,lsl#25
@ sigma1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
@ LO lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26
@ HI hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6
mov r9,r11,lsr#19
mov r10,r12,lsr#19
eor r9,r9,r12,lsl#13
eor r10,r10,r11,lsl#13
eor r9,r9,r12,lsr#29
eor r10,r10,r11,lsr#29
eor r9,r9,r11,lsl#3
eor r10,r10,r12,lsl#3
eor r9,r9,r11,lsr#6
eor r10,r10,r12,lsr#6
ldr r11,[sp,#120+0]
eor r9,r9,r12,lsl#26
ldr r12,[sp,#120+4]
adds r3,r3,r9
ldr r9,[sp,#192+0]
adc r4,r4,r10
ldr r10,[sp,#192+4]
adds r3,r3,r11
adc r4,r4,r12
adds r3,r3,r9
adc r4,r4,r10
@ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
@ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
@ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
mov r9,r7,lsr#14
str r3,[sp,#64+0]
mov r10,r8,lsr#14
str r4,[sp,#64+4]
eor r9,r9,r8,lsl#18
ldr r11,[sp,#56+0] @ h.lo
eor r10,r10,r7,lsl#18
ldr r12,[sp,#56+4] @ h.hi
eor r9,r9,r7,lsr#18
eor r10,r10,r8,lsr#18
eor r9,r9,r8,lsl#14
eor r10,r10,r7,lsl#14
eor r9,r9,r8,lsr#9
eor r10,r10,r7,lsr#9
eor r9,r9,r7,lsl#23
eor r10,r10,r8,lsl#23 @ Sigma1(e)
adds r3,r3,r9
ldr r9,[sp,#40+0] @ f.lo
adc r4,r4,r10 @ T += Sigma1(e)
ldr r10,[sp,#40+4] @ f.hi
adds r3,r3,r11
ldr r11,[sp,#48+0] @ g.lo
adc r4,r4,r12 @ T += h
ldr r12,[sp,#48+4] @ g.hi
eor r9,r9,r11
str r7,[sp,#32+0]
eor r10,r10,r12
str r8,[sp,#32+4]
and r9,r9,r7
str r5,[sp,#0+0]
and r10,r10,r8
str r6,[sp,#0+4]
eor r9,r9,r11
ldr r11,[r14,#LO] @ K[i].lo
eor r10,r10,r12 @ Ch(e,f,g)
ldr r12,[r14,#HI] @ K[i].hi
adds r3,r3,r9
ldr r7,[sp,#24+0] @ d.lo
adc r4,r4,r10 @ T += Ch(e,f,g)
ldr r8,[sp,#24+4] @ d.hi
adds r3,r3,r11
and r9,r11,#0xff
adc r4,r4,r12 @ T += K[i]
adds r7,r7,r3
ldr r11,[sp,#8+0] @ b.lo
adc r8,r8,r4 @ d += T
teq r9,#23
ldr r12,[sp,#16+0] @ c.lo
#if __ARM_ARCH>=7
it eq @ Thumb2 thing, sanity check in ARM
#endif
orreq r14,r14,#1
@ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
@ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
@ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
mov r9,r5,lsr#28
mov r10,r6,lsr#28
eor r9,r9,r6,lsl#4
eor r10,r10,r5,lsl#4
eor r9,r9,r6,lsr#2
eor r10,r10,r5,lsr#2
eor r9,r9,r5,lsl#30
eor r10,r10,r6,lsl#30
eor r9,r9,r6,lsr#7
eor r10,r10,r5,lsr#7
eor r9,r9,r5,lsl#25
eor r10,r10,r6,lsl#25 @ Sigma0(a)
adds r3,r3,r9
and r9,r5,r11
adc r4,r4,r10 @ T += Sigma0(a)
ldr r10,[sp,#8+4] @ b.hi
orr r5,r5,r11
ldr r11,[sp,#16+4] @ c.hi
and r5,r5,r12
and r12,r6,r10
orr r6,r6,r10
orr r5,r5,r9 @ Maj(a,b,c).lo
and r6,r6,r11
adds r5,r5,r3
orr r6,r6,r12 @ Maj(a,b,c).hi
sub sp,sp,#8
adc r6,r6,r4 @ h += T
tst r14,#1
add r14,r14,#8
#if __ARM_ARCH>=7
ittt eq @ Thumb2 thing, sanity check in ARM
#endif
ldreq r9,[sp,#184+0]
ldreq r10,[sp,#184+4]
beq .L16_79
bic r14,r14,#1
ldr r3,[sp,#8+0]
ldr r4,[sp,#8+4]
ldr r9, [r0,#0+LO]
ldr r10, [r0,#0+HI]
ldr r11, [r0,#8+LO]
ldr r12, [r0,#8+HI]
adds r9,r5,r9
str r9, [r0,#0+LO]
adc r10,r6,r10
str r10, [r0,#0+HI]
adds r11,r3,r11
str r11, [r0,#8+LO]
adc r12,r4,r12
str r12, [r0,#8+HI]
ldr r5,[sp,#16+0]
ldr r6,[sp,#16+4]
ldr r3,[sp,#24+0]
ldr r4,[sp,#24+4]
ldr r9, [r0,#16+LO]
ldr r10, [r0,#16+HI]
ldr r11, [r0,#24+LO]
ldr r12, [r0,#24+HI]
adds r9,r5,r9
str r9, [r0,#16+LO]
adc r10,r6,r10
str r10, [r0,#16+HI]
adds r11,r3,r11
str r11, [r0,#24+LO]
adc r12,r4,r12
str r12, [r0,#24+HI]
ldr r3,[sp,#40+0]
ldr r4,[sp,#40+4]
ldr r9, [r0,#32+LO]
ldr r10, [r0,#32+HI]
ldr r11, [r0,#40+LO]
ldr r12, [r0,#40+HI]
adds r7,r7,r9
str r7,[r0,#32+LO]
adc r8,r8,r10
str r8,[r0,#32+HI]
adds r11,r3,r11
str r11, [r0,#40+LO]
adc r12,r4,r12
str r12, [r0,#40+HI]
ldr r5,[sp,#48+0]
ldr r6,[sp,#48+4]
ldr r3,[sp,#56+0]
ldr r4,[sp,#56+4]
ldr r9, [r0,#48+LO]
ldr r10, [r0,#48+HI]
ldr r11, [r0,#56+LO]
ldr r12, [r0,#56+HI]
adds r9,r5,r9
str r9, [r0,#48+LO]
adc r10,r6,r10
str r10, [r0,#48+HI]
adds r11,r3,r11
str r11, [r0,#56+LO]
adc r12,r4,r12
str r12, [r0,#56+HI]
add sp,sp,#640
sub r14,r14,#640
teq r1,r2
bne .Loop
add sp,sp,#8*9 @ destroy frame
#if __ARM_ARCH>=5
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc}
#else
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size sha512_block_data_order_nohw,.-sha512_block_data_order_nohw
#if __ARM_MAX_ARCH__>=7
.arch armv7-a
.fpu neon
.globl sha512_block_data_order_neon
.hidden sha512_block_data_order_neon
.type sha512_block_data_order_neon,%function
.align 4
sha512_block_data_order_neon:
dmb @ errata #451034 on early Cortex A8
add r2,r1,r2,lsl#7 @ len to point at the end of inp
adr r3,K512
VFP_ABI_PUSH
vldmia r0,{d16,d17,d18,d19,d20,d21,d22,d23} @ load context
.Loop_neon:
vshr.u64 d24,d20,#14 @ 0
#if 0<16
vld1.64 {d0},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d20,#18
#if 0>0
vadd.i64 d16,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d20,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d20,#50
vsli.64 d25,d20,#46
vmov d29,d20
vsli.64 d26,d20,#23
#if 0<16 && defined(__ARMEL__)
vrev64.8 d0,d0
#endif
veor d25,d24
vbsl d29,d21,d22 @ Ch(e,f,g)
vshr.u64 d24,d16,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d23
vshr.u64 d25,d16,#34
vsli.64 d24,d16,#36
vadd.i64 d27,d26
vshr.u64 d26,d16,#39
vadd.i64 d28,d0
vsli.64 d25,d16,#30
veor d30,d16,d17
vsli.64 d26,d16,#25
veor d23,d24,d25
vadd.i64 d27,d28
vbsl d30,d18,d17 @ Maj(a,b,c)
veor d23,d26 @ Sigma0(a)
vadd.i64 d19,d27
vadd.i64 d30,d27
@ vadd.i64 d23,d30
vshr.u64 d24,d19,#14 @ 1
#if 1<16
vld1.64 {d1},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d19,#18
#if 1>0
vadd.i64 d23,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d19,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d19,#50
vsli.64 d25,d19,#46
vmov d29,d19
vsli.64 d26,d19,#23
#if 1<16 && defined(__ARMEL__)
vrev64.8 d1,d1
#endif
veor d25,d24
vbsl d29,d20,d21 @ Ch(e,f,g)
vshr.u64 d24,d23,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d22
vshr.u64 d25,d23,#34
vsli.64 d24,d23,#36
vadd.i64 d27,d26
vshr.u64 d26,d23,#39
vadd.i64 d28,d1
vsli.64 d25,d23,#30
veor d30,d23,d16
vsli.64 d26,d23,#25
veor d22,d24,d25
vadd.i64 d27,d28
vbsl d30,d17,d16 @ Maj(a,b,c)
veor d22,d26 @ Sigma0(a)
vadd.i64 d18,d27
vadd.i64 d30,d27
@ vadd.i64 d22,d30
vshr.u64 d24,d18,#14 @ 2
#if 2<16
vld1.64 {d2},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d18,#18
#if 2>0
vadd.i64 d22,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d18,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d18,#50
vsli.64 d25,d18,#46
vmov d29,d18
vsli.64 d26,d18,#23
#if 2<16 && defined(__ARMEL__)
vrev64.8 d2,d2
#endif
veor d25,d24
vbsl d29,d19,d20 @ Ch(e,f,g)
vshr.u64 d24,d22,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d21
vshr.u64 d25,d22,#34
vsli.64 d24,d22,#36
vadd.i64 d27,d26
vshr.u64 d26,d22,#39
vadd.i64 d28,d2
vsli.64 d25,d22,#30
veor d30,d22,d23
vsli.64 d26,d22,#25
veor d21,d24,d25
vadd.i64 d27,d28
vbsl d30,d16,d23 @ Maj(a,b,c)
veor d21,d26 @ Sigma0(a)
vadd.i64 d17,d27
vadd.i64 d30,d27
@ vadd.i64 d21,d30
vshr.u64 d24,d17,#14 @ 3
#if 3<16
vld1.64 {d3},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d17,#18
#if 3>0
vadd.i64 d21,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d17,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d17,#50
vsli.64 d25,d17,#46
vmov d29,d17
vsli.64 d26,d17,#23
#if 3<16 && defined(__ARMEL__)
vrev64.8 d3,d3
#endif
veor d25,d24
vbsl d29,d18,d19 @ Ch(e,f,g)
vshr.u64 d24,d21,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d20
vshr.u64 d25,d21,#34
vsli.64 d24,d21,#36
vadd.i64 d27,d26
vshr.u64 d26,d21,#39
vadd.i64 d28,d3
vsli.64 d25,d21,#30
veor d30,d21,d22
vsli.64 d26,d21,#25
veor d20,d24,d25
vadd.i64 d27,d28
vbsl d30,d23,d22 @ Maj(a,b,c)
veor d20,d26 @ Sigma0(a)
vadd.i64 d16,d27
vadd.i64 d30,d27
@ vadd.i64 d20,d30
vshr.u64 d24,d16,#14 @ 4
#if 4<16
vld1.64 {d4},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d16,#18
#if 4>0
vadd.i64 d20,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d16,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d16,#50
vsli.64 d25,d16,#46
vmov d29,d16
vsli.64 d26,d16,#23
#if 4<16 && defined(__ARMEL__)
vrev64.8 d4,d4
#endif
veor d25,d24
vbsl d29,d17,d18 @ Ch(e,f,g)
vshr.u64 d24,d20,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d19
vshr.u64 d25,d20,#34
vsli.64 d24,d20,#36
vadd.i64 d27,d26
vshr.u64 d26,d20,#39
vadd.i64 d28,d4
vsli.64 d25,d20,#30
veor d30,d20,d21
vsli.64 d26,d20,#25
veor d19,d24,d25
vadd.i64 d27,d28
vbsl d30,d22,d21 @ Maj(a,b,c)
veor d19,d26 @ Sigma0(a)
vadd.i64 d23,d27
vadd.i64 d30,d27
@ vadd.i64 d19,d30
vshr.u64 d24,d23,#14 @ 5
#if 5<16
vld1.64 {d5},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d23,#18
#if 5>0
vadd.i64 d19,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d23,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d23,#50
vsli.64 d25,d23,#46
vmov d29,d23
vsli.64 d26,d23,#23
#if 5<16 && defined(__ARMEL__)
vrev64.8 d5,d5
#endif
veor d25,d24
vbsl d29,d16,d17 @ Ch(e,f,g)
vshr.u64 d24,d19,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d18
vshr.u64 d25,d19,#34
vsli.64 d24,d19,#36
vadd.i64 d27,d26
vshr.u64 d26,d19,#39
vadd.i64 d28,d5
vsli.64 d25,d19,#30
veor d30,d19,d20
vsli.64 d26,d19,#25
veor d18,d24,d25
vadd.i64 d27,d28
vbsl d30,d21,d20 @ Maj(a,b,c)
veor d18,d26 @ Sigma0(a)
vadd.i64 d22,d27
vadd.i64 d30,d27
@ vadd.i64 d18,d30
vshr.u64 d24,d22,#14 @ 6
#if 6<16
vld1.64 {d6},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d22,#18
#if 6>0
vadd.i64 d18,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d22,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d22,#50
vsli.64 d25,d22,#46
vmov d29,d22
vsli.64 d26,d22,#23
#if 6<16 && defined(__ARMEL__)
vrev64.8 d6,d6
#endif
veor d25,d24
vbsl d29,d23,d16 @ Ch(e,f,g)
vshr.u64 d24,d18,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d17
vshr.u64 d25,d18,#34
vsli.64 d24,d18,#36
vadd.i64 d27,d26
vshr.u64 d26,d18,#39
vadd.i64 d28,d6
vsli.64 d25,d18,#30
veor d30,d18,d19
vsli.64 d26,d18,#25
veor d17,d24,d25
vadd.i64 d27,d28
vbsl d30,d20,d19 @ Maj(a,b,c)
veor d17,d26 @ Sigma0(a)
vadd.i64 d21,d27
vadd.i64 d30,d27
@ vadd.i64 d17,d30
vshr.u64 d24,d21,#14 @ 7
#if 7<16
vld1.64 {d7},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d21,#18
#if 7>0
vadd.i64 d17,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d21,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d21,#50
vsli.64 d25,d21,#46
vmov d29,d21
vsli.64 d26,d21,#23
#if 7<16 && defined(__ARMEL__)
vrev64.8 d7,d7
#endif
veor d25,d24
vbsl d29,d22,d23 @ Ch(e,f,g)
vshr.u64 d24,d17,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d16
vshr.u64 d25,d17,#34
vsli.64 d24,d17,#36
vadd.i64 d27,d26
vshr.u64 d26,d17,#39
vadd.i64 d28,d7
vsli.64 d25,d17,#30
veor d30,d17,d18
vsli.64 d26,d17,#25
veor d16,d24,d25
vadd.i64 d27,d28
vbsl d30,d19,d18 @ Maj(a,b,c)
veor d16,d26 @ Sigma0(a)
vadd.i64 d20,d27
vadd.i64 d30,d27
@ vadd.i64 d16,d30
vshr.u64 d24,d20,#14 @ 8
#if 8<16
vld1.64 {d8},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d20,#18
#if 8>0
vadd.i64 d16,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d20,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d20,#50
vsli.64 d25,d20,#46
vmov d29,d20
vsli.64 d26,d20,#23
#if 8<16 && defined(__ARMEL__)
vrev64.8 d8,d8
#endif
veor d25,d24
vbsl d29,d21,d22 @ Ch(e,f,g)
vshr.u64 d24,d16,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d23
vshr.u64 d25,d16,#34
vsli.64 d24,d16,#36
vadd.i64 d27,d26
vshr.u64 d26,d16,#39
vadd.i64 d28,d8
vsli.64 d25,d16,#30
veor d30,d16,d17
vsli.64 d26,d16,#25
veor d23,d24,d25
vadd.i64 d27,d28
vbsl d30,d18,d17 @ Maj(a,b,c)
veor d23,d26 @ Sigma0(a)
vadd.i64 d19,d27
vadd.i64 d30,d27
@ vadd.i64 d23,d30
vshr.u64 d24,d19,#14 @ 9
#if 9<16
vld1.64 {d9},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d19,#18
#if 9>0
vadd.i64 d23,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d19,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d19,#50
vsli.64 d25,d19,#46
vmov d29,d19
vsli.64 d26,d19,#23
#if 9<16 && defined(__ARMEL__)
vrev64.8 d9,d9
#endif
veor d25,d24
vbsl d29,d20,d21 @ Ch(e,f,g)
vshr.u64 d24,d23,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d22
vshr.u64 d25,d23,#34
vsli.64 d24,d23,#36
vadd.i64 d27,d26
vshr.u64 d26,d23,#39
vadd.i64 d28,d9
vsli.64 d25,d23,#30
veor d30,d23,d16
vsli.64 d26,d23,#25
veor d22,d24,d25
vadd.i64 d27,d28
vbsl d30,d17,d16 @ Maj(a,b,c)
veor d22,d26 @ Sigma0(a)
vadd.i64 d18,d27
vadd.i64 d30,d27
@ vadd.i64 d22,d30
vshr.u64 d24,d18,#14 @ 10
#if 10<16
vld1.64 {d10},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d18,#18
#if 10>0
vadd.i64 d22,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d18,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d18,#50
vsli.64 d25,d18,#46
vmov d29,d18
vsli.64 d26,d18,#23
#if 10<16 && defined(__ARMEL__)
vrev64.8 d10,d10
#endif
veor d25,d24
vbsl d29,d19,d20 @ Ch(e,f,g)
vshr.u64 d24,d22,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d21
vshr.u64 d25,d22,#34
vsli.64 d24,d22,#36
vadd.i64 d27,d26
vshr.u64 d26,d22,#39
vadd.i64 d28,d10
vsli.64 d25,d22,#30
veor d30,d22,d23
vsli.64 d26,d22,#25
veor d21,d24,d25
vadd.i64 d27,d28
vbsl d30,d16,d23 @ Maj(a,b,c)
veor d21,d26 @ Sigma0(a)
vadd.i64 d17,d27
vadd.i64 d30,d27
@ vadd.i64 d21,d30
vshr.u64 d24,d17,#14 @ 11
#if 11<16
vld1.64 {d11},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d17,#18
#if 11>0
vadd.i64 d21,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d17,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d17,#50
vsli.64 d25,d17,#46
vmov d29,d17
vsli.64 d26,d17,#23
#if 11<16 && defined(__ARMEL__)
vrev64.8 d11,d11
#endif
veor d25,d24
vbsl d29,d18,d19 @ Ch(e,f,g)
vshr.u64 d24,d21,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d20
vshr.u64 d25,d21,#34
vsli.64 d24,d21,#36
vadd.i64 d27,d26
vshr.u64 d26,d21,#39
vadd.i64 d28,d11
vsli.64 d25,d21,#30
veor d30,d21,d22
vsli.64 d26,d21,#25
veor d20,d24,d25
vadd.i64 d27,d28
vbsl d30,d23,d22 @ Maj(a,b,c)
veor d20,d26 @ Sigma0(a)
vadd.i64 d16,d27
vadd.i64 d30,d27
@ vadd.i64 d20,d30
vshr.u64 d24,d16,#14 @ 12
#if 12<16
vld1.64 {d12},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d16,#18
#if 12>0
vadd.i64 d20,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d16,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d16,#50
vsli.64 d25,d16,#46
vmov d29,d16
vsli.64 d26,d16,#23
#if 12<16 && defined(__ARMEL__)
vrev64.8 d12,d12
#endif
veor d25,d24
vbsl d29,d17,d18 @ Ch(e,f,g)
vshr.u64 d24,d20,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d19
vshr.u64 d25,d20,#34
vsli.64 d24,d20,#36
vadd.i64 d27,d26
vshr.u64 d26,d20,#39
vadd.i64 d28,d12
vsli.64 d25,d20,#30
veor d30,d20,d21
vsli.64 d26,d20,#25
veor d19,d24,d25
vadd.i64 d27,d28
vbsl d30,d22,d21 @ Maj(a,b,c)
veor d19,d26 @ Sigma0(a)
vadd.i64 d23,d27
vadd.i64 d30,d27
@ vadd.i64 d19,d30
vshr.u64 d24,d23,#14 @ 13
#if 13<16
vld1.64 {d13},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d23,#18
#if 13>0
vadd.i64 d19,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d23,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d23,#50
vsli.64 d25,d23,#46
vmov d29,d23
vsli.64 d26,d23,#23
#if 13<16 && defined(__ARMEL__)
vrev64.8 d13,d13
#endif
veor d25,d24
vbsl d29,d16,d17 @ Ch(e,f,g)
vshr.u64 d24,d19,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d18
vshr.u64 d25,d19,#34
vsli.64 d24,d19,#36
vadd.i64 d27,d26
vshr.u64 d26,d19,#39
vadd.i64 d28,d13
vsli.64 d25,d19,#30
veor d30,d19,d20
vsli.64 d26,d19,#25
veor d18,d24,d25
vadd.i64 d27,d28
vbsl d30,d21,d20 @ Maj(a,b,c)
veor d18,d26 @ Sigma0(a)
vadd.i64 d22,d27
vadd.i64 d30,d27
@ vadd.i64 d18,d30
vshr.u64 d24,d22,#14 @ 14
#if 14<16
vld1.64 {d14},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d22,#18
#if 14>0
vadd.i64 d18,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d22,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d22,#50
vsli.64 d25,d22,#46
vmov d29,d22
vsli.64 d26,d22,#23
#if 14<16 && defined(__ARMEL__)
vrev64.8 d14,d14
#endif
veor d25,d24
vbsl d29,d23,d16 @ Ch(e,f,g)
vshr.u64 d24,d18,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d17
vshr.u64 d25,d18,#34
vsli.64 d24,d18,#36
vadd.i64 d27,d26
vshr.u64 d26,d18,#39
vadd.i64 d28,d14
vsli.64 d25,d18,#30
veor d30,d18,d19
vsli.64 d26,d18,#25
veor d17,d24,d25
vadd.i64 d27,d28
vbsl d30,d20,d19 @ Maj(a,b,c)
veor d17,d26 @ Sigma0(a)
vadd.i64 d21,d27
vadd.i64 d30,d27
@ vadd.i64 d17,d30
vshr.u64 d24,d21,#14 @ 15
#if 15<16
vld1.64 {d15},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d21,#18
#if 15>0
vadd.i64 d17,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d21,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d21,#50
vsli.64 d25,d21,#46
vmov d29,d21
vsli.64 d26,d21,#23
#if 15<16 && defined(__ARMEL__)
vrev64.8 d15,d15
#endif
veor d25,d24
vbsl d29,d22,d23 @ Ch(e,f,g)
vshr.u64 d24,d17,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d16
vshr.u64 d25,d17,#34
vsli.64 d24,d17,#36
vadd.i64 d27,d26
vshr.u64 d26,d17,#39
vadd.i64 d28,d15
vsli.64 d25,d17,#30
veor d30,d17,d18
vsli.64 d26,d17,#25
veor d16,d24,d25
vadd.i64 d27,d28
vbsl d30,d19,d18 @ Maj(a,b,c)
veor d16,d26 @ Sigma0(a)
vadd.i64 d20,d27
vadd.i64 d30,d27
@ vadd.i64 d16,d30
mov r12,#4
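@ Added note (not in the generated source): in the NEON path q0-q7 hold
@ the sixteen schedule words W[0..15]; each pass below updates a pair of
@ words with the vectorized sigma0/sigma1 recurrence before running the
@ corresponding rounds on the d registers.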
.L16_79_neon:
subs r12,#1
vshr.u64 q12,q7,#19
vshr.u64 q13,q7,#61
vadd.i64 d16,d30 @ h+=Maj from the past
vshr.u64 q15,q7,#6
vsli.64 q12,q7,#45
vext.8 q14,q0,q1,#8 @ X[i+1]
vsli.64 q13,q7,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q0,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q4,q5,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d20,#14 @ from NEON_00_15
vadd.i64 q0,q14
vshr.u64 d25,d20,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d20,#41 @ from NEON_00_15
vadd.i64 q0,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d20,#50
vsli.64 d25,d20,#46
vmov d29,d20
vsli.64 d26,d20,#23
#if 16<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
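@ Added note: the argument-less vrev64.8 above is dead text from the
@ round-unrolling generator; its #if 16<16 guard is always false, so the
@ preprocessor discards it (the same pattern repeats in later rounds).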
veor d25,d24
vbsl d29,d21,d22 @ Ch(e,f,g)
vshr.u64 d24,d16,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d23
vshr.u64 d25,d16,#34
vsli.64 d24,d16,#36
vadd.i64 d27,d26
vshr.u64 d26,d16,#39
vadd.i64 d28,d0
vsli.64 d25,d16,#30
veor d30,d16,d17
vsli.64 d26,d16,#25
veor d23,d24,d25
vadd.i64 d27,d28
vbsl d30,d18,d17 @ Maj(a,b,c)
veor d23,d26 @ Sigma0(a)
vadd.i64 d19,d27
vadd.i64 d30,d27
@ vadd.i64 d23,d30
vshr.u64 d24,d19,#14 @ 17
#if 17<16
vld1.64 {d1},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d19,#18
#if 17>0
vadd.i64 d23,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d19,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d19,#50
vsli.64 d25,d19,#46
vmov d29,d19
vsli.64 d26,d19,#23
#if 17<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d20,d21 @ Ch(e,f,g)
vshr.u64 d24,d23,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d22
vshr.u64 d25,d23,#34
vsli.64 d24,d23,#36
vadd.i64 d27,d26
vshr.u64 d26,d23,#39
vadd.i64 d28,d1
vsli.64 d25,d23,#30
veor d30,d23,d16
vsli.64 d26,d23,#25
veor d22,d24,d25
vadd.i64 d27,d28
vbsl d30,d17,d16 @ Maj(a,b,c)
veor d22,d26 @ Sigma0(a)
vadd.i64 d18,d27
vadd.i64 d30,d27
@ vadd.i64 d22,d30
vshr.u64 q12,q0,#19
vshr.u64 q13,q0,#61
vadd.i64 d22,d30 @ h+=Maj from the past
vshr.u64 q15,q0,#6
vsli.64 q12,q0,#45
vext.8 q14,q1,q2,#8 @ X[i+1]
vsli.64 q13,q0,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q1,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q5,q6,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d18,#14 @ from NEON_00_15
vadd.i64 q1,q14
vshr.u64 d25,d18,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d18,#41 @ from NEON_00_15
vadd.i64 q1,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d18,#50
vsli.64 d25,d18,#46
vmov d29,d18
vsli.64 d26,d18,#23
#if 18<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d19,d20 @ Ch(e,f,g)
vshr.u64 d24,d22,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d21
vshr.u64 d25,d22,#34
vsli.64 d24,d22,#36
vadd.i64 d27,d26
vshr.u64 d26,d22,#39
vadd.i64 d28,d2
vsli.64 d25,d22,#30
veor d30,d22,d23
vsli.64 d26,d22,#25
veor d21,d24,d25
vadd.i64 d27,d28
vbsl d30,d16,d23 @ Maj(a,b,c)
veor d21,d26 @ Sigma0(a)
vadd.i64 d17,d27
vadd.i64 d30,d27
@ vadd.i64 d21,d30
vshr.u64 d24,d17,#14 @ 19
#if 19<16
vld1.64 {d3},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d17,#18
#if 19>0
vadd.i64 d21,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d17,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d17,#50
vsli.64 d25,d17,#46
vmov d29,d17
vsli.64 d26,d17,#23
#if 19<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d18,d19 @ Ch(e,f,g)
vshr.u64 d24,d21,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d20
vshr.u64 d25,d21,#34
vsli.64 d24,d21,#36
vadd.i64 d27,d26
vshr.u64 d26,d21,#39
vadd.i64 d28,d3
vsli.64 d25,d21,#30
veor d30,d21,d22
vsli.64 d26,d21,#25
veor d20,d24,d25
vadd.i64 d27,d28
vbsl d30,d23,d22 @ Maj(a,b,c)
veor d20,d26 @ Sigma0(a)
vadd.i64 d16,d27
vadd.i64 d30,d27
@ vadd.i64 d20,d30
vshr.u64 q12,q1,#19
vshr.u64 q13,q1,#61
vadd.i64 d20,d30 @ h+=Maj from the past
vshr.u64 q15,q1,#6
vsli.64 q12,q1,#45
vext.8 q14,q2,q3,#8 @ X[i+1]
vsli.64 q13,q1,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q2,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q6,q7,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d16,#14 @ from NEON_00_15
vadd.i64 q2,q14
vshr.u64 d25,d16,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d16,#41 @ from NEON_00_15
vadd.i64 q2,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d16,#50
vsli.64 d25,d16,#46
vmov d29,d16
vsli.64 d26,d16,#23
#if 20<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d17,d18 @ Ch(e,f,g)
vshr.u64 d24,d20,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d19
vshr.u64 d25,d20,#34
vsli.64 d24,d20,#36
vadd.i64 d27,d26
vshr.u64 d26,d20,#39
vadd.i64 d28,d4
vsli.64 d25,d20,#30
veor d30,d20,d21
vsli.64 d26,d20,#25
veor d19,d24,d25
vadd.i64 d27,d28
vbsl d30,d22,d21 @ Maj(a,b,c)
veor d19,d26 @ Sigma0(a)
vadd.i64 d23,d27
vadd.i64 d30,d27
@ vadd.i64 d19,d30
vshr.u64 d24,d23,#14 @ 21
#if 21<16
vld1.64 {d5},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d23,#18
#if 21>0
vadd.i64 d19,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d23,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d23,#50
vsli.64 d25,d23,#46
vmov d29,d23
vsli.64 d26,d23,#23
#if 21<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d16,d17 @ Ch(e,f,g)
vshr.u64 d24,d19,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d18
vshr.u64 d25,d19,#34
vsli.64 d24,d19,#36
vadd.i64 d27,d26
vshr.u64 d26,d19,#39
vadd.i64 d28,d5
vsli.64 d25,d19,#30
veor d30,d19,d20
vsli.64 d26,d19,#25
veor d18,d24,d25
vadd.i64 d27,d28
vbsl d30,d21,d20 @ Maj(a,b,c)
veor d18,d26 @ Sigma0(a)
vadd.i64 d22,d27
vadd.i64 d30,d27
@ vadd.i64 d18,d30
vshr.u64 q12,q2,#19
vshr.u64 q13,q2,#61
vadd.i64 d18,d30 @ h+=Maj from the past
vshr.u64 q15,q2,#6
vsli.64 q12,q2,#45
vext.8 q14,q3,q4,#8 @ X[i+1]
vsli.64 q13,q2,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q3,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q7,q0,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d22,#14 @ from NEON_00_15
vadd.i64 q3,q14
vshr.u64 d25,d22,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d22,#41 @ from NEON_00_15
vadd.i64 q3,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d22,#50
vsli.64 d25,d22,#46
vmov d29,d22
vsli.64 d26,d22,#23
#if 22<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d23,d16 @ Ch(e,f,g)
vshr.u64 d24,d18,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d17
vshr.u64 d25,d18,#34
vsli.64 d24,d18,#36
vadd.i64 d27,d26
vshr.u64 d26,d18,#39
vadd.i64 d28,d6
vsli.64 d25,d18,#30
veor d30,d18,d19
vsli.64 d26,d18,#25
veor d17,d24,d25
vadd.i64 d27,d28
vbsl d30,d20,d19 @ Maj(a,b,c)
veor d17,d26 @ Sigma0(a)
vadd.i64 d21,d27
vadd.i64 d30,d27
@ vadd.i64 d17,d30
vshr.u64 d24,d21,#14 @ 23
#if 23<16
vld1.64 {d7},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d21,#18
#if 23>0
vadd.i64 d17,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d21,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d21,#50
vsli.64 d25,d21,#46
vmov d29,d21
vsli.64 d26,d21,#23
#if 23<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d22,d23 @ Ch(e,f,g)
vshr.u64 d24,d17,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d16
vshr.u64 d25,d17,#34
vsli.64 d24,d17,#36
vadd.i64 d27,d26
vshr.u64 d26,d17,#39
vadd.i64 d28,d7
vsli.64 d25,d17,#30
veor d30,d17,d18
vsli.64 d26,d17,#25
veor d16,d24,d25
vadd.i64 d27,d28
vbsl d30,d19,d18 @ Maj(a,b,c)
veor d16,d26 @ Sigma0(a)
vadd.i64 d20,d27
vadd.i64 d30,d27
@ vadd.i64 d16,d30
vshr.u64 q12,q3,#19
vshr.u64 q13,q3,#61
vadd.i64 d16,d30 @ h+=Maj from the past
vshr.u64 q15,q3,#6
vsli.64 q12,q3,#45
vext.8 q14,q4,q5,#8 @ X[i+1]
vsli.64 q13,q3,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q4,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q0,q1,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d20,#14 @ from NEON_00_15
vadd.i64 q4,q14
vshr.u64 d25,d20,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d20,#41 @ from NEON_00_15
vadd.i64 q4,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d20,#50
vsli.64 d25,d20,#46
vmov d29,d20
vsli.64 d26,d20,#23
#if 24<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d21,d22 @ Ch(e,f,g)
vshr.u64 d24,d16,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d23
vshr.u64 d25,d16,#34
vsli.64 d24,d16,#36
vadd.i64 d27,d26
vshr.u64 d26,d16,#39
vadd.i64 d28,d8
vsli.64 d25,d16,#30
veor d30,d16,d17
vsli.64 d26,d16,#25
veor d23,d24,d25
vadd.i64 d27,d28
vbsl d30,d18,d17 @ Maj(a,b,c)
veor d23,d26 @ Sigma0(a)
vadd.i64 d19,d27
vadd.i64 d30,d27
@ vadd.i64 d23,d30
vshr.u64 d24,d19,#14 @ 25
#if 25<16
vld1.64 {d9},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d19,#18
#if 25>0
vadd.i64 d23,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d19,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d19,#50
vsli.64 d25,d19,#46
vmov d29,d19
vsli.64 d26,d19,#23
#if 25<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d20,d21 @ Ch(e,f,g)
vshr.u64 d24,d23,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d22
vshr.u64 d25,d23,#34
vsli.64 d24,d23,#36
vadd.i64 d27,d26
vshr.u64 d26,d23,#39
vadd.i64 d28,d9
vsli.64 d25,d23,#30
veor d30,d23,d16
vsli.64 d26,d23,#25
veor d22,d24,d25
vadd.i64 d27,d28
vbsl d30,d17,d16 @ Maj(a,b,c)
veor d22,d26 @ Sigma0(a)
vadd.i64 d18,d27
vadd.i64 d30,d27
@ vadd.i64 d22,d30
vshr.u64 q12,q4,#19
vshr.u64 q13,q4,#61
vadd.i64 d22,d30 @ h+=Maj from the past
vshr.u64 q15,q4,#6
vsli.64 q12,q4,#45
vext.8 q14,q5,q6,#8 @ X[i+1]
vsli.64 q13,q4,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q5,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q1,q2,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d18,#14 @ from NEON_00_15
vadd.i64 q5,q14
vshr.u64 d25,d18,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d18,#41 @ from NEON_00_15
vadd.i64 q5,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d18,#50
vsli.64 d25,d18,#46
vmov d29,d18
vsli.64 d26,d18,#23
#if 26<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d19,d20 @ Ch(e,f,g)
vshr.u64 d24,d22,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d21
vshr.u64 d25,d22,#34
vsli.64 d24,d22,#36
vadd.i64 d27,d26
vshr.u64 d26,d22,#39
vadd.i64 d28,d10
vsli.64 d25,d22,#30
veor d30,d22,d23
vsli.64 d26,d22,#25
veor d21,d24,d25
vadd.i64 d27,d28
vbsl d30,d16,d23 @ Maj(a,b,c)
veor d21,d26 @ Sigma0(a)
vadd.i64 d17,d27
vadd.i64 d30,d27
@ vadd.i64 d21,d30
vshr.u64 d24,d17,#14 @ 27
#if 27<16
vld1.64 {d11},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d17,#18
#if 27>0
vadd.i64 d21,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d17,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d17,#50
vsli.64 d25,d17,#46
vmov d29,d17
vsli.64 d26,d17,#23
#if 27<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d18,d19 @ Ch(e,f,g)
vshr.u64 d24,d21,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d20
vshr.u64 d25,d21,#34
vsli.64 d24,d21,#36
vadd.i64 d27,d26
vshr.u64 d26,d21,#39
vadd.i64 d28,d11
vsli.64 d25,d21,#30
veor d30,d21,d22
vsli.64 d26,d21,#25
veor d20,d24,d25
vadd.i64 d27,d28
vbsl d30,d23,d22 @ Maj(a,b,c)
veor d20,d26 @ Sigma0(a)
vadd.i64 d16,d27
vadd.i64 d30,d27
@ vadd.i64 d20,d30
vshr.u64 q12,q5,#19
vshr.u64 q13,q5,#61
vadd.i64 d20,d30 @ h+=Maj from the past
vshr.u64 q15,q5,#6
vsli.64 q12,q5,#45
vext.8 q14,q6,q7,#8 @ X[i+1]
vsli.64 q13,q5,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q6,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q2,q3,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d16,#14 @ from NEON_00_15
vadd.i64 q6,q14
vshr.u64 d25,d16,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d16,#41 @ from NEON_00_15
vadd.i64 q6,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d16,#50
vsli.64 d25,d16,#46
vmov d29,d16
vsli.64 d26,d16,#23
#if 28<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d17,d18 @ Ch(e,f,g)
vshr.u64 d24,d20,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d19
vshr.u64 d25,d20,#34
vsli.64 d24,d20,#36
vadd.i64 d27,d26
vshr.u64 d26,d20,#39
vadd.i64 d28,d12
vsli.64 d25,d20,#30
veor d30,d20,d21
vsli.64 d26,d20,#25
veor d19,d24,d25
vadd.i64 d27,d28
vbsl d30,d22,d21 @ Maj(a,b,c)
veor d19,d26 @ Sigma0(a)
vadd.i64 d23,d27
vadd.i64 d30,d27
@ vadd.i64 d19,d30
vshr.u64 d24,d23,#14 @ 29
#if 29<16
vld1.64 {d13},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d23,#18
#if 29>0
vadd.i64 d19,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d23,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d23,#50
vsli.64 d25,d23,#46
vmov d29,d23
vsli.64 d26,d23,#23
#if 29<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d16,d17 @ Ch(e,f,g)
vshr.u64 d24,d19,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d18
vshr.u64 d25,d19,#34
vsli.64 d24,d19,#36
vadd.i64 d27,d26
vshr.u64 d26,d19,#39
vadd.i64 d28,d13
vsli.64 d25,d19,#30
veor d30,d19,d20
vsli.64 d26,d19,#25
veor d18,d24,d25
vadd.i64 d27,d28
vbsl d30,d21,d20 @ Maj(a,b,c)
veor d18,d26 @ Sigma0(a)
vadd.i64 d22,d27
vadd.i64 d30,d27
@ vadd.i64 d18,d30
vshr.u64 q12,q6,#19
vshr.u64 q13,q6,#61
vadd.i64 d18,d30 @ h+=Maj from the past
vshr.u64 q15,q6,#6
vsli.64 q12,q6,#45
vext.8 q14,q7,q0,#8 @ X[i+1]
vsli.64 q13,q6,#3
veor q15,q12
vshr.u64 q12,q14,#1
veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q7,q15
vshr.u64 q15,q14,#7
vsli.64 q12,q14,#63
vsli.64 q13,q14,#56
vext.8 q14,q3,q4,#8 @ X[i+9]
veor q15,q12
vshr.u64 d24,d22,#14 @ from NEON_00_15
vadd.i64 q7,q14
vshr.u64 d25,d22,#18 @ from NEON_00_15
veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d22,#41 @ from NEON_00_15
vadd.i64 q7,q15
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d22,#50
vsli.64 d25,d22,#46
vmov d29,d22
vsli.64 d26,d22,#23
#if 30<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d23,d16 @ Ch(e,f,g)
vshr.u64 d24,d18,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d17
vshr.u64 d25,d18,#34
vsli.64 d24,d18,#36
vadd.i64 d27,d26
vshr.u64 d26,d18,#39
vadd.i64 d28,d14
vsli.64 d25,d18,#30
veor d30,d18,d19
vsli.64 d26,d18,#25
veor d17,d24,d25
vadd.i64 d27,d28
vbsl d30,d20,d19 @ Maj(a,b,c)
veor d17,d26 @ Sigma0(a)
vadd.i64 d21,d27
vadd.i64 d30,d27
@ vadd.i64 d17,d30
vshr.u64 d24,d21,#14 @ 31
#if 31<16
vld1.64 {d15},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d21,#18
#if 31>0
vadd.i64 d17,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d21,#41
vld1.64 {d28},[r3,:64]! @ K[i++]
vsli.64 d24,d21,#50
vsli.64 d25,d21,#46
vmov d29,d21
vsli.64 d26,d21,#23
#if 31<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
veor d25,d24
vbsl d29,d22,d23 @ Ch(e,f,g)
vshr.u64 d24,d17,#28
veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d16
vshr.u64 d25,d17,#34
vsli.64 d24,d17,#36
vadd.i64 d27,d26
vshr.u64 d26,d17,#39
vadd.i64 d28,d15
vsli.64 d25,d17,#30
veor d30,d17,d18
vsli.64 d26,d17,#25
veor d16,d24,d25
vadd.i64 d27,d28
vbsl d30,d19,d18 @ Maj(a,b,c)
veor d16,d26 @ Sigma0(a)
vadd.i64 d20,d27
vadd.i64 d30,d27
@ vadd.i64 d16,d30
bne .L16_79_neon
vadd.i64 d16,d30 @ h+=Maj from the past
vldmia r0,{d24,d25,d26,d27,d28,d29,d30,d31} @ load context to temp
vadd.i64 q8,q12 @ vectorized accumulate
vadd.i64 q9,q13
vadd.i64 q10,q14
vadd.i64 q11,q15
vstmia r0,{d16,d17,d18,d19,d20,d21,d22,d23} @ save context
teq r1,r2
sub r3,#640 @ rewind K512
bne .Loop_neon
VFP_ABI_POP
bx lr @ .word 0xe12fff1e
.size sha512_block_data_order_neon,.-sha512_block_data_order_neon
#endif
.byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__)
|
marvin-hansen/iggy-streaming-system
| 20,965
|
thirdparty/crates/ring-0.17.9/pregenerated/aesni-x86_64-elf.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__)
.text
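// Added note (not in the generated source): the .byte sequences in this
// file are AES-NI opcodes emitted as raw bytes for old-assembler
// compatibility, e.g. .byte 102,15,56,220,209 is aesenc %xmm1,%xmm2 and
// .byte 102,15,56,221,208 is aesenclast %xmm0,%xmm2.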
.type _aesni_encrypt2,@function
.align 16
_aesni_encrypt2:
.cfi_startproc
movups (%rcx),%xmm0
shll $4,%eax
movups 16(%rcx),%xmm1
xorps %xmm0,%xmm2
xorps %xmm0,%xmm3
movups 32(%rcx),%xmm0
leaq 32(%rcx,%rax,1),%rcx
negq %rax
addq $16,%rax
.Lenc_loop2:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
movups (%rcx,%rax,1),%xmm1
addq $32,%rax
.byte 102,15,56,220,208
.byte 102,15,56,220,216
movups -16(%rcx,%rax,1),%xmm0
jnz .Lenc_loop2
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,221,208
.byte 102,15,56,221,216
ret
.cfi_endproc
.size _aesni_encrypt2,.-_aesni_encrypt2
.type _aesni_encrypt3,@function
.align 16
_aesni_encrypt3:
.cfi_startproc
movups (%rcx),%xmm0
shll $4,%eax
movups 16(%rcx),%xmm1
xorps %xmm0,%xmm2
xorps %xmm0,%xmm3
xorps %xmm0,%xmm4
movups 32(%rcx),%xmm0
leaq 32(%rcx,%rax,1),%rcx
negq %rax
addq $16,%rax
.Lenc_loop3:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
movups (%rcx,%rax,1),%xmm1
addq $32,%rax
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
movups -16(%rcx,%rax,1),%xmm0
jnz .Lenc_loop3
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,221,208
.byte 102,15,56,221,216
.byte 102,15,56,221,224
ret
.cfi_endproc
.size _aesni_encrypt3,.-_aesni_encrypt3
.type _aesni_encrypt4,@function
.align 16
_aesni_encrypt4:
.cfi_startproc
movups (%rcx),%xmm0
shll $4,%eax
movups 16(%rcx),%xmm1
xorps %xmm0,%xmm2
xorps %xmm0,%xmm3
xorps %xmm0,%xmm4
xorps %xmm0,%xmm5
movups 32(%rcx),%xmm0
leaq 32(%rcx,%rax,1),%rcx
negq %rax
.byte 0x0f,0x1f,0x00
addq $16,%rax
.Lenc_loop4:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
movups (%rcx,%rax,1),%xmm1
addq $32,%rax
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
movups -16(%rcx,%rax,1),%xmm0
jnz .Lenc_loop4
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,221,208
.byte 102,15,56,221,216
.byte 102,15,56,221,224
.byte 102,15,56,221,232
ret
.cfi_endproc
.size _aesni_encrypt4,.-_aesni_encrypt4
.type _aesni_encrypt6,@function
.align 16
_aesni_encrypt6:
.cfi_startproc
movups (%rcx),%xmm0
shll $4,%eax
movups 16(%rcx),%xmm1
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
pxor %xmm0,%xmm4
.byte 102,15,56,220,209
leaq 32(%rcx,%rax,1),%rcx
negq %rax
.byte 102,15,56,220,217
pxor %xmm0,%xmm5
pxor %xmm0,%xmm6
.byte 102,15,56,220,225
pxor %xmm0,%xmm7
movups (%rcx,%rax,1),%xmm0
addq $16,%rax
jmp .Lenc_loop6_enter
.align 16
.Lenc_loop6:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.Lenc_loop6_enter:
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
movups (%rcx,%rax,1),%xmm1
addq $32,%rax
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
.byte 102,15,56,220,240
.byte 102,15,56,220,248
movups -16(%rcx,%rax,1),%xmm0
jnz .Lenc_loop6
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,15,56,221,208
.byte 102,15,56,221,216
.byte 102,15,56,221,224
.byte 102,15,56,221,232
.byte 102,15,56,221,240
.byte 102,15,56,221,248
ret
.cfi_endproc
.size _aesni_encrypt6,.-_aesni_encrypt6
.type _aesni_encrypt8,@function
.align 16
_aesni_encrypt8:
.cfi_startproc
movups (%rcx),%xmm0
shll $4,%eax
movups 16(%rcx),%xmm1
xorps %xmm0,%xmm2
xorps %xmm0,%xmm3
pxor %xmm0,%xmm4
pxor %xmm0,%xmm5
pxor %xmm0,%xmm6
leaq 32(%rcx,%rax,1),%rcx
negq %rax
.byte 102,15,56,220,209
pxor %xmm0,%xmm7
pxor %xmm0,%xmm8
.byte 102,15,56,220,217
pxor %xmm0,%xmm9
movups (%rcx,%rax,1),%xmm0
addq $16,%rax
jmp .Lenc_loop8_inner
.align 16
.Lenc_loop8:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.Lenc_loop8_inner:
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
.Lenc_loop8_enter:
movups (%rcx,%rax,1),%xmm1
addq $32,%rax
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups -16(%rcx,%rax,1),%xmm0
jnz .Lenc_loop8
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
.byte 102,15,56,221,208
.byte 102,15,56,221,216
.byte 102,15,56,221,224
.byte 102,15,56,221,232
.byte 102,15,56,221,240
.byte 102,15,56,221,248
.byte 102,68,15,56,221,192
.byte 102,68,15,56,221,200
ret
.cfi_endproc
.size _aesni_encrypt8,.-_aesni_encrypt8
.globl aes_hw_ctr32_encrypt_blocks
.hidden aes_hw_ctr32_encrypt_blocks
.type aes_hw_ctr32_encrypt_blocks,@function
.align 16
aes_hw_ctr32_encrypt_blocks:
.cfi_startproc
_CET_ENDBR
#ifdef BORINGSSL_DISPATCH_TEST
movb $1,BORINGSSL_function_hit(%rip)
#endif
cmpq $1,%rdx
jne .Lctr32_bulk
movups (%r8),%xmm2
movups (%rdi),%xmm3
movl 240(%rcx),%edx
movups (%rcx),%xmm0
movups 16(%rcx),%xmm1
leaq 32(%rcx),%rcx
xorps %xmm0,%xmm2
.Loop_enc1_1:
.byte 102,15,56,220,209
decl %edx
movups (%rcx),%xmm1
leaq 16(%rcx),%rcx
jnz .Loop_enc1_1
.byte 102,15,56,221,209
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
xorps %xmm3,%xmm2
pxor %xmm3,%xmm3
movups %xmm2,(%rsi)
xorps %xmm2,%xmm2
jmp .Lctr32_epilogue
.align 16
.Lctr32_bulk:
leaq (%rsp),%r11
.cfi_def_cfa_register %r11
pushq %rbp
.cfi_offset %rbp,-16
subq $128,%rsp
andq $-16,%rsp
movdqu (%r8),%xmm2
movdqu (%rcx),%xmm0
movl 12(%r8),%r8d
pxor %xmm0,%xmm2
movl 12(%rcx),%ebp
movdqa %xmm2,0(%rsp)
bswapl %r8d
movdqa %xmm2,%xmm3
movdqa %xmm2,%xmm4
movdqa %xmm2,%xmm5
movdqa %xmm2,64(%rsp)
movdqa %xmm2,80(%rsp)
movdqa %xmm2,96(%rsp)
movq %rdx,%r10
movdqa %xmm2,112(%rsp)
leaq 1(%r8),%rax
leaq 2(%r8),%rdx
bswapl %eax
bswapl %edx
xorl %ebp,%eax
xorl %ebp,%edx
.byte 102,15,58,34,216,3
leaq 3(%r8),%rax
movdqa %xmm3,16(%rsp)
.byte 102,15,58,34,226,3
bswapl %eax
movq %r10,%rdx
leaq 4(%r8),%r10
movdqa %xmm4,32(%rsp)
xorl %ebp,%eax
bswapl %r10d
.byte 102,15,58,34,232,3
xorl %ebp,%r10d
movdqa %xmm5,48(%rsp)
leaq 5(%r8),%r9
movl %r10d,64+12(%rsp)
bswapl %r9d
leaq 6(%r8),%r10
movl 240(%rcx),%eax
xorl %ebp,%r9d
bswapl %r10d
movl %r9d,80+12(%rsp)
xorl %ebp,%r10d
leaq 7(%r8),%r9
movl %r10d,96+12(%rsp)
bswapl %r9d
xorl %ebp,%r9d
movl %r9d,112+12(%rsp)
movups 16(%rcx),%xmm1
movdqa 64(%rsp),%xmm6
movdqa 80(%rsp),%xmm7
cmpq $8,%rdx
jb .Lctr32_tail
leaq 128(%rcx),%rcx
subq $8,%rdx
jmp .Lctr32_loop8
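// Added note (not in the generated source): the bulk loop below keeps
// eight counter blocks in flight in %xmm2-%xmm9, interleaving the AESENC
// rounds with byte-swapping the next eight counters into the stack slots
// so the counter updates hide behind the AES round latency.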
.align 32
.Lctr32_loop8:
addl $8,%r8d
movdqa 96(%rsp),%xmm8
.byte 102,15,56,220,209
movl %r8d,%r9d
movdqa 112(%rsp),%xmm9
.byte 102,15,56,220,217
bswapl %r9d
movups 32-128(%rcx),%xmm0
.byte 102,15,56,220,225
xorl %ebp,%r9d
nop
.byte 102,15,56,220,233
movl %r9d,0+12(%rsp)
leaq 1(%r8),%r9
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 48-128(%rcx),%xmm1
bswapl %r9d
.byte 102,15,56,220,208
.byte 102,15,56,220,216
xorl %ebp,%r9d
.byte 0x66,0x90
.byte 102,15,56,220,224
.byte 102,15,56,220,232
movl %r9d,16+12(%rsp)
leaq 2(%r8),%r9
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 64-128(%rcx),%xmm0
bswapl %r9d
.byte 102,15,56,220,209
.byte 102,15,56,220,217
xorl %ebp,%r9d
.byte 0x66,0x90
.byte 102,15,56,220,225
.byte 102,15,56,220,233
movl %r9d,32+12(%rsp)
leaq 3(%r8),%r9
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 80-128(%rcx),%xmm1
bswapl %r9d
.byte 102,15,56,220,208
.byte 102,15,56,220,216
xorl %ebp,%r9d
.byte 0x66,0x90
.byte 102,15,56,220,224
.byte 102,15,56,220,232
movl %r9d,48+12(%rsp)
leaq 4(%r8),%r9
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 96-128(%rcx),%xmm0
bswapl %r9d
.byte 102,15,56,220,209
.byte 102,15,56,220,217
xorl %ebp,%r9d
.byte 0x66,0x90
.byte 102,15,56,220,225
.byte 102,15,56,220,233
movl %r9d,64+12(%rsp)
leaq 5(%r8),%r9
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 112-128(%rcx),%xmm1
bswapl %r9d
.byte 102,15,56,220,208
.byte 102,15,56,220,216
xorl %ebp,%r9d
.byte 0x66,0x90
.byte 102,15,56,220,224
.byte 102,15,56,220,232
movl %r9d,80+12(%rsp)
leaq 6(%r8),%r9
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 128-128(%rcx),%xmm0
bswapl %r9d
.byte 102,15,56,220,209
.byte 102,15,56,220,217
xorl %ebp,%r9d
.byte 0x66,0x90
.byte 102,15,56,220,225
.byte 102,15,56,220,233
movl %r9d,96+12(%rsp)
leaq 7(%r8),%r9
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 144-128(%rcx),%xmm1
bswapl %r9d
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
xorl %ebp,%r9d
movdqu 0(%rdi),%xmm10
.byte 102,15,56,220,232
movl %r9d,112+12(%rsp)
cmpl $11,%eax
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 160-128(%rcx),%xmm0
jb .Lctr32_enc_done
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 176-128(%rcx),%xmm1
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 192-128(%rcx),%xmm0
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 208-128(%rcx),%xmm1
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 224-128(%rcx),%xmm0
jmp .Lctr32_enc_done
.align 16
.Lctr32_enc_done:
movdqu 16(%rdi),%xmm11
pxor %xmm0,%xmm10
movdqu 32(%rdi),%xmm12
pxor %xmm0,%xmm11
movdqu 48(%rdi),%xmm13
pxor %xmm0,%xmm12
movdqu 64(%rdi),%xmm14
pxor %xmm0,%xmm13
movdqu 80(%rdi),%xmm15
pxor %xmm0,%xmm14
prefetcht0 448(%rdi)
prefetcht0 512(%rdi)
pxor %xmm0,%xmm15
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movdqu 96(%rdi),%xmm1
leaq 128(%rdi),%rdi
.byte 102,65,15,56,221,210
pxor %xmm0,%xmm1
movdqu 112-128(%rdi),%xmm10
.byte 102,65,15,56,221,219
pxor %xmm0,%xmm10
movdqa 0(%rsp),%xmm11
.byte 102,65,15,56,221,228
.byte 102,65,15,56,221,237
movdqa 16(%rsp),%xmm12
movdqa 32(%rsp),%xmm13
.byte 102,65,15,56,221,246
.byte 102,65,15,56,221,255
movdqa 48(%rsp),%xmm14
movdqa 64(%rsp),%xmm15
.byte 102,68,15,56,221,193
movdqa 80(%rsp),%xmm0
movups 16-128(%rcx),%xmm1
.byte 102,69,15,56,221,202
movups %xmm2,(%rsi)
movdqa %xmm11,%xmm2
movups %xmm3,16(%rsi)
movdqa %xmm12,%xmm3
movups %xmm4,32(%rsi)
movdqa %xmm13,%xmm4
movups %xmm5,48(%rsi)
movdqa %xmm14,%xmm5
movups %xmm6,64(%rsi)
movdqa %xmm15,%xmm6
movups %xmm7,80(%rsi)
movdqa %xmm0,%xmm7
movups %xmm8,96(%rsi)
movups %xmm9,112(%rsi)
leaq 128(%rsi),%rsi
subq $8,%rdx
jnc .Lctr32_loop8
addq $8,%rdx
jz .Lctr32_done
leaq -128(%rcx),%rcx
.Lctr32_tail:
leaq 16(%rcx),%rcx
cmpq $4,%rdx
jb .Lctr32_loop3
je .Lctr32_loop4
shll $4,%eax
movdqa 96(%rsp),%xmm8
pxor %xmm9,%xmm9
movups 16(%rcx),%xmm0
.byte 102,15,56,220,209
.byte 102,15,56,220,217
leaq 32-16(%rcx,%rax,1),%rcx
negq %rax
.byte 102,15,56,220,225
addq $16,%rax
movups (%rdi),%xmm10
.byte 102,15,56,220,233
.byte 102,15,56,220,241
movups 16(%rdi),%xmm11
movups 32(%rdi),%xmm12
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
call .Lenc_loop8_enter
movdqu 48(%rdi),%xmm13
pxor %xmm10,%xmm2
movdqu 64(%rdi),%xmm10
pxor %xmm11,%xmm3
movdqu %xmm2,(%rsi)
pxor %xmm12,%xmm4
movdqu %xmm3,16(%rsi)
pxor %xmm13,%xmm5
movdqu %xmm4,32(%rsi)
pxor %xmm10,%xmm6
movdqu %xmm5,48(%rsi)
movdqu %xmm6,64(%rsi)
cmpq $6,%rdx
jb .Lctr32_done
movups 80(%rdi),%xmm11
xorps %xmm11,%xmm7
movups %xmm7,80(%rsi)
je .Lctr32_done
movups 96(%rdi),%xmm12
xorps %xmm12,%xmm8
movups %xmm8,96(%rsi)
jmp .Lctr32_done
.align 32
.Lctr32_loop4:
.byte 102,15,56,220,209
leaq 16(%rcx),%rcx
decl %eax
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
movups (%rcx),%xmm1
jnz .Lctr32_loop4
.byte 102,15,56,221,209
.byte 102,15,56,221,217
movups (%rdi),%xmm10
movups 16(%rdi),%xmm11
.byte 102,15,56,221,225
.byte 102,15,56,221,233
movups 32(%rdi),%xmm12
movups 48(%rdi),%xmm13
xorps %xmm10,%xmm2
movups %xmm2,(%rsi)
xorps %xmm11,%xmm3
movups %xmm3,16(%rsi)
pxor %xmm12,%xmm4
movdqu %xmm4,32(%rsi)
pxor %xmm13,%xmm5
movdqu %xmm5,48(%rsi)
jmp .Lctr32_done
.align 32
.Lctr32_loop3:
.byte 102,15,56,220,209
leaq 16(%rcx),%rcx
decl %eax
.byte 102,15,56,220,217
.byte 102,15,56,220,225
movups (%rcx),%xmm1
jnz .Lctr32_loop3
.byte 102,15,56,221,209
.byte 102,15,56,221,217
.byte 102,15,56,221,225
movups (%rdi),%xmm10
xorps %xmm10,%xmm2
movups %xmm2,(%rsi)
cmpq $2,%rdx
jb .Lctr32_done
movups 16(%rdi),%xmm11
xorps %xmm11,%xmm3
movups %xmm3,16(%rsi)
je .Lctr32_done
movups 32(%rdi),%xmm12
xorps %xmm12,%xmm4
movups %xmm4,32(%rsi)
.Lctr32_done:
xorps %xmm0,%xmm0
xorl %ebp,%ebp
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
pxor %xmm7,%xmm7
movaps %xmm0,0(%rsp)
pxor %xmm8,%xmm8
movaps %xmm0,16(%rsp)
pxor %xmm9,%xmm9
movaps %xmm0,32(%rsp)
pxor %xmm10,%xmm10
movaps %xmm0,48(%rsp)
pxor %xmm11,%xmm11
movaps %xmm0,64(%rsp)
pxor %xmm12,%xmm12
movaps %xmm0,80(%rsp)
pxor %xmm13,%xmm13
movaps %xmm0,96(%rsp)
pxor %xmm14,%xmm14
movaps %xmm0,112(%rsp)
pxor %xmm15,%xmm15
movq -8(%r11),%rbp
.cfi_restore %rbp
leaq (%r11),%rsp
.cfi_def_cfa_register %rsp
.Lctr32_epilogue:
ret
.cfi_endproc
.size aes_hw_ctr32_encrypt_blocks,.-aes_hw_ctr32_encrypt_blocks
.globl aes_hw_set_encrypt_key_base
.hidden aes_hw_set_encrypt_key_base
.type aes_hw_set_encrypt_key_base,@function
.align 16
aes_hw_set_encrypt_key_base:
.cfi_startproc
_CET_ENDBR
#ifdef BORINGSSL_DISPATCH_TEST
movb $1,BORINGSSL_function_hit+3(%rip)
#endif
subq $8,%rsp
.cfi_adjust_cfa_offset 8
movups (%rdi),%xmm0
xorps %xmm4,%xmm4
leaq 16(%rdx),%rax
cmpl $256,%esi
je .L14rounds
cmpl $128,%esi
jne .Lbad_keybits
.L10rounds:
movl $9,%esi
movups %xmm0,(%rdx)
.byte 102,15,58,223,200,1
call .Lkey_expansion_128_cold
.byte 102,15,58,223,200,2
call .Lkey_expansion_128
.byte 102,15,58,223,200,4
call .Lkey_expansion_128
.byte 102,15,58,223,200,8
call .Lkey_expansion_128
.byte 102,15,58,223,200,16
call .Lkey_expansion_128
.byte 102,15,58,223,200,32
call .Lkey_expansion_128
.byte 102,15,58,223,200,64
call .Lkey_expansion_128
.byte 102,15,58,223,200,128
call .Lkey_expansion_128
.byte 102,15,58,223,200,27
call .Lkey_expansion_128
.byte 102,15,58,223,200,54
call .Lkey_expansion_128
movups %xmm0,(%rax)
movl %esi,80(%rax)
xorl %eax,%eax
jmp .Lenc_key_ret
.align 16
.L14rounds:
movups 16(%rdi),%xmm2
movl $13,%esi
leaq 16(%rax),%rax
movups %xmm0,(%rdx)
movups %xmm2,16(%rdx)
.byte 102,15,58,223,202,1
call .Lkey_expansion_256a_cold
.byte 102,15,58,223,200,1
call .Lkey_expansion_256b
.byte 102,15,58,223,202,2
call .Lkey_expansion_256a
.byte 102,15,58,223,200,2
call .Lkey_expansion_256b
.byte 102,15,58,223,202,4
call .Lkey_expansion_256a
.byte 102,15,58,223,200,4
call .Lkey_expansion_256b
.byte 102,15,58,223,202,8
call .Lkey_expansion_256a
.byte 102,15,58,223,200,8
call .Lkey_expansion_256b
.byte 102,15,58,223,202,16
call .Lkey_expansion_256a
.byte 102,15,58,223,200,16
call .Lkey_expansion_256b
.byte 102,15,58,223,202,32
call .Lkey_expansion_256a
.byte 102,15,58,223,200,32
call .Lkey_expansion_256b
.byte 102,15,58,223,202,64
call .Lkey_expansion_256a
movups %xmm0,(%rax)
movl %esi,16(%rax)
xorq %rax,%rax
jmp .Lenc_key_ret
.align 16
.Lbad_keybits:
movq $-2,%rax
.Lenc_key_ret:
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
addq $8,%rsp
.cfi_adjust_cfa_offset -8
ret
.cfi_endproc
.align 16
.Lkey_expansion_128:
.cfi_startproc
movups %xmm0,(%rax)
leaq 16(%rax),%rax
.Lkey_expansion_128_cold:
shufps $16,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $140,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $255,%xmm1,%xmm1
xorps %xmm1,%xmm0
ret
.cfi_endproc
.align 16
.Lkey_expansion_256a:
.cfi_startproc
movups %xmm2,(%rax)
leaq 16(%rax),%rax
.Lkey_expansion_256a_cold:
shufps $16,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $140,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $255,%xmm1,%xmm1
xorps %xmm1,%xmm0
ret
.cfi_endproc
.align 16
.Lkey_expansion_256b:
.cfi_startproc
movups %xmm0,(%rax)
leaq 16(%rax),%rax
shufps $16,%xmm2,%xmm4
xorps %xmm4,%xmm2
shufps $140,%xmm2,%xmm4
xorps %xmm4,%xmm2
shufps $170,%xmm1,%xmm1
xorps %xmm1,%xmm2
ret
.cfi_endproc
.size aes_hw_set_encrypt_key_base,.-aes_hw_set_encrypt_key_base
.globl aes_hw_set_encrypt_key_alt
.hidden aes_hw_set_encrypt_key_alt
.type aes_hw_set_encrypt_key_alt,@function
.align 16
aes_hw_set_encrypt_key_alt:
.cfi_startproc
_CET_ENDBR
#ifdef BORINGSSL_DISPATCH_TEST
movb $1,BORINGSSL_function_hit+3(%rip)
#endif
subq $8,%rsp
.cfi_adjust_cfa_offset 8
movups (%rdi),%xmm0
xorps %xmm4,%xmm4
leaq 16(%rdx),%rax
cmpl $256,%esi
je .L14rounds_alt
cmpl $128,%esi
jne .Lbad_keybits_alt
movl $9,%esi
movdqa .Lkey_rotate(%rip),%xmm5
movl $8,%r10d
movdqa .Lkey_rcon1(%rip),%xmm4
movdqa %xmm0,%xmm2
movdqu %xmm0,(%rdx)
jmp .Loop_key128
.align 16
.Loop_key128:
.byte 102,15,56,0,197
.byte 102,15,56,221,196
pslld $1,%xmm4
leaq 16(%rax),%rax
movdqa %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm3,%xmm2
pxor %xmm2,%xmm0
movdqu %xmm0,-16(%rax)
movdqa %xmm0,%xmm2
decl %r10d
jnz .Loop_key128
movdqa .Lkey_rcon1b(%rip),%xmm4
.byte 102,15,56,0,197
.byte 102,15,56,221,196
pslld $1,%xmm4
movdqa %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm3,%xmm2
pxor %xmm2,%xmm0
movdqu %xmm0,(%rax)
movdqa %xmm0,%xmm2
.byte 102,15,56,0,197
.byte 102,15,56,221,196
movdqa %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm3,%xmm2
pxor %xmm2,%xmm0
movdqu %xmm0,16(%rax)
movl %esi,96(%rax)
xorl %eax,%eax
jmp .Lenc_key_ret_alt
.align 16
.L14rounds_alt:
movups 16(%rdi),%xmm2
movl $13,%esi
leaq 16(%rax),%rax
movdqa .Lkey_rotate(%rip),%xmm5
movdqa .Lkey_rcon1(%rip),%xmm4
movl $7,%r10d
movdqu %xmm0,0(%rdx)
movdqa %xmm2,%xmm1
movdqu %xmm2,16(%rdx)
jmp .Loop_key256
.align 16
.Loop_key256:
.byte 102,15,56,0,213
.byte 102,15,56,221,212
movdqa %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm3,%xmm0
pslld $1,%xmm4
pxor %xmm2,%xmm0
movdqu %xmm0,(%rax)
decl %r10d
jz .Ldone_key256
pshufd $0xff,%xmm0,%xmm2
pxor %xmm3,%xmm3
.byte 102,15,56,221,211
movdqa %xmm1,%xmm3
pslldq $4,%xmm1
pxor %xmm1,%xmm3
pslldq $4,%xmm1
pxor %xmm1,%xmm3
pslldq $4,%xmm1
pxor %xmm3,%xmm1
pxor %xmm1,%xmm2
movdqu %xmm2,16(%rax)
leaq 32(%rax),%rax
movdqa %xmm2,%xmm1
jmp .Loop_key256
.Ldone_key256:
movl %esi,16(%rax)
xorl %eax,%eax
jmp .Lenc_key_ret_alt
.align 16
.Lbad_keybits_alt:
movq $-2,%rax
.Lenc_key_ret_alt:
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
addq $8,%rsp
.cfi_adjust_cfa_offset -8
ret
.cfi_endproc
.size aes_hw_set_encrypt_key_alt,.-aes_hw_set_encrypt_key_alt
.section .rodata
.align 64
.Lbswap_mask:
.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.Lincrement32:
.long 6,6,6,0
.Lincrement64:
.long 1,0,0,0
.Lincrement1:
.byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1
.Lkey_rotate:
.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d
.Lkey_rotate192:
.long 0x04070605,0x04070605,0x04070605,0x04070605
.Lkey_rcon1:
.long 1,1,1,1
.Lkey_rcon1b:
.long 0x1b,0x1b,0x1b,0x1b
.byte 65,69,83,32,102,111,114,32,73,110,116,101,108,32,65,69,83,45,78,73,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 64
.text
#endif
marvin-hansen/iggy-streaming-system | 11,047 | thirdparty/crates/ring-0.17.9/pregenerated/vpaes-x86_64-macosx.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__)
.text
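// Constant-time "vector permutation" AES (Mike Hamburg; see the credit
// string in _vpaes_consts): SubBytes is computed with SSSE3 pshufb table
// lookups (the .byte 102,15,56,0,... encodings) rather than with
// secret-indexed memory tables.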
.p2align 4
_vpaes_encrypt_core:
movq %rdx,%r9
movq $16,%r11
movl 240(%rdx),%eax
movdqa %xmm9,%xmm1
movdqa L$k_ipt(%rip),%xmm2
pandn %xmm0,%xmm1
movdqu (%r9),%xmm5
psrld $4,%xmm1
pand %xmm9,%xmm0
.byte 102,15,56,0,208
movdqa L$k_ipt+16(%rip),%xmm0
.byte 102,15,56,0,193
pxor %xmm5,%xmm2
addq $16,%r9
pxor %xmm2,%xmm0
leaq L$k_mc_backward(%rip),%r10
jmp L$enc_entry
.p2align 4
L$enc_loop:
movdqa %xmm13,%xmm4
movdqa %xmm12,%xmm0
.byte 102,15,56,0,226
.byte 102,15,56,0,195
pxor %xmm5,%xmm4
movdqa %xmm15,%xmm5
pxor %xmm4,%xmm0
movdqa -64(%r11,%r10,1),%xmm1
.byte 102,15,56,0,234
movdqa (%r11,%r10,1),%xmm4
movdqa %xmm14,%xmm2
.byte 102,15,56,0,211
movdqa %xmm0,%xmm3
pxor %xmm5,%xmm2
.byte 102,15,56,0,193
addq $16,%r9
pxor %xmm2,%xmm0
.byte 102,15,56,0,220
addq $16,%r11
pxor %xmm0,%xmm3
.byte 102,15,56,0,193
andq $0x30,%r11
subq $1,%rax
pxor %xmm3,%xmm0
L$enc_entry:
movdqa %xmm9,%xmm1
movdqa %xmm11,%xmm5
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm9,%xmm0
.byte 102,15,56,0,232
movdqa %xmm10,%xmm3
pxor %xmm1,%xmm0
.byte 102,15,56,0,217
movdqa %xmm10,%xmm4
pxor %xmm5,%xmm3
.byte 102,15,56,0,224
movdqa %xmm10,%xmm2
pxor %xmm5,%xmm4
.byte 102,15,56,0,211
movdqa %xmm10,%xmm3
pxor %xmm0,%xmm2
.byte 102,15,56,0,220
movdqu (%r9),%xmm5
pxor %xmm1,%xmm3
jnz L$enc_loop
movdqa -96(%r10),%xmm4
movdqa -80(%r10),%xmm0
.byte 102,15,56,0,226
pxor %xmm5,%xmm4
.byte 102,15,56,0,195
movdqa 64(%r11,%r10,1),%xmm1
pxor %xmm4,%xmm0
.byte 102,15,56,0,193
ret
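// _vpaes_encrypt_core_2x: the same round function applied to two blocks
// (%xmm0 and %xmm6) in parallel, used by the CTR-mode loop below.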
.p2align 4
_vpaes_encrypt_core_2x:
movq %rdx,%r9
movq $16,%r11
movl 240(%rdx),%eax
movdqa %xmm9,%xmm1
movdqa %xmm9,%xmm7
movdqa L$k_ipt(%rip),%xmm2
movdqa %xmm2,%xmm8
pandn %xmm0,%xmm1
pandn %xmm6,%xmm7
movdqu (%r9),%xmm5
psrld $4,%xmm1
psrld $4,%xmm7
pand %xmm9,%xmm0
pand %xmm9,%xmm6
.byte 102,15,56,0,208
.byte 102,68,15,56,0,198
movdqa L$k_ipt+16(%rip),%xmm0
movdqa %xmm0,%xmm6
.byte 102,15,56,0,193
.byte 102,15,56,0,247
pxor %xmm5,%xmm2
pxor %xmm5,%xmm8
addq $16,%r9
pxor %xmm2,%xmm0
pxor %xmm8,%xmm6
leaq L$k_mc_backward(%rip),%r10
jmp L$enc2x_entry
.p2align 4
L$enc2x_loop:
movdqa L$k_sb1(%rip),%xmm4
movdqa L$k_sb1+16(%rip),%xmm0
movdqa %xmm4,%xmm12
movdqa %xmm0,%xmm6
.byte 102,15,56,0,226
.byte 102,69,15,56,0,224
.byte 102,15,56,0,195
.byte 102,65,15,56,0,243
pxor %xmm5,%xmm4
pxor %xmm5,%xmm12
movdqa L$k_sb2(%rip),%xmm5
movdqa %xmm5,%xmm13
pxor %xmm4,%xmm0
pxor %xmm12,%xmm6
movdqa -64(%r11,%r10,1),%xmm1
.byte 102,15,56,0,234
.byte 102,69,15,56,0,232
movdqa (%r11,%r10,1),%xmm4
movdqa L$k_sb2+16(%rip),%xmm2
movdqa %xmm2,%xmm8
.byte 102,15,56,0,211
.byte 102,69,15,56,0,195
movdqa %xmm0,%xmm3
movdqa %xmm6,%xmm11
pxor %xmm5,%xmm2
pxor %xmm13,%xmm8
.byte 102,15,56,0,193
.byte 102,15,56,0,241
addq $16,%r9
pxor %xmm2,%xmm0
pxor %xmm8,%xmm6
.byte 102,15,56,0,220
.byte 102,68,15,56,0,220
addq $16,%r11
pxor %xmm0,%xmm3
pxor %xmm6,%xmm11
.byte 102,15,56,0,193
.byte 102,15,56,0,241
andq $0x30,%r11
subq $1,%rax
pxor %xmm3,%xmm0
pxor %xmm11,%xmm6
L$enc2x_entry:
movdqa %xmm9,%xmm1
movdqa %xmm9,%xmm7
movdqa L$k_inv+16(%rip),%xmm5
movdqa %xmm5,%xmm13
pandn %xmm0,%xmm1
pandn %xmm6,%xmm7
psrld $4,%xmm1
psrld $4,%xmm7
pand %xmm9,%xmm0
pand %xmm9,%xmm6
.byte 102,15,56,0,232
.byte 102,68,15,56,0,238
movdqa %xmm10,%xmm3
movdqa %xmm10,%xmm11
pxor %xmm1,%xmm0
pxor %xmm7,%xmm6
.byte 102,15,56,0,217
.byte 102,68,15,56,0,223
movdqa %xmm10,%xmm4
movdqa %xmm10,%xmm12
pxor %xmm5,%xmm3
pxor %xmm13,%xmm11
.byte 102,15,56,0,224
.byte 102,68,15,56,0,230
movdqa %xmm10,%xmm2
movdqa %xmm10,%xmm8
pxor %xmm5,%xmm4
pxor %xmm13,%xmm12
.byte 102,15,56,0,211
.byte 102,69,15,56,0,195
movdqa %xmm10,%xmm3
movdqa %xmm10,%xmm11
pxor %xmm0,%xmm2
pxor %xmm6,%xmm8
.byte 102,15,56,0,220
.byte 102,69,15,56,0,220
movdqu (%r9),%xmm5
pxor %xmm1,%xmm3
pxor %xmm7,%xmm11
jnz L$enc2x_loop
movdqa -96(%r10),%xmm4
movdqa -80(%r10),%xmm0
movdqa %xmm4,%xmm12
movdqa %xmm0,%xmm6
.byte 102,15,56,0,226
.byte 102,69,15,56,0,224
pxor %xmm5,%xmm4
pxor %xmm5,%xmm12
.byte 102,15,56,0,195
.byte 102,65,15,56,0,243
movdqa 64(%r11,%r10,1),%xmm1
pxor %xmm4,%xmm0
pxor %xmm12,%xmm6
.byte 102,15,56,0,193
.byte 102,15,56,0,241
ret
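// _vpaes_schedule_core: key-schedule driver; branches on the key size in
// %esi (128- or 256-bit here) and writes each mangled round key out via
// _vpaes_schedule_mangle.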
.p2align 4
_vpaes_schedule_core:
call _vpaes_preheat
movdqa L$k_rcon(%rip),%xmm8
movdqu (%rdi),%xmm0
movdqa %xmm0,%xmm3
leaq L$k_ipt(%rip),%r11
call _vpaes_schedule_transform
movdqa %xmm0,%xmm7
leaq L$k_sr(%rip),%r10
movdqu %xmm0,(%rdx)
L$schedule_go:
cmpl $192,%esi
ja L$schedule_256
L$schedule_128:
movl $10,%esi
L$oop_schedule_128:
call _vpaes_schedule_round
decq %rsi
jz L$schedule_mangle_last
call _vpaes_schedule_mangle
jmp L$oop_schedule_128
.p2align 4
L$schedule_256:
movdqu 16(%rdi),%xmm0
call _vpaes_schedule_transform
movl $7,%esi
L$oop_schedule_256:
call _vpaes_schedule_mangle
movdqa %xmm0,%xmm6
call _vpaes_schedule_round
decq %rsi
jz L$schedule_mangle_last
call _vpaes_schedule_mangle
pshufd $0xFF,%xmm0,%xmm0
movdqa %xmm7,%xmm5
movdqa %xmm6,%xmm7
call _vpaes_schedule_low_round
movdqa %xmm5,%xmm7
jmp L$oop_schedule_256
.p2align 4
L$schedule_mangle_last:
leaq L$k_deskew(%rip),%r11
movdqa (%r8,%r10,1),%xmm1
.byte 102,15,56,0,193
leaq L$k_opt(%rip),%r11
addq $32,%rdx
L$schedule_mangle_last_dec:
addq $-16,%rdx
pxor L$k_s63(%rip),%xmm0
call _vpaes_schedule_transform
movdqu %xmm0,(%rdx)
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
pxor %xmm7,%xmm7
ret
.p2align 4
_vpaes_schedule_round:
pxor %xmm1,%xmm1
.byte 102,65,15,58,15,200,15
.byte 102,69,15,58,15,192,15
pxor %xmm1,%xmm7
pshufd $0xFF,%xmm0,%xmm0
.byte 102,15,58,15,192,1
_vpaes_schedule_low_round:
movdqa %xmm7,%xmm1
pslldq $4,%xmm7
pxor %xmm1,%xmm7
movdqa %xmm7,%xmm1
pslldq $8,%xmm7
pxor %xmm1,%xmm7
pxor L$k_s63(%rip),%xmm7
movdqa %xmm9,%xmm1
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm9,%xmm0
movdqa %xmm11,%xmm2
.byte 102,15,56,0,208
pxor %xmm1,%xmm0
movdqa %xmm10,%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
movdqa %xmm10,%xmm4
.byte 102,15,56,0,224
pxor %xmm2,%xmm4
movdqa %xmm10,%xmm2
.byte 102,15,56,0,211
pxor %xmm0,%xmm2
movdqa %xmm10,%xmm3
.byte 102,15,56,0,220
pxor %xmm1,%xmm3
movdqa %xmm13,%xmm4
.byte 102,15,56,0,226
movdqa %xmm12,%xmm0
.byte 102,15,56,0,195
pxor %xmm4,%xmm0
pxor %xmm7,%xmm0
movdqa %xmm0,%xmm7
ret
.p2align 4
_vpaes_schedule_transform:
movdqa %xmm9,%xmm1
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm9,%xmm0
movdqa (%r11),%xmm2
.byte 102,15,56,0,208
movdqa 16(%r11),%xmm0
.byte 102,15,56,0,193
pxor %xmm2,%xmm0
ret
.p2align 4
_vpaes_schedule_mangle:
movdqa %xmm0,%xmm4
movdqa L$k_mc_forward(%rip),%xmm5
addq $16,%rdx
pxor L$k_s63(%rip),%xmm4
.byte 102,15,56,0,229
movdqa %xmm4,%xmm3
.byte 102,15,56,0,229
pxor %xmm4,%xmm3
.byte 102,15,56,0,229
pxor %xmm4,%xmm3
L$schedule_mangle_both:
movdqa (%r8,%r10,1),%xmm1
.byte 102,15,56,0,217
addq $-16,%r8
andq $0x30,%r8
movdqu %xmm3,(%rdx)
ret
.globl _vpaes_set_encrypt_key
.private_extern _vpaes_set_encrypt_key
.p2align 4
_vpaes_set_encrypt_key:
_CET_ENDBR
#ifdef BORINGSSL_DISPATCH_TEST
movb $1,_BORINGSSL_function_hit+5(%rip)
#endif
movl %esi,%eax
shrl $5,%eax
addl $5,%eax
movl %eax,240(%rdx)
movl $0,%ecx
movl $0x30,%r8d
call _vpaes_schedule_core
xorl %eax,%eax
ret
.globl _vpaes_ctr32_encrypt_blocks
.private_extern _vpaes_ctr32_encrypt_blocks
.p2align 4
_vpaes_ctr32_encrypt_blocks:
_CET_ENDBR
xchgq %rcx,%rdx
testq %rcx,%rcx
jz L$ctr32_abort
movdqu (%r8),%xmm0
movdqa L$ctr_add_one(%rip),%xmm8
subq %rdi,%rsi
call _vpaes_preheat
movdqa %xmm0,%xmm6
pshufb L$rev_ctr(%rip),%xmm6
testq $1,%rcx
jz L$ctr32_prep_loop
movdqu (%rdi),%xmm7
call _vpaes_encrypt_core
pxor %xmm7,%xmm0
paddd %xmm8,%xmm6
movdqu %xmm0,(%rsi,%rdi,1)
subq $1,%rcx
leaq 16(%rdi),%rdi
jz L$ctr32_done
L$ctr32_prep_loop:
movdqa %xmm6,%xmm14
movdqa %xmm6,%xmm15
paddd %xmm8,%xmm15
L$ctr32_loop:
movdqa L$rev_ctr(%rip),%xmm1
movdqa %xmm14,%xmm0
movdqa %xmm15,%xmm6
.byte 102,15,56,0,193
.byte 102,15,56,0,241
call _vpaes_encrypt_core_2x
movdqu (%rdi),%xmm1
movdqu 16(%rdi),%xmm2
movdqa L$ctr_add_two(%rip),%xmm3
pxor %xmm1,%xmm0
pxor %xmm2,%xmm6
paddd %xmm3,%xmm14
paddd %xmm3,%xmm15
movdqu %xmm0,(%rsi,%rdi,1)
movdqu %xmm6,16(%rsi,%rdi,1)
subq $2,%rcx
leaq 32(%rdi),%rdi
jnz L$ctr32_loop
L$ctr32_done:
L$ctr32_abort:
ret
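// _vpaes_preheat: caches the _vpaes_consts lookup tables in %xmm9-%xmm15
// for the encrypt and schedule cores above.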
.p2align 4
_vpaes_preheat:
leaq L$k_s0F(%rip),%r10
movdqa -32(%r10),%xmm10
movdqa -16(%r10),%xmm11
movdqa 0(%r10),%xmm9
movdqa 48(%r10),%xmm13
movdqa 64(%r10),%xmm12
movdqa 80(%r10),%xmm15
movdqa 96(%r10),%xmm14
ret
.section __DATA,__const
.p2align 6
_vpaes_consts:
L$k_inv:
.quad 0x0E05060F0D080180, 0x040703090A0B0C02
.quad 0x01040A060F0B0780, 0x030D0E0C02050809
L$k_s0F:
.quad 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F
L$k_ipt:
.quad 0xC2B2E8985A2A7000, 0xCABAE09052227808
.quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
L$k_sb1:
.quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
.quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
L$k_sb2:
.quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD
.quad 0x69EB88400AE12900, 0xC2A163C8AB82234A
L$k_sbo:
.quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878
.quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
L$k_mc_forward:
.quad 0x0407060500030201, 0x0C0F0E0D080B0A09
.quad 0x080B0A0904070605, 0x000302010C0F0E0D
.quad 0x0C0F0E0D080B0A09, 0x0407060500030201
.quad 0x000302010C0F0E0D, 0x080B0A0904070605
L$k_mc_backward:
.quad 0x0605040702010003, 0x0E0D0C0F0A09080B
.quad 0x020100030E0D0C0F, 0x0A09080B06050407
.quad 0x0E0D0C0F0A09080B, 0x0605040702010003
.quad 0x0A09080B06050407, 0x020100030E0D0C0F
L$k_sr:
.quad 0x0706050403020100, 0x0F0E0D0C0B0A0908
.quad 0x030E09040F0A0500, 0x0B06010C07020D08
.quad 0x0F060D040B020900, 0x070E050C030A0108
.quad 0x0B0E0104070A0D00, 0x0306090C0F020508
L$k_rcon:
.quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81
L$k_s63:
.quad 0x5B5B5B5B5B5B5B5B, 0x5B5B5B5B5B5B5B5B
L$k_opt:
.quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808
.quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
L$k_deskew:
.quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
.quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77
L$rev_ctr:
.quad 0x0706050403020100, 0x0c0d0e0f0b0a0908
L$ctr_add_one:
.quad 0x0000000000000000, 0x0000000100000000
L$ctr_add_two:
.quad 0x0000000000000000, 0x0000000200000000
.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,120,56,54,95,54,52,47,83,83,83,69,51,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0
.p2align 6
.text
#endif
marvin-hansen/iggy-streaming-system | 78,605 | thirdparty/crates/ring-0.17.9/pregenerated/p256-x86_64-asm-elf.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__)
.text
.section .rodata
.align 64
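// P-256 constants: .Lpoly is the field prime p = 2^256 - 2^224 + 2^192 +
// 2^96 - 1, .Lord is the group order n, and .LordK is the per-word
// Montgomery reduction constant for n used by the ord_* routines below.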
.Lpoly:
.quad 0xffffffffffffffff, 0x00000000ffffffff, 0x0000000000000000, 0xffffffff00000001
.LOne:
.long 1,1,1,1,1,1,1,1
.LTwo:
.long 2,2,2,2,2,2,2,2
.LThree:
.long 3,3,3,3,3,3,3,3
.LONE_mont:
.quad 0x0000000000000001, 0xffffffff00000000, 0xffffffffffffffff, 0x00000000fffffffe
.Lord:
.quad 0xf3b9cac2fc632551, 0xbce6faada7179e84, 0xffffffffffffffff, 0xffffffff00000000
.LordK:
.quad 0xccd1c8aaee00bc4f
.text
.globl ecp_nistz256_neg
.hidden ecp_nistz256_neg
.type ecp_nistz256_neg,@function
.align 32
ecp_nistz256_neg:
.cfi_startproc
_CET_ENDBR
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-16
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-24
.Lneg_body:
xorq %r8,%r8
xorq %r9,%r9
xorq %r10,%r10
xorq %r11,%r11
xorq %r13,%r13
subq 0(%rsi),%r8
sbbq 8(%rsi),%r9
sbbq 16(%rsi),%r10
movq %r8,%rax
sbbq 24(%rsi),%r11
leaq .Lpoly(%rip),%rsi
movq %r9,%rdx
sbbq $0,%r13
addq 0(%rsi),%r8
movq %r10,%rcx
adcq 8(%rsi),%r9
adcq 16(%rsi),%r10
movq %r11,%r12
adcq 24(%rsi),%r11
testq %r13,%r13
cmovzq %rax,%r8
cmovzq %rdx,%r9
movq %r8,0(%rdi)
cmovzq %rcx,%r10
movq %r9,8(%rdi)
cmovzq %r12,%r11
movq %r10,16(%rdi)
movq %r11,24(%rdi)
movq 0(%rsp),%r13
.cfi_restore %r13
movq 8(%rsp),%r12
.cfi_restore %r12
leaq 16(%rsp),%rsp
.cfi_adjust_cfa_offset -16
.Lneg_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_neg,.-ecp_nistz256_neg
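// Montgomery multiplication modulo the group order n: four schoolbook word
// products interleaved with word-by-word reduction via imulq with .LordK.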
.globl ecp_nistz256_ord_mul_mont_nohw
.hidden ecp_nistz256_ord_mul_mont_nohw
.type ecp_nistz256_ord_mul_mont_nohw,@function
.align 32
ecp_nistz256_ord_mul_mont_nohw:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
.Lord_mul_body:
movq 0(%rdx),%rax
movq %rdx,%rbx
leaq .Lord(%rip),%r14
movq .LordK(%rip),%r15
movq %rax,%rcx
mulq 0(%rsi)
movq %rax,%r8
movq %rcx,%rax
movq %rdx,%r9
mulq 8(%rsi)
addq %rax,%r9
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%r10
mulq 16(%rsi)
addq %rax,%r10
movq %rcx,%rax
adcq $0,%rdx
movq %r8,%r13
imulq %r15,%r8
movq %rdx,%r11
mulq 24(%rsi)
addq %rax,%r11
movq %r8,%rax
adcq $0,%rdx
movq %rdx,%r12
mulq 0(%r14)
movq %r8,%rbp
addq %rax,%r13
movq %r8,%rax
adcq $0,%rdx
movq %rdx,%rcx
subq %r8,%r10
sbbq $0,%r8
mulq 8(%r14)
addq %rcx,%r9
adcq $0,%rdx
addq %rax,%r9
movq %rbp,%rax
adcq %rdx,%r10
movq %rbp,%rdx
adcq $0,%r8
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r11
movq 8(%rbx),%rax
sbbq %rdx,%rbp
addq %r8,%r11
adcq %rbp,%r12
adcq $0,%r13
movq %rax,%rcx
mulq 0(%rsi)
addq %rax,%r9
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq 8(%rsi)
addq %rbp,%r10
adcq $0,%rdx
addq %rax,%r10
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq 16(%rsi)
addq %rbp,%r11
adcq $0,%rdx
addq %rax,%r11
movq %rcx,%rax
adcq $0,%rdx
movq %r9,%rcx
imulq %r15,%r9
movq %rdx,%rbp
mulq 24(%rsi)
addq %rbp,%r12
adcq $0,%rdx
xorq %r8,%r8
addq %rax,%r12
movq %r9,%rax
adcq %rdx,%r13
adcq $0,%r8
mulq 0(%r14)
movq %r9,%rbp
addq %rax,%rcx
movq %r9,%rax
adcq %rdx,%rcx
subq %r9,%r11
sbbq $0,%r9
mulq 8(%r14)
addq %rcx,%r10
adcq $0,%rdx
addq %rax,%r10
movq %rbp,%rax
adcq %rdx,%r11
movq %rbp,%rdx
adcq $0,%r9
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r12
movq 16(%rbx),%rax
sbbq %rdx,%rbp
addq %r9,%r12
adcq %rbp,%r13
adcq $0,%r8
movq %rax,%rcx
mulq 0(%rsi)
addq %rax,%r10
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq 8(%rsi)
addq %rbp,%r11
adcq $0,%rdx
addq %rax,%r11
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq 16(%rsi)
addq %rbp,%r12
adcq $0,%rdx
addq %rax,%r12
movq %rcx,%rax
adcq $0,%rdx
movq %r10,%rcx
imulq %r15,%r10
movq %rdx,%rbp
mulq 24(%rsi)
addq %rbp,%r13
adcq $0,%rdx
xorq %r9,%r9
addq %rax,%r13
movq %r10,%rax
adcq %rdx,%r8
adcq $0,%r9
mulq 0(%r14)
movq %r10,%rbp
addq %rax,%rcx
movq %r10,%rax
adcq %rdx,%rcx
subq %r10,%r12
sbbq $0,%r10
mulq 8(%r14)
addq %rcx,%r11
adcq $0,%rdx
addq %rax,%r11
movq %rbp,%rax
adcq %rdx,%r12
movq %rbp,%rdx
adcq $0,%r10
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r13
movq 24(%rbx),%rax
sbbq %rdx,%rbp
addq %r10,%r13
adcq %rbp,%r8
adcq $0,%r9
movq %rax,%rcx
mulq 0(%rsi)
addq %rax,%r11
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq 8(%rsi)
addq %rbp,%r12
adcq $0,%rdx
addq %rax,%r12
movq %rcx,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq 16(%rsi)
addq %rbp,%r13
adcq $0,%rdx
addq %rax,%r13
movq %rcx,%rax
adcq $0,%rdx
movq %r11,%rcx
imulq %r15,%r11
movq %rdx,%rbp
mulq 24(%rsi)
addq %rbp,%r8
adcq $0,%rdx
xorq %r10,%r10
addq %rax,%r8
movq %r11,%rax
adcq %rdx,%r9
adcq $0,%r10
mulq 0(%r14)
movq %r11,%rbp
addq %rax,%rcx
movq %r11,%rax
adcq %rdx,%rcx
subq %r11,%r13
sbbq $0,%r11
mulq 8(%r14)
addq %rcx,%r12
adcq $0,%rdx
addq %rax,%r12
movq %rbp,%rax
adcq %rdx,%r13
movq %rbp,%rdx
adcq $0,%r11
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r8
sbbq %rdx,%rbp
addq %r11,%r8
adcq %rbp,%r9
adcq $0,%r10
movq %r12,%rsi
subq 0(%r14),%r12
movq %r13,%r11
sbbq 8(%r14),%r13
movq %r8,%rcx
sbbq 16(%r14),%r8
movq %r9,%rbp
sbbq 24(%r14),%r9
sbbq $0,%r10
cmovcq %rsi,%r12
cmovcq %r11,%r13
cmovcq %rcx,%r8
cmovcq %rbp,%r9
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r8,16(%rdi)
movq %r9,24(%rdi)
movq 0(%rsp),%r15
.cfi_restore %r15
movq 8(%rsp),%r14
.cfi_restore %r14
movq 16(%rsp),%r13
.cfi_restore %r13
movq 24(%rsp),%r12
.cfi_restore %r12
movq 32(%rsp),%rbx
.cfi_restore %rbx
movq 40(%rsp),%rbp
.cfi_restore %rbp
leaq 48(%rsp),%rsp
.cfi_adjust_cfa_offset -48
.Lord_mul_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_ord_mul_mont_nohw,.-ecp_nistz256_ord_mul_mont_nohw
.globl ecp_nistz256_ord_sqr_mont_nohw
.hidden ecp_nistz256_ord_sqr_mont_nohw
.type ecp_nistz256_ord_sqr_mont_nohw,@function
.align 32
ecp_nistz256_ord_sqr_mont_nohw:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
.Lord_sqr_body:
movq 0(%rsi),%r8
movq 8(%rsi),%rax
movq 16(%rsi),%r14
movq 24(%rsi),%r15
leaq .Lord(%rip),%rsi
movq %rdx,%rbx
jmp .Loop_ord_sqr
.align 32
.Loop_ord_sqr:
movq %rax,%rbp
mulq %r8
movq %rax,%r9
.byte 102,72,15,110,205
movq %r14,%rax
movq %rdx,%r10
mulq %r8
addq %rax,%r10
movq %r15,%rax
.byte 102,73,15,110,214
adcq $0,%rdx
movq %rdx,%r11
mulq %r8
addq %rax,%r11
movq %r15,%rax
.byte 102,73,15,110,223
adcq $0,%rdx
movq %rdx,%r12
mulq %r14
movq %rax,%r13
movq %r14,%rax
movq %rdx,%r14
mulq %rbp
addq %rax,%r11
movq %r15,%rax
adcq $0,%rdx
movq %rdx,%r15
mulq %rbp
addq %rax,%r12
adcq $0,%rdx
addq %r15,%r12
adcq %rdx,%r13
adcq $0,%r14
xorq %r15,%r15
movq %r8,%rax
addq %r9,%r9
adcq %r10,%r10
adcq %r11,%r11
adcq %r12,%r12
adcq %r13,%r13
adcq %r14,%r14
adcq $0,%r15
mulq %rax
movq %rax,%r8
.byte 102,72,15,126,200
movq %rdx,%rbp
mulq %rax
addq %rbp,%r9
adcq %rax,%r10
.byte 102,72,15,126,208
adcq $0,%rdx
movq %rdx,%rbp
mulq %rax
addq %rbp,%r11
adcq %rax,%r12
.byte 102,72,15,126,216
adcq $0,%rdx
movq %rdx,%rbp
movq %r8,%rcx
imulq 32(%rsi),%r8
mulq %rax
addq %rbp,%r13
adcq %rax,%r14
movq 0(%rsi),%rax
adcq %rdx,%r15
mulq %r8
movq %r8,%rbp
addq %rax,%rcx
movq 8(%rsi),%rax
adcq %rdx,%rcx
subq %r8,%r10
sbbq $0,%rbp
mulq %r8
addq %rcx,%r9
adcq $0,%rdx
addq %rax,%r9
movq %r8,%rax
adcq %rdx,%r10
movq %r8,%rdx
adcq $0,%rbp
movq %r9,%rcx
imulq 32(%rsi),%r9
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r11
movq 0(%rsi),%rax
sbbq %rdx,%r8
addq %rbp,%r11
adcq $0,%r8
mulq %r9
movq %r9,%rbp
addq %rax,%rcx
movq 8(%rsi),%rax
adcq %rdx,%rcx
subq %r9,%r11
sbbq $0,%rbp
mulq %r9
addq %rcx,%r10
adcq $0,%rdx
addq %rax,%r10
movq %r9,%rax
adcq %rdx,%r11
movq %r9,%rdx
adcq $0,%rbp
movq %r10,%rcx
imulq 32(%rsi),%r10
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r8
movq 0(%rsi),%rax
sbbq %rdx,%r9
addq %rbp,%r8
adcq $0,%r9
mulq %r10
movq %r10,%rbp
addq %rax,%rcx
movq 8(%rsi),%rax
adcq %rdx,%rcx
subq %r10,%r8
sbbq $0,%rbp
mulq %r10
addq %rcx,%r11
adcq $0,%rdx
addq %rax,%r11
movq %r10,%rax
adcq %rdx,%r8
movq %r10,%rdx
adcq $0,%rbp
movq %r11,%rcx
imulq 32(%rsi),%r11
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r9
movq 0(%rsi),%rax
sbbq %rdx,%r10
addq %rbp,%r9
adcq $0,%r10
mulq %r11
movq %r11,%rbp
addq %rax,%rcx
movq 8(%rsi),%rax
adcq %rdx,%rcx
subq %r11,%r9
sbbq $0,%rbp
mulq %r11
addq %rcx,%r8
adcq $0,%rdx
addq %rax,%r8
movq %r11,%rax
adcq %rdx,%r9
movq %r11,%rdx
adcq $0,%rbp
shlq $32,%rax
shrq $32,%rdx
subq %rax,%r10
sbbq %rdx,%r11
addq %rbp,%r10
adcq $0,%r11
xorq %rdx,%rdx
addq %r12,%r8
adcq %r13,%r9
movq %r8,%r12
adcq %r14,%r10
adcq %r15,%r11
movq %r9,%rax
adcq $0,%rdx
subq 0(%rsi),%r8
movq %r10,%r14
sbbq 8(%rsi),%r9
sbbq 16(%rsi),%r10
movq %r11,%r15
sbbq 24(%rsi),%r11
sbbq $0,%rdx
cmovcq %r12,%r8
cmovncq %r9,%rax
cmovncq %r10,%r14
cmovncq %r11,%r15
decq %rbx
jnz .Loop_ord_sqr
movq %r8,0(%rdi)
movq %rax,8(%rdi)
pxor %xmm1,%xmm1
movq %r14,16(%rdi)
pxor %xmm2,%xmm2
movq %r15,24(%rdi)
pxor %xmm3,%xmm3
movq 0(%rsp),%r15
.cfi_restore %r15
movq 8(%rsp),%r14
.cfi_restore %r14
movq 16(%rsp),%r13
.cfi_restore %r13
movq 24(%rsp),%r12
.cfi_restore %r12
movq 32(%rsp),%rbx
.cfi_restore %rbx
movq 40(%rsp),%rbp
.cfi_restore %rbp
leaq 48(%rsp),%rsp
.cfi_adjust_cfa_offset -48
.Lord_sqr_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_ord_sqr_mont_nohw,.-ecp_nistz256_ord_sqr_mont_nohw
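// _adx variants: the same ord_* arithmetic rewritten with BMI2 MULX and the
// ADCX/ADOX dual carry chains for CPUs that support those extensions.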
.globl ecp_nistz256_ord_mul_mont_adx
.hidden ecp_nistz256_ord_mul_mont_adx
.type ecp_nistz256_ord_mul_mont_adx,@function
.align 32
ecp_nistz256_ord_mul_mont_adx:
.cfi_startproc
.Lecp_nistz256_ord_mul_mont_adx:
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
.Lord_mulx_body:
movq %rdx,%rbx
movq 0(%rdx),%rdx
movq 0(%rsi),%r9
movq 8(%rsi),%r10
movq 16(%rsi),%r11
movq 24(%rsi),%r12
leaq -128(%rsi),%rsi
leaq .Lord-128(%rip),%r14
movq .LordK(%rip),%r15
mulxq %r9,%r8,%r9
mulxq %r10,%rcx,%r10
mulxq %r11,%rbp,%r11
addq %rcx,%r9
mulxq %r12,%rcx,%r12
movq %r8,%rdx
mulxq %r15,%rdx,%rax
adcq %rbp,%r10
adcq %rcx,%r11
adcq $0,%r12
xorq %r13,%r13
mulxq 0+128(%r14),%rcx,%rbp
adcxq %rcx,%r8
adoxq %rbp,%r9
mulxq 8+128(%r14),%rcx,%rbp
adcxq %rcx,%r9
adoxq %rbp,%r10
mulxq 16+128(%r14),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 24+128(%r14),%rcx,%rbp
movq 8(%rbx),%rdx
adcxq %rcx,%r11
adoxq %rbp,%r12
adcxq %r8,%r12
adoxq %r8,%r13
adcq $0,%r13
mulxq 0+128(%rsi),%rcx,%rbp
adcxq %rcx,%r9
adoxq %rbp,%r10
mulxq 8+128(%rsi),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 16+128(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 24+128(%rsi),%rcx,%rbp
movq %r9,%rdx
mulxq %r15,%rdx,%rax
adcxq %rcx,%r12
adoxq %rbp,%r13
adcxq %r8,%r13
adoxq %r8,%r8
adcq $0,%r8
mulxq 0+128(%r14),%rcx,%rbp
adcxq %rcx,%r9
adoxq %rbp,%r10
mulxq 8+128(%r14),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 16+128(%r14),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 24+128(%r14),%rcx,%rbp
movq 16(%rbx),%rdx
adcxq %rcx,%r12
adoxq %rbp,%r13
adcxq %r9,%r13
adoxq %r9,%r8
adcq $0,%r8
mulxq 0+128(%rsi),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 8+128(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 16+128(%rsi),%rcx,%rbp
adcxq %rcx,%r12
adoxq %rbp,%r13
mulxq 24+128(%rsi),%rcx,%rbp
movq %r10,%rdx
mulxq %r15,%rdx,%rax
adcxq %rcx,%r13
adoxq %rbp,%r8
adcxq %r9,%r8
adoxq %r9,%r9
adcq $0,%r9
mulxq 0+128(%r14),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 8+128(%r14),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 16+128(%r14),%rcx,%rbp
adcxq %rcx,%r12
adoxq %rbp,%r13
mulxq 24+128(%r14),%rcx,%rbp
movq 24(%rbx),%rdx
adcxq %rcx,%r13
adoxq %rbp,%r8
adcxq %r10,%r8
adoxq %r10,%r9
adcq $0,%r9
mulxq 0+128(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 8+128(%rsi),%rcx,%rbp
adcxq %rcx,%r12
adoxq %rbp,%r13
mulxq 16+128(%rsi),%rcx,%rbp
adcxq %rcx,%r13
adoxq %rbp,%r8
mulxq 24+128(%rsi),%rcx,%rbp
movq %r11,%rdx
mulxq %r15,%rdx,%rax
adcxq %rcx,%r8
adoxq %rbp,%r9
adcxq %r10,%r9
adoxq %r10,%r10
adcq $0,%r10
mulxq 0+128(%r14),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 8+128(%r14),%rcx,%rbp
adcxq %rcx,%r12
adoxq %rbp,%r13
mulxq 16+128(%r14),%rcx,%rbp
adcxq %rcx,%r13
adoxq %rbp,%r8
mulxq 24+128(%r14),%rcx,%rbp
leaq 128(%r14),%r14
movq %r12,%rbx
adcxq %rcx,%r8
adoxq %rbp,%r9
movq %r13,%rdx
adcxq %r11,%r9
adoxq %r11,%r10
adcq $0,%r10
movq %r8,%rcx
subq 0(%r14),%r12
sbbq 8(%r14),%r13
sbbq 16(%r14),%r8
movq %r9,%rbp
sbbq 24(%r14),%r9
sbbq $0,%r10
cmovcq %rbx,%r12
cmovcq %rdx,%r13
cmovcq %rcx,%r8
cmovcq %rbp,%r9
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r8,16(%rdi)
movq %r9,24(%rdi)
movq 0(%rsp),%r15
.cfi_restore %r15
movq 8(%rsp),%r14
.cfi_restore %r14
movq 16(%rsp),%r13
.cfi_restore %r13
movq 24(%rsp),%r12
.cfi_restore %r12
movq 32(%rsp),%rbx
.cfi_restore %rbx
movq 40(%rsp),%rbp
.cfi_restore %rbp
leaq 48(%rsp),%rsp
.cfi_adjust_cfa_offset -48
.Lord_mulx_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_ord_mul_mont_adx,.-ecp_nistz256_ord_mul_mont_adx
.globl ecp_nistz256_ord_sqr_mont_adx
.hidden ecp_nistz256_ord_sqr_mont_adx
.type ecp_nistz256_ord_sqr_mont_adx,@function
.align 32
ecp_nistz256_ord_sqr_mont_adx:
.cfi_startproc
_CET_ENDBR
.Lecp_nistz256_ord_sqr_mont_adx:
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
.Lord_sqrx_body:
movq %rdx,%rbx
movq 0(%rsi),%rdx
movq 8(%rsi),%r14
movq 16(%rsi),%r15
movq 24(%rsi),%r8
leaq .Lord(%rip),%rsi
jmp .Loop_ord_sqrx
.align 32
.Loop_ord_sqrx:
mulxq %r14,%r9,%r10
mulxq %r15,%rcx,%r11
movq %rdx,%rax
.byte 102,73,15,110,206
mulxq %r8,%rbp,%r12
movq %r14,%rdx
addq %rcx,%r10
.byte 102,73,15,110,215
adcq %rbp,%r11
adcq $0,%r12
xorq %r13,%r13
mulxq %r15,%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq %r8,%rcx,%rbp
movq %r15,%rdx
adcxq %rcx,%r12
adoxq %rbp,%r13
adcq $0,%r13
mulxq %r8,%rcx,%r14
movq %rax,%rdx
.byte 102,73,15,110,216
xorq %r15,%r15
adcxq %r9,%r9
adoxq %rcx,%r13
adcxq %r10,%r10
adoxq %r15,%r14
mulxq %rdx,%r8,%rbp
.byte 102,72,15,126,202
adcxq %r11,%r11
adoxq %rbp,%r9
adcxq %r12,%r12
mulxq %rdx,%rcx,%rax
.byte 102,72,15,126,210
adcxq %r13,%r13
adoxq %rcx,%r10
adcxq %r14,%r14
mulxq %rdx,%rcx,%rbp
.byte 0x67
.byte 102,72,15,126,218
adoxq %rax,%r11
adcxq %r15,%r15
adoxq %rcx,%r12
adoxq %rbp,%r13
mulxq %rdx,%rcx,%rax
adoxq %rcx,%r14
adoxq %rax,%r15
movq %r8,%rdx
mulxq 32(%rsi),%rdx,%rcx
xorq %rax,%rax
mulxq 0(%rsi),%rcx,%rbp
adcxq %rcx,%r8
adoxq %rbp,%r9
mulxq 8(%rsi),%rcx,%rbp
adcxq %rcx,%r9
adoxq %rbp,%r10
mulxq 16(%rsi),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 24(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r8
adcxq %rax,%r8
movq %r9,%rdx
mulxq 32(%rsi),%rdx,%rcx
mulxq 0(%rsi),%rcx,%rbp
adoxq %rcx,%r9
adcxq %rbp,%r10
mulxq 8(%rsi),%rcx,%rbp
adoxq %rcx,%r10
adcxq %rbp,%r11
mulxq 16(%rsi),%rcx,%rbp
adoxq %rcx,%r11
adcxq %rbp,%r8
mulxq 24(%rsi),%rcx,%rbp
adoxq %rcx,%r8
adcxq %rbp,%r9
adoxq %rax,%r9
movq %r10,%rdx
mulxq 32(%rsi),%rdx,%rcx
mulxq 0(%rsi),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 8(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r8
mulxq 16(%rsi),%rcx,%rbp
adcxq %rcx,%r8
adoxq %rbp,%r9
mulxq 24(%rsi),%rcx,%rbp
adcxq %rcx,%r9
adoxq %rbp,%r10
adcxq %rax,%r10
movq %r11,%rdx
mulxq 32(%rsi),%rdx,%rcx
mulxq 0(%rsi),%rcx,%rbp
adoxq %rcx,%r11
adcxq %rbp,%r8
mulxq 8(%rsi),%rcx,%rbp
adoxq %rcx,%r8
adcxq %rbp,%r9
mulxq 16(%rsi),%rcx,%rbp
adoxq %rcx,%r9
adcxq %rbp,%r10
mulxq 24(%rsi),%rcx,%rbp
adoxq %rcx,%r10
adcxq %rbp,%r11
adoxq %rax,%r11
addq %r8,%r12
adcq %r13,%r9
movq %r12,%rdx
adcq %r14,%r10
adcq %r15,%r11
movq %r9,%r14
adcq $0,%rax
subq 0(%rsi),%r12
movq %r10,%r15
sbbq 8(%rsi),%r9
sbbq 16(%rsi),%r10
movq %r11,%r8
sbbq 24(%rsi),%r11
sbbq $0,%rax
cmovncq %r12,%rdx
cmovncq %r9,%r14
cmovncq %r10,%r15
cmovncq %r11,%r8
decq %rbx
jnz .Loop_ord_sqrx
movq %rdx,0(%rdi)
movq %r14,8(%rdi)
pxor %xmm1,%xmm1
movq %r15,16(%rdi)
pxor %xmm2,%xmm2
movq %r8,24(%rdi)
pxor %xmm3,%xmm3
movq 0(%rsp),%r15
.cfi_restore %r15
movq 8(%rsp),%r14
.cfi_restore %r14
movq 16(%rsp),%r13
.cfi_restore %r13
movq 24(%rsp),%r12
.cfi_restore %r12
movq 32(%rsp),%rbx
.cfi_restore %rbx
movq 40(%rsp),%rbp
.cfi_restore %rbp
leaq 48(%rsp),%rsp
.cfi_adjust_cfa_offset -48
.Lord_sqrx_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_ord_sqr_mont_adx,.-ecp_nistz256_ord_sqr_mont_adx
.globl ecp_nistz256_mul_mont_nohw
.hidden ecp_nistz256_mul_mont_nohw
.type ecp_nistz256_mul_mont_nohw,@function
.align 32
ecp_nistz256_mul_mont_nohw:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
.Lmul_body:
movq %rdx,%rbx
movq 0(%rdx),%rax
movq 0(%rsi),%r9
movq 8(%rsi),%r10
movq 16(%rsi),%r11
movq 24(%rsi),%r12
call __ecp_nistz256_mul_montq
movq 0(%rsp),%r15
.cfi_restore %r15
movq 8(%rsp),%r14
.cfi_restore %r14
movq 16(%rsp),%r13
.cfi_restore %r13
movq 24(%rsp),%r12
.cfi_restore %r12
movq 32(%rsp),%rbx
.cfi_restore %rbx
movq 40(%rsp),%rbp
.cfi_restore %rbp
leaq 48(%rsp),%rsp
.cfi_adjust_cfa_offset -48
.Lmul_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_mul_mont_nohw,.-ecp_nistz256_mul_mont_nohw
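// __ecp_nistz256_mul_montq: inner Montgomery multiply mod p; the reduction
// exploits the sparse form of p with 32-bit shifts plus a single mulq by
// the top word of .Lpoly instead of general multiplications.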
.type __ecp_nistz256_mul_montq,@function
.align 32
__ecp_nistz256_mul_montq:
.cfi_startproc
movq %rax,%rbp
mulq %r9
movq .Lpoly+8(%rip),%r14
movq %rax,%r8
movq %rbp,%rax
movq %rdx,%r9
mulq %r10
movq .Lpoly+24(%rip),%r15
addq %rax,%r9
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %r11
addq %rax,%r10
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%r11
mulq %r12
addq %rax,%r11
movq %r8,%rax
adcq $0,%rdx
xorq %r13,%r13
movq %rdx,%r12
movq %r8,%rbp
shlq $32,%r8
mulq %r15
shrq $32,%rbp
addq %r8,%r9
adcq %rbp,%r10
adcq %rax,%r11
movq 8(%rbx),%rax
adcq %rdx,%r12
adcq $0,%r13
xorq %r8,%r8
movq %rax,%rbp
mulq 0(%rsi)
addq %rax,%r9
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 8(%rsi)
addq %rcx,%r10
adcq $0,%rdx
addq %rax,%r10
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 16(%rsi)
addq %rcx,%r11
adcq $0,%rdx
addq %rax,%r11
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 24(%rsi)
addq %rcx,%r12
adcq $0,%rdx
addq %rax,%r12
movq %r9,%rax
adcq %rdx,%r13
adcq $0,%r8
movq %r9,%rbp
shlq $32,%r9
mulq %r15
shrq $32,%rbp
addq %r9,%r10
adcq %rbp,%r11
adcq %rax,%r12
movq 16(%rbx),%rax
adcq %rdx,%r13
adcq $0,%r8
xorq %r9,%r9
movq %rax,%rbp
mulq 0(%rsi)
addq %rax,%r10
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 8(%rsi)
addq %rcx,%r11
adcq $0,%rdx
addq %rax,%r11
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 16(%rsi)
addq %rcx,%r12
adcq $0,%rdx
addq %rax,%r12
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 24(%rsi)
addq %rcx,%r13
adcq $0,%rdx
addq %rax,%r13
movq %r10,%rax
adcq %rdx,%r8
adcq $0,%r9
movq %r10,%rbp
shlq $32,%r10
mulq %r15
shrq $32,%rbp
addq %r10,%r11
adcq %rbp,%r12
adcq %rax,%r13
movq 24(%rbx),%rax
adcq %rdx,%r8
adcq $0,%r9
xorq %r10,%r10
movq %rax,%rbp
mulq 0(%rsi)
addq %rax,%r11
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 8(%rsi)
addq %rcx,%r12
adcq $0,%rdx
addq %rax,%r12
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 16(%rsi)
addq %rcx,%r13
adcq $0,%rdx
addq %rax,%r13
movq %rbp,%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq 24(%rsi)
addq %rcx,%r8
adcq $0,%rdx
addq %rax,%r8
movq %r11,%rax
adcq %rdx,%r9
adcq $0,%r10
movq %r11,%rbp
shlq $32,%r11
mulq %r15
shrq $32,%rbp
addq %r11,%r12
adcq %rbp,%r13
movq %r12,%rcx
adcq %rax,%r8
adcq %rdx,%r9
movq %r13,%rbp
adcq $0,%r10
subq $-1,%r12
movq %r8,%rbx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%rdx
sbbq %r15,%r9
sbbq $0,%r10
cmovcq %rcx,%r12
cmovcq %rbp,%r13
movq %r12,0(%rdi)
cmovcq %rbx,%r8
movq %r13,8(%rdi)
cmovcq %rdx,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.cfi_endproc
.size __ecp_nistz256_mul_montq,.-__ecp_nistz256_mul_montq
.globl ecp_nistz256_sqr_mont_nohw
.hidden ecp_nistz256_sqr_mont_nohw
.type ecp_nistz256_sqr_mont_nohw,@function
.align 32
ecp_nistz256_sqr_mont_nohw:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
.Lsqr_body:
movq 0(%rsi),%rax
movq 8(%rsi),%r14
movq 16(%rsi),%r15
movq 24(%rsi),%r8
call __ecp_nistz256_sqr_montq
movq 0(%rsp),%r15
.cfi_restore %r15
movq 8(%rsp),%r14
.cfi_restore %r14
movq 16(%rsp),%r13
.cfi_restore %r13
movq 24(%rsp),%r12
.cfi_restore %r12
movq 32(%rsp),%rbx
.cfi_restore %rbx
movq 40(%rsp),%rbp
.cfi_restore %rbp
leaq 48(%rsp),%rsp
.cfi_adjust_cfa_offset -48
.Lsqr_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_sqr_mont_nohw,.-ecp_nistz256_sqr_mont_nohw
.type __ecp_nistz256_sqr_montq,@function
.align 32
__ecp_nistz256_sqr_montq:
.cfi_startproc
movq %rax,%r13
mulq %r14
movq %rax,%r9
movq %r15,%rax
movq %rdx,%r10
mulq %r13
addq %rax,%r10
movq %r8,%rax
adcq $0,%rdx
movq %rdx,%r11
mulq %r13
addq %rax,%r11
movq %r15,%rax
adcq $0,%rdx
movq %rdx,%r12
mulq %r14
addq %rax,%r11
movq %r8,%rax
adcq $0,%rdx
movq %rdx,%rbp
mulq %r14
addq %rax,%r12
movq %r8,%rax
adcq $0,%rdx
addq %rbp,%r12
movq %rdx,%r13
adcq $0,%r13
mulq %r15
xorq %r15,%r15
addq %rax,%r13
movq 0(%rsi),%rax
movq %rdx,%r14
adcq $0,%r14
addq %r9,%r9
adcq %r10,%r10
adcq %r11,%r11
adcq %r12,%r12
adcq %r13,%r13
adcq %r14,%r14
adcq $0,%r15
mulq %rax
movq %rax,%r8
movq 8(%rsi),%rax
movq %rdx,%rcx
mulq %rax
addq %rcx,%r9
adcq %rax,%r10
movq 16(%rsi),%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq %rax
addq %rcx,%r11
adcq %rax,%r12
movq 24(%rsi),%rax
adcq $0,%rdx
movq %rdx,%rcx
mulq %rax
addq %rcx,%r13
adcq %rax,%r14
movq %r8,%rax
adcq %rdx,%r15
movq .Lpoly+8(%rip),%rsi
movq .Lpoly+24(%rip),%rbp
movq %r8,%rcx
shlq $32,%r8
mulq %rbp
shrq $32,%rcx
addq %r8,%r9
adcq %rcx,%r10
adcq %rax,%r11
movq %r9,%rax
adcq $0,%rdx
movq %r9,%rcx
shlq $32,%r9
movq %rdx,%r8
mulq %rbp
shrq $32,%rcx
addq %r9,%r10
adcq %rcx,%r11
adcq %rax,%r8
movq %r10,%rax
adcq $0,%rdx
movq %r10,%rcx
shlq $32,%r10
movq %rdx,%r9
mulq %rbp
shrq $32,%rcx
addq %r10,%r11
adcq %rcx,%r8
adcq %rax,%r9
movq %r11,%rax
adcq $0,%rdx
movq %r11,%rcx
shlq $32,%r11
movq %rdx,%r10
mulq %rbp
shrq $32,%rcx
addq %r11,%r8
adcq %rcx,%r9
adcq %rax,%r10
adcq $0,%rdx
xorq %r11,%r11
addq %r8,%r12
adcq %r9,%r13
movq %r12,%r8
adcq %r10,%r14
adcq %rdx,%r15
movq %r13,%r9
adcq $0,%r11
subq $-1,%r12
movq %r14,%r10
sbbq %rsi,%r13
sbbq $0,%r14
movq %r15,%rcx
sbbq %rbp,%r15
sbbq $0,%r11
cmovcq %r8,%r12
cmovcq %r9,%r13
movq %r12,0(%rdi)
cmovcq %r10,%r14
movq %r13,8(%rdi)
cmovcq %rcx,%r15
movq %r14,16(%rdi)
movq %r15,24(%rdi)
ret
.cfi_endproc
.size __ecp_nistz256_sqr_montq,.-__ecp_nistz256_sqr_montq
.globl ecp_nistz256_mul_mont_adx
.hidden ecp_nistz256_mul_mont_adx
.type ecp_nistz256_mul_mont_adx,@function
.align 32
ecp_nistz256_mul_mont_adx:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
.Lmulx_body:
movq %rdx,%rbx
movq 0(%rdx),%rdx
movq 0(%rsi),%r9
movq 8(%rsi),%r10
movq 16(%rsi),%r11
movq 24(%rsi),%r12
leaq -128(%rsi),%rsi
call __ecp_nistz256_mul_montx
movq 0(%rsp),%r15
.cfi_restore %r15
movq 8(%rsp),%r14
.cfi_restore %r14
movq 16(%rsp),%r13
.cfi_restore %r13
movq 24(%rsp),%r12
.cfi_restore %r12
movq 32(%rsp),%rbx
.cfi_restore %rbx
movq 40(%rsp),%rbp
.cfi_restore %rbp
leaq 48(%rsp),%rsp
.cfi_adjust_cfa_offset -48
.Lmulx_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_mul_mont_adx,.-ecp_nistz256_mul_mont_adx
.type __ecp_nistz256_mul_montx,@function
.align 32
__ecp_nistz256_mul_montx:
.cfi_startproc
mulxq %r9,%r8,%r9
mulxq %r10,%rcx,%r10
movq $32,%r14
xorq %r13,%r13
mulxq %r11,%rbp,%r11
movq .Lpoly+24(%rip),%r15
adcq %rcx,%r9
mulxq %r12,%rcx,%r12
movq %r8,%rdx
adcq %rbp,%r10
shlxq %r14,%r8,%rbp
adcq %rcx,%r11
shrxq %r14,%r8,%rcx
adcq $0,%r12
addq %rbp,%r9
adcq %rcx,%r10
mulxq %r15,%rcx,%rbp
movq 8(%rbx),%rdx
adcq %rcx,%r11
adcq %rbp,%r12
adcq $0,%r13
xorq %r8,%r8
mulxq 0+128(%rsi),%rcx,%rbp
adcxq %rcx,%r9
adoxq %rbp,%r10
mulxq 8+128(%rsi),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 16+128(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 24+128(%rsi),%rcx,%rbp
movq %r9,%rdx
adcxq %rcx,%r12
shlxq %r14,%r9,%rcx
adoxq %rbp,%r13
shrxq %r14,%r9,%rbp
adcxq %r8,%r13
adoxq %r8,%r8
adcq $0,%r8
addq %rcx,%r10
adcq %rbp,%r11
mulxq %r15,%rcx,%rbp
movq 16(%rbx),%rdx
adcq %rcx,%r12
adcq %rbp,%r13
adcq $0,%r8
xorq %r9,%r9
mulxq 0+128(%rsi),%rcx,%rbp
adcxq %rcx,%r10
adoxq %rbp,%r11
mulxq 8+128(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 16+128(%rsi),%rcx,%rbp
adcxq %rcx,%r12
adoxq %rbp,%r13
mulxq 24+128(%rsi),%rcx,%rbp
movq %r10,%rdx
adcxq %rcx,%r13
shlxq %r14,%r10,%rcx
adoxq %rbp,%r8
shrxq %r14,%r10,%rbp
adcxq %r9,%r8
adoxq %r9,%r9
adcq $0,%r9
addq %rcx,%r11
adcq %rbp,%r12
mulxq %r15,%rcx,%rbp
movq 24(%rbx),%rdx
adcq %rcx,%r13
adcq %rbp,%r8
adcq $0,%r9
xorq %r10,%r10
mulxq 0+128(%rsi),%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq 8+128(%rsi),%rcx,%rbp
adcxq %rcx,%r12
adoxq %rbp,%r13
mulxq 16+128(%rsi),%rcx,%rbp
adcxq %rcx,%r13
adoxq %rbp,%r8
mulxq 24+128(%rsi),%rcx,%rbp
movq %r11,%rdx
adcxq %rcx,%r8
shlxq %r14,%r11,%rcx
adoxq %rbp,%r9
shrxq %r14,%r11,%rbp
adcxq %r10,%r9
adoxq %r10,%r10
adcq $0,%r10
addq %rcx,%r12
adcq %rbp,%r13
mulxq %r15,%rcx,%rbp
movq %r12,%rbx
movq .Lpoly+8(%rip),%r14
adcq %rcx,%r8
movq %r13,%rdx
adcq %rbp,%r9
adcq $0,%r10
xorl %eax,%eax
movq %r8,%rcx
sbbq $-1,%r12
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%rbp
sbbq %r15,%r9
sbbq $0,%r10
cmovcq %rbx,%r12
cmovcq %rdx,%r13
movq %r12,0(%rdi)
cmovcq %rcx,%r8
movq %r13,8(%rdi)
cmovcq %rbp,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.cfi_endproc
.size __ecp_nistz256_mul_montx,.-__ecp_nistz256_mul_montx
.globl ecp_nistz256_sqr_mont_adx
.hidden ecp_nistz256_sqr_mont_adx
.type ecp_nistz256_sqr_mont_adx,@function
.align 32
ecp_nistz256_sqr_mont_adx:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
.Lsqrx_body:
movq 0(%rsi),%rdx
movq 8(%rsi),%r14
movq 16(%rsi),%r15
movq 24(%rsi),%r8
leaq -128(%rsi),%rsi
call __ecp_nistz256_sqr_montx
movq 0(%rsp),%r15
.cfi_restore %r15
movq 8(%rsp),%r14
.cfi_restore %r14
movq 16(%rsp),%r13
.cfi_restore %r13
movq 24(%rsp),%r12
.cfi_restore %r12
movq 32(%rsp),%rbx
.cfi_restore %rbx
movq 40(%rsp),%rbp
.cfi_restore %rbp
leaq 48(%rsp),%rsp
.cfi_adjust_cfa_offset -48
.Lsqrx_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_sqr_mont_adx,.-ecp_nistz256_sqr_mont_adx
.type __ecp_nistz256_sqr_montx,@function
.align 32
__ecp_nistz256_sqr_montx:
.cfi_startproc
mulxq %r14,%r9,%r10
mulxq %r15,%rcx,%r11
xorl %eax,%eax
adcq %rcx,%r10
mulxq %r8,%rbp,%r12
movq %r14,%rdx
adcq %rbp,%r11
adcq $0,%r12
xorq %r13,%r13
mulxq %r15,%rcx,%rbp
adcxq %rcx,%r11
adoxq %rbp,%r12
mulxq %r8,%rcx,%rbp
movq %r15,%rdx
adcxq %rcx,%r12
adoxq %rbp,%r13
adcq $0,%r13
mulxq %r8,%rcx,%r14
movq 0+128(%rsi),%rdx
xorq %r15,%r15
adcxq %r9,%r9
adoxq %rcx,%r13
adcxq %r10,%r10
adoxq %r15,%r14
mulxq %rdx,%r8,%rbp
movq 8+128(%rsi),%rdx
adcxq %r11,%r11
adoxq %rbp,%r9
adcxq %r12,%r12
mulxq %rdx,%rcx,%rax
movq 16+128(%rsi),%rdx
adcxq %r13,%r13
adoxq %rcx,%r10
adcxq %r14,%r14
.byte 0x67
mulxq %rdx,%rcx,%rbp
movq 24+128(%rsi),%rdx
adoxq %rax,%r11
adcxq %r15,%r15
adoxq %rcx,%r12
movq $32,%rsi
adoxq %rbp,%r13
.byte 0x67,0x67
mulxq %rdx,%rcx,%rax
movq .Lpoly+24(%rip),%rdx
adoxq %rcx,%r14
shlxq %rsi,%r8,%rcx
adoxq %rax,%r15
shrxq %rsi,%r8,%rax
movq %rdx,%rbp
addq %rcx,%r9
adcq %rax,%r10
mulxq %r8,%rcx,%r8
adcq %rcx,%r11
shlxq %rsi,%r9,%rcx
adcq $0,%r8
shrxq %rsi,%r9,%rax
addq %rcx,%r10
adcq %rax,%r11
mulxq %r9,%rcx,%r9
adcq %rcx,%r8
shlxq %rsi,%r10,%rcx
adcq $0,%r9
shrxq %rsi,%r10,%rax
addq %rcx,%r11
adcq %rax,%r8
mulxq %r10,%rcx,%r10
adcq %rcx,%r9
shlxq %rsi,%r11,%rcx
adcq $0,%r10
shrxq %rsi,%r11,%rax
addq %rcx,%r8
adcq %rax,%r9
mulxq %r11,%rcx,%r11
adcq %rcx,%r10
adcq $0,%r11
xorq %rdx,%rdx
addq %r8,%r12
movq .Lpoly+8(%rip),%rsi
adcq %r9,%r13
movq %r12,%r8
adcq %r10,%r14
adcq %r11,%r15
movq %r13,%r9
adcq $0,%rdx
subq $-1,%r12
movq %r14,%r10
sbbq %rsi,%r13
sbbq $0,%r14
movq %r15,%r11
sbbq %rbp,%r15
sbbq $0,%rdx
cmovcq %r8,%r12
cmovcq %r9,%r13
movq %r12,0(%rdi)
cmovcq %r10,%r14
movq %r13,8(%rdi)
cmovcq %r11,%r15
movq %r14,16(%rdi)
movq %r15,24(%rdi)
ret
.cfi_endproc
.size __ecp_nistz256_sqr_montx,.-__ecp_nistz256_sqr_montx
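// Constant-time table lookups: every entry of the precomputed window is
// read and masked with pcmpeqd/pand (vpcmpeqd/vpand in the AVX2 variants),
// so the memory access pattern never depends on the secret index in %edx.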
.globl ecp_nistz256_select_w5_nohw
.hidden ecp_nistz256_select_w5_nohw
.type ecp_nistz256_select_w5_nohw,@function
.align 32
ecp_nistz256_select_w5_nohw:
.cfi_startproc
_CET_ENDBR
movdqa .LOne(%rip),%xmm0
movd %edx,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
pxor %xmm7,%xmm7
movdqa %xmm0,%xmm8
pshufd $0,%xmm1,%xmm1
movq $16,%rax
.Lselect_loop_sse_w5:
movdqa %xmm8,%xmm15
paddd %xmm0,%xmm8
pcmpeqd %xmm1,%xmm15
movdqa 0(%rsi),%xmm9
movdqa 16(%rsi),%xmm10
movdqa 32(%rsi),%xmm11
movdqa 48(%rsi),%xmm12
movdqa 64(%rsi),%xmm13
movdqa 80(%rsi),%xmm14
leaq 96(%rsi),%rsi
pand %xmm15,%xmm9
pand %xmm15,%xmm10
por %xmm9,%xmm2
pand %xmm15,%xmm11
por %xmm10,%xmm3
pand %xmm15,%xmm12
por %xmm11,%xmm4
pand %xmm15,%xmm13
por %xmm12,%xmm5
pand %xmm15,%xmm14
por %xmm13,%xmm6
por %xmm14,%xmm7
decq %rax
jnz .Lselect_loop_sse_w5
movdqu %xmm2,0(%rdi)
movdqu %xmm3,16(%rdi)
movdqu %xmm4,32(%rdi)
movdqu %xmm5,48(%rdi)
movdqu %xmm6,64(%rdi)
movdqu %xmm7,80(%rdi)
ret
.cfi_endproc
.LSEH_end_ecp_nistz256_select_w5_nohw:
.size ecp_nistz256_select_w5_nohw,.-ecp_nistz256_select_w5_nohw
.globl ecp_nistz256_select_w7_nohw
.hidden ecp_nistz256_select_w7_nohw
.type ecp_nistz256_select_w7_nohw,@function
.align 32
ecp_nistz256_select_w7_nohw:
.cfi_startproc
_CET_ENDBR
movdqa .LOne(%rip),%xmm8
movd %edx,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
movdqa %xmm8,%xmm0
pshufd $0,%xmm1,%xmm1
movq $64,%rax
.Lselect_loop_sse_w7:
movdqa %xmm8,%xmm15
paddd %xmm0,%xmm8
movdqa 0(%rsi),%xmm9
movdqa 16(%rsi),%xmm10
pcmpeqd %xmm1,%xmm15
movdqa 32(%rsi),%xmm11
movdqa 48(%rsi),%xmm12
leaq 64(%rsi),%rsi
pand %xmm15,%xmm9
pand %xmm15,%xmm10
por %xmm9,%xmm2
pand %xmm15,%xmm11
por %xmm10,%xmm3
pand %xmm15,%xmm12
por %xmm11,%xmm4
prefetcht0 255(%rsi)
por %xmm12,%xmm5
decq %rax
jnz .Lselect_loop_sse_w7
movdqu %xmm2,0(%rdi)
movdqu %xmm3,16(%rdi)
movdqu %xmm4,32(%rdi)
movdqu %xmm5,48(%rdi)
ret
.cfi_endproc
.LSEH_end_ecp_nistz256_select_w7_nohw:
.size ecp_nistz256_select_w7_nohw,.-ecp_nistz256_select_w7_nohw
.globl ecp_nistz256_select_w5_avx2
.hidden ecp_nistz256_select_w5_avx2
.type ecp_nistz256_select_w5_avx2,@function
.align 32
ecp_nistz256_select_w5_avx2:
.cfi_startproc
_CET_ENDBR
vzeroupper
vmovdqa .LTwo(%rip),%ymm0
vpxor %ymm2,%ymm2,%ymm2
vpxor %ymm3,%ymm3,%ymm3
vpxor %ymm4,%ymm4,%ymm4
vmovdqa .LOne(%rip),%ymm5
vmovdqa .LTwo(%rip),%ymm10
vmovd %edx,%xmm1
vpermd %ymm1,%ymm2,%ymm1
movq $8,%rax
.Lselect_loop_avx2_w5:
vmovdqa 0(%rsi),%ymm6
vmovdqa 32(%rsi),%ymm7
vmovdqa 64(%rsi),%ymm8
vmovdqa 96(%rsi),%ymm11
vmovdqa 128(%rsi),%ymm12
vmovdqa 160(%rsi),%ymm13
vpcmpeqd %ymm1,%ymm5,%ymm9
vpcmpeqd %ymm1,%ymm10,%ymm14
vpaddd %ymm0,%ymm5,%ymm5
vpaddd %ymm0,%ymm10,%ymm10
leaq 192(%rsi),%rsi
vpand %ymm9,%ymm6,%ymm6
vpand %ymm9,%ymm7,%ymm7
vpand %ymm9,%ymm8,%ymm8
vpand %ymm14,%ymm11,%ymm11
vpand %ymm14,%ymm12,%ymm12
vpand %ymm14,%ymm13,%ymm13
vpxor %ymm6,%ymm2,%ymm2
vpxor %ymm7,%ymm3,%ymm3
vpxor %ymm8,%ymm4,%ymm4
vpxor %ymm11,%ymm2,%ymm2
vpxor %ymm12,%ymm3,%ymm3
vpxor %ymm13,%ymm4,%ymm4
decq %rax
jnz .Lselect_loop_avx2_w5
vmovdqu %ymm2,0(%rdi)
vmovdqu %ymm3,32(%rdi)
vmovdqu %ymm4,64(%rdi)
vzeroupper
ret
.cfi_endproc
.LSEH_end_ecp_nistz256_select_w5_avx2:
.size ecp_nistz256_select_w5_avx2,.-ecp_nistz256_select_w5_avx2
.globl ecp_nistz256_select_w7_avx2
.hidden ecp_nistz256_select_w7_avx2
.type ecp_nistz256_select_w7_avx2,@function
.align 32
ecp_nistz256_select_w7_avx2:
.cfi_startproc
_CET_ENDBR
vzeroupper
vmovdqa .LThree(%rip),%ymm0
vpxor %ymm2,%ymm2,%ymm2
vpxor %ymm3,%ymm3,%ymm3
vmovdqa .LOne(%rip),%ymm4
vmovdqa .LTwo(%rip),%ymm8
vmovdqa .LThree(%rip),%ymm12
vmovd %edx,%xmm1
vpermd %ymm1,%ymm2,%ymm1
movq $21,%rax
.Lselect_loop_avx2_w7:
vmovdqa 0(%rsi),%ymm5
vmovdqa 32(%rsi),%ymm6
vmovdqa 64(%rsi),%ymm9
vmovdqa 96(%rsi),%ymm10
vmovdqa 128(%rsi),%ymm13
vmovdqa 160(%rsi),%ymm14
vpcmpeqd %ymm1,%ymm4,%ymm7
vpcmpeqd %ymm1,%ymm8,%ymm11
vpcmpeqd %ymm1,%ymm12,%ymm15
vpaddd %ymm0,%ymm4,%ymm4
vpaddd %ymm0,%ymm8,%ymm8
vpaddd %ymm0,%ymm12,%ymm12
leaq 192(%rsi),%rsi
vpand %ymm7,%ymm5,%ymm5
vpand %ymm7,%ymm6,%ymm6
vpand %ymm11,%ymm9,%ymm9
vpand %ymm11,%ymm10,%ymm10
vpand %ymm15,%ymm13,%ymm13
vpand %ymm15,%ymm14,%ymm14
vpxor %ymm5,%ymm2,%ymm2
vpxor %ymm6,%ymm3,%ymm3
vpxor %ymm9,%ymm2,%ymm2
vpxor %ymm10,%ymm3,%ymm3
vpxor %ymm13,%ymm2,%ymm2
vpxor %ymm14,%ymm3,%ymm3
decq %rax
jnz .Lselect_loop_avx2_w7
vmovdqa 0(%rsi),%ymm5
vmovdqa 32(%rsi),%ymm6
vpcmpeqd %ymm1,%ymm4,%ymm7
vpand %ymm7,%ymm5,%ymm5
vpand %ymm7,%ymm6,%ymm6
vpxor %ymm5,%ymm2,%ymm2
vpxor %ymm6,%ymm3,%ymm3
vmovdqu %ymm2,0(%rdi)
vmovdqu %ymm3,32(%rdi)
vzeroupper
ret
.cfi_endproc
.LSEH_end_ecp_nistz256_select_w7_avx2:
.size ecp_nistz256_select_w7_avx2,.-ecp_nistz256_select_w7_avx2
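// Field helpers mod p: add, subtract, and double, each finishing with a
// conditional move (cmovc/cmovz) in place of a secret-dependent branch.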
.type __ecp_nistz256_add_toq,@function
.align 32
__ecp_nistz256_add_toq:
.cfi_startproc
xorq %r11,%r11
addq 0(%rbx),%r12
adcq 8(%rbx),%r13
movq %r12,%rax
adcq 16(%rbx),%r8
adcq 24(%rbx),%r9
movq %r13,%rbp
adcq $0,%r11
subq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
cmovcq %rbp,%r13
movq %r12,0(%rdi)
cmovcq %rcx,%r8
movq %r13,8(%rdi)
cmovcq %r10,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.cfi_endproc
.size __ecp_nistz256_add_toq,.-__ecp_nistz256_add_toq
.type __ecp_nistz256_sub_fromq,@function
.align 32
__ecp_nistz256_sub_fromq:
.cfi_startproc
subq 0(%rbx),%r12
sbbq 8(%rbx),%r13
movq %r12,%rax
sbbq 16(%rbx),%r8
sbbq 24(%rbx),%r9
movq %r13,%rbp
sbbq %r11,%r11
addq $-1,%r12
movq %r8,%rcx
adcq %r14,%r13
adcq $0,%r8
movq %r9,%r10
adcq %r15,%r9
testq %r11,%r11
cmovzq %rax,%r12
cmovzq %rbp,%r13
movq %r12,0(%rdi)
cmovzq %rcx,%r8
movq %r13,8(%rdi)
cmovzq %r10,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.cfi_endproc
.size __ecp_nistz256_sub_fromq,.-__ecp_nistz256_sub_fromq
.type __ecp_nistz256_subq,@function
.align 32
__ecp_nistz256_subq:
.cfi_startproc
subq %r12,%rax
sbbq %r13,%rbp
movq %rax,%r12
sbbq %r8,%rcx
sbbq %r9,%r10
movq %rbp,%r13
sbbq %r11,%r11
addq $-1,%rax
movq %rcx,%r8
adcq %r14,%rbp
adcq $0,%rcx
movq %r10,%r9
adcq %r15,%r10
testq %r11,%r11
cmovnzq %rax,%r12
cmovnzq %rbp,%r13
cmovnzq %rcx,%r8
cmovnzq %r10,%r9
ret
.cfi_endproc
.size __ecp_nistz256_subq,.-__ecp_nistz256_subq
.type __ecp_nistz256_mul_by_2q,@function
.align 32
__ecp_nistz256_mul_by_2q:
.cfi_startproc
xorq %r11,%r11
addq %r12,%r12
adcq %r13,%r13
movq %r12,%rax
adcq %r8,%r8
adcq %r9,%r9
movq %r13,%rbp
adcq $0,%r11
subq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
cmovcq %rbp,%r13
movq %r12,0(%rdi)
cmovcq %rcx,%r8
movq %r13,8(%rdi)
cmovcq %r10,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.cfi_endproc
.size __ecp_nistz256_mul_by_2q,.-__ecp_nistz256_mul_by_2q
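// Jacobian point doubling for P-256; "nohw" marks the generic mulq path.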
.globl ecp_nistz256_point_double_nohw
.hidden ecp_nistz256_point_double_nohw
.type ecp_nistz256_point_double_nohw,@function
.align 32
ecp_nistz256_point_double_nohw:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
subq $160+8,%rsp
.cfi_adjust_cfa_offset 32*5+8
.Lpoint_doubleq_body:
.Lpoint_double_shortcutq:
movdqu 0(%rsi),%xmm0
movq %rsi,%rbx
movdqu 16(%rsi),%xmm1
movq 32+0(%rsi),%r12
movq 32+8(%rsi),%r13
movq 32+16(%rsi),%r8
movq 32+24(%rsi),%r9
movq .Lpoly+8(%rip),%r14
movq .Lpoly+24(%rip),%r15
movdqa %xmm0,96(%rsp)
movdqa %xmm1,96+16(%rsp)
leaq 32(%rdi),%r10
leaq 64(%rdi),%r11
.byte 102,72,15,110,199
.byte 102,73,15,110,202
.byte 102,73,15,110,211
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_by_2q
movq 64+0(%rsi),%rax
movq 64+8(%rsi),%r14
movq 64+16(%rsi),%r15
movq 64+24(%rsi),%r8
leaq 64-0(%rsi),%rsi
leaq 64(%rsp),%rdi
call __ecp_nistz256_sqr_montq
movq 0+0(%rsp),%rax
movq 8+0(%rsp),%r14
leaq 0+0(%rsp),%rsi
movq 16+0(%rsp),%r15
movq 24+0(%rsp),%r8
leaq 0(%rsp),%rdi
call __ecp_nistz256_sqr_montq
movq 32(%rbx),%rax
movq 64+0(%rbx),%r9
movq 64+8(%rbx),%r10
movq 64+16(%rbx),%r11
movq 64+24(%rbx),%r12
leaq 64-0(%rbx),%rsi
leaq 32(%rbx),%rbx
.byte 102,72,15,126,215
call __ecp_nistz256_mul_montq
call __ecp_nistz256_mul_by_2q
movq 96+0(%rsp),%r12
movq 96+8(%rsp),%r13
leaq 64(%rsp),%rbx
movq 96+16(%rsp),%r8
movq 96+24(%rsp),%r9
leaq 32(%rsp),%rdi
call __ecp_nistz256_add_toq
movq 96+0(%rsp),%r12
movq 96+8(%rsp),%r13
leaq 64(%rsp),%rbx
movq 96+16(%rsp),%r8
movq 96+24(%rsp),%r9
leaq 64(%rsp),%rdi
call __ecp_nistz256_sub_fromq
movq 0+0(%rsp),%rax
movq 8+0(%rsp),%r14
leaq 0+0(%rsp),%rsi
movq 16+0(%rsp),%r15
movq 24+0(%rsp),%r8
.byte 102,72,15,126,207
call __ecp_nistz256_sqr_montq
xorq %r9,%r9
movq %r12,%rax
addq $-1,%r12
movq %r13,%r10
adcq %rsi,%r13
movq %r14,%rcx
adcq $0,%r14
movq %r15,%r8
adcq %rbp,%r15
adcq $0,%r9
xorq %rsi,%rsi
testq $1,%rax
cmovzq %rax,%r12
cmovzq %r10,%r13
cmovzq %rcx,%r14
cmovzq %r8,%r15
cmovzq %rsi,%r9
movq %r13,%rax
shrq $1,%r12
shlq $63,%rax
movq %r14,%r10
shrq $1,%r13
orq %rax,%r12
shlq $63,%r10
movq %r15,%rcx
shrq $1,%r14
orq %r10,%r13
shlq $63,%rcx
movq %r12,0(%rdi)
shrq $1,%r15
movq %r13,8(%rdi)
shlq $63,%r9
orq %rcx,%r14
orq %r9,%r15
movq %r14,16(%rdi)
movq %r15,24(%rdi)
movq 64(%rsp),%rax
leaq 64(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 128(%rsp),%rdi
call __ecp_nistz256_mul_by_2q
leaq 32(%rsp),%rbx
leaq 32(%rsp),%rdi
call __ecp_nistz256_add_toq
movq 96(%rsp),%rax
leaq 96(%rsp),%rbx
movq 0+0(%rsp),%r9
movq 8+0(%rsp),%r10
leaq 0+0(%rsp),%rsi
movq 16+0(%rsp),%r11
movq 24+0(%rsp),%r12
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 128(%rsp),%rdi
call __ecp_nistz256_mul_by_2q
movq 0+32(%rsp),%rax
movq 8+32(%rsp),%r14
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r15
movq 24+32(%rsp),%r8
.byte 102,72,15,126,199
call __ecp_nistz256_sqr_montq
leaq 128(%rsp),%rbx
movq %r14,%r8
movq %r15,%r9
movq %rsi,%r14
movq %rbp,%r15
call __ecp_nistz256_sub_fromq
movq 0+0(%rsp),%rax
movq 0+8(%rsp),%rbp
movq 0+16(%rsp),%rcx
movq 0+24(%rsp),%r10
leaq 0(%rsp),%rdi
call __ecp_nistz256_subq
movq 32(%rsp),%rax
leaq 32(%rsp),%rbx
movq %r12,%r14
xorl %ecx,%ecx
movq %r12,0+0(%rsp)
movq %r13,%r10
movq %r13,0+8(%rsp)
cmovzq %r8,%r11
movq %r8,0+16(%rsp)
leaq 0-0(%rsp),%rsi
cmovzq %r9,%r12
movq %r9,0+24(%rsp)
movq %r14,%r9
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montq
.byte 102,72,15,126,203
.byte 102,72,15,126,207
call __ecp_nistz256_sub_fromq
leaq 160+56(%rsp),%rsi
.cfi_def_cfa %rsi,8
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbx
.cfi_restore %rbx
movq -8(%rsi),%rbp
.cfi_restore %rbp
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lpoint_doubleq_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_point_double_nohw,.-ecp_nistz256_point_double_nohw
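// Jacobian point addition; equal inputs are detected and routed to the
// doubling code via .Ladd_doubleq and .Lpoint_double_shortcutq.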
.globl ecp_nistz256_point_add_nohw
.hidden ecp_nistz256_point_add_nohw
.type ecp_nistz256_point_add_nohw,@function
.align 32
ecp_nistz256_point_add_nohw:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
subq $576+8,%rsp
.cfi_adjust_cfa_offset 32*18+8
.Lpoint_addq_body:
movdqu 0(%rsi),%xmm0
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
movdqu 48(%rsi),%xmm3
movdqu 64(%rsi),%xmm4
movdqu 80(%rsi),%xmm5
movq %rsi,%rbx
movq %rdx,%rsi
movdqa %xmm0,384(%rsp)
movdqa %xmm1,384+16(%rsp)
movdqa %xmm2,416(%rsp)
movdqa %xmm3,416+16(%rsp)
movdqa %xmm4,448(%rsp)
movdqa %xmm5,448+16(%rsp)
por %xmm4,%xmm5
movdqu 0(%rsi),%xmm0
pshufd $0xb1,%xmm5,%xmm3
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
por %xmm3,%xmm5
movdqu 48(%rsi),%xmm3
movq 64+0(%rsi),%rax
movq 64+8(%rsi),%r14
movq 64+16(%rsi),%r15
movq 64+24(%rsi),%r8
movdqa %xmm0,480(%rsp)
pshufd $0x1e,%xmm5,%xmm4
movdqa %xmm1,480+16(%rsp)
movdqu 64(%rsi),%xmm0
movdqu 80(%rsi),%xmm1
movdqa %xmm2,512(%rsp)
movdqa %xmm3,512+16(%rsp)
por %xmm4,%xmm5
pxor %xmm4,%xmm4
por %xmm0,%xmm1
.byte 102,72,15,110,199
leaq 64-0(%rsi),%rsi
movq %rax,544+0(%rsp)
movq %r14,544+8(%rsp)
movq %r15,544+16(%rsp)
movq %r8,544+24(%rsp)
leaq 96(%rsp),%rdi
call __ecp_nistz256_sqr_montq
pcmpeqd %xmm4,%xmm5
pshufd $0xb1,%xmm1,%xmm4
por %xmm1,%xmm4
pshufd $0,%xmm5,%xmm5
pshufd $0x1e,%xmm4,%xmm3
por %xmm3,%xmm4
pxor %xmm3,%xmm3
pcmpeqd %xmm3,%xmm4
pshufd $0,%xmm4,%xmm4
movq 64+0(%rbx),%rax
movq 64+8(%rbx),%r14
movq 64+16(%rbx),%r15
movq 64+24(%rbx),%r8
.byte 102,72,15,110,203
leaq 64-0(%rbx),%rsi
leaq 32(%rsp),%rdi
call __ecp_nistz256_sqr_montq
movq 544(%rsp),%rax
leaq 544(%rsp),%rbx
movq 0+96(%rsp),%r9
movq 8+96(%rsp),%r10
leaq 0+96(%rsp),%rsi
movq 16+96(%rsp),%r11
movq 24+96(%rsp),%r12
leaq 224(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 448(%rsp),%rax
leaq 448(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 256(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 416(%rsp),%rax
leaq 416(%rsp),%rbx
movq 0+224(%rsp),%r9
movq 8+224(%rsp),%r10
leaq 0+224(%rsp),%rsi
movq 16+224(%rsp),%r11
movq 24+224(%rsp),%r12
leaq 224(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 512(%rsp),%rax
leaq 512(%rsp),%rbx
movq 0+256(%rsp),%r9
movq 8+256(%rsp),%r10
leaq 0+256(%rsp),%rsi
movq 16+256(%rsp),%r11
movq 24+256(%rsp),%r12
leaq 256(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 224(%rsp),%rbx
leaq 64(%rsp),%rdi
call __ecp_nistz256_sub_fromq
orq %r13,%r12
movdqa %xmm4,%xmm2
orq %r8,%r12
orq %r9,%r12
por %xmm5,%xmm2
.byte 102,73,15,110,220
movq 384(%rsp),%rax
leaq 384(%rsp),%rbx
movq 0+96(%rsp),%r9
movq 8+96(%rsp),%r10
leaq 0+96(%rsp),%rsi
movq 16+96(%rsp),%r11
movq 24+96(%rsp),%r12
leaq 160(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 480(%rsp),%rax
leaq 480(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 192(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 160(%rsp),%rbx
leaq 0(%rsp),%rdi
call __ecp_nistz256_sub_fromq
orq %r13,%r12
orq %r8,%r12
orq %r9,%r12
.byte 102,73,15,126,208 # movq %xmm2,%r8
.byte 102,73,15,126,217 # movq %xmm3,%r9
orq %r8,%r12
.byte 0x3e # predict-taken branch hint for the jnz below
jnz .Ladd_proceedq
testq %r9,%r9
jz .Ladd_doubleq
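# Fall-through: both inputs are finite and share an x-coordinate but
# differ in y (informally, H == 0 and R != 0 in the usual Jacobian
# addition formulas), so the sum is the point at infinity and the
# output is zeroed; the equal-point case was sent to the doubling
# shortcut by the jz above.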
.byte 102,72,15,126,199 # movq %xmm0,%rdi
pxor %xmm0,%xmm0
movdqu %xmm0,0(%rdi)
movdqu %xmm0,16(%rdi)
movdqu %xmm0,32(%rdi)
movdqu %xmm0,48(%rdi)
movdqu %xmm0,64(%rdi)
movdqu %xmm0,80(%rdi)
jmp .Ladd_doneq
.align 32
.Ladd_doubleq:
.byte 102,72,15,126,206 # movq %xmm1,%rsi
.byte 102,72,15,126,199 # movq %xmm0,%rdi
addq $416,%rsp
.cfi_adjust_cfa_offset -416
jmp .Lpoint_double_shortcutq
.cfi_adjust_cfa_offset 416
.align 32
.Ladd_proceedq:
movq 0+64(%rsp),%rax
movq 8+64(%rsp),%r14
leaq 0+64(%rsp),%rsi
movq 16+64(%rsp),%r15
movq 24+64(%rsp),%r8
leaq 96(%rsp),%rdi
call __ecp_nistz256_sqr_montq
movq 448(%rsp),%rax
leaq 448(%rsp),%rbx
movq 0+0(%rsp),%r9
movq 8+0(%rsp),%r10
leaq 0+0(%rsp),%rsi
movq 16+0(%rsp),%r11
movq 24+0(%rsp),%r12
leaq 352(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 0+0(%rsp),%rax
movq 8+0(%rsp),%r14
leaq 0+0(%rsp),%rsi
movq 16+0(%rsp),%r15
movq 24+0(%rsp),%r8
leaq 32(%rsp),%rdi
call __ecp_nistz256_sqr_montq
movq 544(%rsp),%rax
leaq 544(%rsp),%rbx
movq 0+352(%rsp),%r9
movq 8+352(%rsp),%r10
leaq 0+352(%rsp),%rsi
movq 16+352(%rsp),%r11
movq 24+352(%rsp),%r12
leaq 352(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 0(%rsp),%rax
leaq 0(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 128(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 160(%rsp),%rax
leaq 160(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 192(%rsp),%rdi
call __ecp_nistz256_mul_montq
xorq %r11,%r11
addq %r12,%r12
leaq 96(%rsp),%rsi
adcq %r13,%r13
movq %r12,%rax
adcq %r8,%r8
adcq %r9,%r9
movq %r13,%rbp
adcq $0,%r11
subq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
movq 0(%rsi),%rax
cmovcq %rbp,%r13
movq 8(%rsi),%rbp
cmovcq %rcx,%r8
movq 16(%rsi),%rcx
cmovcq %r10,%r9
movq 24(%rsi),%r10
call __ecp_nistz256_subq
leaq 128(%rsp),%rbx
leaq 288(%rsp),%rdi
call __ecp_nistz256_sub_fromq
movq 192+0(%rsp),%rax
movq 192+8(%rsp),%rbp
movq 192+16(%rsp),%rcx
movq 192+24(%rsp),%r10
leaq 320(%rsp),%rdi
call __ecp_nistz256_subq
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r8,16(%rdi)
movq %r9,24(%rdi)
movq 128(%rsp),%rax
leaq 128(%rsp),%rbx
movq 0+224(%rsp),%r9
movq 8+224(%rsp),%r10
leaq 0+224(%rsp),%rsi
movq 16+224(%rsp),%r11
movq 24+224(%rsp),%r12
leaq 256(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 320(%rsp),%rax
leaq 320(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq 0+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 320(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 256(%rsp),%rbx
leaq 320(%rsp),%rdi
call __ecp_nistz256_sub_fromq
.byte 102,72,15,126,199 # movq %xmm0,%rdi
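# Branch-free selection of the result: each coordinate becomes
# in2infty ? in1 : (in1infty ? in2 : computed sum), assembled with
# pandn/pand/por over the xmm4/xmm5 masks instead of branches.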
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 352(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 352+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 544(%rsp),%xmm2
pand 544+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 448(%rsp),%xmm2
pand 448+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,64(%rdi)
movdqu %xmm3,80(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 288(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 288+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 480(%rsp),%xmm2
pand 480+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 384(%rsp),%xmm2
pand 384+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,0(%rdi)
movdqu %xmm3,16(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 320(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 320+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 512(%rsp),%xmm2
pand 512+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 416(%rsp),%xmm2
pand 416+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,32(%rdi)
movdqu %xmm3,48(%rdi)
.Ladd_doneq:
leaq 576+56(%rsp),%rsi
.cfi_def_cfa %rsi,8
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbx
.cfi_restore %rbx
movq -8(%rsi),%rbp
.cfi_restore %rbp
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lpoint_addq_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_point_add_nohw,.-ecp_nistz256_point_add_nohw
.globl ecp_nistz256_point_add_affine_nohw
.hidden ecp_nistz256_point_add_affine_nohw
.type ecp_nistz256_point_add_affine_nohw,@function
.align 32
ecp_nistz256_point_add_affine_nohw:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
subq $480+8,%rsp
.cfi_adjust_cfa_offset 32*15+8
.Ladd_affineq_body:
movdqu 0(%rsi),%xmm0
movq %rdx,%rbx
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
movdqu 48(%rsi),%xmm3
movdqu 64(%rsi),%xmm4
movdqu 80(%rsi),%xmm5
movq 64+0(%rsi),%rax
movq 64+8(%rsi),%r14
movq 64+16(%rsi),%r15
movq 64+24(%rsi),%r8
movdqa %xmm0,320(%rsp)
movdqa %xmm1,320+16(%rsp)
movdqa %xmm2,352(%rsp)
movdqa %xmm3,352+16(%rsp)
movdqa %xmm4,384(%rsp)
movdqa %xmm5,384+16(%rsp)
por %xmm4,%xmm5
movdqu 0(%rbx),%xmm0
pshufd $0xb1,%xmm5,%xmm3
movdqu 16(%rbx),%xmm1
movdqu 32(%rbx),%xmm2
por %xmm3,%xmm5
movdqu 48(%rbx),%xmm3
movdqa %xmm0,416(%rsp)
pshufd $0x1e,%xmm5,%xmm4
movdqa %xmm1,416+16(%rsp)
por %xmm0,%xmm1
.byte 102,72,15,110,199 # movq %rdi,%xmm0
movdqa %xmm2,448(%rsp)
movdqa %xmm3,448+16(%rsp)
por %xmm2,%xmm3
por %xmm4,%xmm5
pxor %xmm4,%xmm4
por %xmm1,%xmm3
leaq 64-0(%rsi),%rsi
leaq 32(%rsp),%rdi
call __ecp_nistz256_sqr_montq
pcmpeqd %xmm4,%xmm5
pshufd $0xb1,%xmm3,%xmm4
movq 0(%rbx),%rax
movq %r12,%r9
por %xmm3,%xmm4
pshufd $0,%xmm5,%xmm5
pshufd $0x1e,%xmm4,%xmm3
movq %r13,%r10
por %xmm3,%xmm4
pxor %xmm3,%xmm3
movq %r14,%r11
pcmpeqd %xmm3,%xmm4
pshufd $0,%xmm4,%xmm4
leaq 32-0(%rsp),%rsi
movq %r15,%r12
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 320(%rsp),%rbx
leaq 64(%rsp),%rdi
call __ecp_nistz256_sub_fromq
movq 384(%rsp),%rax
leaq 384(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 384(%rsp),%rax
leaq 384(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq 0+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 288(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 448(%rsp),%rax
leaq 448(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq 0+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 352(%rsp),%rbx
leaq 96(%rsp),%rdi
call __ecp_nistz256_sub_fromq
movq 0+64(%rsp),%rax
movq 8+64(%rsp),%r14
leaq 0+64(%rsp),%rsi
movq 16+64(%rsp),%r15
movq 24+64(%rsp),%r8
leaq 128(%rsp),%rdi
call __ecp_nistz256_sqr_montq
movq 0+96(%rsp),%rax
movq 8+96(%rsp),%r14
leaq 0+96(%rsp),%rsi
movq 16+96(%rsp),%r15
movq 24+96(%rsp),%r8
leaq 192(%rsp),%rdi
call __ecp_nistz256_sqr_montq
movq 128(%rsp),%rax
leaq 128(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq 0+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 160(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 320(%rsp),%rax
leaq 320(%rsp),%rbx
movq 0+128(%rsp),%r9
movq 8+128(%rsp),%r10
leaq 0+128(%rsp),%rsi
movq 16+128(%rsp),%r11
movq 24+128(%rsp),%r12
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montq
xorq %r11,%r11
addq %r12,%r12
leaq 192(%rsp),%rsi
adcq %r13,%r13
movq %r12,%rax
adcq %r8,%r8
adcq %r9,%r9
movq %r13,%rbp
adcq $0,%r11
subq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
movq 0(%rsi),%rax
cmovcq %rbp,%r13
movq 8(%rsi),%rbp
cmovcq %rcx,%r8
movq 16(%rsi),%rcx
cmovcq %r10,%r9
movq 24(%rsi),%r10
call __ecp_nistz256_subq
leaq 160(%rsp),%rbx
leaq 224(%rsp),%rdi
call __ecp_nistz256_sub_fromq
movq 0+0(%rsp),%rax
movq 0+8(%rsp),%rbp
movq 0+16(%rsp),%rcx
movq 0+24(%rsp),%r10
leaq 64(%rsp),%rdi
call __ecp_nistz256_subq
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r8,16(%rdi)
movq %r9,24(%rdi)
movq 352(%rsp),%rax
leaq 352(%rsp),%rbx
movq 0+160(%rsp),%r9
movq 8+160(%rsp),%r10
leaq 0+160(%rsp),%rsi
movq 16+160(%rsp),%r11
movq 24+160(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montq
movq 96(%rsp),%rax
leaq 96(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq 0+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 64(%rsp),%rdi
call __ecp_nistz256_mul_montq
leaq 32(%rsp),%rbx
leaq 256(%rsp),%rdi
call __ecp_nistz256_sub_fromq
.byte 102,72,15,126,199 # movq %xmm0,%rdi
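# Same branch-free selection as in point_add, except that when the
# affine input is selected its Z coordinate comes from the constant
# .LONE_mont (1 in Montgomery form), since an affine point carries
# no Z of its own.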
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 288(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 288+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand .LONE_mont(%rip),%xmm2
pand .LONE_mont+16(%rip),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 384(%rsp),%xmm2
pand 384+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,64(%rdi)
movdqu %xmm3,80(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 224(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 224+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 416(%rsp),%xmm2
pand 416+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 320(%rsp),%xmm2
pand 320+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,0(%rdi)
movdqu %xmm3,16(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 256(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 256+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 448(%rsp),%xmm2
pand 448+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 352(%rsp),%xmm2
pand 352+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,32(%rdi)
movdqu %xmm3,48(%rdi)
leaq 480+56(%rsp),%rsi
.cfi_def_cfa %rsi,8
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbx
.cfi_restore %rbx
movq -8(%rsi),%rbp
.cfi_restore %rbp
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Ladd_affineq_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_point_add_affine_nohw,.-ecp_nistz256_point_add_affine_nohw
.type __ecp_nistz256_add_tox,@function
.align 32
__ecp_nistz256_add_tox:
.cfi_startproc
xorq %r11,%r11
adcq 0(%rbx),%r12
adcq 8(%rbx),%r13
movq %r12,%rax
adcq 16(%rbx),%r8
adcq 24(%rbx),%r9
movq %r13,%rbp
adcq $0,%r11
xorq %r10,%r10
sbbq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
cmovcq %rbp,%r13
movq %r12,0(%rdi)
cmovcq %rcx,%r8
movq %r13,8(%rdi)
cmovcq %r10,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.cfi_endproc
.size __ecp_nistz256_add_tox,.-__ecp_nistz256_add_tox
.type __ecp_nistz256_sub_fromx,@function
.align 32
__ecp_nistz256_sub_fromx:
.cfi_startproc
xorq %r11,%r11
sbbq 0(%rbx),%r12
sbbq 8(%rbx),%r13
movq %r12,%rax
sbbq 16(%rbx),%r8
sbbq 24(%rbx),%r9
movq %r13,%rbp
sbbq $0,%r11
xorq %r10,%r10
adcq $-1,%r12
movq %r8,%rcx
adcq %r14,%r13
adcq $0,%r8
movq %r9,%r10
adcq %r15,%r9
btq $0,%r11
cmovncq %rax,%r12
cmovncq %rbp,%r13
movq %r12,0(%rdi)
cmovncq %rcx,%r8
movq %r13,8(%rdi)
cmovncq %r10,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.cfi_endproc
.size __ecp_nistz256_sub_fromx,.-__ecp_nistz256_sub_fromx
.type __ecp_nistz256_subx,@function
.align 32
__ecp_nistz256_subx:
.cfi_startproc
xorq %r11,%r11
sbbq %r12,%rax
sbbq %r13,%rbp
movq %rax,%r12
sbbq %r8,%rcx
sbbq %r9,%r10
movq %rbp,%r13
sbbq $0,%r11
xorq %r9,%r9
adcq $-1,%rax
movq %rcx,%r8
adcq %r14,%rbp
adcq $0,%rcx
movq %r10,%r9
adcq %r15,%r10
btq $0,%r11
cmovcq %rax,%r12
cmovcq %rbp,%r13
cmovcq %rcx,%r8
cmovcq %r10,%r9
ret
.cfi_endproc
.size __ecp_nistz256_subx,.-__ecp_nistz256_subx
.type __ecp_nistz256_mul_by_2x,@function
.align 32
__ecp_nistz256_mul_by_2x:
.cfi_startproc
xorq %r11,%r11
adcq %r12,%r12
adcq %r13,%r13
movq %r12,%rax
adcq %r8,%r8
adcq %r9,%r9
movq %r13,%rbp
adcq $0,%r11
xorq %r10,%r10
sbbq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
cmovcq %rbp,%r13
movq %r12,0(%rdi)
cmovcq %rcx,%r8
movq %r13,8(%rdi)
cmovcq %r10,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
ret
.cfi_endproc
.size __ecp_nistz256_mul_by_2x,.-__ecp_nistz256_mul_by_2x
.globl ecp_nistz256_point_double_adx
.hidden ecp_nistz256_point_double_adx
.type ecp_nistz256_point_double_adx,@function
.align 32
ecp_nistz256_point_double_adx:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
subq $160+8,%rsp
.cfi_adjust_cfa_offset 32*5+8
.Lpoint_doublex_body:
.Lpoint_double_shortcutx:
movdqu 0(%rsi),%xmm0
movq %rsi,%rbx
movdqu 16(%rsi),%xmm1
movq 32+0(%rsi),%r12
movq 32+8(%rsi),%r13
movq 32+16(%rsi),%r8
movq 32+24(%rsi),%r9
movq .Lpoly+8(%rip),%r14
movq .Lpoly+24(%rip),%r15
movdqa %xmm0,96(%rsp)
movdqa %xmm1,96+16(%rsp)
leaq 32(%rdi),%r10
leaq 64(%rdi),%r11
.byte 102,72,15,110,199 # movq %rdi,%xmm0
.byte 102,73,15,110,202 # movq %r10,%xmm1
.byte 102,73,15,110,211 # movq %r11,%xmm2
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_by_2x
movq 64+0(%rsi),%rdx
movq 64+8(%rsi),%r14
movq 64+16(%rsi),%r15
movq 64+24(%rsi),%r8
leaq 64-128(%rsi),%rsi
leaq 64(%rsp),%rdi
call __ecp_nistz256_sqr_montx
movq 0+0(%rsp),%rdx
movq 8+0(%rsp),%r14
leaq -128+0(%rsp),%rsi
movq 16+0(%rsp),%r15
movq 24+0(%rsp),%r8
leaq 0(%rsp),%rdi
call __ecp_nistz256_sqr_montx
movq 32(%rbx),%rdx
movq 64+0(%rbx),%r9
movq 64+8(%rbx),%r10
movq 64+16(%rbx),%r11
movq 64+24(%rbx),%r12
leaq 64-128(%rbx),%rsi
leaq 32(%rbx),%rbx
.byte 102,72,15,126,215 # movq %xmm2,%rdi
call __ecp_nistz256_mul_montx
call __ecp_nistz256_mul_by_2x
movq 96+0(%rsp),%r12
movq 96+8(%rsp),%r13
leaq 64(%rsp),%rbx
movq 96+16(%rsp),%r8
movq 96+24(%rsp),%r9
leaq 32(%rsp),%rdi
call __ecp_nistz256_add_tox
movq 96+0(%rsp),%r12
movq 96+8(%rsp),%r13
leaq 64(%rsp),%rbx
movq 96+16(%rsp),%r8
movq 96+24(%rsp),%r9
leaq 64(%rsp),%rdi
call __ecp_nistz256_sub_fromx
movq 0+0(%rsp),%rdx
movq 8+0(%rsp),%r14
leaq -128+0(%rsp),%rsi
movq 16+0(%rsp),%r15
movq 24+0(%rsp),%r8
.byte 102,72,15,126,207 # movq %xmm1,%rdi
call __ecp_nistz256_sqr_montx
xorq %r9,%r9
movq %r12,%rax
addq $-1,%r12
movq %r13,%r10
adcq %rsi,%r13
movq %r14,%rcx
adcq $0,%r14
movq %r15,%r8
adcq %rbp,%r15
adcq $0,%r9
xorq %rsi,%rsi
testq $1,%rax
cmovzq %rax,%r12
cmovzq %r10,%r13
cmovzq %rcx,%r14
cmovzq %r8,%r15
cmovzq %rsi,%r9
movq %r13,%rax
shrq $1,%r12
shlq $63,%rax
movq %r14,%r10
shrq $1,%r13
orq %rax,%r12
shlq $63,%r10
movq %r15,%rcx
shrq $1,%r14
orq %r10,%r13
shlq $63,%rcx
movq %r12,0(%rdi)
shrq $1,%r15
movq %r13,8(%rdi)
shlq $63,%r9
orq %rcx,%r14
orq %r9,%r15
movq %r14,16(%rdi)
movq %r15,24(%rdi)
movq 64(%rsp),%rdx
leaq 64(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 128(%rsp),%rdi
call __ecp_nistz256_mul_by_2x
leaq 32(%rsp),%rbx
leaq 32(%rsp),%rdi
call __ecp_nistz256_add_tox
movq 96(%rsp),%rdx
leaq 96(%rsp),%rbx
movq 0+0(%rsp),%r9
movq 8+0(%rsp),%r10
leaq -128+0(%rsp),%rsi
movq 16+0(%rsp),%r11
movq 24+0(%rsp),%r12
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 128(%rsp),%rdi
call __ecp_nistz256_mul_by_2x
movq 0+32(%rsp),%rdx
movq 8+32(%rsp),%r14
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r15
movq 24+32(%rsp),%r8
.byte 102,72,15,126,199 # movq %xmm0,%rdi
call __ecp_nistz256_sqr_montx
leaq 128(%rsp),%rbx
movq %r14,%r8
movq %r15,%r9
movq %rsi,%r14
movq %rbp,%r15
call __ecp_nistz256_sub_fromx
movq 0+0(%rsp),%rax
movq 0+8(%rsp),%rbp
movq 0+16(%rsp),%rcx
movq 0+24(%rsp),%r10
leaq 0(%rsp),%rdi
call __ecp_nistz256_subx
movq 32(%rsp),%rdx
leaq 32(%rsp),%rbx
movq %r12,%r14
xorl %ecx,%ecx
movq %r12,0+0(%rsp)
movq %r13,%r10
movq %r13,0+8(%rsp)
cmovzq %r8,%r11
movq %r8,0+16(%rsp)
leaq 0-128(%rsp),%rsi
cmovzq %r9,%r12
movq %r9,0+24(%rsp)
movq %r14,%r9
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montx
.byte 102,72,15,126,203 # movq %xmm1,%rbx
.byte 102,72,15,126,207 # movq %xmm1,%rdi
call __ecp_nistz256_sub_fromx
leaq 160+56(%rsp),%rsi
.cfi_def_cfa %rsi,8
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbx
.cfi_restore %rbx
movq -8(%rsi),%rbp
.cfi_restore %rbp
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lpoint_doublex_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_point_double_adx,.-ecp_nistz256_point_double_adx
.globl ecp_nistz256_point_add_adx
.hidden ecp_nistz256_point_add_adx
.type ecp_nistz256_point_add_adx,@function
.align 32
ecp_nistz256_point_add_adx:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
subq $576+8,%rsp
.cfi_adjust_cfa_offset 32*18+8
.Lpoint_addx_body:
movdqu 0(%rsi),%xmm0
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
movdqu 48(%rsi),%xmm3
movdqu 64(%rsi),%xmm4
movdqu 80(%rsi),%xmm5
movq %rsi,%rbx
movq %rdx,%rsi
movdqa %xmm0,384(%rsp)
movdqa %xmm1,384+16(%rsp)
movdqa %xmm2,416(%rsp)
movdqa %xmm3,416+16(%rsp)
movdqa %xmm4,448(%rsp)
movdqa %xmm5,448+16(%rsp)
por %xmm4,%xmm5
movdqu 0(%rsi),%xmm0
pshufd $0xb1,%xmm5,%xmm3
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
por %xmm3,%xmm5
movdqu 48(%rsi),%xmm3
movq 64+0(%rsi),%rdx
movq 64+8(%rsi),%r14
movq 64+16(%rsi),%r15
movq 64+24(%rsi),%r8
movdqa %xmm0,480(%rsp)
pshufd $0x1e,%xmm5,%xmm4
movdqa %xmm1,480+16(%rsp)
movdqu 64(%rsi),%xmm0
movdqu 80(%rsi),%xmm1
movdqa %xmm2,512(%rsp)
movdqa %xmm3,512+16(%rsp)
por %xmm4,%xmm5
pxor %xmm4,%xmm4
por %xmm0,%xmm1
.byte 102,72,15,110,199 # movq %rdi,%xmm0
leaq 64-128(%rsi),%rsi
movq %rdx,544+0(%rsp)
movq %r14,544+8(%rsp)
movq %r15,544+16(%rsp)
movq %r8,544+24(%rsp)
leaq 96(%rsp),%rdi
call __ecp_nistz256_sqr_montx
pcmpeqd %xmm4,%xmm5
pshufd $0xb1,%xmm1,%xmm4
por %xmm1,%xmm4
pshufd $0,%xmm5,%xmm5
pshufd $0x1e,%xmm4,%xmm3
por %xmm3,%xmm4
pxor %xmm3,%xmm3
pcmpeqd %xmm3,%xmm4
pshufd $0,%xmm4,%xmm4
movq 64+0(%rbx),%rdx
movq 64+8(%rbx),%r14
movq 64+16(%rbx),%r15
movq 64+24(%rbx),%r8
.byte 102,72,15,110,203 # movq %rbx,%xmm1
leaq 64-128(%rbx),%rsi
leaq 32(%rsp),%rdi
call __ecp_nistz256_sqr_montx
movq 544(%rsp),%rdx
leaq 544(%rsp),%rbx
movq 0+96(%rsp),%r9
movq 8+96(%rsp),%r10
leaq -128+96(%rsp),%rsi
movq 16+96(%rsp),%r11
movq 24+96(%rsp),%r12
leaq 224(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 448(%rsp),%rdx
leaq 448(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 256(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 416(%rsp),%rdx
leaq 416(%rsp),%rbx
movq 0+224(%rsp),%r9
movq 8+224(%rsp),%r10
leaq -128+224(%rsp),%rsi
movq 16+224(%rsp),%r11
movq 24+224(%rsp),%r12
leaq 224(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 512(%rsp),%rdx
leaq 512(%rsp),%rbx
movq 0+256(%rsp),%r9
movq 8+256(%rsp),%r10
leaq -128+256(%rsp),%rsi
movq 16+256(%rsp),%r11
movq 24+256(%rsp),%r12
leaq 256(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 224(%rsp),%rbx
leaq 64(%rsp),%rdi
call __ecp_nistz256_sub_fromx
orq %r13,%r12
movdqa %xmm4,%xmm2
orq %r8,%r12
orq %r9,%r12
por %xmm5,%xmm2
.byte 102,73,15,110,220 # movq %r12,%xmm3
movq 384(%rsp),%rdx
leaq 384(%rsp),%rbx
movq 0+96(%rsp),%r9
movq 8+96(%rsp),%r10
leaq -128+96(%rsp),%rsi
movq 16+96(%rsp),%r11
movq 24+96(%rsp),%r12
leaq 160(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 480(%rsp),%rdx
leaq 480(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 192(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 160(%rsp),%rbx
leaq 0(%rsp),%rdi
call __ecp_nistz256_sub_fromx
orq %r13,%r12
orq %r8,%r12
orq %r9,%r12
.byte 102,73,15,126,208 # movq %xmm2,%r8
.byte 102,73,15,126,217 # movq %xmm3,%r9
orq %r8,%r12
.byte 0x3e # predict-taken branch hint for the jnz below
jnz .Ladd_proceedx
testq %r9,%r9
jz .Ladd_doublex
.byte 102,72,15,126,199 # movq %xmm0,%rdi
pxor %xmm0,%xmm0
movdqu %xmm0,0(%rdi)
movdqu %xmm0,16(%rdi)
movdqu %xmm0,32(%rdi)
movdqu %xmm0,48(%rdi)
movdqu %xmm0,64(%rdi)
movdqu %xmm0,80(%rdi)
jmp .Ladd_donex
.align 32
.Ladd_doublex:
.byte 102,72,15,126,206 # movq %xmm1,%rsi
.byte 102,72,15,126,199 # movq %xmm0,%rdi
addq $416,%rsp
.cfi_adjust_cfa_offset -416
jmp .Lpoint_double_shortcutx
.cfi_adjust_cfa_offset 416
.align 32
.Ladd_proceedx:
movq 0+64(%rsp),%rdx
movq 8+64(%rsp),%r14
leaq -128+64(%rsp),%rsi
movq 16+64(%rsp),%r15
movq 24+64(%rsp),%r8
leaq 96(%rsp),%rdi
call __ecp_nistz256_sqr_montx
movq 448(%rsp),%rdx
leaq 448(%rsp),%rbx
movq 0+0(%rsp),%r9
movq 8+0(%rsp),%r10
leaq -128+0(%rsp),%rsi
movq 16+0(%rsp),%r11
movq 24+0(%rsp),%r12
leaq 352(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 0+0(%rsp),%rdx
movq 8+0(%rsp),%r14
leaq -128+0(%rsp),%rsi
movq 16+0(%rsp),%r15
movq 24+0(%rsp),%r8
leaq 32(%rsp),%rdi
call __ecp_nistz256_sqr_montx
movq 544(%rsp),%rdx
leaq 544(%rsp),%rbx
movq 0+352(%rsp),%r9
movq 8+352(%rsp),%r10
leaq -128+352(%rsp),%rsi
movq 16+352(%rsp),%r11
movq 24+352(%rsp),%r12
leaq 352(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 0(%rsp),%rdx
leaq 0(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 128(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 160(%rsp),%rdx
leaq 160(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 192(%rsp),%rdi
call __ecp_nistz256_mul_montx
xorq %r11,%r11
addq %r12,%r12
leaq 96(%rsp),%rsi
adcq %r13,%r13
movq %r12,%rax
adcq %r8,%r8
adcq %r9,%r9
movq %r13,%rbp
adcq $0,%r11
subq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
movq 0(%rsi),%rax
cmovcq %rbp,%r13
movq 8(%rsi),%rbp
cmovcq %rcx,%r8
movq 16(%rsi),%rcx
cmovcq %r10,%r9
movq 24(%rsi),%r10
call __ecp_nistz256_subx
leaq 128(%rsp),%rbx
leaq 288(%rsp),%rdi
call __ecp_nistz256_sub_fromx
movq 192+0(%rsp),%rax
movq 192+8(%rsp),%rbp
movq 192+16(%rsp),%rcx
movq 192+24(%rsp),%r10
leaq 320(%rsp),%rdi
call __ecp_nistz256_subx
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r8,16(%rdi)
movq %r9,24(%rdi)
movq 128(%rsp),%rdx
leaq 128(%rsp),%rbx
movq 0+224(%rsp),%r9
movq 8+224(%rsp),%r10
leaq -128+224(%rsp),%rsi
movq 16+224(%rsp),%r11
movq 24+224(%rsp),%r12
leaq 256(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 320(%rsp),%rdx
leaq 320(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq -128+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 320(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 256(%rsp),%rbx
leaq 320(%rsp),%rdi
call __ecp_nistz256_sub_fromx
.byte 102,72,15,126,199 # movq %xmm0,%rdi
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 352(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 352+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 544(%rsp),%xmm2
pand 544+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 448(%rsp),%xmm2
pand 448+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,64(%rdi)
movdqu %xmm3,80(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 288(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 288+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 480(%rsp),%xmm2
pand 480+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 384(%rsp),%xmm2
pand 384+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,0(%rdi)
movdqu %xmm3,16(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 320(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 320+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 512(%rsp),%xmm2
pand 512+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 416(%rsp),%xmm2
pand 416+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,32(%rdi)
movdqu %xmm3,48(%rdi)
.Ladd_donex:
leaq 576+56(%rsp),%rsi
.cfi_def_cfa %rsi,8
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbx
.cfi_restore %rbx
movq -8(%rsi),%rbp
.cfi_restore %rbp
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Lpoint_addx_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_point_add_adx,.-ecp_nistz256_point_add_adx
.globl ecp_nistz256_point_add_affine_adx
.hidden ecp_nistz256_point_add_affine_adx
.type ecp_nistz256_point_add_affine_adx,@function
.align 32
ecp_nistz256_point_add_affine_adx:
.cfi_startproc
_CET_ENDBR
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp,-16
pushq %rbx
.cfi_adjust_cfa_offset 8
.cfi_offset %rbx,-24
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-32
pushq %r13
.cfi_adjust_cfa_offset 8
.cfi_offset %r13,-40
pushq %r14
.cfi_adjust_cfa_offset 8
.cfi_offset %r14,-48
pushq %r15
.cfi_adjust_cfa_offset 8
.cfi_offset %r15,-56
subq $480+8,%rsp
.cfi_adjust_cfa_offset 32*15+8
.Ladd_affinex_body:
movdqu 0(%rsi),%xmm0
movq %rdx,%rbx
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
movdqu 48(%rsi),%xmm3
movdqu 64(%rsi),%xmm4
movdqu 80(%rsi),%xmm5
movq 64+0(%rsi),%rdx
movq 64+8(%rsi),%r14
movq 64+16(%rsi),%r15
movq 64+24(%rsi),%r8
movdqa %xmm0,320(%rsp)
movdqa %xmm1,320+16(%rsp)
movdqa %xmm2,352(%rsp)
movdqa %xmm3,352+16(%rsp)
movdqa %xmm4,384(%rsp)
movdqa %xmm5,384+16(%rsp)
por %xmm4,%xmm5
movdqu 0(%rbx),%xmm0
pshufd $0xb1,%xmm5,%xmm3
movdqu 16(%rbx),%xmm1
movdqu 32(%rbx),%xmm2
por %xmm3,%xmm5
movdqu 48(%rbx),%xmm3
movdqa %xmm0,416(%rsp)
pshufd $0x1e,%xmm5,%xmm4
movdqa %xmm1,416+16(%rsp)
por %xmm0,%xmm1
.byte 102,72,15,110,199 # movq %rdi,%xmm0
movdqa %xmm2,448(%rsp)
movdqa %xmm3,448+16(%rsp)
por %xmm2,%xmm3
por %xmm4,%xmm5
pxor %xmm4,%xmm4
por %xmm1,%xmm3
leaq 64-128(%rsi),%rsi
leaq 32(%rsp),%rdi
call __ecp_nistz256_sqr_montx
pcmpeqd %xmm4,%xmm5
pshufd $0xb1,%xmm3,%xmm4
movq 0(%rbx),%rdx
movq %r12,%r9
por %xmm3,%xmm4
pshufd $0,%xmm5,%xmm5
pshufd $0x1e,%xmm4,%xmm3
movq %r13,%r10
por %xmm3,%xmm4
pxor %xmm3,%xmm3
movq %r14,%r11
pcmpeqd %xmm3,%xmm4
pshufd $0,%xmm4,%xmm4
leaq 32-128(%rsp),%rsi
movq %r15,%r12
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 320(%rsp),%rbx
leaq 64(%rsp),%rdi
call __ecp_nistz256_sub_fromx
movq 384(%rsp),%rdx
leaq 384(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 384(%rsp),%rdx
leaq 384(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq -128+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 288(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 448(%rsp),%rdx
leaq 448(%rsp),%rbx
movq 0+32(%rsp),%r9
movq 8+32(%rsp),%r10
leaq -128+32(%rsp),%rsi
movq 16+32(%rsp),%r11
movq 24+32(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 352(%rsp),%rbx
leaq 96(%rsp),%rdi
call __ecp_nistz256_sub_fromx
movq 0+64(%rsp),%rdx
movq 8+64(%rsp),%r14
leaq -128+64(%rsp),%rsi
movq 16+64(%rsp),%r15
movq 24+64(%rsp),%r8
leaq 128(%rsp),%rdi
call __ecp_nistz256_sqr_montx
movq 0+96(%rsp),%rdx
movq 8+96(%rsp),%r14
leaq -128+96(%rsp),%rsi
movq 16+96(%rsp),%r15
movq 24+96(%rsp),%r8
leaq 192(%rsp),%rdi
call __ecp_nistz256_sqr_montx
movq 128(%rsp),%rdx
leaq 128(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq -128+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 160(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 320(%rsp),%rdx
leaq 320(%rsp),%rbx
movq 0+128(%rsp),%r9
movq 8+128(%rsp),%r10
leaq -128+128(%rsp),%rsi
movq 16+128(%rsp),%r11
movq 24+128(%rsp),%r12
leaq 0(%rsp),%rdi
call __ecp_nistz256_mul_montx
xorq %r11,%r11
addq %r12,%r12
leaq 192(%rsp),%rsi
adcq %r13,%r13
movq %r12,%rax
adcq %r8,%r8
adcq %r9,%r9
movq %r13,%rbp
adcq $0,%r11
subq $-1,%r12
movq %r8,%rcx
sbbq %r14,%r13
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
sbbq $0,%r11
cmovcq %rax,%r12
movq 0(%rsi),%rax
cmovcq %rbp,%r13
movq 8(%rsi),%rbp
cmovcq %rcx,%r8
movq 16(%rsi),%rcx
cmovcq %r10,%r9
movq 24(%rsi),%r10
call __ecp_nistz256_subx
leaq 160(%rsp),%rbx
leaq 224(%rsp),%rdi
call __ecp_nistz256_sub_fromx
movq 0+0(%rsp),%rax
movq 0+8(%rsp),%rbp
movq 0+16(%rsp),%rcx
movq 0+24(%rsp),%r10
leaq 64(%rsp),%rdi
call __ecp_nistz256_subx
movq %r12,0(%rdi)
movq %r13,8(%rdi)
movq %r8,16(%rdi)
movq %r9,24(%rdi)
movq 352(%rsp),%rdx
leaq 352(%rsp),%rbx
movq 0+160(%rsp),%r9
movq 8+160(%rsp),%r10
leaq -128+160(%rsp),%rsi
movq 16+160(%rsp),%r11
movq 24+160(%rsp),%r12
leaq 32(%rsp),%rdi
call __ecp_nistz256_mul_montx
movq 96(%rsp),%rdx
leaq 96(%rsp),%rbx
movq 0+64(%rsp),%r9
movq 8+64(%rsp),%r10
leaq -128+64(%rsp),%rsi
movq 16+64(%rsp),%r11
movq 24+64(%rsp),%r12
leaq 64(%rsp),%rdi
call __ecp_nistz256_mul_montx
leaq 32(%rsp),%rbx
leaq 256(%rsp),%rdi
call __ecp_nistz256_sub_fromx
.byte 102,72,15,126,199 # movq %xmm0,%rdi
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 288(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 288+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand .LONE_mont(%rip),%xmm2
pand .LONE_mont+16(%rip),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 384(%rsp),%xmm2
pand 384+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,64(%rdi)
movdqu %xmm3,80(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 224(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 224+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 416(%rsp),%xmm2
pand 416+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 320(%rsp),%xmm2
pand 320+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,0(%rdi)
movdqu %xmm3,16(%rdi)
movdqa %xmm5,%xmm0
movdqa %xmm5,%xmm1
pandn 256(%rsp),%xmm0
movdqa %xmm5,%xmm2
pandn 256+16(%rsp),%xmm1
movdqa %xmm5,%xmm3
pand 448(%rsp),%xmm2
pand 448+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqa %xmm4,%xmm0
movdqa %xmm4,%xmm1
pandn %xmm2,%xmm0
movdqa %xmm4,%xmm2
pandn %xmm3,%xmm1
movdqa %xmm4,%xmm3
pand 352(%rsp),%xmm2
pand 352+16(%rsp),%xmm3
por %xmm0,%xmm2
por %xmm1,%xmm3
movdqu %xmm2,32(%rdi)
movdqu %xmm3,48(%rdi)
leaq 480+56(%rsp),%rsi
.cfi_def_cfa %rsi,8
movq -48(%rsi),%r15
.cfi_restore %r15
movq -40(%rsi),%r14
.cfi_restore %r14
movq -32(%rsi),%r13
.cfi_restore %r13
movq -24(%rsi),%r12
.cfi_restore %r12
movq -16(%rsi),%rbx
.cfi_restore %rbx
movq -8(%rsi),%rbp
.cfi_restore %rbp
leaq (%rsi),%rsp
.cfi_def_cfa_register %rsp
.Ladd_affinex_epilogue:
ret
.cfi_endproc
.size ecp_nistz256_point_add_affine_adx,.-ecp_nistz256_point_add_affine_adx
#endif
// ---- marvin-hansen/iggy-streaming-system :: thirdparty/crates/ring-0.17.9/pregenerated/armv8-mont-win64.S ----
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
#include <ring-core/arm_arch.h>
.text
.globl bn_mul_mont_nohw
.def bn_mul_mont_nohw
.type 32
.endef
.align 5
bn_mul_mont_nohw:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-64]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
ldr x9,[x2],#8 // bp[0]
sub x22,sp,x5,lsl#3
ldp x7,x8,[x1],#16 // ap[0..1]
lsl x5,x5,#3
ldr x4,[x4] // *n0
and x22,x22,#-16 // ABI says so
ldp x13,x14,[x3],#16 // np[0..1]
mul x6,x7,x9 // ap[0]*bp[0]
sub x21,x5,#16 // j=num-2
umulh x7,x7,x9
mul x10,x8,x9 // ap[1]*bp[0]
umulh x11,x8,x9
mul x15,x6,x4 // "tp[0]"*n0
mov sp,x22 // alloca
// (*) mul x12,x13,x15 // np[0]*m1
umulh x13,x13,x15
mul x16,x14,x15 // np[1]*m1
// (*) adds x12,x12,x6 // discarded
// (*) On the removal of the first multiplication and addition
//	instructions: the outcome of the first addition is guaranteed
//	to be zero, which leaves only two computationally significant
//	outcomes, carry or no carry. So when does it carry? Following
//	the operations shows that the condition is simply x6 being
//	non-zero, so the carry can be produced by adding -1 to x6,
//	which is exactly what the next instruction does.
subs xzr,x6,#1 // (*)
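// A rough C model of the trick (an illustrative comment only, never
// assembled; np0 and m1 stand for the values feeding x13 and x15 above):
//	uint64_t carry = (x6 != 0);	// C flag left by subs xzr,x6,#1
//	x13 = umulh(np0, m1) + carry;	// matches adc x13,x13,xzr below
// where umulh() denotes the high 64 bits of the 64x64 product.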
umulh x17,x14,x15
adc x13,x13,xzr
cbz x21,L1st_skip
L1st:
ldr x8,[x1],#8
adds x6,x10,x7
sub x21,x21,#8 // j--
adc x7,x11,xzr
ldr x14,[x3],#8
adds x12,x16,x13
mul x10,x8,x9 // ap[j]*bp[0]
adc x13,x17,xzr
umulh x11,x8,x9
adds x12,x12,x6
mul x16,x14,x15 // np[j]*m1
adc x13,x13,xzr
umulh x17,x14,x15
str x12,[x22],#8 // tp[j-1]
cbnz x21,L1st
L1st_skip:
adds x6,x10,x7
sub x1,x1,x5 // rewind x1
adc x7,x11,xzr
adds x12,x16,x13
sub x3,x3,x5 // rewind x3
adc x13,x17,xzr
adds x12,x12,x6
sub x20,x5,#8 // i=num-1
adcs x13,x13,x7
adc x19,xzr,xzr // topmost overflow bit
stp x12,x13,[x22]
Louter:
ldr x9,[x2],#8 // bp[i]
ldp x7,x8,[x1],#16
ldr x23,[sp] // tp[0]
add x22,sp,#8
mul x6,x7,x9 // ap[0]*bp[i]
sub x21,x5,#16 // j=num-2
umulh x7,x7,x9
ldp x13,x14,[x3],#16
mul x10,x8,x9 // ap[1]*bp[i]
adds x6,x6,x23
umulh x11,x8,x9
adc x7,x7,xzr
mul x15,x6,x4
sub x20,x20,#8 // i--
// (*) mul x12,x13,x15 // np[0]*m1
umulh x13,x13,x15
mul x16,x14,x15 // np[1]*m1
// (*) adds x12,x12,x6
subs xzr,x6,#1 // (*)
umulh x17,x14,x15
cbz x21,Linner_skip
Linner:
ldr x8,[x1],#8
adc x13,x13,xzr
ldr x23,[x22],#8 // tp[j]
adds x6,x10,x7
sub x21,x21,#8 // j--
adc x7,x11,xzr
adds x12,x16,x13
ldr x14,[x3],#8
adc x13,x17,xzr
mul x10,x8,x9 // ap[j]*bp[i]
adds x6,x6,x23
umulh x11,x8,x9
adc x7,x7,xzr
mul x16,x14,x15 // np[j]*m1
adds x12,x12,x6
umulh x17,x14,x15
str x12,[x22,#-16] // tp[j-1]
cbnz x21,Linner
Linner_skip:
ldr x23,[x22],#8 // tp[j]
adc x13,x13,xzr
adds x6,x10,x7
sub x1,x1,x5 // rewind x1
adc x7,x11,xzr
adds x12,x16,x13
sub x3,x3,x5 // rewind x3
adcs x13,x17,x19
adc x19,xzr,xzr
adds x6,x6,x23
adc x7,x7,xzr
adds x12,x12,x6
adcs x13,x13,x7
adc x19,x19,xzr // topmost overflow bit
stp x12,x13,[x22,#-16]
cbnz x20,Louter
// Final step: if the result is not smaller than the modulus,
// subtract the modulus. Since a comparison is itself a
// subtraction, we simply subtract the modulus, check whether
// it borrowed, and conditionally copy back the original value.
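// Roughly, in C (an illustrative sketch; subb() is a hypothetical
// subtract-with-borrow helper):
//	borrow = 0;
//	for (j = 0; j < num; j++)		// the Lsub loop
//		rp[j] = subb(tp[j], np[j], &borrow);
//	subb(topmost_carry, 0, &borrow);	// sbcs x19,x19,xzr
//	for (j = 0; j < num; j++)		// the Lcond_copy loop
//		rp[j] = borrow ? tp[j] : rp[j];	// csel ...,lo; rp already holds tp-np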
ldr x23,[sp] // tp[0]
add x22,sp,#8
ldr x14,[x3],#8 // np[0]
subs x21,x5,#8 // j=num-1 and clear borrow
mov x1,x0
Lsub:
sbcs x8,x23,x14 // tp[j]-np[j]
ldr x23,[x22],#8
sub x21,x21,#8 // j--
ldr x14,[x3],#8
str x8,[x1],#8 // rp[j]=tp[j]-np[j]
cbnz x21,Lsub
sbcs x8,x23,x14
sbcs x19,x19,xzr // did it borrow?
str x8,[x1],#8 // rp[num-1]
ldr x23,[sp] // tp[0]
add x22,sp,#8
ldr x8,[x0],#8 // rp[0]
sub x5,x5,#8 // num--
nop
Lcond_copy:
sub x5,x5,#8 // num--
csel x14,x23,x8,lo // did it borrow?
ldr x23,[x22],#8
ldr x8,[x0],#8
str xzr,[x22,#-16] // wipe tp
str x14,[x0,#-16]
cbnz x5,Lcond_copy
csel x14,x23,x8,lo
str xzr,[x22,#-8] // wipe tp
str x14,[x0,#-8]
ldp x19,x20,[x29,#16]
mov sp,x29
ldp x21,x22,[x29,#32]
mov x0,#1
ldp x23,x24,[x29,#48]
ldr x29,[sp],#64
AARCH64_VALIDATE_LINK_REGISTER
ret
.globl bn_sqr8x_mont
.def bn_sqr8x_mont
.type 32
.endef
.align 5
bn_sqr8x_mont:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-128]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
stp x0,x3,[sp,#96] // offload rp and np
ldp x6,x7,[x1,#8*0]
ldp x8,x9,[x1,#8*2]
ldp x10,x11,[x1,#8*4]
ldp x12,x13,[x1,#8*6]
sub x2,sp,x5,lsl#4
lsl x5,x5,#3
ldr x4,[x4] // *n0
mov sp,x2 // alloca
sub x27,x5,#8*8
b Lsqr8x_zero_start
Lsqr8x_zero:
sub x27,x27,#8*8
stp xzr,xzr,[x2,#8*0]
stp xzr,xzr,[x2,#8*2]
stp xzr,xzr,[x2,#8*4]
stp xzr,xzr,[x2,#8*6]
Lsqr8x_zero_start:
stp xzr,xzr,[x2,#8*8]
stp xzr,xzr,[x2,#8*10]
stp xzr,xzr,[x2,#8*12]
stp xzr,xzr,[x2,#8*14]
add x2,x2,#8*16
cbnz x27,Lsqr8x_zero
add x3,x1,x5
add x1,x1,#8*8
mov x19,xzr
mov x20,xzr
mov x21,xzr
mov x22,xzr
mov x23,xzr
mov x24,xzr
mov x25,xzr
mov x26,xzr
mov x2,sp
str x4,[x29,#112] // offload n0
// Multiply everything but a[i]*a[i]
.align 4
Lsqr8x_outer_loop:
// a[1]a[0] (i)
// a[2]a[0]
// a[3]a[0]
// a[4]a[0]
// a[5]a[0]
// a[6]a[0]
// a[7]a[0]
// a[2]a[1] (ii)
// a[3]a[1]
// a[4]a[1]
// a[5]a[1]
// a[6]a[1]
// a[7]a[1]
// a[3]a[2] (iii)
// a[4]a[2]
// a[5]a[2]
// a[6]a[2]
// a[7]a[2]
// a[4]a[3] (iv)
// a[5]a[3]
// a[6]a[3]
// a[7]a[3]
// a[5]a[4] (v)
// a[6]a[4]
// a[7]a[4]
// a[6]a[5] (vi)
// a[7]a[5]
// a[7]a[6] (vii)
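// In other words, this pass accumulates T = sum over i>j of
// a[i]*a[j]*2^(64*(i+j)); the full square is then
// a^2 = 2*T + sum over i of a[i]^2*2^(128*i), with the doubling and
// the a[i]^2 terms folded in after Lsqr8x_outer_break.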
mul x14,x7,x6 // lo(a[1..7]*a[0]) (i)
mul x15,x8,x6
mul x16,x9,x6
mul x17,x10,x6
adds x20,x20,x14 // t[1]+lo(a[1]*a[0])
mul x14,x11,x6
adcs x21,x21,x15
mul x15,x12,x6
adcs x22,x22,x16
mul x16,x13,x6
adcs x23,x23,x17
umulh x17,x7,x6 // hi(a[1..7]*a[0])
adcs x24,x24,x14
umulh x14,x8,x6
adcs x25,x25,x15
umulh x15,x9,x6
adcs x26,x26,x16
umulh x16,x10,x6
stp x19,x20,[x2],#8*2 // t[0..1]
adc x19,xzr,xzr // t[8]
adds x21,x21,x17 // t[2]+lo(a[1]*a[0])
umulh x17,x11,x6
adcs x22,x22,x14
umulh x14,x12,x6
adcs x23,x23,x15
umulh x15,x13,x6
adcs x24,x24,x16
mul x16,x8,x7 // lo(a[2..7]*a[1]) (ii)
adcs x25,x25,x17
mul x17,x9,x7
adcs x26,x26,x14
mul x14,x10,x7
adc x19,x19,x15
mul x15,x11,x7
adds x22,x22,x16
mul x16,x12,x7
adcs x23,x23,x17
mul x17,x13,x7
adcs x24,x24,x14
umulh x14,x8,x7 // hi(a[2..7]*a[1])
adcs x25,x25,x15
umulh x15,x9,x7
adcs x26,x26,x16
umulh x16,x10,x7
adcs x19,x19,x17
umulh x17,x11,x7
stp x21,x22,[x2],#8*2 // t[2..3]
adc x20,xzr,xzr // t[9]
adds x23,x23,x14
umulh x14,x12,x7
adcs x24,x24,x15
umulh x15,x13,x7
adcs x25,x25,x16
mul x16,x9,x8 // lo(a[3..7]*a[2]) (iii)
adcs x26,x26,x17
mul x17,x10,x8
adcs x19,x19,x14
mul x14,x11,x8
adc x20,x20,x15
mul x15,x12,x8
adds x24,x24,x16
mul x16,x13,x8
adcs x25,x25,x17
umulh x17,x9,x8 // hi(a[3..7]*a[2])
adcs x26,x26,x14
umulh x14,x10,x8
adcs x19,x19,x15
umulh x15,x11,x8
adcs x20,x20,x16
umulh x16,x12,x8
stp x23,x24,[x2],#8*2 // t[4..5]
adc x21,xzr,xzr // t[10]
adds x25,x25,x17
umulh x17,x13,x8
adcs x26,x26,x14
mul x14,x10,x9 // lo(a[4..7]*a[3]) (iv)
adcs x19,x19,x15
mul x15,x11,x9
adcs x20,x20,x16
mul x16,x12,x9
adc x21,x21,x17
mul x17,x13,x9
adds x26,x26,x14
umulh x14,x10,x9 // hi(a[4..7]*a[3])
adcs x19,x19,x15
umulh x15,x11,x9
adcs x20,x20,x16
umulh x16,x12,x9
adcs x21,x21,x17
umulh x17,x13,x9
stp x25,x26,[x2],#8*2 // t[6..7]
adc x22,xzr,xzr // t[11]
adds x19,x19,x14
mul x14,x11,x10 // lo(a[5..7]*a[4]) (v)
adcs x20,x20,x15
mul x15,x12,x10
adcs x21,x21,x16
mul x16,x13,x10
adc x22,x22,x17
umulh x17,x11,x10 // hi(a[5..7]*a[4])
adds x20,x20,x14
umulh x14,x12,x10
adcs x21,x21,x15
umulh x15,x13,x10
adcs x22,x22,x16
mul x16,x12,x11 // lo(a[6..7]*a[5]) (vi)
adc x23,xzr,xzr // t[12]
adds x21,x21,x17
mul x17,x13,x11
adcs x22,x22,x14
umulh x14,x12,x11 // hi(a[6..7]*a[5])
adc x23,x23,x15
umulh x15,x13,x11
adds x22,x22,x16
mul x16,x13,x12 // lo(a[7]*a[6]) (vii)
adcs x23,x23,x17
umulh x17,x13,x12 // hi(a[7]*a[6])
adc x24,xzr,xzr // t[13]
adds x23,x23,x14
sub x27,x3,x1 // done yet?
adc x24,x24,x15
adds x24,x24,x16
sub x14,x3,x5 // rewound ap
adc x25,xzr,xzr // t[14]
add x25,x25,x17
cbz x27,Lsqr8x_outer_break
mov x4,x6
ldp x6,x7,[x2,#8*0]
ldp x8,x9,[x2,#8*2]
ldp x10,x11,[x2,#8*4]
ldp x12,x13,[x2,#8*6]
adds x19,x19,x6
adcs x20,x20,x7
ldp x6,x7,[x1,#8*0]
adcs x21,x21,x8
adcs x22,x22,x9
ldp x8,x9,[x1,#8*2]
adcs x23,x23,x10
adcs x24,x24,x11
ldp x10,x11,[x1,#8*4]
adcs x25,x25,x12
mov x0,x1
adcs x26,xzr,x13
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
//adc x28,xzr,xzr // moved below
mov x27,#-8*8
// a[8]a[0]
// a[9]a[0]
// a[a]a[0]
// a[b]a[0]
// a[c]a[0]
// a[d]a[0]
// a[e]a[0]
// a[f]a[0]
// a[8]a[1]
// a[f]a[1]........................
// a[8]a[2]
// a[f]a[2]........................
// a[8]a[3]
// a[f]a[3]........................
// a[8]a[4]
// a[f]a[4]........................
// a[8]a[5]
// a[f]a[5]........................
// a[8]a[6]
// a[f]a[6]........................
// a[8]a[7]
// a[f]a[7]........................
Lsqr8x_mul:
mul x14,x6,x4
adc x28,xzr,xzr // carry bit, modulo-scheduled
mul x15,x7,x4
add x27,x27,#8
mul x16,x8,x4
mul x17,x9,x4
adds x19,x19,x14
mul x14,x10,x4
adcs x20,x20,x15
mul x15,x11,x4
adcs x21,x21,x16
mul x16,x12,x4
adcs x22,x22,x17
mul x17,x13,x4
adcs x23,x23,x14
umulh x14,x6,x4
adcs x24,x24,x15
umulh x15,x7,x4
adcs x25,x25,x16
umulh x16,x8,x4
adcs x26,x26,x17
umulh x17,x9,x4
adc x28,x28,xzr
str x19,[x2],#8
adds x19,x20,x14
umulh x14,x10,x4
adcs x20,x21,x15
umulh x15,x11,x4
adcs x21,x22,x16
umulh x16,x12,x4
adcs x22,x23,x17
umulh x17,x13,x4
ldr x4,[x0,x27]
adcs x23,x24,x14
adcs x24,x25,x15
adcs x25,x26,x16
adcs x26,x28,x17
//adc x28,xzr,xzr // moved above
cbnz x27,Lsqr8x_mul
// note that carry flag is guaranteed
// to be zero at this point
cmp x1,x3 // done yet?
b.eq Lsqr8x_break
ldp x6,x7,[x2,#8*0]
ldp x8,x9,[x2,#8*2]
ldp x10,x11,[x2,#8*4]
ldp x12,x13,[x2,#8*6]
adds x19,x19,x6
ldr x4,[x0,#-8*8]
adcs x20,x20,x7
ldp x6,x7,[x1,#8*0]
adcs x21,x21,x8
adcs x22,x22,x9
ldp x8,x9,[x1,#8*2]
adcs x23,x23,x10
adcs x24,x24,x11
ldp x10,x11,[x1,#8*4]
adcs x25,x25,x12
mov x27,#-8*8
adcs x26,x26,x13
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
//adc x28,xzr,xzr // moved above
b Lsqr8x_mul
.align 4
Lsqr8x_break:
ldp x6,x7,[x0,#8*0]
add x1,x0,#8*8
ldp x8,x9,[x0,#8*2]
sub x14,x3,x1 // is it last iteration?
ldp x10,x11,[x0,#8*4]
sub x15,x2,x14
ldp x12,x13,[x0,#8*6]
cbz x14,Lsqr8x_outer_loop
stp x19,x20,[x2,#8*0]
ldp x19,x20,[x15,#8*0]
stp x21,x22,[x2,#8*2]
ldp x21,x22,[x15,#8*2]
stp x23,x24,[x2,#8*4]
ldp x23,x24,[x15,#8*4]
stp x25,x26,[x2,#8*6]
mov x2,x15
ldp x25,x26,[x15,#8*6]
b Lsqr8x_outer_loop
.align 4
Lsqr8x_outer_break:
// Now multiply above result by 2 and add a[n-1]*a[n-1]|...|a[0]*a[0]
ldp x7,x9,[x14,#8*0] // recall that x14 is &a[0]
ldp x15,x16,[sp,#8*1]
ldp x11,x13,[x14,#8*2]
add x1,x14,#8*4
ldp x17,x14,[sp,#8*3]
stp x19,x20,[x2,#8*0]
mul x19,x7,x7
stp x21,x22,[x2,#8*2]
umulh x7,x7,x7
stp x23,x24,[x2,#8*4]
mul x8,x9,x9
stp x25,x26,[x2,#8*6]
mov x2,sp
umulh x9,x9,x9
adds x20,x7,x15,lsl#1
extr x15,x16,x15,#63
sub x27,x5,#8*4
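// The extr instructions below implement the doubling: for each limb,
// extr xD,xHI,xLO,#63 computes (xHI<<1)|(xLO>>63), i.e. the limb
// shifted left one bit with the carry-in from the limb below it.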
Lsqr4x_shift_n_add:
adcs x21,x8,x15
extr x16,x17,x16,#63
sub x27,x27,#8*4
adcs x22,x9,x16
ldp x15,x16,[x2,#8*5]
mul x10,x11,x11
ldp x7,x9,[x1],#8*2
umulh x11,x11,x11
mul x12,x13,x13
umulh x13,x13,x13
extr x17,x14,x17,#63
stp x19,x20,[x2,#8*0]
adcs x23,x10,x17
extr x14,x15,x14,#63
stp x21,x22,[x2,#8*2]
adcs x24,x11,x14
ldp x17,x14,[x2,#8*7]
extr x15,x16,x15,#63
adcs x25,x12,x15
extr x16,x17,x16,#63
adcs x26,x13,x16
ldp x15,x16,[x2,#8*9]
mul x6,x7,x7
ldp x11,x13,[x1],#8*2
umulh x7,x7,x7
mul x8,x9,x9
umulh x9,x9,x9
stp x23,x24,[x2,#8*4]
extr x17,x14,x17,#63
stp x25,x26,[x2,#8*6]
add x2,x2,#8*8
adcs x19,x6,x17
extr x14,x15,x14,#63
adcs x20,x7,x14
ldp x17,x14,[x2,#8*3]
extr x15,x16,x15,#63
cbnz x27,Lsqr4x_shift_n_add
ldp x1,x4,[x29,#104] // pull np and n0
adcs x21,x8,x15
extr x16,x17,x16,#63
adcs x22,x9,x16
ldp x15,x16,[x2,#8*5]
mul x10,x11,x11
umulh x11,x11,x11
stp x19,x20,[x2,#8*0]
mul x12,x13,x13
umulh x13,x13,x13
stp x21,x22,[x2,#8*2]
extr x17,x14,x17,#63
adcs x23,x10,x17
extr x14,x15,x14,#63
ldp x19,x20,[sp,#8*0]
adcs x24,x11,x14
extr x15,x16,x15,#63
ldp x6,x7,[x1,#8*0]
adcs x25,x12,x15
extr x16,xzr,x16,#63
ldp x8,x9,[x1,#8*2]
adc x26,x13,x16
ldp x10,x11,[x1,#8*4]
// Reduce by 512 bits per iteration
mul x28,x4,x19 // t[0]*n0
ldp x12,x13,[x1,#8*6]
add x3,x1,x5
ldp x21,x22,[sp,#8*2]
stp x23,x24,[x2,#8*4]
ldp x23,x24,[sp,#8*4]
stp x25,x26,[x2,#8*6]
ldp x25,x26,[sp,#8*6]
add x1,x1,#8*8
mov x30,xzr // initial top-most carry
mov x2,sp
mov x27,#8
Lsqr8x_reduction:
// (*) mul x14,x6,x28 // lo(n[0-7])*lo(t[0]*n0)
mul x15,x7,x28
sub x27,x27,#1
mul x16,x8,x28
str x28,[x2],#8 // put aside t[0]*n0 for tail processing
mul x17,x9,x28
// (*) adds xzr,x19,x14
subs xzr,x19,#1 // (*)
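// (same subs trick as explained in bn_mul_mont_nohw above:
// the carry produced equals (x19 != 0))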
mul x14,x10,x28
adcs x19,x20,x15
mul x15,x11,x28
adcs x20,x21,x16
mul x16,x12,x28
adcs x21,x22,x17
mul x17,x13,x28
adcs x22,x23,x14
umulh x14,x6,x28 // hi(n[0-7])*lo(t[0]*n0)
adcs x23,x24,x15
umulh x15,x7,x28
adcs x24,x25,x16
umulh x16,x8,x28
adcs x25,x26,x17
umulh x17,x9,x28
adc x26,xzr,xzr
adds x19,x19,x14
umulh x14,x10,x28
adcs x20,x20,x15
umulh x15,x11,x28
adcs x21,x21,x16
umulh x16,x12,x28
adcs x22,x22,x17
umulh x17,x13,x28
mul x28,x4,x19 // next t[0]*n0
adcs x23,x23,x14
adcs x24,x24,x15
adcs x25,x25,x16
adc x26,x26,x17
cbnz x27,Lsqr8x_reduction
ldp x14,x15,[x2,#8*0]
ldp x16,x17,[x2,#8*2]
mov x0,x2
sub x27,x3,x1 // done yet?
adds x19,x19,x14
adcs x20,x20,x15
ldp x14,x15,[x2,#8*4]
adcs x21,x21,x16
adcs x22,x22,x17
ldp x16,x17,[x2,#8*6]
adcs x23,x23,x14
adcs x24,x24,x15
adcs x25,x25,x16
adcs x26,x26,x17
//adc x28,xzr,xzr // moved below
cbz x27,Lsqr8x8_post_condition
ldr x4,[x2,#-8*8]
ldp x6,x7,[x1,#8*0]
ldp x8,x9,[x1,#8*2]
ldp x10,x11,[x1,#8*4]
mov x27,#-8*8
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
Lsqr8x_tail:
mul x14,x6,x4
adc x28,xzr,xzr // carry bit, modulo-scheduled
mul x15,x7,x4
add x27,x27,#8
mul x16,x8,x4
mul x17,x9,x4
adds x19,x19,x14
mul x14,x10,x4
adcs x20,x20,x15
mul x15,x11,x4
adcs x21,x21,x16
mul x16,x12,x4
adcs x22,x22,x17
mul x17,x13,x4
adcs x23,x23,x14
umulh x14,x6,x4
adcs x24,x24,x15
umulh x15,x7,x4
adcs x25,x25,x16
umulh x16,x8,x4
adcs x26,x26,x17
umulh x17,x9,x4
adc x28,x28,xzr
str x19,[x2],#8
adds x19,x20,x14
umulh x14,x10,x4
adcs x20,x21,x15
umulh x15,x11,x4
adcs x21,x22,x16
umulh x16,x12,x4
adcs x22,x23,x17
umulh x17,x13,x4
ldr x4,[x0,x27]
adcs x23,x24,x14
adcs x24,x25,x15
adcs x25,x26,x16
adcs x26,x28,x17
//adc x28,xzr,xzr // moved above
cbnz x27,Lsqr8x_tail
// note that carry flag is guaranteed
// to be zero at this point
ldp x6,x7,[x2,#8*0]
sub x27,x3,x1 // done yet?
sub x16,x3,x5 // rewound np
ldp x8,x9,[x2,#8*2]
ldp x10,x11,[x2,#8*4]
ldp x12,x13,[x2,#8*6]
cbz x27,Lsqr8x_tail_break
ldr x4,[x0,#-8*8]
adds x19,x19,x6
adcs x20,x20,x7
ldp x6,x7,[x1,#8*0]
adcs x21,x21,x8
adcs x22,x22,x9
ldp x8,x9,[x1,#8*2]
adcs x23,x23,x10
adcs x24,x24,x11
ldp x10,x11,[x1,#8*4]
adcs x25,x25,x12
mov x27,#-8*8
adcs x26,x26,x13
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
//adc x28,xzr,xzr // moved above
b Lsqr8x_tail
.align 4
Lsqr8x_tail_break:
ldr x4,[x29,#112] // pull n0
add x27,x2,#8*8 // end of current t[num] window
subs xzr,x30,#1 // "move" top-most carry to carry bit
adcs x14,x19,x6
adcs x15,x20,x7
ldp x19,x20,[x0,#8*0]
adcs x21,x21,x8
ldp x6,x7,[x16,#8*0] // recall that x16 is &n[0]
adcs x22,x22,x9
ldp x8,x9,[x16,#8*2]
adcs x23,x23,x10
adcs x24,x24,x11
ldp x10,x11,[x16,#8*4]
adcs x25,x25,x12
adcs x26,x26,x13
ldp x12,x13,[x16,#8*6]
add x1,x16,#8*8
adc x30,xzr,xzr // top-most carry
mul x28,x4,x19
stp x14,x15,[x2,#8*0]
stp x21,x22,[x2,#8*2]
ldp x21,x22,[x0,#8*2]
stp x23,x24,[x2,#8*4]
ldp x23,x24,[x0,#8*4]
cmp x27,x29 // did we hit the bottom?
stp x25,x26,[x2,#8*6]
mov x2,x0 // slide the window
ldp x25,x26,[x0,#8*6]
mov x27,#8
b.ne Lsqr8x_reduction
// Final step: if the result is not smaller than the modulus,
// subtract the modulus. Since a comparison is itself a
// subtraction, we simply subtract the modulus, check whether
// it borrowed, and conditionally copy back the original value.
ldr x0,[x29,#96] // pull rp
add x2,x2,#8*8
subs x14,x19,x6
sbcs x15,x20,x7
sub x27,x5,#8*8
mov x3,x0 // x0 copy
Lsqr8x_sub:
sbcs x16,x21,x8
ldp x6,x7,[x1,#8*0]
sbcs x17,x22,x9
stp x14,x15,[x0,#8*0]
sbcs x14,x23,x10
ldp x8,x9,[x1,#8*2]
sbcs x15,x24,x11
stp x16,x17,[x0,#8*2]
sbcs x16,x25,x12
ldp x10,x11,[x1,#8*4]
sbcs x17,x26,x13
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
ldp x19,x20,[x2,#8*0]
sub x27,x27,#8*8
ldp x21,x22,[x2,#8*2]
ldp x23,x24,[x2,#8*4]
ldp x25,x26,[x2,#8*6]
add x2,x2,#8*8
stp x14,x15,[x0,#8*4]
sbcs x14,x19,x6
stp x16,x17,[x0,#8*6]
add x0,x0,#8*8
sbcs x15,x20,x7
cbnz x27,Lsqr8x_sub
sbcs x16,x21,x8
mov x2,sp
add x1,sp,x5
ldp x6,x7,[x3,#8*0]
sbcs x17,x22,x9
stp x14,x15,[x0,#8*0]
sbcs x14,x23,x10
ldp x8,x9,[x3,#8*2]
sbcs x15,x24,x11
stp x16,x17,[x0,#8*2]
sbcs x16,x25,x12
ldp x19,x20,[x1,#8*0]
sbcs x17,x26,x13
ldp x21,x22,[x1,#8*2]
sbcs xzr,x30,xzr // did it borrow?
ldr x30,[x29,#8] // pull return address
stp x14,x15,[x0,#8*4]
stp x16,x17,[x0,#8*6]
sub x27,x5,#8*4
Lsqr4x_cond_copy:
sub x27,x27,#8*4
csel x14,x19,x6,lo
stp xzr,xzr,[x2,#8*0]
csel x15,x20,x7,lo
ldp x6,x7,[x3,#8*4]
ldp x19,x20,[x1,#8*4]
csel x16,x21,x8,lo
stp xzr,xzr,[x2,#8*2]
add x2,x2,#8*4
csel x17,x22,x9,lo
ldp x8,x9,[x3,#8*6]
ldp x21,x22,[x1,#8*6]
add x1,x1,#8*4
stp x14,x15,[x3,#8*0]
stp x16,x17,[x3,#8*2]
add x3,x3,#8*4
stp xzr,xzr,[x1,#8*0]
stp xzr,xzr,[x1,#8*2]
cbnz x27,Lsqr4x_cond_copy
csel x14,x19,x6,lo
stp xzr,xzr,[x2,#8*0]
csel x15,x20,x7,lo
stp xzr,xzr,[x2,#8*2]
csel x16,x21,x8,lo
csel x17,x22,x9,lo
stp x14,x15,[x3,#8*0]
stp x16,x17,[x3,#8*2]
b Lsqr8x_done
.align 4
Lsqr8x8_post_condition:
adc x28,xzr,xzr
ldr x30,[x29,#8] // pull return address
// x19-x26,x28 hold the result, x6-x13 hold the modulus
subs x6,x19,x6
ldr x1,[x29,#96] // pull rp
sbcs x7,x20,x7
stp xzr,xzr,[sp,#8*0]
sbcs x8,x21,x8
stp xzr,xzr,[sp,#8*2]
sbcs x9,x22,x9
stp xzr,xzr,[sp,#8*4]
sbcs x10,x23,x10
stp xzr,xzr,[sp,#8*6]
sbcs x11,x24,x11
stp xzr,xzr,[sp,#8*8]
sbcs x12,x25,x12
stp xzr,xzr,[sp,#8*10]
sbcs x13,x26,x13
stp xzr,xzr,[sp,#8*12]
sbcs x28,x28,xzr // did it borrow?
stp xzr,xzr,[sp,#8*14]
// x6-x13 hold result-modulus
csel x6,x19,x6,lo
csel x7,x20,x7,lo
csel x8,x21,x8,lo
csel x9,x22,x9,lo
stp x6,x7,[x1,#8*0]
csel x10,x23,x10,lo
csel x11,x24,x11,lo
stp x8,x9,[x1,#8*2]
csel x12,x25,x12,lo
csel x13,x26,x13,lo
stp x10,x11,[x1,#8*4]
stp x12,x13,[x1,#8*6]
Lsqr8x_done:
ldp x19,x20,[x29,#16]
mov sp,x29
ldp x21,x22,[x29,#32]
mov x0,#1
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldr x29,[sp],#128
// x30 is popped earlier
AARCH64_VALIDATE_LINK_REGISTER
ret
.globl bn_mul4x_mont
.def bn_mul4x_mont
.type 32
.endef
.align 5
bn_mul4x_mont:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-128]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub x26,sp,x5,lsl#3
lsl x5,x5,#3
ldr x4,[x4] // *n0
sub sp,x26,#8*4 // alloca
add x10,x2,x5
add x27,x1,x5
stp x0,x10,[x29,#96] // offload rp and &b[num]
ldr x24,[x2,#8*0] // b[0]
ldp x6,x7,[x1,#8*0] // a[0..3]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
mov x19,xzr
mov x20,xzr
mov x21,xzr
mov x22,xzr
ldp x14,x15,[x3,#8*0] // n[0..3]
ldp x16,x17,[x3,#8*2]
adds x3,x3,#8*4 // clear carry bit
mov x0,xzr
mov x28,#0
mov x26,sp
Loop_mul4x_1st_reduction:
mul x10,x6,x24 // lo(a[0..3]*b[0])
adc x0,x0,xzr // modulo-scheduled
mul x11,x7,x24
add x28,x28,#8
mul x12,x8,x24
and x28,x28,#31
mul x13,x9,x24
adds x19,x19,x10
umulh x10,x6,x24 // hi(a[0..3]*b[0])
adcs x20,x20,x11
mul x25,x19,x4 // t[0]*n0
adcs x21,x21,x12
umulh x11,x7,x24
adcs x22,x22,x13
umulh x12,x8,x24
adc x23,xzr,xzr
umulh x13,x9,x24
ldr x24,[x2,x28] // next b[i] (or b[0])
adds x20,x20,x10
// (*) mul x10,x14,x25 // lo(n[0..3]*t[0]*n0)
str x25,[x26],#8 // put aside t[0]*n0 for tail processing
adcs x21,x21,x11
mul x11,x15,x25
adcs x22,x22,x12
mul x12,x16,x25
adc x23,x23,x13 // can't overflow
mul x13,x17,x25
// (*) adds xzr,x19,x10
subs xzr,x19,#1 // (*)
umulh x10,x14,x25 // hi(n[0..3]*t[0]*n0)
adcs x19,x20,x11
umulh x11,x15,x25
adcs x20,x21,x12
umulh x12,x16,x25
adcs x21,x22,x13
umulh x13,x17,x25
adcs x22,x23,x0
adc x0,xzr,xzr
adds x19,x19,x10
sub x10,x27,x1
adcs x20,x20,x11
adcs x21,x21,x12
adcs x22,x22,x13
//adc x0,x0,xzr
cbnz x28,Loop_mul4x_1st_reduction
cbz x10,Lmul4x4_post_condition
ldp x6,x7,[x1,#8*0] // a[4..7]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
ldr x25,[sp] // t[0]*n0
ldp x14,x15,[x3,#8*0] // n[4..7]
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
Loop_mul4x_1st_tail:
mul x10,x6,x24 // lo(a[4..7]*b[i])
adc x0,x0,xzr // modulo-scheduled
mul x11,x7,x24
add x28,x28,#8
mul x12,x8,x24
and x28,x28,#31
mul x13,x9,x24
adds x19,x19,x10
umulh x10,x6,x24 // hi(a[4..7]*b[i])
adcs x20,x20,x11
umulh x11,x7,x24
adcs x21,x21,x12
umulh x12,x8,x24
adcs x22,x22,x13
umulh x13,x9,x24
adc x23,xzr,xzr
ldr x24,[x2,x28] // next b[i] (or b[0])
adds x20,x20,x10
mul x10,x14,x25 // lo(n[4..7]*a[0]*n0)
adcs x21,x21,x11
mul x11,x15,x25
adcs x22,x22,x12
mul x12,x16,x25
adc x23,x23,x13 // can't overflow
mul x13,x17,x25
adds x19,x19,x10
umulh x10,x14,x25 // hi(n[4..7]*a[0]*n0)
adcs x20,x20,x11
umulh x11,x15,x25
adcs x21,x21,x12
umulh x12,x16,x25
adcs x22,x22,x13
adcs x23,x23,x0
umulh x13,x17,x25
adc x0,xzr,xzr
ldr x25,[sp,x28] // next t[0]*n0
str x19,[x26],#8 // result!!!
adds x19,x20,x10
sub x10,x27,x1 // done yet?
adcs x20,x21,x11
adcs x21,x22,x12
adcs x22,x23,x13
//adc x0,x0,xzr
cbnz x28,Loop_mul4x_1st_tail
sub x11,x27,x5 // rewound x1
cbz x10,Lmul4x_proceed
ldp x6,x7,[x1,#8*0]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
ldp x14,x15,[x3,#8*0]
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
b Loop_mul4x_1st_tail
.align 5
Lmul4x_proceed:
ldr x24,[x2,#8*4]! // *++b
adc x30,x0,xzr
ldp x6,x7,[x11,#8*0] // a[0..3]
sub x3,x3,x5 // rewind np
ldp x8,x9,[x11,#8*2]
add x1,x11,#8*4
stp x19,x20,[x26,#8*0] // result!!!
ldp x19,x20,[sp,#8*4] // t[0..3]
stp x21,x22,[x26,#8*2] // result!!!
ldp x21,x22,[sp,#8*6]
ldp x14,x15,[x3,#8*0] // n[0..3]
mov x26,sp
ldp x16,x17,[x3,#8*2]
adds x3,x3,#8*4 // clear carry bit
mov x0,xzr
.align 4
Loop_mul4x_reduction:
mul x10,x6,x24 // lo(a[0..3]*b[4])
adc x0,x0,xzr // modulo-scheduled
mul x11,x7,x24
add x28,x28,#8
mul x12,x8,x24
and x28,x28,#31
mul x13,x9,x24
adds x19,x19,x10
umulh x10,x6,x24 // hi(a[0..3]*b[4])
adcs x20,x20,x11
mul x25,x19,x4 // t[0]*n0
adcs x21,x21,x12
umulh x11,x7,x24
adcs x22,x22,x13
umulh x12,x8,x24
adc x23,xzr,xzr
umulh x13,x9,x24
ldr x24,[x2,x28] // next b[i]
adds x20,x20,x10
// (*) mul x10,x14,x25
str x25,[x26],#8 // put aside t[0]*n0 for tail processing
adcs x21,x21,x11
mul x11,x15,x25 // lo(n[0..3]*t[0]*n0)
adcs x22,x22,x12
mul x12,x16,x25
adc x23,x23,x13 // can't overflow
mul x13,x17,x25
// (*) adds xzr,x19,x10
subs xzr,x19,#1 // (*)
umulh x10,x14,x25 // hi(n[0..3]*t[0]*n0)
adcs x19,x20,x11
umulh x11,x15,x25
adcs x20,x21,x12
umulh x12,x16,x25
adcs x21,x22,x13
umulh x13,x17,x25
adcs x22,x23,x0
adc x0,xzr,xzr
adds x19,x19,x10
adcs x20,x20,x11
adcs x21,x21,x12
adcs x22,x22,x13
//adc x0,x0,xzr
cbnz x28,Loop_mul4x_reduction
adc x0,x0,xzr
ldp x10,x11,[x26,#8*4] // t[4..7]
ldp x12,x13,[x26,#8*6]
ldp x6,x7,[x1,#8*0] // a[4..7]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
adds x19,x19,x10
adcs x20,x20,x11
adcs x21,x21,x12
adcs x22,x22,x13
//adc x0,x0,xzr
ldr x25,[sp] // t[0]*n0
ldp x14,x15,[x3,#8*0] // n[4..7]
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
.align 4
Loop_mul4x_tail:
mul x10,x6,x24 // lo(a[4..7]*b[4])
adc x0,x0,xzr // modulo-scheduled
mul x11,x7,x24
add x28,x28,#8
mul x12,x8,x24
and x28,x28,#31
mul x13,x9,x24
adds x19,x19,x10
umulh x10,x6,x24 // hi(a[4..7]*b[4])
adcs x20,x20,x11
umulh x11,x7,x24
adcs x21,x21,x12
umulh x12,x8,x24
adcs x22,x22,x13
umulh x13,x9,x24
adc x23,xzr,xzr
ldr x24,[x2,x28] // next b[i]
adds x20,x20,x10
mul x10,x14,x25 // lo(n[4..7]*t[0]*n0)
adcs x21,x21,x11
mul x11,x15,x25
adcs x22,x22,x12
mul x12,x16,x25
adc x23,x23,x13 // can't overflow
mul x13,x17,x25
adds x19,x19,x10
umulh x10,x14,x25 // hi(n[4..7]*t[0]*n0)
adcs x20,x20,x11
umulh x11,x15,x25
adcs x21,x21,x12
umulh x12,x16,x25
adcs x22,x22,x13
umulh x13,x17,x25
adcs x23,x23,x0
ldr x25,[sp,x28] // next t[0]*n0
adc x0,xzr,xzr
str x19,[x26],#8 // result!!!
adds x19,x20,x10
sub x10,x27,x1 // done yet?
adcs x20,x21,x11
adcs x21,x22,x12
adcs x22,x23,x13
//adc x0,x0,xzr
cbnz x28,Loop_mul4x_tail
sub x11,x3,x5 // rewound np
adc x0,x0,xzr
cbz x10,Loop_mul4x_break
ldp x10,x11,[x26,#8*4]
ldp x12,x13,[x26,#8*6]
ldp x6,x7,[x1,#8*0]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
adds x19,x19,x10
adcs x20,x20,x11
adcs x21,x21,x12
adcs x22,x22,x13
//adc x0,x0,xzr
ldp x14,x15,[x3,#8*0]
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
b Loop_mul4x_tail
.align 4
Loop_mul4x_break:
ldp x12,x13,[x29,#96] // pull rp and &b[num]
adds x19,x19,x30
add x2,x2,#8*4 // bp++
adcs x20,x20,xzr
sub x1,x1,x5 // rewind ap
adcs x21,x21,xzr
stp x19,x20,[x26,#8*0] // result!!!
adcs x22,x22,xzr
ldp x19,x20,[sp,#8*4] // t[0..3]
adc x30,x0,xzr
stp x21,x22,[x26,#8*2] // result!!!
cmp x2,x13 // done yet?
ldp x21,x22,[sp,#8*6]
ldp x14,x15,[x11,#8*0] // n[0..3]
ldp x16,x17,[x11,#8*2]
add x3,x11,#8*4
b.eq Lmul4x_post
ldr x24,[x2]
ldp x6,x7,[x1,#8*0] // a[0..3]
ldp x8,x9,[x1,#8*2]
adds x1,x1,#8*4 // clear carry bit
mov x0,xzr
mov x26,sp
b Loop_mul4x_reduction
.align 4
Lmul4x_post:
// Final step: if the result is not smaller than the modulus,
// subtract the modulus. Since a comparison is itself a
// subtraction, we simply subtract the modulus, check whether
// it borrowed, and conditionally copy back the original value.
mov x0,x12
mov x27,x12 // x0 copy
subs x10,x19,x14
add x26,sp,#8*8
sbcs x11,x20,x15
sub x28,x5,#8*4
Lmul4x_sub:
sbcs x12,x21,x16
ldp x14,x15,[x3,#8*0]
sub x28,x28,#8*4
ldp x19,x20,[x26,#8*0]
sbcs x13,x22,x17
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
ldp x21,x22,[x26,#8*2]
add x26,x26,#8*4
stp x10,x11,[x0,#8*0]
sbcs x10,x19,x14
stp x12,x13,[x0,#8*2]
add x0,x0,#8*4
sbcs x11,x20,x15
cbnz x28,Lmul4x_sub
sbcs x12,x21,x16
mov x26,sp
add x1,sp,#8*4
ldp x6,x7,[x27,#8*0]
sbcs x13,x22,x17
stp x10,x11,[x0,#8*0]
ldp x8,x9,[x27,#8*2]
stp x12,x13,[x0,#8*2]
ldp x19,x20,[x1,#8*0]
ldp x21,x22,[x1,#8*2]
sbcs xzr,x30,xzr // did it borrow?
ldr x30,[x29,#8] // pull return address
sub x28,x5,#8*4
Lmul4x_cond_copy:
sub x28,x28,#8*4
csel x10,x19,x6,lo
stp xzr,xzr,[x26,#8*0]
csel x11,x20,x7,lo
ldp x6,x7,[x27,#8*4]
ldp x19,x20,[x1,#8*4]
csel x12,x21,x8,lo
stp xzr,xzr,[x26,#8*2]
add x26,x26,#8*4
csel x13,x22,x9,lo
ldp x8,x9,[x27,#8*6]
ldp x21,x22,[x1,#8*6]
add x1,x1,#8*4
stp x10,x11,[x27,#8*0]
stp x12,x13,[x27,#8*2]
add x27,x27,#8*4
cbnz x28,Lmul4x_cond_copy
csel x10,x19,x6,lo
stp xzr,xzr,[x26,#8*0]
csel x11,x20,x7,lo
stp xzr,xzr,[x26,#8*2]
csel x12,x21,x8,lo
stp xzr,xzr,[x26,#8*3]
csel x13,x22,x9,lo
stp xzr,xzr,[x26,#8*4]
stp x10,x11,[x27,#8*0]
stp x12,x13,[x27,#8*2]
b Lmul4x_done
.align 4
Lmul4x4_post_condition:
adc x0,x0,xzr
ldr x1,[x29,#96] // pull rp
// x19-x22,x0 hold the result, x14-x17 hold the modulus
subs x6,x19,x14
ldr x30,[x29,#8] // pull return address
sbcs x7,x20,x15
stp xzr,xzr,[sp,#8*0]
sbcs x8,x21,x16
stp xzr,xzr,[sp,#8*2]
sbcs x9,x22,x17
stp xzr,xzr,[sp,#8*4]
sbcs xzr,x0,xzr // did it borrow?
stp xzr,xzr,[sp,#8*6]
// x6-x9 hold result-modulus
csel x6,x19,x6,lo
csel x7,x20,x7,lo
csel x8,x21,x8,lo
csel x9,x22,x9,lo
stp x6,x7,[x1,#8*0]
stp x8,x9,[x1,#8*2]
Lmul4x_done:
ldp x19,x20,[x29,#16]
mov sp,x29
ldp x21,x22,[x29,#32]
mov x0,#1
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldr x29,[sp],#128
// x30 is popped earlier
AARCH64_VALIDATE_LINK_REGISTER
ret
.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 4
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
marvin-hansen/iggy-streaming-system | 74,019 | thirdparty/crates/ring-0.17.9/pregenerated/chacha20_poly1305_armv8-win64.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <ring-core/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
#include <ring-core/arm_arch.h>
.section .rodata
.align 7
Lchacha20_consts:
.byte 'e','x','p','a','n','d',' ','3','2','-','b','y','t','e',' ','k'
Linc:
.long 1,2,3,4
Lrol8:
.byte 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14
Lclamp:
.quad 0x0FFFFFFC0FFFFFFF, 0x0FFFFFFC0FFFFFFC
.text
.def Lpoly_hash_ad_internal
.type 32
.endef
.align 6
Lpoly_hash_ad_internal:
.cfi_startproc
cbnz x4, Lpoly_hash_intro
ret
Lpoly_hash_intro:
cmp x4, #16
b.lt Lpoly_hash_ad_tail
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
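// Taken together, the block above is one Poly1305 step with
// h = [acc2:acc1:acc0] and the clamped key r = [r1:r0]:
//   h = ((h + m + 2^128) * r) mod (2^130 - 5)   // x15 supplies the pad bit
// where the tail folds the high bits back in via 2^130 == 5 (mod p):
//   h = (h mod 2^130) + 5*(h >> 130)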
sub x4, x4, #16
b Lpoly_hash_ad_internal
Lpoly_hash_ad_tail:
cbz x4, Lpoly_hash_ad_ret
eor v20.16b, v20.16b, v20.16b // Use T0 to load the AAD
sub x4, x4, #1
Lpoly_hash_tail_16_compose:
ext v20.16b, v20.16b, v20.16b, #15
ldrb w11, [x3, x4]
mov v20.b[0], w11
subs x4, x4, #1
b.ge Lpoly_hash_tail_16_compose
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
Lpoly_hash_ad_ret:
ret
.cfi_endproc
/////////////////////////////////
//
// void chacha20_poly1305_seal(uint8_t *pt, uint8_t *ct, size_t len_in, uint8_t *ad, size_t len_ad, union open_data *seal_data);
//
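// Per AAPCS64 the six arguments arrive in x0-x5. Whatever the parameter
// names above suggest, the body below reads the source through x1, writes
// the destination through x0, takes the length in x2, the AAD pointer and
// length in x3/x4, and the key/nonce/counter block through x5.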
.globl chacha20_poly1305_seal
.def chacha20_poly1305_seal
.type 32
.endef
.align 6
chacha20_poly1305_seal:
AARCH64_SIGN_LINK_REGISTER
.cfi_startproc
stp x29, x30, [sp, #-80]!
.cfi_def_cfa_offset 80
.cfi_offset w30, -72
.cfi_offset w29, -80
mov x29, sp
// We probably could do .cfi_def_cfa w29, 80 at this point, but since
// we don't actually use the frame pointer like that, it's probably not
// worth bothering.
stp d8, d9, [sp, #16]
stp d10, d11, [sp, #32]
stp d12, d13, [sp, #48]
stp d14, d15, [sp, #64]
.cfi_offset b15, -8
.cfi_offset b14, -16
.cfi_offset b13, -24
.cfi_offset b12, -32
.cfi_offset b11, -40
.cfi_offset b10, -48
.cfi_offset b9, -56
.cfi_offset b8, -64
adrp x11, Lchacha20_consts
add x11, x11, :lo12:Lchacha20_consts
ld1 {v24.16b - v27.16b}, [x11] // Load the CONSTS, INC, ROL8 and CLAMP values
ld1 {v28.16b - v30.16b}, [x5]
mov x15, #1 // Prepare the Poly1305 state
mov x8, #0
mov x9, #0
mov x10, #0
ldr x12, [x5, #56] // The total cipher text length includes extra_in_len
add x12, x12, x2
mov v31.d[0], x4 // Store the input and aad lengths
mov v31.d[1], x12
cmp x2, #128
b.le Lseal_128 // Optimization for smaller buffers
// Initially we prepare 5 ChaCha20 blocks. Four to encrypt up to 4 blocks (256 bytes) of plaintext,
// and one for the Poly1305 R and S keys. The first four blocks (A0-A3..D0-D3) are computed vertically,
// the fifth block (A4-D4) horizontally.
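// In the vertical layout each of v0-v3 (likewise v5-v8, v10-v13, v15-v18)
// holds one 32-bit state word with lane i belonging to block i, so every
// NEON op below advances four blocks at once; v4/v9/v14/v19 is an
// ordinary single-block (horizontal) state computed alongside them.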
ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11]
mov v4.16b, v24.16b
ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16
mov v9.16b, v28.16b
ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16
mov v14.16b, v29.16b
ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5]
add v15.4s, v15.4s, v25.4s
mov v19.16b, v30.16b
sub x5, x5, #32
mov x6, #10
.align 5
Lseal_init_rounds:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v9.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v18.8h, v18.8h
rev32 v19.8h, v19.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
eor v8.16b, v8.16b, v13.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v9.4s, #20
sli v8.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
add v3.4s, v3.4s, v7.4s
add v4.4s, v4.4s, v8.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v14.16b
ushr v9.4s, v8.4s, #25
sli v9.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #4
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #12
add v0.4s, v0.4s, v6.4s
add v1.4s, v1.4s, v7.4s
add v2.4s, v2.4s, v8.4s
add v3.4s, v3.4s, v5.4s
add v4.4s, v4.4s, v9.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v18.8h, v18.8h
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v19.8h, v19.8h
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v5.4s, #20
sli v8.4s, v5.4s, #12
ushr v5.4s, v9.4s, #20
sli v5.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v5.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v12.16b
eor v6.16b, v6.16b, v13.16b
eor v7.16b, v7.16b, v10.16b
eor v8.16b, v8.16b, v11.16b
eor v5.16b, v5.16b, v14.16b
ushr v9.4s, v5.4s, #25
sli v9.4s, v5.4s, #7
ushr v5.4s, v8.4s, #25
sli v5.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #12
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #4
subs x6, x6, #1
b.hi Lseal_init_rounds
add v15.4s, v15.4s, v25.4s
mov x11, #4
dup v20.4s, w11
add v25.4s, v25.4s, v20.4s
zip1 v20.4s, v0.4s, v1.4s
zip2 v21.4s, v0.4s, v1.4s
zip1 v22.4s, v2.4s, v3.4s
zip2 v23.4s, v2.4s, v3.4s
zip1 v0.2d, v20.2d, v22.2d
zip2 v1.2d, v20.2d, v22.2d
zip1 v2.2d, v21.2d, v23.2d
zip2 v3.2d, v21.2d, v23.2d
zip1 v20.4s, v5.4s, v6.4s
zip2 v21.4s, v5.4s, v6.4s
zip1 v22.4s, v7.4s, v8.4s
zip2 v23.4s, v7.4s, v8.4s
zip1 v5.2d, v20.2d, v22.2d
zip2 v6.2d, v20.2d, v22.2d
zip1 v7.2d, v21.2d, v23.2d
zip2 v8.2d, v21.2d, v23.2d
zip1 v20.4s, v10.4s, v11.4s
zip2 v21.4s, v10.4s, v11.4s
zip1 v22.4s, v12.4s, v13.4s
zip2 v23.4s, v12.4s, v13.4s
zip1 v10.2d, v20.2d, v22.2d
zip2 v11.2d, v20.2d, v22.2d
zip1 v12.2d, v21.2d, v23.2d
zip2 v13.2d, v21.2d, v23.2d
zip1 v20.4s, v15.4s, v16.4s
zip2 v21.4s, v15.4s, v16.4s
zip1 v22.4s, v17.4s, v18.4s
zip2 v23.4s, v17.4s, v18.4s
zip1 v15.2d, v20.2d, v22.2d
zip2 v16.2d, v20.2d, v22.2d
zip1 v17.2d, v21.2d, v23.2d
zip2 v18.2d, v21.2d, v23.2d
add v4.4s, v4.4s, v24.4s
add v9.4s, v9.4s, v28.4s
and v4.16b, v4.16b, v27.16b
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v1.4s, v1.4s, v24.4s
add v6.4s, v6.4s, v28.4s
add v11.4s, v11.4s, v29.4s
add v16.4s, v16.4s, v30.4s
add v2.4s, v2.4s, v24.4s
add v7.4s, v7.4s, v28.4s
add v12.4s, v12.4s, v29.4s
add v17.4s, v17.4s, v30.4s
add v3.4s, v3.4s, v24.4s
add v8.4s, v8.4s, v28.4s
add v13.4s, v13.4s, v29.4s
add v18.4s, v18.4s, v30.4s
mov x16, v4.d[0] // Move the R key to GPRs
mov x17, v4.d[1]
mov v27.16b, v9.16b // Store the S key
bl Lpoly_hash_ad_internal
mov x3, x0
cmp x2, #256
b.le Lseal_tail
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v2.16b
eor v21.16b, v21.16b, v7.16b
eor v22.16b, v22.16b, v12.16b
eor v23.16b, v23.16b, v17.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v3.16b
eor v21.16b, v21.16b, v8.16b
eor v22.16b, v22.16b, v13.16b
eor v23.16b, v23.16b, v18.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #256
mov x6, #4 // In the first run of the loop we need to hash 256 bytes, therefore we hash one block for the first 4 rounds
mov x7, #6 // and two blocks for the remaining 6, for a total of (1 * 4 + 2 * 6) * 16 = 256
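// Concretely: x6 controls how many leading double-rounds hash a single
// Poly1305 block (inside the round body), and x7 how many trailing
// double-rounds hash two (one in the body, one after the inner branch).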
Lseal_main_loop:
adrp x11, Lchacha20_consts
add x11, x11, :lo12:Lchacha20_consts
ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11]
mov v4.16b, v24.16b
ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16
mov v9.16b, v28.16b
ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16
mov v14.16b, v29.16b
ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5]
add v15.4s, v15.4s, v25.4s
mov v19.16b, v30.16b
eor v20.16b, v20.16b, v20.16b //zero
not v21.16b, v20.16b // -1
sub v21.4s, v25.4s, v21.4s // Add +1
ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter)
add v19.4s, v19.4s, v20.4s
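// Net effect of the sequence above: v20 = [v25.s[3]+1, 0, 0, 0], so only
// the counter word of the horizontal block's v19 row is advanced past the
// counters used by the four vertical blocks.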
sub x5, x5, #32
.align 5
Lseal_main_loop_rounds:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v9.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v18.8h, v18.8h
rev32 v19.8h, v19.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
eor v8.16b, v8.16b, v13.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v9.4s, #20
sli v8.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
add v3.4s, v3.4s, v7.4s
add v4.4s, v4.4s, v8.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v14.16b
ushr v9.4s, v8.4s, #25
sli v9.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #4
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #12
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
add v0.4s, v0.4s, v6.4s
add v1.4s, v1.4s, v7.4s
add v2.4s, v2.4s, v8.4s
add v3.4s, v3.4s, v5.4s
add v4.4s, v4.4s, v9.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v18.8h, v18.8h
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v19.8h, v19.8h
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v5.4s, #20
sli v8.4s, v5.4s, #12
ushr v5.4s, v9.4s, #20
sli v5.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v5.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v12.16b
eor v6.16b, v6.16b, v13.16b
eor v7.16b, v7.16b, v10.16b
eor v8.16b, v8.16b, v11.16b
eor v5.16b, v5.16b, v14.16b
ushr v9.4s, v5.4s, #25
sli v9.4s, v5.4s, #7
ushr v5.4s, v8.4s, #25
sli v5.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #12
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #4
subs x6, x6, #1
b.ge Lseal_main_loop_rounds
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
subs x7, x7, #1
b.gt Lseal_main_loop_rounds
eor v20.16b, v20.16b, v20.16b //zero
not v21.16b, v20.16b // -1
sub v21.4s, v25.4s, v21.4s // Add +1
ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter)
add v19.4s, v19.4s, v20.4s
add v15.4s, v15.4s, v25.4s
mov x11, #5
dup v20.4s, w11
add v25.4s, v25.4s, v20.4s
zip1 v20.4s, v0.4s, v1.4s
zip2 v21.4s, v0.4s, v1.4s
zip1 v22.4s, v2.4s, v3.4s
zip2 v23.4s, v2.4s, v3.4s
zip1 v0.2d, v20.2d, v22.2d
zip2 v1.2d, v20.2d, v22.2d
zip1 v2.2d, v21.2d, v23.2d
zip2 v3.2d, v21.2d, v23.2d
zip1 v20.4s, v5.4s, v6.4s
zip2 v21.4s, v5.4s, v6.4s
zip1 v22.4s, v7.4s, v8.4s
zip2 v23.4s, v7.4s, v8.4s
zip1 v5.2d, v20.2d, v22.2d
zip2 v6.2d, v20.2d, v22.2d
zip1 v7.2d, v21.2d, v23.2d
zip2 v8.2d, v21.2d, v23.2d
zip1 v20.4s, v10.4s, v11.4s
zip2 v21.4s, v10.4s, v11.4s
zip1 v22.4s, v12.4s, v13.4s
zip2 v23.4s, v12.4s, v13.4s
zip1 v10.2d, v20.2d, v22.2d
zip2 v11.2d, v20.2d, v22.2d
zip1 v12.2d, v21.2d, v23.2d
zip2 v13.2d, v21.2d, v23.2d
zip1 v20.4s, v15.4s, v16.4s
zip2 v21.4s, v15.4s, v16.4s
zip1 v22.4s, v17.4s, v18.4s
zip2 v23.4s, v17.4s, v18.4s
zip1 v15.2d, v20.2d, v22.2d
zip2 v16.2d, v20.2d, v22.2d
zip1 v17.2d, v21.2d, v23.2d
zip2 v18.2d, v21.2d, v23.2d
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v1.4s, v1.4s, v24.4s
add v6.4s, v6.4s, v28.4s
add v11.4s, v11.4s, v29.4s
add v16.4s, v16.4s, v30.4s
add v2.4s, v2.4s, v24.4s
add v7.4s, v7.4s, v28.4s
add v12.4s, v12.4s, v29.4s
add v17.4s, v17.4s, v30.4s
add v3.4s, v3.4s, v24.4s
add v8.4s, v8.4s, v28.4s
add v13.4s, v13.4s, v29.4s
add v18.4s, v18.4s, v30.4s
add v4.4s, v4.4s, v24.4s
add v9.4s, v9.4s, v28.4s
add v14.4s, v14.4s, v29.4s
add v19.4s, v19.4s, v30.4s
cmp x2, #320
b.le Lseal_tail
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v2.16b
eor v21.16b, v21.16b, v7.16b
eor v22.16b, v22.16b, v12.16b
eor v23.16b, v23.16b, v17.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v3.16b
eor v21.16b, v21.16b, v8.16b
eor v22.16b, v22.16b, v13.16b
eor v23.16b, v23.16b, v18.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v4.16b
eor v21.16b, v21.16b, v9.16b
eor v22.16b, v22.16b, v14.16b
eor v23.16b, v23.16b, v19.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #320
mov x6, #0
mov x7, #10 // For the remainder of the loop we always hash and encrypt 320 bytes per iteration
b Lseal_main_loop
Lseal_tail:
// This part of the function handles the storage and authentication of the last [0,320) bytes
// We assume A0-A4 ... D0-D4 hold at least inl (320 max) bytes of the stream data.
cmp x2, #64
b.lt Lseal_tail_64
// Store and authenticate 64B blocks per iteration
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v21.d[0]
mov x12, v21.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v22.d[0]
mov x12, v22.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v23.d[0]
mov x12, v23.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
// Shift the state left by 64 bytes for the next iteration of the loop
mov v0.16b, v1.16b
mov v5.16b, v6.16b
mov v10.16b, v11.16b
mov v15.16b, v16.16b
mov v1.16b, v2.16b
mov v6.16b, v7.16b
mov v11.16b, v12.16b
mov v16.16b, v17.16b
mov v2.16b, v3.16b
mov v7.16b, v8.16b
mov v12.16b, v13.16b
mov v17.16b, v18.16b
mov v3.16b, v4.16b
mov v8.16b, v9.16b
mov v13.16b, v14.16b
mov v18.16b, v19.16b
b Lseal_tail
Lseal_tail_64:
ldp x3, x4, [x5, #48] // x3 = extra_in pointer, x4 = extra_in_len
// Here we handle the last [0,64) bytes of plaintext
cmp x2, #16
b.lt Lseal_tail_16
// Each iteration encrypts and authenticates a 16B block
ld1 {v20.16b}, [x1], #16
eor v20.16b, v20.16b, v0.16b
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
st1 {v20.16b}, [x0], #16
sub x2, x2, #16
// Shift the state left by 16 bytes for the next iteration of the loop
mov v0.16b, v5.16b
mov v5.16b, v10.16b
mov v10.16b, v15.16b
b Lseal_tail_64
Lseal_tail_16:
// Here we handle the last [0,16) bytes of ciphertext that require a padded block
cbz x2, Lseal_hash_extra
eor v20.16b, v20.16b, v20.16b // Use T0 to load the plaintext/extra in
eor v21.16b, v21.16b, v21.16b // Use T1 to generate an AND mask that will only mask the ciphertext bytes
not v22.16b, v20.16b
mov x6, x2
add x1, x1, x2
cbz x4, Lseal_tail_16_compose // No extra data to pad with, zero padding
mov x7, #16 // We need to load some extra_in first for padding
sub x7, x7, x2
cmp x4, x7
csel x7, x4, x7, lt // Load the minimum of extra_in_len and the amount needed to fill the register
mov x12, x7
add x3, x3, x7
sub x4, x4, x7
Lseal_tail16_compose_extra_in:
ext v20.16b, v20.16b, v20.16b, #15
ldrb w11, [x3, #-1]!
mov v20.b[0], w11
subs x7, x7, #1
b.gt Lseal_tail16_compose_extra_in
add x3, x3, x12
Lseal_tail_16_compose:
ext v20.16b, v20.16b, v20.16b, #15
ldrb w11, [x1, #-1]!
mov v20.b[0], w11
ext v21.16b, v22.16b, v21.16b, #15
subs x2, x2, #1
b.gt Lseal_tail_16_compose
and v0.16b, v0.16b, v21.16b
eor v20.16b, v20.16b, v0.16b
mov v21.16b, v20.16b
Lseal_tail_16_store:
umov w11, v20.b[0]
strb w11, [x0], #1
ext v20.16b, v20.16b, v20.16b, #1
subs x6, x6, #1
b.gt Lseal_tail_16_store
// Hash in the final ct block concatenated with extra_in
mov x11, v21.d[0]
mov x12, v21.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
Lseal_hash_extra:
cbz x4, Lseal_finalize
Lseal_hash_extra_loop:
cmp x4, #16
b.lt Lseal_hash_extra_tail
ld1 {v20.16b}, [x3], #16
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
sub x4, x4, #16
b Lseal_hash_extra_loop
Lseal_hash_extra_tail:
cbz x4, Lseal_finalize
eor v20.16b, v20.16b, v20.16b // Use T0 to load the remaining extra ciphertext
add x3, x3, x4
Lseal_hash_extra_load:
ext v20.16b, v20.16b, v20.16b, #15
ldrb w11, [x3, #-1]!
mov v20.b[0], w11
subs x4, x4, #1
b.gt Lseal_hash_extra_load
// Hash in the final padded extra_in block
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
Lseal_finalize:
mov x11, v31.d[0]
mov x12, v31.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
// Final reduction step
sub x12, xzr, x15
orr x13, xzr, #3
subs x11, x8, #-5
sbcs x12, x9, x12
sbcs x13, x10, x13
csel x8, x11, x8, cs
csel x9, x12, x9, cs
csel x10, x13, x10, cs
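// The subs/sbcs chain above computed h - p for p = 2^130 - 5, whose limbs
// are 0xFFFFFFFFFFFFFFFB (hence the #-5), 0xFFFFFFFFFFFFFFFF (x12 = -x15)
// and 3 (x13); the csel's keep h - p only when no borrow occurred (carry
// set), i.e. when h >= p.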
mov x11, v27.d[0]
mov x12, v27.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
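// tag = (h + s) mod 2^128: only x8:x9 are stored below, the x10 limb is
// discarded.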
stp x8, x9, [x5]
ldp d8, d9, [sp, #16]
ldp d10, d11, [sp, #32]
ldp d12, d13, [sp, #48]
ldp d14, d15, [sp, #64]
.cfi_restore b15
.cfi_restore b14
.cfi_restore b13
.cfi_restore b12
.cfi_restore b11
.cfi_restore b10
.cfi_restore b9
.cfi_restore b8
ldp x29, x30, [sp], 80
.cfi_restore w29
.cfi_restore w30
.cfi_def_cfa_offset 0
AARCH64_VALIDATE_LINK_REGISTER
ret
Lseal_128:
// On some architectures preparing 5 blocks for small buffers is wasteful
eor v25.16b, v25.16b, v25.16b
mov x11, #1
mov v25.s[0], w11
mov v0.16b, v24.16b
mov v1.16b, v24.16b
mov v2.16b, v24.16b
mov v5.16b, v28.16b
mov v6.16b, v28.16b
mov v7.16b, v28.16b
mov v10.16b, v29.16b
mov v11.16b, v29.16b
mov v12.16b, v29.16b
mov v17.16b, v30.16b
add v15.4s, v17.4s, v25.4s
add v16.4s, v15.4s, v25.4s
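// Three blocks: v15 and v16 take counters 1 and 2 for the data stream,
// while v17 keeps counter 0; its first 32 bytes become the clamped R and
// the S Poly1305 keys below.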
mov x6, #10
Lseal_128_rounds:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v6.16b, v6.16b, v6.16b, #4
ext v7.16b, v7.16b, v7.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
ext v16.16b, v16.16b, v16.16b, #12
ext v17.16b, v17.16b, v17.16b, #12
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v6.16b, v6.16b, v6.16b, #12
ext v7.16b, v7.16b, v7.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
ext v16.16b, v16.16b, v16.16b, #4
ext v17.16b, v17.16b, v17.16b, #4
subs x6, x6, #1
b.hi Lseal_128_rounds
add v0.4s, v0.4s, v24.4s
add v1.4s, v1.4s, v24.4s
add v2.4s, v2.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v6.4s, v6.4s, v28.4s
add v7.4s, v7.4s, v28.4s
// Only the first 32 bytes of the third block (counter = 0) are needed,
// so skip updating v12 and v17.
add v10.4s, v10.4s, v29.4s
add v11.4s, v11.4s, v29.4s
add v30.4s, v30.4s, v25.4s
add v15.4s, v15.4s, v30.4s
add v30.4s, v30.4s, v25.4s
add v16.4s, v16.4s, v30.4s
and v2.16b, v2.16b, v27.16b
mov x16, v2.d[0] // Move the R key to GPRs
mov x17, v2.d[1]
mov v27.16b, v7.16b // Store the S key
bl Lpoly_hash_ad_internal
b Lseal_tail
.cfi_endproc
/////////////////////////////////
//
// void chacha20_poly1305_open(uint8_t *pt, uint8_t *ct, size_t len_in, uint8_t *ad, size_t len_ad, union open_data *aead_data);
//
.globl chacha20_poly1305_open
.def chacha20_poly1305_open
.type 32
.endef
.align 6
chacha20_poly1305_open:
AARCH64_SIGN_LINK_REGISTER
.cfi_startproc
stp x29, x30, [sp, #-80]!
.cfi_def_cfa_offset 80
.cfi_offset w30, -72
.cfi_offset w29, -80
mov x29, sp
// We probably could do .cfi_def_cfa w29, 80 at this point, but since
// we don't actually use the frame pointer like that, it's probably not
// worth bothering.
stp d8, d9, [sp, #16]
stp d10, d11, [sp, #32]
stp d12, d13, [sp, #48]
stp d14, d15, [sp, #64]
.cfi_offset b15, -8
.cfi_offset b14, -16
.cfi_offset b13, -24
.cfi_offset b12, -32
.cfi_offset b11, -40
.cfi_offset b10, -48
.cfi_offset b9, -56
.cfi_offset b8, -64
adrp x11, Lchacha20_consts
add x11, x11, :lo12:Lchacha20_consts
ld1 {v24.16b - v27.16b}, [x11] // Load the CONSTS, INC, ROL8 and CLAMP values
ld1 {v28.16b - v30.16b}, [x5]
mov x15, #1 // Prepare the Poly1305 state
mov x8, #0
mov x9, #0
mov x10, #0
mov v31.d[0], x4 // Store the input and aad lengths
mov v31.d[1], x2
cmp x2, #128
b.le Lopen_128 // Optimization for smaller buffers
// Initially we prepare a single ChaCha20 block for the Poly1305 R and S keys
mov v0.16b, v24.16b
mov v5.16b, v28.16b
mov v10.16b, v29.16b
mov v15.16b, v30.16b
mov x6, #10
.align 5
Lopen_init_rounds:
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
subs x6, x6, #1
b.hi Lopen_init_rounds
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
and v0.16b, v0.16b, v27.16b
mov x16, v0.d[0] // Move the R key to GPRs
mov x17, v0.d[1]
mov v27.16b, v5.16b // Store the S key
bl Lpoly_hash_ad_internal
Lopen_ad_done:
mov x3, x1
// Each iteration of the loop hashes 320 bytes and prepares the stream for 320 bytes
Lopen_main_loop:
cmp x2, #192
b.lt Lopen_tail
adrp x11, Lchacha20_consts
add x11, x11, :lo12:Lchacha20_consts
ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11]
mov v4.16b, v24.16b
ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16
mov v9.16b, v28.16b
ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16
mov v14.16b, v29.16b
ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5]
sub x5, x5, #32
add v15.4s, v15.4s, v25.4s
mov v19.16b, v30.16b
eor v20.16b, v20.16b, v20.16b //zero
not v21.16b, v20.16b // -1
sub v21.4s, v25.4s, v21.4s // Add +1
ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter)
add v19.4s, v19.4s, v20.4s
lsr x4, x2, #4 // How many whole blocks we have to hash; always at least 12 here
sub x4, x4, #10
mov x7, #10
subs x6, x7, x4 // itr1 can be negative if we have more than 320 bytes to hash
csel x7, x7, x4, le // if itr1 is zero or less, itr2 should be 10 to indicate all 10 rounds are full
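// x4 = (bytes/16) - 10 is the surplus beyond one hashed block per
// double-round; min(x4, 10) rounds (x7) hash two blocks each and the
// remaining x6 rounds hash one, entering at Lopen_main_loop_rounds_short.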
cbz x7, Lopen_main_loop_rounds_short
.align 5
Lopen_main_loop_rounds:
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
Lopen_main_loop_rounds_short:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v9.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v18.8h, v18.8h
rev32 v19.8h, v19.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
eor v8.16b, v8.16b, v13.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v9.4s, #20
sli v8.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
add v3.4s, v3.4s, v7.4s
add v4.4s, v4.4s, v8.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v14.16b
ushr v9.4s, v8.4s, #25
sli v9.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #4
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #12
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
add v0.4s, v0.4s, v6.4s
add v1.4s, v1.4s, v7.4s
add v2.4s, v2.4s, v8.4s
add v3.4s, v3.4s, v5.4s
add v4.4s, v4.4s, v9.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v18.8h, v18.8h
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v19.8h, v19.8h
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v5.4s, #20
sli v8.4s, v5.4s, #12
ushr v5.4s, v9.4s, #20
sli v5.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v5.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v12.16b
eor v6.16b, v6.16b, v13.16b
eor v7.16b, v7.16b, v10.16b
eor v8.16b, v8.16b, v11.16b
eor v5.16b, v5.16b, v14.16b
ushr v9.4s, v5.4s, #25
sli v9.4s, v5.4s, #7
ushr v5.4s, v8.4s, #25
sli v5.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #12
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #4
subs x7, x7, #1
b.gt Lopen_main_loop_rounds
subs x6, x6, #1
b.ge Lopen_main_loop_rounds_short
eor v20.16b, v20.16b, v20.16b //zero
not v21.16b, v20.16b // -1
sub v21.4s, v25.4s, v21.4s // Add +1
ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter)
add v19.4s, v19.4s, v20.4s
add v15.4s, v15.4s, v25.4s
mov x11, #5
dup v20.4s, w11
add v25.4s, v25.4s, v20.4s
zip1 v20.4s, v0.4s, v1.4s
zip2 v21.4s, v0.4s, v1.4s
zip1 v22.4s, v2.4s, v3.4s
zip2 v23.4s, v2.4s, v3.4s
zip1 v0.2d, v20.2d, v22.2d
zip2 v1.2d, v20.2d, v22.2d
zip1 v2.2d, v21.2d, v23.2d
zip2 v3.2d, v21.2d, v23.2d
zip1 v20.4s, v5.4s, v6.4s
zip2 v21.4s, v5.4s, v6.4s
zip1 v22.4s, v7.4s, v8.4s
zip2 v23.4s, v7.4s, v8.4s
zip1 v5.2d, v20.2d, v22.2d
zip2 v6.2d, v20.2d, v22.2d
zip1 v7.2d, v21.2d, v23.2d
zip2 v8.2d, v21.2d, v23.2d
zip1 v20.4s, v10.4s, v11.4s
zip2 v21.4s, v10.4s, v11.4s
zip1 v22.4s, v12.4s, v13.4s
zip2 v23.4s, v12.4s, v13.4s
zip1 v10.2d, v20.2d, v22.2d
zip2 v11.2d, v20.2d, v22.2d
zip1 v12.2d, v21.2d, v23.2d
zip2 v13.2d, v21.2d, v23.2d
zip1 v20.4s, v15.4s, v16.4s
zip2 v21.4s, v15.4s, v16.4s
zip1 v22.4s, v17.4s, v18.4s
zip2 v23.4s, v17.4s, v18.4s
zip1 v15.2d, v20.2d, v22.2d
zip2 v16.2d, v20.2d, v22.2d
zip1 v17.2d, v21.2d, v23.2d
zip2 v18.2d, v21.2d, v23.2d
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v1.4s, v1.4s, v24.4s
add v6.4s, v6.4s, v28.4s
add v11.4s, v11.4s, v29.4s
add v16.4s, v16.4s, v30.4s
add v2.4s, v2.4s, v24.4s
add v7.4s, v7.4s, v28.4s
add v12.4s, v12.4s, v29.4s
add v17.4s, v17.4s, v30.4s
add v3.4s, v3.4s, v24.4s
add v8.4s, v8.4s, v28.4s
add v13.4s, v13.4s, v29.4s
add v18.4s, v18.4s, v30.4s
add v4.4s, v4.4s, v24.4s
add v9.4s, v9.4s, v28.4s
add v14.4s, v14.4s, v29.4s
add v19.4s, v19.4s, v30.4s
// We can always safely store 192 bytes
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v2.16b
eor v21.16b, v21.16b, v7.16b
eor v22.16b, v22.16b, v12.16b
eor v23.16b, v23.16b, v17.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #192
mov v0.16b, v3.16b
mov v5.16b, v8.16b
mov v10.16b, v13.16b
mov v15.16b, v18.16b
cmp x2, #64
b.lt Lopen_tail_64_store
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v3.16b
eor v21.16b, v21.16b, v8.16b
eor v22.16b, v22.16b, v13.16b
eor v23.16b, v23.16b, v18.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
mov v0.16b, v4.16b
mov v5.16b, v9.16b
mov v10.16b, v14.16b
mov v15.16b, v19.16b
cmp x2, #64
b.lt Lopen_tail_64_store
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v4.16b
eor v21.16b, v21.16b, v9.16b
eor v22.16b, v22.16b, v14.16b
eor v23.16b, v23.16b, v19.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
b Lopen_main_loop
Lopen_tail:
cbz x2, Lopen_finalize
lsr x4, x2, #4 // How many whole blocks we have to hash
cmp x2, #64
b.le Lopen_tail_64
cmp x2, #128
b.le Lopen_tail_128
Lopen_tail_192:
// We need three more blocks
mov v0.16b, v24.16b
mov v1.16b, v24.16b
mov v2.16b, v24.16b
mov v5.16b, v28.16b
mov v6.16b, v28.16b
mov v7.16b, v28.16b
mov v10.16b, v29.16b
mov v11.16b, v29.16b
mov v12.16b, v29.16b
mov v15.16b, v30.16b
mov v16.16b, v30.16b
mov v17.16b, v30.16b
eor v23.16b, v23.16b, v23.16b
eor v21.16b, v21.16b, v21.16b
ins v23.s[0], v25.s[0]
ins v21.d[0], x15
add v22.4s, v23.4s, v21.4s
add v21.4s, v22.4s, v21.4s
add v15.4s, v15.4s, v21.4s
add v16.4s, v16.4s, v23.4s
add v17.4s, v17.4s, v22.4s
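// Counter assignment: v16 gets ctr+0, v17 ctr+1 and v15 ctr+2; the v1 and
// v2 streams are consumed first below, and the v0/v15 block is kept for
// Lopen_tail_64_store.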
mov x7, #10
subs x6, x7, x4 // itr1 can be negative if we have more than 160 bytes to hash
csel x7, x7, x4, le // if itr1 is zero or less, itr2 should be 10 to indicate all 10 rounds are hashing
sub x4, x4, x7
cbz x7, Lopen_tail_192_rounds_no_hash
Lopen_tail_192_rounds:
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
Lopen_tail_192_rounds_no_hash:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v6.16b, v6.16b, v6.16b, #4
ext v7.16b, v7.16b, v7.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
ext v16.16b, v16.16b, v16.16b, #12
ext v17.16b, v17.16b, v17.16b, #12
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v6.16b, v6.16b, v6.16b, #12
ext v7.16b, v7.16b, v7.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
ext v16.16b, v16.16b, v16.16b, #4
ext v17.16b, v17.16b, v17.16b, #4
subs x7, x7, #1
b.gt Lopen_tail_192_rounds
subs x6, x6, #1
b.ge Lopen_tail_192_rounds_no_hash
// We hashed at most 160 bytes; up to 32 bytes may still be left
Lopen_tail_192_hash:
cbz x4, Lopen_tail_192_hash_done
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
sub x4, x4, #1
b Lopen_tail_192_hash
Lopen_tail_192_hash_done:
add v0.4s, v0.4s, v24.4s
add v1.4s, v1.4s, v24.4s
add v2.4s, v2.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v6.4s, v6.4s, v28.4s
add v7.4s, v7.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v11.4s, v11.4s, v29.4s
add v12.4s, v12.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v16.4s, v16.4s, v30.4s
add v17.4s, v17.4s, v30.4s
add v15.4s, v15.4s, v21.4s
add v16.4s, v16.4s, v23.4s
add v17.4s, v17.4s, v22.4s
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v2.16b
eor v21.16b, v21.16b, v7.16b
eor v22.16b, v22.16b, v12.16b
eor v23.16b, v23.16b, v17.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #128
b Lopen_tail_64_store
Lopen_tail_128:
// We need two more blocks
mov v0.16b, v24.16b
mov v1.16b, v24.16b
mov v5.16b, v28.16b
mov v6.16b, v28.16b
mov v10.16b, v29.16b
mov v11.16b, v29.16b
mov v15.16b, v30.16b
mov v16.16b, v30.16b
eor v23.16b, v23.16b, v23.16b
eor v22.16b, v22.16b, v22.16b
ins v23.s[0], v25.s[0]
ins v22.d[0], x15
add v22.4s, v22.4s, v23.4s
add v15.4s, v15.4s, v22.4s
add v16.4s, v16.4s, v23.4s
mov x6, #10
sub x6, x6, x4
Lopen_tail_128_rounds:
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
add v1.4s, v1.4s, v6.4s
eor v16.16b, v16.16b, v1.16b
rev32 v16.8h, v16.8h
add v11.4s, v11.4s, v16.4s
eor v6.16b, v6.16b, v11.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
add v1.4s, v1.4s, v20.4s
eor v16.16b, v16.16b, v1.16b
tbl v16.16b, {v16.16b}, v26.16b
add v11.4s, v11.4s, v16.4s
eor v20.16b, v20.16b, v11.16b
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v6.16b, v6.16b, v6.16b, #4
ext v11.16b, v11.16b, v11.16b, #8
ext v16.16b, v16.16b, v16.16b, #12
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
add v1.4s, v1.4s, v6.4s
eor v16.16b, v16.16b, v1.16b
rev32 v16.8h, v16.8h
add v11.4s, v11.4s, v16.4s
eor v6.16b, v6.16b, v11.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
add v1.4s, v1.4s, v20.4s
eor v16.16b, v16.16b, v1.16b
tbl v16.16b, {v16.16b}, v26.16b
add v11.4s, v11.4s, v16.4s
eor v20.16b, v20.16b, v11.16b
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v6.16b, v6.16b, v6.16b, #12
ext v11.16b, v11.16b, v11.16b, #8
ext v16.16b, v16.16b, v16.16b, #4
subs x6, x6, #1
b.gt Lopen_tail_128_rounds
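// The last x4 double rounds each also absorb one pending 16-byte
// Poly1305 block, overlapping hashing with keystream generation.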
cbz x4, Lopen_tail_128_rounds_done
subs x4, x4, #1
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
b Lopen_tail_128_rounds
Lopen_tail_128_rounds_done:
add v0.4s, v0.4s, v24.4s
add v1.4s, v1.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v6.4s, v6.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v11.4s, v11.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v16.4s, v16.4s, v30.4s
add v15.4s, v15.4s, v22.4s
add v16.4s, v16.4s, v23.4s
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
b Lopen_tail_64_store
Lopen_tail_64:
// We just need a single block
mov v0.16b, v24.16b
mov v5.16b, v28.16b
mov v10.16b, v29.16b
mov v15.16b, v30.16b
eor v23.16b, v23.16b, v23.16b
ins v23.s[0], v25.s[0]
add v15.4s, v15.4s, v23.4s
mov x6, #10
sub x6, x6, x4
Lopen_tail_64_rounds:
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
subs x6, x6, #1
b.gt Lopen_tail_64_rounds
cbz x4, Lopen_tail_64_rounds_done
subs x4, x4, #1
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
b Lopen_tail_64_rounds
Lopen_tail_64_rounds_done:
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v15.4s, v15.4s, v23.4s
Lopen_tail_64_store:
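// Annotation: drain the remaining keystream 16 bytes at a time,
// XOR-ing it into the output; the rows are shifted down
// (v0 <- v5 <- v10 <- v15) so v0 always holds the next 16 bytes.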
cmp x2, #16
b.lt Lopen_tail_16
ld1 {v20.16b}, [x1], #16
eor v20.16b, v20.16b, v0.16b
st1 {v20.16b}, [x0], #16
mov v0.16b, v5.16b
mov v5.16b, v10.16b
mov v10.16b, v15.16b
sub x2, x2, #16
b Lopen_tail_64_store
Lopen_tail_16:
// Here we handle the last [0,16) bytes that require a padded block
cbz x2, Lopen_finalize
eor v20.16b, v20.16b, v20.16b // Use T0 to load the ciphertext
eor v21.16b, v21.16b, v21.16b // Use T1 to generate an AND mask
not v22.16b, v20.16b
add x7, x1, x2
mov x6, x2
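// Annotation: build the final partial block by walking x7 backwards
// from the end of the ciphertext, rotating each byte into v20.b[0],
// while shifting 0xff lanes from v22 into v21 to form a byte mask;
// the AND below keeps only the loaded bytes, zero-padding the rest.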
Lopen_tail_16_compose:
ext v20.16b, v20.16b, v20.16b, #15
ldrb w11, [x7, #-1]!
mov v20.b[0], w11
ext v21.16b, v22.16b, v21.16b, #15
subs x2, x2, #1
b.gt Lopen_tail_16_compose
and v20.16b, v20.16b, v21.16b
// Hash in the final padded block
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
eor v20.16b, v20.16b, v0.16b
Lopen_tail_16_store:
umov w11, v20.b[0]
strb w11, [x0], #1
ext v20.16b, v20.16b, v20.16b, #1
subs x6, x6, #1
b.gt Lopen_tail_16_store
Lopen_finalize:
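// Annotation: v31 presumably holds the Poly1305 length block
// (ad_len || ciphertext_len as two little-endian 64-bit words),
// hashed here like an ordinary block before the final reduction.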
mov x11, v31.d[0]
mov x12, v31.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
// Final reduction step
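// Conceptually: p = 2^130 - 5 has limbs [3 : 2^64-1 : 2^64-5], and
// x12 = -x15 = 2^64-1. The SUBS/SBCS chain computes acc - p; carry
// set at the end means no borrow (acc >= p), so the CSELs keep the
// reduced value, yielding acc mod p.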
sub x12, xzr, x15
orr x13, xzr, #3
subs x11, x8, #-5
sbcs x12, x9, x12
sbcs x13, x10, x13
csel x8, x11, x8, cs
csel x9, x12, x9, cs
csel x10, x13, x10, cs
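// Add the second Poly1305 key half s (stashed in v27) and store the
// low 128 bits as the tag; x5 appears to point at the caller's tag
// slot.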
mov x11, v27.d[0]
mov x12, v27.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
stp x8, x9, [x5]
ldp d8, d9, [sp, #16]
ldp d10, d11, [sp, #32]
ldp d12, d13, [sp, #48]
ldp d14, d15, [sp, #64]
.cfi_restore b15
.cfi_restore b14
.cfi_restore b13
.cfi_restore b12
.cfi_restore b11
.cfi_restore b10
.cfi_restore b9
.cfi_restore b8
ldp x29, x30, [sp], 80
.cfi_restore w29
.cfi_restore w30
.cfi_def_cfa_offset 0
AARCH64_VALIDATE_LINK_REGISTER
ret
Lopen_128:
// On some architectures preparing 5 blocks for small buffers is wasteful
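// Annotation: for inputs of at most 128 bytes only three ChaCha20
// blocks are generated: the block with the initial counter (ending in
// v2/v7/v12/v17) supplies the Poly1305 key, and the next two (counter
// +1 and +2) supply up to 128 bytes of keystream.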
eor v25.16b, v25.16b, v25.16b
mov x11, #1
mov v25.s[0], w11
mov v0.16b, v24.16b
mov v1.16b, v24.16b
mov v2.16b, v24.16b
mov v5.16b, v28.16b
mov v6.16b, v28.16b
mov v7.16b, v28.16b
mov v10.16b, v29.16b
mov v11.16b, v29.16b
mov v12.16b, v29.16b
mov v17.16b, v30.16b
add v15.4s, v17.4s, v25.4s
add v16.4s, v15.4s, v25.4s
mov x6, #10
Lopen_128_rounds:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v6.16b, v6.16b, v6.16b, #4
ext v7.16b, v7.16b, v7.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
ext v16.16b, v16.16b, v16.16b, #12
ext v17.16b, v17.16b, v17.16b, #12
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v6.16b, v6.16b, v6.16b, #12
ext v7.16b, v7.16b, v7.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
ext v16.16b, v16.16b, v16.16b, #4
ext v17.16b, v17.16b, v17.16b, #4
subs x6, x6, #1
b.hi Lopen_128_rounds
add v0.4s, v0.4s, v24.4s
add v1.4s, v1.4s, v24.4s
add v2.4s, v2.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v6.4s, v6.4s, v28.4s
add v7.4s, v7.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v11.4s, v11.4s, v29.4s
add v30.4s, v30.4s, v25.4s
add v15.4s, v15.4s, v30.4s
add v30.4s, v30.4s, v25.4s
add v16.4s, v16.4s, v30.4s
and v2.16b, v2.16b, v27.16b
mov x16, v2.d[0] // Move the R key to GPRs
mov x17, v2.d[1]
mov v27.16b, v7.16b // Store the S key
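// Annotation: v27 presumably held the Poly1305 clamp mask
// (0x0ffffffc0ffffffc0ffffffc0fffffff) for the AND above, which clamps
// r per RFC 8439; v27 is then reused to stash s for Lopen_finalize.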
bl Lpoly_hash_ad_internal
Lopen_128_store:
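// Annotation: in open (decrypt) mode each 64-byte chunk of ciphertext
// is hashed into Poly1305 first, then XOR-ed with the keystream.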
cmp x2, #64
b.lt Lopen_128_store_64
ld1 {v20.16b - v23.16b}, [x1], #64
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v21.d[0]
mov x12, v21.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v22.d[0]
mov x12, v22.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v23.d[0]
mov x12, v23.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
mov v0.16b, v1.16b
mov v5.16b, v6.16b
mov v10.16b, v11.16b
mov v15.16b, v16.16b
Lopen_128_store_64:
lsr x4, x2, #4
mov x3, x1
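// Annotation: hash any remaining whole 16-byte ciphertext blocks
// (x4 = remaining bytes / 16) before Lopen_tail_64_store decrypts
// them.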
Lopen_128_hash_64:
cbz x4, Lopen_tail_64_store
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
sub x4, x4, #1
b Lopen_128_hash_64
.cfi_endproc
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)