Dataset columns:
repo_id    string, 5 to 115 chars
size       int64, 590 to 5.01M
file_path  string, 4 to 212 chars
content    string, 590 to 5.01M chars
AirFortressIlikara/LS2K0300-linux-4.19
12,523
arch/arm64/crypto/aes-neon.S
/* * linux/arch/arm64/crypto/aes-neon.S - AES cipher for ARMv8 NEON * * Copyright (C) 2013 - 2017 Linaro Ltd. <ard.biesheuvel@linaro.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/linkage.h> #include <asm/assembler.h> #define AES_ENTRY(func) ENTRY(neon_ ## func) #define AES_ENDPROC(func) ENDPROC(neon_ ## func) /* multiply by polynomial 'x' in GF(2^8) */ .macro mul_by_x, out, in, temp, const sshr \temp, \in, #7 shl \out, \in, #1 and \temp, \temp, \const eor \out, \out, \temp .endm /* multiply by polynomial 'x^2' in GF(2^8) */ .macro mul_by_x2, out, in, temp, const ushr \temp, \in, #6 shl \out, \in, #2 pmul \temp, \temp, \const eor \out, \out, \temp .endm /* preload the entire Sbox */ .macro prepare, sbox, shiftrows, temp movi v12.16b, #0x1b ldr_l q13, \shiftrows, \temp ldr_l q14, .Lror32by8, \temp adr_l \temp, \sbox ld1 {v16.16b-v19.16b}, [\temp], #64 ld1 {v20.16b-v23.16b}, [\temp], #64 ld1 {v24.16b-v27.16b}, [\temp], #64 ld1 {v28.16b-v31.16b}, [\temp] .endm /* do preload for encryption */ .macro enc_prepare, ignore0, ignore1, temp prepare .LForward_Sbox, .LForward_ShiftRows, \temp .endm .macro enc_switch_key, ignore0, ignore1, temp /* do nothing */ .endm /* do preload for decryption */ .macro dec_prepare, ignore0, ignore1, temp prepare .LReverse_Sbox, .LReverse_ShiftRows, \temp .endm /* apply SubBytes transformation using the the preloaded Sbox */ .macro sub_bytes, in sub v9.16b, \in\().16b, v15.16b tbl \in\().16b, {v16.16b-v19.16b}, \in\().16b sub v10.16b, v9.16b, v15.16b tbx \in\().16b, {v20.16b-v23.16b}, v9.16b sub v11.16b, v10.16b, v15.16b tbx \in\().16b, {v24.16b-v27.16b}, v10.16b tbx \in\().16b, {v28.16b-v31.16b}, v11.16b .endm /* apply MixColumns transformation */ .macro mix_columns, in, enc .if \enc == 0 /* Inverse MixColumns: pre-multiply by { 5, 0, 4, 0 } */ mul_by_x2 v8.16b, \in\().16b, v9.16b, v12.16b eor \in\().16b, \in\().16b, v8.16b rev32 v8.8h, v8.8h eor \in\().16b, \in\().16b, v8.16b .endif mul_by_x v9.16b, \in\().16b, v8.16b, v12.16b rev32 v8.8h, \in\().8h eor v8.16b, v8.16b, v9.16b eor \in\().16b, \in\().16b, v8.16b tbl \in\().16b, {\in\().16b}, v14.16b eor \in\().16b, \in\().16b, v8.16b .endm .macro do_block, enc, in, rounds, rk, rkp, i ld1 {v15.4s}, [\rk] add \rkp, \rk, #16 mov \i, \rounds 1111: eor \in\().16b, \in\().16b, v15.16b /* ^round key */ movi v15.16b, #0x40 tbl \in\().16b, {\in\().16b}, v13.16b /* ShiftRows */ sub_bytes \in subs \i, \i, #1 ld1 {v15.4s}, [\rkp], #16 beq 2222f mix_columns \in, \enc b 1111b 2222: eor \in\().16b, \in\().16b, v15.16b /* ^round key */ .endm .macro encrypt_block, in, rounds, rk, rkp, i do_block 1, \in, \rounds, \rk, \rkp, \i .endm .macro decrypt_block, in, rounds, rk, rkp, i do_block 0, \in, \rounds, \rk, \rkp, \i .endm /* * Interleaved versions: functionally equivalent to the * ones above, but applied to 2 or 4 AES states in parallel. 
*/ .macro sub_bytes_2x, in0, in1 sub v8.16b, \in0\().16b, v15.16b tbl \in0\().16b, {v16.16b-v19.16b}, \in0\().16b sub v9.16b, \in1\().16b, v15.16b tbl \in1\().16b, {v16.16b-v19.16b}, \in1\().16b sub v10.16b, v8.16b, v15.16b tbx \in0\().16b, {v20.16b-v23.16b}, v8.16b sub v11.16b, v9.16b, v15.16b tbx \in1\().16b, {v20.16b-v23.16b}, v9.16b sub v8.16b, v10.16b, v15.16b tbx \in0\().16b, {v24.16b-v27.16b}, v10.16b sub v9.16b, v11.16b, v15.16b tbx \in1\().16b, {v24.16b-v27.16b}, v11.16b tbx \in0\().16b, {v28.16b-v31.16b}, v8.16b tbx \in1\().16b, {v28.16b-v31.16b}, v9.16b .endm .macro sub_bytes_4x, in0, in1, in2, in3 sub v8.16b, \in0\().16b, v15.16b tbl \in0\().16b, {v16.16b-v19.16b}, \in0\().16b sub v9.16b, \in1\().16b, v15.16b tbl \in1\().16b, {v16.16b-v19.16b}, \in1\().16b sub v10.16b, \in2\().16b, v15.16b tbl \in2\().16b, {v16.16b-v19.16b}, \in2\().16b sub v11.16b, \in3\().16b, v15.16b tbl \in3\().16b, {v16.16b-v19.16b}, \in3\().16b tbx \in0\().16b, {v20.16b-v23.16b}, v8.16b tbx \in1\().16b, {v20.16b-v23.16b}, v9.16b sub v8.16b, v8.16b, v15.16b tbx \in2\().16b, {v20.16b-v23.16b}, v10.16b sub v9.16b, v9.16b, v15.16b tbx \in3\().16b, {v20.16b-v23.16b}, v11.16b sub v10.16b, v10.16b, v15.16b tbx \in0\().16b, {v24.16b-v27.16b}, v8.16b sub v11.16b, v11.16b, v15.16b tbx \in1\().16b, {v24.16b-v27.16b}, v9.16b sub v8.16b, v8.16b, v15.16b tbx \in2\().16b, {v24.16b-v27.16b}, v10.16b sub v9.16b, v9.16b, v15.16b tbx \in3\().16b, {v24.16b-v27.16b}, v11.16b sub v10.16b, v10.16b, v15.16b tbx \in0\().16b, {v28.16b-v31.16b}, v8.16b sub v11.16b, v11.16b, v15.16b tbx \in1\().16b, {v28.16b-v31.16b}, v9.16b tbx \in2\().16b, {v28.16b-v31.16b}, v10.16b tbx \in3\().16b, {v28.16b-v31.16b}, v11.16b .endm .macro mul_by_x_2x, out0, out1, in0, in1, tmp0, tmp1, const sshr \tmp0\().16b, \in0\().16b, #7 shl \out0\().16b, \in0\().16b, #1 sshr \tmp1\().16b, \in1\().16b, #7 and \tmp0\().16b, \tmp0\().16b, \const\().16b shl \out1\().16b, \in1\().16b, #1 and \tmp1\().16b, \tmp1\().16b, \const\().16b eor \out0\().16b, \out0\().16b, \tmp0\().16b eor \out1\().16b, \out1\().16b, \tmp1\().16b .endm .macro mul_by_x2_2x, out0, out1, in0, in1, tmp0, tmp1, const ushr \tmp0\().16b, \in0\().16b, #6 shl \out0\().16b, \in0\().16b, #2 ushr \tmp1\().16b, \in1\().16b, #6 pmul \tmp0\().16b, \tmp0\().16b, \const\().16b shl \out1\().16b, \in1\().16b, #2 pmul \tmp1\().16b, \tmp1\().16b, \const\().16b eor \out0\().16b, \out0\().16b, \tmp0\().16b eor \out1\().16b, \out1\().16b, \tmp1\().16b .endm .macro mix_columns_2x, in0, in1, enc .if \enc == 0 /* Inverse MixColumns: pre-multiply by { 5, 0, 4, 0 } */ mul_by_x2_2x v8, v9, \in0, \in1, v10, v11, v12 eor \in0\().16b, \in0\().16b, v8.16b rev32 v8.8h, v8.8h eor \in1\().16b, \in1\().16b, v9.16b rev32 v9.8h, v9.8h eor \in0\().16b, \in0\().16b, v8.16b eor \in1\().16b, \in1\().16b, v9.16b .endif mul_by_x_2x v8, v9, \in0, \in1, v10, v11, v12 rev32 v10.8h, \in0\().8h rev32 v11.8h, \in1\().8h eor v10.16b, v10.16b, v8.16b eor v11.16b, v11.16b, v9.16b eor \in0\().16b, \in0\().16b, v10.16b eor \in1\().16b, \in1\().16b, v11.16b tbl \in0\().16b, {\in0\().16b}, v14.16b tbl \in1\().16b, {\in1\().16b}, v14.16b eor \in0\().16b, \in0\().16b, v10.16b eor \in1\().16b, \in1\().16b, v11.16b .endm .macro do_block_2x, enc, in0, in1, rounds, rk, rkp, i ld1 {v15.4s}, [\rk] add \rkp, \rk, #16 mov \i, \rounds 1111: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */ eor \in1\().16b, \in1\().16b, v15.16b /* ^round key */ movi v15.16b, #0x40 tbl \in0\().16b, {\in0\().16b}, v13.16b /* ShiftRows */ tbl \in1\().16b, {\in1\().16b}, 
v13.16b /* ShiftRows */ sub_bytes_2x \in0, \in1 subs \i, \i, #1 ld1 {v15.4s}, [\rkp], #16 beq 2222f mix_columns_2x \in0, \in1, \enc b 1111b 2222: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */ eor \in1\().16b, \in1\().16b, v15.16b /* ^round key */ .endm .macro do_block_4x, enc, in0, in1, in2, in3, rounds, rk, rkp, i ld1 {v15.4s}, [\rk] add \rkp, \rk, #16 mov \i, \rounds 1111: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */ eor \in1\().16b, \in1\().16b, v15.16b /* ^round key */ eor \in2\().16b, \in2\().16b, v15.16b /* ^round key */ eor \in3\().16b, \in3\().16b, v15.16b /* ^round key */ movi v15.16b, #0x40 tbl \in0\().16b, {\in0\().16b}, v13.16b /* ShiftRows */ tbl \in1\().16b, {\in1\().16b}, v13.16b /* ShiftRows */ tbl \in2\().16b, {\in2\().16b}, v13.16b /* ShiftRows */ tbl \in3\().16b, {\in3\().16b}, v13.16b /* ShiftRows */ sub_bytes_4x \in0, \in1, \in2, \in3 subs \i, \i, #1 ld1 {v15.4s}, [\rkp], #16 beq 2222f mix_columns_2x \in0, \in1, \enc mix_columns_2x \in2, \in3, \enc b 1111b 2222: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */ eor \in1\().16b, \in1\().16b, v15.16b /* ^round key */ eor \in2\().16b, \in2\().16b, v15.16b /* ^round key */ eor \in3\().16b, \in3\().16b, v15.16b /* ^round key */ .endm .macro encrypt_block2x, in0, in1, rounds, rk, rkp, i do_block_2x 1, \in0, \in1, \rounds, \rk, \rkp, \i .endm .macro decrypt_block2x, in0, in1, rounds, rk, rkp, i do_block_2x 0, \in0, \in1, \rounds, \rk, \rkp, \i .endm .macro encrypt_block4x, in0, in1, in2, in3, rounds, rk, rkp, i do_block_4x 1, \in0, \in1, \in2, \in3, \rounds, \rk, \rkp, \i .endm .macro decrypt_block4x, in0, in1, in2, in3, rounds, rk, rkp, i do_block_4x 0, \in0, \in1, \in2, \in3, \rounds, \rk, \rkp, \i .endm #include "aes-modes.S" .section ".rodata", "a" .align 6 .LForward_Sbox: .byte 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5 .byte 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76 .byte 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0 .byte 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0 .byte 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc .byte 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15 .byte 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a .byte 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75 .byte 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0 .byte 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84 .byte 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b .byte 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf .byte 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85 .byte 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8 .byte 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5 .byte 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2 .byte 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17 .byte 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73 .byte 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88 .byte 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb .byte 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c .byte 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79 .byte 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9 .byte 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08 .byte 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6 .byte 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a .byte 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e .byte 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e .byte 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94 .byte 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf .byte 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68 .byte 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 .LReverse_Sbox: .byte 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 
0x38 .byte 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb .byte 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87 .byte 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb .byte 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d .byte 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e .byte 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2 .byte 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25 .byte 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16 .byte 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92 .byte 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda .byte 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84 .byte 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a .byte 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06 .byte 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02 .byte 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b .byte 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea .byte 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73 .byte 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85 .byte 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e .byte 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89 .byte 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b .byte 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20 .byte 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4 .byte 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31 .byte 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f .byte 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d .byte 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef .byte 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0 .byte 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61 .byte 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26 .byte 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d .LForward_ShiftRows: .octa 0x0b06010c07020d08030e09040f0a0500 .LReverse_ShiftRows: .octa 0x0306090c0f0205080b0e0104070a0d00 .Lror32by8: .octa 0x0c0f0e0d080b0a090407060500030201
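The mul_by_x and mul_by_x2 macros above multiply each byte by the polynomials x and x^2 in GF(2^8), reducing modulo the AES polynomial (the 0x1b constant held in v12). A minimal scalar C sketch of the same two operations, on single bytes rather than 16-byte NEON vectors:

#include <stdint.h>

/* multiply by x: shift left one bit and, if bit 7 was set, reduce with 0x1b
 * (the sshr/shl/and/eor sequence of mul_by_x) */
static uint8_t gf_mul_by_x(uint8_t b)
{
	return (uint8_t)((b << 1) ^ ((b >> 7) * 0x1b));
}

/* multiply by x^2: shift left two bits and fold each of the two shifted-out
 * bits back in with the reduction constant (the ushr/shl/pmul/eor sequence of
 * mul_by_x2; pmul is a carry-less multiply, hence the per-bit XOR here) */
static uint8_t gf_mul_by_x2(uint8_t b)
{
	uint8_t hi = b >> 6;
	uint8_t red = ((hi & 1) ? 0x1b : 0) ^ ((hi & 2) ? 0x36 : 0);

	return (uint8_t)((b << 2) ^ red);
}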
AirFortressIlikara/LS2K0300-linux-4.19
7,128
arch/arm64/crypto/crc32-ce-core.S
/* * Accelerated CRC32(C) using arm64 CRC, NEON and Crypto Extensions instructions * * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ /* GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 only, * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License version 2 for more details (a copy is included * in the LICENSE file that accompanied this code). * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see http://www.gnu.org/licenses * * Please visit http://www.xyratex.com/contact if you need additional * information or have any questions. * * GPL HEADER END */ /* * Copyright 2012 Xyratex Technology Limited * * Using hardware provided PCLMULQDQ instruction to accelerate the CRC32 * calculation. * CRC32 polynomial:0x04c11db7(BE)/0xEDB88320(LE) * PCLMULQDQ is a new instruction in Intel SSE4.2, the reference can be found * at: * http://www.intel.com/products/processor/manuals/ * Intel(R) 64 and IA-32 Architectures Software Developer's Manual * Volume 2B: Instruction Set Reference, N-Z * * Authors: Gregory Prestas <Gregory_Prestas@us.xyratex.com> * Alexander Boyko <Alexander_Boyko@xyratex.com> */ #include <linux/linkage.h> #include <asm/assembler.h> .section ".rodata", "a" .align 6 .cpu generic+crypto+crc .Lcrc32_constants: /* * [x4*128+32 mod P(x) << 32)]' << 1 = 0x154442bd4 * #define CONSTANT_R1 0x154442bd4LL * * [(x4*128-32 mod P(x) << 32)]' << 1 = 0x1c6e41596 * #define CONSTANT_R2 0x1c6e41596LL */ .octa 0x00000001c6e415960000000154442bd4 /* * [(x128+32 mod P(x) << 32)]' << 1 = 0x1751997d0 * #define CONSTANT_R3 0x1751997d0LL * * [(x128-32 mod P(x) << 32)]' << 1 = 0x0ccaa009e * #define CONSTANT_R4 0x0ccaa009eLL */ .octa 0x00000000ccaa009e00000001751997d0 /* * [(x64 mod P(x) << 32)]' << 1 = 0x163cd6124 * #define CONSTANT_R5 0x163cd6124LL */ .quad 0x0000000163cd6124 .quad 0x00000000FFFFFFFF /* * #define CRCPOLY_TRUE_LE_FULL 0x1DB710641LL * * Barrett Reduction constant (u64`) = u` = (x**64 / P(x))` * = 0x1F7011641LL * #define CONSTANT_RU 0x1F7011641LL */ .octa 0x00000001F701164100000001DB710641 .Lcrc32c_constants: .octa 0x000000009e4addf800000000740eef02 .octa 0x000000014cd00bd600000000f20c0dfe .quad 0x00000000dd45aab8 .quad 0x00000000FFFFFFFF .octa 0x00000000dea713f10000000105ec76f0 vCONSTANT .req v0 dCONSTANT .req d0 qCONSTANT .req q0 BUF .req x19 LEN .req x20 CRC .req x21 CONST .req x22 vzr .req v9 /** * Calculate crc32 * BUF - buffer * LEN - sizeof buffer (multiple of 16 bytes), LEN should be > 63 * CRC - initial crc32 * return %eax crc32 * uint crc32_pmull_le(unsigned char const *buffer, * size_t len, uint crc32) */ .text ENTRY(crc32_pmull_le) adr_l x3, .Lcrc32_constants b 0f ENTRY(crc32c_pmull_le) adr_l x3, .Lcrc32c_constants 0: frame_push 4, 64 mov BUF, x0 mov LEN, x1 mov CRC, x2 mov CONST, x3 bic LEN, LEN, #15 ld1 {v1.16b-v4.16b}, [BUF], #0x40 movi vzr.16b, #0 fmov dCONSTANT, CRC eor v1.16b, v1.16b, vCONSTANT.16b sub LEN, LEN, #0x40 cmp LEN, #0x40 b.lt 
less_64 ldr qCONSTANT, [CONST] loop_64: /* 64 bytes Full cache line folding */ sub LEN, LEN, #0x40 pmull2 v5.1q, v1.2d, vCONSTANT.2d pmull2 v6.1q, v2.2d, vCONSTANT.2d pmull2 v7.1q, v3.2d, vCONSTANT.2d pmull2 v8.1q, v4.2d, vCONSTANT.2d pmull v1.1q, v1.1d, vCONSTANT.1d pmull v2.1q, v2.1d, vCONSTANT.1d pmull v3.1q, v3.1d, vCONSTANT.1d pmull v4.1q, v4.1d, vCONSTANT.1d eor v1.16b, v1.16b, v5.16b ld1 {v5.16b}, [BUF], #0x10 eor v2.16b, v2.16b, v6.16b ld1 {v6.16b}, [BUF], #0x10 eor v3.16b, v3.16b, v7.16b ld1 {v7.16b}, [BUF], #0x10 eor v4.16b, v4.16b, v8.16b ld1 {v8.16b}, [BUF], #0x10 eor v1.16b, v1.16b, v5.16b eor v2.16b, v2.16b, v6.16b eor v3.16b, v3.16b, v7.16b eor v4.16b, v4.16b, v8.16b cmp LEN, #0x40 b.lt less_64 if_will_cond_yield_neon stp q1, q2, [sp, #.Lframe_local_offset] stp q3, q4, [sp, #.Lframe_local_offset + 32] do_cond_yield_neon ldp q1, q2, [sp, #.Lframe_local_offset] ldp q3, q4, [sp, #.Lframe_local_offset + 32] ldr qCONSTANT, [CONST] movi vzr.16b, #0 endif_yield_neon b loop_64 less_64: /* Folding cache line into 128bit */ ldr qCONSTANT, [CONST, #16] pmull2 v5.1q, v1.2d, vCONSTANT.2d pmull v1.1q, v1.1d, vCONSTANT.1d eor v1.16b, v1.16b, v5.16b eor v1.16b, v1.16b, v2.16b pmull2 v5.1q, v1.2d, vCONSTANT.2d pmull v1.1q, v1.1d, vCONSTANT.1d eor v1.16b, v1.16b, v5.16b eor v1.16b, v1.16b, v3.16b pmull2 v5.1q, v1.2d, vCONSTANT.2d pmull v1.1q, v1.1d, vCONSTANT.1d eor v1.16b, v1.16b, v5.16b eor v1.16b, v1.16b, v4.16b cbz LEN, fold_64 loop_16: /* Folding rest buffer into 128bit */ subs LEN, LEN, #0x10 ld1 {v2.16b}, [BUF], #0x10 pmull2 v5.1q, v1.2d, vCONSTANT.2d pmull v1.1q, v1.1d, vCONSTANT.1d eor v1.16b, v1.16b, v5.16b eor v1.16b, v1.16b, v2.16b b.ne loop_16 fold_64: /* perform the last 64 bit fold, also adds 32 zeroes * to the input stream */ ext v2.16b, v1.16b, v1.16b, #8 pmull2 v2.1q, v2.2d, vCONSTANT.2d ext v1.16b, v1.16b, vzr.16b, #8 eor v1.16b, v1.16b, v2.16b /* final 32-bit fold */ ldr dCONSTANT, [CONST, #32] ldr d3, [CONST, #40] ext v2.16b, v1.16b, vzr.16b, #4 and v1.16b, v1.16b, v3.16b pmull v1.1q, v1.1d, vCONSTANT.1d eor v1.16b, v1.16b, v2.16b /* Finish up with the bit-reversed barrett reduction 64 ==> 32 bits */ ldr qCONSTANT, [CONST, #48] and v2.16b, v1.16b, v3.16b ext v2.16b, vzr.16b, v2.16b, #8 pmull2 v2.1q, v2.2d, vCONSTANT.2d and v2.16b, v2.16b, v3.16b pmull v2.1q, v2.1d, vCONSTANT.1d eor v1.16b, v1.16b, v2.16b mov w0, v1.s[1] frame_pop ret ENDPROC(crc32_pmull_le) ENDPROC(crc32c_pmull_le) .macro __crc32, c 0: subs x2, x2, #16 b.mi 8f ldp x3, x4, [x1], #16 CPU_BE( rev x3, x3 ) CPU_BE( rev x4, x4 ) crc32\c\()x w0, w0, x3 crc32\c\()x w0, w0, x4 b.ne 0b ret 8: tbz x2, #3, 4f ldr x3, [x1], #8 CPU_BE( rev x3, x3 ) crc32\c\()x w0, w0, x3 4: tbz x2, #2, 2f ldr w3, [x1], #4 CPU_BE( rev w3, w3 ) crc32\c\()w w0, w0, w3 2: tbz x2, #1, 1f ldrh w3, [x1], #2 CPU_BE( rev16 w3, w3 ) crc32\c\()h w0, w0, w3 1: tbz x2, #0, 0f ldrb w3, [x1] crc32\c\()b w0, w0, w3 0: ret .endm .align 5 ENTRY(crc32_armv8_le) __crc32 ENDPROC(crc32_armv8_le) .align 5 ENTRY(crc32c_armv8_le) __crc32 c ENDPROC(crc32c_armv8_le)
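Both PMULL entry points fold 64 bytes per iteration and finish with a Barrett reduction; the __crc32 macro at the bottom is the scalar fallback built on the ARMv8 CRC32 instructions. For cross-checking either path, a bit-at-a-time C reference over the same reflected polynomials (0xEDB88320 for CRC32, 0x82F63B78 for CRC32C); as in the assembly, any pre-/post-inversion of the seed is left to the caller:

#include <stdint.h>
#include <stddef.h>

/* poly is the bit-reflected polynomial: 0xEDB88320 (CRC32) or 0x82F63B78 (CRC32C) */
static uint32_t crc32_le_ref(uint32_t crc, const unsigned char *p, size_t len,
			     uint32_t poly)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (poly & -(crc & 1));
	}
	return crc;
}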
AirFortressIlikara/LS2K0300-linux-4.19
4,130
arch/arm64/crypto/sha2-ce-core.S
/* * sha2-ce-core.S - core SHA-224/SHA-256 transform using v8 Crypto Extensions * * Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/linkage.h> #include <asm/assembler.h> .text .arch armv8-a+crypto dga .req q20 dgav .req v20 dgb .req q21 dgbv .req v21 t0 .req v22 t1 .req v23 dg0q .req q24 dg0v .req v24 dg1q .req q25 dg1v .req v25 dg2q .req q26 dg2v .req v26 .macro add_only, ev, rc, s0 mov dg2v.16b, dg0v.16b .ifeq \ev add t1.4s, v\s0\().4s, \rc\().4s sha256h dg0q, dg1q, t0.4s sha256h2 dg1q, dg2q, t0.4s .else .ifnb \s0 add t0.4s, v\s0\().4s, \rc\().4s .endif sha256h dg0q, dg1q, t1.4s sha256h2 dg1q, dg2q, t1.4s .endif .endm .macro add_update, ev, rc, s0, s1, s2, s3 sha256su0 v\s0\().4s, v\s1\().4s add_only \ev, \rc, \s1 sha256su1 v\s0\().4s, v\s2\().4s, v\s3\().4s .endm /* * The SHA-256 round constants */ .section ".rodata", "a" .align 4 .Lsha2_rcon: .word 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5 .word 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5 .word 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3 .word 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174 .word 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc .word 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da .word 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7 .word 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967 .word 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13 .word 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85 .word 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3 .word 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070 .word 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5 .word 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3 .word 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208 .word 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 /* * void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src, * int blocks) */ .text ENTRY(sha2_ce_transform) frame_push 3 mov x19, x0 mov x20, x1 mov x21, x2 /* load round constants */ 0: adr_l x8, .Lsha2_rcon ld1 { v0.4s- v3.4s}, [x8], #64 ld1 { v4.4s- v7.4s}, [x8], #64 ld1 { v8.4s-v11.4s}, [x8], #64 ld1 {v12.4s-v15.4s}, [x8] /* load state */ ld1 {dgav.4s, dgbv.4s}, [x19] /* load sha256_ce_state::finalize */ ldr_l w4, sha256_ce_offsetof_finalize, x4 ldr w4, [x19, x4] /* load input */ 1: ld1 {v16.4s-v19.4s}, [x20], #64 sub w21, w21, #1 CPU_LE( rev32 v16.16b, v16.16b ) CPU_LE( rev32 v17.16b, v17.16b ) CPU_LE( rev32 v18.16b, v18.16b ) CPU_LE( rev32 v19.16b, v19.16b ) 2: add t0.4s, v16.4s, v0.4s mov dg0v.16b, dgav.16b mov dg1v.16b, dgbv.16b add_update 0, v1, 16, 17, 18, 19 add_update 1, v2, 17, 18, 19, 16 add_update 0, v3, 18, 19, 16, 17 add_update 1, v4, 19, 16, 17, 18 add_update 0, v5, 16, 17, 18, 19 add_update 1, v6, 17, 18, 19, 16 add_update 0, v7, 18, 19, 16, 17 add_update 1, v8, 19, 16, 17, 18 add_update 0, v9, 16, 17, 18, 19 add_update 1, v10, 17, 18, 19, 16 add_update 0, v11, 18, 19, 16, 17 add_update 1, v12, 19, 16, 17, 18 add_only 0, v13, 17 add_only 1, v14, 18 add_only 0, v15, 19 add_only 1 /* update state */ add dgav.4s, dgav.4s, dg0v.4s add dgbv.4s, dgbv.4s, dg1v.4s /* handled all input blocks? */ cbz w21, 3f if_will_cond_yield_neon st1 {dgav.4s, dgbv.4s}, [x19] do_cond_yield_neon b 0b endif_yield_neon b 1b /* * Final block: add padding and total bit count. * Skip if the input size was not a round multiple of the block size, * the padding is handled by the C code in that case. 
*/ 3: cbz x4, 4f ldr_l w4, sha256_ce_offsetof_count, x4 ldr x4, [x19, x4] movi v17.2d, #0 mov x8, #0x80000000 movi v18.2d, #0 ror x7, x4, #29 // ror(lsl(x4, 3), 32) fmov d16, x8 mov x4, #0 mov v19.d[0], xzr mov v19.d[1], x7 b 2b /* store new state */ 4: st1 {dgav.4s, dgbv.4s}, [x19] frame_pop ret ENDPROC(sha2_ce_transform)
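The finalize path at label 3 only runs when sha256_ce_state::finalize is set, i.e. when the input ended exactly on a 64-byte boundary; it then builds the standard FIPS 180 padding block, a 0x80 byte followed by zeroes and the big-endian bit count, directly in v16-v19. The block it encodes, constructed in C in message byte order as a sketch:

#include <stdint.h>
#include <string.h>

/* SHA-256 padding block for a message that ends exactly on a 64-byte
 * boundary: 0x80, zero fill, then the total length in bits, big-endian */
static void sha256_final_block(uint8_t block[64], uint64_t total_bytes)
{
	uint64_t bits = total_bytes << 3;

	memset(block, 0, 64);
	block[0] = 0x80;
	for (int i = 0; i < 8; i++)
		block[63 - i] = (uint8_t)(bits >> (8 * i));
}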
AirFortressIlikara/LS2K0300-linux-4.19
1,805
arch/arm64/crypto/aes-ce-core.S
/* * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/linkage.h> #include <asm/assembler.h> .arch armv8-a+crypto ENTRY(__aes_ce_encrypt) sub w3, w3, #2 ld1 {v0.16b}, [x2] ld1 {v1.4s}, [x0], #16 cmp w3, #10 bmi 0f bne 3f mov v3.16b, v1.16b b 2f 0: mov v2.16b, v1.16b ld1 {v3.4s}, [x0], #16 1: aese v0.16b, v2.16b aesmc v0.16b, v0.16b 2: ld1 {v1.4s}, [x0], #16 aese v0.16b, v3.16b aesmc v0.16b, v0.16b 3: ld1 {v2.4s}, [x0], #16 subs w3, w3, #3 aese v0.16b, v1.16b aesmc v0.16b, v0.16b ld1 {v3.4s}, [x0], #16 bpl 1b aese v0.16b, v2.16b eor v0.16b, v0.16b, v3.16b st1 {v0.16b}, [x1] ret ENDPROC(__aes_ce_encrypt) ENTRY(__aes_ce_decrypt) sub w3, w3, #2 ld1 {v0.16b}, [x2] ld1 {v1.4s}, [x0], #16 cmp w3, #10 bmi 0f bne 3f mov v3.16b, v1.16b b 2f 0: mov v2.16b, v1.16b ld1 {v3.4s}, [x0], #16 1: aesd v0.16b, v2.16b aesimc v0.16b, v0.16b 2: ld1 {v1.4s}, [x0], #16 aesd v0.16b, v3.16b aesimc v0.16b, v0.16b 3: ld1 {v2.4s}, [x0], #16 subs w3, w3, #3 aesd v0.16b, v1.16b aesimc v0.16b, v0.16b ld1 {v3.4s}, [x0], #16 bpl 1b aesd v0.16b, v2.16b eor v0.16b, v0.16b, v3.16b st1 {v0.16b}, [x1] ret ENDPROC(__aes_ce_decrypt) /* * __aes_ce_sub() - use the aese instruction to perform the AES sbox * substitution on each byte in 'input' */ ENTRY(__aes_ce_sub) dup v1.4s, w0 movi v0.16b, #0 aese v0.16b, v1.16b umov w0, v0.s[0] ret ENDPROC(__aes_ce_sub) ENTRY(__aes_ce_invert) ld1 {v0.4s}, [x1] aesimc v1.16b, v0.16b st1 {v1.4s}, [x0] ret ENDPROC(__aes_ce_invert)
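__aes_ce_sub relies on aese over an all-zero state reducing to XOR-with-key, ShiftRows and SubBytes; with the input word duplicated across all four lanes, ShiftRows has no visible effect, so the result is simply the AES S-box applied to each byte. A scalar C sketch of that behaviour (the S-box is passed in, e.g. the .LForward_Sbox contents shown earlier):

#include <stdint.h>

/* apply the AES S-box to each byte of a 32-bit word, matching the value
 * __aes_ce_sub returns in w0 */
static uint32_t aes_sub_word(uint32_t in, const uint8_t sbox[256])
{
	uint32_t out = 0;

	for (int i = 0; i < 4; i++)
		out |= (uint32_t)sbox[(in >> (8 * i)) & 0xff] << (8 * i);
	return out;
}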
AirFortressIlikara/LS2K0300-linux-4.19
12,897
arch/arm64/crypto/crct10dif-ce-core.S
// // Accelerated CRC-T10DIF using arm64 NEON and Crypto Extensions instructions // // Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org> // // This program is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License version 2 as // published by the Free Software Foundation. // // // Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions // // Copyright (c) 2013, Intel Corporation // // Authors: // Erdinc Ozturk <erdinc.ozturk@intel.com> // Vinodh Gopal <vinodh.gopal@intel.com> // James Guilford <james.guilford@intel.com> // Tim Chen <tim.c.chen@linux.intel.com> // // This software is available to you under a choice of one of two // licenses. You may choose to be licensed under the terms of the GNU // General Public License (GPL) Version 2, available from the file // COPYING in the main directory of this source tree, or the // OpenIB.org BSD license below: // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the // distribution. // // * Neither the name of the Intel Corporation nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // // THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION ""AS IS"" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Function API: // UINT16 crc_t10dif_pcl( // UINT16 init_crc, //initial CRC value, 16 bits // const unsigned char *buf, //buffer pointer to calculate CRC on // UINT64 len //buffer length in bytes (64-bit data) // ); // // Reference paper titled "Fast CRC Computation for Generic // Polynomials Using PCLMULQDQ Instruction" // URL: http://www.intel.com/content/dam/www/public/us/en/documents // /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf // // #include <linux/linkage.h> #include <asm/assembler.h> .text .cpu generic+crypto arg1_low32 .req w19 arg2 .req x20 arg3 .req x21 vzr .req v13 ENTRY(crc_t10dif_pmull) frame_push 3, 128 mov arg1_low32, w0 mov arg2, x1 mov arg3, x2 movi vzr.16b, #0 // init zero register // adjust the 16-bit initial_crc value, scale it to 32 bits lsl arg1_low32, arg1_low32, #16 // check if smaller than 256 cmp arg3, #256 // for sizes less than 128, we can't fold 64B at a time... b.lt _less_than_128 // load the initial crc value // crc value does not need to be byte-reflected, but it needs // to be moved to the high part of the register. 
// because data will be byte-reflected and will align with // initial crc at correct place. movi v10.16b, #0 mov v10.s[3], arg1_low32 // initial crc // receive the initial 64B data, xor the initial crc value ldp q0, q1, [arg2] ldp q2, q3, [arg2, #0x20] ldp q4, q5, [arg2, #0x40] ldp q6, q7, [arg2, #0x60] add arg2, arg2, #0x80 CPU_LE( rev64 v0.16b, v0.16b ) CPU_LE( rev64 v1.16b, v1.16b ) CPU_LE( rev64 v2.16b, v2.16b ) CPU_LE( rev64 v3.16b, v3.16b ) CPU_LE( rev64 v4.16b, v4.16b ) CPU_LE( rev64 v5.16b, v5.16b ) CPU_LE( rev64 v6.16b, v6.16b ) CPU_LE( rev64 v7.16b, v7.16b ) CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 ) CPU_LE( ext v1.16b, v1.16b, v1.16b, #8 ) CPU_LE( ext v2.16b, v2.16b, v2.16b, #8 ) CPU_LE( ext v3.16b, v3.16b, v3.16b, #8 ) CPU_LE( ext v4.16b, v4.16b, v4.16b, #8 ) CPU_LE( ext v5.16b, v5.16b, v5.16b, #8 ) CPU_LE( ext v6.16b, v6.16b, v6.16b, #8 ) CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 ) // XOR the initial_crc value eor v0.16b, v0.16b, v10.16b ldr_l q10, rk3, x8 // xmm10 has rk3 and rk4 // type of pmull instruction // will determine which constant to use // // we subtract 256 instead of 128 to save one instruction from the loop // sub arg3, arg3, #256 // at this section of the code, there is 64*x+y (0<=y<64) bytes of // buffer. The _fold_64_B_loop will fold 64B at a time // until we have 64+y Bytes of buffer // fold 64B at a time. This section of the code folds 4 vector // registers in parallel _fold_64_B_loop: .macro fold64, reg1, reg2 ldp q11, q12, [arg2], #0x20 pmull2 v8.1q, \reg1\().2d, v10.2d pmull \reg1\().1q, \reg1\().1d, v10.1d CPU_LE( rev64 v11.16b, v11.16b ) CPU_LE( rev64 v12.16b, v12.16b ) pmull2 v9.1q, \reg2\().2d, v10.2d pmull \reg2\().1q, \reg2\().1d, v10.1d CPU_LE( ext v11.16b, v11.16b, v11.16b, #8 ) CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 ) eor \reg1\().16b, \reg1\().16b, v8.16b eor \reg2\().16b, \reg2\().16b, v9.16b eor \reg1\().16b, \reg1\().16b, v11.16b eor \reg2\().16b, \reg2\().16b, v12.16b .endm fold64 v0, v1 fold64 v2, v3 fold64 v4, v5 fold64 v6, v7 subs arg3, arg3, #128 // check if there is another 64B in the buffer to be able to fold b.lt _fold_64_B_end if_will_cond_yield_neon stp q0, q1, [sp, #.Lframe_local_offset] stp q2, q3, [sp, #.Lframe_local_offset + 32] stp q4, q5, [sp, #.Lframe_local_offset + 64] stp q6, q7, [sp, #.Lframe_local_offset + 96] do_cond_yield_neon ldp q0, q1, [sp, #.Lframe_local_offset] ldp q2, q3, [sp, #.Lframe_local_offset + 32] ldp q4, q5, [sp, #.Lframe_local_offset + 64] ldp q6, q7, [sp, #.Lframe_local_offset + 96] ldr_l q10, rk3, x8 movi vzr.16b, #0 // init zero register endif_yield_neon b _fold_64_B_loop _fold_64_B_end: // at this point, the buffer pointer is pointing at the last y Bytes // of the buffer the 64B of folded data is in 4 of the vector // registers: v0, v1, v2, v3 // fold the 8 vector registers to 1 vector register with different // constants ldr_l q10, rk9, x8 .macro fold16, reg, rk pmull v8.1q, \reg\().1d, v10.1d pmull2 \reg\().1q, \reg\().2d, v10.2d .ifnb \rk ldr_l q10, \rk, x8 .endif eor v7.16b, v7.16b, v8.16b eor v7.16b, v7.16b, \reg\().16b .endm fold16 v0, rk11 fold16 v1, rk13 fold16 v2, rk15 fold16 v3, rk17 fold16 v4, rk19 fold16 v5, rk1 fold16 v6 // instead of 64, we add 48 to the loop counter to save 1 instruction // from the loop instead of a cmp instruction, we use the negative // flag with the jl instruction adds arg3, arg3, #(128-16) b.lt _final_reduction_for_128 // now we have 16+y bytes left to reduce. 16 Bytes is in register v7 // and the rest is in memory. 
We can fold 16 bytes at a time if y>=16 // continue folding 16B at a time _16B_reduction_loop: pmull v8.1q, v7.1d, v10.1d pmull2 v7.1q, v7.2d, v10.2d eor v7.16b, v7.16b, v8.16b ldr q0, [arg2], #16 CPU_LE( rev64 v0.16b, v0.16b ) CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 ) eor v7.16b, v7.16b, v0.16b subs arg3, arg3, #16 // instead of a cmp instruction, we utilize the flags with the // jge instruction equivalent of: cmp arg3, 16-16 // check if there is any more 16B in the buffer to be able to fold b.ge _16B_reduction_loop // now we have 16+z bytes left to reduce, where 0<= z < 16. // first, we reduce the data in the xmm7 register _final_reduction_for_128: // check if any more data to fold. If not, compute the CRC of // the final 128 bits adds arg3, arg3, #16 b.eq _128_done // here we are getting data that is less than 16 bytes. // since we know that there was data before the pointer, we can // offset the input pointer before the actual point, to receive // exactly 16 bytes. after that the registers need to be adjusted. _get_last_two_regs: add arg2, arg2, arg3 ldr q1, [arg2, #-16] CPU_LE( rev64 v1.16b, v1.16b ) CPU_LE( ext v1.16b, v1.16b, v1.16b, #8 ) // get rid of the extra data that was loaded before // load the shift constant adr_l x4, tbl_shf_table + 16 sub x4, x4, arg3 ld1 {v0.16b}, [x4] // shift v2 to the left by arg3 bytes tbl v2.16b, {v7.16b}, v0.16b // shift v7 to the right by 16-arg3 bytes movi v9.16b, #0x80 eor v0.16b, v0.16b, v9.16b tbl v7.16b, {v7.16b}, v0.16b // blend sshr v0.16b, v0.16b, #7 // convert to 8-bit mask bsl v0.16b, v2.16b, v1.16b // fold 16 Bytes pmull v8.1q, v7.1d, v10.1d pmull2 v7.1q, v7.2d, v10.2d eor v7.16b, v7.16b, v8.16b eor v7.16b, v7.16b, v0.16b _128_done: // compute crc of a 128-bit value ldr_l q10, rk5, x8 // rk5 and rk6 in xmm10 // 64b fold ext v0.16b, vzr.16b, v7.16b, #8 mov v7.d[0], v7.d[1] pmull v7.1q, v7.1d, v10.1d eor v7.16b, v7.16b, v0.16b // 32b fold ext v0.16b, v7.16b, vzr.16b, #4 mov v7.s[3], vzr.s[0] pmull2 v0.1q, v0.2d, v10.2d eor v7.16b, v7.16b, v0.16b // barrett reduction _barrett: ldr_l q10, rk7, x8 mov v0.d[0], v7.d[1] pmull v0.1q, v0.1d, v10.1d ext v0.16b, vzr.16b, v0.16b, #12 pmull2 v0.1q, v0.2d, v10.2d ext v0.16b, vzr.16b, v0.16b, #12 eor v7.16b, v7.16b, v0.16b mov w0, v7.s[1] _cleanup: // scale the result back to 16 bits lsr x0, x0, #16 frame_pop ret _less_than_128: cbz arg3, _cleanup movi v0.16b, #0 mov v0.s[3], arg1_low32 // get the initial crc value ldr q7, [arg2], #0x10 CPU_LE( rev64 v7.16b, v7.16b ) CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 ) eor v7.16b, v7.16b, v0.16b // xor the initial crc value cmp arg3, #16 b.eq _128_done // exactly 16 left b.lt _less_than_16_left ldr_l q10, rk1, x8 // rk1 and rk2 in xmm10 // update the counter. 
subtract 32 instead of 16 to save one // instruction from the loop subs arg3, arg3, #32 b.ge _16B_reduction_loop add arg3, arg3, #16 b _get_last_two_regs _less_than_16_left: // shl r9, 4 adr_l x0, tbl_shf_table + 16 sub x0, x0, arg3 ld1 {v0.16b}, [x0] movi v9.16b, #0x80 eor v0.16b, v0.16b, v9.16b tbl v7.16b, {v7.16b}, v0.16b b _128_done ENDPROC(crc_t10dif_pmull) // precomputed constants // these constants are precomputed from the poly: // 0x8bb70000 (0x8bb7 scaled to 32 bits) .section ".rodata", "a" .align 4 // Q = 0x18BB70000 // rk1 = 2^(32*3) mod Q << 32 // rk2 = 2^(32*5) mod Q << 32 // rk3 = 2^(32*15) mod Q << 32 // rk4 = 2^(32*17) mod Q << 32 // rk5 = 2^(32*3) mod Q << 32 // rk6 = 2^(32*2) mod Q << 32 // rk7 = floor(2^64/Q) // rk8 = Q rk1: .octa 0x06df0000000000002d56000000000000 rk3: .octa 0x7cf50000000000009d9d000000000000 rk5: .octa 0x13680000000000002d56000000000000 rk7: .octa 0x000000018bb7000000000001f65a57f8 rk9: .octa 0xbfd6000000000000ceae000000000000 rk11: .octa 0x713c0000000000001e16000000000000 rk13: .octa 0x80a6000000000000f7f9000000000000 rk15: .octa 0xe658000000000000044c000000000000 rk17: .octa 0xa497000000000000ad18000000000000 rk19: .octa 0xe7b50000000000006ee3000000000000 tbl_shf_table: // use these values for shift constants for the tbl/tbx instruction // different alignments result in values as shown: // DDQ 0x008f8e8d8c8b8a898887868584838281 # shl 15 (16-1) / shr1 // DDQ 0x01008f8e8d8c8b8a8988878685848382 # shl 14 (16-3) / shr2 // DDQ 0x0201008f8e8d8c8b8a89888786858483 # shl 13 (16-4) / shr3 // DDQ 0x030201008f8e8d8c8b8a898887868584 # shl 12 (16-4) / shr4 // DDQ 0x04030201008f8e8d8c8b8a8988878685 # shl 11 (16-5) / shr5 // DDQ 0x0504030201008f8e8d8c8b8a89888786 # shl 10 (16-6) / shr6 // DDQ 0x060504030201008f8e8d8c8b8a898887 # shl 9 (16-7) / shr7 // DDQ 0x07060504030201008f8e8d8c8b8a8988 # shl 8 (16-8) / shr8 // DDQ 0x0807060504030201008f8e8d8c8b8a89 # shl 7 (16-9) / shr9 // DDQ 0x090807060504030201008f8e8d8c8b8a # shl 6 (16-10) / shr10 // DDQ 0x0a090807060504030201008f8e8d8c8b # shl 5 (16-11) / shr11 // DDQ 0x0b0a090807060504030201008f8e8d8c # shl 4 (16-12) / shr12 // DDQ 0x0c0b0a090807060504030201008f8e8d # shl 3 (16-13) / shr13 // DDQ 0x0d0c0b0a090807060504030201008f8e # shl 2 (16-14) / shr14 // DDQ 0x0e0d0c0b0a090807060504030201008f # shl 1 (16-15) / shr15 .byte 0x0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87 .byte 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f .byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7 .byte 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe , 0x0
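All of the rk constants above derive from the CRC-T10DIF polynomial 0x8bb7 (0x18bb7 with the implicit top bit). A bit-at-a-time C reference for the same CRC, MSB-first and unreflected, useful for validating crc_t10dif_pmull on small inputs:

#include <stdint.h>
#include <stddef.h>

static uint16_t crc_t10dif_ref(uint16_t crc, const unsigned char *p, size_t len)
{
	while (len--) {
		crc ^= (uint16_t)(*p++) << 8;
		for (int i = 0; i < 8; i++)
			crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x8bb7)
					     : (uint16_t)(crc << 1);
	}
	return crc;
}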
AirFortressIlikara/LS2K0300-linux-4.19
5,405
arch/arm64/crypto/aes-cipher-core.S
/* * Scalar AES core transform * * Copyright (C) 2017 Linaro Ltd <ard.biesheuvel@linaro.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/cache.h> .text rk .req x0 out .req x1 in .req x2 rounds .req x3 tt .req x2 .macro __pair1, sz, op, reg0, reg1, in0, in1e, in1d, shift .ifc \op\shift, b0 ubfiz \reg0, \in0, #2, #8 ubfiz \reg1, \in1e, #2, #8 .else ubfx \reg0, \in0, #\shift, #8 ubfx \reg1, \in1e, #\shift, #8 .endif /* * AArch64 cannot do byte size indexed loads from a table containing * 32-bit quantities, i.e., 'ldrb w12, [tt, w12, uxtw #2]' is not a * valid instruction. So perform the shift explicitly first for the * high bytes (the low byte is shifted implicitly by using ubfiz rather * than ubfx above) */ .ifnc \op, b ldr \reg0, [tt, \reg0, uxtw #2] ldr \reg1, [tt, \reg1, uxtw #2] .else .if \shift > 0 lsl \reg0, \reg0, #2 lsl \reg1, \reg1, #2 .endif ldrb \reg0, [tt, \reg0, uxtw] ldrb \reg1, [tt, \reg1, uxtw] .endif .endm .macro __pair0, sz, op, reg0, reg1, in0, in1e, in1d, shift ubfx \reg0, \in0, #\shift, #8 ubfx \reg1, \in1d, #\shift, #8 ldr\op \reg0, [tt, \reg0, uxtw #\sz] ldr\op \reg1, [tt, \reg1, uxtw #\sz] .endm .macro __hround, out0, out1, in0, in1, in2, in3, t0, t1, enc, sz, op ldp \out0, \out1, [rk], #8 __pair\enc \sz, \op, w12, w13, \in0, \in1, \in3, 0 __pair\enc \sz, \op, w14, w15, \in1, \in2, \in0, 8 __pair\enc \sz, \op, w16, w17, \in2, \in3, \in1, 16 __pair\enc \sz, \op, \t0, \t1, \in3, \in0, \in2, 24 eor \out0, \out0, w12 eor \out1, \out1, w13 eor \out0, \out0, w14, ror #24 eor \out1, \out1, w15, ror #24 eor \out0, \out0, w16, ror #16 eor \out1, \out1, w17, ror #16 eor \out0, \out0, \t0, ror #8 eor \out1, \out1, \t1, ror #8 .endm .macro fround, out0, out1, out2, out3, in0, in1, in2, in3, sz=2, op __hround \out0, \out1, \in0, \in1, \in2, \in3, \out2, \out3, 1, \sz, \op __hround \out2, \out3, \in2, \in3, \in0, \in1, \in1, \in2, 1, \sz, \op .endm .macro iround, out0, out1, out2, out3, in0, in1, in2, in3, sz=2, op __hround \out0, \out1, \in0, \in3, \in2, \in1, \out2, \out3, 0, \sz, \op __hround \out2, \out3, \in2, \in1, \in0, \in3, \in1, \in0, 0, \sz, \op .endm .macro do_crypt, round, ttab, ltab, bsz ldp w4, w5, [in] ldp w6, w7, [in, #8] ldp w8, w9, [rk], #16 ldp w10, w11, [rk, #-8] CPU_BE( rev w4, w4 ) CPU_BE( rev w5, w5 ) CPU_BE( rev w6, w6 ) CPU_BE( rev w7, w7 ) eor w4, w4, w8 eor w5, w5, w9 eor w6, w6, w10 eor w7, w7, w11 adr_l tt, \ttab tbnz rounds, #1, 1f 0: \round w8, w9, w10, w11, w4, w5, w6, w7 \round w4, w5, w6, w7, w8, w9, w10, w11 1: subs rounds, rounds, #4 \round w8, w9, w10, w11, w4, w5, w6, w7 b.ls 3f 2: \round w4, w5, w6, w7, w8, w9, w10, w11 b 0b 3: adr_l tt, \ltab \round w4, w5, w6, w7, w8, w9, w10, w11, \bsz, b CPU_BE( rev w4, w4 ) CPU_BE( rev w5, w5 ) CPU_BE( rev w6, w6 ) CPU_BE( rev w7, w7 ) stp w4, w5, [out] stp w6, w7, [out, #8] ret .endm ENTRY(__aes_arm64_encrypt) do_crypt fround, crypto_ft_tab, crypto_ft_tab + 1, 2 ENDPROC(__aes_arm64_encrypt) .align 5 ENTRY(__aes_arm64_decrypt) do_crypt iround, crypto_it_tab, __aes_arm64_inverse_sbox, 0 ENDPROC(__aes_arm64_decrypt) .section ".rodata", "a" .align L1_CACHE_SHIFT .type __aes_arm64_inverse_sbox, %object __aes_arm64_inverse_sbox: .byte 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38 .byte 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb .byte 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87 .byte 0x34, 
0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb .byte 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d .byte 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e .byte 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2 .byte 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25 .byte 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16 .byte 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92 .byte 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda .byte 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84 .byte 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a .byte 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06 .byte 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02 .byte 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b .byte 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea .byte 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73 .byte 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85 .byte 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e .byte 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89 .byte 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b .byte 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20 .byte 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4 .byte 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31 .byte 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f .byte 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d .byte 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef .byte 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0 .byte 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61 .byte 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26 .byte 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d .size __aes_arm64_inverse_sbox, . - __aes_arm64_inverse_sbox
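The __hround macro combines four table lookups per output word, rotating three of them so a single 256-entry 32-bit table covers all byte positions. A C sketch of one such output word, mirroring the eor/ror chain; ft_tab is a hypothetical forward table in the same layout the unrotated lookup assumes, so treat the exact rotation convention as illustrative rather than a statement about crypto_ft_tab:

#include <stdint.h>

/* n must be in 1..31 here */
static inline uint32_t ror32(uint32_t x, unsigned n)
{
	return (x >> n) | (x << (32 - n));
}

/* one output column of an encryption round: round key XOR four table
 * entries, three of them rotated to cover byte positions 1..3 */
static uint32_t fround_column(const uint32_t ft_tab[256], uint32_t rk,
			      uint32_t c0, uint32_t c1, uint32_t c2, uint32_t c3)
{
	return rk ^ ft_tab[c0 & 0xff]
		  ^ ror32(ft_tab[(c1 >>  8) & 0xff], 24)
		  ^ ror32(ft_tab[(c2 >> 16) & 0xff], 16)
		  ^ ror32(ft_tab[(c3 >> 24) & 0xff],  8);
}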
AirFortressIlikara/LS2K0300-linux-4.19
3,408
arch/arm64/crypto/sm3-ce-core.S
/* * sm3-ce-core.S - SM3 secure hash using ARMv8.2 Crypto Extensions * * Copyright (C) 2018 Linaro Ltd <ard.biesheuvel@linaro.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/linkage.h> #include <asm/assembler.h> .irp b, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 .set .Lv\b\().4s, \b .endr .macro sm3partw1, rd, rn, rm .inst 0xce60c000 | .L\rd | (.L\rn << 5) | (.L\rm << 16) .endm .macro sm3partw2, rd, rn, rm .inst 0xce60c400 | .L\rd | (.L\rn << 5) | (.L\rm << 16) .endm .macro sm3ss1, rd, rn, rm, ra .inst 0xce400000 | .L\rd | (.L\rn << 5) | (.L\ra << 10) | (.L\rm << 16) .endm .macro sm3tt1a, rd, rn, rm, imm2 .inst 0xce408000 | .L\rd | (.L\rn << 5) | ((\imm2) << 12) | (.L\rm << 16) .endm .macro sm3tt1b, rd, rn, rm, imm2 .inst 0xce408400 | .L\rd | (.L\rn << 5) | ((\imm2) << 12) | (.L\rm << 16) .endm .macro sm3tt2a, rd, rn, rm, imm2 .inst 0xce408800 | .L\rd | (.L\rn << 5) | ((\imm2) << 12) | (.L\rm << 16) .endm .macro sm3tt2b, rd, rn, rm, imm2 .inst 0xce408c00 | .L\rd | (.L\rn << 5) | ((\imm2) << 12) | (.L\rm << 16) .endm .macro round, ab, s0, t0, t1, i sm3ss1 v5.4s, v8.4s, \t0\().4s, v9.4s shl \t1\().4s, \t0\().4s, #1 sri \t1\().4s, \t0\().4s, #31 sm3tt1\ab v8.4s, v5.4s, v10.4s, \i sm3tt2\ab v9.4s, v5.4s, \s0\().4s, \i .endm .macro qround, ab, s0, s1, s2, s3, s4 .ifnb \s4 ext \s4\().16b, \s1\().16b, \s2\().16b, #12 ext v6.16b, \s0\().16b, \s1\().16b, #12 ext v7.16b, \s2\().16b, \s3\().16b, #8 sm3partw1 \s4\().4s, \s0\().4s, \s3\().4s .endif eor v10.16b, \s0\().16b, \s1\().16b round \ab, \s0, v11, v12, 0 round \ab, \s0, v12, v11, 1 round \ab, \s0, v11, v12, 2 round \ab, \s0, v12, v11, 3 .ifnb \s4 sm3partw2 \s4\().4s, v7.4s, v6.4s .endif .endm /* * void sm3_ce_transform(struct sm3_state *sst, u8 const *src, * int blocks) */ .text ENTRY(sm3_ce_transform) /* load state */ ld1 {v8.4s-v9.4s}, [x0] rev64 v8.4s, v8.4s rev64 v9.4s, v9.4s ext v8.16b, v8.16b, v8.16b, #8 ext v9.16b, v9.16b, v9.16b, #8 adr_l x8, .Lt ldp s13, s14, [x8] /* load input */ 0: ld1 {v0.16b-v3.16b}, [x1], #64 sub w2, w2, #1 mov v15.16b, v8.16b mov v16.16b, v9.16b CPU_LE( rev32 v0.16b, v0.16b ) CPU_LE( rev32 v1.16b, v1.16b ) CPU_LE( rev32 v2.16b, v2.16b ) CPU_LE( rev32 v3.16b, v3.16b ) ext v11.16b, v13.16b, v13.16b, #4 qround a, v0, v1, v2, v3, v4 qround a, v1, v2, v3, v4, v0 qround a, v2, v3, v4, v0, v1 qround a, v3, v4, v0, v1, v2 ext v11.16b, v14.16b, v14.16b, #4 qround b, v4, v0, v1, v2, v3 qround b, v0, v1, v2, v3, v4 qround b, v1, v2, v3, v4, v0 qround b, v2, v3, v4, v0, v1 qround b, v3, v4, v0, v1, v2 qround b, v4, v0, v1, v2, v3 qround b, v0, v1, v2, v3, v4 qround b, v1, v2, v3, v4, v0 qround b, v2, v3, v4, v0, v1 qround b, v3, v4 qround b, v4, v0 qround b, v0, v1 eor v8.16b, v8.16b, v15.16b eor v9.16b, v9.16b, v16.16b /* handled all input blocks? */ cbnz w2, 0b /* save state */ rev64 v8.4s, v8.4s rev64 v9.4s, v9.4s ext v8.16b, v8.16b, v8.16b, #8 ext v9.16b, v9.16b, v9.16b, #8 st1 {v8.4s-v9.4s}, [x0] ret ENDPROC(sm3_ce_transform) .section ".rodata", "a" .align 3 .Lt: .word 0x79cc4519, 0x9d8a7a87
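The round macro rotates the running T constant left by one bit each round (the shl #1 / sri #31 pair), so round j effectively uses T rotated left by j, as the SM3 specification requires. Of the two words in .Lt, 0x79cc4519 is T for rounds 0-15, and 0x9d8a7a87 equals 0x7a879d8a rotated left by 16, so the rotate-by-one schedule continues correctly when the 'b' rounds begin at j = 16. A small C helper for that constant schedule, as a sketch:

#include <stdint.h>

static inline uint32_t rol32(uint32_t x, unsigned n)
{
	n &= 31;
	return n ? (x << n) | (x >> (32 - n)) : x;
}

/* per-round SM3 constant: T_j = T rotated left by (j mod 32) */
static uint32_t sm3_t(unsigned j)
{
	uint32_t t = (j < 16) ? 0x79cc4519u : 0x7a879d8au;

	return rol32(t, j);
}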
AirFortressIlikara/LS2K0300-linux-4.19
1,835
arch/arm64/lib/copy_to_user.S
/* * Copyright (C) 2012 ARM Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/linkage.h> #include <asm/cache.h> #include <asm/asm-uaccess.h> /* * Copy to user space from a kernel buffer (alignment handled by the hardware) * * Parameters: * x0 - to * x1 - from * x2 - n * Returns: * x0 - bytes not copied */ .macro ldrb1 ptr, regB, val ldrb \ptr, [\regB], \val .endm .macro strb1 ptr, regB, val uao_user_alternative 9998f, strb, sttrb, \ptr, \regB, \val .endm .macro ldrh1 ptr, regB, val ldrh \ptr, [\regB], \val .endm .macro strh1 ptr, regB, val uao_user_alternative 9998f, strh, sttrh, \ptr, \regB, \val .endm .macro ldr1 ptr, regB, val ldr \ptr, [\regB], \val .endm .macro str1 ptr, regB, val uao_user_alternative 9998f, str, sttr, \ptr, \regB, \val .endm .macro ldp1 ptr, regB, regC, val ldp \ptr, \regB, [\regC], \val .endm .macro stp1 ptr, regB, regC, val uao_stp 9998f, \ptr, \regB, \regC, \val .endm end .req x5 ENTRY(__arch_copy_to_user) uaccess_enable_not_uao x3, x4, x5 add end, x0, x2 #include "copy_template.S" uaccess_disable_not_uao x3, x4 mov x0, #0 ret ENDPROC(__arch_copy_to_user) .section .fixup,"ax" .align 2 9998: sub x0, end, dst // bytes not copied uaccess_disable_not_uao x3, x4 ret .previous
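The fixup at 9998 computes end - dst, so the routine returns the number of bytes it could not copy and 0 on success. A sketch of the usual caller-side pattern built on that convention (helper name illustrative):

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

/* copy a kernel buffer out to user space; any shortfall becomes -EFAULT */
static int push_to_user(void __user *dst, const void *src, size_t n)
{
	if (copy_to_user(dst, src, n))
		return -EFAULT;
	return 0;
}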
AirFortressIlikara/LS2K0300-linux-4.19
1,679
arch/arm64/lib/clear_user.S
/* * Based on arch/arm/lib/clear_user.S * * Copyright (C) 2012 ARM Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/linkage.h> #include <asm/asm-uaccess.h> .text /* Prototype: int __arch_clear_user(void *addr, size_t sz) * Purpose : clear some user memory * Params : addr - user memory address to clear * : sz - number of bytes to clear * Returns : number of bytes NOT cleared * * Alignment fixed up by hardware. */ ENTRY(__arch_clear_user) uaccess_enable_not_uao x2, x3, x4 mov x2, x1 // save the size for fixup return subs x1, x1, #8 b.mi 2f 1: uao_user_alternative 9f, str, sttr, xzr, x0, 8 subs x1, x1, #8 b.pl 1b 2: adds x1, x1, #4 b.mi 3f uao_user_alternative 9f, str, sttr, wzr, x0, 4 sub x1, x1, #4 3: adds x1, x1, #2 b.mi 4f uao_user_alternative 9f, strh, sttrh, wzr, x0, 2 sub x1, x1, #2 4: adds x1, x1, #1 b.mi 5f uao_user_alternative 9f, strb, sttrb, wzr, x0, 0 5: mov x0, #0 uaccess_disable_not_uao x2, x3 ret ENDPROC(__arch_clear_user) .section .fixup,"ax" .align 2 9: mov x0, x2 // return the original size uaccess_disable_not_uao x2, x3 ret .previous
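The body works through the size in decreasing power-of-two chunks: an 8-byte loop, then single 4-, 2- and 1-byte stores selected by the remaining low bits, each one an unprivileged user store with a fixup. The same decomposition in plain C (ordinary stores, no fault handling), as a sketch:

#include <stddef.h>
#include <string.h>

static void clear_bytes(unsigned char *p, size_t sz)
{
	for (; sz >= 8; sz -= 8, p += 8)
		memset(p, 0, 8);
	if (sz & 4) { memset(p, 0, 4); p += 4; }
	if (sz & 2) { memset(p, 0, 2); p += 2; }
	if (sz & 1)
		*p = 0;
}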
AirFortressIlikara/LS2K0300-linux-4.19
4,649
arch/arm64/lib/copy_template.S
/* * Copyright (C) 2013 ARM Ltd. * Copyright (C) 2013 Linaro. * * This code is based on glibc cortex strings work originally authored by Linaro * and re-licensed under GPLv2 for the Linux kernel. The original code can * be found @ * * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/ * files/head:/src/aarch64/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Copy a buffer from src to dest (alignment handled by the hardware) * * Parameters: * x0 - dest * x1 - src * x2 - n * Returns: * x0 - dest */ dstin .req x0 src .req x1 count .req x2 tmp1 .req x3 tmp1w .req w3 tmp2 .req x4 tmp2w .req w4 dst .req x6 A_l .req x7 A_h .req x8 B_l .req x9 B_h .req x10 C_l .req x11 C_h .req x12 D_l .req x13 D_h .req x14 mov dst, dstin cmp count, #16 /*When memory length is less than 16, the accessed are not aligned.*/ b.lo .Ltiny15 neg tmp2, src ands tmp2, tmp2, #15/* Bytes to reach alignment. */ b.eq .LSrcAligned sub count, count, tmp2 /* * Copy the leading memory data from src to dst in an increasing * address order.By this way,the risk of overwriting the source * memory data is eliminated when the distance between src and * dst is less than 16. The memory accesses here are alignment. */ tbz tmp2, #0, 1f ldrb1 tmp1w, src, #1 strb1 tmp1w, dst, #1 1: tbz tmp2, #1, 2f ldrh1 tmp1w, src, #2 strh1 tmp1w, dst, #2 2: tbz tmp2, #2, 3f ldr1 tmp1w, src, #4 str1 tmp1w, dst, #4 3: tbz tmp2, #3, .LSrcAligned ldr1 tmp1, src, #8 str1 tmp1, dst, #8 .LSrcAligned: cmp count, #64 b.ge .Lcpy_over64 /* * Deal with small copies quickly by dropping straight into the * exit block. */ .Ltail63: /* * Copy up to 48 bytes of data. At this point we only need the * bottom 6 bits of count to be accurate. */ ands tmp1, count, #0x30 b.eq .Ltiny15 cmp tmp1w, #0x20 b.eq 1f b.lt 2f ldp1 A_l, A_h, src, #16 stp1 A_l, A_h, dst, #16 1: ldp1 A_l, A_h, src, #16 stp1 A_l, A_h, dst, #16 2: ldp1 A_l, A_h, src, #16 stp1 A_l, A_h, dst, #16 .Ltiny15: /* * Prefer to break one ldp/stp into several load/store to access * memory in an increasing address order,rather than to load/store 16 * bytes from (src-16) to (dst-16) and to backward the src to aligned * address,which way is used in original cortex memcpy. If keeping * the original memcpy process here, memmove need to satisfy the * precondition that src address is at least 16 bytes bigger than dst * address,otherwise some source data will be overwritten when memove * call memcpy directly. To make memmove simpler and decouple the * memcpy's dependency on memmove, withdrew the original process. */ tbz count, #3, 1f ldr1 tmp1, src, #8 str1 tmp1, dst, #8 1: tbz count, #2, 2f ldr1 tmp1w, src, #4 str1 tmp1w, dst, #4 2: tbz count, #1, 3f ldrh1 tmp1w, src, #2 strh1 tmp1w, dst, #2 3: tbz count, #0, .Lexitfunc ldrb1 tmp1w, src, #1 strb1 tmp1w, dst, #1 b .Lexitfunc .Lcpy_over64: subs count, count, #128 b.ge .Lcpy_body_large /* * Less than 128 bytes to copy, so handle 64 here and then jump * to the tail. 
*/ ldp1 A_l, A_h, src, #16 stp1 A_l, A_h, dst, #16 ldp1 B_l, B_h, src, #16 ldp1 C_l, C_h, src, #16 stp1 B_l, B_h, dst, #16 stp1 C_l, C_h, dst, #16 ldp1 D_l, D_h, src, #16 stp1 D_l, D_h, dst, #16 tst count, #0x3f b.ne .Ltail63 b .Lexitfunc /* * Critical loop. Start at a new cache line boundary. Assuming * 64 bytes per line this ensures the entire loop is in one line. */ .p2align L1_CACHE_SHIFT .Lcpy_body_large: /* pre-get 64 bytes data. */ ldp1 A_l, A_h, src, #16 ldp1 B_l, B_h, src, #16 ldp1 C_l, C_h, src, #16 ldp1 D_l, D_h, src, #16 1: /* * interlace the load of next 64 bytes data block with store of the last * loaded 64 bytes data. */ stp1 A_l, A_h, dst, #16 ldp1 A_l, A_h, src, #16 stp1 B_l, B_h, dst, #16 ldp1 B_l, B_h, src, #16 stp1 C_l, C_h, dst, #16 ldp1 C_l, C_h, src, #16 stp1 D_l, D_h, dst, #16 ldp1 D_l, D_h, src, #16 subs count, count, #64 b.ge 1b stp1 A_l, A_h, dst, #16 stp1 B_l, B_h, dst, #16 stp1 C_l, C_h, dst, #16 stp1 D_l, D_h, dst, #16 tst count, #0x3f b.ne .Ltail63 .Lexitfunc:
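Overall the template peels leading bytes until src reaches 16-byte alignment, streams 64 bytes per iteration through the ldp1/stp1 macros its includer defines, and finishes with the sub-64-byte tail. The same control shape in C, as an illustrative sketch rather than a drop-in replacement:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void *copy_shape(void *dstin, const void *srcin, size_t n)
{
	unsigned char *dst = dstin;
	const unsigned char *src = srcin;
	size_t head = (16 - ((uintptr_t)src & 15)) & 15;

	/* align the source first so the bulk loop uses aligned loads */
	if (n >= 16 && head) {
		memcpy(dst, src, head);
		dst += head; src += head; n -= head;
	}
	/* 64 bytes per iteration, matching the unrolled ldp1/stp1 body */
	for (; n >= 64; n -= 64, dst += 64, src += 64)
		memcpy(dst, src, 64);
	/* tail of 0..63 bytes */
	memcpy(dst, src, n);
	return dstin;
}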
AirFortressIlikara/LS2K0300-linux-4.19
1,786
arch/arm64/lib/memcpy.S
/* * Copyright (C) 2013 ARM Ltd. * Copyright (C) 2013 Linaro. * * This code is based on glibc cortex strings work originally authored by Linaro * and re-licensed under GPLv2 for the Linux kernel. The original code can * be found @ * * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/ * files/head:/src/aarch64/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/cache.h> /* * Copy a buffer from src to dest (alignment handled by the hardware) * * Parameters: * x0 - dest * x1 - src * x2 - n * Returns: * x0 - dest */ .macro ldrb1 ptr, regB, val ldrb \ptr, [\regB], \val .endm .macro strb1 ptr, regB, val strb \ptr, [\regB], \val .endm .macro ldrh1 ptr, regB, val ldrh \ptr, [\regB], \val .endm .macro strh1 ptr, regB, val strh \ptr, [\regB], \val .endm .macro ldr1 ptr, regB, val ldr \ptr, [\regB], \val .endm .macro str1 ptr, regB, val str \ptr, [\regB], \val .endm .macro ldp1 ptr, regB, regC, val ldp \ptr, \regB, [\regC], \val .endm .macro stp1 ptr, regB, regC, val stp \ptr, \regB, [\regC], \val .endm ENTRY(__memcpy) WEAK(memcpy) #include "copy_template.S" ret ENDPIPROC(memcpy) ENDPROC(__memcpy)
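ENTRY(__memcpy) followed by WEAK(memcpy) emits __memcpy as the strong symbol and memcpy as a weak alias to it, so another definition (for example an instrumented one) can take over memcpy while __memcpy remains the raw copy. The same arrangement expressed with compiler attributes, using illustrative names to avoid clashing with the real memcpy:

#include <stddef.h>

void *__my_memcpy(void *dst, const void *src, size_t n)
{
	unsigned char *d = dst;
	const unsigned char *s = src;

	while (n--)
		*d++ = *s++;
	return dst;
}

/* weak alias: callers bind to my_memcpy unless a strong definition overrides it */
void *my_memcpy(void *, const void *, size_t)
	__attribute__((weak, alias("__my_memcpy")));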
AirFortressIlikara/LS2K0300-linux-4.19
4,403
arch/arm64/lib/memmove.S
/* * Copyright (C) 2013 ARM Ltd. * Copyright (C) 2013 Linaro. * * This code is based on glibc cortex strings work originally authored by Linaro * and re-licensed under GPLv2 for the Linux kernel. The original code can * be found @ * * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/ * files/head:/src/aarch64/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/cache.h> /* * Move a buffer from src to test (alignment handled by the hardware). * If dest <= src, call memcpy, otherwise copy in reverse order. * * Parameters: * x0 - dest * x1 - src * x2 - n * Returns: * x0 - dest */ dstin .req x0 src .req x1 count .req x2 tmp1 .req x3 tmp1w .req w3 tmp2 .req x4 tmp2w .req w4 tmp3 .req x5 tmp3w .req w5 dst .req x6 A_l .req x7 A_h .req x8 B_l .req x9 B_h .req x10 C_l .req x11 C_h .req x12 D_l .req x13 D_h .req x14 ENTRY(__memmove) WEAK(memmove) cmp dstin, src b.lo __memcpy add tmp1, src, count cmp dstin, tmp1 b.hs __memcpy /* No overlap. */ add dst, dstin, count add src, src, count cmp count, #16 b.lo .Ltail15 /*probably non-alignment accesses.*/ ands tmp2, src, #15 /* Bytes to reach alignment. */ b.eq .LSrcAligned sub count, count, tmp2 /* * process the aligned offset length to make the src aligned firstly. * those extra instructions' cost is acceptable. It also make the * coming accesses are based on aligned address. */ tbz tmp2, #0, 1f ldrb tmp1w, [src, #-1]! strb tmp1w, [dst, #-1]! 1: tbz tmp2, #1, 2f ldrh tmp1w, [src, #-2]! strh tmp1w, [dst, #-2]! 2: tbz tmp2, #2, 3f ldr tmp1w, [src, #-4]! str tmp1w, [dst, #-4]! 3: tbz tmp2, #3, .LSrcAligned ldr tmp1, [src, #-8]! str tmp1, [dst, #-8]! .LSrcAligned: cmp count, #64 b.ge .Lcpy_over64 /* * Deal with small copies quickly by dropping straight into the * exit block. */ .Ltail63: /* * Copy up to 48 bytes of data. At this point we only need the * bottom 6 bits of count to be accurate. */ ands tmp1, count, #0x30 b.eq .Ltail15 cmp tmp1w, #0x20 b.eq 1f b.lt 2f ldp A_l, A_h, [src, #-16]! stp A_l, A_h, [dst, #-16]! 1: ldp A_l, A_h, [src, #-16]! stp A_l, A_h, [dst, #-16]! 2: ldp A_l, A_h, [src, #-16]! stp A_l, A_h, [dst, #-16]! .Ltail15: tbz count, #3, 1f ldr tmp1, [src, #-8]! str tmp1, [dst, #-8]! 1: tbz count, #2, 2f ldr tmp1w, [src, #-4]! str tmp1w, [dst, #-4]! 2: tbz count, #1, 3f ldrh tmp1w, [src, #-2]! strh tmp1w, [dst, #-2]! 3: tbz count, #0, .Lexitfunc ldrb tmp1w, [src, #-1] strb tmp1w, [dst, #-1] .Lexitfunc: ret .Lcpy_over64: subs count, count, #128 b.ge .Lcpy_body_large /* * Less than 128 bytes to copy, so handle 64 bytes here and then jump * to the tail. */ ldp A_l, A_h, [src, #-16] stp A_l, A_h, [dst, #-16] ldp B_l, B_h, [src, #-32] ldp C_l, C_h, [src, #-48] stp B_l, B_h, [dst, #-32] stp C_l, C_h, [dst, #-48] ldp D_l, D_h, [src, #-64]! stp D_l, D_h, [dst, #-64]! tst count, #0x3f b.ne .Ltail63 ret /* * Critical loop. Start at a new cache line boundary. Assuming * 64 bytes per line this ensures the entire loop is in one line. 
*/ .p2align L1_CACHE_SHIFT .Lcpy_body_large: /* pre-load 64 bytes data. */ ldp A_l, A_h, [src, #-16] ldp B_l, B_h, [src, #-32] ldp C_l, C_h, [src, #-48] ldp D_l, D_h, [src, #-64]! 1: /* * interlace the load of next 64 bytes data block with store of the last * loaded 64 bytes data. */ stp A_l, A_h, [dst, #-16] ldp A_l, A_h, [src, #-16] stp B_l, B_h, [dst, #-32] ldp B_l, B_h, [src, #-32] stp C_l, C_h, [dst, #-48] ldp C_l, C_h, [src, #-48] stp D_l, D_h, [dst, #-64]! ldp D_l, D_h, [src, #-64]! subs count, count, #64 b.ge 1b stp A_l, A_h, [dst, #-16] stp B_l, B_h, [dst, #-32] stp C_l, C_h, [dst, #-48] stp D_l, D_h, [dst, #-64]! tst count, #0x3f b.ne .Ltail63 ret ENDPIPROC(memmove) ENDPROC(__memmove)
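As a plain-C illustration of the dispatch logic at the top of __memmove above (not the optimized assembly itself): copy forwards when the destination does not start inside the source buffer, otherwise copy backwards from the end so overlapping bytes are read before they are overwritten. The name memmove_sketch is a placeholder for this example.

#include <stddef.h>
#include <stdint.h>

/* Minimal sketch of __memmove's direction choice. */
void *memmove_sketch(void *dest, const void *src, size_t n)
{
    unsigned char *d = dest;
    const unsigned char *s = src;

    if ((uintptr_t)d < (uintptr_t)s || (uintptr_t)d >= (uintptr_t)s + n) {
        for (size_t i = 0; i < n; i++)       /* forward copy: no harmful overlap */
            d[i] = s[i];
    } else {
        for (size_t i = n; i > 0; i--)       /* backward copy: dest overlaps the tail of src */
            d[i - 1] = s[i - 1];
    }
    return dest;
}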
AirFortressIlikara/LS2K0300-linux-4.19
2,105
arch/arm64/lib/copy_page.S
/* * Copyright (C) 2012 ARM Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/linkage.h> #include <linux/const.h> #include <asm/assembler.h> #include <asm/page.h> #include <asm/cpufeature.h> #include <asm/alternative.h> /* * Copy a page from src to dest (both are page aligned) * * Parameters: * x0 - dest * x1 - src */ ENTRY(copy_page) alternative_if ARM64_HAS_NO_HW_PREFETCH // Prefetch three cache lines ahead. prfm pldl1strm, [x1, #128] prfm pldl1strm, [x1, #256] prfm pldl1strm, [x1, #384] alternative_else_nop_endif ldp x2, x3, [x1] ldp x4, x5, [x1, #16] ldp x6, x7, [x1, #32] ldp x8, x9, [x1, #48] ldp x10, x11, [x1, #64] ldp x12, x13, [x1, #80] ldp x14, x15, [x1, #96] ldp x16, x17, [x1, #112] mov x18, #(PAGE_SIZE - 128) add x1, x1, #128 1: subs x18, x18, #128 alternative_if ARM64_HAS_NO_HW_PREFETCH prfm pldl1strm, [x1, #384] alternative_else_nop_endif stnp x2, x3, [x0] ldp x2, x3, [x1] stnp x4, x5, [x0, #16] ldp x4, x5, [x1, #16] stnp x6, x7, [x0, #32] ldp x6, x7, [x1, #32] stnp x8, x9, [x0, #48] ldp x8, x9, [x1, #48] stnp x10, x11, [x0, #64] ldp x10, x11, [x1, #64] stnp x12, x13, [x0, #80] ldp x12, x13, [x1, #80] stnp x14, x15, [x0, #96] ldp x14, x15, [x1, #96] stnp x16, x17, [x0, #112] ldp x16, x17, [x1, #112] add x0, x0, #128 add x1, x1, #128 b.gt 1b stnp x2, x3, [x0] stnp x4, x5, [x0, #16] stnp x6, x7, [x0, #32] stnp x8, x9, [x0, #48] stnp x10, x11, [x0, #64] stnp x12, x13, [x0, #80] stnp x14, x15, [x0, #96] stnp x16, x17, [x0, #112] ret ENDPROC(copy_page)
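A rough C analogue of the copy loop above, assuming 4 KiB pages and using a compiler prefetch hint in place of the prfm pldl1strm instructions; the 128-byte batch mirrors the eight ldp/stnp pairs per iteration. This is an illustration of the structure, not the kernel routine.

#include <stdint.h>
#include <string.h>

#define PAGE_SIZE_SKETCH 4096   /* assumption: 4 KiB pages */

/* Copy one page in 128-byte batches, prefetching a few lines ahead. */
static void copy_page_sketch(void *dst, const void *src)
{
    const char *s = src;
    char *d = dst;

    for (unsigned int off = 0; off < PAGE_SIZE_SKETCH; off += 128) {
        __builtin_prefetch(s + off + 384, 0, 0);   /* read hint, low temporal locality */
        memcpy(d + off, s + off, 128);             /* one 128-byte batch */
    }
}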
AirFortressIlikara/LS2K0300-linux-4.19
1,980
arch/arm64/lib/copy_in_user.S
/* * Copy from user space to user space * * Copyright (C) 2012 ARM Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/linkage.h> #include <asm/cache.h> #include <asm/asm-uaccess.h> /* * Copy from user space to user space (alignment handled by the hardware) * * Parameters: * x0 - to * x1 - from * x2 - n * Returns: * x0 - bytes not copied */ .macro ldrb1 ptr, regB, val uao_user_alternative 9998f, ldrb, ldtrb, \ptr, \regB, \val .endm .macro strb1 ptr, regB, val uao_user_alternative 9998f, strb, sttrb, \ptr, \regB, \val .endm .macro ldrh1 ptr, regB, val uao_user_alternative 9998f, ldrh, ldtrh, \ptr, \regB, \val .endm .macro strh1 ptr, regB, val uao_user_alternative 9998f, strh, sttrh, \ptr, \regB, \val .endm .macro ldr1 ptr, regB, val uao_user_alternative 9998f, ldr, ldtr, \ptr, \regB, \val .endm .macro str1 ptr, regB, val uao_user_alternative 9998f, str, sttr, \ptr, \regB, \val .endm .macro ldp1 ptr, regB, regC, val uao_ldp 9998f, \ptr, \regB, \regC, \val .endm .macro stp1 ptr, regB, regC, val uao_stp 9998f, \ptr, \regB, \regC, \val .endm end .req x5 ENTRY(__arch_copy_in_user) uaccess_enable_not_uao x3, x4, x5 add end, x0, x2 #include "copy_template.S" uaccess_disable_not_uao x3, x4 mov x0, #0 ret ENDPROC(__arch_copy_in_user) .section .fixup,"ax" .align 2 9998: sub x0, end, dst // bytes not copied uaccess_disable_not_uao x3, x4 ret .previous
AirFortressIlikara/LS2K0300-linux-4.19
9,006
arch/arm64/lib/strncmp.S
/* * Copyright (C) 2013 ARM Ltd. * Copyright (C) 2013 Linaro. * * This code is based on glibc cortex strings work originally authored by Linaro * and re-licensed under GPLv2 for the Linux kernel. The original code can * be found @ * * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/ * files/head:/src/aarch64/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/linkage.h> #include <asm/assembler.h> /* * compare two strings * * Parameters: * x0 - const string 1 pointer * x1 - const string 2 pointer * x2 - the maximal length to be compared * Returns: * x0 - an integer less than, equal to, or greater than zero if s1 is found, * respectively, to be less than, to match, or be greater than s2. */ #define REP8_01 0x0101010101010101 #define REP8_7f 0x7f7f7f7f7f7f7f7f #define REP8_80 0x8080808080808080 /* Parameters and result. */ src1 .req x0 src2 .req x1 limit .req x2 result .req x0 /* Internal variables. */ data1 .req x3 data1w .req w3 data2 .req x4 data2w .req w4 has_nul .req x5 diff .req x6 syndrome .req x7 tmp1 .req x8 tmp2 .req x9 tmp3 .req x10 zeroones .req x11 pos .req x12 limit_wd .req x13 mask .req x14 endloop .req x15 WEAK(strncmp) cbz limit, .Lret0 eor tmp1, src1, src2 mov zeroones, #REP8_01 tst tmp1, #7 b.ne .Lmisaligned8 ands tmp1, src1, #7 b.ne .Lmutual_align /* Calculate the number of full and partial words -1. */ /* * when limit is mulitply of 8, if not sub 1, * the judgement of last dword will wrong. */ sub limit_wd, limit, #1 /* limit != 0, so no underflow. */ lsr limit_wd, limit_wd, #3 /* Convert to Dwords. */ /* * NUL detection works on the principle that (X - 1) & (~X) & 0x80 * (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and * can be done in parallel across the entire word. */ .Lloop_aligned: ldr data1, [src1], #8 ldr data2, [src2], #8 .Lstart_realigned: subs limit_wd, limit_wd, #1 sub tmp1, data1, zeroones orr tmp2, data1, #REP8_7f eor diff, data1, data2 /* Non-zero if differences found. */ csinv endloop, diff, xzr, pl /* Last Dword or differences.*/ bics has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */ ccmp endloop, #0, #0, eq b.eq .Lloop_aligned /*Not reached the limit, must have found the end or a diff. */ tbz limit_wd, #63, .Lnot_limit /* Limit % 8 == 0 => all bytes significant. */ ands limit, limit, #7 b.eq .Lnot_limit lsl limit, limit, #3 /* Bits -> bytes. */ mov mask, #~0 CPU_BE( lsr mask, mask, limit ) CPU_LE( lsl mask, mask, limit ) bic data1, data1, mask bic data2, data2, mask /* Make sure that the NUL byte is marked in the syndrome. */ orr has_nul, has_nul, mask .Lnot_limit: orr syndrome, diff, has_nul b .Lcal_cmpresult .Lmutual_align: /* * Sources are mutually aligned, but are not currently at an * alignment boundary. Round down the addresses and then mask off * the bytes that precede the start point. * We also need to adjust the limit calculations, but without * overflowing if the limit is near ULONG_MAX. 
*/ bic src1, src1, #7 bic src2, src2, #7 ldr data1, [src1], #8 neg tmp3, tmp1, lsl #3 /* 64 - bits(bytes beyond align). */ ldr data2, [src2], #8 mov tmp2, #~0 sub limit_wd, limit, #1 /* limit != 0, so no underflow. */ /* Big-endian. Early bytes are at MSB. */ CPU_BE( lsl tmp2, tmp2, tmp3 ) /* Shift (tmp1 & 63). */ /* Little-endian. Early bytes are at LSB. */ CPU_LE( lsr tmp2, tmp2, tmp3 ) /* Shift (tmp1 & 63). */ and tmp3, limit_wd, #7 lsr limit_wd, limit_wd, #3 /* Adjust the limit. Only low 3 bits used, so overflow irrelevant.*/ add limit, limit, tmp1 add tmp3, tmp3, tmp1 orr data1, data1, tmp2 orr data2, data2, tmp2 add limit_wd, limit_wd, tmp3, lsr #3 b .Lstart_realigned /*when src1 offset is not equal to src2 offset...*/ .Lmisaligned8: cmp limit, #8 b.lo .Ltiny8proc /*limit < 8... */ /* * Get the align offset length to compare per byte first. * After this process, one string's address will be aligned.*/ and tmp1, src1, #7 neg tmp1, tmp1 add tmp1, tmp1, #8 and tmp2, src2, #7 neg tmp2, tmp2 add tmp2, tmp2, #8 subs tmp3, tmp1, tmp2 csel pos, tmp1, tmp2, hi /*Choose the maximum. */ /* * Here, limit is not less than 8, so directly run .Ltinycmp * without checking the limit.*/ sub limit, limit, pos .Ltinycmp: ldrb data1w, [src1], #1 ldrb data2w, [src2], #1 subs pos, pos, #1 ccmp data1w, #1, #0, ne /* NZCV = 0b0000. */ ccmp data1w, data2w, #0, cs /* NZCV = 0b0000. */ b.eq .Ltinycmp cbnz pos, 1f /*find the null or unequal...*/ cmp data1w, #1 ccmp data1w, data2w, #0, cs b.eq .Lstart_align /*the last bytes are equal....*/ 1: sub result, data1, data2 ret .Lstart_align: lsr limit_wd, limit, #3 cbz limit_wd, .Lremain8 /*process more leading bytes to make str1 aligned...*/ ands xzr, src1, #7 b.eq .Lrecal_offset add src1, src1, tmp3 /*tmp3 is positive in this branch.*/ add src2, src2, tmp3 ldr data1, [src1], #8 ldr data2, [src2], #8 sub limit, limit, tmp3 lsr limit_wd, limit, #3 subs limit_wd, limit_wd, #1 sub tmp1, data1, zeroones orr tmp2, data1, #REP8_7f eor diff, data1, data2 /* Non-zero if differences found. */ csinv endloop, diff, xzr, ne/*if limit_wd is 0,will finish the cmp*/ bics has_nul, tmp1, tmp2 ccmp endloop, #0, #0, eq /*has_null is ZERO: no null byte*/ b.ne .Lunequal_proc /*How far is the current str2 from the alignment boundary...*/ and tmp3, tmp3, #7 .Lrecal_offset: neg pos, tmp3 .Lloopcmp_proc: /* * Divide the eight bytes into two parts. First,backwards the src2 * to an alignment boundary,load eight bytes from the SRC2 alignment * boundary,then compare with the relative bytes from SRC1. * If all 8 bytes are equal,then start the second part's comparison. * Otherwise finish the comparison. * This special handle can garantee all the accesses are in the * thread/task space in avoid to overrange access. */ ldr data1, [src1,pos] ldr data2, [src2,pos] sub tmp1, data1, zeroones orr tmp2, data1, #REP8_7f bics has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */ eor diff, data1, data2 /* Non-zero if differences found. */ csinv endloop, diff, xzr, eq cbnz endloop, .Lunequal_proc /*The second part process*/ ldr data1, [src1], #8 ldr data2, [src2], #8 subs limit_wd, limit_wd, #1 sub tmp1, data1, zeroones orr tmp2, data1, #REP8_7f eor diff, data1, data2 /* Non-zero if differences found. 
*/ csinv endloop, diff, xzr, ne/*if limit_wd is 0,will finish the cmp*/ bics has_nul, tmp1, tmp2 ccmp endloop, #0, #0, eq /*has_null is ZERO: no null byte*/ b.eq .Lloopcmp_proc .Lunequal_proc: orr syndrome, diff, has_nul cbz syndrome, .Lremain8 .Lcal_cmpresult: /* * reversed the byte-order as big-endian,then CLZ can find the most * significant zero bits. */ CPU_LE( rev syndrome, syndrome ) CPU_LE( rev data1, data1 ) CPU_LE( rev data2, data2 ) /* * For big-endian we cannot use the trick with the syndrome value * as carry-propagation can corrupt the upper bits if the trailing * bytes in the string contain 0x01. * However, if there is no NUL byte in the dword, we can generate * the result directly. We can't just subtract the bytes as the * MSB might be significant. */ CPU_BE( cbnz has_nul, 1f ) CPU_BE( cmp data1, data2 ) CPU_BE( cset result, ne ) CPU_BE( cneg result, result, lo ) CPU_BE( ret ) CPU_BE( 1: ) /* Re-compute the NUL-byte detection, using a byte-reversed value.*/ CPU_BE( rev tmp3, data1 ) CPU_BE( sub tmp1, tmp3, zeroones ) CPU_BE( orr tmp2, tmp3, #REP8_7f ) CPU_BE( bic has_nul, tmp1, tmp2 ) CPU_BE( rev has_nul, has_nul ) CPU_BE( orr syndrome, diff, has_nul ) /* * The MS-non-zero bit of the syndrome marks either the first bit * that is different, or the top bit of the first zero byte. * Shifting left now will bring the critical information into the * top bits. */ clz pos, syndrome lsl data1, data1, pos lsl data2, data2, pos /* * But we need to zero-extend (char is unsigned) the value and then * perform a signed 32-bit subtraction. */ lsr data1, data1, #56 sub result, data1, data2, lsr #56 ret .Lremain8: /* Limit % 8 == 0 => all bytes significant. */ ands limit, limit, #7 b.eq .Lret0 .Ltiny8proc: ldrb data1w, [src1], #1 ldrb data2w, [src2], #1 subs limit, limit, #1 ccmp data1w, #1, #0, ne /* NZCV = 0b0000. */ ccmp data1w, data2w, #0, cs /* NZCV = 0b0000. */ b.eq .Ltiny8proc sub result, data1, data2 ret .Lret0: mov result, #0 ret ENDPIPROC(strncmp)
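The NUL-detection identity quoted in the comments above, (X - 1) & ~X & 0x80 applied to every byte lane at once, can be checked with a few lines of C. This standalone sketch demonstrates only the detection step, not the rest of strncmp.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define REP8_01 0x0101010101010101ULL
#define REP8_7f 0x7f7f7f7f7f7f7f7fULL

/* Non-zero iff at least one byte lane of x is zero. */
static uint64_t has_zero_byte(uint64_t x)
{
    return (x - REP8_01) & ~(x | REP8_7f);
}

int main(void)
{
    uint64_t w;

    memcpy(&w, "abc\0defg", 8);               /* NUL in byte lane 3 */
    printf("%016llx\n", (unsigned long long)has_zero_byte(w)); /* prints a non-zero marker word */

    memcpy(&w, "abcdefgh", 8);                /* no NUL */
    printf("%016llx\n", (unsigned long long)has_zero_byte(w)); /* prints zero */
    return 0;
}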
AirFortressIlikara/LS2K0300-linux-4.19
5,036
arch/arm64/lib/memset.S
/* * Copyright (C) 2013 ARM Ltd. * Copyright (C) 2013 Linaro. * * This code is based on glibc cortex strings work originally authored by Linaro * and re-licensed under GPLv2 for the Linux kernel. The original code can * be found @ * * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/ * files/head:/src/aarch64/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/cache.h> /* * Fill in the buffer with character c (alignment handled by the hardware) * * Parameters: * x0 - buf * x1 - c * x2 - n * Returns: * x0 - buf */ dstin .req x0 val .req w1 count .req x2 tmp1 .req x3 tmp1w .req w3 tmp2 .req x4 tmp2w .req w4 zva_len_x .req x5 zva_len .req w5 zva_bits_x .req x6 A_l .req x7 A_lw .req w7 dst .req x8 tmp3w .req w9 tmp3 .req x9 ENTRY(__memset) WEAK(memset) mov dst, dstin /* Preserve return value. */ and A_lw, val, #255 orr A_lw, A_lw, A_lw, lsl #8 orr A_lw, A_lw, A_lw, lsl #16 orr A_l, A_l, A_l, lsl #32 cmp count, #15 b.hi .Lover16_proc /*All store maybe are non-aligned..*/ tbz count, #3, 1f str A_l, [dst], #8 1: tbz count, #2, 2f str A_lw, [dst], #4 2: tbz count, #1, 3f strh A_lw, [dst], #2 3: tbz count, #0, 4f strb A_lw, [dst] 4: ret .Lover16_proc: /*Whether the start address is aligned with 16.*/ neg tmp2, dst ands tmp2, tmp2, #15 b.eq .Laligned /* * The count is not less than 16, we can use stp to store the start 16 bytes, * then adjust the dst aligned with 16.This process will make the current * memory address at alignment boundary. */ stp A_l, A_l, [dst] /*non-aligned store..*/ /*make the dst aligned..*/ sub count, count, tmp2 add dst, dst, tmp2 .Laligned: cbz A_l, .Lzero_mem .Ltail_maybe_long: cmp count, #64 b.ge .Lnot_short .Ltail63: ands tmp1, count, #0x30 b.eq 3f cmp tmp1w, #0x20 b.eq 1f b.lt 2f stp A_l, A_l, [dst], #16 1: stp A_l, A_l, [dst], #16 2: stp A_l, A_l, [dst], #16 /* * The last store length is less than 16,use stp to write last 16 bytes. * It will lead some bytes written twice and the access is non-aligned. */ 3: ands count, count, #15 cbz count, 4f add dst, dst, count stp A_l, A_l, [dst, #-16] /* Repeat some/all of last store. */ 4: ret /* * Critical loop. Start at a new cache line boundary. Assuming * 64 bytes per line, this ensures the entire loop is in one line. */ .p2align L1_CACHE_SHIFT .Lnot_short: sub dst, dst, #16/* Pre-bias. */ sub count, count, #64 1: stp A_l, A_l, [dst, #16] stp A_l, A_l, [dst, #32] stp A_l, A_l, [dst, #48] stp A_l, A_l, [dst, #64]! subs count, count, #64 b.ge 1b tst count, #0x3f add dst, dst, #16 b.ne .Ltail63 .Lexitfunc: ret /* * For zeroing memory, check to see if we can use the ZVA feature to * zero entire 'cache' lines. */ .Lzero_mem: cmp count, #63 b.le .Ltail63 /* * For zeroing small amounts of memory, it's not worth setting up * the line-clear code. */ cmp count, #128 b.lt .Lnot_short /*count is at least 128 bytes*/ mrs tmp1, dczid_el0 tbnz tmp1, #4, .Lnot_short mov tmp3w, #4 and zva_len, tmp1w, #15 /* Safety: other bits reserved. 
*/ lsl zva_len, tmp3w, zva_len ands tmp3w, zva_len, #63 /* * ensure the zva_len is not less than 64. * It is not meaningful to use ZVA if the block size is less than 64. */ b.ne .Lnot_short .Lzero_by_line: /* * Compute how far we need to go to become suitably aligned. We're * already at quad-word alignment. */ cmp count, zva_len_x b.lt .Lnot_short /* Not enough to reach alignment. */ sub zva_bits_x, zva_len_x, #1 neg tmp2, dst ands tmp2, tmp2, zva_bits_x b.eq 2f /* Already aligned. */ /* Not aligned, check that there's enough to copy after alignment.*/ sub tmp1, count, tmp2 /* * grantee the remain length to be ZVA is bigger than 64, * avoid to make the 2f's process over mem range.*/ cmp tmp1, #64 ccmp tmp1, zva_len_x, #8, ge /* NZCV=0b1000 */ b.lt .Lnot_short /* * We know that there's at least 64 bytes to zero and that it's safe * to overrun by 64 bytes. */ mov count, tmp1 1: stp A_l, A_l, [dst] stp A_l, A_l, [dst, #16] stp A_l, A_l, [dst, #32] subs tmp2, tmp2, #64 stp A_l, A_l, [dst, #48] add dst, dst, #64 b.ge 1b /* We've overrun a bit, so adjust dst downwards.*/ add dst, dst, tmp2 2: sub count, count, zva_len_x 3: dc zva, dst add dst, dst, zva_len_x subs count, count, zva_len_x b.ge 3b ands count, count, zva_bits_x b.ne .Ltail_maybe_long ret ENDPIPROC(memset) ENDPROC(__memset)
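The first few instructions of __memset above build the store pattern by replicating the fill byte across a 64-bit register, so the main loop can write 16 bytes per stp; the zero case additionally clears whole blocks with DC ZVA, which has no portable C equivalent. A C sketch of the pattern setup:

#include <stdint.h>

/* Replicate the fill byte into all eight byte lanes of a 64-bit word. */
static uint64_t replicate_byte(unsigned char c)
{
    uint64_t v = c;

    v |= v << 8;     /* 8-bit pattern  -> 16 bits */
    v |= v << 16;    /* 16-bit pattern -> 32 bits */
    v |= v << 32;    /* 32-bit pattern -> 64 bits */
    return v;        /* equivalently: c * 0x0101010101010101ULL */
}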
AirFortressIlikara/LS2K0300-linux-4.19
1,077
arch/arm64/lib/strrchr.S
/* * Based on arch/arm/lib/strrchr.S * * Copyright (C) 1995-2000 Russell King * Copyright (C) 2013 ARM Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/linkage.h> #include <asm/assembler.h> /* * Find the last occurrence of a character in a string. * * Parameters: * x0 - str * x1 - c * Returns: * x0 - address of last occurrence of 'c' or 0 */ WEAK(strrchr) mov x3, #0 and w1, w1, #0xff 1: ldrb w2, [x0], #1 cbz w2, 2f cmp w2, w1 b.ne 1b sub x3, x0, #1 b 1b 2: mov x0, x3 ret ENDPIPROC(strrchr)
AirFortressIlikara/LS2K0300-linux-4.19
1,860
arch/arm64/lib/copy_from_user.S
/* * Copyright (C) 2012 ARM Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/linkage.h> #include <asm/cache.h> #include <asm/asm-uaccess.h> /* * Copy from user space to a kernel buffer (alignment handled by the hardware) * * Parameters: * x0 - to * x1 - from * x2 - n * Returns: * x0 - bytes not copied */ .macro ldrb1 ptr, regB, val uao_user_alternative 9998f, ldrb, ldtrb, \ptr, \regB, \val .endm .macro strb1 ptr, regB, val strb \ptr, [\regB], \val .endm .macro ldrh1 ptr, regB, val uao_user_alternative 9998f, ldrh, ldtrh, \ptr, \regB, \val .endm .macro strh1 ptr, regB, val strh \ptr, [\regB], \val .endm .macro ldr1 ptr, regB, val uao_user_alternative 9998f, ldr, ldtr, \ptr, \regB, \val .endm .macro str1 ptr, regB, val str \ptr, [\regB], \val .endm .macro ldp1 ptr, regB, regC, val uao_ldp 9998f, \ptr, \regB, \regC, \val .endm .macro stp1 ptr, regB, regC, val stp \ptr, \regB, [\regC], \val .endm end .req x5 ENTRY(__arch_copy_from_user) uaccess_enable_not_uao x3, x4, x5 add end, x0, x2 #include "copy_template.S" uaccess_disable_not_uao x3, x4 mov x0, #0 // Nothing to copy ret ENDPROC(__arch_copy_from_user) .section .fixup,"ax" .align 2 9998: sub x0, end, dst // bytes not copied uaccess_disable_not_uao x3, x4 ret .previous
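The fixup path above implements the usual uaccess return convention: zero on success, otherwise the number of bytes that could not be copied. A typical kernel-context caller checks it as below; example_read_from_user, kbuf, ubuf and len are placeholder names used only for illustration.

#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

/* Sketch of caller-side handling of the "bytes not copied" return value. */
static long example_read_from_user(void *kbuf, const void __user *ubuf, size_t len)
{
    if (copy_from_user(kbuf, ubuf, len))    /* non-zero => part of the range faulted */
        return -EFAULT;
    return len;
}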
AirFortressIlikara/LS2K0300-linux-4.19
1,072
arch/arm64/lib/memchr.S
/* * Based on arch/arm/lib/memchr.S * * Copyright (C) 1995-2000 Russell King * Copyright (C) 2013 ARM Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/linkage.h> #include <asm/assembler.h> /* * Find a character in an area of memory. * * Parameters: * x0 - buf * x1 - c * x2 - n * Returns: * x0 - address of first occurrence of 'c' or 0 */ WEAK(memchr) and w1, w1, #0xff 1: subs x2, x2, #1 b.mi 2f ldrb w3, [x0], #1 cmp w3, w1 b.ne 1b sub x0, x0, #1 ret 2: mov x0, #0 ret ENDPIPROC(memchr)
AirFortressIlikara/LS2K0300-linux-4.19
7,168
arch/arm64/lib/memcmp.S
/* * Copyright (C) 2013 ARM Ltd. * Copyright (C) 2013 Linaro. * * This code is based on glibc cortex strings work originally authored by Linaro * and re-licensed under GPLv2 for the Linux kernel. The original code can * be found @ * * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/ * files/head:/src/aarch64/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/linkage.h> #include <asm/assembler.h> /* * compare memory areas(when two memory areas' offset are different, * alignment handled by the hardware) * * Parameters: * x0 - const memory area 1 pointer * x1 - const memory area 2 pointer * x2 - the maximal compare byte length * Returns: * x0 - a compare result, maybe less than, equal to, or greater than ZERO */ /* Parameters and result. */ src1 .req x0 src2 .req x1 limit .req x2 result .req x0 /* Internal variables. */ data1 .req x3 data1w .req w3 data2 .req x4 data2w .req w4 has_nul .req x5 diff .req x6 endloop .req x7 tmp1 .req x8 tmp2 .req x9 tmp3 .req x10 pos .req x11 limit_wd .req x12 mask .req x13 WEAK(memcmp) cbz limit, .Lret0 eor tmp1, src1, src2 tst tmp1, #7 b.ne .Lmisaligned8 ands tmp1, src1, #7 b.ne .Lmutual_align sub limit_wd, limit, #1 /* limit != 0, so no underflow. */ lsr limit_wd, limit_wd, #3 /* Convert to Dwords. */ /* * The input source addresses are at alignment boundary. * Directly compare eight bytes each time. */ .Lloop_aligned: ldr data1, [src1], #8 ldr data2, [src2], #8 .Lstart_realigned: subs limit_wd, limit_wd, #1 eor diff, data1, data2 /* Non-zero if differences found. */ csinv endloop, diff, xzr, cs /* Last Dword or differences. */ cbz endloop, .Lloop_aligned /* Not reached the limit, must have found a diff. */ tbz limit_wd, #63, .Lnot_limit /* Limit % 8 == 0 => the diff is in the last 8 bytes. */ ands limit, limit, #7 b.eq .Lnot_limit /* * The remained bytes less than 8. It is needed to extract valid data * from last eight bytes of the intended memory range. */ lsl limit, limit, #3 /* bytes-> bits. */ mov mask, #~0 CPU_BE( lsr mask, mask, limit ) CPU_LE( lsl mask, mask, limit ) bic data1, data1, mask bic data2, data2, mask orr diff, diff, mask b .Lnot_limit .Lmutual_align: /* * Sources are mutually aligned, but are not currently at an * alignment boundary. Round down the addresses and then mask off * the bytes that precede the start point. */ bic src1, src1, #7 bic src2, src2, #7 ldr data1, [src1], #8 ldr data2, [src2], #8 /* * We can not add limit with alignment offset(tmp1) here. Since the * addition probably make the limit overflown. */ sub limit_wd, limit, #1/*limit != 0, so no underflow.*/ and tmp3, limit_wd, #7 lsr limit_wd, limit_wd, #3 add tmp3, tmp3, tmp1 add limit_wd, limit_wd, tmp3, lsr #3 add limit, limit, tmp1/* Adjust the limit for the extra. */ lsl tmp1, tmp1, #3/* Bytes beyond alignment -> bits.*/ neg tmp1, tmp1/* Bits to alignment -64. 
*/ mov tmp2, #~0 /*mask off the non-intended bytes before the start address.*/ CPU_BE( lsl tmp2, tmp2, tmp1 )/*Big-endian.Early bytes are at MSB*/ /* Little-endian. Early bytes are at LSB. */ CPU_LE( lsr tmp2, tmp2, tmp1 ) orr data1, data1, tmp2 orr data2, data2, tmp2 b .Lstart_realigned /*src1 and src2 have different alignment offset.*/ .Lmisaligned8: cmp limit, #8 b.lo .Ltiny8proc /*limit < 8: compare byte by byte*/ and tmp1, src1, #7 neg tmp1, tmp1 add tmp1, tmp1, #8/*valid length in the first 8 bytes of src1*/ and tmp2, src2, #7 neg tmp2, tmp2 add tmp2, tmp2, #8/*valid length in the first 8 bytes of src2*/ subs tmp3, tmp1, tmp2 csel pos, tmp1, tmp2, hi /*Choose the maximum.*/ sub limit, limit, pos /*compare the proceeding bytes in the first 8 byte segment.*/ .Ltinycmp: ldrb data1w, [src1], #1 ldrb data2w, [src2], #1 subs pos, pos, #1 ccmp data1w, data2w, #0, ne /* NZCV = 0b0000. */ b.eq .Ltinycmp cbnz pos, 1f /*diff occurred before the last byte.*/ cmp data1w, data2w b.eq .Lstart_align 1: sub result, data1, data2 ret .Lstart_align: lsr limit_wd, limit, #3 cbz limit_wd, .Lremain8 ands xzr, src1, #7 b.eq .Lrecal_offset /*process more leading bytes to make src1 aligned...*/ add src1, src1, tmp3 /*backwards src1 to alignment boundary*/ add src2, src2, tmp3 sub limit, limit, tmp3 lsr limit_wd, limit, #3 cbz limit_wd, .Lremain8 /*load 8 bytes from aligned SRC1..*/ ldr data1, [src1], #8 ldr data2, [src2], #8 subs limit_wd, limit_wd, #1 eor diff, data1, data2 /*Non-zero if differences found.*/ csinv endloop, diff, xzr, ne cbnz endloop, .Lunequal_proc /*How far is the current SRC2 from the alignment boundary...*/ and tmp3, tmp3, #7 .Lrecal_offset:/*src1 is aligned now..*/ neg pos, tmp3 .Lloopcmp_proc: /* * Divide the eight bytes into two parts. First,backwards the src2 * to an alignment boundary,load eight bytes and compare from * the SRC2 alignment boundary. If all 8 bytes are equal,then start * the second part's comparison. Otherwise finish the comparison. * This special handle can garantee all the accesses are in the * thread/task space in avoid to overrange access. */ ldr data1, [src1,pos] ldr data2, [src2,pos] eor diff, data1, data2 /* Non-zero if differences found. */ cbnz diff, .Lnot_limit /*The second part process*/ ldr data1, [src1], #8 ldr data2, [src2], #8 eor diff, data1, data2 /* Non-zero if differences found. */ subs limit_wd, limit_wd, #1 csinv endloop, diff, xzr, ne/*if limit_wd is 0,will finish the cmp*/ cbz endloop, .Lloopcmp_proc .Lunequal_proc: cbz diff, .Lremain8 /* There is difference occurred in the latest comparison. */ .Lnot_limit: /* * For little endian,reverse the low significant equal bits into MSB,then * following CLZ can find how many equal bits exist. */ CPU_LE( rev diff, diff ) CPU_LE( rev data1, data1 ) CPU_LE( rev data2, data2 ) /* * The MS-non-zero bit of DIFF marks either the first bit * that is different, or the end of the significant data. * Shifting left now will bring the critical information into the * top bits. */ clz pos, diff lsl data1, data1, pos lsl data2, data2, pos /* * We need to zero-extend (char is unsigned) the value and then * perform a signed subtraction. */ lsr data1, data1, #56 sub result, data1, data2, lsr #56 ret .Lremain8: /* Limit % 8 == 0 =>. all data are equal.*/ ands limit, limit, #7 b.eq .Lret0 .Ltiny8proc: ldrb data1w, [src1], #1 ldrb data2w, [src2], #1 subs limit, limit, #1 ccmp data1w, data2w, #0, ne /* NZCV = 0b0000. */ b.eq .Ltiny8proc sub result, data1, data2 ret .Lret0: mov result, #0 ret ENDPIPROC(memcmp)
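For the little-endian path above, the byte-reverse plus CLZ trick that turns the XOR "syndrome" into a signed result can be expressed compactly in C, with GCC/Clang builtins standing in for the rev and clz instructions; compare_dwords_le is a placeholder name for this sketch.

#include <stdint.h>

/* Compare two 8-byte chunks loaded from little-endian memory, returning the
 * sign of the difference at the first differing byte in memory order. */
static int compare_dwords_le(uint64_t data1, uint64_t data2)
{
    uint64_t diff = data1 ^ data2;          /* non-zero iff the chunks differ */
    if (diff == 0)
        return 0;

    data1 = __builtin_bswap64(data1);       /* rev: memory order becomes big-endian order */
    data2 = __builtin_bswap64(data2);
    diff  = __builtin_bswap64(diff);

    int pos = __builtin_clzll(diff) & ~7;   /* bit offset of the first differing byte */
    unsigned a = (unsigned)((data1 << pos) >> 56);   /* isolate that byte from each word */
    unsigned b = (unsigned)((data2 << pos) >> 56);
    return (int)a - (int)b;
}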
AirFortressIlikara/LS2K0300-linux-4.19
3,518
arch/arm64/lib/strlen.S
/* * Copyright (C) 2013 ARM Ltd. * Copyright (C) 2013 Linaro. * * This code is based on glibc cortex strings work originally authored by Linaro * and re-licensed under GPLv2 for the Linux kernel. The original code can * be found @ * * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/ * files/head:/src/aarch64/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/linkage.h> #include <asm/assembler.h> /* * calculate the length of a string * * Parameters: * x0 - const string pointer * Returns: * x0 - the return length of specific string */ /* Arguments and results. */ srcin .req x0 len .req x0 /* Locals and temporaries. */ src .req x1 data1 .req x2 data2 .req x3 data2a .req x4 has_nul1 .req x5 has_nul2 .req x6 tmp1 .req x7 tmp2 .req x8 tmp3 .req x9 tmp4 .req x10 zeroones .req x11 pos .req x12 #define REP8_01 0x0101010101010101 #define REP8_7f 0x7f7f7f7f7f7f7f7f #define REP8_80 0x8080808080808080 WEAK(strlen) mov zeroones, #REP8_01 bic src, srcin, #15 ands tmp1, srcin, #15 b.ne .Lmisaligned /* * NUL detection works on the principle that (X - 1) & (~X) & 0x80 * (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and * can be done in parallel across the entire word. */ /* * The inner loop deals with two Dwords at a time. This has a * slightly higher start-up cost, but we should win quite quickly, * especially on cores with a high number of issue slots per * cycle, as we get much better parallelism out of the operations. */ .Lloop: ldp data1, data2, [src], #16 .Lrealigned: sub tmp1, data1, zeroones orr tmp2, data1, #REP8_7f sub tmp3, data2, zeroones orr tmp4, data2, #REP8_7f bic has_nul1, tmp1, tmp2 bics has_nul2, tmp3, tmp4 ccmp has_nul1, #0, #0, eq /* NZCV = 0000 */ b.eq .Lloop sub len, src, srcin cbz has_nul1, .Lnul_in_data2 CPU_BE( mov data2, data1 ) /*prepare data to re-calculate the syndrome*/ sub len, len, #8 mov has_nul2, has_nul1 .Lnul_in_data2: /* * For big-endian, carry propagation (if the final byte in the * string is 0x01) means we cannot use has_nul directly. The * easiest way to get the correct byte is to byte-swap the data * and calculate the syndrome a second time. */ CPU_BE( rev data2, data2 ) CPU_BE( sub tmp1, data2, zeroones ) CPU_BE( orr tmp2, data2, #REP8_7f ) CPU_BE( bic has_nul2, tmp1, tmp2 ) sub len, len, #8 rev has_nul2, has_nul2 clz pos, has_nul2 add len, len, pos, lsr #3 /* Bits to bytes. */ ret .Lmisaligned: cmp tmp1, #8 neg tmp1, tmp1 ldp data1, data2, [src], #16 lsl tmp1, tmp1, #3 /* Bytes beyond alignment -> bits. */ mov tmp2, #~0 /* Big-endian. Early bytes are at MSB. */ CPU_BE( lsl tmp2, tmp2, tmp1 ) /* Shift (tmp1 & 63). */ /* Little-endian. Early bytes are at LSB. */ CPU_LE( lsr tmp2, tmp2, tmp1 ) /* Shift (tmp1 & 63). */ orr data1, data1, tmp2 orr data2a, data2, tmp2 csinv data1, data1, xzr, le csel data2, data2, data2a, le b .Lrealigned ENDPIPROC(strlen)
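On little-endian, the syndrome produced by the NUL-detection expression marks the lane of the first zero byte, and lanes below it are never marked spuriously, so its bit position gives the remaining length directly. A minimal C sketch of that step (the assembly reaches the same answer with rev followed by clz); first_nul_index_le is an illustrative name.

#include <stdint.h>

#define REP8_01 0x0101010101010101ULL
#define REP8_80 0x8080808080808080ULL

/* Byte index of the first NUL in a little-endian word known to contain one. */
static unsigned int first_nul_index_le(uint64_t word)
{
    uint64_t has_nul = (word - REP8_01) & ~word & REP8_80;

    /* caller guarantees the word contains a NUL, so has_nul != 0 */
    return (unsigned int)(__builtin_ctzll(has_nul) >> 3);
}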
AirFortressIlikara/LS2K0300-linux-4.19
4,769
arch/arm64/lib/strnlen.S
/* * Copyright (C) 2013 ARM Ltd. * Copyright (C) 2013 Linaro. * * This code is based on glibc cortex strings work originally authored by Linaro * and re-licensed under GPLv2 for the Linux kernel. The original code can * be found @ * * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/ * files/head:/src/aarch64/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/linkage.h> #include <asm/assembler.h> /* * determine the length of a fixed-size string * * Parameters: * x0 - const string pointer * x1 - maximal string length * Returns: * x0 - the return length of specific string */ /* Arguments and results. */ srcin .req x0 len .req x0 limit .req x1 /* Locals and temporaries. */ src .req x2 data1 .req x3 data2 .req x4 data2a .req x5 has_nul1 .req x6 has_nul2 .req x7 tmp1 .req x8 tmp2 .req x9 tmp3 .req x10 tmp4 .req x11 zeroones .req x12 pos .req x13 limit_wd .req x14 #define REP8_01 0x0101010101010101 #define REP8_7f 0x7f7f7f7f7f7f7f7f #define REP8_80 0x8080808080808080 WEAK(strnlen) cbz limit, .Lhit_limit mov zeroones, #REP8_01 bic src, srcin, #15 ands tmp1, srcin, #15 b.ne .Lmisaligned /* Calculate the number of full and partial words -1. */ sub limit_wd, limit, #1 /* Limit != 0, so no underflow. */ lsr limit_wd, limit_wd, #4 /* Convert to Qwords. */ /* * NUL detection works on the principle that (X - 1) & (~X) & 0x80 * (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and * can be done in parallel across the entire word. */ /* * The inner loop deals with two Dwords at a time. This has a * slightly higher start-up cost, but we should win quite quickly, * especially on cores with a high number of issue slots per * cycle, as we get much better parallelism out of the operations. */ .Lloop: ldp data1, data2, [src], #16 .Lrealigned: sub tmp1, data1, zeroones orr tmp2, data1, #REP8_7f sub tmp3, data2, zeroones orr tmp4, data2, #REP8_7f bic has_nul1, tmp1, tmp2 bic has_nul2, tmp3, tmp4 subs limit_wd, limit_wd, #1 orr tmp1, has_nul1, has_nul2 ccmp tmp1, #0, #0, pl /* NZCV = 0000 */ b.eq .Lloop cbz tmp1, .Lhit_limit /* No null in final Qword. */ /* * We know there's a null in the final Qword. The easiest thing * to do now is work out the length of the string and return * MIN (len, limit). */ sub len, src, srcin cbz has_nul1, .Lnul_in_data2 CPU_BE( mov data2, data1 ) /*perpare data to re-calculate the syndrome*/ sub len, len, #8 mov has_nul2, has_nul1 .Lnul_in_data2: /* * For big-endian, carry propagation (if the final byte in the * string is 0x01) means we cannot use has_nul directly. The * easiest way to get the correct byte is to byte-swap the data * and calculate the syndrome a second time. */ CPU_BE( rev data2, data2 ) CPU_BE( sub tmp1, data2, zeroones ) CPU_BE( orr tmp2, data2, #REP8_7f ) CPU_BE( bic has_nul2, tmp1, tmp2 ) sub len, len, #8 rev has_nul2, has_nul2 clz pos, has_nul2 add len, len, pos, lsr #3 /* Bits to bytes. */ cmp len, limit csel len, len, limit, ls /* Return the lower value. 
*/ ret .Lmisaligned: /* * Deal with a partial first word. * We're doing two things in parallel here; * 1) Calculate the number of words (but avoiding overflow if * limit is near ULONG_MAX) - to do this we need to work out * limit + tmp1 - 1 as a 65-bit value before shifting it; * 2) Load and mask the initial data words - we force the bytes * before the ones we are interested in to 0xff - this ensures * early bytes will not hit any zero detection. */ ldp data1, data2, [src], #16 sub limit_wd, limit, #1 and tmp3, limit_wd, #15 lsr limit_wd, limit_wd, #4 add tmp3, tmp3, tmp1 add limit_wd, limit_wd, tmp3, lsr #4 neg tmp4, tmp1 lsl tmp4, tmp4, #3 /* Bytes beyond alignment -> bits. */ mov tmp2, #~0 /* Big-endian. Early bytes are at MSB. */ CPU_BE( lsl tmp2, tmp2, tmp4 ) /* Shift (tmp1 & 63). */ /* Little-endian. Early bytes are at LSB. */ CPU_LE( lsr tmp2, tmp2, tmp4 ) /* Shift (tmp1 & 63). */ cmp tmp1, #8 orr data1, data1, tmp2 orr data2a, data2, tmp2 csinv data1, data1, xzr, le csel data2, data2, data2a, le b .Lrealigned .Lhit_limit: mov len, limit ret ENDPIPROC(strnlen)
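The comment above notes that limit + tmp1 - 1 must be treated as a 65-bit value before shifting, since limit may be near ULONG_MAX. The split computation it describes can be written in C as follows; qwords_minus_one and offset are illustrative names.

#include <stdint.h>

/* Overflow-safe evaluation of (limit + offset - 1) >> 4 for limit != 0
 * and offset <= 15, mirroring the limit_wd/tmp3 arithmetic above. */
static uint64_t qwords_minus_one(uint64_t limit, uint64_t offset)
{
    uint64_t lw  = (limit - 1) >> 4;            /* high part */
    uint64_t rem = ((limit - 1) & 15) + offset; /* low part, at most 30 */

    return lw + (rem >> 4);                     /* never overflows 64 bits */
}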
AirFortressIlikara/LS2K0300-linux-4.19
1,086
arch/arm64/lib/strchr.S
/* * Based on arch/arm/lib/strchr.S * * Copyright (C) 1995-2000 Russell King * Copyright (C) 2013 ARM Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/linkage.h> #include <asm/assembler.h> /* * Find the first occurrence of a character in a string. * * Parameters: * x0 - str * x1 - c * Returns: * x0 - address of first occurrence of 'c' or 0 */ WEAK(strchr) and w1, w1, #0xff 1: ldrb w2, [x0], #1 cmp w2, w1 ccmp w2, wzr, #4, ne b.ne 1b sub x0, x0, #1 cmp w2, w1 csel x0, x0, xzr, eq ret ENDPROC(strchr)
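The ccmp in the loop above folds the "found the character" and "hit the terminator" tests into a single conditional branch; a byte-wise C equivalent of the whole routine, for reference (strchr_sketch is a placeholder name):

#include <stddef.h>

/* Stop on either a match or the terminating NUL, then return the address
 * only if the stop was caused by a match (matching strchr semantics,
 * including a search for '\0' returning the terminator's address). */
static const char *strchr_sketch(const char *s, int c)
{
    unsigned char ch = (unsigned char)c;
    unsigned char cur;

    do {
        cur = (unsigned char)*s++;
    } while (cur != ch && cur != '\0');

    return (cur == ch) ? s - 1 : NULL;
}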
AirFortressIlikara/LS2K0300-linux-4.19
6,683
arch/arm64/lib/strcmp.S
/* * Copyright (C) 2013 ARM Ltd. * Copyright (C) 2013 Linaro. * * This code is based on glibc cortex strings work originally authored by Linaro * and re-licensed under GPLv2 for the Linux kernel. The original code can * be found @ * * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/ * files/head:/src/aarch64/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/linkage.h> #include <asm/assembler.h> /* * compare two strings * * Parameters: * x0 - const string 1 pointer * x1 - const string 2 pointer * Returns: * x0 - an integer less than, equal to, or greater than zero * if s1 is found, respectively, to be less than, to match, * or be greater than s2. */ #define REP8_01 0x0101010101010101 #define REP8_7f 0x7f7f7f7f7f7f7f7f #define REP8_80 0x8080808080808080 /* Parameters and result. */ src1 .req x0 src2 .req x1 result .req x0 /* Internal variables. */ data1 .req x2 data1w .req w2 data2 .req x3 data2w .req w3 has_nul .req x4 diff .req x5 syndrome .req x6 tmp1 .req x7 tmp2 .req x8 tmp3 .req x9 zeroones .req x10 pos .req x11 WEAK(strcmp) eor tmp1, src1, src2 mov zeroones, #REP8_01 tst tmp1, #7 b.ne .Lmisaligned8 ands tmp1, src1, #7 b.ne .Lmutual_align /* * NUL detection works on the principle that (X - 1) & (~X) & 0x80 * (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and * can be done in parallel across the entire word. */ .Lloop_aligned: ldr data1, [src1], #8 ldr data2, [src2], #8 .Lstart_realigned: sub tmp1, data1, zeroones orr tmp2, data1, #REP8_7f eor diff, data1, data2 /* Non-zero if differences found. */ bic has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */ orr syndrome, diff, has_nul cbz syndrome, .Lloop_aligned b .Lcal_cmpresult .Lmutual_align: /* * Sources are mutually aligned, but are not currently at an * alignment boundary. Round down the addresses and then mask off * the bytes that preceed the start point. */ bic src1, src1, #7 bic src2, src2, #7 lsl tmp1, tmp1, #3 /* Bytes beyond alignment -> bits. */ ldr data1, [src1], #8 neg tmp1, tmp1 /* Bits to alignment -64. */ ldr data2, [src2], #8 mov tmp2, #~0 /* Big-endian. Early bytes are at MSB. */ CPU_BE( lsl tmp2, tmp2, tmp1 ) /* Shift (tmp1 & 63). */ /* Little-endian. Early bytes are at LSB. */ CPU_LE( lsr tmp2, tmp2, tmp1 ) /* Shift (tmp1 & 63). */ orr data1, data1, tmp2 orr data2, data2, tmp2 b .Lstart_realigned .Lmisaligned8: /* * Get the align offset length to compare per byte first. * After this process, one string's address will be aligned. */ and tmp1, src1, #7 neg tmp1, tmp1 add tmp1, tmp1, #8 and tmp2, src2, #7 neg tmp2, tmp2 add tmp2, tmp2, #8 subs tmp3, tmp1, tmp2 csel pos, tmp1, tmp2, hi /*Choose the maximum. */ .Ltinycmp: ldrb data1w, [src1], #1 ldrb data2w, [src2], #1 subs pos, pos, #1 ccmp data1w, #1, #0, ne /* NZCV = 0b0000. */ ccmp data1w, data2w, #0, cs /* NZCV = 0b0000. 
*/ b.eq .Ltinycmp cbnz pos, 1f /*find the null or unequal...*/ cmp data1w, #1 ccmp data1w, data2w, #0, cs b.eq .Lstart_align /*the last bytes are equal....*/ 1: sub result, data1, data2 ret .Lstart_align: ands xzr, src1, #7 b.eq .Lrecal_offset /*process more leading bytes to make str1 aligned...*/ add src1, src1, tmp3 add src2, src2, tmp3 /*load 8 bytes from aligned str1 and non-aligned str2..*/ ldr data1, [src1], #8 ldr data2, [src2], #8 sub tmp1, data1, zeroones orr tmp2, data1, #REP8_7f bic has_nul, tmp1, tmp2 eor diff, data1, data2 /* Non-zero if differences found. */ orr syndrome, diff, has_nul cbnz syndrome, .Lcal_cmpresult /*How far is the current str2 from the alignment boundary...*/ and tmp3, tmp3, #7 .Lrecal_offset: neg pos, tmp3 .Lloopcmp_proc: /* * Divide the eight bytes into two parts. First,backwards the src2 * to an alignment boundary,load eight bytes from the SRC2 alignment * boundary,then compare with the relative bytes from SRC1. * If all 8 bytes are equal,then start the second part's comparison. * Otherwise finish the comparison. * This special handle can garantee all the accesses are in the * thread/task space in avoid to overrange access. */ ldr data1, [src1,pos] ldr data2, [src2,pos] sub tmp1, data1, zeroones orr tmp2, data1, #REP8_7f bic has_nul, tmp1, tmp2 eor diff, data1, data2 /* Non-zero if differences found. */ orr syndrome, diff, has_nul cbnz syndrome, .Lcal_cmpresult /*The second part process*/ ldr data1, [src1], #8 ldr data2, [src2], #8 sub tmp1, data1, zeroones orr tmp2, data1, #REP8_7f bic has_nul, tmp1, tmp2 eor diff, data1, data2 /* Non-zero if differences found. */ orr syndrome, diff, has_nul cbz syndrome, .Lloopcmp_proc .Lcal_cmpresult: /* * reversed the byte-order as big-endian,then CLZ can find the most * significant zero bits. */ CPU_LE( rev syndrome, syndrome ) CPU_LE( rev data1, data1 ) CPU_LE( rev data2, data2 ) /* * For big-endian we cannot use the trick with the syndrome value * as carry-propagation can corrupt the upper bits if the trailing * bytes in the string contain 0x01. * However, if there is no NUL byte in the dword, we can generate * the result directly. We ca not just subtract the bytes as the * MSB might be significant. */ CPU_BE( cbnz has_nul, 1f ) CPU_BE( cmp data1, data2 ) CPU_BE( cset result, ne ) CPU_BE( cneg result, result, lo ) CPU_BE( ret ) CPU_BE( 1: ) /*Re-compute the NUL-byte detection, using a byte-reversed value. */ CPU_BE( rev tmp3, data1 ) CPU_BE( sub tmp1, tmp3, zeroones ) CPU_BE( orr tmp2, tmp3, #REP8_7f ) CPU_BE( bic has_nul, tmp1, tmp2 ) CPU_BE( rev has_nul, has_nul ) CPU_BE( orr syndrome, diff, has_nul ) clz pos, syndrome /* * The MS-non-zero bit of the syndrome marks either the first bit * that is different, or the top bit of the first zero byte. * Shifting left now will bring the critical information into the * top bits. */ lsl data1, data1, pos lsl data2, data2, pos /* * But we need to zero-extend (char is unsigned) the value and then * perform a signed 32-bit subtraction. */ lsr data1, data1, #56 sub result, data1, data2, lsr #56 ret ENDPIPROC(strcmp)
AirFortressIlikara/LS2K0300-linux-4.19
1,778
arch/arm64/kvm/hyp.S
/* * Copyright (C) 2012,2013 - ARM Ltd * Author: Marc Zyngier <marc.zyngier@arm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/linkage.h> #include <asm/alternative.h> #include <asm/assembler.h> #include <asm/cpufeature.h> /* * u64 __kvm_call_hyp(void *hypfn, ...); * * This is not really a variadic function in the classic C-way and care must * be taken when calling this to ensure parameters are passed in registers * only, since the stack will change between the caller and the callee. * * Call the function with the first argument containing a pointer to the * function you wish to call in Hyp mode, and subsequent arguments will be * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the * function pointer can be passed). The function being called must be mapped * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are * passed in x0. * * A function pointer with a value less than 0xfff has a special meaning, * and is used to implement hyp stubs in the same way as in * arch/arm64/kernel/hyp_stub.S. */ ENTRY(__kvm_call_hyp) alternative_if_not ARM64_HAS_VIRT_HOST_EXTN hvc #0 ret alternative_else_nop_endif b __vhe_hyp_call ENDPROC(__kvm_call_hyp)
AirFortressIlikara/LS2K0300-linux-4.19
4,221
arch/arm64/kvm/hyp-init.S
/* * Copyright (C) 2012,2013 - ARM Ltd * Author: Marc Zyngier <marc.zyngier@arm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2, as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/kvm_arm.h> #include <asm/kvm_mmu.h> #include <asm/pgtable-hwdef.h> #include <asm/sysreg.h> #include <asm/virt.h> .text .pushsection .hyp.idmap.text, "ax" .align 11 ENTRY(__kvm_hyp_init) ventry __invalid // Synchronous EL2t ventry __invalid // IRQ EL2t ventry __invalid // FIQ EL2t ventry __invalid // Error EL2t ventry __invalid // Synchronous EL2h ventry __invalid // IRQ EL2h ventry __invalid // FIQ EL2h ventry __invalid // Error EL2h ventry __do_hyp_init // Synchronous 64-bit EL1 ventry __invalid // IRQ 64-bit EL1 ventry __invalid // FIQ 64-bit EL1 ventry __invalid // Error 64-bit EL1 ventry __invalid // Synchronous 32-bit EL1 ventry __invalid // IRQ 32-bit EL1 ventry __invalid // FIQ 32-bit EL1 ventry __invalid // Error 32-bit EL1 __invalid: b . /* * x0: HYP pgd * x1: HYP stack * x2: HYP vectors * x3: per-CPU offset */ __do_hyp_init: /* Check for a stub HVC call */ cmp x0, #HVC_STUB_HCALL_NR b.lo __kvm_handle_stub_hvc phys_to_ttbr x4, x0 msr ttbr0_el2, x4 mrs x4, tcr_el1 ldr x5, =TCR_EL2_MASK and x4, x4, x5 mov x5, #TCR_EL2_RES1 orr x4, x4, x5 /* * The ID map may be configured to use an extended virtual address * range. This is only the case if system RAM is out of range for the * currently configured page size and VA_BITS, in which case we will * also need the extended virtual range for the HYP ID map, or we won't * be able to enable the EL2 MMU. * * However, at EL2, there is only one TTBR register, and we can't switch * between translation tables *and* update TCR_EL2.T0SZ at the same * time. Bottom line: we need to use the extended range with *both* our * translation tables. * * So use the same T0SZ value we use for the ID map. */ ldr_l x5, idmap_t0sz bfi x4, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH /* * Set the PS bits in TCR_EL2. */ tcr_compute_pa_size x4, #TCR_EL2_PS_SHIFT, x5, x6 msr tcr_el2, x4 mrs x4, mair_el1 msr mair_el2, x4 isb /* Invalidate the stale TLBs from Bootloader */ tlbi alle2 dsb sy /* * Preserve all the RES1 bits while setting the default flags, * as well as the EE bit on BE. Drop the A flag since the compiler * is allowed to generate unaligned accesses. */ ldr x4, =(SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A)) CPU_BE( orr x4, x4, #SCTLR_ELx_EE) msr sctlr_el2, x4 isb /* Set the stack and new vectors */ kern_hyp_va x1 mov sp, x1 msr vbar_el2, x2 /* Set tpidr_el2 for use by HYP */ msr tpidr_el2, x3 /* Hello, World! 
*/ eret ENDPROC(__kvm_hyp_init) ENTRY(__kvm_handle_stub_hvc) cmp x0, #HVC_SOFT_RESTART b.ne 1f /* This is where we're about to jump, staying at EL2 */ msr elr_el2, x1 mov x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT | PSR_MODE_EL2h) msr spsr_el2, x0 /* Shuffle the arguments, and don't come back */ mov x0, x2 mov x1, x3 mov x2, x4 b reset 1: cmp x0, #HVC_RESET_VECTORS b.ne 1f /* * Set the HVC_RESET_VECTORS return code before entering the common * path so that we do not clobber x0-x2 in case we are coming via * HVC_SOFT_RESTART. */ mov x0, xzr reset: /* Reset kvm back to the hyp stub. */ mrs x5, sctlr_el2 ldr x6, =SCTLR_ELx_FLAGS bic x5, x5, x6 // Clear SCTL_M and etc pre_disable_mmu_workaround msr sctlr_el2, x5 isb /* Install stub vectors */ adr_l x5, __hyp_stub_vectors msr vbar_el2, x5 eret 1: /* Bad stub call */ ldr x0, =HVC_STUB_ERR eret ENDPROC(__kvm_handle_stub_hvc) .ltorg .popsection
AirFortressIlikara/LS2K0300-linux-4.19
6,017
arch/arm64/mm/cache.S
/* * Cache maintenance * * Copyright (C) 2001 Deep Blue Solutions Ltd. * Copyright (C) 2012 ARM Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/errno.h> #include <linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> #include <asm/cpufeature.h> #include <asm/alternative.h> #include <asm/asm-uaccess.h> /* * flush_icache_range(start,end) * * Ensure that the I and D caches are coherent within specified region. * This is typically used when code has been written to a memory region, * and will be executed. * * - start - virtual start address of region * - end - virtual end address of region */ ENTRY(__flush_icache_range) /* FALLTHROUGH */ /* * __flush_cache_user_range(start,end) * * Ensure that the I and D caches are coherent within specified region. * This is typically used when code has been written to a memory region, * and will be executed. * * - start - virtual start address of region * - end - virtual end address of region */ ENTRY(__flush_cache_user_range) uaccess_ttbr0_enable x2, x3, x4 alternative_if ARM64_HAS_CACHE_IDC dsb ishst b 7f alternative_else_nop_endif dcache_line_size x2, x3 sub x3, x2, #1 bic x4, x0, x3 1: user_alt 9f, "dc cvau, x4", "dc civac, x4", ARM64_WORKAROUND_CLEAN_CACHE add x4, x4, x2 cmp x4, x1 b.lo 1b dsb ish 7: alternative_if ARM64_HAS_CACHE_DIC isb b 8f alternative_else_nop_endif invalidate_icache_by_line x0, x1, x2, x3, 9f 8: mov x0, #0 1: uaccess_ttbr0_disable x1, x2 ret 9: mov x0, #-EFAULT b 1b ENDPROC(__flush_icache_range) ENDPROC(__flush_cache_user_range) /* * invalidate_icache_range(start,end) * * Ensure that the I cache is invalid within specified region. * * - start - virtual start address of region * - end - virtual end address of region */ ENTRY(invalidate_icache_range) alternative_if ARM64_HAS_CACHE_DIC mov x0, xzr isb ret alternative_else_nop_endif uaccess_ttbr0_enable x2, x3, x4 invalidate_icache_by_line x0, x1, x2, x3, 2f mov x0, xzr 1: uaccess_ttbr0_disable x1, x2 ret 2: mov x0, #-EFAULT b 1b ENDPROC(invalidate_icache_range) /* * __flush_dcache_area(kaddr, size) * * Ensure that any D-cache lines for the interval [kaddr, kaddr+size) * are cleaned and invalidated to the PoC. * * - kaddr - kernel address * - size - size in question */ ENTRY(__flush_dcache_area) dcache_by_line_op civac, sy, x0, x1, x2, x3 ret ENDPIPROC(__flush_dcache_area) /* * __clean_dcache_area_pou(kaddr, size) * * Ensure that any D-cache lines for the interval [kaddr, kaddr+size) * are cleaned to the PoU. * * - kaddr - kernel address * - size - size in question */ ENTRY(__clean_dcache_area_pou) alternative_if ARM64_HAS_CACHE_IDC dsb ishst ret alternative_else_nop_endif dcache_by_line_op cvau, ish, x0, x1, x2, x3 ret ENDPROC(__clean_dcache_area_pou) /* * __inval_dcache_area(kaddr, size) * * Ensure that any D-cache lines for the interval [kaddr, kaddr+size) * are invalidated. Any partial lines at the ends of the interval are * also cleaned to PoC to prevent data loss. 
* * - kaddr - kernel address * - size - size in question */ ENTRY(__inval_dcache_area) /* FALLTHROUGH */ /* * __dma_inv_area(start, size) * - start - virtual start address of region * - size - size in question */ __dma_inv_area: add x1, x1, x0 dcache_line_size x2, x3 sub x3, x2, #1 tst x1, x3 // end cache line aligned? bic x1, x1, x3 b.eq 1f dc civac, x1 // clean & invalidate D / U line 1: tst x0, x3 // start cache line aligned? bic x0, x0, x3 b.eq 2f dc civac, x0 // clean & invalidate D / U line b 3f 2: dc ivac, x0 // invalidate D / U line 3: add x0, x0, x2 cmp x0, x1 b.lo 2b dsb sy ret ENDPIPROC(__inval_dcache_area) ENDPROC(__dma_inv_area) /* * __clean_dcache_area_poc(kaddr, size) * * Ensure that any D-cache lines for the interval [kaddr, kaddr+size) * are cleaned to the PoC. * * - kaddr - kernel address * - size - size in question */ ENTRY(__clean_dcache_area_poc) /* FALLTHROUGH */ /* * __dma_clean_area(start, size) * - start - virtual start address of region * - size - size in question */ __dma_clean_area: dcache_by_line_op cvac, sy, x0, x1, x2, x3 ret ENDPIPROC(__clean_dcache_area_poc) ENDPROC(__dma_clean_area) /* * __clean_dcache_area_pop(kaddr, size) * * Ensure that any D-cache lines for the interval [kaddr, kaddr+size) * are cleaned to the PoP. * * - kaddr - kernel address * - size - size in question */ ENTRY(__clean_dcache_area_pop) alternative_if_not ARM64_HAS_DCPOP b __clean_dcache_area_poc alternative_else_nop_endif dcache_by_line_op cvap, sy, x0, x1, x2, x3 ret ENDPIPROC(__clean_dcache_area_pop) /* * __dma_flush_area(start, size) * * clean & invalidate D / U line * * - start - virtual start address of region * - size - size in question */ ENTRY(__dma_flush_area) dcache_by_line_op civac, sy, x0, x1, x2, x3 ret ENDPIPROC(__dma_flush_area) /* * __dma_map_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(__dma_map_area) cmp w2, #DMA_FROM_DEVICE b.eq __dma_inv_area b __dma_clean_area ENDPIPROC(__dma_map_area) /* * __dma_unmap_area(start, size, dir) * - start - kernel virtual start address * - size - size of region * - dir - DMA direction */ ENTRY(__dma_unmap_area) cmp w2, #DMA_TO_DEVICE b.ne __dma_inv_area ret ENDPIPROC(__dma_unmap_area)
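The by-line maintenance pattern used throughout this file (read the line size from CTR_EL0, align the start address down to a line boundary, then loop one line at a time) can be sketched in C with inline assembly. This assumes an AArch64 target and that EL0 access to CTR_EL0 and the DC instructions is enabled (SCTLR_EL1.UCT/UCI), as Linux normally configures; it illustrates the pattern and is not a substitute for the kernel routines.

#include <stdint.h>

/* Clean the D-cache to the point of unification, one line at a time. */
static void clean_dcache_range_sketch(uintptr_t start, uintptr_t end)
{
    uint64_t ctr;
    __asm__ volatile("mrs %0, ctr_el0" : "=r"(ctr));

    /* CTR_EL0.DminLine (bits 19:16) is log2 of the D-cache line size in words */
    uintptr_t line = 4u << ((ctr >> 16) & 0xf);

    for (uintptr_t addr = start & ~(line - 1); addr < end; addr += line)
        __asm__ volatile("dc cvau, %0" : : "r"(addr) : "memory");

    __asm__ volatile("dsb ish" ::: "memory");   /* order the maintenance before later accesses */
}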
AirFortressIlikara/LS2K0300-linux-4.19
10,975
arch/arm64/mm/proc.S
/* * Based on arch/arm/mm/proc.S * * Copyright (C) 2001 Deep Blue Solutions Ltd. * Copyright (C) 2012 ARM Ltd. * Author: Catalin Marinas <catalin.marinas@arm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/init.h> #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/asm-offsets.h> #include <asm/hwcap.h> #include <asm/pgtable.h> #include <asm/pgtable-hwdef.h> #include <asm/cpufeature.h> #include <asm/alternative.h> #ifdef CONFIG_ARM64_64K_PAGES #define TCR_TG_FLAGS TCR_TG0_64K | TCR_TG1_64K #elif defined(CONFIG_ARM64_16K_PAGES) #define TCR_TG_FLAGS TCR_TG0_16K | TCR_TG1_16K #else /* CONFIG_ARM64_4K_PAGES */ #define TCR_TG_FLAGS TCR_TG0_4K | TCR_TG1_4K #endif #ifdef CONFIG_RANDOMIZE_BASE #define TCR_KASLR_FLAGS TCR_NFD1 #else #define TCR_KASLR_FLAGS 0 #endif #define TCR_SMP_FLAGS TCR_SHARED /* PTWs cacheable, inner/outer WBWA */ #define TCR_CACHE_FLAGS TCR_IRGN_WBWA | TCR_ORGN_WBWA #define MAIR(attr, mt) ((attr) << ((mt) * 8)) /* * cpu_do_idle() * * Idle the processor (wait for interrupt). */ ENTRY(cpu_do_idle) dsb sy // WFI may enter a low-power mode wfi ret ENDPROC(cpu_do_idle) #ifdef CONFIG_CPU_PM /** * cpu_do_suspend - save CPU registers context * * x0: virtual address of context pointer */ ENTRY(cpu_do_suspend) mrs x2, tpidr_el0 mrs x3, tpidrro_el0 mrs x4, contextidr_el1 mrs x5, osdlr_el1 mrs x6, cpacr_el1 mrs x7, tcr_el1 mrs x8, vbar_el1 mrs x9, mdscr_el1 mrs x10, oslsr_el1 mrs x11, sctlr_el1 alternative_if_not ARM64_HAS_VIRT_HOST_EXTN mrs x12, tpidr_el1 alternative_else mrs x12, tpidr_el2 alternative_endif mrs x13, sp_el0 stp x2, x3, [x0] stp x4, x5, [x0, #16] stp x6, x7, [x0, #32] stp x8, x9, [x0, #48] stp x10, x11, [x0, #64] stp x12, x13, [x0, #80] ret ENDPROC(cpu_do_suspend) /** * cpu_do_resume - restore CPU register context * * x0: Address of context pointer */ .pushsection ".idmap.text", "awx" ENTRY(cpu_do_resume) ldp x2, x3, [x0] ldp x4, x5, [x0, #16] ldp x6, x8, [x0, #32] ldp x9, x10, [x0, #48] ldp x11, x12, [x0, #64] ldp x13, x14, [x0, #80] msr tpidr_el0, x2 msr tpidrro_el0, x3 msr contextidr_el1, x4 msr cpacr_el1, x6 /* Don't change t0sz here, mask those bits when restoring */ mrs x7, tcr_el1 bfi x8, x7, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH msr tcr_el1, x8 msr vbar_el1, x9 /* * __cpu_setup() cleared MDSCR_EL1.MDE and friends, before unmasking * debug exceptions. By restoring MDSCR_EL1 here, we may take a debug * exception. Mask them until local_daif_restore() in cpu_suspend() * resets them. 
*/ disable_daif msr mdscr_el1, x10 msr sctlr_el1, x12 alternative_if_not ARM64_HAS_VIRT_HOST_EXTN msr tpidr_el1, x13 alternative_else msr tpidr_el2, x13 alternative_endif msr sp_el0, x14 /* * Restore oslsr_el1 by writing oslar_el1 */ msr osdlr_el1, x5 ubfx x11, x11, #1, #1 msr oslar_el1, x11 reset_pmuserenr_el0 x0 // Disable PMU access from EL0 alternative_if ARM64_HAS_RAS_EXTN msr_s SYS_DISR_EL1, xzr alternative_else_nop_endif isb ret ENDPROC(cpu_do_resume) .popsection #endif /* * cpu_do_switch_mm(pgd_phys, tsk) * * Set the translation table base pointer to be pgd_phys. * * - pgd_phys - physical address of new TTB */ ENTRY(cpu_do_switch_mm) mrs x2, ttbr1_el1 mmid x1, x1 // get mm->context.id phys_to_ttbr x3, x0 #ifdef CONFIG_ARM64_SW_TTBR0_PAN bfi x3, x1, #48, #16 // set the ASID field in TTBR0 #endif bfi x2, x1, #48, #16 // set the ASID msr ttbr1_el1, x2 // in TTBR1 (since TCR.A1 is set) isb msr ttbr0_el1, x3 // now update TTBR0 isb b post_ttbr_update_workaround // Back to C code... ENDPROC(cpu_do_switch_mm) .pushsection ".idmap.text", "awx" .macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2 adrp \tmp1, empty_zero_page phys_to_ttbr \tmp2, \tmp1 msr ttbr1_el1, \tmp2 isb tlbi vmalle1 dsb nsh isb .endm /* * void idmap_cpu_replace_ttbr1(phys_addr_t new_pgd) * * This is the low-level counterpart to cpu_replace_ttbr1, and should not be * called by anything else. It can only be executed from a TTBR0 mapping. */ ENTRY(idmap_cpu_replace_ttbr1) save_and_disable_daif flags=x2 __idmap_cpu_set_reserved_ttbr1 x1, x3 phys_to_ttbr x3, x0 msr ttbr1_el1, x3 isb restore_daif x2 ret ENDPROC(idmap_cpu_replace_ttbr1) .popsection #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 .pushsection ".idmap.text", "awx" .macro __idmap_kpti_get_pgtable_ent, type dc cvac, cur_\()\type\()p // Ensure any existing dirty dmb sy // lines are written back before ldr \type, [cur_\()\type\()p] // loading the entry tbz \type, #0, skip_\()\type // Skip invalid and tbnz \type, #11, skip_\()\type // non-global entries .endm .macro __idmap_kpti_put_pgtable_ent_ng, type orr \type, \type, #PTE_NG // Same bit for blocks and pages str \type, [cur_\()\type\()p] // Update the entry and ensure dmb sy // that it is visible to all dc civac, cur_\()\type\()p // CPUs. .endm /* * void __kpti_install_ng_mappings(int cpu, int num_cpus, phys_addr_t swapper) * * Called exactly once from stop_machine context by each CPU found during boot. */ __idmap_kpti_flag: .long 1 ENTRY(idmap_kpti_install_ng_mappings) cpu .req w0 num_cpus .req w1 swapper_pa .req x2 swapper_ttb .req x3 flag_ptr .req x4 cur_pgdp .req x5 end_pgdp .req x6 pgd .req x7 cur_pudp .req x8 end_pudp .req x9 pud .req x10 cur_pmdp .req x11 end_pmdp .req x12 pmd .req x13 cur_ptep .req x14 end_ptep .req x15 pte .req x16 mrs swapper_ttb, ttbr1_el1 adr flag_ptr, __idmap_kpti_flag cbnz cpu, __idmap_kpti_secondary /* We're the boot CPU. Wait for the others to catch up */ sevl 1: wfe ldaxr w18, [flag_ptr] eor w18, w18, num_cpus cbnz w18, 1b /* We need to walk swapper, so turn off the MMU. */ pre_disable_mmu_workaround mrs x18, sctlr_el1 bic x18, x18, #SCTLR_ELx_M msr sctlr_el1, x18 isb /* Everybody is enjoying the idmap, so we can rewrite swapper. 
*/ /* PGD */ mov cur_pgdp, swapper_pa add end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8) do_pgd: __idmap_kpti_get_pgtable_ent pgd tbnz pgd, #1, walk_puds next_pgd: __idmap_kpti_put_pgtable_ent_ng pgd skip_pgd: add cur_pgdp, cur_pgdp, #8 cmp cur_pgdp, end_pgdp b.ne do_pgd /* Publish the updated tables and nuke all the TLBs */ dsb sy tlbi vmalle1is dsb ish isb /* We're done: fire up the MMU again */ mrs x18, sctlr_el1 orr x18, x18, #SCTLR_ELx_M msr sctlr_el1, x18 isb /* * Invalidate the local I-cache so that any instructions fetched * speculatively from the PoC are discarded, since they may have * been dynamically patched at the PoU. */ ic iallu dsb nsh isb /* Set the flag to zero to indicate that we're all done */ str wzr, [flag_ptr] ret /* PUD */ walk_puds: .if CONFIG_PGTABLE_LEVELS > 3 pte_to_phys cur_pudp, pgd add end_pudp, cur_pudp, #(PTRS_PER_PUD * 8) do_pud: __idmap_kpti_get_pgtable_ent pud tbnz pud, #1, walk_pmds next_pud: __idmap_kpti_put_pgtable_ent_ng pud skip_pud: add cur_pudp, cur_pudp, 8 cmp cur_pudp, end_pudp b.ne do_pud b next_pgd .else /* CONFIG_PGTABLE_LEVELS <= 3 */ mov pud, pgd b walk_pmds next_pud: b next_pgd .endif /* PMD */ walk_pmds: .if CONFIG_PGTABLE_LEVELS > 2 pte_to_phys cur_pmdp, pud add end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8) do_pmd: __idmap_kpti_get_pgtable_ent pmd tbnz pmd, #1, walk_ptes next_pmd: __idmap_kpti_put_pgtable_ent_ng pmd skip_pmd: add cur_pmdp, cur_pmdp, #8 cmp cur_pmdp, end_pmdp b.ne do_pmd b next_pud .else /* CONFIG_PGTABLE_LEVELS <= 2 */ mov pmd, pud b walk_ptes next_pmd: b next_pud .endif /* PTE */ walk_ptes: pte_to_phys cur_ptep, pmd add end_ptep, cur_ptep, #(PTRS_PER_PTE * 8) do_pte: __idmap_kpti_get_pgtable_ent pte __idmap_kpti_put_pgtable_ent_ng pte skip_pte: add cur_ptep, cur_ptep, #8 cmp cur_ptep, end_ptep b.ne do_pte b next_pmd /* Secondary CPUs end up here */ __idmap_kpti_secondary: /* Uninstall swapper before surgery begins */ __idmap_cpu_set_reserved_ttbr1 x18, x17 /* Increment the flag to let the boot CPU we're ready */ 1: ldxr w18, [flag_ptr] add w18, w18, #1 stxr w17, w18, [flag_ptr] cbnz w17, 1b /* Wait for the boot CPU to finish messing around with swapper */ sevl 1: wfe ldxr w18, [flag_ptr] cbnz w18, 1b /* All done, act like nothing happened */ msr ttbr1_el1, swapper_ttb isb ret .unreq cpu .unreq num_cpus .unreq swapper_pa .unreq swapper_ttb .unreq flag_ptr .unreq cur_pgdp .unreq end_pgdp .unreq pgd .unreq cur_pudp .unreq end_pudp .unreq pud .unreq cur_pmdp .unreq end_pmdp .unreq pmd .unreq cur_ptep .unreq end_ptep .unreq pte ENDPROC(idmap_kpti_install_ng_mappings) .popsection #endif /* * __cpu_setup * * Initialise the processor for turning the MMU on. Return in x0 the * value of the SCTLR_EL1 register. 
*/ .pushsection ".idmap.text", "awx" ENTRY(__cpu_setup) tlbi vmalle1 // Invalidate local TLB dsb nsh mov x0, #3 << 20 msr cpacr_el1, x0 // Enable FP/ASIMD mov x0, #1 << 12 // Reset mdscr_el1 and disable msr mdscr_el1, x0 // access to the DCC from EL0 isb // Unmask debug exceptions now, enable_dbg // since this is per-cpu reset_pmuserenr_el0 x0 // Disable PMU access from EL0 /* * Memory region attributes for LPAE: * * n = AttrIndx[2:0] * n MAIR * DEVICE_nGnRnE 000 00000000 * DEVICE_nGnRE 001 00000100 * DEVICE_GRE 010 00001100 * NORMAL_NC 011 01000100 * NORMAL 100 11111111 * NORMAL_WT 101 10111011 */ ldr x5, =MAIR(0x00, MT_DEVICE_nGnRnE) | \ MAIR(0x04, MT_DEVICE_nGnRE) | \ MAIR(0x0c, MT_DEVICE_GRE) | \ MAIR(0x44, MT_NORMAL_NC) | \ MAIR(0xff, MT_NORMAL) | \ MAIR(0xbb, MT_NORMAL_WT) msr mair_el1, x5 /* * Prepare SCTLR */ mov_q x0, SCTLR_EL1_SET /* * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for * both user and kernel. */ ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \ TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \ TCR_TBI0 | TCR_A1 tcr_set_idmap_t0sz x10, x9 /* * Set the IPS bits in TCR_EL1. */ tcr_compute_pa_size x10, #TCR_IPS_SHIFT, x5, x6 #ifdef CONFIG_ARM64_HW_AFDBM /* * Enable hardware update of the Access Flags bit. * Hardware dirty bit management is enabled later, * via capabilities. */ mrs x9, ID_AA64MMFR1_EL1 and x9, x9, #0xf cbz x9, 1f orr x10, x10, #TCR_HA // hardware Access flag update 1: #endif /* CONFIG_ARM64_HW_AFDBM */ msr tcr_el1, x10 ret // return to head.S ENDPROC(__cpu_setup)
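As a worked example of the MAIR(attr, mt) packing used by __cpu_setup above (illustration only; the MT_* index order is taken straight from the n = AttrIndx[2:0] table in the comment):

#include <stdint.h>
#include <stdio.h>

#define MAIR(attr, mt)  ((uint64_t)(attr) << ((mt) * 8))

enum { MT_DEVICE_nGnRnE = 0, MT_DEVICE_nGnRE, MT_DEVICE_GRE,
       MT_NORMAL_NC, MT_NORMAL, MT_NORMAL_WT };

int main(void)
{
        uint64_t mair = MAIR(0x00, MT_DEVICE_nGnRnE) |
                        MAIR(0x04, MT_DEVICE_nGnRE)  |
                        MAIR(0x0c, MT_DEVICE_GRE)    |
                        MAIR(0x44, MT_NORMAL_NC)     |
                        MAIR(0xff, MT_NORMAL)        |
                        MAIR(0xbb, MT_NORMAL_WT);

        /* Prints 0x0000bbff440c0400: byte N holds the attributes for index N. */
        printf("MAIR_EL1 = %#018llx\n", (unsigned long long)mair);
        return 0;
}

Each memory type then selects its attribute byte out of MAIR_EL1 via the AttrIndx field of the page-table entry.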
AirFortressIlikara/LS2K0300-linux-4.19
2,417
arch/arm64/kernel/vdso/vdso.lds.S
/* * GNU linker script for the VDSO library. * * Copyright (C) 2012 ARM Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * * Author: Will Deacon <will.deacon@arm.com> * Heavily based on the vDSO linker scripts for other archs. */ #include <linux/const.h> #include <asm/page.h> #include <asm/vdso.h> OUTPUT_FORMAT("elf64-littleaarch64", "elf64-bigaarch64", "elf64-littleaarch64") OUTPUT_ARCH(aarch64) SECTIONS { PROVIDE(_vdso_data = . - PAGE_SIZE); . = VDSO_LBASE + SIZEOF_HEADERS; .hash : { *(.hash) } :text .gnu.hash : { *(.gnu.hash) } .dynsym : { *(.dynsym) } .dynstr : { *(.dynstr) } .gnu.version : { *(.gnu.version) } .gnu.version_d : { *(.gnu.version_d) } .gnu.version_r : { *(.gnu.version_r) } .note : { *(.note.*) } :text :note . = ALIGN(16); .text : { *(.text*) } :text =0xd503201f PROVIDE (__etext = .); PROVIDE (_etext = .); PROVIDE (etext = .); .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr .eh_frame : { KEEP (*(.eh_frame)) } :text .dynamic : { *(.dynamic) } :text :dynamic .rodata : { *(.rodata*) } :text _end = .; PROVIDE(end = .); /DISCARD/ : { *(.note.GNU-stack) *(.data .data.* .gnu.linkonce.d.* .sdata*) *(.bss .sbss .dynbss .dynsbss) } } /* * We must supply the ELF program headers explicitly to get just one * PT_LOAD segment, and set the flags explicitly to make segments read-only. */ PHDRS { text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ note PT_NOTE FLAGS(4); /* PF_R */ eh_frame_hdr PT_GNU_EH_FRAME; } /* * This controls what symbols we export from the DSO. */ VERSION { LINUX_2.6.39 { global: __kernel_rt_sigreturn; __kernel_gettimeofday; __kernel_clock_gettime; __kernel_clock_getres; local: *; }; } /* * Make the sigreturn code visible to the kernel. */ VDSO_sigtramp = __kernel_rt_sigreturn;
AirFortressIlikara/LS2K0300-linux-4.19
1,049
arch/arm64/kernel/vdso/sigreturn.S
/* * Sigreturn trampoline for returning from a signal when the SA_RESTORER * flag is not set. * * Copyright (C) 2012 ARM Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * * Author: Will Deacon <will.deacon@arm.com> */ #include <linux/linkage.h> #include <asm/unistd.h> .text nop ENTRY(__kernel_rt_sigreturn) .cfi_startproc .cfi_signal_frame .cfi_def_cfa x29, 0 .cfi_offset x29, 0 * 8 .cfi_offset x30, 1 * 8 mov x8, #__NR_rt_sigreturn svc #0 .cfi_endproc ENDPROC(__kernel_rt_sigreturn)
AirFortressIlikara/LS2K0300-linux-4.19
8,481
arch/arm64/kernel/vdso/gettimeofday.S
/* * Userspace implementations of gettimeofday() and friends. * * Copyright (C) 2012 ARM Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * * Author: Will Deacon <will.deacon@arm.com> */ #include <linux/linkage.h> #include <asm/asm-offsets.h> #include <asm/unistd.h> #define NSEC_PER_SEC_LO16 0xca00 #define NSEC_PER_SEC_HI16 0x3b9a vdso_data .req x6 seqcnt .req w7 w_tmp .req w8 x_tmp .req x8 /* * Conventions for macro arguments: * - An argument is write-only if its name starts with "res". * - All other arguments are read-only, unless otherwise specified. */ .macro seqcnt_acquire 9999: ldr seqcnt, [vdso_data, #VDSO_TB_SEQ_COUNT] tbnz seqcnt, #0, 9999b dmb ishld .endm .macro seqcnt_check fail dmb ishld ldr w_tmp, [vdso_data, #VDSO_TB_SEQ_COUNT] cmp w_tmp, seqcnt b.ne \fail .endm .macro syscall_check fail ldr w_tmp, [vdso_data, #VDSO_USE_SYSCALL] cbnz w_tmp, \fail .endm .macro get_nsec_per_sec res mov \res, #NSEC_PER_SEC_LO16 movk \res, #NSEC_PER_SEC_HI16, lsl #16 .endm /* * Returns the clock delta, in nanoseconds left-shifted by the clock * shift. */ .macro get_clock_shifted_nsec res, cycle_last, mult /* Read the virtual counter. */ isb mrs x_tmp, cntvct_el0 /* Calculate cycle delta and convert to ns. */ sub \res, x_tmp, \cycle_last /* We can only guarantee 56 bits of precision. */ movn x_tmp, #0xff00, lsl #48 and \res, x_tmp, \res mul \res, \res, \mult /* * Fake address dependency from the value computed from the counter * register to subsequent data page accesses so that the sequence * locking also orders the read of the counter. */ and x_tmp, \res, xzr add vdso_data, vdso_data, x_tmp .endm /* * Returns in res_{sec,nsec} the REALTIME timespec, based on the * "wall time" (xtime) and the clock_mono delta. */ .macro get_ts_realtime res_sec, res_nsec, \ clock_nsec, xtime_sec, xtime_nsec, nsec_to_sec add \res_nsec, \clock_nsec, \xtime_nsec udiv x_tmp, \res_nsec, \nsec_to_sec add \res_sec, \xtime_sec, x_tmp msub \res_nsec, x_tmp, \nsec_to_sec, \res_nsec .endm /* * Returns in res_{sec,nsec} the timespec based on the clock_raw delta, * used for CLOCK_MONOTONIC_RAW. */ .macro get_ts_clock_raw res_sec, res_nsec, clock_nsec, nsec_to_sec udiv \res_sec, \clock_nsec, \nsec_to_sec msub \res_nsec, \res_sec, \nsec_to_sec, \clock_nsec .endm /* sec and nsec are modified in place. */ .macro add_ts sec, nsec, ts_sec, ts_nsec, nsec_to_sec /* Add timespec. */ add \sec, \sec, \ts_sec add \nsec, \nsec, \ts_nsec /* Normalise the new timespec. */ cmp \nsec, \nsec_to_sec b.lt 9999f sub \nsec, \nsec, \nsec_to_sec add \sec, \sec, #1 9999: cmp \nsec, #0 b.ge 9998f add \nsec, \nsec, \nsec_to_sec sub \sec, \sec, #1 9998: .endm .macro clock_gettime_return, shift=0 .if \shift == 1 lsr x11, x11, x12 .endif stp x10, x11, [x1, #TSPEC_TV_SEC] mov x0, xzr ret .endm .macro jump_slot jumptable, index, label .if (. 
- \jumptable) != 4 * (\index) .error "Jump slot index mismatch" .endif b \label .endm .text /* int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz); */ ENTRY(__kernel_gettimeofday) .cfi_startproc adr vdso_data, _vdso_data /* If tv is NULL, skip to the timezone code. */ cbz x0, 2f /* Compute the time of day. */ 1: seqcnt_acquire syscall_check fail=4f ldr x10, [vdso_data, #VDSO_CS_CYCLE_LAST] /* w11 = cs_mono_mult, w12 = cs_shift */ ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT] ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC] get_nsec_per_sec res=x9 lsl x9, x9, x12 get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11 seqcnt_check fail=1b get_ts_realtime res_sec=x10, res_nsec=x11, \ clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9 /* Convert ns to us. */ mov x13, #1000 lsl x13, x13, x12 udiv x11, x11, x13 stp x10, x11, [x0, #TVAL_TV_SEC] 2: /* If tz is NULL, return 0. */ cbz x1, 3f ldp w4, w5, [vdso_data, #VDSO_TZ_MINWEST] stp w4, w5, [x1, #TZ_MINWEST] 3: mov x0, xzr ret 4: /* Syscall fallback. */ mov x8, #__NR_gettimeofday svc #0 ret .cfi_endproc ENDPROC(__kernel_gettimeofday) #define JUMPSLOT_MAX CLOCK_MONOTONIC_COARSE /* int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp); */ ENTRY(__kernel_clock_gettime) .cfi_startproc cmp w0, #JUMPSLOT_MAX b.hi syscall adr vdso_data, _vdso_data adr x_tmp, jumptable add x_tmp, x_tmp, w0, uxtw #2 br x_tmp ALIGN jumptable: jump_slot jumptable, CLOCK_REALTIME, realtime jump_slot jumptable, CLOCK_MONOTONIC, monotonic b syscall b syscall jump_slot jumptable, CLOCK_MONOTONIC_RAW, monotonic_raw jump_slot jumptable, CLOCK_REALTIME_COARSE, realtime_coarse jump_slot jumptable, CLOCK_MONOTONIC_COARSE, monotonic_coarse .if (. - jumptable) != 4 * (JUMPSLOT_MAX + 1) .error "Wrong jumptable size" .endif ALIGN realtime: seqcnt_acquire syscall_check fail=syscall ldr x10, [vdso_data, #VDSO_CS_CYCLE_LAST] /* w11 = cs_mono_mult, w12 = cs_shift */ ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT] ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC] /* All computations are done with left-shifted nsecs. */ get_nsec_per_sec res=x9 lsl x9, x9, x12 get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11 seqcnt_check fail=realtime get_ts_realtime res_sec=x10, res_nsec=x11, \ clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9 clock_gettime_return, shift=1 ALIGN monotonic: seqcnt_acquire syscall_check fail=syscall ldr x10, [vdso_data, #VDSO_CS_CYCLE_LAST] /* w11 = cs_mono_mult, w12 = cs_shift */ ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT] ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC] ldp x3, x4, [vdso_data, #VDSO_WTM_CLK_SEC] /* All computations are done with left-shifted nsecs. */ lsl x4, x4, x12 get_nsec_per_sec res=x9 lsl x9, x9, x12 get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11 seqcnt_check fail=monotonic get_ts_realtime res_sec=x10, res_nsec=x11, \ clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9 add_ts sec=x10, nsec=x11, ts_sec=x3, ts_nsec=x4, nsec_to_sec=x9 clock_gettime_return, shift=1 ALIGN monotonic_raw: seqcnt_acquire syscall_check fail=syscall ldr x10, [vdso_data, #VDSO_CS_CYCLE_LAST] /* w11 = cs_raw_mult, w12 = cs_shift */ ldp w12, w11, [vdso_data, #VDSO_CS_SHIFT] ldp x13, x14, [vdso_data, #VDSO_RAW_TIME_SEC] /* All computations are done with left-shifted nsecs. 
*/ get_nsec_per_sec res=x9 lsl x9, x9, x12 get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11 seqcnt_check fail=monotonic_raw get_ts_clock_raw res_sec=x10, res_nsec=x11, \ clock_nsec=x15, nsec_to_sec=x9 add_ts sec=x10, nsec=x11, ts_sec=x13, ts_nsec=x14, nsec_to_sec=x9 clock_gettime_return, shift=1 ALIGN realtime_coarse: seqcnt_acquire ldp x10, x11, [vdso_data, #VDSO_XTIME_CRS_SEC] seqcnt_check fail=realtime_coarse clock_gettime_return ALIGN monotonic_coarse: seqcnt_acquire ldp x10, x11, [vdso_data, #VDSO_XTIME_CRS_SEC] ldp x13, x14, [vdso_data, #VDSO_WTM_CLK_SEC] seqcnt_check fail=monotonic_coarse /* Computations are done in (non-shifted) nsecs. */ get_nsec_per_sec res=x9 add_ts sec=x10, nsec=x11, ts_sec=x13, ts_nsec=x14, nsec_to_sec=x9 clock_gettime_return ALIGN syscall: /* Syscall fallback. */ mov x8, #__NR_clock_gettime svc #0 ret .cfi_endproc ENDPROC(__kernel_clock_gettime) /* int __kernel_clock_getres(clockid_t clock_id, struct timespec *res); */ ENTRY(__kernel_clock_getres) .cfi_startproc cmp w0, #CLOCK_REALTIME ccmp w0, #CLOCK_MONOTONIC, #0x4, ne ccmp w0, #CLOCK_MONOTONIC_RAW, #0x4, ne b.ne 1f adr vdso_data, _vdso_data ldr w2, [vdso_data, #CLOCK_REALTIME_RES] b 2f 1: cmp w0, #CLOCK_REALTIME_COARSE ccmp w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne b.ne 4f ldr x2, 5f 2: cbz x1, 3f stp xzr, x2, [x1] 3: /* res == NULL. */ mov w0, wzr ret 4: /* Syscall fallback. */ mov x8, #__NR_clock_getres svc #0 ret 5: .quad CLOCK_COARSE_RES .cfi_endproc ENDPROC(__kernel_clock_getres)
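A rough C rendering of the CLOCK_REALTIME path above may help follow the arithmetic. This is a sketch under assumptions, not the kernel's vdso_data layout: the field names only mirror the VDSO_* offsets used by the assembly, the dmb barriers and the 56-bit counter mask are omitted, and read_cntvct() stands in for the isb/mrs cntvct_el0 sequence.

#include <stdint.h>

#define NSEC_PER_SEC 1000000000ull

struct vdso_data {                    /* field names mirror the VDSO_* offsets */
        volatile uint32_t seq;        /* VDSO_TB_SEQ_COUNT                     */
        uint64_t cycle_last;          /* VDSO_CS_CYCLE_LAST                    */
        uint32_t mult, shift;         /* VDSO_CS_MONO_MULT / VDSO_CS_SHIFT     */
        uint64_t xtime_sec;           /* VDSO_XTIME_CLK_SEC                    */
        uint64_t xtime_nsec;          /* already left-shifted by 'shift'       */
};

uint64_t read_cntvct(void);           /* stands in for "isb; mrs xN, cntvct_el0" */

void do_realtime(const struct vdso_data *vd, uint64_t *sec, uint64_t *nsec)
{
        uint64_t ns, per_sec;
        uint32_t seq;

        do {
                while ((seq = vd->seq) & 1)
                        ;                                 /* seqcnt_acquire: writer active */
                ns = (read_cntvct() - vd->cycle_last) * vd->mult + vd->xtime_nsec;
                per_sec = NSEC_PER_SEC << vd->shift;      /* everything is shifted nsecs   */
                *sec  = vd->xtime_sec + ns / per_sec;
                *nsec = (ns % per_sec) >> vd->shift;
        } while (vd->seq != seq);                         /* seqcnt_check: retry on change */
}

All intermediate values are nanoseconds left-shifted by 'shift', which is why NSEC_PER_SEC is shifted up before the divide and the final nanosecond value is shifted back down, just as clock_gettime_return does with shift=1.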
AirFortressIlikara/LS2K0300-linux-4.19
1,718
arch/arm64/kernel/probes/kprobes_trampoline.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * trampoline entry and return code for kretprobes. */ #include <linux/linkage.h> #include <asm/asm-offsets.h> #include <asm/assembler.h> .text .macro save_all_base_regs stp x0, x1, [sp, #S_X0] stp x2, x3, [sp, #S_X2] stp x4, x5, [sp, #S_X4] stp x6, x7, [sp, #S_X6] stp x8, x9, [sp, #S_X8] stp x10, x11, [sp, #S_X10] stp x12, x13, [sp, #S_X12] stp x14, x15, [sp, #S_X14] stp x16, x17, [sp, #S_X16] stp x18, x19, [sp, #S_X18] stp x20, x21, [sp, #S_X20] stp x22, x23, [sp, #S_X22] stp x24, x25, [sp, #S_X24] stp x26, x27, [sp, #S_X26] stp x28, x29, [sp, #S_X28] add x0, sp, #S_FRAME_SIZE stp lr, x0, [sp, #S_LR] /* * Construct a useful saved PSTATE */ mrs x0, nzcv mrs x1, daif orr x0, x0, x1 mrs x1, CurrentEL orr x0, x0, x1 mrs x1, SPSel orr x0, x0, x1 stp xzr, x0, [sp, #S_PC] .endm .macro restore_all_base_regs ldr x0, [sp, #S_PSTATE] and x0, x0, #(PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT) msr nzcv, x0 ldp x0, x1, [sp, #S_X0] ldp x2, x3, [sp, #S_X2] ldp x4, x5, [sp, #S_X4] ldp x6, x7, [sp, #S_X6] ldp x8, x9, [sp, #S_X8] ldp x10, x11, [sp, #S_X10] ldp x12, x13, [sp, #S_X12] ldp x14, x15, [sp, #S_X14] ldp x16, x17, [sp, #S_X16] ldp x18, x19, [sp, #S_X18] ldp x20, x21, [sp, #S_X20] ldp x22, x23, [sp, #S_X22] ldp x24, x25, [sp, #S_X24] ldp x26, x27, [sp, #S_X26] ldp x28, x29, [sp, #S_X28] .endm ENTRY(kretprobe_trampoline) sub sp, sp, #S_FRAME_SIZE save_all_base_regs mov x0, sp bl trampoline_probe_handler /* * Replace trampoline address in lr with actual orig_ret_addr return * address. */ mov lr, x0 restore_all_base_regs add sp, sp, #S_FRAME_SIZE ret ENDPROC(kretprobe_trampoline)
AirFortressIlikara/LS2K0300-linux-4.19
5,296
arch/arm64/kvm/hyp/entry.S
/* * Copyright (C) 2015 - ARM Ltd * Author: Marc Zyngier <marc.zyngier@arm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/linkage.h> #include <asm/alternative.h> #include <asm/asm-offsets.h> #include <asm/assembler.h> #include <asm/fpsimdmacros.h> #include <asm/kvm.h> #include <asm/kvm_arm.h> #include <asm/kvm_asm.h> #include <asm/kvm_mmu.h> #define CPU_GP_REG_OFFSET(x) (CPU_GP_REGS + x) #define CPU_XREG_OFFSET(x) CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x) .text .pushsection .hyp.text, "ax" .macro save_callee_saved_regs ctxt stp x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)] stp x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)] stp x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)] stp x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)] stp x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)] stp x29, lr, [\ctxt, #CPU_XREG_OFFSET(29)] .endm .macro restore_callee_saved_regs ctxt ldp x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)] ldp x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)] ldp x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)] ldp x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)] ldp x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)] ldp x29, lr, [\ctxt, #CPU_XREG_OFFSET(29)] .endm /* * u64 __guest_enter(struct kvm_vcpu *vcpu, * struct kvm_cpu_context *host_ctxt); */ ENTRY(__guest_enter) // x0: vcpu // x1: host context // x2-x17: clobbered by macros // x18: guest context // Store the host regs save_callee_saved_regs x1 // Now the host state is stored if we have a pending RAS SError it must // affect the host. If any asynchronous exception is pending we defer // the guest entry. The DSB isn't necessary before v8.2 as any SError // would be fatal. alternative_if ARM64_HAS_RAS_EXTN dsb nshst isb alternative_else_nop_endif mrs x1, isr_el1 cbz x1, 1f mov x0, #ARM_EXCEPTION_IRQ ret 1: add x18, x0, #VCPU_CONTEXT // Restore guest regs x0-x17 ldp x0, x1, [x18, #CPU_XREG_OFFSET(0)] ldp x2, x3, [x18, #CPU_XREG_OFFSET(2)] ldp x4, x5, [x18, #CPU_XREG_OFFSET(4)] ldp x6, x7, [x18, #CPU_XREG_OFFSET(6)] ldp x8, x9, [x18, #CPU_XREG_OFFSET(8)] ldp x10, x11, [x18, #CPU_XREG_OFFSET(10)] ldp x12, x13, [x18, #CPU_XREG_OFFSET(12)] ldp x14, x15, [x18, #CPU_XREG_OFFSET(14)] ldp x16, x17, [x18, #CPU_XREG_OFFSET(16)] // Restore guest regs x19-x29, lr restore_callee_saved_regs x18 // Restore guest reg x18 ldr x18, [x18, #CPU_XREG_OFFSET(18)] // Do not touch any register after this! 
eret ENDPROC(__guest_enter) ENTRY(__guest_exit) // x0: return code // x1: vcpu // x2-x29,lr: vcpu regs // vcpu x0-x1 on the stack add x1, x1, #VCPU_CONTEXT ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN) // Store the guest regs x2 and x3 stp x2, x3, [x1, #CPU_XREG_OFFSET(2)] // Retrieve the guest regs x0-x1 from the stack ldp x2, x3, [sp], #16 // x0, x1 // Store the guest regs x0-x1 and x4-x18 stp x2, x3, [x1, #CPU_XREG_OFFSET(0)] stp x4, x5, [x1, #CPU_XREG_OFFSET(4)] stp x6, x7, [x1, #CPU_XREG_OFFSET(6)] stp x8, x9, [x1, #CPU_XREG_OFFSET(8)] stp x10, x11, [x1, #CPU_XREG_OFFSET(10)] stp x12, x13, [x1, #CPU_XREG_OFFSET(12)] stp x14, x15, [x1, #CPU_XREG_OFFSET(14)] stp x16, x17, [x1, #CPU_XREG_OFFSET(16)] str x18, [x1, #CPU_XREG_OFFSET(18)] // Store the guest regs x19-x29, lr save_callee_saved_regs x1 get_host_ctxt x2, x3 // Now restore the host regs restore_callee_saved_regs x2 alternative_if ARM64_HAS_RAS_EXTN // If we have the RAS extensions we can consume a pending error // without an unmask-SError and isb. esb mrs_s x2, SYS_DISR_EL1 str x2, [x1, #(VCPU_FAULT_DISR - VCPU_CONTEXT)] cbz x2, 1f msr_s SYS_DISR_EL1, xzr orr x0, x0, #(1<<ARM_EXIT_WITH_SERROR_BIT) 1: ret alternative_else // If we have a pending asynchronous abort, now is the // time to find out. From your VAXorcist book, page 666: // "Threaten me not, oh Evil one! For I speak with // the power of DEC, and I command thee to show thyself!" mrs x2, elr_el2 mrs x3, esr_el2 mrs x4, spsr_el2 mov x5, x0 dsb sy // Synchronize against in-flight ld/st nop msr daifclr, #4 // Unmask aborts alternative_endif // This is our single instruction exception window. A pending // SError is guaranteed to occur at the earliest when we unmask // it, and at the latest just after the ISB. abort_guest_exit_start: isb abort_guest_exit_end: msr daifset, #4 // Mask aborts ret _kvm_extable abort_guest_exit_start, 9997f _kvm_extable abort_guest_exit_end, 9997f 9997: msr daifset, #4 // Mask aborts mov x0, #(1 << ARM_EXIT_WITH_SERROR_BIT) // restore the EL1 exception context so that we can report some // information. Merge the exception code with the SError pending bit. msr elr_el2, x2 msr esr_el2, x3 msr spsr_el2, x4 orr x0, x0, x5 1: ret ENDPROC(__guest_exit)
AirFortressIlikara/LS2K0300-linux-4.19
7,635
arch/arm64/kvm/hyp/hyp-entry.S
/* * Copyright (C) 2015-2018 - ARM Ltd * Author: Marc Zyngier <marc.zyngier@arm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/arm-smccc.h> #include <linux/linkage.h> #include <asm/alternative.h> #include <asm/assembler.h> #include <asm/cpufeature.h> #include <asm/kvm_arm.h> #include <asm/kvm_asm.h> #include <asm/kvm_mmu.h> #include <asm/mmu.h> .macro save_caller_saved_regs_vect /* x0 and x1 were saved in the vector entry */ stp x2, x3, [sp, #-16]! stp x4, x5, [sp, #-16]! stp x6, x7, [sp, #-16]! stp x8, x9, [sp, #-16]! stp x10, x11, [sp, #-16]! stp x12, x13, [sp, #-16]! stp x14, x15, [sp, #-16]! stp x16, x17, [sp, #-16]! .endm .macro restore_caller_saved_regs_vect ldp x16, x17, [sp], #16 ldp x14, x15, [sp], #16 ldp x12, x13, [sp], #16 ldp x10, x11, [sp], #16 ldp x8, x9, [sp], #16 ldp x6, x7, [sp], #16 ldp x4, x5, [sp], #16 ldp x2, x3, [sp], #16 ldp x0, x1, [sp], #16 .endm .text .pushsection .hyp.text, "ax" .macro do_el2_call /* * Shuffle the parameters before calling the function * pointed to in x0. Assumes parameters in x[1,2,3]. */ str lr, [sp, #-16]! mov lr, x0 mov x0, x1 mov x1, x2 mov x2, x3 blr lr ldr lr, [sp], #16 .endm ENTRY(__vhe_hyp_call) do_el2_call /* * We used to rely on having an exception return to get * an implicit isb. In the E2H case, we don't have it anymore. * rather than changing all the leaf functions, just do it here * before returning to the rest of the kernel. */ isb ret ENDPROC(__vhe_hyp_call) el1_sync: // Guest trapped into EL2 mrs x0, esr_el2 lsr x0, x0, #ESR_ELx_EC_SHIFT cmp x0, #ESR_ELx_EC_HVC64 ccmp x0, #ESR_ELx_EC_HVC32, #4, ne b.ne el1_trap mrs x1, vttbr_el2 // If vttbr is valid, the guest cbnz x1, el1_hvc_guest // called HVC /* Here, we're pretty sure the host called HVC. */ ldp x0, x1, [sp], #16 /* Check for a stub HVC call */ cmp x0, #HVC_STUB_HCALL_NR b.hs 1f /* * Compute the idmap address of __kvm_handle_stub_hvc and * jump there. Since we use kimage_voffset, do not use the * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead * (by loading it from the constant pool). * * Preserve x0-x4, which may contain stub parameters. */ ldr x5, =__kvm_handle_stub_hvc ldr_l x6, kimage_voffset /* x5 = __pa(x5) */ sub x5, x5, x6 br x5 1: /* * Perform the EL2 call */ kern_hyp_va x0 do_el2_call eret el1_hvc_guest: /* * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1. * The workaround has already been applied on the host, * so let's quickly get back to the guest. We don't bother * restoring x1, as it can be clobbered anyway. 
*/ ldr x1, [sp] // Guest's x0 eor w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1 cbz w1, wa_epilogue /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */ eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \ ARM_SMCCC_ARCH_WORKAROUND_2) cbnz w1, el1_trap #ifdef CONFIG_ARM64_SSBD alternative_cb arm64_enable_wa2_handling b wa2_end alternative_cb_end get_vcpu_ptr x2, x0 ldr x0, [x2, #VCPU_WORKAROUND_FLAGS] // Sanitize the argument and update the guest flags ldr x1, [sp, #8] // Guest's x1 clz w1, w1 // Murphy's device: lsr w1, w1, #5 // w1 = !!w1 without using eor w1, w1, #1 // the flags... bfi x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1 str x0, [x2, #VCPU_WORKAROUND_FLAGS] /* Check that we actually need to perform the call */ hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2 cbz x0, wa2_end mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2 smc #0 /* Don't leak data from the SMC call */ mov x3, xzr wa2_end: mov x2, xzr mov x1, xzr #endif wa_epilogue: mov x0, xzr add sp, sp, #16 eret el1_trap: get_vcpu_ptr x1, x0 mov x0, #ARM_EXCEPTION_TRAP b __guest_exit el1_irq: get_vcpu_ptr x1, x0 mov x0, #ARM_EXCEPTION_IRQ b __guest_exit el1_error: get_vcpu_ptr x1, x0 mov x0, #ARM_EXCEPTION_EL1_SERROR b __guest_exit el2_sync: save_caller_saved_regs_vect stp x29, x30, [sp, #-16]! bl kvm_unexpected_el2_exception ldp x29, x30, [sp], #16 restore_caller_saved_regs_vect eret el2_error: save_caller_saved_regs_vect stp x29, x30, [sp, #-16]! bl kvm_unexpected_el2_exception ldp x29, x30, [sp], #16 restore_caller_saved_regs_vect eret ENTRY(__hyp_do_panic) mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ PSR_MODE_EL1h) msr spsr_el2, lr ldr lr, =panic msr elr_el2, lr eret ENDPROC(__hyp_do_panic) ENTRY(__hyp_panic) get_host_ctxt x0, x1 b hyp_panic ENDPROC(__hyp_panic) .macro invalid_vector label, target = __hyp_panic .align 2 \label: b \target ENDPROC(\label) .endm /* None of these should ever happen */ invalid_vector el2t_sync_invalid invalid_vector el2t_irq_invalid invalid_vector el2t_fiq_invalid invalid_vector el2t_error_invalid invalid_vector el2h_irq_invalid invalid_vector el2h_fiq_invalid invalid_vector el1_fiq_invalid .ltorg .align 11 .macro valid_vect target .align 7 stp x0, x1, [sp, #-16]! b \target .endm .macro invalid_vect target .align 7 b \target ldp x0, x1, [sp], #16 b \target .endm ENTRY(__kvm_hyp_vector) invalid_vect el2t_sync_invalid // Synchronous EL2t invalid_vect el2t_irq_invalid // IRQ EL2t invalid_vect el2t_fiq_invalid // FIQ EL2t invalid_vect el2t_error_invalid // Error EL2t valid_vect el2_sync // Synchronous EL2h invalid_vect el2h_irq_invalid // IRQ EL2h invalid_vect el2h_fiq_invalid // FIQ EL2h valid_vect el2_error // Error EL2h valid_vect el1_sync // Synchronous 64-bit EL1 valid_vect el1_irq // IRQ 64-bit EL1 invalid_vect el1_fiq_invalid // FIQ 64-bit EL1 valid_vect el1_error // Error 64-bit EL1 valid_vect el1_sync // Synchronous 32-bit EL1 valid_vect el1_irq // IRQ 32-bit EL1 invalid_vect el1_fiq_invalid // FIQ 32-bit EL1 valid_vect el1_error // Error 32-bit EL1 ENDPROC(__kvm_hyp_vector) #ifdef CONFIG_KVM_INDIRECT_VECTORS .macro hyp_ventry .align 7 1: .rept 27 nop .endr /* * The default sequence is to directly branch to the KVM vectors, * using the computed offset. This applies for VHE as well as * !ARM64_HARDEN_EL2_VECTORS. * * For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced * with: * * stp x0, x1, [sp, #-16]! * movz x0, #(addr & 0xffff) * movk x0, #((addr >> 16) & 0xffff), lsl #16 * movk x0, #((addr >> 32) & 0xffff), lsl #32 * br x0 * * Where addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + 4. 
* See kvm_patch_vector_branch for details. */ alternative_cb kvm_patch_vector_branch b __kvm_hyp_vector + (1b - 0b) nop nop nop nop alternative_cb_end .endm .macro generate_vectors 0: .rept 16 hyp_ventry .endr .org 0b + SZ_2K // Safety measure .endm .align 11 ENTRY(__bp_harden_hyp_vecs_start) .rept BP_HARDEN_EL2_SLOTS generate_vectors .endr ENTRY(__bp_harden_hyp_vecs_end) .popsection ENTRY(__smccc_workaround_1_smc_start) sub sp, sp, #(8 * 4) stp x2, x3, [sp, #(8 * 0)] stp x0, x1, [sp, #(8 * 2)] mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1 smc #0 ldp x2, x3, [sp, #(8 * 0)] ldp x0, x1, [sp, #(8 * 2)] add sp, sp, #(8 * 4) ENTRY(__smccc_workaround_1_smc_end) #endif
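One detail worth unpacking from el1_hvc_guest above is the flag-free normalisation of the guest's x1 ("Murphy's device"): the clz / lsr #5 / eor #1 sequence computes !!x1 on a 32-bit value without touching the condition flags, exactly as the inline comment claims. A small stand-alone C check of that identity (illustrative only; __builtin_clz is the GCC/Clang builtin):

#include <assert.h>
#include <stdint.h>

static uint32_t normalise(uint32_t w1)
{
        uint32_t v = w1 ? (uint32_t)__builtin_clz(w1) : 32;  /* clz w1, w1     */
        v >>= 5;                                             /* lsr w1, w1, #5 */
        return v ^ 1;                                        /* eor w1, w1, #1 */
}

int main(void)
{
        assert(normalise(0) == 0);
        assert(normalise(1) == 1);
        assert(normalise(0xffffffffu) == 1);
        return 0;
}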
AirFortressIlikara/LS2K0300-linux-4.19
7,011
arch/xtensa/kernel/head.S
/* * arch/xtensa/kernel/head.S * * Xtensa Processor startup code. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2001 - 2008 Tensilica Inc. * * Chris Zankel <chris@zankel.net> * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca> * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com> * Kevin Chea */ #include <asm/processor.h> #include <asm/page.h> #include <asm/cacheasm.h> #include <asm/initialize_mmu.h> #include <asm/mxregs.h> #include <linux/init.h> #include <linux/linkage.h> /* * This module contains the entry code for kernel images. It performs the * minimal setup needed to call the generic C routines. * * Prerequisites: * * - The kernel image has been loaded to the actual address where it was * compiled to. * - a2 contains either 0 or a pointer to a list of boot parameters. * (see setup.c for more details) * */ /* * _start * * The bootloader passes a pointer to a list of boot parameters in a2. */ /* The first bytes of the kernel image must be an instruction, so we * manually allocate and define the literal constant we need for a jx * instruction. */ __HEAD .begin no-absolute-literals ENTRY(_start) /* Preserve the pointer to the boot parameter list in EXCSAVE_1 */ wsr a2, excsave1 _j _SetupOCD .align 4 .literal_position .Lstartup: .word _startup .align 4 _SetupOCD: /* * Initialize WB, WS, and clear PS.EXCM (to allow loop instructions). * Set Interrupt Level just below XCHAL_DEBUGLEVEL to allow * xt-gdb to single step via DEBUG exceptions received directly * by ocd. */ movi a1, 1 movi a0, 0 wsr a1, windowstart wsr a0, windowbase rsync movi a1, LOCKLEVEL wsr a1, ps rsync .global _SetupMMU _SetupMMU: Offset = _SetupMMU - _start #ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX initialize_mmu #if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY rsr a2, excsave1 movi a3, XCHAL_KSEG_PADDR bltu a2, a3, 1f sub a2, a2, a3 movi a3, XCHAL_KSEG_SIZE bgeu a2, a3, 1f movi a3, XCHAL_KSEG_CACHED_VADDR add a2, a2, a3 wsr a2, excsave1 1: #endif #endif .end no-absolute-literals l32r a0, .Lstartup jx a0 ENDPROC(_start) __REF .literal_position ENTRY(_startup) /* Set a0 to 0 for the remaining initialization. */ movi a0, 0 #if XCHAL_HAVE_VECBASE movi a2, VECBASE_VADDR wsr a2, vecbase #endif /* Clear debugging registers. */ #if XCHAL_HAVE_DEBUG #if XCHAL_NUM_IBREAK > 0 wsr a0, ibreakenable #endif wsr a0, icount movi a1, 15 wsr a0, icountlevel .set _index, 0 .rept XCHAL_NUM_DBREAK wsr a0, SREG_DBREAKC + _index .set _index, _index + 1 .endr #endif /* Clear CCOUNT (not really necessary, but nice) */ wsr a0, ccount # not really necessary, but nice /* Disable zero-loops. */ #if XCHAL_HAVE_LOOPS wsr a0, lcount #endif /* Disable all timers. */ .set _index, 0 .rept XCHAL_NUM_TIMERS wsr a0, SREG_CCOMPARE + _index .set _index, _index + 1 .endr /* Interrupt initialization. */ movi a2, XCHAL_INTTYPE_MASK_SOFTWARE | XCHAL_INTTYPE_MASK_EXTERN_EDGE wsr a0, intenable wsr a2, intclear /* Disable coprocessors. */ #if XCHAL_HAVE_CP wsr a0, cpenable #endif /* Initialize the caches. * a2, a3 are just working registers (clobbered). 
*/ #if XCHAL_DCACHE_LINE_LOCKABLE ___unlock_dcache_all a2 a3 #endif #if XCHAL_ICACHE_LINE_LOCKABLE ___unlock_icache_all a2 a3 #endif ___invalidate_dcache_all a2 a3 ___invalidate_icache_all a2 a3 isync initialize_cacheattr #ifdef CONFIG_HAVE_SMP movi a2, CCON # MX External Register to Configure Cache movi a3, 1 wer a3, a2 #endif /* Setup stack and enable window exceptions (keep irqs disabled) */ movi a1, start_info l32i a1, a1, 0 movi a2, (1 << PS_WOE_BIT) | LOCKLEVEL # WOE=1, INTLEVEL=LOCKLEVEL, UM=0 wsr a2, ps # (enable reg-windows; progmode stack) rsync #ifdef CONFIG_SMP /* * Notice that we assume with SMP that cores have PRID * supported by the cores. */ rsr a2, prid bnez a2, .Lboot_secondary #endif /* CONFIG_SMP */ /* Unpack data sections * * The linker script used to build the Linux kernel image * creates a table located at __boot_reloc_table_start * that contans the information what data needs to be unpacked. * * Uses a2-a7. */ movi a2, __boot_reloc_table_start movi a3, __boot_reloc_table_end 1: beq a2, a3, 3f # no more entries? l32i a4, a2, 0 # start destination (in RAM) l32i a5, a2, 4 # end desination (in RAM) l32i a6, a2, 8 # start source (in ROM) addi a2, a2, 12 # next entry beq a4, a5, 1b # skip, empty entry beq a4, a6, 1b # skip, source and dest. are the same 2: l32i a7, a6, 0 # load word addi a6, a6, 4 s32i a7, a4, 0 # store word addi a4, a4, 4 bltu a4, a5, 2b j 1b 3: /* All code and initialized data segments have been copied. * Now clear the BSS segment. */ movi a2, __bss_start # start of BSS movi a3, __bss_stop # end of BSS __loopt a2, a3, a4, 2 s32i a0, a2, 0 __endla a2, a3, 4 #if XCHAL_DCACHE_IS_WRITEBACK /* After unpacking, flush the writeback cache to memory so the * instructions/data are available. */ ___flush_dcache_all a2 a3 #endif memw isync ___invalidate_icache_all a2 a3 isync movi a6, 0 xsr a6, excsave1 /* init_arch kick-starts the linux kernel */ call4 init_arch call4 start_kernel should_never_return: j should_never_return #ifdef CONFIG_SMP .Lboot_secondary: movi a2, cpu_start_ccount 1: memw l32i a3, a2, 0 beqi a3, 0, 1b movi a3, 0 s32i a3, a2, 0 1: memw l32i a3, a2, 0 beqi a3, 0, 1b wsr a3, ccount movi a3, 0 s32i a3, a2, 0 memw movi a6, 0 wsr a6, excsave1 call4 secondary_start_kernel j should_never_return #endif /* CONFIG_SMP */ ENDPROC(_startup) #ifdef CONFIG_HOTPLUG_CPU ENTRY(cpu_restart) #if XCHAL_DCACHE_IS_WRITEBACK ___flush_invalidate_dcache_all a2 a3 #else ___invalidate_dcache_all a2 a3 #endif memw movi a2, CCON # MX External Register to Configure Cache movi a3, 0 wer a3, a2 extw rsr a0, prid neg a2, a0 movi a3, cpu_start_id memw s32i a2, a3, 0 #if XCHAL_DCACHE_IS_WRITEBACK dhwbi a3, 0 #endif 1: memw l32i a2, a3, 0 dhi a3, 0 bne a2, a0, 1b /* * Initialize WB, WS, and clear PS.EXCM (to allow loop instructions). * Set Interrupt Level just below XCHAL_DEBUGLEVEL to allow * xt-gdb to single step via DEBUG exceptions received directly * by ocd. */ movi a1, 1 movi a0, 0 wsr a1, windowstart wsr a0, windowbase rsync movi a1, LOCKLEVEL wsr a1, ps rsync j _startup ENDPROC(cpu_restart) #endif /* CONFIG_HOTPLUG_CPU */ /* * DATA section */ .section ".data.init.refok" .align 4 ENTRY(start_info) .long init_thread_union + KERNEL_STACK_SIZE /* * BSS section */ __PAGE_ALIGNED_BSS #ifdef CONFIG_MMU ENTRY(swapper_pg_dir) .fill PAGE_SIZE, 1, 0 END(swapper_pg_dir) #endif ENTRY(empty_zero_page) .fill PAGE_SIZE, 1, 0 END(empty_zero_page)
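The unpack loop in _startup above walks a table of (destination start, destination end, source) triples located at __boot_reloc_table_start. A C-level sketch of that walk is below; the struct layout and names are assumptions made for illustration, matching the 12-byte entries the assembly steps through on a 32-bit target.

#include <stdint.h>

struct boot_reloc {                     /* one 12-byte table entry (assumed) */
        uint32_t *dst_start;            /* start destination (in RAM) */
        uint32_t *dst_end;              /* end destination (in RAM)   */
        uint32_t *src;                  /* start source (in ROM)      */
};

extern struct boot_reloc __boot_reloc_table_start[], __boot_reloc_table_end[];

void unpack_sections(void)
{
        struct boot_reloc *r;

        for (r = __boot_reloc_table_start; r < __boot_reloc_table_end; r++) {
                if (r->dst_start == r->dst_end)         /* empty entry             */
                        continue;
                if (r->dst_start == r->src)             /* already in place        */
                        continue;
                for (uint32_t *d = r->dst_start, *s = r->src; d < r->dst_end; )
                        *d++ = *s++;                    /* copy one word at a time */
        }
}

Entries whose destination range is empty, or whose source already equals the destination, are skipped, mirroring the two beq tests in the assembly loop.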
AirFortressIlikara/LS2K0300-linux-4.19
22,278
arch/xtensa/kernel/vectors.S
/* * arch/xtensa/kernel/vectors.S * * This file contains all exception vectors (user, kernel, and double), * as well as the window vectors (overflow and underflow), and the debug * vector. These are the primary vectors executed by the processor if an * exception occurs. * * This file is subject to the terms and conditions of the GNU General * Public License. See the file "COPYING" in the main directory of * this archive for more details. * * Copyright (C) 2005 - 2008 Tensilica, Inc. * * Chris Zankel <chris@zankel.net> * */ /* * We use a two-level table approach. The user and kernel exception vectors * use a first-level dispatch table to dispatch the exception to a registered * fast handler or the default handler, if no fast handler was registered. * The default handler sets up a C-stack and dispatches the exception to a * registerd C handler in the second-level dispatch table. * * Fast handler entry condition: * * a0: trashed, original value saved on stack (PT_AREG0) * a1: a1 * a2: new stack pointer, original value in depc * a3: dispatch table * depc: a2, original value saved on stack (PT_DEPC) * excsave_1: a3 * * The value for PT_DEPC saved to stack also functions as a boolean to * indicate that the exception is either a double or a regular exception: * * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception * * Note: Neither the kernel nor the user exception handler generate literals. * */ #include <linux/linkage.h> #include <asm/ptrace.h> #include <asm/current.h> #include <asm/asm-offsets.h> #include <asm/pgtable.h> #include <asm/processor.h> #include <asm/page.h> #include <asm/thread_info.h> #include <asm/vectors.h> #define WINDOW_VECTORS_SIZE 0x180 /* * User exception vector. (Exceptions with PS.UM == 1, PS.EXCM == 0) * * We get here when an exception occurred while we were in userland. * We switch to the kernel stack and jump to the first level handler * associated to the exception cause. * * Note: the saved kernel stack pointer (EXC_TABLE_KSTK) is already * decremented by PT_USER_SIZE. */ .section .UserExceptionVector.text, "ax" ENTRY(_UserExceptionVector) xsr a3, excsave1 # save a3 and get dispatch table wsr a2, depc # save a2 l32i a2, a3, EXC_TABLE_KSTK # load kernel stack to a2 s32i a0, a2, PT_AREG0 # save a0 to ESF rsr a0, exccause # retrieve exception cause s32i a0, a2, PT_DEPC # mark it as a regular exception addx4 a0, a0, a3 # find entry in table l32i a0, a0, EXC_TABLE_FAST_USER # load handler xsr a3, excsave1 # restore a3 and dispatch table jx a0 ENDPROC(_UserExceptionVector) /* * Kernel exception vector. (Exceptions with PS.UM == 0, PS.EXCM == 0) * * We get this exception when we were already in kernel space. * We decrement the current stack pointer (kernel) by PT_SIZE and * jump to the first-level handler associated with the exception cause. * * Note: we need to preserve space for the spill region. 
*/ .section .KernelExceptionVector.text, "ax" ENTRY(_KernelExceptionVector) xsr a3, excsave1 # save a3, and get dispatch table wsr a2, depc # save a2 addi a2, a1, -16-PT_SIZE # adjust stack pointer s32i a0, a2, PT_AREG0 # save a0 to ESF rsr a0, exccause # retrieve exception cause s32i a0, a2, PT_DEPC # mark it as a regular exception addx4 a0, a0, a3 # find entry in table l32i a0, a0, EXC_TABLE_FAST_KERNEL # load handler address xsr a3, excsave1 # restore a3 and dispatch table jx a0 ENDPROC(_KernelExceptionVector) /* * Double exception vector (Exceptions with PS.EXCM == 1) * We get this exception when another exception occurs while were are * already in an exception, such as window overflow/underflow exception, * or 'expected' exceptions, for example memory exception when we were trying * to read data from an invalid address in user space. * * Note that this vector is never invoked for level-1 interrupts, because such * interrupts are disabled (masked) when PS.EXCM is set. * * We decode the exception and take the appropriate action. However, the * double exception vector is much more careful, because a lot more error * cases go through the double exception vector than through the user and * kernel exception vectors. * * Occasionally, the kernel expects a double exception to occur. This usually * happens when accessing user-space memory with the user's permissions * (l32e/s32e instructions). The kernel state, though, is not always suitable * for immediate transfer of control to handle_double, where "normal" exception * processing occurs. Also in kernel mode, TLB misses can occur if accessing * vmalloc memory, possibly requiring repair in a double exception handler. * * The variable at TABLE_FIXUP offset from the pointer in EXCSAVE_1 doubles as * a boolean variable and a pointer to a fixup routine. If the variable * EXC_TABLE_FIXUP is non-zero, this handler jumps to that address. A value of * zero indicates to use the default kernel/user exception handler. * There is only one exception, when the value is identical to the exc_table * label, the kernel is in trouble. This mechanism is used to protect critical * sections, mainly when the handler writes to the stack to assert the stack * pointer is valid. Once the fixup/default handler leaves that area, the * EXC_TABLE_FIXUP variable is reset to the fixup handler or zero. * * Procedures wishing to use this mechanism should set EXC_TABLE_FIXUP to the * nonzero address of a fixup routine before it could cause a double exception * and reset it before it returns. * * Some other things to take care of when a fast exception handler doesn't * specify a particular fixup handler but wants to use the default handlers: * * - The original stack pointer (in a1) must not be modified. The fast * exception handler should only use a2 as the stack pointer. * * - If the fast handler manipulates the stack pointer (in a2), it has to * register a valid fixup handler and cannot use the default handlers. * * - The handler can use any other generic register from a3 to a15, but it * must save the content of these registers to stack (PT_AREG3...PT_AREGx) * * - These registers must be saved before a double exception can occur. * * - If we ever implement handling signals while in double exceptions, the * number of registers a fast handler has saved (excluding a0 and a1) must * be written to PT_AREG1. (1 if only a3 is used, 2 for a3 and a4, etc. 
) * * The fixup handlers are special handlers: * * - Fixup entry conditions differ from regular exceptions: * * a0: DEPC * a1: a1 * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE * a3: exctable * depc: a0 * excsave_1: a3 * * - When the kernel enters the fixup handler, it still assumes it is in a * critical section, so EXC_TABLE_FIXUP variable is set to exc_table. * The fixup handler, therefore, has to re-register itself as the fixup * handler before it returns from the double exception. * * - Fixup handler can share the same exception frame with the fast handler. * The kernel stack pointer is not changed when entering the fixup handler. * * - Fixup handlers can jump to the default kernel and user exception * handlers. Before it jumps, though, it has to setup a exception frame * on stack. Because the default handler resets the register fixup handler * the fixup handler must make sure that the default handler returns to * it instead of the exception address, so it can re-register itself as * the fixup handler. * * In case of a critical condition where the kernel cannot recover, we jump * to unrecoverable_exception with the following entry conditions. * All registers a0...a15 are unchanged from the last exception, except: * * a0: last address before we jumped to the unrecoverable_exception. * excsave_1: a0 * * * See the handle_alloca_user and spill_registers routines for example clients. * * FIXME: Note: we currently don't allow signal handling coming from a double * exception, so the item markt with (*) is not required. */ .section .DoubleExceptionVector.text, "ax" ENTRY(_DoubleExceptionVector) xsr a3, excsave1 s32i a2, a3, EXC_TABLE_DOUBLE_SAVE /* Check for kernel double exception (usually fatal). */ rsr a2, ps _bbsi.l a2, PS_UM_BIT, 1f j .Lksp .align 4 .literal_position 1: /* Check if we are currently handling a window exception. */ /* Note: We don't need to indicate that we enter a critical section. */ xsr a0, depc # get DEPC, save a0 movi a2, WINDOW_VECTORS_VADDR _bltu a0, a2, .Lfixup addi a2, a2, WINDOW_VECTORS_SIZE _bgeu a0, a2, .Lfixup /* Window overflow/underflow exception. Get stack pointer. */ l32i a2, a3, EXC_TABLE_KSTK /* Check for overflow/underflow exception, jump if overflow. */ bbci.l a0, 6, _DoubleExceptionVector_WindowOverflow /* * Restart window underflow exception. * Currently: * depc = orig a0, * a0 = orig DEPC, * a2 = new sp based on KSTK from exc_table * a3 = excsave_1 * excsave_1 = orig a3 * * We return to the instruction in user space that caused the window * underflow exception. Therefore, we change window base to the value * before we entered the window underflow exception and prepare the * registers to return as if we were coming from a regular exception * by changing depc (in a0). * Note: We can trash the current window frame (a0...a3) and depc! */ _DoubleExceptionVector_WindowUnderflow: xsr a3, excsave1 wsr a2, depc # save stack pointer temporarily rsr a0, ps extui a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH wsr a0, windowbase rsync /* We are now in the previous window frame. Save registers again. */ xsr a2, depc # save a2 and get stack pointer s32i a0, a2, PT_AREG0 xsr a3, excsave1 rsr a0, exccause s32i a0, a2, PT_DEPC # mark it as a regular exception addx4 a0, a0, a3 xsr a3, excsave1 l32i a0, a0, EXC_TABLE_FAST_USER jx a0 /* * We only allow the ITLB miss exception if we are in kernel space. * All other exceptions are unexpected and thus unrecoverable! 
*/ #ifdef CONFIG_MMU .extern fast_second_level_miss_double_kernel .Lksp: /* a0: a0, a1: a1, a2: a2, a3: trashed, depc: depc, excsave: a3 */ rsr a3, exccause beqi a3, EXCCAUSE_ITLB_MISS, 1f addi a3, a3, -EXCCAUSE_DTLB_MISS bnez a3, .Lunrecoverable 1: movi a3, fast_second_level_miss_double_kernel jx a3 #else .equ .Lksp, .Lunrecoverable #endif /* Critical! We can't handle this situation. PANIC! */ .extern unrecoverable_exception .Lunrecoverable_fixup: l32i a2, a3, EXC_TABLE_DOUBLE_SAVE xsr a0, depc .Lunrecoverable: rsr a3, excsave1 wsr a0, excsave1 call0 unrecoverable_exception .Lfixup:/* Check for a fixup handler or if we were in a critical section. */ /* a0: depc, a1: a1, a2: trash, a3: exctable, depc: a0, excsave1: a3 */ /* Enter critical section. */ l32i a2, a3, EXC_TABLE_FIXUP s32i a3, a3, EXC_TABLE_FIXUP beq a2, a3, .Lunrecoverable_fixup # critical section beqz a2, .Ldflt # no handler was registered /* a0: depc, a1: a1, a2: trash, a3: exctable, depc: a0, excsave: a3 */ jx a2 .Ldflt: /* Get stack pointer. */ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE addi a2, a2, -PT_USER_SIZE /* a0: depc, a1: a1, a2: kstk, a3: exctable, depc: a0, excsave: a3 */ s32i a0, a2, PT_DEPC l32i a0, a3, EXC_TABLE_DOUBLE_SAVE xsr a0, depc s32i a0, a2, PT_AREG0 /* a0: avail, a1: a1, a2: kstk, a3: exctable, depc: a2, excsave: a3 */ rsr a0, exccause addx4 a0, a0, a3 xsr a3, excsave1 l32i a0, a0, EXC_TABLE_FAST_USER jx a0 /* * Restart window OVERFLOW exception. * Currently: * depc = orig a0, * a0 = orig DEPC, * a2 = new sp based on KSTK from exc_table * a3 = EXCSAVE_1 * excsave_1 = orig a3 * * We return to the instruction in user space that caused the window * overflow exception. Therefore, we change window base to the value * before we entered the window overflow exception and prepare the * registers to return as if we were coming from a regular exception * by changing DEPC (in a0). * * NOTE: We CANNOT trash the current window frame (a0...a3), but we * can clobber depc. * * The tricky part here is that overflow8 and overflow12 handlers * save a0, then clobber a0. To restart the handler, we have to restore * a0 if the double exception was past the point where a0 was clobbered. * * To keep things simple, we take advantage of the fact all overflow * handlers save a0 in their very first instruction. If DEPC was past * that instruction, we can safely restore a0 from where it was saved * on the stack. * * a0: depc, a1: a1, a2: kstk, a3: exc_table, depc: a0, excsave1: a3 */ _DoubleExceptionVector_WindowOverflow: extui a2, a0, 0, 6 # get offset into 64-byte vector handler beqz a2, 1f # if at start of vector, don't restore addi a0, a0, -128 bbsi.l a0, 8, 1f # don't restore except for overflow 8 and 12 /* * This fixup handler is for the extremely unlikely case where the * overflow handler's reference thru a0 gets a hardware TLB refill * that bumps out the (distinct, aliasing) TLB entry that mapped its * prior references thru a9/a13, and where our reference now thru * a9/a13 gets a 2nd-level miss exception (not hardware TLB refill). */ movi a2, window_overflow_restore_a0_fixup s32i a2, a3, EXC_TABLE_FIXUP l32i a2, a3, EXC_TABLE_DOUBLE_SAVE xsr a3, excsave1 bbsi.l a0, 7, 2f /* * Restore a0 as saved by _WindowOverflow8(). */ l32e a0, a9, -16 wsr a0, depc # replace the saved a0 j 3f 2: /* * Restore a0 as saved by _WindowOverflow12(). 
*/ l32e a0, a13, -16 wsr a0, depc # replace the saved a0 3: xsr a3, excsave1 movi a0, 0 s32i a0, a3, EXC_TABLE_FIXUP s32i a2, a3, EXC_TABLE_DOUBLE_SAVE 1: /* * Restore WindowBase while leaving all address registers restored. * We have to use ROTW for this, because WSR.WINDOWBASE requires * an address register (which would prevent restore). * * Window Base goes from 0 ... 7 (Module 8) * Window Start is 8 bits; Ex: (0b1010 1010):0x55 from series of call4s */ rsr a0, ps extui a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH rsr a2, windowbase sub a0, a2, a0 extui a0, a0, 0, 3 l32i a2, a3, EXC_TABLE_DOUBLE_SAVE xsr a3, excsave1 beqi a0, 1, .L1pane beqi a0, 3, .L3pane rsr a0, depc rotw -2 /* * We are now in the user code's original window frame. * Process the exception as a user exception as if it was * taken by the user code. * * This is similar to the user exception vector, * except that PT_DEPC isn't set to EXCCAUSE. */ 1: xsr a3, excsave1 wsr a2, depc l32i a2, a3, EXC_TABLE_KSTK s32i a0, a2, PT_AREG0 rsr a0, exccause s32i a0, a2, PT_DEPC _DoubleExceptionVector_handle_exception: addi a0, a0, -EXCCAUSE_UNALIGNED beqz a0, 2f addx4 a0, a0, a3 l32i a0, a0, EXC_TABLE_FAST_USER + 4 * EXCCAUSE_UNALIGNED xsr a3, excsave1 jx a0 2: movi a0, user_exception xsr a3, excsave1 jx a0 .L1pane: rsr a0, depc rotw -1 j 1b .L3pane: rsr a0, depc rotw -3 j 1b ENDPROC(_DoubleExceptionVector) .text /* * Fixup handler for TLB miss in double exception handler for window owerflow. * We get here with windowbase set to the window that was being spilled and * a0 trashed. a0 bit 7 determines if this is a call8 (bit clear) or call12 * (bit set) window. * * We do the following here: * - go to the original window retaining a0 value; * - set up exception stack to return back to appropriate a0 restore code * (we'll need to rotate window back and there's no place to save this * information, use different return address for that); * - handle the exception; * - go to the window that was being spilled; * - set up window_overflow_restore_a0_fixup as a fixup routine; * - reload a0; * - restore the original window; * - reset the default fixup routine; * - return to user. By the time we get to this fixup handler all information * about the conditions of the original double exception that happened in * the window overflow handler is lost, so we just return to userspace to * retry overflow from start. * * a0: value of depc, original value in depc * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE * a3: exctable, original value in excsave1 */ .literal_position ENTRY(window_overflow_restore_a0_fixup) rsr a0, ps extui a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH rsr a2, windowbase sub a0, a2, a0 extui a0, a0, 0, 3 l32i a2, a3, EXC_TABLE_DOUBLE_SAVE xsr a3, excsave1 _beqi a0, 1, .Lhandle_1 _beqi a0, 3, .Lhandle_3 .macro overflow_fixup_handle_exception_pane n rsr a0, depc rotw -\n xsr a3, excsave1 wsr a2, depc l32i a2, a3, EXC_TABLE_KSTK s32i a0, a2, PT_AREG0 movi a0, .Lrestore_\n s32i a0, a2, PT_DEPC rsr a0, exccause j _DoubleExceptionVector_handle_exception .endm overflow_fixup_handle_exception_pane 2 .Lhandle_1: overflow_fixup_handle_exception_pane 1 .Lhandle_3: overflow_fixup_handle_exception_pane 3 .macro overflow_fixup_restore_a0_pane n rotw \n /* Need to preserve a0 value here to be able to handle exception * that may occur on a0 reload from stack. It may occur because * TLB miss handler may not be atomic and pointer to page table * may be lost before we get here. There are no free registers, * so we need to use EXC_TABLE_DOUBLE_SAVE area. 
*/ xsr a3, excsave1 s32i a2, a3, EXC_TABLE_DOUBLE_SAVE movi a2, window_overflow_restore_a0_fixup s32i a2, a3, EXC_TABLE_FIXUP l32i a2, a3, EXC_TABLE_DOUBLE_SAVE xsr a3, excsave1 bbsi.l a0, 7, 1f l32e a0, a9, -16 j 2f 1: l32e a0, a13, -16 2: rotw -\n .endm .Lrestore_2: overflow_fixup_restore_a0_pane 2 .Lset_default_fixup: xsr a3, excsave1 s32i a2, a3, EXC_TABLE_DOUBLE_SAVE movi a2, 0 s32i a2, a3, EXC_TABLE_FIXUP l32i a2, a3, EXC_TABLE_DOUBLE_SAVE xsr a3, excsave1 rfe .Lrestore_1: overflow_fixup_restore_a0_pane 1 j .Lset_default_fixup .Lrestore_3: overflow_fixup_restore_a0_pane 3 j .Lset_default_fixup ENDPROC(window_overflow_restore_a0_fixup) /* * Debug interrupt vector * * There is not much space here, so simply jump to another handler. * EXCSAVE[DEBUGLEVEL] has been set to that handler. */ .section .DebugInterruptVector.text, "ax" ENTRY(_DebugInterruptVector) xsr a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL s32i a0, a3, DT_DEBUG_SAVE l32i a0, a3, DT_DEBUG_EXCEPTION jx a0 ENDPROC(_DebugInterruptVector) /* * Medium priority level interrupt vectors * * Each takes less than 16 (0x10) bytes, no literals, by placing * the extra 8 bytes that would otherwise be required in the window * vectors area where there is space. With relocatable vectors, * all vectors are within ~ 4 kB range of each other, so we can * simply jump (J) to another vector without having to use JX. * * common_exception code gets current IRQ level in PS.INTLEVEL * and preserves it for the IRQ handling time. */ .macro irq_entry_level level .if XCHAL_EXCM_LEVEL >= \level .section .Level\level\()InterruptVector.text, "ax" ENTRY(_Level\level\()InterruptVector) wsr a0, excsave2 rsr a0, epc\level wsr a0, epc1 .if \level <= LOCKLEVEL movi a0, EXCCAUSE_LEVEL1_INTERRUPT .else movi a0, EXCCAUSE_MAPPED_NMI .endif wsr a0, exccause rsr a0, eps\level # branch to user or kernel vector j _SimulateUserKernelVectorException .endif .endm irq_entry_level 2 irq_entry_level 3 irq_entry_level 4 irq_entry_level 5 irq_entry_level 6 /* Window overflow and underflow handlers. * The handlers must be 64 bytes apart, first starting with the underflow * handlers underflow-4 to underflow-12, then the overflow handlers * overflow-4 to overflow-12. * * Note: We rerun the underflow handlers if we hit an exception, so * we try to access any page that would cause a page fault early. */ #define ENTRY_ALIGN64(name) \ .globl name; \ .align 64; \ name: .section .WindowVectors.text, "ax" /* 4-Register Window Overflow Vector (Handler) */ ENTRY_ALIGN64(_WindowOverflow4) s32e a0, a5, -16 s32e a1, a5, -12 s32e a2, a5, -8 s32e a3, a5, -4 rfwo ENDPROC(_WindowOverflow4) #if XCHAL_EXCM_LEVEL >= 2 /* Not a window vector - but a convenient location * (where we know there's space) for continuation of * medium priority interrupt dispatch code. 
* On entry here, a0 contains PS, and EPC2 contains saved a0: */ .align 4 _SimulateUserKernelVectorException: addi a0, a0, (1 << PS_EXCM_BIT) #if !XTENSA_FAKE_NMI wsr a0, ps #endif bbsi.l a0, PS_UM_BIT, 1f # branch if user mode xsr a0, excsave2 # restore a0 j _KernelExceptionVector # simulate kernel vector exception 1: xsr a0, excsave2 # restore a0 j _UserExceptionVector # simulate user vector exception #endif /* 4-Register Window Underflow Vector (Handler) */ ENTRY_ALIGN64(_WindowUnderflow4) l32e a0, a5, -16 l32e a1, a5, -12 l32e a2, a5, -8 l32e a3, a5, -4 rfwu ENDPROC(_WindowUnderflow4) /* 8-Register Window Overflow Vector (Handler) */ ENTRY_ALIGN64(_WindowOverflow8) s32e a0, a9, -16 l32e a0, a1, -12 s32e a2, a9, -8 s32e a1, a9, -12 s32e a3, a9, -4 s32e a4, a0, -32 s32e a5, a0, -28 s32e a6, a0, -24 s32e a7, a0, -20 rfwo ENDPROC(_WindowOverflow8) /* 8-Register Window Underflow Vector (Handler) */ ENTRY_ALIGN64(_WindowUnderflow8) l32e a1, a9, -12 l32e a0, a9, -16 l32e a7, a1, -12 l32e a2, a9, -8 l32e a4, a7, -32 l32e a3, a9, -4 l32e a5, a7, -28 l32e a6, a7, -24 l32e a7, a7, -20 rfwu ENDPROC(_WindowUnderflow8) /* 12-Register Window Overflow Vector (Handler) */ ENTRY_ALIGN64(_WindowOverflow12) s32e a0, a13, -16 l32e a0, a1, -12 s32e a1, a13, -12 s32e a2, a13, -8 s32e a3, a13, -4 s32e a4, a0, -48 s32e a5, a0, -44 s32e a6, a0, -40 s32e a7, a0, -36 s32e a8, a0, -32 s32e a9, a0, -28 s32e a10, a0, -24 s32e a11, a0, -20 rfwo ENDPROC(_WindowOverflow12) /* 12-Register Window Underflow Vector (Handler) */ ENTRY_ALIGN64(_WindowUnderflow12) l32e a1, a13, -12 l32e a0, a13, -16 l32e a11, a1, -12 l32e a2, a13, -8 l32e a4, a11, -48 l32e a8, a11, -32 l32e a3, a13, -4 l32e a5, a11, -44 l32e a6, a11, -40 l32e a7, a11, -36 l32e a9, a11, -28 l32e a10, a11, -24 l32e a11, a11, -20 rfwu ENDPROC(_WindowUnderflow12) .text
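The window overflow handlers above follow a fixed spill layout: a0..a3 of the frame being spilled go into the 16-byte base save area just below the next frame's stack pointer (a5, a9 or a13), and call8/call12 frames additionally spill a4..a7 or a4..a11 below the caller's stack pointer, which the handler reloads from the spilled frame's own base save area (l32e a0, a1, -12). The underflow handlers reverse exactly this layout. Below is a minimal C sketch of that layout, not kernel code; the names spill_frame, next_sp, caller_sp and callinc are illustrative only.

#include <stdint.h>

/* callinc: 1 = call4, 2 = call8, 3 = call12 (the WINDOWBASE increment) */
static void spill_frame(const uint32_t a[12], int callinc,
			uint32_t *next_sp,	/* a5/a9/a13 in the handlers above */
			uint32_t *caller_sp)	/* what l32e a0, a1, -12 reloads */
{
	int i, extra;

	/* Base save area: a0..a3 at next_sp - 16 .. next_sp - 4 bytes. */
	for (i = 0; i < 4; i++)
		next_sp[i - 4] = a[i];

	if (callinc == 1)
		return;			/* _WindowOverflow4 saves nothing else */

	/* Extended save area: a4..a7 (call8) or a4..a11 (call12), ending
	 * 16 bytes below the caller's stack pointer (offsets -32/-48 .. -20). */
	extra = (callinc == 2) ? 4 : 8;
	for (i = 0; i < extra; i++)
		caller_sp[-4 - extra + i] = a[4 + i];
}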
AirFortressIlikara/LS2K0300-linux-4.19
47,934
arch/xtensa/kernel/entry.S
/* * Low-level exception handling * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2004 - 2008 by Tensilica Inc. * Copyright (C) 2015 Cadence Design Systems Inc. * * Chris Zankel <chris@zankel.net> * */ #include <linux/linkage.h> #include <asm/asm-offsets.h> #include <asm/asmmacro.h> #include <asm/processor.h> #include <asm/coprocessor.h> #include <asm/thread_info.h> #include <asm/asm-uaccess.h> #include <asm/unistd.h> #include <asm/ptrace.h> #include <asm/current.h> #include <asm/pgtable.h> #include <asm/page.h> #include <asm/signal.h> #include <asm/tlbflush.h> #include <variant/tie-asm.h> /* Unimplemented features. */ #undef KERNEL_STACK_OVERFLOW_CHECK /* Not well tested. * * - fast_coprocessor */ /* * Macro to find first bit set in WINDOWBASE from the left + 1 * * 100....0 -> 1 * 010....0 -> 2 * 000....1 -> WSBITS */ .macro ffs_ws bit mask #if XCHAL_HAVE_NSA nsau \bit, \mask # 32-WSBITS ... 31 (32 iff 0) addi \bit, \bit, WSBITS - 32 + 1 # uppest bit set -> return 1 #else movi \bit, WSBITS #if WSBITS > 16 _bltui \mask, 0x10000, 99f addi \bit, \bit, -16 extui \mask, \mask, 16, 16 #endif #if WSBITS > 8 99: _bltui \mask, 0x100, 99f addi \bit, \bit, -8 srli \mask, \mask, 8 #endif 99: _bltui \mask, 0x10, 99f addi \bit, \bit, -4 srli \mask, \mask, 4 99: _bltui \mask, 0x4, 99f addi \bit, \bit, -2 srli \mask, \mask, 2 99: _bltui \mask, 0x2, 99f addi \bit, \bit, -1 99: #endif .endm .macro irq_save flags tmp #if XTENSA_FAKE_NMI #if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL rsr \flags, ps extui \tmp, \flags, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH bgei \tmp, LOCKLEVEL, 99f rsil \tmp, LOCKLEVEL 99: #else movi \tmp, LOCKLEVEL rsr \flags, ps or \flags, \flags, \tmp xsr \flags, ps rsync #endif #else rsil \flags, LOCKLEVEL #endif .endm /* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */ /* * First-level exception handler for user exceptions. * Save some special registers, extra states and all registers in the AR * register file that were in use in the user task, and jump to the common * exception code. * We save SAR (used to calculate WMASK), and WB and WS (we don't have to * save them for kernel exceptions). * * Entry condition for user_exception: * * a0: trashed, original value saved on stack (PT_AREG0) * a1: a1 * a2: new stack pointer, original value in depc * a3: a3 * depc: a2, original value saved on stack (PT_DEPC) * excsave1: dispatch table * * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception * * Entry condition for _user_exception: * * a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC * excsave has been restored, and * stack pointer (a1) has been set. * * Note: _user_exception might be at an odd address. Don't use call0..call12 */ .literal_position ENTRY(user_exception) /* Save a1, a2, a3, and set SP. */ rsr a0, depc s32i a1, a2, PT_AREG1 s32i a0, a2, PT_AREG2 s32i a3, a2, PT_AREG3 mov a1, a2 .globl _user_exception _user_exception: /* Save SAR and turn off single stepping */ movi a2, 0 wsr a2, depc # terminate user stack trace with 0 rsr a3, sar xsr a2, icountlevel s32i a3, a1, PT_SAR s32i a2, a1, PT_ICOUNTLEVEL #if XCHAL_HAVE_THREADPTR rur a2, threadptr s32i a2, a1, PT_THREADPTR #endif /* Rotate ws so that the current windowbase is at bit0. */ /* Assume ws = xxwww1yyyy. 
Rotate ws right, so that a2 = yyyyxxwww1 */ rsr a2, windowbase rsr a3, windowstart ssr a2 s32i a2, a1, PT_WINDOWBASE s32i a3, a1, PT_WINDOWSTART slli a2, a3, 32-WSBITS src a2, a3, a2 srli a2, a2, 32-WSBITS s32i a2, a1, PT_WMASK # needed for restoring registers /* Save only live registers. */ _bbsi.l a2, 1, 1f s32i a4, a1, PT_AREG4 s32i a5, a1, PT_AREG5 s32i a6, a1, PT_AREG6 s32i a7, a1, PT_AREG7 _bbsi.l a2, 2, 1f s32i a8, a1, PT_AREG8 s32i a9, a1, PT_AREG9 s32i a10, a1, PT_AREG10 s32i a11, a1, PT_AREG11 _bbsi.l a2, 3, 1f s32i a12, a1, PT_AREG12 s32i a13, a1, PT_AREG13 s32i a14, a1, PT_AREG14 s32i a15, a1, PT_AREG15 _bnei a2, 1, 1f # only one valid frame? /* Only one valid frame, skip saving regs. */ j 2f /* Save the remaining registers. * We have to save all registers up to the first '1' from * the right, except the current frame (bit 0). * Assume a2 is: 001001000110001 * All register frames starting from the top field to the marked '1' * must be saved. */ 1: addi a3, a2, -1 # eliminate '1' in bit 0: yyyyxxww0 neg a3, a3 # yyyyxxww0 -> YYYYXXWW1+1 and a3, a3, a2 # max. only one bit is set /* Find number of frames to save */ ffs_ws a0, a3 # number of frames to the '1' from left /* Store information into WMASK: * bits 0..3: xxx1 masked lower 4 bits of the rotated windowstart, * bits 4...: number of valid 4-register frames */ slli a3, a0, 4 # number of frames to save in bits 8..4 extui a2, a2, 0, 4 # mask for the first 16 registers or a2, a3, a2 s32i a2, a1, PT_WMASK # needed when we restore the reg-file /* Save 4 registers at a time */ 1: rotw -1 s32i a0, a5, PT_AREG_END - 16 s32i a1, a5, PT_AREG_END - 12 s32i a2, a5, PT_AREG_END - 8 s32i a3, a5, PT_AREG_END - 4 addi a0, a4, -1 addi a1, a5, -16 _bnez a0, 1b /* WINDOWBASE still in SAR! */ rsr a2, sar # original WINDOWBASE movi a3, 1 ssl a2 sll a3, a3 wsr a3, windowstart # set corresponding WINDOWSTART bit wsr a2, windowbase # and WINDOWSTART rsync /* We are back to the original stack pointer (a1) */ 2: /* Now, jump to the common exception handler. */ j common_exception ENDPROC(user_exception) /* * First-level exit handler for kernel exceptions * Save special registers and the live window frame. * Note: Even though we changes the stack pointer, we don't have to do a * MOVSP here, as we do that when we return from the exception. * (See comment in the kernel exception exit code) * * Entry condition for kernel_exception: * * a0: trashed, original value saved on stack (PT_AREG0) * a1: a1 * a2: new stack pointer, original in DEPC * a3: a3 * depc: a2, original value saved on stack (PT_DEPC) * excsave_1: dispatch table * * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception * * Entry condition for _kernel_exception: * * a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC * excsave has been restored, and * stack pointer (a1) has been set. * * Note: _kernel_exception might be at an odd address. Don't use call0..call12 */ ENTRY(kernel_exception) /* Save a1, a2, a3, and set SP. */ rsr a0, depc # get a2 s32i a1, a2, PT_AREG1 s32i a0, a2, PT_AREG2 s32i a3, a2, PT_AREG3 mov a1, a2 .globl _kernel_exception _kernel_exception: /* Save SAR and turn off single stepping */ movi a2, 0 rsr a3, sar xsr a2, icountlevel s32i a3, a1, PT_SAR s32i a2, a1, PT_ICOUNTLEVEL /* Rotate ws so that the current windowbase is at bit0. */ /* Assume ws = xxwww1yyyy. 
Rotate ws right, so that a2 = yyyyxxwww1 */ rsr a2, windowbase # don't need to save these, we only rsr a3, windowstart # need shifted windowstart: windowmask ssr a2 slli a2, a3, 32-WSBITS src a2, a3, a2 srli a2, a2, 32-WSBITS s32i a2, a1, PT_WMASK # needed for kernel_exception_exit /* Save only the live window-frame */ _bbsi.l a2, 1, 1f s32i a4, a1, PT_AREG4 s32i a5, a1, PT_AREG5 s32i a6, a1, PT_AREG6 s32i a7, a1, PT_AREG7 _bbsi.l a2, 2, 1f s32i a8, a1, PT_AREG8 s32i a9, a1, PT_AREG9 s32i a10, a1, PT_AREG10 s32i a11, a1, PT_AREG11 _bbsi.l a2, 3, 1f s32i a12, a1, PT_AREG12 s32i a13, a1, PT_AREG13 s32i a14, a1, PT_AREG14 s32i a15, a1, PT_AREG15 _bnei a2, 1, 1f /* Copy spill slots of a0 and a1 to imitate movsp * in order to keep exception stack continuous */ l32i a3, a1, PT_SIZE l32i a0, a1, PT_SIZE + 4 s32e a3, a1, -16 s32e a0, a1, -12 1: l32i a0, a1, PT_AREG0 # restore saved a0 wsr a0, depc #ifdef KERNEL_STACK_OVERFLOW_CHECK /* Stack overflow check, for debugging */ extui a2, a1, TASK_SIZE_BITS,XX movi a3, SIZE?? _bge a2, a3, out_of_stack_panic #endif /* * This is the common exception handler. * We get here from the user exception handler or simply by falling through * from the kernel exception handler. * Save the remaining special registers, switch to kernel mode, and jump * to the second-level exception handler. * */ common_exception: /* Save some registers, disable loops and clear the syscall flag. */ rsr a2, debugcause rsr a3, epc1 s32i a2, a1, PT_DEBUGCAUSE s32i a3, a1, PT_PC movi a2, -1 rsr a3, excvaddr s32i a2, a1, PT_SYSCALL movi a2, 0 s32i a3, a1, PT_EXCVADDR #if XCHAL_HAVE_LOOPS xsr a2, lcount s32i a2, a1, PT_LCOUNT #endif /* It is now save to restore the EXC_TABLE_FIXUP variable. */ rsr a2, exccause movi a3, 0 rsr a0, excsave1 s32i a2, a1, PT_EXCCAUSE s32i a3, a0, EXC_TABLE_FIXUP /* All unrecoverable states are saved on stack, now, and a1 is valid. * Now we can allow exceptions again. In case we've got an interrupt * PS.INTLEVEL is set to LOCKLEVEL disabling furhter interrupts, * otherwise it's left unchanged. * * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X) */ rsr a3, ps s32i a3, a1, PT_PS # save ps #if XTENSA_FAKE_NMI /* Correct PS needs to be saved in the PT_PS: * - in case of exception or level-1 interrupt it's in the PS, * and is already saved. * - in case of medium level interrupt it's in the excsave2. */ movi a0, EXCCAUSE_MAPPED_NMI extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH beq a2, a0, .Lmedium_level_irq bnei a2, EXCCAUSE_LEVEL1_INTERRUPT, .Lexception beqz a3, .Llevel1_irq # level-1 IRQ sets ps.intlevel to 0 .Lmedium_level_irq: rsr a0, excsave2 s32i a0, a1, PT_PS # save medium-level interrupt ps bgei a3, LOCKLEVEL, .Lexception .Llevel1_irq: movi a3, LOCKLEVEL .Lexception: movi a0, 1 << PS_WOE_BIT or a3, a3, a0 #else addi a2, a2, -EXCCAUSE_LEVEL1_INTERRUPT movi a0, LOCKLEVEL extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH # a3 = PS.INTLEVEL moveqz a3, a0, a2 # a3 = LOCKLEVEL iff interrupt movi a2, 1 << PS_WOE_BIT or a3, a3, a2 rsr a2, exccause #endif /* restore return address (or 0 if return to userspace) */ rsr a0, depc wsr a3, ps rsync # PS.WOE => rsync => overflow /* Save lbeg, lend */ #if XCHAL_HAVE_LOOPS rsr a4, lbeg rsr a3, lend s32i a4, a1, PT_LBEG s32i a3, a1, PT_LEND #endif /* Save SCOMPARE1 */ #if XCHAL_HAVE_S32C1I rsr a3, scompare1 s32i a3, a1, PT_SCOMPARE1 #endif /* Save optional registers. */ save_xtregs_opt a1 a3 a4 a5 a6 a7 PT_XTREGS_OPT /* Go to second-level dispatcher. 
Set up parameters to pass to the * exception handler and call the exception handler. */ rsr a4, excsave1 mov a6, a1 # pass stack frame mov a7, a2 # pass EXCCAUSE addx4 a4, a2, a4 l32i a4, a4, EXC_TABLE_DEFAULT # load handler /* Call the second-level handler */ callx4 a4 /* Jump here for exception exit */ .global common_exception_return common_exception_return: #if XTENSA_FAKE_NMI l32i a2, a1, PT_EXCCAUSE movi a3, EXCCAUSE_MAPPED_NMI beq a2, a3, .LNMIexit #endif 1: irq_save a2, a3 #ifdef CONFIG_TRACE_IRQFLAGS call4 trace_hardirqs_off #endif /* Jump if we are returning from kernel exceptions. */ l32i a3, a1, PT_PS GET_THREAD_INFO(a2, a1) l32i a4, a2, TI_FLAGS _bbci.l a3, PS_UM_BIT, 6f /* Specific to a user exception exit: * We need to check some flags for signal handling and rescheduling, * and have to restore WB and WS, extra states, and all registers * in the register file that were in use in the user task. * Note that we don't disable interrupts here. */ _bbsi.l a4, TIF_NEED_RESCHED, 3f _bbsi.l a4, TIF_NOTIFY_RESUME, 2f _bbci.l a4, TIF_SIGPENDING, 5f 2: l32i a4, a1, PT_DEPC bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f /* Call do_signal() */ #ifdef CONFIG_TRACE_IRQFLAGS call4 trace_hardirqs_on #endif rsil a2, 0 mov a6, a1 call4 do_notify_resume # int do_notify_resume(struct pt_regs*) j 1b 3: /* Reschedule */ #ifdef CONFIG_TRACE_IRQFLAGS call4 trace_hardirqs_on #endif rsil a2, 0 call4 schedule # void schedule (void) j 1b #ifdef CONFIG_PREEMPT 6: _bbci.l a4, TIF_NEED_RESCHED, 4f /* Check current_thread_info->preempt_count */ l32i a4, a2, TI_PRE_COUNT bnez a4, 4f call4 preempt_schedule_irq j 1b #endif #if XTENSA_FAKE_NMI .LNMIexit: l32i a3, a1, PT_PS _bbci.l a3, PS_UM_BIT, 4f #endif 5: #ifdef CONFIG_HAVE_HW_BREAKPOINT _bbci.l a4, TIF_DB_DISABLED, 7f call4 restore_dbreak 7: #endif #ifdef CONFIG_DEBUG_TLB_SANITY l32i a4, a1, PT_DEPC bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f call4 check_tlb_sanity #endif 6: 4: #ifdef CONFIG_TRACE_IRQFLAGS extui a4, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH bgei a4, LOCKLEVEL, 1f call4 trace_hardirqs_on 1: #endif /* Restore optional registers. */ load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT /* Restore SCOMPARE1 */ #if XCHAL_HAVE_S32C1I l32i a2, a1, PT_SCOMPARE1 wsr a2, scompare1 #endif wsr a3, ps /* disable interrupts */ _bbci.l a3, PS_UM_BIT, kernel_exception_exit user_exception_exit: /* Restore the state of the task and return from the exception. */ /* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */ l32i a2, a1, PT_WINDOWBASE l32i a3, a1, PT_WINDOWSTART wsr a1, depc # use DEPC as temp storage wsr a3, windowstart # restore WINDOWSTART ssr a2 # preserve user's WB in the SAR wsr a2, windowbase # switch to user's saved WB rsync rsr a1, depc # restore stack pointer l32i a2, a1, PT_WMASK # register frames saved (in bits 4...9) rotw -1 # we restore a4..a7 _bltui a6, 16, 1f # only have to restore current window? /* The working registers are a0 and a3. We are restoring to * a4..a7. Be careful not to destroy what we have just restored. 
* Note: wmask has the format YYYYM: * Y: number of registers saved in groups of 4 * M: 4 bit mask of first 16 registers */ mov a2, a6 mov a3, a5 2: rotw -1 # a0..a3 become a4..a7 addi a3, a7, -4*4 # next iteration addi a2, a6, -16 # decrementing Y in WMASK l32i a4, a3, PT_AREG_END + 0 l32i a5, a3, PT_AREG_END + 4 l32i a6, a3, PT_AREG_END + 8 l32i a7, a3, PT_AREG_END + 12 _bgeui a2, 16, 2b /* Clear unrestored registers (don't leak anything to user-land */ 1: rsr a0, windowbase rsr a3, sar sub a3, a0, a3 beqz a3, 2f extui a3, a3, 0, WBBITS 1: rotw -1 addi a3, a7, -1 movi a4, 0 movi a5, 0 movi a6, 0 movi a7, 0 bgei a3, 1, 1b /* We are back were we were when we started. * Note: a2 still contains WMASK (if we've returned to the original * frame where we had loaded a2), or at least the lower 4 bits * (if we have restored WSBITS-1 frames). */ 2: #if XCHAL_HAVE_THREADPTR l32i a3, a1, PT_THREADPTR wur a3, threadptr #endif j common_exception_exit /* This is the kernel exception exit. * We avoided to do a MOVSP when we entered the exception, but we * have to do it here. */ kernel_exception_exit: /* Check if we have to do a movsp. * * We only have to do a movsp if the previous window-frame has * been spilled to the *temporary* exception stack instead of the * task's stack. This is the case if the corresponding bit in * WINDOWSTART for the previous window-frame was set before * (not spilled) but is zero now (spilled). * If this bit is zero, all other bits except the one for the * current window frame are also zero. So, we can use a simple test: * 'and' WINDOWSTART and WINDOWSTART-1: * * (XXXXXX1[0]* - 1) AND XXXXXX1[0]* = XXXXXX0[0]* * * The result is zero only if one bit was set. * * (Note: We might have gone through several task switches before * we come back to the current task, so WINDOWBASE might be * different from the time the exception occurred.) */ /* Test WINDOWSTART before and after the exception. * We actually have WMASK, so we only have to test if it is 1 or not. */ l32i a2, a1, PT_WMASK _beqi a2, 1, common_exception_exit # Spilled before exception,jump /* Test WINDOWSTART now. If spilled, do the movsp */ rsr a3, windowstart addi a0, a3, -1 and a3, a3, a0 _bnez a3, common_exception_exit /* Do a movsp (we returned from a call4, so we have at least a0..a7) */ addi a0, a1, -16 l32i a3, a0, 0 l32i a4, a0, 4 s32i a3, a1, PT_SIZE+0 s32i a4, a1, PT_SIZE+4 l32i a3, a0, 8 l32i a4, a0, 12 s32i a3, a1, PT_SIZE+8 s32i a4, a1, PT_SIZE+12 /* Common exception exit. * We restore the special register and the current window frame, and * return from the exception. * * Note: We expect a2 to hold PT_WMASK */ common_exception_exit: /* Restore address registers. */ _bbsi.l a2, 1, 1f l32i a4, a1, PT_AREG4 l32i a5, a1, PT_AREG5 l32i a6, a1, PT_AREG6 l32i a7, a1, PT_AREG7 _bbsi.l a2, 2, 1f l32i a8, a1, PT_AREG8 l32i a9, a1, PT_AREG9 l32i a10, a1, PT_AREG10 l32i a11, a1, PT_AREG11 _bbsi.l a2, 3, 1f l32i a12, a1, PT_AREG12 l32i a13, a1, PT_AREG13 l32i a14, a1, PT_AREG14 l32i a15, a1, PT_AREG15 /* Restore PC, SAR */ 1: l32i a2, a1, PT_PC l32i a3, a1, PT_SAR wsr a2, epc1 wsr a3, sar /* Restore LBEG, LEND, LCOUNT */ #if XCHAL_HAVE_LOOPS l32i a2, a1, PT_LBEG l32i a3, a1, PT_LEND wsr a2, lbeg l32i a2, a1, PT_LCOUNT wsr a3, lend wsr a2, lcount #endif /* We control single stepping through the ICOUNTLEVEL register. */ l32i a2, a1, PT_ICOUNTLEVEL movi a3, -2 wsr a2, icountlevel wsr a3, icount /* Check if it was double exception. 
*/ l32i a0, a1, PT_DEPC l32i a3, a1, PT_AREG3 l32i a2, a1, PT_AREG2 _bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f /* Restore a0...a3 and return */ l32i a0, a1, PT_AREG0 l32i a1, a1, PT_AREG1 rfe 1: wsr a0, depc l32i a0, a1, PT_AREG0 l32i a1, a1, PT_AREG1 rfde ENDPROC(kernel_exception) /* * Debug exception handler. * * Currently, we don't support KGDB, so only user application can be debugged. * * When we get here, a0 is trashed and saved to excsave[debuglevel] */ .literal_position ENTRY(debug_exception) rsr a0, SREG_EPS + XCHAL_DEBUGLEVEL bbsi.l a0, PS_EXCM_BIT, 1f # exception mode /* Set EPC1 and EXCCAUSE */ wsr a2, depc # save a2 temporarily rsr a2, SREG_EPC + XCHAL_DEBUGLEVEL wsr a2, epc1 movi a2, EXCCAUSE_MAPPED_DEBUG wsr a2, exccause /* Restore PS to the value before the debug exc but with PS.EXCM set.*/ movi a2, 1 << PS_EXCM_BIT or a2, a0, a2 wsr a2, ps /* Switch to kernel/user stack, restore jump vector, and save a0 */ bbsi.l a2, PS_UM_BIT, 2f # jump if user mode addi a2, a1, -16-PT_SIZE # assume kernel stack 3: l32i a0, a3, DT_DEBUG_SAVE s32i a1, a2, PT_AREG1 s32i a0, a2, PT_AREG0 movi a0, 0 s32i a0, a2, PT_DEPC # mark it as a regular exception xsr a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL xsr a0, depc s32i a3, a2, PT_AREG3 s32i a0, a2, PT_AREG2 mov a1, a2 /* Debug exception is handled as an exception, so interrupts will * likely be enabled in the common exception handler. Disable * preemption if we have HW breakpoints to preserve DEBUGCAUSE.DBNUM * meaning. */ #if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_HAVE_HW_BREAKPOINT) GET_THREAD_INFO(a2, a1) l32i a3, a2, TI_PRE_COUNT addi a3, a3, 1 s32i a3, a2, TI_PRE_COUNT #endif rsr a2, ps bbsi.l a2, PS_UM_BIT, _user_exception j _kernel_exception 2: rsr a2, excsave1 l32i a2, a2, EXC_TABLE_KSTK # load kernel stack pointer j 3b #ifdef CONFIG_HAVE_HW_BREAKPOINT /* Debug exception while in exception mode. This may happen when * window overflow/underflow handler or fast exception handler hits * data breakpoint, in which case save and disable all data * breakpoints, single-step faulting instruction and restore data * breakpoints. */ 1: bbci.l a0, PS_UM_BIT, 1b # jump if kernel mode rsr a0, debugcause bbsi.l a0, DEBUGCAUSE_DBREAK_BIT, .Ldebug_save_dbreak .set _index, 0 .rept XCHAL_NUM_DBREAK l32i a0, a3, DT_DBREAKC_SAVE + _index * 4 wsr a0, SREG_DBREAKC + _index .set _index, _index + 1 .endr l32i a0, a3, DT_ICOUNT_LEVEL_SAVE wsr a0, icountlevel l32i a0, a3, DT_ICOUNT_SAVE xsr a0, icount l32i a0, a3, DT_DEBUG_SAVE xsr a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL rfi XCHAL_DEBUGLEVEL .Ldebug_save_dbreak: .set _index, 0 .rept XCHAL_NUM_DBREAK movi a0, 0 xsr a0, SREG_DBREAKC + _index s32i a0, a3, DT_DBREAKC_SAVE + _index * 4 .set _index, _index + 1 .endr movi a0, XCHAL_EXCM_LEVEL + 1 xsr a0, icountlevel s32i a0, a3, DT_ICOUNT_LEVEL_SAVE movi a0, 0xfffffffe xsr a0, icount s32i a0, a3, DT_ICOUNT_SAVE l32i a0, a3, DT_DEBUG_SAVE xsr a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL rfi XCHAL_DEBUGLEVEL #else /* Debug exception while in exception mode. Should not happen. */ 1: j 1b // FIXME!! #endif ENDPROC(debug_exception) /* * We get here in case of an unrecoverable exception. * The only thing we can do is to be nice and print a panic message. * We only produce a single stack frame for panic, so ??? * * * Entry conditions: * * - a0 contains the caller address; original value saved in excsave1. * - the original a0 contains a valid return address (backtrace) or 0. 
* - a2 contains a valid stackpointer * * Notes: * * - If the stack pointer could be invalid, the caller has to setup a * dummy stack pointer (e.g. the stack of the init_task) * * - If the return address could be invalid, the caller has to set it * to 0, so the backtrace would stop. * */ .align 4 unrecoverable_text: .ascii "Unrecoverable error in exception handler\0" .literal_position ENTRY(unrecoverable_exception) movi a0, 1 movi a1, 0 wsr a0, windowstart wsr a1, windowbase rsync movi a1, (1 << PS_WOE_BIT) | LOCKLEVEL wsr a1, ps rsync movi a1, init_task movi a0, 0 addi a1, a1, PT_REGS_OFFSET movi a6, unrecoverable_text call4 panic 1: j 1b ENDPROC(unrecoverable_exception) /* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */ /* * Fast-handler for alloca exceptions * * The ALLOCA handler is entered when user code executes the MOVSP * instruction and the caller's frame is not in the register file. * * This algorithm was taken from the Ross Morley's RTOS Porting Layer: * * /home/ross/rtos/porting/XtensaRTOS-PortingLayer-20090507/xtensa_vectors.S * * It leverages the existing window spill/fill routines and their support for * double exceptions. The 'movsp' instruction will only cause an exception if * the next window needs to be loaded. In fact this ALLOCA exception may be * replaced at some point by changing the hardware to do a underflow exception * of the proper size instead. * * This algorithm simply backs out the register changes started by the user * excpetion handler, makes it appear that we have started a window underflow * by rotating the window back and then setting the old window base (OWB) in * the 'ps' register with the rolled back window base. The 'movsp' instruction * will be re-executed and this time since the next window frames is in the * active AR registers it won't cause an exception. * * If the WindowUnderflow code gets a TLB miss the page will get mapped * the the partial windeowUnderflow will be handeled in the double exception * handler. * * Entry condition: * * a0: trashed, original value saved on stack (PT_AREG0) * a1: a1 * a2: new stack pointer, original in DEPC * a3: a3 * depc: a2, original value saved on stack (PT_DEPC) * excsave_1: dispatch table * * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception */ ENTRY(fast_alloca) rsr a0, windowbase rotw -1 rsr a2, ps extui a3, a2, PS_OWB_SHIFT, PS_OWB_WIDTH xor a3, a3, a4 l32i a4, a6, PT_AREG0 l32i a1, a6, PT_DEPC rsr a6, depc wsr a1, depc slli a3, a3, PS_OWB_SHIFT xor a2, a2, a3 wsr a2, ps rsync _bbci.l a4, 31, 4f rotw -1 _bbci.l a8, 30, 8f rotw -1 j _WindowUnderflow12 8: j _WindowUnderflow8 4: j _WindowUnderflow4 ENDPROC(fast_alloca) /* * fast system calls. * * WARNING: The kernel doesn't save the entire user context before * handling a fast system call. These functions are small and short, * usually offering some functionality not available to user tasks. * * BE CAREFUL TO PRESERVE THE USER'S CONTEXT. * * Entry condition: * * a0: trashed, original value saved on stack (PT_AREG0) * a1: a1 * a2: new stack pointer, original in DEPC * a3: a3 * depc: a2, original value saved on stack (PT_DEPC) * excsave_1: dispatch table */ ENTRY(fast_syscall_kernel) /* Skip syscall. 
*/ rsr a0, epc1 addi a0, a0, 3 wsr a0, epc1 l32i a0, a2, PT_DEPC bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable rsr a0, depc # get syscall-nr _beqz a0, fast_syscall_spill_registers _beqi a0, __NR_xtensa, fast_syscall_xtensa j kernel_exception ENDPROC(fast_syscall_kernel) ENTRY(fast_syscall_user) /* Skip syscall. */ rsr a0, epc1 addi a0, a0, 3 wsr a0, epc1 l32i a0, a2, PT_DEPC bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable rsr a0, depc # get syscall-nr _beqz a0, fast_syscall_spill_registers _beqi a0, __NR_xtensa, fast_syscall_xtensa j user_exception ENDPROC(fast_syscall_user) ENTRY(fast_syscall_unrecoverable) /* Restore all states. */ l32i a0, a2, PT_AREG0 # restore a0 xsr a2, depc # restore a2, depc wsr a0, excsave1 call0 unrecoverable_exception ENDPROC(fast_syscall_unrecoverable) /* * sysxtensa syscall handler * * int sysxtensa (SYS_XTENSA_ATOMIC_SET, ptr, val, unused); * int sysxtensa (SYS_XTENSA_ATOMIC_ADD, ptr, val, unused); * int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val, unused); * int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval); * a2 a6 a3 a4 a5 * * Entry condition: * * a0: a2 (syscall-nr), original value saved on stack (PT_AREG0) * a1: a1 * a2: new stack pointer, original in a0 and DEPC * a3: a3 * a4..a15: unchanged * depc: a2, original value saved on stack (PT_DEPC) * excsave_1: dispatch table * * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception * * Note: we don't have to save a2; a2 holds the return value */ .literal_position #ifdef CONFIG_FAST_SYSCALL_XTENSA ENTRY(fast_syscall_xtensa) s32i a7, a2, PT_AREG7 # we need an additional register movi a7, 4 # sizeof(unsigned int) access_ok a3, a7, a0, a2, .Leac # a0: scratch reg, a2: sp _bgeui a6, SYS_XTENSA_COUNT, .Lill _bnei a6, SYS_XTENSA_ATOMIC_CMP_SWP, .Lnswp /* Fall through for ATOMIC_CMP_SWP. */ .Lswp: /* Atomic compare and swap */ EX(.Leac) l32i a0, a3, 0 # read old value bne a0, a4, 1f # same as old value? jump EX(.Leac) s32i a5, a3, 0 # different, modify value l32i a7, a2, PT_AREG7 # restore a7 l32i a0, a2, PT_AREG0 # restore a0 movi a2, 1 # and return 1 rfe 1: l32i a7, a2, PT_AREG7 # restore a7 l32i a0, a2, PT_AREG0 # restore a0 movi a2, 0 # return 0 (note that we cannot set rfe .Lnswp: /* Atomic set, add, and exg_add. */ EX(.Leac) l32i a7, a3, 0 # orig addi a6, a6, -SYS_XTENSA_ATOMIC_SET add a0, a4, a7 # + arg moveqz a0, a4, a6 # set addi a6, a6, SYS_XTENSA_ATOMIC_SET EX(.Leac) s32i a0, a3, 0 # write new value mov a0, a2 mov a2, a7 l32i a7, a0, PT_AREG7 # restore a7 l32i a0, a0, PT_AREG0 # restore a0 rfe .Leac: l32i a7, a2, PT_AREG7 # restore a7 l32i a0, a2, PT_AREG0 # restore a0 movi a2, -EFAULT rfe .Lill: l32i a7, a2, PT_AREG7 # restore a7 l32i a0, a2, PT_AREG0 # restore a0 movi a2, -EINVAL rfe ENDPROC(fast_syscall_xtensa) #else /* CONFIG_FAST_SYSCALL_XTENSA */ ENTRY(fast_syscall_xtensa) l32i a0, a2, PT_AREG0 # restore a0 movi a2, -ENOSYS rfe ENDPROC(fast_syscall_xtensa) #endif /* CONFIG_FAST_SYSCALL_XTENSA */ /* fast_syscall_spill_registers. * * Entry condition: * * a0: trashed, original value saved on stack (PT_AREG0) * a1: a1 * a2: new stack pointer, original in DEPC * a3: a3 * depc: a2, original value saved on stack (PT_DEPC) * excsave_1: dispatch table * * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler. 
*/ #ifdef CONFIG_FAST_SYSCALL_SPILL_REGISTERS ENTRY(fast_syscall_spill_registers) /* Register a FIXUP handler (pass current wb as a parameter) */ xsr a3, excsave1 movi a0, fast_syscall_spill_registers_fixup s32i a0, a3, EXC_TABLE_FIXUP rsr a0, windowbase s32i a0, a3, EXC_TABLE_PARAM xsr a3, excsave1 # restore a3 and excsave_1 /* Save a3, a4 and SAR on stack. */ rsr a0, sar s32i a3, a2, PT_AREG3 s32i a0, a2, PT_SAR /* The spill routine might clobber a4, a7, a8, a11, a12, and a15. */ s32i a4, a2, PT_AREG4 s32i a7, a2, PT_AREG7 s32i a8, a2, PT_AREG8 s32i a11, a2, PT_AREG11 s32i a12, a2, PT_AREG12 s32i a15, a2, PT_AREG15 /* * Rotate ws so that the current windowbase is at bit 0. * Assume ws = xxxwww1yy (www1 current window frame). * Rotate ws right so that a4 = yyxxxwww1. */ rsr a0, windowbase rsr a3, windowstart # a3 = xxxwww1yy ssr a0 # holds WB slli a0, a3, WSBITS or a3, a3, a0 # a3 = xxxwww1yyxxxwww1yy srl a3, a3 # a3 = 00xxxwww1yyxxxwww1 /* We are done if there are no more than the current register frame. */ extui a3, a3, 1, WSBITS-1 # a3 = 0yyxxxwww movi a0, (1 << (WSBITS-1)) _beqz a3, .Lnospill # only one active frame? jump /* We want 1 at the top, so that we return to the current windowbase */ or a3, a3, a0 # 1yyxxxwww /* Skip empty frames - get 'oldest' WINDOWSTART-bit. */ wsr a3, windowstart # save shifted windowstart neg a0, a3 and a3, a0, a3 # first bit set from right: 000010000 ffs_ws a0, a3 # a0: shifts to skip empty frames movi a3, WSBITS sub a0, a3, a0 # WSBITS-a0:number of 0-bits from right ssr a0 # save in SAR for later. rsr a3, windowbase add a3, a3, a0 wsr a3, windowbase rsync rsr a3, windowstart srl a3, a3 # shift windowstart /* WB is now just one frame below the oldest frame in the register window. WS is shifted so the oldest frame is in bit 0, thus, WB and WS differ by one 4-register frame. */ /* Save frames. Depending what call was used (call4, call8, call12), * we have to save 4,8. or 12 registers. */ .Lloop: _bbsi.l a3, 1, .Lc4 _bbci.l a3, 2, .Lc12 .Lc8: s32e a4, a13, -16 l32e a4, a5, -12 s32e a8, a4, -32 s32e a5, a13, -12 s32e a6, a13, -8 s32e a7, a13, -4 s32e a9, a4, -28 s32e a10, a4, -24 s32e a11, a4, -20 srli a11, a3, 2 # shift windowbase by 2 rotw 2 _bnei a3, 1, .Lloop j .Lexit .Lc4: s32e a4, a9, -16 s32e a5, a9, -12 s32e a6, a9, -8 s32e a7, a9, -4 srli a7, a3, 1 rotw 1 _bnei a3, 1, .Lloop j .Lexit .Lc12: _bbci.l a3, 3, .Linvalid_mask # bit 2 shouldn't be zero! /* 12-register frame (call12) */ l32e a0, a5, -12 s32e a8, a0, -48 mov a8, a0 s32e a9, a8, -44 s32e a10, a8, -40 s32e a11, a8, -36 s32e a12, a8, -32 s32e a13, a8, -28 s32e a14, a8, -24 s32e a15, a8, -20 srli a15, a3, 3 /* The stack pointer for a4..a7 is out of reach, so we rotate the * window, grab the stackpointer, and rotate back. * Alternatively, we could also use the following approach, but that * makes the fixup routine much more complicated: * rotw 1 * s32e a0, a13, -16 * ... * rotw 2 */ rotw 1 mov a4, a13 rotw -1 s32e a4, a8, -16 s32e a5, a8, -12 s32e a6, a8, -8 s32e a7, a8, -4 rotw 3 _beqi a3, 1, .Lexit j .Lloop .Lexit: /* Done. Do the final rotation and set WS */ rotw 1 rsr a3, windowbase ssl a3 movi a3, 1 sll a3, a3 wsr a3, windowstart .Lnospill: /* Advance PC, restore registers and SAR, and return from exception. */ l32i a3, a2, PT_SAR l32i a0, a2, PT_AREG0 wsr a3, sar l32i a3, a2, PT_AREG3 /* Restore clobbered registers. 
*/ l32i a4, a2, PT_AREG4 l32i a7, a2, PT_AREG7 l32i a8, a2, PT_AREG8 l32i a11, a2, PT_AREG11 l32i a12, a2, PT_AREG12 l32i a15, a2, PT_AREG15 movi a2, 0 rfe .Linvalid_mask: /* We get here because of an unrecoverable error in the window * registers, so set up a dummy frame and kill the user application. * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer. */ movi a0, 1 movi a1, 0 wsr a0, windowstart wsr a1, windowbase rsync movi a0, 0 rsr a3, excsave1 l32i a1, a3, EXC_TABLE_KSTK movi a4, (1 << PS_WOE_BIT) | LOCKLEVEL wsr a4, ps rsync movi a6, SIGSEGV call4 do_exit /* shouldn't return, so panic */ wsr a0, excsave1 call0 unrecoverable_exception # should not return 1: j 1b ENDPROC(fast_syscall_spill_registers) /* Fixup handler. * * We get here if the spill routine causes an exception, e.g. tlb miss. * We basically restore WINDOWBASE and WINDOWSTART to the condition when * we entered the spill routine and jump to the user exception handler. * * Note that we only need to restore the bits in windowstart that have not * been spilled yet by the _spill_register routine. Luckily, a3 contains a * rotated windowstart with only those bits set for frames that haven't been * spilled yet. Because a3 is rotated such that bit 0 represents the register * frame for the current windowbase - 1, we need to rotate a3 left by the * value of the current windowbase + 1 and move it to windowstart. * * a0: value of depc, original value in depc * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE * a3: exctable, original value in excsave1 */ ENTRY(fast_syscall_spill_registers_fixup) rsr a2, windowbase # get current windowbase (a2 is saved) xsr a0, depc # restore depc and a0 ssl a2 # set shift (32 - WB) /* We need to make sure the current registers (a0-a3) are preserved. * To do this, we simply set the bit for the current window frame * in WS, so that the exception handlers save them to the task stack. * * Note: we use a3 to set the windowbase, so we take a special care * of it, saving it in the original _spill_registers frame across * the exception handler call. */ xsr a3, excsave1 # get spill-mask slli a3, a3, 1 # shift left by one addi a3, a3, 1 # set the bit for the current window frame slli a2, a3, 32-WSBITS src a2, a3, a2 # a2 = xxwww1yyxxxwww1yy...... wsr a2, windowstart # set corrected windowstart srli a3, a3, 1 rsr a2, excsave1 l32i a2, a2, EXC_TABLE_DOUBLE_SAVE # restore a2 xsr a2, excsave1 s32i a3, a2, EXC_TABLE_DOUBLE_SAVE # save a3 l32i a3, a2, EXC_TABLE_PARAM # original WB (in user task) xsr a2, excsave1 /* Return to the original (user task) WINDOWBASE. * We leave the following frame behind: * a0, a1, a2 same * a3: trashed (saved in EXC_TABLE_DOUBLE_SAVE) * depc: depc (we have to return to that address) * excsave_1: exctable */ wsr a3, windowbase rsync /* We are now in the original frame when we entered _spill_registers: * a0: return address * a1: used, stack pointer * a2: kernel stack pointer * a3: available * depc: exception address * excsave: exctable * Note: This frame might be the same as above. */ /* Setup stack pointer. */ addi a2, a2, -PT_USER_SIZE s32i a0, a2, PT_AREG0 /* Make sure we return to this fixup handler. */ movi a3, fast_syscall_spill_registers_fixup_return s32i a3, a2, PT_DEPC # setup depc /* Jump to the exception handler. 
*/ rsr a3, excsave1 rsr a0, exccause addx4 a0, a0, a3 # find entry in table l32i a0, a0, EXC_TABLE_FAST_USER # load handler l32i a3, a3, EXC_TABLE_DOUBLE_SAVE jx a0 ENDPROC(fast_syscall_spill_registers_fixup) ENTRY(fast_syscall_spill_registers_fixup_return) /* When we return here, all registers have been restored (a2: DEPC) */ wsr a2, depc # exception address /* Restore fixup handler. */ rsr a2, excsave1 s32i a3, a2, EXC_TABLE_DOUBLE_SAVE movi a3, fast_syscall_spill_registers_fixup s32i a3, a2, EXC_TABLE_FIXUP rsr a3, windowbase s32i a3, a2, EXC_TABLE_PARAM l32i a2, a2, EXC_TABLE_KSTK /* Load WB at the time the exception occurred. */ rsr a3, sar # WB is still in SAR neg a3, a3 wsr a3, windowbase rsync rsr a3, excsave1 l32i a3, a3, EXC_TABLE_DOUBLE_SAVE rfde ENDPROC(fast_syscall_spill_registers_fixup_return) #else /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */ ENTRY(fast_syscall_spill_registers) l32i a0, a2, PT_AREG0 # restore a0 movi a2, -ENOSYS rfe ENDPROC(fast_syscall_spill_registers) #endif /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */ #ifdef CONFIG_MMU /* * We should never get here. Bail out! */ ENTRY(fast_second_level_miss_double_kernel) 1: call0 unrecoverable_exception # should not return 1: j 1b ENDPROC(fast_second_level_miss_double_kernel) /* First-level entry handler for user, kernel, and double 2nd-level * TLB miss exceptions. Note that for now, user and kernel miss * exceptions share the same entry point and are handled identically. * * An old, less-efficient C version of this function used to exist. * We include it below, interleaved as comments, for reference. * * Entry condition: * * a0: trashed, original value saved on stack (PT_AREG0) * a1: a1 * a2: new stack pointer, original in DEPC * a3: a3 * depc: a2, original value saved on stack (PT_DEPC) * excsave_1: dispatch table * * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception */ ENTRY(fast_second_level_miss) /* Save a1 and a3. Note: we don't expect a double exception. */ s32i a1, a2, PT_AREG1 s32i a3, a2, PT_AREG3 /* We need to map the page of PTEs for the user task. Find * the pointer to that page. Also, it's possible for tsk->mm * to be NULL while tsk->active_mm is nonzero if we faulted on * a vmalloc address. In that rare case, we must use * active_mm instead to avoid a fault in this handler. See * * http://mail.nl.linux.org/linux-mm/2002-08/msg00258.html * (or search Internet on "mm vs. active_mm") * * if (!mm) * mm = tsk->active_mm; * pgd = pgd_offset (mm, regs->excvaddr); * pmd = pmd_offset (pgd, regs->excvaddr); * pmdval = *pmd; */ GET_CURRENT(a1,a2) l32i a0, a1, TASK_MM # tsk->mm beqz a0, 9f 8: rsr a3, excvaddr # fault address _PGD_OFFSET(a0, a3, a1) l32i a0, a0, 0 # read pmdval beqz a0, 2f /* Read ptevaddr and convert to top of page-table page. * * vpnval = read_ptevaddr_register() & PAGE_MASK; * vpnval += DTLB_WAY_PGTABLE; * pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL); * write_dtlb_entry (pteval, vpnval); * * The messy computation for 'pteval' above really simplifies * into the following: * * pteval = ((pmdval - PAGE_OFFSET + PHYS_OFFSET) & PAGE_MASK) * | PAGE_DIRECTORY */ movi a1, (PHYS_OFFSET - PAGE_OFFSET) & 0xffffffff add a0, a0, a1 # pmdval - PAGE_OFFSET extui a1, a0, 0, PAGE_SHIFT # ... & PAGE_MASK xor a0, a0, a1 movi a1, _PAGE_DIRECTORY or a0, a0, a1 # ... | PAGE_DIRECTORY /* * We utilize all three wired-ways (7-9) to hold pmd translations. * Memory regions are mapped to the DTLBs according to bits 28 and 29. 
* This allows to map the three most common regions to three different * DTLBs: * 0,1 -> way 7 program (0040.0000) and virtual (c000.0000) * 2 -> way 8 shared libaries (2000.0000) * 3 -> way 0 stack (3000.0000) */ extui a3, a3, 28, 2 # addr. bit 28 and 29 0,1,2,3 rsr a1, ptevaddr addx2 a3, a3, a3 # -> 0,3,6,9 srli a1, a1, PAGE_SHIFT extui a3, a3, 2, 2 # -> 0,0,1,2 slli a1, a1, PAGE_SHIFT # ptevaddr & PAGE_MASK addi a3, a3, DTLB_WAY_PGD add a1, a1, a3 # ... + way_number 3: wdtlb a0, a1 dsync /* Exit critical section. */ 4: rsr a3, excsave1 movi a0, 0 s32i a0, a3, EXC_TABLE_FIXUP /* Restore the working registers, and return. */ l32i a0, a2, PT_AREG0 l32i a1, a2, PT_AREG1 l32i a3, a2, PT_AREG3 l32i a2, a2, PT_DEPC bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f /* Restore excsave1 and return. */ rsr a2, depc rfe /* Return from double exception. */ 1: xsr a2, depc esync rfde 9: l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0 bnez a0, 8b /* Even more unlikely case active_mm == 0. * We can get here with NMI in the middle of context_switch that * touches vmalloc area. */ movi a0, init_mm j 8b #if (DCACHE_WAY_SIZE > PAGE_SIZE) 2: /* Special case for cache aliasing. * We (should) only get here if a clear_user_page, copy_user_page * or the aliased cache flush functions got preemptively interrupted * by another task. Re-establish temporary mapping to the * TLBTEMP_BASE areas. */ /* We shouldn't be in a double exception */ l32i a0, a2, PT_DEPC bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 2f /* Make sure the exception originated in the special functions */ movi a0, __tlbtemp_mapping_start rsr a3, epc1 bltu a3, a0, 2f movi a0, __tlbtemp_mapping_end bgeu a3, a0, 2f /* Check if excvaddr was in one of the TLBTEMP_BASE areas. */ movi a3, TLBTEMP_BASE_1 rsr a0, excvaddr bltu a0, a3, 2f addi a1, a0, -TLBTEMP_SIZE bgeu a1, a3, 2f /* Check if we have to restore an ITLB mapping. */ movi a1, __tlbtemp_mapping_itlb rsr a3, epc1 sub a3, a3, a1 /* Calculate VPN */ movi a1, PAGE_MASK and a1, a1, a0 /* Jump for ITLB entry */ bgez a3, 1f /* We can use up to two TLBTEMP areas, one for src and one for dst. */ extui a3, a0, PAGE_SHIFT + DCACHE_ALIAS_ORDER, 1 add a1, a3, a1 /* PPN is in a6 for the first TLBTEMP area and in a7 for the second. */ mov a0, a6 movnez a0, a7, a3 j 3b /* ITLB entry. We only use dst in a6. */ 1: witlb a6, a1 isync j 4b #endif // DCACHE_WAY_SIZE > PAGE_SIZE 2: /* Invalid PGD, default exception handling */ rsr a1, depc s32i a1, a2, PT_AREG2 mov a1, a2 rsr a2, ps bbsi.l a2, PS_UM_BIT, 1f j _kernel_exception 1: j _user_exception ENDPROC(fast_second_level_miss) /* * StoreProhibitedException * * Update the pte and invalidate the itlb mapping for this pte. * * Entry condition: * * a0: trashed, original value saved on stack (PT_AREG0) * a1: a1 * a2: new stack pointer, original in DEPC * a3: a3 * depc: a2, original value saved on stack (PT_DEPC) * excsave_1: dispatch table * * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception */ ENTRY(fast_store_prohibited) /* Save a1 and a3. */ s32i a1, a2, PT_AREG1 s32i a3, a2, PT_AREG3 GET_CURRENT(a1,a2) l32i a0, a1, TASK_MM # tsk->mm beqz a0, 9f 8: rsr a1, excvaddr # fault address _PGD_OFFSET(a0, a1, a3) l32i a0, a0, 0 beqz a0, 2f /* * Note that we test _PAGE_WRITABLE_BIT only if PTE is present * and is not PAGE_NONE. See pgtable.h for possible PTE layouts. 
*/ _PTE_OFFSET(a0, a1, a3) l32i a3, a0, 0 # read pteval movi a1, _PAGE_CA_INVALID ball a3, a1, 2f bbci.l a3, _PAGE_WRITABLE_BIT, 2f movi a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE or a3, a3, a1 rsr a1, excvaddr s32i a3, a0, 0 /* We need to flush the cache if we have page coloring. */ #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK dhwb a0, 0 #endif pdtlb a0, a1 wdtlb a3, a0 /* Exit critical section. */ movi a0, 0 rsr a3, excsave1 s32i a0, a3, EXC_TABLE_FIXUP /* Restore the working registers, and return. */ l32i a3, a2, PT_AREG3 l32i a1, a2, PT_AREG1 l32i a0, a2, PT_AREG0 l32i a2, a2, PT_DEPC bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f rsr a2, depc rfe /* Double exception. Restore FIXUP handler and return. */ 1: xsr a2, depc esync rfde 9: l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0 j 8b 2: /* If there was a problem, handle fault in C */ rsr a3, depc # still holds a2 s32i a3, a2, PT_AREG2 mov a1, a2 rsr a2, ps bbsi.l a2, PS_UM_BIT, 1f j _kernel_exception 1: j _user_exception ENDPROC(fast_store_prohibited) #endif /* CONFIG_MMU */ /* * System Calls. * * void system_call (struct pt_regs* regs, int exccause) * a2 a3 */ .literal_position ENTRY(system_call) entry a1, 32 /* regs->syscall = regs->areg[2] */ l32i a3, a2, PT_AREG2 mov a6, a2 s32i a3, a2, PT_SYSCALL call4 do_syscall_trace_enter mov a3, a6 /* syscall = sys_call_table[syscall_nr] */ movi a4, sys_call_table movi a5, __NR_syscall_count movi a6, -ENOSYS bgeu a3, a5, 1f addx4 a4, a3, a4 l32i a4, a4, 0 movi a5, sys_ni_syscall; beq a4, a5, 1f /* Load args: arg0 - arg5 are passed via regs. */ l32i a6, a2, PT_AREG6 l32i a7, a2, PT_AREG3 l32i a8, a2, PT_AREG4 l32i a9, a2, PT_AREG5 l32i a10, a2, PT_AREG8 l32i a11, a2, PT_AREG9 /* Pass one additional argument to the syscall: pt_regs (on stack) */ s32i a2, a1, 0 callx4 a4 1: /* regs->areg[2] = return_value */ s32i a6, a2, PT_AREG2 mov a6, a2 call4 do_syscall_trace_leave retw ENDPROC(system_call) /* * Spill live registers on the kernel stack macro. * * Entry condition: ps.woe is set, ps.excm is cleared * Exit condition: windowstart has single bit set * May clobber: a12, a13 */ .macro spill_registers_kernel #if XCHAL_NUM_AREGS > 16 call12 1f _j 2f retw .align 4 1: _entry a1, 48 addi a12, a0, 3 #if XCHAL_NUM_AREGS > 32 .rept (XCHAL_NUM_AREGS - 32) / 12 _entry a1, 48 mov a12, a0 .endr #endif _entry a1, 16 #if XCHAL_NUM_AREGS % 12 == 0 mov a8, a8 #elif XCHAL_NUM_AREGS % 12 == 4 mov a12, a12 #elif XCHAL_NUM_AREGS % 12 == 8 mov a4, a4 #endif retw 2: #else mov a12, a12 #endif .endm /* * Task switch. * * struct task* _switch_to (struct task* prev, struct task* next) * a2 a2 a3 */ ENTRY(_switch_to) entry a1, 48 mov a11, a3 # and 'next' (a3) l32i a4, a2, TASK_THREAD_INFO l32i a5, a3, TASK_THREAD_INFO save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER #if THREAD_RA > 1020 || THREAD_SP > 1020 addi a10, a2, TASK_THREAD s32i a0, a10, THREAD_RA - TASK_THREAD # save return address s32i a1, a10, THREAD_SP - TASK_THREAD # save stack pointer #else s32i a0, a2, THREAD_RA # save return address s32i a1, a2, THREAD_SP # save stack pointer #endif #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) movi a6, __stack_chk_guard l32i a8, a3, TASK_STACK_CANARY s32i a8, a6, 0 #endif /* Disable ints while we manipulate the stack pointer. */ irq_save a14, a3 rsync /* Switch CPENABLE */ #if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS) l32i a3, a5, THREAD_CPENABLE xsr a3, cpenable s32i a3, a4, THREAD_CPENABLE #endif /* Flush register file. 
*/ spill_registers_kernel /* Set kernel stack (and leave critical section) * Note: It's save to set it here. The stack will not be overwritten * because the kernel stack will only be loaded again after * we return from kernel space. */ rsr a3, excsave1 # exc_table addi a7, a5, PT_REGS_OFFSET s32i a7, a3, EXC_TABLE_KSTK /* restore context of the task 'next' */ l32i a0, a11, THREAD_RA # restore return address l32i a1, a11, THREAD_SP # restore stack pointer load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER wsr a14, ps rsync retw ENDPROC(_switch_to) ENTRY(ret_from_fork) /* void schedule_tail (struct task_struct *prev) * Note: prev is still in a6 (return value from fake call4 frame) */ call4 schedule_tail mov a6, a1 call4 do_syscall_trace_leave j common_exception_return ENDPROC(ret_from_fork) /* * Kernel thread creation helper * On entry, set up by copy_thread: a2 = thread_fn, a3 = thread_fn arg * left from _switch_to: a6 = prev */ ENTRY(ret_from_kernel_thread) call4 schedule_tail mov a6, a3 callx4 a2 j common_exception_return ENDPROC(ret_from_kernel_thread)
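A rough C model of the WINDOWSTART bookkeeping done by user_exception above: WINDOWSTART is rotated so the current frame sits at bit 0, and PT_WMASK is packed as (number of 4-register panes to spill << 4) | (low 4 bits of the rotated WINDOWSTART). This is only a sketch under the assumption WSBITS = 16; rotate_ws, ffs_ws_c and compute_wmask are illustrative names, not kernel functions.

#include <stdint.h>

#define WSBITS 16	/* number of WINDOWSTART bits, configuration dependent */

/* The ssr/slli/src/srli sequence: rotate WINDOWSTART right by WINDOWBASE. */
static uint32_t rotate_ws(uint32_t windowstart, uint32_t windowbase)
{
	uint32_t mask = (1u << WSBITS) - 1;

	return ((windowstart >> windowbase) |
		(windowstart << (WSBITS - windowbase))) & mask;
}

/* ffs_ws: position of the highest set bit, counted from the left, 1-based. */
static int ffs_ws_c(uint32_t mask)
{
	int n = 1;

	if (!mask)
		return WSBITS + 1;
	while (!(mask & (1u << (WSBITS - 1)))) {
		mask <<= 1;
		n++;
	}
	return n;
}

static uint32_t compute_wmask(uint32_t windowstart, uint32_t windowbase)
{
	uint32_t ws = rotate_ws(windowstart, windowbase);	/* ...xxwww1 form */
	uint32_t oldest;
	int frames;

	if (ws == 1)			/* only the current frame is live */
		return 1;

	oldest = (ws - 1) & -(ws - 1);	/* first '1' from the right, bit 0 excluded */
	frames = ffs_ws_c(oldest);	/* number of rotw -1 spill iterations */

	return ((uint32_t)frames << 4) | (ws & 0xf);
}

The restore paths use the two halves the same way: bits 4 and up drive the rotw -1 refill loop in user_exception_exit, and the low bits tell common_exception_exit which of a4..a15 to reload.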
AirFortressIlikara/LS2K0300-linux-4.19
1,310
arch/xtensa/kernel/mxhead.S
/* * Xtensa Secondary Processors startup code. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2001 - 2013 Tensilica Inc. * * Joe Taylor <joe@tensilica.com> * Chris Zankel <chris@zankel.net> * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca> * Pete Delaney <piet@tensilica.com> */ #include <linux/linkage.h> #include <asm/cacheasm.h> #include <asm/initialize_mmu.h> #include <asm/mxregs.h> #include <asm/regs.h> .section .SecondaryResetVector.text, "ax" ENTRY(_SecondaryResetVector) _j _SetupOCD .begin no-absolute-literals .literal_position _SetupOCD: /* * Initialize WB, WS, and clear PS.EXCM (to allow loop instructions). * Set Interrupt Level just below XCHAL_DEBUGLEVEL to allow * xt-gdb to single step via DEBUG exceptions received directly * by ocd. */ movi a1, 1 movi a0, 0 wsr a1, windowstart wsr a0, windowbase rsync movi a1, LOCKLEVEL wsr a1, ps rsync _SetupMMU: #ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX initialize_mmu #endif /* * Start Secondary Processors with NULL pointer to boot params. */ movi a2, 0 # a2 == NULL movi a3, _startup jx a3 .end no-absolute-literals
AirFortressIlikara/LS2K0300-linux-4.19
6,932
arch/xtensa/kernel/coprocessor.S
/* * arch/xtensa/kernel/coprocessor.S * * Xtensa processor configuration-specific table of coprocessor and * other custom register layout information. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2003 - 2007 Tensilica Inc. */ #include <linux/linkage.h> #include <asm/asm-offsets.h> #include <asm/processor.h> #include <asm/coprocessor.h> #include <asm/thread_info.h> #include <asm/asm-uaccess.h> #include <asm/unistd.h> #include <asm/ptrace.h> #include <asm/current.h> #include <asm/pgtable.h> #include <asm/page.h> #include <asm/signal.h> #include <asm/tlbflush.h> #if XTENSA_HAVE_COPROCESSORS /* * Macros for lazy context switch. */ #define SAVE_CP_REGS(x) \ .align 4; \ .Lsave_cp_regs_cp##x: \ .if XTENSA_HAVE_COPROCESSOR(x); \ xchal_cp##x##_store a2 a4 a5 a6 a7; \ .endif; \ jx a0 #define SAVE_CP_REGS_TAB(x) \ .if XTENSA_HAVE_COPROCESSOR(x); \ .long .Lsave_cp_regs_cp##x - .Lsave_cp_regs_jump_table; \ .else; \ .long 0; \ .endif; \ .long THREAD_XTREGS_CP##x #define LOAD_CP_REGS(x) \ .align 4; \ .Lload_cp_regs_cp##x: \ .if XTENSA_HAVE_COPROCESSOR(x); \ xchal_cp##x##_load a2 a4 a5 a6 a7; \ .endif; \ jx a0 #define LOAD_CP_REGS_TAB(x) \ .if XTENSA_HAVE_COPROCESSOR(x); \ .long .Lload_cp_regs_cp##x - .Lload_cp_regs_jump_table; \ .else; \ .long 0; \ .endif; \ .long THREAD_XTREGS_CP##x SAVE_CP_REGS(0) SAVE_CP_REGS(1) SAVE_CP_REGS(2) SAVE_CP_REGS(3) SAVE_CP_REGS(4) SAVE_CP_REGS(5) SAVE_CP_REGS(6) SAVE_CP_REGS(7) LOAD_CP_REGS(0) LOAD_CP_REGS(1) LOAD_CP_REGS(2) LOAD_CP_REGS(3) LOAD_CP_REGS(4) LOAD_CP_REGS(5) LOAD_CP_REGS(6) LOAD_CP_REGS(7) .align 4 .Lsave_cp_regs_jump_table: SAVE_CP_REGS_TAB(0) SAVE_CP_REGS_TAB(1) SAVE_CP_REGS_TAB(2) SAVE_CP_REGS_TAB(3) SAVE_CP_REGS_TAB(4) SAVE_CP_REGS_TAB(5) SAVE_CP_REGS_TAB(6) SAVE_CP_REGS_TAB(7) .Lload_cp_regs_jump_table: LOAD_CP_REGS_TAB(0) LOAD_CP_REGS_TAB(1) LOAD_CP_REGS_TAB(2) LOAD_CP_REGS_TAB(3) LOAD_CP_REGS_TAB(4) LOAD_CP_REGS_TAB(5) LOAD_CP_REGS_TAB(6) LOAD_CP_REGS_TAB(7) /* * coprocessor_save(buffer, index) * a2 a3 * coprocessor_load(buffer, index) * a2 a3 * * Save or load coprocessor registers for coprocessor 'index'. * The register values are saved to or loaded from them 'buffer' address. * * Note that these functions don't update the coprocessor_owner information! * */ ENTRY(coprocessor_save) entry a1, 32 s32i a0, a1, 0 movi a0, .Lsave_cp_regs_jump_table addx8 a3, a3, a0 l32i a3, a3, 0 beqz a3, 1f add a0, a0, a3 callx0 a0 1: l32i a0, a1, 0 retw ENDPROC(coprocessor_save) ENTRY(coprocessor_load) entry a1, 32 s32i a0, a1, 0 movi a0, .Lload_cp_regs_jump_table addx4 a3, a3, a0 l32i a3, a3, 0 beqz a3, 1f add a0, a0, a3 callx0 a0 1: l32i a0, a1, 0 retw ENDPROC(coprocessor_load) /* * coprocessor_flush(struct task_info*, index) * a2 a3 * coprocessor_restore(struct task_info*, index) * a2 a3 * * Save or load coprocessor registers for coprocessor 'index'. * The register values are saved to or loaded from the coprocessor area * inside the task_info structure. * * Note that these functions don't update the coprocessor_owner information! 
* */ ENTRY(coprocessor_flush) entry a1, 32 s32i a0, a1, 0 movi a0, .Lsave_cp_regs_jump_table addx8 a3, a3, a0 l32i a4, a3, 4 l32i a3, a3, 0 add a2, a2, a4 beqz a3, 1f add a0, a0, a3 callx0 a0 1: l32i a0, a1, 0 retw ENDPROC(coprocessor_flush) ENTRY(coprocessor_restore) entry a1, 32 s32i a0, a1, 0 movi a0, .Lload_cp_regs_jump_table addx4 a3, a3, a0 l32i a4, a3, 4 l32i a3, a3, 0 add a2, a2, a4 beqz a3, 1f add a0, a0, a3 callx0 a0 1: l32i a0, a1, 0 retw ENDPROC(coprocessor_restore) /* * Entry condition: * * a0: trashed, original value saved on stack (PT_AREG0) * a1: a1 * a2: new stack pointer, original in DEPC * a3: a3 * depc: a2, original value saved on stack (PT_DEPC) * excsave_1: dispatch table * * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception */ ENTRY(fast_coprocessor_double) wsr a0, excsave1 call0 unrecoverable_exception ENDPROC(fast_coprocessor_double) ENTRY(fast_coprocessor) /* Save remaining registers a1-a3 and SAR */ s32i a3, a2, PT_AREG3 rsr a3, sar s32i a1, a2, PT_AREG1 s32i a3, a2, PT_SAR mov a1, a2 rsr a2, depc s32i a2, a1, PT_AREG2 /* * The hal macros require up to 4 temporary registers. We use a3..a6. */ s32i a4, a1, PT_AREG4 s32i a5, a1, PT_AREG5 s32i a6, a1, PT_AREG6 /* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */ rsr a3, exccause addi a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED /* Set corresponding CPENABLE bit -> (sar:cp-index, a3: 1<<cp-index)*/ ssl a3 # SAR: 32 - coprocessor_number movi a2, 1 rsr a0, cpenable sll a2, a2 or a0, a0, a2 wsr a0, cpenable rsync /* Retrieve previous owner. (a3 still holds CP number) */ movi a0, coprocessor_owner # list of owners addx4 a0, a3, a0 # entry for CP l32i a4, a0, 0 beqz a4, 1f # skip 'save' if no previous owner /* Disable coprocessor for previous owner. (a2 = 1 << CP number) */ l32i a5, a4, THREAD_CPENABLE xor a5, a5, a2 # (1 << cp-id) still in a2 s32i a5, a4, THREAD_CPENABLE /* * Get context save area and 'call' save routine. * (a4 still holds previous owner (thread_info), a3 CP number) */ movi a5, .Lsave_cp_regs_jump_table movi a0, 2f # a0: 'return' address addx8 a3, a3, a5 # a3: coprocessor number l32i a2, a3, 4 # a2: xtregs offset l32i a3, a3, 0 # a3: jump offset add a2, a2, a4 add a4, a3, a5 # a4: address of save routine jx a4 /* Note that only a0 and a1 were preserved. */ 2: rsr a3, exccause addi a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED movi a0, coprocessor_owner addx4 a0, a3, a0 /* Set new 'owner' (a0 points to the CP owner, a3 contains the CP nr) */ 1: GET_THREAD_INFO (a4, a1) s32i a4, a0, 0 /* Get context save area and 'call' load routine. */ movi a5, .Lload_cp_regs_jump_table movi a0, 1f addx8 a3, a3, a5 l32i a2, a3, 4 # a2: xtregs offset l32i a3, a3, 0 # a3: jump offset add a2, a2, a4 add a4, a3, a5 jx a4 /* Restore all registers and return from exception handler. */ 1: l32i a6, a1, PT_AREG6 l32i a5, a1, PT_AREG5 l32i a4, a1, PT_AREG4 l32i a0, a1, PT_SAR l32i a3, a1, PT_AREG3 l32i a2, a1, PT_AREG2 wsr a0, sar l32i a0, a1, PT_AREG0 l32i a1, a1, PT_AREG1 rfe ENDPROC(fast_coprocessor) .data ENTRY(coprocessor_owner) .fill XCHAL_CP_MAX, 4, 0 END(coprocessor_owner) #endif /* XTENSA_HAVE_COPROCESSORS */
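fast_coprocessor above is a lazy context switch: the first access to a disabled coprocessor traps, and the handler enables the unit, spills the previous owner's coprocessor registers (clearing the bit in that thread's saved CPENABLE), loads the new owner's state and records it in coprocessor_owner. A hypothetical C sketch of that flow follows; the types and helpers (cp_save/cp_load standing in for the xchal_cp*_store/load jump-table routines, hw_cpenable for the CPENABLE special register) are illustrative, not the kernel's API.

struct thread_info {
	unsigned int cpenable;		/* mirrors THREAD_CPENABLE above */
};

static unsigned int hw_cpenable;	/* stands in for the CPENABLE SR */
static struct thread_info *cp_owner[8];	/* one slot per coprocessor; 8 assumed (XCHAL_CP_MAX) */

/* Stand-ins for the per-coprocessor save/load routines. */
static void cp_save(struct thread_info *owner, int cp) { (void)owner; (void)cp; }
static void cp_load(struct thread_info *next, int cp)  { (void)next;  (void)cp; }

void lazy_coprocessor_trap(struct thread_info *current_ti, int cp)
{
	struct thread_info *prev = cp_owner[cp];

	/* Re-enable the coprocessor so the trapping instruction can retry. */
	hw_cpenable |= 1u << cp;

	if (prev) {
		/* The previous owner loses the unit: clear its saved
		 * CPENABLE bit and spill its live coprocessor registers. */
		prev->cpenable &= ~(1u << cp);
		cp_save(prev, cp);
	}

	/* Load the faulting thread's saved state and record ownership. */
	cp_load(current_ti, cp);
	cp_owner[cp] = current_ti;
}

coprocessor_save/load and coprocessor_flush/restore above are the explicit, non-lazy entry points; as their comments note, none of them touch the owner bookkeeping.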
AirFortressIlikara/LS2K0300-linux-4.19
8,504
arch/xtensa/kernel/vmlinux.lds.S
/* * arch/xtensa/kernel/vmlinux.lds.S * * Xtensa linker script * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2001 - 2008 Tensilica Inc. * * Chris Zankel <chris@zankel.net> * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca> * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com> */ #include <asm-generic/vmlinux.lds.h> #include <asm/page.h> #include <asm/thread_info.h> #include <asm/vectors.h> #include <variant/core.h> OUTPUT_ARCH(xtensa) ENTRY(_start) #ifdef __XTENSA_EB__ jiffies = jiffies_64 + 4; #else jiffies = jiffies_64; #endif /* Note: In the following macros, it would be nice to specify only the vector name and section kind and construct "sym" and "section" using CPP concatenation, but that does not work reliably. Concatenating a string with "." produces an invalid token. CPP will not print a warning because it thinks this is an assembly file, but it leaves them as multiple tokens and there may or may not be whitespace between them. */ /* Macro for a relocation entry */ #define RELOCATE_ENTRY(sym, section) \ LONG(sym ## _start); \ LONG(sym ## _end); \ LONG(LOADADDR(section)) /* * Macro to define a section for a vector. When CONFIG_VECTORS_OFFSET is * defined code for every vector is located with other init data. At startup * time head.S copies code for every vector to its final position according * to description recorded in the corresponding RELOCATE_ENTRY. */ #ifdef CONFIG_VECTORS_OFFSET #define SECTION_VECTOR(sym, section, addr, prevsec) \ section addr : AT(((LOADADDR(prevsec) + SIZEOF(prevsec)) + 3) & ~ 3) \ { \ . = ALIGN(4); \ sym ## _start = ABSOLUTE(.); \ *(section) \ sym ## _end = ABSOLUTE(.); \ } #else #define SECTION_VECTOR(section, addr) \ . = addr; \ *(section) #endif /* * Mapping of input sections to output sections when linking. */ SECTIONS { . = KERNELOFFSET; /* .text section */ _text = .; _stext = .; .text : { /* The HEAD_TEXT section must be the first section! */ HEAD_TEXT #ifndef CONFIG_VECTORS_OFFSET . = ALIGN(PAGE_SIZE); _vecbase = .; SECTION_VECTOR (.WindowVectors.text, WINDOW_VECTORS_VADDR) #if XCHAL_EXCM_LEVEL >= 2 SECTION_VECTOR (.Level2InterruptVector.text, INTLEVEL2_VECTOR_VADDR) #endif #if XCHAL_EXCM_LEVEL >= 3 SECTION_VECTOR (.Level3InterruptVector.text, INTLEVEL3_VECTOR_VADDR) #endif #if XCHAL_EXCM_LEVEL >= 4 SECTION_VECTOR (.Level4InterruptVector.text, INTLEVEL4_VECTOR_VADDR) #endif #if XCHAL_EXCM_LEVEL >= 5 SECTION_VECTOR (.Level5InterruptVector.text, INTLEVEL5_VECTOR_VADDR) #endif #if XCHAL_EXCM_LEVEL >= 6 SECTION_VECTOR (.Level6InterruptVector.text, INTLEVEL6_VECTOR_VADDR) #endif SECTION_VECTOR (.DebugInterruptVector.text, DEBUG_VECTOR_VADDR) SECTION_VECTOR (.KernelExceptionVector.text, KERNEL_VECTOR_VADDR) SECTION_VECTOR (.UserExceptionVector.text, USER_VECTOR_VADDR) SECTION_VECTOR (.DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR) #endif IRQENTRY_TEXT SOFTIRQENTRY_TEXT ENTRY_TEXT TEXT_TEXT SCHED_TEXT CPUIDLE_TEXT LOCK_TEXT } _etext = .; PROVIDE (etext = .); . = ALIGN(16); RODATA /* Relocation table */ .fixup : { *(.fixup) } EXCEPTION_TABLE(16) NOTES /* Data section */ _sdata = .; RW_DATA_SECTION(XCHAL_ICACHE_LINESIZE, PAGE_SIZE, THREAD_SIZE) _edata = .; /* Initialization code and data: */ . = ALIGN(PAGE_SIZE); __init_begin = .; INIT_TEXT_SECTION(PAGE_SIZE) .init.data : { INIT_DATA . = ALIGN(0x4); __tagtable_begin = .; *(.taglist) __tagtable_end = .; . 
= ALIGN(16); __boot_reloc_table_start = ABSOLUTE(.); #ifdef CONFIG_VECTORS_OFFSET RELOCATE_ENTRY(_WindowVectors_text, .WindowVectors.text); #if XCHAL_EXCM_LEVEL >= 2 RELOCATE_ENTRY(_Level2InterruptVector_text, .Level2InterruptVector.text); #endif #if XCHAL_EXCM_LEVEL >= 3 RELOCATE_ENTRY(_Level3InterruptVector_text, .Level3InterruptVector.text); #endif #if XCHAL_EXCM_LEVEL >= 4 RELOCATE_ENTRY(_Level4InterruptVector_text, .Level4InterruptVector.text); #endif #if XCHAL_EXCM_LEVEL >= 5 RELOCATE_ENTRY(_Level5InterruptVector_text, .Level5InterruptVector.text); #endif #if XCHAL_EXCM_LEVEL >= 6 RELOCATE_ENTRY(_Level6InterruptVector_text, .Level6InterruptVector.text); #endif RELOCATE_ENTRY(_KernelExceptionVector_text, .KernelExceptionVector.text); RELOCATE_ENTRY(_UserExceptionVector_text, .UserExceptionVector.text); RELOCATE_ENTRY(_DoubleExceptionVector_text, .DoubleExceptionVector.text); RELOCATE_ENTRY(_DebugInterruptVector_text, .DebugInterruptVector.text); #endif #if defined(CONFIG_SMP) RELOCATE_ENTRY(_SecondaryResetVector_text, .SecondaryResetVector.text); #endif __boot_reloc_table_end = ABSOLUTE(.) ; INIT_SETUP(XCHAL_ICACHE_LINESIZE) INIT_CALLS CON_INITCALL SECURITY_INITCALL INIT_RAM_FS } PERCPU_SECTION(XCHAL_ICACHE_LINESIZE) /* We need this dummy segment here */ . = ALIGN(4); .dummy : { LONG(0) } #ifdef CONFIG_VECTORS_OFFSET /* The vectors are relocated to the real position at startup time */ SECTION_VECTOR (_WindowVectors_text, .WindowVectors.text, WINDOW_VECTORS_VADDR, .dummy) SECTION_VECTOR (_DebugInterruptVector_text, .DebugInterruptVector.text, DEBUG_VECTOR_VADDR, .WindowVectors.text) #undef LAST #define LAST .DebugInterruptVector.text #if XCHAL_EXCM_LEVEL >= 2 SECTION_VECTOR (_Level2InterruptVector_text, .Level2InterruptVector.text, INTLEVEL2_VECTOR_VADDR, LAST) # undef LAST # define LAST .Level2InterruptVector.text #endif #if XCHAL_EXCM_LEVEL >= 3 SECTION_VECTOR (_Level3InterruptVector_text, .Level3InterruptVector.text, INTLEVEL3_VECTOR_VADDR, LAST) # undef LAST # define LAST .Level3InterruptVector.text #endif #if XCHAL_EXCM_LEVEL >= 4 SECTION_VECTOR (_Level4InterruptVector_text, .Level4InterruptVector.text, INTLEVEL4_VECTOR_VADDR, LAST) # undef LAST # define LAST .Level4InterruptVector.text #endif #if XCHAL_EXCM_LEVEL >= 5 SECTION_VECTOR (_Level5InterruptVector_text, .Level5InterruptVector.text, INTLEVEL5_VECTOR_VADDR, LAST) # undef LAST # define LAST .Level5InterruptVector.text #endif #if XCHAL_EXCM_LEVEL >= 6 SECTION_VECTOR (_Level6InterruptVector_text, .Level6InterruptVector.text, INTLEVEL6_VECTOR_VADDR, LAST) # undef LAST # define LAST .Level6InterruptVector.text #endif SECTION_VECTOR (_KernelExceptionVector_text, .KernelExceptionVector.text, KERNEL_VECTOR_VADDR, LAST) #undef LAST SECTION_VECTOR (_UserExceptionVector_text, .UserExceptionVector.text, USER_VECTOR_VADDR, .KernelExceptionVector.text) SECTION_VECTOR (_DoubleExceptionVector_text, .DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR, .UserExceptionVector.text) . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3; #endif #if defined(CONFIG_SMP) SECTION_VECTOR (_SecondaryResetVector_text, .SecondaryResetVector.text, RESET_VECTOR1_VADDR, .DoubleExceptionVector.text) . = LOADADDR(.SecondaryResetVector.text)+SIZEOF(.SecondaryResetVector.text); #endif . 
= ALIGN(PAGE_SIZE); __init_end = .; BSS_SECTION(0, 8192, 0) _end = .; .xt.lit : { *(.xt.lit) } .xt.prop : { *(.xt.prop) } .debug 0 : { *(.debug) } .line 0 : { *(.line) } .debug_srcinfo 0 : { *(.debug_srcinfo) } .debug_sfnames 0 : { *(.debug_sfnames) } .debug_aranges 0 : { *(.debug_aranges) } .debug_pubnames 0 : { *(.debug_pubnames) } .debug_info 0 : { *(.debug_info) } .debug_abbrev 0 : { *(.debug_abbrev) } .debug_line 0 : { *(.debug_line) } .debug_frame 0 : { *(.debug_frame) } .debug_str 0 : { *(.debug_str) } .debug_loc 0 : { *(.debug_loc) } .debug_macinfo 0 : { *(.debug_macinfo) } .debug_weaknames 0 : { *(.debug_weaknames) } .debug_funcnames 0 : { *(.debug_funcnames) } .debug_typenames 0 : { *(.debug_typenames) } .debug_varnames 0 : { *(.debug_varnames) } .xt.insn 0 : { *(.xt.insn) *(.gnu.linkonce.x*) } .xt.lit 0 : { *(.xt.lit) *(.gnu.linkonce.p*) } /* Sections to be discarded */ DISCARDS }
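Each RELOCATE_ENTRY above emits a {start, end, load-address} triplet into __boot_reloc_table, and head.S walks that table at boot to move the vectors to their final addresses. The C sketch below shows what such a consumer looks like; it is an illustration only — the real walk is done in head.S assembly, and each field is a 32-bit LONG() on the Xtensa target.

#include <string.h>

struct reloc_entry {
	unsigned long dest_start;   /* LONG(sym ## _start)     */
	unsigned long dest_end;     /* LONG(sym ## _end)       */
	unsigned long load_addr;    /* LONG(LOADADDR(section)) */
};

/* Symbols defined by the linker script above. */
extern struct reloc_entry __boot_reloc_table_start[];
extern struct reloc_entry __boot_reloc_table_end[];

void relocate_vectors(void)
{
	struct reloc_entry *r;

	for (r = __boot_reloc_table_start; r < __boot_reloc_table_end; r++) {
		unsigned long len = r->dest_end - r->dest_start;

		/* Copy the vector code from where the image was loaded to
		 * the virtual address the hardware expects it at. */
		memcpy((void *)r->dest_start, (void *)r->load_addr, len);
	}
}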
AirFortressIlikara/LS2K0300-linux-4.19
12,241
arch/xtensa/kernel/align.S
/* * arch/xtensa/kernel/align.S * * Handle unalignment exceptions in kernel space. * * This file is subject to the terms and conditions of the GNU General * Public License. See the file "COPYING" in the main directory of * this archive for more details. * * Copyright (C) 2001 - 2005 Tensilica, Inc. * Copyright (C) 2014 Cadence Design Systems Inc. * * Rewritten by Chris Zankel <chris@zankel.net> * * Based on work from Joe Taylor <joe@tensilica.com, joetylr@yahoo.com> * and Marc Gauthier <marc@tensilica.com, marc@alimni.uwaterloo.ca> */ #include <linux/linkage.h> #include <asm/current.h> #include <asm/asm-offsets.h> #include <asm/asmmacro.h> #include <asm/processor.h> #if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION /* First-level exception handler for unaligned exceptions. * * Note: This handler works only for kernel exceptions. Unaligned user * access should get a seg fault. */ /* Big and little endian 16-bit values are located in * different halves of a register. HWORD_START helps to * abstract the notion of extracting a 16-bit value from a * register. * We also have to define new shifting instructions because * lsb and msb are on 'opposite' ends in a register for * different endian machines. * * Assume a memory region in ascending address: * 0 1 2 3|4 5 6 7 * * When loading one word into a register, the content of that register is: * LE 3 2 1 0, 7 6 5 4 * BE 0 1 2 3, 4 5 6 7 * * Masking the bits of the higher/lower address means: * LE X X 0 0, 0 0 X X * BE 0 0 X X, X X 0 0 * * Shifting to higher/lower addresses, means: * LE shift left / shift right * BE shift right / shift left * * Extracting 16 bits from a 32 bit reg. value to higher/lower address means: * LE mask 0 0 X X / shift left * BE shift left / mask 0 0 X X */ #define UNALIGNED_USER_EXCEPTION #if XCHAL_HAVE_BE #define HWORD_START 16 #define INSN_OP0 28 #define INSN_T 24 #define INSN_OP1 16 .macro __ssa8r r; ssa8l \r; .endm .macro __sh r, s; srl \r, \s; .endm .macro __sl r, s; sll \r, \s; .endm .macro __exth r, s; extui \r, \s, 0, 16; .endm .macro __extl r, s; slli \r, \s, 16; .endm #else #define HWORD_START 0 #define INSN_OP0 0 #define INSN_T 4 #define INSN_OP1 12 .macro __ssa8r r; ssa8b \r; .endm .macro __sh r, s; sll \r, \s; .endm .macro __sl r, s; srl \r, \s; .endm .macro __exth r, s; slli \r, \s, 16; .endm .macro __extl r, s; extui \r, \s, 0, 16; .endm #endif /* * xxxx xxxx = imm8 field * yyyy = imm4 field * ssss = s field * tttt = t field * * 16 0 * ------------------- * L32I.N yyyy ssss tttt 1000 * S32I.N yyyy ssss tttt 1001 * * 23 0 * ----------------------------- * res 0000 0010 * L16UI xxxx xxxx 0001 ssss tttt 0010 * L32I xxxx xxxx 0010 ssss tttt 0010 * XXX 0011 ssss tttt 0010 * XXX 0100 ssss tttt 0010 * S16I xxxx xxxx 0101 ssss tttt 0010 * S32I xxxx xxxx 0110 ssss tttt 0010 * XXX 0111 ssss tttt 0010 * XXX 1000 ssss tttt 0010 * L16SI xxxx xxxx 1001 ssss tttt 0010 * XXX 1010 0010 * **L32AI xxxx xxxx 1011 ssss tttt 0010 unsupported * XXX 1100 0010 * XXX 1101 0010 * XXX 1110 0010 * **S32RI xxxx xxxx 1111 ssss tttt 0010 unsupported * ----------------------------- * ^ ^ ^ * sub-opcode (NIBBLE_R) -+ | | * t field (NIBBLE_T) -----------+ | * major opcode (NIBBLE_OP0) --------------+ */ #define OP0_L32I_N 0x8 /* load immediate narrow */ #define OP0_S32I_N 0x9 /* store immediate narrow */ #define OP1_SI_MASK 0x4 /* OP1 bit set for stores */ #define OP1_SI_BIT 2 /* OP1 bit number for stores */ #define OP1_L32I 0x2 #define OP1_L16UI 0x1 #define OP1_L16SI 0x9 #define OP1_L32AI 0xb #define OP1_S32I 0x6 #define 
OP1_S16I 0x5 #define OP1_S32RI 0xf /* * Entry condition: * * a0: trashed, original value saved on stack (PT_AREG0) * a1: a1 * a2: new stack pointer, original in DEPC * a3: a3 * depc: a2, original value saved on stack (PT_DEPC) * excsave_1: dispatch table * * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception */ .literal_position ENTRY(fast_unaligned) /* Note: We don't expect the address to be aligned on a word * boundary. After all, the processor generated that exception * and it would be a hardware fault. */ /* Save some working register */ s32i a4, a2, PT_AREG4 s32i a5, a2, PT_AREG5 s32i a6, a2, PT_AREG6 s32i a7, a2, PT_AREG7 s32i a8, a2, PT_AREG8 rsr a0, depc s32i a0, a2, PT_AREG2 s32i a3, a2, PT_AREG3 rsr a3, excsave1 movi a4, fast_unaligned_fixup s32i a4, a3, EXC_TABLE_FIXUP /* Keep value of SAR in a0 */ rsr a0, sar rsr a8, excvaddr # load unaligned memory address /* Now, identify one of the following load/store instructions. * * The only possible danger of a double exception on the * following l32i instructions is kernel code in vmalloc * memory. The processor was just executing at the EPC_1 * address, and indeed, already fetched the instruction. That * guarantees a TLB mapping, which hasn't been replaced by * this unaligned exception handler that uses only static TLB * mappings. However, high-level interrupt handlers might * modify TLB entries, so for the generic case, we register a * TABLE_FIXUP handler here, too. */ /* a3...a6 saved on stack, a2 = SP */ /* Extract the instruction that caused the unaligned access. */ rsr a7, epc1 # load exception address movi a3, ~3 and a3, a3, a7 # mask lower bits l32i a4, a3, 0 # load 2 words l32i a5, a3, 4 __ssa8 a7 __src_b a4, a4, a5 # a4 has the instruction /* Analyze the instruction (load or store?). */ extui a5, a4, INSN_OP0, 4 # get insn.op0 nibble #if XCHAL_HAVE_DENSITY _beqi a5, OP0_L32I_N, .Lload # L32I.N, jump addi a6, a5, -OP0_S32I_N _beqz a6, .Lstore # S32I.N, do a store #endif /* 'store indicator bit' not set, jump */ _bbci.l a4, OP1_SI_BIT + INSN_OP1, .Lload /* Store: Jump to table entry to get the value in the source register.*/ .Lstore:movi a5, .Lstore_table # table extui a6, a4, INSN_T, 4 # get source register addx8 a5, a6, a5 jx a5 # jump into table /* Load: Load memory address. */ .Lload: movi a3, ~3 and a3, a3, a8 # align memory address __ssa8 a8 #ifdef UNALIGNED_USER_EXCEPTION addi a3, a3, 8 l32e a5, a3, -8 l32e a6, a3, -4 #else l32i a5, a3, 0 l32i a6, a3, 4 #endif __src_b a3, a5, a6 # a3 has the data word #if XCHAL_HAVE_DENSITY addi a7, a7, 2 # increment PC (assume 16-bit insn) extui a5, a4, INSN_OP0, 4 _beqi a5, OP0_L32I_N, 1f # l32i.n: jump addi a7, a7, 1 #else addi a7, a7, 3 #endif extui a5, a4, INSN_OP1, 4 _beqi a5, OP1_L32I, 1f # l32i: jump extui a3, a3, 0, 16 # extract lower 16 bits _beqi a5, OP1_L16UI, 1f addi a5, a5, -OP1_L16SI _bnez a5, .Linvalid_instruction_load /* sign extend value */ slli a3, a3, 16 srai a3, a3, 16 /* Set target register. */ 1: extui a4, a4, INSN_T, 4 # extract target register movi a5, .Lload_table addx8 a4, a4, a5 jx a4 # jump to entry for target register .align 8 .Lload_table: s32i a3, a2, PT_AREG0; _j .Lexit; .align 8 mov a1, a3; _j .Lexit; .align 8 # fishy?? 
s32i a3, a2, PT_AREG2; _j .Lexit; .align 8 s32i a3, a2, PT_AREG3; _j .Lexit; .align 8 s32i a3, a2, PT_AREG4; _j .Lexit; .align 8 s32i a3, a2, PT_AREG5; _j .Lexit; .align 8 s32i a3, a2, PT_AREG6; _j .Lexit; .align 8 s32i a3, a2, PT_AREG7; _j .Lexit; .align 8 s32i a3, a2, PT_AREG8; _j .Lexit; .align 8 mov a9, a3 ; _j .Lexit; .align 8 mov a10, a3 ; _j .Lexit; .align 8 mov a11, a3 ; _j .Lexit; .align 8 mov a12, a3 ; _j .Lexit; .align 8 mov a13, a3 ; _j .Lexit; .align 8 mov a14, a3 ; _j .Lexit; .align 8 mov a15, a3 ; _j .Lexit; .align 8 .Lstore_table: l32i a3, a2, PT_AREG0; _j 1f; .align 8 mov a3, a1; _j 1f; .align 8 # fishy?? l32i a3, a2, PT_AREG2; _j 1f; .align 8 l32i a3, a2, PT_AREG3; _j 1f; .align 8 l32i a3, a2, PT_AREG4; _j 1f; .align 8 l32i a3, a2, PT_AREG5; _j 1f; .align 8 l32i a3, a2, PT_AREG6; _j 1f; .align 8 l32i a3, a2, PT_AREG7; _j 1f; .align 8 l32i a3, a2, PT_AREG8; _j 1f; .align 8 mov a3, a9 ; _j 1f; .align 8 mov a3, a10 ; _j 1f; .align 8 mov a3, a11 ; _j 1f; .align 8 mov a3, a12 ; _j 1f; .align 8 mov a3, a13 ; _j 1f; .align 8 mov a3, a14 ; _j 1f; .align 8 mov a3, a15 ; _j 1f; .align 8 /* We cannot handle this exception. */ .extern _kernel_exception .Linvalid_instruction_load: .Linvalid_instruction_store: movi a4, 0 rsr a3, excsave1 s32i a4, a3, EXC_TABLE_FIXUP /* Restore a4...a8 and SAR, set SP, and jump to default exception. */ l32i a8, a2, PT_AREG8 l32i a7, a2, PT_AREG7 l32i a6, a2, PT_AREG6 l32i a5, a2, PT_AREG5 l32i a4, a2, PT_AREG4 wsr a0, sar mov a1, a2 rsr a0, ps bbsi.l a0, PS_UM_BIT, 2f # jump if user mode movi a0, _kernel_exception jx a0 2: movi a0, _user_exception jx a0 1: # a7: instruction pointer, a4: instruction, a3: value movi a6, 0 # mask: ffffffff:00000000 #if XCHAL_HAVE_DENSITY addi a7, a7, 2 # incr. PC,assume 16-bit instruction extui a5, a4, INSN_OP0, 4 # extract OP0 addi a5, a5, -OP0_S32I_N _beqz a5, 1f # s32i.n: jump addi a7, a7, 1 # increment PC, 32-bit instruction #else addi a7, a7, 3 # increment PC, 32-bit instruction #endif extui a5, a4, INSN_OP1, 4 # extract OP1 _beqi a5, OP1_S32I, 1f # jump if 32 bit store _bnei a5, OP1_S16I, .Linvalid_instruction_store movi a5, -1 __extl a3, a3 # get 16-bit value __exth a6, a5 # get 16-bit mask ffffffff:ffff0000 /* Get memory address */ 1: movi a4, ~3 and a4, a4, a8 # align memory address /* Insert value into memory */ movi a5, -1 # mask: ffffffff:XXXX0000 #ifdef UNALIGNED_USER_EXCEPTION addi a4, a4, 8 #endif __ssa8r a8 __src_b a8, a5, a6 # lo-mask F..F0..0 (BE) 0..0F..F (LE) __src_b a6, a6, a5 # hi-mask 0..0F..F (BE) F..F0..0 (LE) #ifdef UNALIGNED_USER_EXCEPTION l32e a5, a4, -8 #else l32i a5, a4, 0 # load lower address word #endif and a5, a5, a8 # mask __sh a8, a3 # shift value or a5, a5, a8 # or with original value #ifdef UNALIGNED_USER_EXCEPTION s32e a5, a4, -8 l32e a8, a4, -4 #else s32i a5, a4, 0 # store l32i a8, a4, 4 # same for upper address word #endif __sl a5, a3 and a6, a8, a6 or a6, a6, a5 #ifdef UNALIGNED_USER_EXCEPTION s32e a6, a4, -4 #else s32i a6, a4, 4 #endif .Lexit: #if XCHAL_HAVE_LOOPS rsr a4, lend # check if we reached LEND bne a7, a4, 1f rsr a4, lcount # and LCOUNT != 0 beqz a4, 1f addi a4, a4, -1 # decrement LCOUNT and set rsr a7, lbeg # set PC to LBEGIN wsr a4, lcount #endif 1: wsr a7, epc1 # skip emulated instruction /* Update icount if we're single-stepping in userspace. 
*/ rsr a4, icountlevel beqz a4, 1f bgeui a4, LOCKLEVEL + 1, 1f rsr a4, icount addi a4, a4, 1 wsr a4, icount 1: movi a4, 0 rsr a3, excsave1 s32i a4, a3, EXC_TABLE_FIXUP /* Restore working register */ l32i a8, a2, PT_AREG8 l32i a7, a2, PT_AREG7 l32i a6, a2, PT_AREG6 l32i a5, a2, PT_AREG5 l32i a4, a2, PT_AREG4 l32i a3, a2, PT_AREG3 /* restore SAR and return */ wsr a0, sar l32i a0, a2, PT_AREG0 l32i a2, a2, PT_AREG2 rfe ENDPROC(fast_unaligned) ENTRY(fast_unaligned_fixup) l32i a2, a3, EXC_TABLE_DOUBLE_SAVE wsr a3, excsave1 l32i a8, a2, PT_AREG8 l32i a7, a2, PT_AREG7 l32i a6, a2, PT_AREG6 l32i a5, a2, PT_AREG5 l32i a4, a2, PT_AREG4 l32i a0, a2, PT_AREG2 xsr a0, depc # restore depc and a0 wsr a0, sar rsr a0, exccause s32i a0, a2, PT_DEPC # mark as a regular exception rsr a0, ps bbsi.l a0, PS_UM_BIT, 1f # jump if user mode rsr a0, exccause addx4 a0, a0, a3 # find entry in table l32i a0, a0, EXC_TABLE_FAST_KERNEL # load handler l32i a3, a2, PT_AREG3 jx a0 1: rsr a0, exccause addx4 a0, a0, a3 # find entry in table l32i a0, a0, EXC_TABLE_FAST_USER # load handler l32i a3, a2, PT_AREG3 jx a0 ENDPROC(fast_unaligned_fixup) #endif /* XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION */
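The core trick in fast_unaligned is to never touch the unaligned address itself: the handler loads the two aligned words that enclose it and funnel-shifts them together by the byte offset (__ssa8/__src_b), and on stores it merges the new bytes into those words with complementary masks. Here is a small C sketch of the load side only, little-endian only — the assembly hides the endian differences behind the macros defined above.

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

static uint32_t unaligned_load32_le(const uint8_t *addr)
{
	uintptr_t a = (uintptr_t)addr;
	const uint32_t *word = (const uint32_t *)(a & ~(uintptr_t)3); /* align down */
	unsigned int shift = (unsigned int)(a & 3) * 8;   /* byte offset in bits */
	uint32_t lo = word[0];
	uint32_t hi = word[1];

	if (shift == 0)
		return lo;                /* address was aligned after all */

	/* Combine the upper bytes of the first word with the lower bytes of
	 * the second -- the software equivalent of __ssa8 + __src_b. */
	return (lo >> shift) | (hi << (32 - shift));
}

int main(void)
{
	/* Two aligned words whose little-endian byte image is 11 22 ... 88. */
	static const uint32_t words[2] = { 0x44332211u, 0x88776655u };
	const uint8_t *p = (const uint8_t *)words + 2;    /* misaligned by 2 */

	printf("0x%08" PRIx32 "\n", unaligned_load32_le(p)); /* prints 0x66554433 */
	return 0;
}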
AirFortressIlikara/LS2K0300-linux-4.19
3,680
arch/xtensa/lib/strnlen_user.S
/* * arch/xtensa/lib/strnlen_user.S * * This file is subject to the terms and conditions of the GNU General * Public License. See the file "COPYING" in the main directory of * this archive for more details. * * Returns strnlen, including trailing zero terminator. * Zero indicates error. * * Copyright (C) 2002 Tensilica Inc. */ #include <linux/linkage.h> #include <variant/core.h> #include <asm/asmmacro.h> /* * size_t __strnlen_user(const char *s, size_t len) */ #ifdef __XTENSA_EB__ # define MASK0 0xff000000 # define MASK1 0x00ff0000 # define MASK2 0x0000ff00 # define MASK3 0x000000ff #else # define MASK0 0x000000ff # define MASK1 0x0000ff00 # define MASK2 0x00ff0000 # define MASK3 0xff000000 #endif # Register use: # a2/ src # a3/ len # a4/ tmp # a5/ mask0 # a6/ mask1 # a7/ mask2 # a8/ mask3 # a9/ tmp # a10/ tmp .text ENTRY(__strnlen_user) entry sp, 16 # minimal stack frame # a2/ s, a3/ len addi a4, a2, -4 # because we overincrement at the end; # we compensate with load offsets of 4 movi a5, MASK0 # mask for byte 0 movi a6, MASK1 # mask for byte 1 movi a7, MASK2 # mask for byte 2 movi a8, MASK3 # mask for byte 3 bbsi.l a2, 0, .L1mod2 # if only 8-bit aligned bbsi.l a2, 1, .L2mod4 # if only 16-bit aligned /* * String is word-aligned. */ .Laligned: srli a10, a3, 2 # number of loop iterations with 4B per loop #if XCHAL_HAVE_LOOPS loopnez a10, .Ldone #else beqz a10, .Ldone slli a10, a10, 2 add a10, a10, a4 # a10 = end of last 4B chunk #endif /* XCHAL_HAVE_LOOPS */ .Loop: EX(10f) l32i a9, a4, 4 # get next word of string addi a4, a4, 4 # advance string pointer bnone a9, a5, .Lz0 # if byte 0 is zero bnone a9, a6, .Lz1 # if byte 1 is zero bnone a9, a7, .Lz2 # if byte 2 is zero bnone a9, a8, .Lz3 # if byte 3 is zero #if !XCHAL_HAVE_LOOPS blt a4, a10, .Loop #endif .Ldone: EX(10f) l32i a9, a4, 4 # load 4 bytes for remaining checks bbci.l a3, 1, .L100 # check two more bytes (bytes 0, 1 of word) addi a4, a4, 2 # advance string pointer bnone a9, a5, .Lz0 # if byte 0 is zero bnone a9, a6, .Lz1 # if byte 1 is zero .L100: bbci.l a3, 0, .L101 # check one more byte (byte 2 of word) # Actually, we don't need to check. Zero or nonzero, we'll add one. # Do not add an extra one for the NULL terminator since we have # exhausted the original len parameter. addi a4, a4, 1 # advance string pointer .L101: sub a2, a4, a2 # compute length retw # NOTE that in several places below, we point to the byte just after # the zero byte in order to include the NULL terminator in the count. .Lz3: # byte 3 is zero addi a4, a4, 3 # point to zero byte .Lz0: # byte 0 is zero addi a4, a4, 1 # point just beyond zero byte sub a2, a4, a2 # subtract to get length retw .Lz1: # byte 1 is zero addi a4, a4, 1+1 # point just beyond zero byte sub a2, a4, a2 # subtract to get length retw .Lz2: # byte 2 is zero addi a4, a4, 2+1 # point just beyond zero byte sub a2, a4, a2 # subtract to get length retw .L1mod2: # address is odd EX(10f) l8ui a9, a4, 4 # get byte 0 addi a4, a4, 1 # advance string pointer beqz a9, .Lz3 # if byte 0 is zero bbci.l a4, 1, .Laligned # if string pointer is now word-aligned .L2mod4: # address is 2 mod 4 addi a4, a4, 2 # advance ptr for aligned access EX(10f) l32i a9, a4, 0 # get word with first two bytes of string bnone a9, a7, .Lz2 # if byte 2 (of word, not string) is zero bany a9, a8, .Laligned # if byte 3 (of word, not string) is nonzero # byte 3 is zero addi a4, a4, 3+1 # point just beyond zero byte sub a2, a4, a2 # subtract to get length retw ENDPROC(__strnlen_user) .section .fixup, "ax" .align 4 10: movi a2, 0 retw
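The scan above works a word at a time, testing one byte lane per bnone with the endian-dependent MASKn constants. The sketch below shows the same idea in C with the little-endian masks only; the user-space fault handling is not modelled (the kernel version returns 0 on a fault), and the no-NUL case here simply returns the limit.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define MASK0 0x000000ffu   /* byte at the lowest address (little-endian) */
#define MASK1 0x0000ff00u
#define MASK2 0x00ff0000u
#define MASK3 0xff000000u

/* Returns the string length including the terminating NUL, or 'limit' if
 * no NUL shows up within the first 'limit' bytes. */
static size_t strnlen_words(const char *s, size_t limit)
{
	static const uint32_t mask[4] = { MASK0, MASK1, MASK2, MASK3 };
	size_t i;

	for (i = 0; i + 4 <= limit; i += 4) {
		uint32_t w;

		memcpy(&w, s + i, 4);             /* one 4-byte load */
		for (int b = 0; b < 4; b++)
			if ((w & mask[b]) == 0)   /* the bnone test */
				return i + b + 1;
	}
	for (; i < limit; i++)                    /* trailing 1..3 bytes */
		if (s[i] == '\0')
			return i + 1;
	return limit;
}

int main(void)
{
	char buf[16] = "hello";

	printf("%zu\n", strnlen_words(buf, sizeof(buf)));  /* prints 6 */
	return 0;
}

Checking four lanes of one loaded word costs far fewer loads and branches per byte than a byte loop, which is the whole point of the aligned fast path above.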
AirFortressIlikara/LS2K0300-linux-4.19
8,825
arch/xtensa/lib/checksum.S
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * IP/TCP/UDP checksumming routines * * Xtensa version: Copyright (C) 2001 Tensilica, Inc. by Kevin Chea * Optimized by Joe Taylor * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/errno.h> #include <linux/linkage.h> #include <variant/core.h> #include <asm/asmmacro.h> /* * computes a partial checksum, e.g. for TCP/UDP fragments */ /* * unsigned int csum_partial(const unsigned char *buf, int len, * unsigned int sum); * a2 = buf * a3 = len * a4 = sum * * This function assumes 2- or 4-byte alignment. Other alignments will fail! */ /* ONES_ADD converts twos-complement math to ones-complement. */ #define ONES_ADD(sum, val) \ add sum, sum, val ; \ bgeu sum, val, 99f ; \ addi sum, sum, 1 ; \ 99: ; .text ENTRY(csum_partial) /* * Experiments with Ethernet and SLIP connections show that buf * is aligned on either a 2-byte or 4-byte boundary. */ entry sp, 32 extui a5, a2, 0, 2 bnez a5, 8f /* branch if 2-byte aligned */ /* Fall-through on common case, 4-byte alignment */ 1: srli a5, a3, 5 /* 32-byte chunks */ #if XCHAL_HAVE_LOOPS loopgtz a5, 2f #else beqz a5, 2f slli a5, a5, 5 add a5, a5, a2 /* a5 = end of last 32-byte chunk */ .Loop1: #endif l32i a6, a2, 0 l32i a7, a2, 4 ONES_ADD(a4, a6) ONES_ADD(a4, a7) l32i a6, a2, 8 l32i a7, a2, 12 ONES_ADD(a4, a6) ONES_ADD(a4, a7) l32i a6, a2, 16 l32i a7, a2, 20 ONES_ADD(a4, a6) ONES_ADD(a4, a7) l32i a6, a2, 24 l32i a7, a2, 28 ONES_ADD(a4, a6) ONES_ADD(a4, a7) addi a2, a2, 4*8 #if !XCHAL_HAVE_LOOPS blt a2, a5, .Loop1 #endif 2: extui a5, a3, 2, 3 /* remaining 4-byte chunks */ #if XCHAL_HAVE_LOOPS loopgtz a5, 3f #else beqz a5, 3f slli a5, a5, 2 add a5, a5, a2 /* a5 = end of last 4-byte chunk */ .Loop2: #endif l32i a6, a2, 0 ONES_ADD(a4, a6) addi a2, a2, 4 #if !XCHAL_HAVE_LOOPS blt a2, a5, .Loop2 #endif 3: _bbci.l a3, 1, 5f /* remaining 2-byte chunk */ l16ui a6, a2, 0 ONES_ADD(a4, a6) addi a2, a2, 2 5: _bbci.l a3, 0, 7f /* remaining 1-byte chunk */ 6: l8ui a6, a2, 0 #ifdef __XTENSA_EB__ slli a6, a6, 8 /* load byte into bits 8..15 */ #endif ONES_ADD(a4, a6) 7: mov a2, a4 retw /* uncommon case, buf is 2-byte aligned */ 8: beqz a3, 7b /* branch if len == 0 */ beqi a3, 1, 6b /* branch if len == 1 */ extui a5, a2, 0, 1 bnez a5, 8f /* branch if 1-byte aligned */ l16ui a6, a2, 0 /* common case, len >= 2 */ ONES_ADD(a4, a6) addi a2, a2, 2 /* adjust buf */ addi a3, a3, -2 /* adjust len */ j 1b /* now buf is 4-byte aligned */ /* case: odd-byte aligned, len > 1 * This case is dog slow, so don't give us an odd address. * (I don't think this ever happens, but just in case.) */ 8: srli a5, a3, 2 /* 4-byte chunks */ #if XCHAL_HAVE_LOOPS loopgtz a5, 2f #else beqz a5, 2f slli a5, a5, 2 add a5, a5, a2 /* a5 = end of last 4-byte chunk */ .Loop3: #endif l8ui a6, a2, 0 /* bits 24..31 */ l16ui a7, a2, 1 /* bits 8..23 */ l8ui a8, a2, 3 /* bits 0.. 
8 */ #ifdef __XTENSA_EB__ slli a6, a6, 24 #else slli a8, a8, 24 #endif slli a7, a7, 8 or a7, a7, a6 or a7, a7, a8 ONES_ADD(a4, a7) addi a2, a2, 4 #if !XCHAL_HAVE_LOOPS blt a2, a5, .Loop3 #endif 2: _bbci.l a3, 1, 3f /* remaining 2-byte chunk, still odd addr */ l8ui a6, a2, 0 l8ui a7, a2, 1 #ifdef __XTENSA_EB__ slli a6, a6, 8 #else slli a7, a7, 8 #endif or a7, a7, a6 ONES_ADD(a4, a7) addi a2, a2, 2 3: j 5b /* branch to handle the remaining byte */ ENDPROC(csum_partial) /* * Copy from ds while checksumming, otherwise like csum_partial */ /* unsigned int csum_partial_copy_generic (const char *src, char *dst, int len, int sum, int *src_err_ptr, int *dst_err_ptr) a2 = src a3 = dst a4 = len a5 = sum a6 = src_err_ptr a7 = dst_err_ptr a8 = temp a9 = temp a10 = temp a11 = original len for exception handling a12 = original dst for exception handling This function is optimized for 4-byte aligned addresses. Other alignments work, but not nearly as efficiently. */ ENTRY(csum_partial_copy_generic) entry sp, 32 mov a12, a3 mov a11, a4 or a10, a2, a3 /* We optimize the following alignment tests for the 4-byte aligned case. Two bbsi.l instructions might seem more optimal (commented out below). However, both labels 5: and 3: are out of the imm8 range, so the assembler relaxes them into equivalent bbci.l, j combinations, which is actually slower. */ extui a9, a10, 0, 2 beqz a9, 1f /* branch if both are 4-byte aligned */ bbsi.l a10, 0, 5f /* branch if one address is odd */ j 3f /* one address is 2-byte aligned */ /* _bbsi.l a10, 0, 5f */ /* branch if odd address */ /* _bbsi.l a10, 1, 3f */ /* branch if 2-byte-aligned address */ 1: /* src and dst are both 4-byte aligned */ srli a10, a4, 5 /* 32-byte chunks */ #if XCHAL_HAVE_LOOPS loopgtz a10, 2f #else beqz a10, 2f slli a10, a10, 5 add a10, a10, a2 /* a10 = end of last 32-byte src chunk */ .Loop5: #endif EX(10f) l32i a9, a2, 0 EX(10f) l32i a8, a2, 4 EX(11f) s32i a9, a3, 0 EX(11f) s32i a8, a3, 4 ONES_ADD(a5, a9) ONES_ADD(a5, a8) EX(10f) l32i a9, a2, 8 EX(10f) l32i a8, a2, 12 EX(11f) s32i a9, a3, 8 EX(11f) s32i a8, a3, 12 ONES_ADD(a5, a9) ONES_ADD(a5, a8) EX(10f) l32i a9, a2, 16 EX(10f) l32i a8, a2, 20 EX(11f) s32i a9, a3, 16 EX(11f) s32i a8, a3, 20 ONES_ADD(a5, a9) ONES_ADD(a5, a8) EX(10f) l32i a9, a2, 24 EX(10f) l32i a8, a2, 28 EX(11f) s32i a9, a3, 24 EX(11f) s32i a8, a3, 28 ONES_ADD(a5, a9) ONES_ADD(a5, a8) addi a2, a2, 32 addi a3, a3, 32 #if !XCHAL_HAVE_LOOPS blt a2, a10, .Loop5 #endif 2: extui a10, a4, 2, 3 /* remaining 4-byte chunks */ extui a4, a4, 0, 2 /* reset len for general-case, 2-byte chunks */ #if XCHAL_HAVE_LOOPS loopgtz a10, 3f #else beqz a10, 3f slli a10, a10, 2 add a10, a10, a2 /* a10 = end of last 4-byte src chunk */ .Loop6: #endif EX(10f) l32i a9, a2, 0 EX(11f) s32i a9, a3, 0 ONES_ADD(a5, a9) addi a2, a2, 4 addi a3, a3, 4 #if !XCHAL_HAVE_LOOPS blt a2, a10, .Loop6 #endif 3: /* Control comes to here in two cases: (1) It may fall through to here from the 4-byte alignment case to process, at most, one 2-byte chunk. (2) It branches to here from above if either src or dst is 2-byte aligned, and we process all bytes here, except for perhaps a trailing odd byte. It's inefficient, so align your addresses to 4-byte boundaries. 
a2 = src a3 = dst a4 = len a5 = sum */ srli a10, a4, 1 /* 2-byte chunks */ #if XCHAL_HAVE_LOOPS loopgtz a10, 4f #else beqz a10, 4f slli a10, a10, 1 add a10, a10, a2 /* a10 = end of last 2-byte src chunk */ .Loop7: #endif EX(10f) l16ui a9, a2, 0 EX(11f) s16i a9, a3, 0 ONES_ADD(a5, a9) addi a2, a2, 2 addi a3, a3, 2 #if !XCHAL_HAVE_LOOPS blt a2, a10, .Loop7 #endif 4: /* This section processes a possible trailing odd byte. */ _bbci.l a4, 0, 8f /* 1-byte chunk */ EX(10f) l8ui a9, a2, 0 EX(11f) s8i a9, a3, 0 #ifdef __XTENSA_EB__ slli a9, a9, 8 /* shift byte to bits 8..15 */ #endif ONES_ADD(a5, a9) 8: mov a2, a5 retw 5: /* Control branch to here when either src or dst is odd. We process all bytes using 8-bit accesses. Grossly inefficient, so don't feed us an odd address. */ srli a10, a4, 1 /* handle in pairs for 16-bit csum */ #if XCHAL_HAVE_LOOPS loopgtz a10, 6f #else beqz a10, 6f slli a10, a10, 1 add a10, a10, a2 /* a10 = end of last odd-aligned, 2-byte src chunk */ .Loop8: #endif EX(10f) l8ui a9, a2, 0 EX(10f) l8ui a8, a2, 1 EX(11f) s8i a9, a3, 0 EX(11f) s8i a8, a3, 1 #ifdef __XTENSA_EB__ slli a9, a9, 8 /* combine into a single 16-bit value */ #else /* for checksum computation */ slli a8, a8, 8 #endif or a9, a9, a8 ONES_ADD(a5, a9) addi a2, a2, 2 addi a3, a3, 2 #if !XCHAL_HAVE_LOOPS blt a2, a10, .Loop8 #endif 6: j 4b /* process the possible trailing odd byte */ ENDPROC(csum_partial_copy_generic) # Exception handler: .section .fixup, "ax" /* a6 = src_err_ptr a7 = dst_err_ptr a11 = original len for exception handling a12 = original dst for exception handling */ 10: _movi a2, -EFAULT s32i a2, a6, 0 /* src_err_ptr */ # clear the complete destination - computing the rest # is too much work movi a2, 0 #if XCHAL_HAVE_LOOPS loopgtz a11, 2f #else beqz a11, 2f add a11, a11, a12 /* a11 = ending address */ .Leloop: #endif s8i a2, a12, 0 addi a12, a12, 1 #if !XCHAL_HAVE_LOOPS blt a12, a11, .Leloop #endif 2: retw 11: movi a2, -EFAULT s32i a2, a7, 0 /* dst_err_ptr */ movi a2, 0 retw .previous
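ONES_ADD above is ones'-complement addition spelled out in three instructions: add, and if the 32-bit add carried out (the sum became smaller than the addend), wrap the carry back into bit 0. The C model below reproduces that, plus a fold down to 16 bits so the example prints a checkable number; the kernel performs the fold and the final inversion elsewhere (csum_fold), not in this file.

#include <stdint.h>
#include <stdio.h>

static uint32_t ones_add(uint32_t sum, uint32_t val)
{
	sum += val;
	if (sum < val)          /* carry out of bit 31 wraps around to bit 0 */
		sum += 1;
	return sum;
}

static uint16_t fold16(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);   /* fold 32 bits down to 17 */
	sum = (sum & 0xffff) + (sum >> 16);   /* fold the possible carry */
	return (uint16_t)sum;
}

int main(void)
{
	/* Accumulate a few 32-bit words, as the 4-byte-aligned fast path of
	 * csum_partial does (the words here are arbitrary sample data). */
	const uint32_t words[] = { 0x45000073u, 0x00004000u, 0x40110000u };
	uint32_t sum = 0;

	for (unsigned int i = 0; i < sizeof(words) / sizeof(words[0]); i++)
		sum = ones_add(sum, words[i]);

	/* prints partial=0x85114073 folded=0xc584 */
	printf("partial=0x%08lx folded=0x%04x\n",
	       (unsigned long)sum, (unsigned int)fold16(sum));
	return 0;
}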
AirFortressIlikara/LS2K0300-linux-4.19
7,131
arch/xtensa/lib/usercopy.S
/* * arch/xtensa/lib/usercopy.S * * Copy to/from user space (derived from arch/xtensa/lib/hal/memcopy.S) * * DO NOT COMBINE this function with <arch/xtensa/lib/hal/memcopy.S>. * It needs to remain separate and distinct. The hal files are part * of the Xtensa link-time HAL, and those files may differ per * processor configuration. Patching the kernel for another * processor configuration includes replacing the hal files, and we * could lose the special functionality for accessing user-space * memory during such a patch. We sacrifice a little code space here * in favor to simplify code maintenance. * * This file is subject to the terms and conditions of the GNU General * Public License. See the file "COPYING" in the main directory of * this archive for more details. * * Copyright (C) 2002 Tensilica Inc. */ /* * size_t __xtensa_copy_user (void *dst, const void *src, size_t len); * * The returned value is the number of bytes not copied. Implies zero * is success. * * The general case algorithm is as follows: * If the destination and source are both aligned, * do 16B chunks with a loop, and then finish up with * 8B, 4B, 2B, and 1B copies conditional on the length. * If destination is aligned and source unaligned, * do the same, but use SRC to align the source data. * If destination is unaligned, align it by conditionally * copying 1B and 2B and then retest. * This code tries to use fall-through braches for the common * case of aligned destinations (except for the branches to * the alignment label). * * Register use: * a0/ return address * a1/ stack pointer * a2/ return value * a3/ src * a4/ length * a5/ dst * a6/ tmp * a7/ tmp * a8/ tmp * a9/ tmp * a10/ tmp * a11/ original length */ #include <linux/linkage.h> #include <variant/core.h> #include <asm/asmmacro.h> .text ENTRY(__xtensa_copy_user) entry sp, 16 # minimal stack frame # a2/ dst, a3/ src, a4/ len mov a5, a2 # copy dst so that a2 is return value mov a11, a4 # preserve original len for error case .Lcommon: bbsi.l a2, 0, .Ldst1mod2 # if dst is 1 mod 2 bbsi.l a2, 1, .Ldst2mod4 # if dst is 2 mod 4 .Ldstaligned: # return here from .Ldstunaligned when dst is aligned srli a7, a4, 4 # number of loop iterations with 16B # per iteration movi a8, 3 # if source is also aligned, bnone a3, a8, .Laligned # then use word copy __ssa8 a3 # set shift amount from byte offset bnez a4, .Lsrcunaligned movi a2, 0 # return success for len==0 retw /* * Destination is unaligned */ .Ldst1mod2: # dst is only byte aligned bltui a4, 7, .Lbytecopy # do short copies byte by byte # copy 1 byte EX(10f) l8ui a6, a3, 0 addi a3, a3, 1 EX(10f) s8i a6, a5, 0 addi a5, a5, 1 addi a4, a4, -1 bbci.l a5, 1, .Ldstaligned # if dst is now aligned, then # return to main algorithm .Ldst2mod4: # dst 16-bit aligned # copy 2 bytes bltui a4, 6, .Lbytecopy # do short copies byte by byte EX(10f) l8ui a6, a3, 0 EX(10f) l8ui a7, a3, 1 addi a3, a3, 2 EX(10f) s8i a6, a5, 0 EX(10f) s8i a7, a5, 1 addi a5, a5, 2 addi a4, a4, -2 j .Ldstaligned # dst is now aligned, return to main algorithm /* * Byte by byte copy */ .align 4 .byte 0 # 1 mod 4 alignment for LOOPNEZ # (0 mod 4 alignment for LBEG) .Lbytecopy: #if XCHAL_HAVE_LOOPS loopnez a4, .Lbytecopydone #else /* !XCHAL_HAVE_LOOPS */ beqz a4, .Lbytecopydone add a7, a3, a4 # a7 = end address for source #endif /* !XCHAL_HAVE_LOOPS */ .Lnextbyte: EX(10f) l8ui a6, a3, 0 addi a3, a3, 1 EX(10f) s8i a6, a5, 0 addi a5, a5, 1 #if !XCHAL_HAVE_LOOPS blt a3, a7, .Lnextbyte #endif /* !XCHAL_HAVE_LOOPS */ .Lbytecopydone: movi a2, 0 # return success for len bytes 
copied retw /* * Destination and source are word-aligned. */ # copy 16 bytes per iteration for word-aligned dst and word-aligned src .align 4 # 1 mod 4 alignment for LOOPNEZ .byte 0 # (0 mod 4 alignment for LBEG) .Laligned: #if XCHAL_HAVE_LOOPS loopnez a7, .Loop1done #else /* !XCHAL_HAVE_LOOPS */ beqz a7, .Loop1done slli a8, a7, 4 add a8, a8, a3 # a8 = end of last 16B source chunk #endif /* !XCHAL_HAVE_LOOPS */ .Loop1: EX(10f) l32i a6, a3, 0 EX(10f) l32i a7, a3, 4 EX(10f) s32i a6, a5, 0 EX(10f) l32i a6, a3, 8 EX(10f) s32i a7, a5, 4 EX(10f) l32i a7, a3, 12 EX(10f) s32i a6, a5, 8 addi a3, a3, 16 EX(10f) s32i a7, a5, 12 addi a5, a5, 16 #if !XCHAL_HAVE_LOOPS blt a3, a8, .Loop1 #endif /* !XCHAL_HAVE_LOOPS */ .Loop1done: bbci.l a4, 3, .L2 # copy 8 bytes EX(10f) l32i a6, a3, 0 EX(10f) l32i a7, a3, 4 addi a3, a3, 8 EX(10f) s32i a6, a5, 0 EX(10f) s32i a7, a5, 4 addi a5, a5, 8 .L2: bbci.l a4, 2, .L3 # copy 4 bytes EX(10f) l32i a6, a3, 0 addi a3, a3, 4 EX(10f) s32i a6, a5, 0 addi a5, a5, 4 .L3: bbci.l a4, 1, .L4 # copy 2 bytes EX(10f) l16ui a6, a3, 0 addi a3, a3, 2 EX(10f) s16i a6, a5, 0 addi a5, a5, 2 .L4: bbci.l a4, 0, .L5 # copy 1 byte EX(10f) l8ui a6, a3, 0 EX(10f) s8i a6, a5, 0 .L5: movi a2, 0 # return success for len bytes copied retw /* * Destination is aligned, Source is unaligned */ .align 4 .byte 0 # 1 mod 4 alignement for LOOPNEZ # (0 mod 4 alignment for LBEG) .Lsrcunaligned: # copy 16 bytes per iteration for word-aligned dst and unaligned src and a10, a3, a8 # save unalignment offset for below sub a3, a3, a10 # align a3 (to avoid sim warnings only; not needed for hardware) EX(10f) l32i a6, a3, 0 # load first word #if XCHAL_HAVE_LOOPS loopnez a7, .Loop2done #else /* !XCHAL_HAVE_LOOPS */ beqz a7, .Loop2done slli a12, a7, 4 add a12, a12, a3 # a12 = end of last 16B source chunk #endif /* !XCHAL_HAVE_LOOPS */ .Loop2: EX(10f) l32i a7, a3, 4 EX(10f) l32i a8, a3, 8 __src_b a6, a6, a7 EX(10f) s32i a6, a5, 0 EX(10f) l32i a9, a3, 12 __src_b a7, a7, a8 EX(10f) s32i a7, a5, 4 EX(10f) l32i a6, a3, 16 __src_b a8, a8, a9 EX(10f) s32i a8, a5, 8 addi a3, a3, 16 __src_b a9, a9, a6 EX(10f) s32i a9, a5, 12 addi a5, a5, 16 #if !XCHAL_HAVE_LOOPS blt a3, a12, .Loop2 #endif /* !XCHAL_HAVE_LOOPS */ .Loop2done: bbci.l a4, 3, .L12 # copy 8 bytes EX(10f) l32i a7, a3, 4 EX(10f) l32i a8, a3, 8 __src_b a6, a6, a7 EX(10f) s32i a6, a5, 0 addi a3, a3, 8 __src_b a7, a7, a8 EX(10f) s32i a7, a5, 4 addi a5, a5, 8 mov a6, a8 .L12: bbci.l a4, 2, .L13 # copy 4 bytes EX(10f) l32i a7, a3, 4 addi a3, a3, 4 __src_b a6, a6, a7 EX(10f) s32i a6, a5, 0 addi a5, a5, 4 mov a6, a7 .L13: add a3, a3, a10 # readjust a3 with correct misalignment bbci.l a4, 1, .L14 # copy 2 bytes EX(10f) l8ui a6, a3, 0 EX(10f) l8ui a7, a3, 1 addi a3, a3, 2 EX(10f) s8i a6, a5, 0 EX(10f) s8i a7, a5, 1 addi a5, a5, 2 .L14: bbci.l a4, 0, .L15 # copy 1 byte EX(10f) l8ui a6, a3, 0 EX(10f) s8i a6, a5, 0 .L15: movi a2, 0 # return success for len bytes copied retw ENDPROC(__xtensa_copy_user) .section .fixup, "ax" .align 4 /* a2 = original dst; a5 = current dst; a11= original len * bytes_copied = a5 - a2 * retval = bytes_not_copied = original len - bytes_copied * retval = a11 - (a5 - a2) */ 10: sub a2, a5, a2 /* a2 <-- bytes copied */ sub a2, a11, a2 /* a2 <-- bytes not copied */ retw
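The fixup block at the end encodes the function's contract: the return value is the number of bytes not copied, computed as the original length minus (current dst - original dst), so 0 means success. Below is a toy C model of that arithmetic; copy_user_sim and its fault_after parameter are made up for the illustration and merely simulate a fault instead of catching a real one.

#include <stddef.h>
#include <string.h>
#include <stdio.h>

static size_t copy_user_sim(void *dst, const void *src, size_t len,
			    size_t fault_after)
{
	/* Pretend the copy faults after 'fault_after' bytes (if < len). */
	size_t copied = fault_after < len ? fault_after : len;
	char *d = dst;

	memcpy(d, src, copied);
	d += copied;                       /* d now plays the role of a5 */

	/* The fixup arithmetic at label 10: above:
	 * bytes_not_copied = original_len - (current_dst - original_dst). */
	return len - (size_t)(d - (char *)dst);
}

int main(void)
{
	char src[8] = "abcdefg", dst[8] = { 0 };

	/* prints left=3: only 5 of the 8 bytes made it across */
	printf("left=%zu\n", copy_user_sim(dst, src, sizeof(src), 5));
	return 0;
}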
AirFortressIlikara/LS2K0300-linux-4.19
3,566
arch/xtensa/lib/memset.S
/* * arch/xtensa/lib/memset.S * * ANSI C standard library function memset * (Well, almost. .fixup code might return zero.) * * This file is subject to the terms and conditions of the GNU General * Public License. See the file "COPYING" in the main directory of * this archive for more details. * * Copyright (C) 2002 Tensilica Inc. */ #include <linux/linkage.h> #include <variant/core.h> #include <asm/asmmacro.h> /* * void *memset(void *dst, int c, size_t length) * * The algorithm is as follows: * Create a word with c in all byte positions * If the destination is aligned, * do 16B chucks with a loop, and then finish up with * 8B, 4B, 2B, and 1B stores conditional on the length. * If destination is unaligned, align it by conditionally * setting 1B and 2B and then go to aligned case. * This code tries to use fall-through branches for the common * case of an aligned destination (except for the branches to * the alignment labels). */ .text ENTRY(__memset) WEAK(memset) entry sp, 16 # minimal stack frame # a2/ dst, a3/ c, a4/ length extui a3, a3, 0, 8 # mask to just 8 bits slli a7, a3, 8 # duplicate character in all bytes of word or a3, a3, a7 # ... slli a7, a3, 16 # ... or a3, a3, a7 # ... mov a5, a2 # copy dst so that a2 is return value movi a6, 3 # for alignment tests bany a2, a6, .Ldstunaligned # if dst is unaligned .L0: # return here from .Ldstunaligned when dst is aligned srli a7, a4, 4 # number of loop iterations with 16B # per iteration bnez a4, .Laligned retw /* * Destination is word-aligned. */ # set 16 bytes per iteration for word-aligned dst .align 4 # 1 mod 4 alignment for LOOPNEZ .byte 0 # (0 mod 4 alignment for LBEG) .Laligned: #if XCHAL_HAVE_LOOPS loopnez a7, .Loop1done #else /* !XCHAL_HAVE_LOOPS */ beqz a7, .Loop1done slli a6, a7, 4 add a6, a6, a5 # a6 = end of last 16B chunk #endif /* !XCHAL_HAVE_LOOPS */ .Loop1: EX(10f) s32i a3, a5, 0 EX(10f) s32i a3, a5, 4 EX(10f) s32i a3, a5, 8 EX(10f) s32i a3, a5, 12 addi a5, a5, 16 #if !XCHAL_HAVE_LOOPS blt a5, a6, .Loop1 #endif /* !XCHAL_HAVE_LOOPS */ .Loop1done: bbci.l a4, 3, .L2 # set 8 bytes EX(10f) s32i a3, a5, 0 EX(10f) s32i a3, a5, 4 addi a5, a5, 8 .L2: bbci.l a4, 2, .L3 # set 4 bytes EX(10f) s32i a3, a5, 0 addi a5, a5, 4 .L3: bbci.l a4, 1, .L4 # set 2 bytes EX(10f) s16i a3, a5, 0 addi a5, a5, 2 .L4: bbci.l a4, 0, .L5 # set 1 byte EX(10f) s8i a3, a5, 0 .L5: .Lret1: retw /* * Destination is unaligned */ .Ldstunaligned: bltui a4, 8, .Lbyteset # do short copies byte by byte bbci.l a5, 0, .L20 # branch if dst alignment half-aligned # dst is only byte aligned # set 1 byte EX(10f) s8i a3, a5, 0 addi a5, a5, 1 addi a4, a4, -1 # now retest if dst aligned bbci.l a5, 1, .L0 # if now aligned, return to main algorithm .L20: # dst half-aligned # set 2 bytes EX(10f) s16i a3, a5, 0 addi a5, a5, 2 addi a4, a4, -2 j .L0 # dst is now aligned, return to main algorithm /* * Byte by byte set */ .align 4 .byte 0 # 1 mod 4 alignment for LOOPNEZ # (0 mod 4 alignment for LBEG) .Lbyteset: #if XCHAL_HAVE_LOOPS loopnez a4, .Lbytesetdone #else /* !XCHAL_HAVE_LOOPS */ beqz a4, .Lbytesetdone add a6, a5, a4 # a6 = ending address #endif /* !XCHAL_HAVE_LOOPS */ .Lbyteloop: EX(10f) s8i a3, a5, 0 addi a5, a5, 1 #if !XCHAL_HAVE_LOOPS blt a5, a6, .Lbyteloop #endif /* !XCHAL_HAVE_LOOPS */ .Lbytesetdone: retw ENDPROC(__memset) .section .fixup, "ax" .align 4 /* We return zero if a failure occurred. */ 10: movi a2, 0 retw
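The structure of __memset above, rendered in C: smear the fill byte across a 32-bit word, peel off 1- and 2-byte stores until the destination is word aligned, fill in word-sized chunks (16 bytes per iteration in the assembly), and finish with the sub-word tail. The EX() fault annotations and the zero-on-failure fixup are omitted from this sketch.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

void *memset_sketch(void *dst, int c, size_t len)
{
	unsigned char *p = dst;
	uint32_t word = (uint8_t)c;

	word |= word << 8;               /* duplicate the byte ...       */
	word |= word << 16;              /* ... into all four byte lanes */

	/* Align the destination first, mirroring .Ldstunaligned. */
	while (len && ((uintptr_t)p & 3)) {
		*p++ = (unsigned char)c;
		len--;
	}

	/* Aligned bulk fill (4 bytes per step here, 16 in the assembly). */
	while (len >= 4) {
		memcpy(p, &word, 4);
		p += 4;
		len -= 4;
	}

	while (len--)                    /* trailing 1..3 bytes */
		*p++ = (unsigned char)c;

	return dst;
}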
AirFortressIlikara/LS2K0300-linux-4.19
12,566
arch/xtensa/lib/memcopy.S
/* * arch/xtensa/lib/hal/memcopy.S -- Core HAL library functions * xthal_memcpy and xthal_bcopy * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2002 - 2012 Tensilica Inc. */ #include <linux/linkage.h> #include <variant/core.h> #include <asm/asmmacro.h> /* * void *memcpy(void *dst, const void *src, size_t len); * * This function is intended to do the same thing as the standard * library function memcpy() for most cases. * However, where the source and/or destination references * an instruction RAM or ROM or a data RAM or ROM, that * source and/or destination will always be accessed with * 32-bit load and store instructions (as required for these * types of devices). * * !!!!!!! XTFIXME: * !!!!!!! Handling of IRAM/IROM has not yet * !!!!!!! been implemented. * * The (general case) algorithm is as follows: * If destination is unaligned, align it by conditionally * copying 1 and 2 bytes. * If source is aligned, * do 16 bytes with a loop, and then finish up with * 8, 4, 2, and 1 byte copies conditional on the length; * else (if source is unaligned), * do the same, but use SRC to align the source data. * This code tries to use fall-through branches for the common * case of aligned source and destination and multiple * of 4 (or 8) length. * * Register use: * a0/ return address * a1/ stack pointer * a2/ return value * a3/ src * a4/ length * a5/ dst * a6/ tmp * a7/ tmp * a8/ tmp * a9/ tmp * a10/ tmp * a11/ tmp */ .text /* * Byte by byte copy */ .align 4 .byte 0 # 1 mod 4 alignment for LOOPNEZ # (0 mod 4 alignment for LBEG) .Lbytecopy: #if XCHAL_HAVE_LOOPS loopnez a4, .Lbytecopydone #else /* !XCHAL_HAVE_LOOPS */ beqz a4, .Lbytecopydone add a7, a3, a4 # a7 = end address for source #endif /* !XCHAL_HAVE_LOOPS */ .Lnextbyte: l8ui a6, a3, 0 addi a3, a3, 1 s8i a6, a5, 0 addi a5, a5, 1 #if !XCHAL_HAVE_LOOPS bne a3, a7, .Lnextbyte # continue loop if $a3:src != $a7:src_end #endif /* !XCHAL_HAVE_LOOPS */ .Lbytecopydone: retw /* * Destination is unaligned */ .align 4 .Ldst1mod2: # dst is only byte aligned _bltui a4, 7, .Lbytecopy # do short copies byte by byte # copy 1 byte l8ui a6, a3, 0 addi a3, a3, 1 addi a4, a4, -1 s8i a6, a5, 0 addi a5, a5, 1 _bbci.l a5, 1, .Ldstaligned # if dst is now aligned, then # return to main algorithm .Ldst2mod4: # dst 16-bit aligned # copy 2 bytes _bltui a4, 6, .Lbytecopy # do short copies byte by byte l8ui a6, a3, 0 l8ui a7, a3, 1 addi a3, a3, 2 addi a4, a4, -2 s8i a6, a5, 0 s8i a7, a5, 1 addi a5, a5, 2 j .Ldstaligned # dst is now aligned, return to main algorithm ENTRY(__memcpy) WEAK(memcpy) entry sp, 16 # minimal stack frame # a2/ dst, a3/ src, a4/ len mov a5, a2 # copy dst so that a2 is return value .Lcommon: _bbsi.l a2, 0, .Ldst1mod2 # if dst is 1 mod 2 _bbsi.l a2, 1, .Ldst2mod4 # if dst is 2 mod 4 .Ldstaligned: # return here from .Ldst?mod? once dst is aligned srli a7, a4, 4 # number of loop iterations with 16B # per iteration movi a8, 3 # if source is not aligned, _bany a3, a8, .Lsrcunaligned # then use shifting copy /* * Destination and source are word-aligned, use word copy. 
*/ # copy 16 bytes per iteration for word-aligned dst and word-aligned src #if XCHAL_HAVE_LOOPS loopnez a7, .Loop1done #else /* !XCHAL_HAVE_LOOPS */ beqz a7, .Loop1done slli a8, a7, 4 add a8, a8, a3 # a8 = end of last 16B source chunk #endif /* !XCHAL_HAVE_LOOPS */ .Loop1: l32i a6, a3, 0 l32i a7, a3, 4 s32i a6, a5, 0 l32i a6, a3, 8 s32i a7, a5, 4 l32i a7, a3, 12 s32i a6, a5, 8 addi a3, a3, 16 s32i a7, a5, 12 addi a5, a5, 16 #if !XCHAL_HAVE_LOOPS bne a3, a8, .Loop1 # continue loop if a3:src != a8:src_end #endif /* !XCHAL_HAVE_LOOPS */ .Loop1done: bbci.l a4, 3, .L2 # copy 8 bytes l32i a6, a3, 0 l32i a7, a3, 4 addi a3, a3, 8 s32i a6, a5, 0 s32i a7, a5, 4 addi a5, a5, 8 .L2: bbsi.l a4, 2, .L3 bbsi.l a4, 1, .L4 bbsi.l a4, 0, .L5 retw .L3: # copy 4 bytes l32i a6, a3, 0 addi a3, a3, 4 s32i a6, a5, 0 addi a5, a5, 4 bbsi.l a4, 1, .L4 bbsi.l a4, 0, .L5 retw .L4: # copy 2 bytes l16ui a6, a3, 0 addi a3, a3, 2 s16i a6, a5, 0 addi a5, a5, 2 bbsi.l a4, 0, .L5 retw .L5: # copy 1 byte l8ui a6, a3, 0 s8i a6, a5, 0 retw /* * Destination is aligned, Source is unaligned */ .align 4 .Lsrcunaligned: _beqz a4, .Ldone # avoid loading anything for zero-length copies # copy 16 bytes per iteration for word-aligned dst and unaligned src __ssa8 a3 # set shift amount from byte offset /* set to 1 when running on ISS (simulator) with the lint or ferret client, or 0 to save a few cycles */ #define SIM_CHECKS_ALIGNMENT 1 #if XCHAL_UNALIGNED_LOAD_EXCEPTION || SIM_CHECKS_ALIGNMENT and a11, a3, a8 # save unalignment offset for below sub a3, a3, a11 # align a3 #endif l32i a6, a3, 0 # load first word #if XCHAL_HAVE_LOOPS loopnez a7, .Loop2done #else /* !XCHAL_HAVE_LOOPS */ beqz a7, .Loop2done slli a10, a7, 4 add a10, a10, a3 # a10 = end of last 16B source chunk #endif /* !XCHAL_HAVE_LOOPS */ .Loop2: l32i a7, a3, 4 l32i a8, a3, 8 __src_b a6, a6, a7 s32i a6, a5, 0 l32i a9, a3, 12 __src_b a7, a7, a8 s32i a7, a5, 4 l32i a6, a3, 16 __src_b a8, a8, a9 s32i a8, a5, 8 addi a3, a3, 16 __src_b a9, a9, a6 s32i a9, a5, 12 addi a5, a5, 16 #if !XCHAL_HAVE_LOOPS bne a3, a10, .Loop2 # continue loop if a3:src != a10:src_end #endif /* !XCHAL_HAVE_LOOPS */ .Loop2done: bbci.l a4, 3, .L12 # copy 8 bytes l32i a7, a3, 4 l32i a8, a3, 8 __src_b a6, a6, a7 s32i a6, a5, 0 addi a3, a3, 8 __src_b a7, a7, a8 s32i a7, a5, 4 addi a5, a5, 8 mov a6, a8 .L12: bbci.l a4, 2, .L13 # copy 4 bytes l32i a7, a3, 4 addi a3, a3, 4 __src_b a6, a6, a7 s32i a6, a5, 0 addi a5, a5, 4 mov a6, a7 .L13: #if XCHAL_UNALIGNED_LOAD_EXCEPTION || SIM_CHECKS_ALIGNMENT add a3, a3, a11 # readjust a3 with correct misalignment #endif bbsi.l a4, 1, .L14 bbsi.l a4, 0, .L15 .Ldone: retw .L14: # copy 2 bytes l8ui a6, a3, 0 l8ui a7, a3, 1 addi a3, a3, 2 s8i a6, a5, 0 s8i a7, a5, 1 addi a5, a5, 2 bbsi.l a4, 0, .L15 retw .L15: # copy 1 byte l8ui a6, a3, 0 s8i a6, a5, 0 retw ENDPROC(__memcpy) /* * void bcopy(const void *src, void *dest, size_t n); */ ENTRY(bcopy) entry sp, 16 # minimal stack frame # a2=src, a3=dst, a4=len mov a5, a3 mov a3, a2 mov a2, a5 j .Lmovecommon # go to common code for memmove+bcopy ENDPROC(bcopy) /* * void *memmove(void *dst, const void *src, size_t len); * * This function is intended to do the same thing as the standard * library function memmove() for most cases. * However, where the source and/or destination references * an instruction RAM or ROM or a data RAM or ROM, that * source and/or destination will always be accessed with * 32-bit load and store instructions (as required for these * types of devices). * * !!!!!!! XTFIXME: * !!!!!!! 
Handling of IRAM/IROM has not yet * !!!!!!! been implemented. * * The (general case) algorithm is as follows: * If end of source doesn't overlap destination then use memcpy. * Otherwise do memcpy backwards. * * Register use: * a0/ return address * a1/ stack pointer * a2/ return value * a3/ src * a4/ length * a5/ dst * a6/ tmp * a7/ tmp * a8/ tmp * a9/ tmp * a10/ tmp * a11/ tmp */ /* * Byte by byte copy */ .align 4 .byte 0 # 1 mod 4 alignment for LOOPNEZ # (0 mod 4 alignment for LBEG) .Lbackbytecopy: #if XCHAL_HAVE_LOOPS loopnez a4, .Lbackbytecopydone #else /* !XCHAL_HAVE_LOOPS */ beqz a4, .Lbackbytecopydone sub a7, a3, a4 # a7 = start address for source #endif /* !XCHAL_HAVE_LOOPS */ .Lbacknextbyte: addi a3, a3, -1 l8ui a6, a3, 0 addi a5, a5, -1 s8i a6, a5, 0 #if !XCHAL_HAVE_LOOPS bne a3, a7, .Lbacknextbyte # continue loop if # $a3:src != $a7:src_start #endif /* !XCHAL_HAVE_LOOPS */ .Lbackbytecopydone: retw /* * Destination is unaligned */ .align 4 .Lbackdst1mod2: # dst is only byte aligned _bltui a4, 7, .Lbackbytecopy # do short copies byte by byte # copy 1 byte addi a3, a3, -1 l8ui a6, a3, 0 addi a5, a5, -1 s8i a6, a5, 0 addi a4, a4, -1 _bbci.l a5, 1, .Lbackdstaligned # if dst is now aligned, then # return to main algorithm .Lbackdst2mod4: # dst 16-bit aligned # copy 2 bytes _bltui a4, 6, .Lbackbytecopy # do short copies byte by byte addi a3, a3, -2 l8ui a6, a3, 0 l8ui a7, a3, 1 addi a5, a5, -2 s8i a6, a5, 0 s8i a7, a5, 1 addi a4, a4, -2 j .Lbackdstaligned # dst is now aligned, # return to main algorithm ENTRY(__memmove) WEAK(memmove) entry sp, 16 # minimal stack frame # a2/ dst, a3/ src, a4/ len mov a5, a2 # copy dst so that a2 is return value .Lmovecommon: sub a6, a5, a3 bgeu a6, a4, .Lcommon add a5, a5, a4 add a3, a3, a4 _bbsi.l a5, 0, .Lbackdst1mod2 # if dst is 1 mod 2 _bbsi.l a5, 1, .Lbackdst2mod4 # if dst is 2 mod 4 .Lbackdstaligned: # return here from .Lbackdst?mod? once dst is aligned srli a7, a4, 4 # number of loop iterations with 16B # per iteration movi a8, 3 # if source is not aligned, _bany a3, a8, .Lbacksrcunaligned # then use shifting copy /* * Destination and source are word-aligned, use word copy. 
*/ # copy 16 bytes per iteration for word-aligned dst and word-aligned src #if XCHAL_HAVE_LOOPS loopnez a7, .backLoop1done #else /* !XCHAL_HAVE_LOOPS */ beqz a7, .backLoop1done slli a8, a7, 4 sub a8, a3, a8 # a8 = start of first 16B source chunk #endif /* !XCHAL_HAVE_LOOPS */ .backLoop1: addi a3, a3, -16 l32i a7, a3, 12 l32i a6, a3, 8 addi a5, a5, -16 s32i a7, a5, 12 l32i a7, a3, 4 s32i a6, a5, 8 l32i a6, a3, 0 s32i a7, a5, 4 s32i a6, a5, 0 #if !XCHAL_HAVE_LOOPS bne a3, a8, .backLoop1 # continue loop if a3:src != a8:src_start #endif /* !XCHAL_HAVE_LOOPS */ .backLoop1done: bbci.l a4, 3, .Lback2 # copy 8 bytes addi a3, a3, -8 l32i a6, a3, 0 l32i a7, a3, 4 addi a5, a5, -8 s32i a6, a5, 0 s32i a7, a5, 4 .Lback2: bbsi.l a4, 2, .Lback3 bbsi.l a4, 1, .Lback4 bbsi.l a4, 0, .Lback5 retw .Lback3: # copy 4 bytes addi a3, a3, -4 l32i a6, a3, 0 addi a5, a5, -4 s32i a6, a5, 0 bbsi.l a4, 1, .Lback4 bbsi.l a4, 0, .Lback5 retw .Lback4: # copy 2 bytes addi a3, a3, -2 l16ui a6, a3, 0 addi a5, a5, -2 s16i a6, a5, 0 bbsi.l a4, 0, .Lback5 retw .Lback5: # copy 1 byte addi a3, a3, -1 l8ui a6, a3, 0 addi a5, a5, -1 s8i a6, a5, 0 retw /* * Destination is aligned, Source is unaligned */ .align 4 .Lbacksrcunaligned: _beqz a4, .Lbackdone # avoid loading anything for zero-length copies # copy 16 bytes per iteration for word-aligned dst and unaligned src __ssa8 a3 # set shift amount from byte offset #define SIM_CHECKS_ALIGNMENT 1 /* set to 1 when running on ISS with * the lint or ferret client, or 0 * to save a few cycles */ #if XCHAL_UNALIGNED_LOAD_EXCEPTION || SIM_CHECKS_ALIGNMENT and a11, a3, a8 # save unalignment offset for below sub a3, a3, a11 # align a3 #endif l32i a6, a3, 0 # load first word #if XCHAL_HAVE_LOOPS loopnez a7, .backLoop2done #else /* !XCHAL_HAVE_LOOPS */ beqz a7, .backLoop2done slli a10, a7, 4 sub a10, a3, a10 # a10 = start of first 16B source chunk #endif /* !XCHAL_HAVE_LOOPS */ .backLoop2: addi a3, a3, -16 l32i a7, a3, 12 l32i a8, a3, 8 addi a5, a5, -16 __src_b a6, a7, a6 s32i a6, a5, 12 l32i a9, a3, 4 __src_b a7, a8, a7 s32i a7, a5, 8 l32i a6, a3, 0 __src_b a8, a9, a8 s32i a8, a5, 4 __src_b a9, a6, a9 s32i a9, a5, 0 #if !XCHAL_HAVE_LOOPS bne a3, a10, .backLoop2 # continue loop if a3:src != a10:src_start #endif /* !XCHAL_HAVE_LOOPS */ .backLoop2done: bbci.l a4, 3, .Lback12 # copy 8 bytes addi a3, a3, -8 l32i a7, a3, 4 l32i a8, a3, 0 addi a5, a5, -8 __src_b a6, a7, a6 s32i a6, a5, 4 __src_b a7, a8, a7 s32i a7, a5, 0 mov a6, a8 .Lback12: bbci.l a4, 2, .Lback13 # copy 4 bytes addi a3, a3, -4 l32i a7, a3, 0 addi a5, a5, -4 __src_b a6, a7, a6 s32i a6, a5, 0 mov a6, a7 .Lback13: #if XCHAL_UNALIGNED_LOAD_EXCEPTION || SIM_CHECKS_ALIGNMENT add a3, a3, a11 # readjust a3 with correct misalignment #endif bbsi.l a4, 1, .Lback14 bbsi.l a4, 0, .Lback15 .Lbackdone: retw .Lback14: # copy 2 bytes addi a3, a3, -2 l8ui a6, a3, 0 l8ui a7, a3, 1 addi a5, a5, -2 s8i a6, a5, 0 s8i a7, a5, 1 bbsi.l a4, 0, .Lback15 retw .Lback15: # copy 1 byte addi a3, a3, -1 addi a5, a5, -1 l8ui a6, a3, 0 s8i a6, a5, 0 retw ENDPROC(__memmove)
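__memmove's overlap check is the single unsigned comparison "sub a6, a5, a3; bgeu a6, a4": if the distance from src to dst, taken as an unsigned number, is at least len, a forward copy can never clobber source bytes before they are read; the subtraction wraps when dst is below src, so that case is covered by the same test. Here it is in C, with hypothetical helper names.

#include <stdint.h>
#include <stddef.h>

static void copy_fwd(unsigned char *d, const unsigned char *s, size_t n)
{
	while (n--)
		*d++ = *s++;
}

static void copy_back(unsigned char *d, const unsigned char *s, size_t n)
{
	while (n--)
		d[n] = s[n];
}

void *memmove_sketch(void *dst, const void *src, size_t len)
{
	unsigned char *d = dst;
	const unsigned char *s = src;

	/* Same test as "sub a6, a5, a3; bgeu a6, a4, .Lcommon". */
	if ((uintptr_t)d - (uintptr_t)s >= (uintptr_t)len)
		copy_fwd(d, s, len);    /* no harmful overlap: memcpy order */
	else
		copy_back(d, s, len);   /* dst overlaps the tail of src: go backwards */

	return dst;
}

Folding both the non-overlapping case and the dst-below-src case into one branch is why the assembly can jump straight to the shared .Lcommon memcpy path.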
AirFortressIlikara/LS2K0300-linux-4.19
5,383
arch/xtensa/lib/strncpy_user.S
/* * arch/xtensa/lib/strncpy_user.S * * This file is subject to the terms and conditions of the GNU General * Public License. See the file "COPYING" in the main directory of * this archive for more details. * * Returns: -EFAULT if exception before terminator, N if the entire * buffer filled, else strlen. * * Copyright (C) 2002 Tensilica Inc. */ #include <linux/errno.h> #include <linux/linkage.h> #include <variant/core.h> #include <asm/asmmacro.h> /* * char *__strncpy_user(char *dst, const char *src, size_t len) */ #ifdef __XTENSA_EB__ # define MASK0 0xff000000 # define MASK1 0x00ff0000 # define MASK2 0x0000ff00 # define MASK3 0x000000ff #else # define MASK0 0x000000ff # define MASK1 0x0000ff00 # define MASK2 0x00ff0000 # define MASK3 0xff000000 #endif # Register use # a0/ return address # a1/ stack pointer # a2/ return value # a3/ src # a4/ len # a5/ mask0 # a6/ mask1 # a7/ mask2 # a8/ mask3 # a9/ tmp # a10/ tmp # a11/ dst # a12/ tmp .text ENTRY(__strncpy_user) entry sp, 16 # minimal stack frame # a2/ dst, a3/ src, a4/ len mov a11, a2 # leave dst in return value register beqz a4, .Lret # if len is zero movi a5, MASK0 # mask for byte 0 movi a6, MASK1 # mask for byte 1 movi a7, MASK2 # mask for byte 2 movi a8, MASK3 # mask for byte 3 bbsi.l a3, 0, .Lsrc1mod2 # if only 8-bit aligned bbsi.l a3, 1, .Lsrc2mod4 # if only 16-bit aligned .Lsrcaligned: # return here when src is word-aligned srli a12, a4, 2 # number of loop iterations with 4B per loop movi a9, 3 bnone a11, a9, .Laligned j .Ldstunaligned .Lsrc1mod2: # src address is odd EX(11f) l8ui a9, a3, 0 # get byte 0 addi a3, a3, 1 # advance src pointer EX(10f) s8i a9, a11, 0 # store byte 0 beqz a9, .Lret # if byte 0 is zero addi a11, a11, 1 # advance dst pointer addi a4, a4, -1 # decrement len beqz a4, .Lret # if len is zero bbci.l a3, 1, .Lsrcaligned # if src is now word-aligned .Lsrc2mod4: # src address is 2 mod 4 EX(11f) l8ui a9, a3, 0 # get byte 0 /* 1-cycle interlock */ EX(10f) s8i a9, a11, 0 # store byte 0 beqz a9, .Lret # if byte 0 is zero addi a11, a11, 1 # advance dst pointer addi a4, a4, -1 # decrement len beqz a4, .Lret # if len is zero EX(11f) l8ui a9, a3, 1 # get byte 0 addi a3, a3, 2 # advance src pointer EX(10f) s8i a9, a11, 0 # store byte 0 beqz a9, .Lret # if byte 0 is zero addi a11, a11, 1 # advance dst pointer addi a4, a4, -1 # decrement len bnez a4, .Lsrcaligned # if len is nonzero .Lret: sub a2, a11, a2 # compute strlen retw /* * dst is word-aligned, src is word-aligned */ .align 4 # 1 mod 4 alignment for LOOPNEZ .byte 0 # (0 mod 4 alignment for LBEG) .Laligned: #if XCHAL_HAVE_LOOPS loopnez a12, .Loop1done #else beqz a12, .Loop1done slli a12, a12, 2 add a12, a12, a11 # a12 = end of last 4B chunck #endif .Loop1: EX(11f) l32i a9, a3, 0 # get word from src addi a3, a3, 4 # advance src pointer bnone a9, a5, .Lz0 # if byte 0 is zero bnone a9, a6, .Lz1 # if byte 1 is zero bnone a9, a7, .Lz2 # if byte 2 is zero EX(10f) s32i a9, a11, 0 # store word to dst bnone a9, a8, .Lz3 # if byte 3 is zero addi a11, a11, 4 # advance dst pointer #if !XCHAL_HAVE_LOOPS blt a11, a12, .Loop1 #endif .Loop1done: bbci.l a4, 1, .L100 # copy 2 bytes EX(11f) l16ui a9, a3, 0 addi a3, a3, 2 # advance src pointer #ifdef __XTENSA_EB__ bnone a9, a7, .Lz0 # if byte 2 is zero bnone a9, a8, .Lz1 # if byte 3 is zero #else bnone a9, a5, .Lz0 # if byte 0 is zero bnone a9, a6, .Lz1 # if byte 1 is zero #endif EX(10f) s16i a9, a11, 0 addi a11, a11, 2 # advance dst pointer .L100: bbci.l a4, 0, .Lret EX(11f) l8ui a9, a3, 0 /* slot */ EX(10f) s8i a9, a11, 0 beqz a9, .Lret 
# if byte is zero addi a11, a11, 1-3 # advance dst ptr 1, but also cancel # the effect of adding 3 in .Lz3 code /* fall thru to .Lz3 and "retw" */ .Lz3: # byte 3 is zero addi a11, a11, 3 # advance dst pointer sub a2, a11, a2 # compute strlen retw .Lz0: # byte 0 is zero #ifdef __XTENSA_EB__ movi a9, 0 #endif /* __XTENSA_EB__ */ EX(10f) s8i a9, a11, 0 sub a2, a11, a2 # compute strlen retw .Lz1: # byte 1 is zero #ifdef __XTENSA_EB__ extui a9, a9, 16, 16 #endif /* __XTENSA_EB__ */ EX(10f) s16i a9, a11, 0 addi a11, a11, 1 # advance dst pointer sub a2, a11, a2 # compute strlen retw .Lz2: # byte 2 is zero #ifdef __XTENSA_EB__ extui a9, a9, 16, 16 #endif /* __XTENSA_EB__ */ EX(10f) s16i a9, a11, 0 movi a9, 0 EX(10f) s8i a9, a11, 2 addi a11, a11, 2 # advance dst pointer sub a2, a11, a2 # compute strlen retw .align 4 # 1 mod 4 alignment for LOOPNEZ .byte 0 # (0 mod 4 alignment for LBEG) .Ldstunaligned: /* * for now just use byte copy loop */ #if XCHAL_HAVE_LOOPS loopnez a4, .Lunalignedend #else beqz a4, .Lunalignedend add a12, a11, a4 # a12 = ending address #endif /* XCHAL_HAVE_LOOPS */ .Lnextbyte: EX(11f) l8ui a9, a3, 0 addi a3, a3, 1 EX(10f) s8i a9, a11, 0 beqz a9, .Lunalignedend addi a11, a11, 1 #if !XCHAL_HAVE_LOOPS blt a11, a12, .Lnextbyte #endif .Lunalignedend: sub a2, a11, a2 # compute strlen retw ENDPROC(__strncpy_user) .section .fixup, "ax" .align 4 /* For now, just return -EFAULT. Future implementations might * like to clear remaining kernel space, like the fixup * implementation in memset(). Thus, we differentiate between * load/store fixups. */ 10: 11: movi a2, -EFAULT retw
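The header comment of the file above spells out the return contract of __strncpy_user: -EFAULT if a user access faults before the terminator, len if the buffer fills without copying a NUL, otherwise the length of the string copied. The word-at-a-time scanning with MASK0..MASK3 and the exception-table fixups are what make the assembly long; the contract itself is small. Below is a hedged, plain C model of that contract only (function name and behaviour are illustrative; the fault path obviously cannot be modelled in portable C):

#include <stddef.h>

/*
 * Illustrative model of the return contract, not the kernel routine:
 * copy at most len bytes including the terminating NUL and return
 *   - len         if the buffer filled before a NUL was copied,
 *   - strlen(src) (bytes copied before the NUL) otherwise.
 * The real __strncpy_user additionally returns -EFAULT when a user-space
 * access faults before the terminator.
 */
static long strncpy_user_model(char *dst, const char *src, size_t len)
{
	size_t n;

	for (n = 0; n < len; n++) {
		dst[n] = src[n];
		if (src[n] == '\0')
			return n;	/* strlen: NUL itself is not counted */
	}
	return len;			/* buffer exhausted, not NUL-terminated */
}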
AirFortressIlikara/LS2K0300-linux-4.19
7,587
arch/xtensa/mm/misc.S
/* * arch/xtensa/mm/misc.S * * Miscellaneous assembly functions. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2001 - 2007 Tensilica Inc. * * Chris Zankel <chris@zankel.net> */ #include <linux/linkage.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/asmmacro.h> #include <asm/cacheasm.h> #include <asm/tlbflush.h> /* * clear_page and clear_user_page are the same for non-cache-aliased configs. * * clear_page (unsigned long page) * a2 */ ENTRY(clear_page) entry a1, 16 movi a3, 0 __loopi a2, a7, PAGE_SIZE, 32 s32i a3, a2, 0 s32i a3, a2, 4 s32i a3, a2, 8 s32i a3, a2, 12 s32i a3, a2, 16 s32i a3, a2, 20 s32i a3, a2, 24 s32i a3, a2, 28 __endla a2, a7, 32 retw ENDPROC(clear_page) /* * copy_page and copy_user_page are the same for non-cache-aliased configs. * * copy_page (void *to, void *from) * a2 a3 */ ENTRY(copy_page) entry a1, 16 __loopi a2, a4, PAGE_SIZE, 32 l32i a8, a3, 0 l32i a9, a3, 4 s32i a8, a2, 0 s32i a9, a2, 4 l32i a8, a3, 8 l32i a9, a3, 12 s32i a8, a2, 8 s32i a9, a2, 12 l32i a8, a3, 16 l32i a9, a3, 20 s32i a8, a2, 16 s32i a9, a2, 20 l32i a8, a3, 24 l32i a9, a3, 28 s32i a8, a2, 24 s32i a9, a2, 28 addi a2, a2, 32 addi a3, a3, 32 __endl a2, a4 retw ENDPROC(copy_page) #ifdef CONFIG_MMU /* * If we have to deal with cache aliasing, we use temporary memory mappings * to ensure that the source and destination pages have the same color as * the virtual address. We use way 0 and 1 for temporary mappings in such cases. * * The temporary DTLB entries shouldn't be flushed by interrupts, but are * flushed by preemptive task switches. Special code in the * fast_second_level_miss handler re-established the temporary mapping. * It requires that the PPNs for the destination and source addresses are * in a6, and a7, respectively. */ /* TLB miss exceptions are treated special in the following region */ ENTRY(__tlbtemp_mapping_start) #if (DCACHE_WAY_SIZE > PAGE_SIZE) /* * clear_page_alias(void *addr, unsigned long paddr) * a2 a3 */ ENTRY(clear_page_alias) entry a1, 32 /* Skip setting up a temporary DTLB if not aliased low page. */ movi a5, PAGE_OFFSET movi a6, 0 beqz a3, 1f /* Setup a temporary DTLB for the addr. */ addi a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE) mov a4, a2 wdtlb a6, a2 dsync 1: movi a3, 0 __loopi a2, a7, PAGE_SIZE, 32 s32i a3, a2, 0 s32i a3, a2, 4 s32i a3, a2, 8 s32i a3, a2, 12 s32i a3, a2, 16 s32i a3, a2, 20 s32i a3, a2, 24 s32i a3, a2, 28 __endla a2, a7, 32 bnez a6, 1f retw /* We need to invalidate the temporary idtlb entry, if any. */ 1: idtlb a4 dsync retw ENDPROC(clear_page_alias) /* * copy_page_alias(void *to, void *from, * a2 a3 * unsigned long to_paddr, unsigned long from_paddr) * a4 a5 */ ENTRY(copy_page_alias) entry a1, 32 /* Skip setting up a temporary DTLB for destination if not aliased. */ movi a6, 0 movi a7, 0 beqz a4, 1f /* Setup a temporary DTLB for destination. */ addi a6, a4, (PAGE_KERNEL | _PAGE_HW_WRITE) wdtlb a6, a2 dsync /* Skip setting up a temporary DTLB for source if not aliased. */ 1: beqz a5, 1f /* Setup a temporary DTLB for source. 
*/ addi a7, a5, PAGE_KERNEL addi a8, a3, 1 # way1 wdtlb a7, a8 dsync 1: __loopi a2, a4, PAGE_SIZE, 32 l32i a8, a3, 0 l32i a9, a3, 4 s32i a8, a2, 0 s32i a9, a2, 4 l32i a8, a3, 8 l32i a9, a3, 12 s32i a8, a2, 8 s32i a9, a2, 12 l32i a8, a3, 16 l32i a9, a3, 20 s32i a8, a2, 16 s32i a9, a2, 20 l32i a8, a3, 24 l32i a9, a3, 28 s32i a8, a2, 24 s32i a9, a2, 28 addi a2, a2, 32 addi a3, a3, 32 __endl a2, a4 /* We need to invalidate any temporary mapping! */ bnez a6, 1f bnez a7, 2f retw 1: addi a2, a2, -PAGE_SIZE idtlb a2 dsync bnez a7, 2f retw 2: addi a3, a3, -PAGE_SIZE+1 idtlb a3 dsync retw ENDPROC(copy_page_alias) #endif #if (DCACHE_WAY_SIZE > PAGE_SIZE) /* * void __flush_invalidate_dcache_page_alias (addr, phys) * a2 a3 */ ENTRY(__flush_invalidate_dcache_page_alias) entry sp, 16 movi a7, 0 # required for exception handler addi a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE) mov a4, a2 wdtlb a6, a2 dsync ___flush_invalidate_dcache_page a2 a3 idtlb a4 dsync retw ENDPROC(__flush_invalidate_dcache_page_alias) /* * void __invalidate_dcache_page_alias (addr, phys) * a2 a3 */ ENTRY(__invalidate_dcache_page_alias) entry sp, 16 movi a7, 0 # required for exception handler addi a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE) mov a4, a2 wdtlb a6, a2 dsync ___invalidate_dcache_page a2 a3 idtlb a4 dsync retw ENDPROC(__invalidate_dcache_page_alias) #endif ENTRY(__tlbtemp_mapping_itlb) #if (ICACHE_WAY_SIZE > PAGE_SIZE) ENTRY(__invalidate_icache_page_alias) entry sp, 16 addi a6, a3, (PAGE_KERNEL_EXEC | _PAGE_HW_WRITE) mov a4, a2 witlb a6, a2 isync ___invalidate_icache_page a2 a3 iitlb a4 isync retw ENDPROC(__invalidate_icache_page_alias) #endif /* End of special treatment in tlb miss exception */ ENTRY(__tlbtemp_mapping_end) #endif /* CONFIG_MMU /* * void __invalidate_icache_page(ulong start) */ ENTRY(__invalidate_icache_page) entry sp, 16 ___invalidate_icache_page a2 a3 isync retw ENDPROC(__invalidate_icache_page) /* * void __invalidate_dcache_page(ulong start) */ ENTRY(__invalidate_dcache_page) entry sp, 16 ___invalidate_dcache_page a2 a3 dsync retw ENDPROC(__invalidate_dcache_page) /* * void __flush_invalidate_dcache_page(ulong start) */ ENTRY(__flush_invalidate_dcache_page) entry sp, 16 ___flush_invalidate_dcache_page a2 a3 dsync retw ENDPROC(__flush_invalidate_dcache_page) /* * void __flush_dcache_page(ulong start) */ ENTRY(__flush_dcache_page) entry sp, 16 ___flush_dcache_page a2 a3 dsync retw ENDPROC(__flush_dcache_page) /* * void __invalidate_icache_range(ulong start, ulong size) */ ENTRY(__invalidate_icache_range) entry sp, 16 ___invalidate_icache_range a2 a3 a4 isync retw ENDPROC(__invalidate_icache_range) /* * void __flush_invalidate_dcache_range(ulong start, ulong size) */ ENTRY(__flush_invalidate_dcache_range) entry sp, 16 ___flush_invalidate_dcache_range a2 a3 a4 dsync retw ENDPROC(__flush_invalidate_dcache_range) /* * void _flush_dcache_range(ulong start, ulong size) */ ENTRY(__flush_dcache_range) entry sp, 16 ___flush_dcache_range a2 a3 a4 dsync retw ENDPROC(__flush_dcache_range) /* * void _invalidate_dcache_range(ulong start, ulong size) */ ENTRY(__invalidate_dcache_range) entry sp, 16 ___invalidate_dcache_range a2 a3 a4 retw ENDPROC(__invalidate_dcache_range) /* * void _invalidate_icache_all(void) */ ENTRY(__invalidate_icache_all) entry sp, 16 ___invalidate_icache_all a2 a3 isync retw ENDPROC(__invalidate_icache_all) /* * void _flush_invalidate_dcache_all(void) */ ENTRY(__flush_invalidate_dcache_all) entry sp, 16 ___flush_invalidate_dcache_all a2 a3 dsync retw ENDPROC(__flush_invalidate_dcache_all) /* * void 
_invalidate_dcache_all(void) */ ENTRY(__invalidate_dcache_all) entry sp, 16 ___invalidate_dcache_all a2 a3 dsync retw ENDPROC(__invalidate_dcache_all)
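The comment block in the file above explains why clear_page_alias/copy_page_alias exist: when a data-cache way is larger than a page, two virtual mappings of the same physical page can have different cache "colors", so the kernel installs a temporary DTLB entry whose virtual address has the color it needs. A hedged sketch of the color check that motivates those alias variants follows; PAGE_SIZE, DCACHE_WAY_SIZE and the helper names are illustrative stand-ins, not the kernel's interface:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative values; the real ones come from the Xtensa core variant headers. */
#define PAGE_SIZE_MODEL        4096u
#define DCACHE_WAY_SIZE_MODEL  (4u * PAGE_SIZE_MODEL)	/* way > page -> aliasing possible */

/* Cache "color": which page-sized slot of a way an address indexes. */
static unsigned int cache_color(uintptr_t addr)
{
	return (addr & (DCACHE_WAY_SIZE_MODEL - 1)) / PAGE_SIZE_MODEL;
}

/*
 * Two mappings of one physical page can hit different cache lines only when
 * their colors differ; that is the case the *_alias helpers handle by
 * writing through a temporary mapping with the matching color.
 */
static bool needs_alias_handling(uintptr_t kernel_vaddr, uintptr_t user_vaddr)
{
	return DCACHE_WAY_SIZE_MODEL > PAGE_SIZE_MODEL &&
	       cache_color(kernel_vaddr) != cache_color(user_vaddr);
}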
AirFortressIlikara/LS2K0300-linux-4.19
4,664
arch/xtensa/boot/boot-redboot/bootstrap.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <variant/core.h> #include <asm/regs.h> #include <asm/asmmacro.h> #include <asm/cacheasm.h> /* * RB-Data: RedBoot data/bss * P: Boot-Parameters * L: Kernel-Loader * * The Linux-Kernel image including the loader must be loaded * to a position so that the kernel and the boot parameters * can fit in the space before the load address. * ______________________________________________________ * |_RB-Data_|_P_|__________|_L_|___Linux-Kernel___|______| * ^ * ^ Load address * ______________________________________________________ * |___Linux-Kernel___|_P_|_L_|___________________________| * * The loader copies the parameter to the position that will * be the end of the kernel and itself to the end of the * parameter list. */ /* Make sure we have enough space for the 'uncompressor' */ #define STACK_SIZE 32768 #define HEAP_SIZE (131072*4) # a2: Parameter list # a3: Size of parameter list .section .start, "ax" .globl __start /* this must be the first byte of the loader! */ __start: entry sp, 32 # we do not intend to return _call0 _start __start_a0: .align 4 .section .text, "ax" .literal_position .begin literal_prefix .text /* put literals in here! */ .globl _start _start: /* 'reset' window registers */ movi a4, 1 wsr a4, ps rsync rsr a5, windowbase ssl a5 sll a4, a4 wsr a4, windowstart rsync movi a4, 0x00040000 wsr a4, ps rsync /* copy the loader to its address * Note: The loader itself is a very small piece, so we assume we * don't partially overlap. We also assume (even more important) * that the kernel image is out of the way. Usually, when the * load address of this image is not at an arbitrary address, * but aligned to some 10K's we shouldn't overlap. */ /* Note: The assembler cannot relax "addi a0, a0, ..." to an l32r, so we load to a4 first. */ # addi a4, a0, __start - __start_a0 # mov a0, a4 movi a4, __start movi a5, __start_a0 add a4, a0, a4 sub a0, a4, a5 movi a4, __start movi a5, __reloc_end # a0: address where this code has been loaded # a4: compiled address of __start # a5: compiled end address mov.n a7, a0 mov.n a8, a4 1: l32i a10, a7, 0 l32i a11, a7, 4 s32i a10, a8, 0 s32i a11, a8, 4 l32i a10, a7, 8 l32i a11, a7, 12 s32i a10, a8, 8 s32i a11, a8, 12 addi a8, a8, 16 addi a7, a7, 16 blt a8, a5, 1b /* We have to flush and invalidate the caches here before we jump. */ #if XCHAL_DCACHE_IS_WRITEBACK ___flush_dcache_all a5 a6 #endif ___invalidate_icache_all a5 a6 isync movi a11, _reloc jx a11 .globl _reloc _reloc: /* RedBoot is now at the end of the memory, so we don't have * to copy the parameter list. Keep the code around; in case * we need it again. 
*/ #if 0 # a0: load address # a2: start address of parameter list # a3: length of parameter list # a4: __start /* copy the parameter list out of the way */ movi a6, _param_start add a3, a2, a3 2: l32i a8, a2, 0 s32i a8, a6, 0 addi a2, a2, 4 addi a6, a6, 4 blt a2, a3, 2b #endif /* clear BSS section */ movi a6, __bss_start movi a7, __bss_end movi.n a5, 0 3: s32i a5, a6, 0 addi a6, a6, 4 blt a6, a7, 3b movi a5, -16 movi a1, _stack + STACK_SIZE and a1, a1, a5 /* Uncompress the kernel */ # a0: load address # a2: boot parameter # a4: __start movi a3, __image_load sub a4, a3, a4 add a8, a0, a4 # a1 Stack # a8(a4) Load address of the image movi a6, _image_start movi a10, _image_end movi a7, 0x1000000 sub a11, a10, a6 movi a9, complen s32i a11, a9, 0 movi a0, 0 # a6 destination # a7 maximum size of destination # a8 source # a9 ptr to length .extern gunzip movi a4, gunzip beqz a4, 1f callx4 a4 j 2f # a6 destination start # a7 maximum size of destination # a8 source start # a9 ptr to length # a10 destination end 1: l32i a9, a8, 0 l32i a11, a8, 4 s32i a9, a6, 0 s32i a11, a6, 4 l32i a9, a8, 8 l32i a11, a8, 12 s32i a9, a6, 8 s32i a11, a6, 12 addi a6, a6, 16 addi a8, a8, 16 blt a6, a10, 1b /* jump to the kernel */ 2: #if XCHAL_DCACHE_IS_WRITEBACK ___flush_dcache_all a5 a6 #endif ___invalidate_icache_all a5 a6 isync # a2 Boot parameter list movi a0, _image_start jx a0 .align 16 .data .globl avail_ram avail_ram: .long _heap .globl end_avail end_avail: .long _heap + HEAP_SIZE .comm _stack, STACK_SIZE .comm _heap, HEAP_SIZE .globl end_avail .comm complen, 4 .end literal_prefix
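The loader above first recovers its own load address (a0 holds the runtime address of the __start_a0 label, and the difference to the link-time addresses gives the relocation), then copies itself to its link-time address 16 bytes per loop iteration before flushing caches and jumping to _reloc. A hedged C model of that self-relocation step, with all symbols as stand-ins for the linker-provided ones:

#include <stdint.h>
#include <string.h>

struct layout {
	uintptr_t start_linked;    /* link-time address of __start     */
	uintptr_t anchor_linked;   /* link-time address of __start_a0  */
	uintptr_t reloc_end;       /* link-time address of __reloc_end */
};

/*
 * Illustrative only: derive where the loader actually sits from the runtime
 * address of one known label, then copy it to its link-time home in whole
 * 16-byte chunks (possibly a little past the exact end, as the assembly
 * loop also does). The real code flushes/invalidates caches afterwards.
 */
static void relocate_self(const struct layout *l, uintptr_t anchor_runtime)
{
	uintptr_t start_loaded = anchor_runtime + (l->start_linked - l->anchor_linked);
	uint8_t *src = (uint8_t *)start_loaded;
	uint8_t *dst = (uint8_t *)l->start_linked;
	size_t   len = l->reloc_end - l->start_linked;

	for (size_t off = 0; off < len; off += 16)
		memcpy(dst + off, src + off, 16);
}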
AirFortressIlikara/LS2K0300-linux-4.19
1,500
arch/xtensa/boot/boot-elf/bootstrap.S
/* * arch/xtensa/boot/boot-elf/bootstrap.S * * Low-level exception handling * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2004 - 2013 by Tensilica Inc. * * Chris Zankel <chris@zankel.net> * Marc Gauthier <marc@tensilica.com> * Piet Delaney <piet@tensilica.com> */ #include <asm/bootparam.h> #include <asm/initialize_mmu.h> #include <asm/vectors.h> #include <linux/linkage.h> .section .ResetVector.text, "ax" .global _ResetVector .global reset _ResetVector: _j _SetupMMU .begin no-absolute-literals .literal_position #if defined(CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX) && \ XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY .literal RomInitAddr, CONFIG_KERNEL_LOAD_ADDRESS #else .literal RomInitAddr, KERNELOFFSET #endif #ifndef CONFIG_PARSE_BOOTPARAM .literal RomBootParam, 0 #else .literal RomBootParam, _bootparam .align 4 _bootparam: .short BP_TAG_FIRST .short 4 .long BP_VERSION .short BP_TAG_LAST .short 0 .long 0 #endif .align 4 _SetupMMU: movi a0, 0 wsr a0, windowbase rsync movi a0, 1 wsr a0, windowstart rsync movi a0, 0x1F wsr a0, ps rsync #ifndef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX initialize_mmu #endif .end no-absolute-literals rsil a0, XCHAL_DEBUGLEVEL-1 rsync reset: l32r a0, RomInitAddr l32r a2, RomBootParam movi a3, 0 movi a4, 0 jx a0
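The _bootparam data above builds a minimal boot-parameter list: a first record tagged BP_TAG_FIRST with a 4-byte payload carrying BP_VERSION, followed by a BP_TAG_LAST terminator, whose address is handed to the kernel in a2. The directives imply a record layout of 16-bit tag, 16-bit payload size, then the payload; the struct below is an illustrative reading of that layout, not the definition from asm/bootparam.h:

#include <stdint.h>

/* Hypothetical model of one boot-parameter record, inferred from the
 * .short/.short/.long directives above. */
struct bp_tag_model {
	uint16_t id;	/* e.g. BP_TAG_FIRST or BP_TAG_LAST */
	uint16_t size;	/* payload size in bytes            */
	uint32_t data;	/* payload (BP_VERSION for the first record) */
};

/* The list built here is just { {FIRST, 4, VERSION}, {LAST, 0, 0} }. */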
AirFortressIlikara/LS2K0300-linux-4.19
4,480
arch/s390/kernel/pgm_check.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Program check table. * * Copyright IBM Corp. 2012 */ #include <linux/linkage.h> #define PGM_CHECK(handler) .long handler #define PGM_CHECK_DEFAULT PGM_CHECK(default_trap_handler) /* * The program check table contains exactly 128 (0x00-0x7f) entries. Each * line defines the function to be called corresponding to the program check * interruption code. */ .section .rodata, "a" ENTRY(pgm_check_table) PGM_CHECK_DEFAULT /* 00 */ PGM_CHECK(illegal_op) /* 01 */ PGM_CHECK(privileged_op) /* 02 */ PGM_CHECK(execute_exception) /* 03 */ PGM_CHECK(do_protection_exception) /* 04 */ PGM_CHECK(addressing_exception) /* 05 */ PGM_CHECK(specification_exception) /* 06 */ PGM_CHECK(data_exception) /* 07 */ PGM_CHECK(overflow_exception) /* 08 */ PGM_CHECK(divide_exception) /* 09 */ PGM_CHECK(overflow_exception) /* 0a */ PGM_CHECK(divide_exception) /* 0b */ PGM_CHECK(hfp_overflow_exception) /* 0c */ PGM_CHECK(hfp_underflow_exception) /* 0d */ PGM_CHECK(hfp_significance_exception) /* 0e */ PGM_CHECK(hfp_divide_exception) /* 0f */ PGM_CHECK(do_dat_exception) /* 10 */ PGM_CHECK(do_dat_exception) /* 11 */ PGM_CHECK(translation_exception) /* 12 */ PGM_CHECK(special_op_exception) /* 13 */ PGM_CHECK_DEFAULT /* 14 */ PGM_CHECK(operand_exception) /* 15 */ PGM_CHECK_DEFAULT /* 16 */ PGM_CHECK_DEFAULT /* 17 */ PGM_CHECK(transaction_exception) /* 18 */ PGM_CHECK_DEFAULT /* 19 */ PGM_CHECK_DEFAULT /* 1a */ PGM_CHECK(vector_exception) /* 1b */ PGM_CHECK(space_switch_exception) /* 1c */ PGM_CHECK(hfp_sqrt_exception) /* 1d */ PGM_CHECK_DEFAULT /* 1e */ PGM_CHECK_DEFAULT /* 1f */ PGM_CHECK_DEFAULT /* 20 */ PGM_CHECK_DEFAULT /* 21 */ PGM_CHECK_DEFAULT /* 22 */ PGM_CHECK_DEFAULT /* 23 */ PGM_CHECK_DEFAULT /* 24 */ PGM_CHECK_DEFAULT /* 25 */ PGM_CHECK_DEFAULT /* 26 */ PGM_CHECK_DEFAULT /* 27 */ PGM_CHECK_DEFAULT /* 28 */ PGM_CHECK_DEFAULT /* 29 */ PGM_CHECK_DEFAULT /* 2a */ PGM_CHECK_DEFAULT /* 2b */ PGM_CHECK_DEFAULT /* 2c */ PGM_CHECK_DEFAULT /* 2d */ PGM_CHECK_DEFAULT /* 2e */ PGM_CHECK_DEFAULT /* 2f */ PGM_CHECK_DEFAULT /* 30 */ PGM_CHECK_DEFAULT /* 31 */ PGM_CHECK_DEFAULT /* 32 */ PGM_CHECK_DEFAULT /* 33 */ PGM_CHECK_DEFAULT /* 34 */ PGM_CHECK_DEFAULT /* 35 */ PGM_CHECK_DEFAULT /* 36 */ PGM_CHECK_DEFAULT /* 37 */ PGM_CHECK(do_dat_exception) /* 38 */ PGM_CHECK(do_dat_exception) /* 39 */ PGM_CHECK(do_dat_exception) /* 3a */ PGM_CHECK(do_dat_exception) /* 3b */ PGM_CHECK_DEFAULT /* 3c */ PGM_CHECK_DEFAULT /* 3d */ PGM_CHECK_DEFAULT /* 3e */ PGM_CHECK_DEFAULT /* 3f */ PGM_CHECK_DEFAULT /* 40 */ PGM_CHECK_DEFAULT /* 41 */ PGM_CHECK_DEFAULT /* 42 */ PGM_CHECK_DEFAULT /* 43 */ PGM_CHECK_DEFAULT /* 44 */ PGM_CHECK_DEFAULT /* 45 */ PGM_CHECK_DEFAULT /* 46 */ PGM_CHECK_DEFAULT /* 47 */ PGM_CHECK_DEFAULT /* 48 */ PGM_CHECK_DEFAULT /* 49 */ PGM_CHECK_DEFAULT /* 4a */ PGM_CHECK_DEFAULT /* 4b */ PGM_CHECK_DEFAULT /* 4c */ PGM_CHECK_DEFAULT /* 4d */ PGM_CHECK_DEFAULT /* 4e */ PGM_CHECK_DEFAULT /* 4f */ PGM_CHECK_DEFAULT /* 50 */ PGM_CHECK_DEFAULT /* 51 */ PGM_CHECK_DEFAULT /* 52 */ PGM_CHECK_DEFAULT /* 53 */ PGM_CHECK_DEFAULT /* 54 */ PGM_CHECK_DEFAULT /* 55 */ PGM_CHECK_DEFAULT /* 56 */ PGM_CHECK_DEFAULT /* 57 */ PGM_CHECK_DEFAULT /* 58 */ PGM_CHECK_DEFAULT /* 59 */ PGM_CHECK_DEFAULT /* 5a */ PGM_CHECK_DEFAULT /* 5b */ PGM_CHECK_DEFAULT /* 5c */ PGM_CHECK_DEFAULT /* 5d */ PGM_CHECK_DEFAULT /* 5e */ PGM_CHECK_DEFAULT /* 5f */ PGM_CHECK_DEFAULT /* 60 */ PGM_CHECK_DEFAULT /* 61 */ PGM_CHECK_DEFAULT /* 62 */ PGM_CHECK_DEFAULT /* 63 */ PGM_CHECK_DEFAULT /* 64 */ PGM_CHECK_DEFAULT /* 65 */ 
PGM_CHECK_DEFAULT /* 66 */ PGM_CHECK_DEFAULT /* 67 */ PGM_CHECK_DEFAULT /* 68 */ PGM_CHECK_DEFAULT /* 69 */ PGM_CHECK_DEFAULT /* 6a */ PGM_CHECK_DEFAULT /* 6b */ PGM_CHECK_DEFAULT /* 6c */ PGM_CHECK_DEFAULT /* 6d */ PGM_CHECK_DEFAULT /* 6e */ PGM_CHECK_DEFAULT /* 6f */ PGM_CHECK_DEFAULT /* 70 */ PGM_CHECK_DEFAULT /* 71 */ PGM_CHECK_DEFAULT /* 72 */ PGM_CHECK_DEFAULT /* 73 */ PGM_CHECK_DEFAULT /* 74 */ PGM_CHECK_DEFAULT /* 75 */ PGM_CHECK_DEFAULT /* 76 */ PGM_CHECK_DEFAULT /* 77 */ PGM_CHECK_DEFAULT /* 78 */ PGM_CHECK_DEFAULT /* 79 */ PGM_CHECK_DEFAULT /* 7a */ PGM_CHECK_DEFAULT /* 7b */ PGM_CHECK_DEFAULT /* 7c */ PGM_CHECK_DEFAULT /* 7d */ PGM_CHECK_DEFAULT /* 7e */ PGM_CHECK_DEFAULT /* 7f */
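The table above has exactly 128 .long entries, one per program-check interruption code 0x00..0x7f; the low-level handler keeps only the low seven bits of the code and branches to the selected routine, with unassigned codes pointing at default_trap_handler. A hedged C model of that dispatch (types and names are stand-ins):

#include <stdint.h>

typedef void (*handler_fn)(void *regs);

#define PGM_TABLE_ENTRIES 128	/* interruption codes 0x00..0x7f */

/* Illustrative table; the real one is filled with the handlers listed above. */
static handler_fn pgm_check_table_model[PGM_TABLE_ENTRIES];

static void dispatch_pgm_check(uint16_t int_code, void *regs)
{
	handler_fn h = pgm_check_table_model[int_code & 0x7f];

	if (h)		/* the real table has no empty slots: unassigned codes
			 * resolve to default_trap_handler */
		h(regs);
}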
AirFortressIlikara/LS2K0300-linux-4.19
3,099
arch/s390/kernel/head64.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright IBM Corp. 1999, 2010 * * Author(s): Hartmut Penner <hp@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> * Rob van der Heij <rvdhei@iae.nl> * Heiko Carstens <heiko.carstens@de.ibm.com> * */ #include <linux/init.h> #include <linux/linkage.h> #include <asm/asm-offsets.h> #include <asm/thread_info.h> #include <asm/page.h> __HEAD ENTRY(startup_continue) tm __LC_STFLE_FAC_LIST+5,0x80 # LPP available ? jz 0f xc __LC_LPP+1(7,0),__LC_LPP+1 # clear lpp and current_pid mvi __LC_LPP,0x80 # and set LPP_MAGIC .insn s,0xb2800000,__LC_LPP # load program parameter 0: larl %r1,tod_clock_base mvc 0(16,%r1),__LC_BOOT_CLOCK larl %r13,.LPG1 # get base lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers lg %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area # move IPL device to lowcore larl %r0,boot_vdso_data stg %r0,__LC_VDSO_PER_CPU # # Setup stack # larl %r14,init_task stg %r14,__LC_CURRENT larl %r15,init_thread_union aghi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER) # init_task_union + THREAD_SIZE stg %r15,__LC_KERNEL_STACK # set end of kernel stack aghi %r15,-160 # # Early setup functions that may not rely on an initialized bss section, # like moving the initrd. Returns with an initialized bss section. # brasl %r14,startup_init_nobss # # Early machine initialization and detection functions. # brasl %r14,startup_init # check control registers stctg %c0,%c15,0(%r15) oi 6(%r15),0x60 # enable sigp emergency & external call oi 4(%r15),0x10 # switch on low address proctection lctlg %c0,%c15,0(%r15) lam 0,15,.Laregs-.LPG1(%r13) # load acrs needed by uaccess brasl %r14,start_kernel # go to C code # # We returned from start_kernel ?!? PANIK # basr %r13,0 lpswe .Ldw-.(%r13) # load disabled wait psw .align 16 .LPG1: .Lctl: .quad 0x04040000 # cr0: AFP registers & secondary space .quad 0 # cr1: primary space segment table .quad .Lduct # cr2: dispatchable unit control table .quad 0 # cr3: instruction authorization .quad 0xffff # cr4: instruction authorization .quad .Lduct # cr5: primary-aste origin .quad 0 # cr6: I/O interrupts .quad 0 # cr7: secondary space segment table .quad 0 # cr8: access registers translation .quad 0 # cr9: tracing off .quad 0 # cr10: tracing off .quad 0 # cr11: tracing off .quad 0 # cr12: tracing off .quad 0 # cr13: home space segment table .quad 0xc0000000 # cr14: machine check handling off .quad .Llinkage_stack # cr15: linkage stack operations .Lpcmsk:.quad 0x0000000180000000 .L4malign:.quad 0xffffffffffc00000 .Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8 .Lnop: .long 0x07000700 .Lparmaddr: .quad PARMAREA .align 64 .Lduct: .long 0,.Laste,.Laste,0,.Lduald,0,0,0 .long 0,0,0,0,0,0,0,0 .Laste: .quad 0,0xffffffffffffffff,0,0,0,0,0,0 .align 128 .Lduald:.rept 8 .long 0x80000000,0,0,0 # invalid access-list entries .endr .Llinkage_stack: .long 0,0,0x89000000,0,0,0,0x8a000000,0 .Ldw: .quad 0x0002000180000000,0x0000000000000000 .Laregs:.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
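The stack setup in the file above points %r15 at the end of init_thread_union (base plus 1 << (PAGE_SHIFT + THREAD_SIZE_ORDER)), records that end in __LC_KERNEL_STACK, and then backs off 160 bytes, which matches the standard s390 stack frame/register save area, before calling into C. A hedged arithmetic sketch with illustrative constants:

#include <stdint.h>

/* Illustrative values; the real ones come from the s390 headers. */
#define PAGE_SHIFT_MODEL        12
#define THREAD_SIZE_ORDER_MODEL 2
#define THREAD_SIZE_MODEL       (1UL << (PAGE_SHIFT_MODEL + THREAD_SIZE_ORDER_MODEL)) /* 16 KiB here */

/* Model of the %r15 setup: end of the thread union minus the 160-byte frame. */
static uintptr_t initial_stack_pointer(uintptr_t init_thread_union_base)
{
	uintptr_t stack_end = init_thread_union_base + THREAD_SIZE_MODEL;

	return stack_end - 160;	/* matches "aghi %r15,-160" */
}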
AirFortressIlikara/LS2K0300-linux-4.19
41,064
arch/s390/kernel/entry.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * S390 low-level entry points. * * Copyright IBM Corp. 1999, 2012 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), * Hartmut Penner (hp@de.ibm.com), * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), * Heiko Carstens <heiko.carstens@de.ibm.com> */ #include <linux/init.h> #include <linux/linkage.h> #include <asm/alternative-asm.h> #include <asm/processor.h> #include <asm/cache.h> #include <asm/ctl_reg.h> #include <asm/dwarf.h> #include <asm/errno.h> #include <asm/ptrace.h> #include <asm/thread_info.h> #include <asm/asm-offsets.h> #include <asm/unistd.h> #include <asm/page.h> #include <asm/sigp.h> #include <asm/irq.h> #include <asm/vx-insn.h> #include <asm/setup.h> #include <asm/nmi.h> #include <asm/export.h> #include <asm/nospec-insn.h> __PT_R0 = __PT_GPRS __PT_R1 = __PT_GPRS + 8 __PT_R2 = __PT_GPRS + 16 __PT_R3 = __PT_GPRS + 24 __PT_R4 = __PT_GPRS + 32 __PT_R5 = __PT_GPRS + 40 __PT_R6 = __PT_GPRS + 48 __PT_R7 = __PT_GPRS + 56 __PT_R8 = __PT_GPRS + 64 __PT_R9 = __PT_GPRS + 72 __PT_R10 = __PT_GPRS + 80 __PT_R11 = __PT_GPRS + 88 __PT_R12 = __PT_GPRS + 96 __PT_R13 = __PT_GPRS + 104 __PT_R14 = __PT_GPRS + 112 __PT_R15 = __PT_GPRS + 120 STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER STACK_SIZE = 1 << STACK_SHIFT STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE _TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ _TIF_UPROBE | _TIF_GUARDED_STORAGE | _TIF_PATCH_PENDING) _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ _TIF_SYSCALL_TRACEPOINT) _CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE_PRIMARY | \ _CIF_ASCE_SECONDARY | _CIF_FPU) _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART) _LPP_OFFSET = __LC_LPP #define BASED(name) name-cleanup_critical(%r13) .macro TRACE_IRQS_ON #ifdef CONFIG_TRACE_IRQFLAGS basr %r2,%r0 brasl %r14,trace_hardirqs_on_caller #endif .endm .macro TRACE_IRQS_OFF #ifdef CONFIG_TRACE_IRQFLAGS basr %r2,%r0 brasl %r14,trace_hardirqs_off_caller #endif .endm .macro LOCKDEP_SYS_EXIT #ifdef CONFIG_LOCKDEP tm __PT_PSW+1(%r11),0x01 # returning to user ? jz .+10 brasl %r14,lockdep_sys_exit #endif .endm .macro CHECK_STACK stacksize,savearea #ifdef CONFIG_CHECK_STACK tml %r15,\stacksize - CONFIG_STACK_GUARD lghi %r14,\savearea jz stack_overflow #endif .endm .macro SWITCH_ASYNC savearea,timer tmhh %r8,0x0001 # interrupting from user ? jnz 1f lgr %r14,%r9 slg %r14,BASED(.Lcritical_start) clg %r14,BASED(.Lcritical_length) jhe 0f lghi %r11,\savearea # inside critical section, do cleanup brasl %r14,cleanup_critical tmhh %r8,0x0001 # retest problem state after cleanup jnz 1f 0: lg %r14,__LC_ASYNC_STACK # are we already on the async stack? 
slgr %r14,%r15 srag %r14,%r14,STACK_SHIFT jnz 2f CHECK_STACK 1<<STACK_SHIFT,\savearea aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) j 3f 1: UPDATE_VTIME %r14,%r15,\timer BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP 2: lg %r15,__LC_ASYNC_STACK # load async stack 3: la %r11,STACK_FRAME_OVERHEAD(%r15) .endm .macro UPDATE_VTIME w1,w2,enter_timer lg \w1,__LC_EXIT_TIMER lg \w2,__LC_LAST_UPDATE_TIMER slg \w1,\enter_timer slg \w2,__LC_EXIT_TIMER alg \w1,__LC_USER_TIMER alg \w2,__LC_SYSTEM_TIMER stg \w1,__LC_USER_TIMER stg \w2,__LC_SYSTEM_TIMER mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer .endm .macro REENABLE_IRQS stg %r8,__LC_RETURN_PSW ni __LC_RETURN_PSW,0xbf ssm __LC_RETURN_PSW .endm .macro STCK savearea #ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES .insn s,0xb27c0000,\savearea # store clock fast #else .insn s,0xb2050000,\savearea # store clock #endif .endm /* * The TSTMSK macro generates a test-under-mask instruction by * calculating the memory offset for the specified mask value. * Mask value can be any constant. The macro shifts the mask * value to calculate the memory offset for the test-under-mask * instruction. */ .macro TSTMSK addr, mask, size=8, bytepos=0 .if (\bytepos < \size) && (\mask >> 8) .if (\mask & 0xff) .error "Mask exceeds byte boundary" .endif TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)" .exitm .endif .ifeq \mask .error "Mask must not be zero" .endif off = \size - \bytepos - 1 tm off+\addr, \mask .endm .macro BPOFF ALTERNATIVE "", ".long 0xb2e8c000", 82 .endm .macro BPON ALTERNATIVE "", ".long 0xb2e8d000", 82 .endm .macro BPENTER tif_ptr,tif_mask ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .long 0xb2e8d000", \ "", 82 .endm .macro BPEXIT tif_ptr,tif_mask TSTMSK \tif_ptr,\tif_mask ALTERNATIVE "jz .+8; .long 0xb2e8c000", \ "jnz .+8; .long 0xb2e8d000", 82 .endm GEN_BR_THUNK %r9 GEN_BR_THUNK %r14 GEN_BR_THUNK %r14,%r11 .section .kprobes.text, "ax" .Ldummy: /* * This nop exists only in order to avoid that __switch_to starts at * the beginning of the kprobes text section. In that case we would * have several symbols at the same address. E.g. objdump would take * an arbitrary symbol name when disassembling this code. * With the added nop in between the __switch_to symbol is unique * again. 
*/ nop 0 ENTRY(__bpon) .globl __bpon BPON BR_EX %r14 /* * Scheduler resume function, called by switch_to * gpr2 = (task_struct *) prev * gpr3 = (task_struct *) next * Returns: * gpr2 = prev */ ENTRY(__switch_to) stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task lghi %r4,__TASK_stack lghi %r1,__TASK_thread lg %r5,0(%r4,%r3) # start of kernel stack of next stg %r15,__THREAD_ksp(%r1,%r2) # store kernel stack of prev lgr %r15,%r5 aghi %r15,STACK_INIT # end of kernel stack of next stg %r3,__LC_CURRENT # store task struct of next stg %r15,__LC_KERNEL_STACK # store end of kernel stack lg %r15,__THREAD_ksp(%r1,%r3) # load kernel stack of next aghi %r3,__TASK_pid mvc __LC_CURRENT_PID(4,%r0),0(%r3) # store pid of next lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40 BR_EX %r14 .L__critical_start: #if IS_ENABLED(CONFIG_KVM) /* * sie64a calling convention: * %r2 pointer to sie control block * %r3 guest register save area */ ENTRY(sie64a) stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers lg %r12,__LC_CURRENT stg %r2,__SF_SIE_CONTROL(%r15) # save control block pointer stg %r3,__SF_SIE_SAVEAREA(%r15) # save guest register save area xc __SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0 mvc __SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags TSTMSK __LC_CPU_FLAGS,_CIF_FPU # load guest fp/vx registers ? jno .Lsie_load_guest_gprs brasl %r14,load_fpu_regs # load guest fp/vx regs .Lsie_load_guest_gprs: lmg %r0,%r13,0(%r3) # load guest gprs 0-13 lg %r14,__LC_GMAP # get gmap pointer ltgr %r14,%r14 jz .Lsie_gmap lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce .Lsie_gmap: lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now tm __SIE_PROG20+3(%r14),3 # last exit... jnz .Lsie_skip TSTMSK __LC_CPU_FLAGS,_CIF_FPU jo .Lsie_skip # exit if fp/vx regs changed BPEXIT __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST) .Lsie_entry: sie 0(%r14) .Lsie_exit: BPOFF BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST) .Lsie_skip: ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE lctlg %c1,%c1,__LC_USER_ASCE # load primary asce .Lsie_done: # some program checks are suppressing. C code (e.g. do_protection_exception) # will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There # are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable. # Other instructions between sie64a and .Lsie_done should not cause program # interrupts. So lets use 3 nops as a landing pad for all possible rewinds. # See also .Lcleanup_sie .Lrewind_pad6: nopr 7 .Lrewind_pad4: nopr 7 .Lrewind_pad2: nopr 7 .globl sie_exit sie_exit: lg %r14,__SF_SIE_SAVEAREA(%r15) # load guest register save area stmg %r0,%r13,0(%r14) # save guest gprs 0-13 xgr %r0,%r0 # clear guest registers to xgr %r1,%r1 # prevent speculative use xgr %r2,%r2 xgr %r3,%r3 xgr %r4,%r4 xgr %r5,%r5 lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers lg %r2,__SF_SIE_REASON(%r15) # return exit reason code BR_EX %r14 .Lsie_fault: lghi %r14,-EFAULT stg %r14,__SF_SIE_REASON(%r15) # set exit reason code j sie_exit EX_TABLE(.Lrewind_pad6,.Lsie_fault) EX_TABLE(.Lrewind_pad4,.Lsie_fault) EX_TABLE(.Lrewind_pad2,.Lsie_fault) EX_TABLE(sie_exit,.Lsie_fault) EXPORT_SYMBOL(sie64a) EXPORT_SYMBOL(sie_exit) #endif /* * SVC interrupt handler routine. System calls are synchronous events and * are executed with interrupts enabled. 
*/ ENTRY(system_call) stpt __LC_SYNC_ENTER_TIMER .Lsysc_stmg: stmg %r8,%r15,__LC_SAVE_AREA_SYNC BPOFF lg %r12,__LC_CURRENT lghi %r13,__TASK_thread lghi %r14,_PIF_SYSCALL .Lsysc_per: lg %r15,__LC_KERNEL_STACK la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs .Lsysc_vtime: UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP stmg %r0,%r7,__PT_R0(%r11) mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC stg %r14,__PT_FLAGS(%r11) .Lsysc_do_svc: # clear user controlled register to prevent speculative use xgr %r0,%r0 # load address of system call table lg %r10,__THREAD_sysc_table(%r13,%r12) llgh %r8,__PT_INT_CODE+2(%r11) slag %r8,%r8,2 # shift and test for svc 0 jnz .Lsysc_nr_ok # svc 0: system call number in %r1 llgfr %r1,%r1 # clear high word in r1 cghi %r1,NR_syscalls jnl .Lsysc_nr_ok sth %r1,__PT_INT_CODE+2(%r11) slag %r8,%r1,2 .Lsysc_nr_ok: xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) stg %r2,__PT_ORIG_GPR2(%r11) stg %r7,STACK_FRAME_OVERHEAD(%r15) lgf %r9,0(%r8,%r10) # get system call add. TSTMSK __TI_flags(%r12),_TIF_TRACE jnz .Lsysc_tracesys BASR_EX %r14,%r9 # call sys_xxxx stg %r2,__PT_R2(%r11) # store return value .Lsysc_return: #ifdef CONFIG_DEBUG_RSEQ lgr %r2,%r11 brasl %r14,rseq_syscall #endif LOCKDEP_SYS_EXIT .Lsysc_tif: TSTMSK __PT_FLAGS(%r11),_PIF_WORK jnz .Lsysc_work TSTMSK __TI_flags(%r12),_TIF_WORK jnz .Lsysc_work # check for work TSTMSK __LC_CPU_FLAGS,_CIF_WORK jnz .Lsysc_work BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP .Lsysc_restore: lg %r14,__LC_VDSO_PER_CPU lmg %r0,%r10,__PT_R0(%r11) mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) .Lsysc_exit_timer: stpt __LC_EXIT_TIMER mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER lmg %r11,%r15,__PT_R11(%r11) lpswe __LC_RETURN_PSW .Lsysc_done: # # One of the work bits is on. Find out which one. # .Lsysc_work: TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING jo .Lsysc_mcck_pending TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED jo .Lsysc_reschedule TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL_RESTART jo .Lsysc_syscall_restart #ifdef CONFIG_UPROBES TSTMSK __TI_flags(%r12),_TIF_UPROBE jo .Lsysc_uprobe_notify #endif TSTMSK __TI_flags(%r12),_TIF_GUARDED_STORAGE jo .Lsysc_guarded_storage TSTMSK __PT_FLAGS(%r11),_PIF_PER_TRAP jo .Lsysc_singlestep #ifdef CONFIG_LIVEPATCH TSTMSK __TI_flags(%r12),_TIF_PATCH_PENDING jo .Lsysc_patch_pending # handle live patching just before # signals and possible syscall restart #endif TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL_RESTART jo .Lsysc_syscall_restart TSTMSK __TI_flags(%r12),_TIF_SIGPENDING jo .Lsysc_sigpending TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME jo .Lsysc_notify_resume TSTMSK __LC_CPU_FLAGS,_CIF_FPU jo .Lsysc_vxrs TSTMSK __LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY) jnz .Lsysc_asce j .Lsysc_return # beware of critical section cleanup # # _TIF_NEED_RESCHED is set, call schedule # .Lsysc_reschedule: larl %r14,.Lsysc_return jg schedule # # _CIF_MCCK_PENDING is set, call handler # .Lsysc_mcck_pending: larl %r14,.Lsysc_return jg s390_handle_mcck # TIF bit will be cleared by handler # # _CIF_ASCE_PRIMARY and/or _CIF_ASCE_SECONDARY set, load user space asce # .Lsysc_asce: ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_SECONDARY lctlg %c7,%c7,__LC_VDSO_ASCE # load secondary asce TSTMSK __LC_CPU_FLAGS,_CIF_ASCE_PRIMARY jz .Lsysc_return #ifndef CONFIG_HAVE_MARCH_Z10_FEATURES tm __LC_STFLE_FAC_LIST+3,0x10 # has MVCOS ? 
jnz .Lsysc_set_fs_fixup ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY lctlg %c1,%c1,__LC_USER_ASCE # load primary asce j .Lsysc_return .Lsysc_set_fs_fixup: #endif larl %r14,.Lsysc_return jg set_fs_fixup # # CIF_FPU is set, restore floating-point controls and floating-point registers. # .Lsysc_vxrs: larl %r14,.Lsysc_return jg load_fpu_regs # # _TIF_SIGPENDING is set, call do_signal # .Lsysc_sigpending: lgr %r2,%r11 # pass pointer to pt_regs brasl %r14,do_signal TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL jno .Lsysc_return .Lsysc_do_syscall: lghi %r13,__TASK_thread lmg %r2,%r7,__PT_R2(%r11) # load svc arguments lghi %r1,0 # svc 0 returns -ENOSYS j .Lsysc_do_svc # # _TIF_NOTIFY_RESUME is set, call do_notify_resume # .Lsysc_notify_resume: lgr %r2,%r11 # pass pointer to pt_regs larl %r14,.Lsysc_return jg do_notify_resume # # _TIF_UPROBE is set, call uprobe_notify_resume # #ifdef CONFIG_UPROBES .Lsysc_uprobe_notify: lgr %r2,%r11 # pass pointer to pt_regs larl %r14,.Lsysc_return jg uprobe_notify_resume #endif # # _TIF_GUARDED_STORAGE is set, call guarded_storage_load # .Lsysc_guarded_storage: lgr %r2,%r11 # pass pointer to pt_regs larl %r14,.Lsysc_return jg gs_load_bc_cb # # _TIF_PATCH_PENDING is set, call klp_update_patch_state # #ifdef CONFIG_LIVEPATCH .Lsysc_patch_pending: lg %r2,__LC_CURRENT # pass pointer to task struct larl %r14,.Lsysc_return jg klp_update_patch_state #endif # # _PIF_PER_TRAP is set, call do_per_trap # .Lsysc_singlestep: ni __PT_FLAGS+7(%r11),255-_PIF_PER_TRAP lgr %r2,%r11 # pass pointer to pt_regs larl %r14,.Lsysc_return jg do_per_trap # # _PIF_SYSCALL_RESTART is set, repeat the current system call # .Lsysc_syscall_restart: ni __PT_FLAGS+7(%r11),255-_PIF_SYSCALL_RESTART lmg %r1,%r7,__PT_R1(%r11) # load svc arguments lg %r2,__PT_ORIG_GPR2(%r11) j .Lsysc_do_svc # # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before # and after the system call # .Lsysc_tracesys: lgr %r2,%r11 # pass pointer to pt_regs la %r3,0 llgh %r0,__PT_INT_CODE+2(%r11) stg %r0,__PT_R2(%r11) brasl %r14,do_syscall_trace_enter lghi %r0,NR_syscalls clgr %r0,%r2 jnh .Lsysc_tracenogo sllg %r8,%r2,2 lgf %r9,0(%r8,%r10) .Lsysc_tracego: lmg %r3,%r7,__PT_R3(%r11) stg %r7,STACK_FRAME_OVERHEAD(%r15) lg %r2,__PT_ORIG_GPR2(%r11) BASR_EX %r14,%r9 # call sys_xxx stg %r2,__PT_R2(%r11) # store return value .Lsysc_tracenogo: TSTMSK __TI_flags(%r12),_TIF_TRACE jz .Lsysc_return lgr %r2,%r11 # pass pointer to pt_regs larl %r14,.Lsysc_return jg do_syscall_trace_exit # # a new process exits the kernel with ret_from_fork # ENTRY(ret_from_fork) la %r11,STACK_FRAME_OVERHEAD(%r15) lg %r12,__LC_CURRENT brasl %r14,schedule_tail TRACE_IRQS_ON ssm __LC_SVC_NEW_PSW # reenable interrupts tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ? 
jne .Lsysc_tracenogo # it's a kernel thread lmg %r9,%r10,__PT_R9(%r11) # load gprs ENTRY(kernel_thread_starter) la %r2,0(%r10) BASR_EX %r14,%r9 j .Lsysc_tracenogo /* * Program check handler routine */ ENTRY(pgm_check_handler) stpt __LC_SYNC_ENTER_TIMER BPOFF stmg %r8,%r15,__LC_SAVE_AREA_SYNC lg %r10,__LC_LAST_BREAK lg %r12,__LC_CURRENT lghi %r11,0 larl %r13,cleanup_critical lmg %r8,%r9,__LC_PGM_OLD_PSW tmhh %r8,0x0001 # test problem state bit jnz 2f # -> fault in user space #if IS_ENABLED(CONFIG_KVM) # cleanup critical section for program checks in sie64a lgr %r14,%r9 slg %r14,BASED(.Lsie_critical_start) clg %r14,BASED(.Lsie_critical_length) jhe 0f lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE lctlg %c1,%c1,__LC_USER_ASCE # load primary asce larl %r9,sie_exit # skip forward to sie_exit lghi %r11,_PIF_GUEST_FAULT #endif 0: tmhh %r8,0x4000 # PER bit set in old PSW ? jnz 1f # -> enabled, can't be a double fault tm __LC_PGM_ILC+3,0x80 # check for per exception jnz .Lpgm_svcper # -> single stepped svc 1: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) j 4f 2: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP lg %r15,__LC_KERNEL_STACK lgr %r14,%r12 aghi %r14,__TASK_thread # pointer to thread_struct lghi %r13,__LC_PGM_TDB tm __LC_PGM_ILC+2,0x02 # check for transaction abort jz 3f mvc __THREAD_trap_tdb(256,%r14),0(%r13) 3: stg %r10,__THREAD_last_break(%r14) 4: lgr %r13,%r11 la %r11,STACK_FRAME_OVERHEAD(%r15) stmg %r0,%r7,__PT_R0(%r11) # clear user controlled registers to prevent speculative use xgr %r0,%r0 xgr %r1,%r1 xgr %r2,%r2 xgr %r3,%r3 xgr %r4,%r4 xgr %r5,%r5 xgr %r6,%r6 xgr %r7,%r7 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC stmg %r8,%r9,__PT_PSW(%r11) mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC mvc __PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE stg %r13,__PT_FLAGS(%r11) stg %r10,__PT_ARGS(%r11) tm __LC_PGM_ILC+3,0x80 # check for per exception jz 5f tmhh %r8,0x0001 # kernel per event ? jz .Lpgm_kprobe oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID 5: REENABLE_IRQS xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) larl %r1,pgm_check_table llgh %r10,__PT_INT_CODE+2(%r11) nill %r10,0x007f sll %r10,2 je .Lpgm_return lgf %r9,0(%r10,%r1) # load address of handler routine lgr %r2,%r11 # pass pointer to pt_regs BASR_EX %r14,%r9 # branch to interrupt-handler .Lpgm_return: LOCKDEP_SYS_EXIT tm __PT_PSW+1(%r11),0x01 # returning to user ? 
jno .Lsysc_restore TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL jo .Lsysc_do_syscall j .Lsysc_tif # # PER event in supervisor state, must be kprobes # .Lpgm_kprobe: REENABLE_IRQS xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) lgr %r2,%r11 # pass pointer to pt_regs brasl %r14,do_per_trap j .Lpgm_return # # single stepped system call # .Lpgm_svcper: mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW lghi %r13,__TASK_thread larl %r14,.Lsysc_per stg %r14,__LC_RETURN_PSW+8 lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP lpswe __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs /* * IO interrupt handler routine */ ENTRY(io_int_handler) STCK __LC_INT_CLOCK stpt __LC_ASYNC_ENTER_TIMER BPOFF stmg %r8,%r15,__LC_SAVE_AREA_ASYNC lg %r12,__LC_CURRENT larl %r13,cleanup_critical lmg %r8,%r9,__LC_IO_OLD_PSW SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER stmg %r0,%r7,__PT_R0(%r11) # clear user controlled registers to prevent speculative use xgr %r0,%r0 xgr %r1,%r1 xgr %r2,%r2 xgr %r3,%r3 xgr %r4,%r4 xgr %r5,%r5 xgr %r6,%r6 xgr %r7,%r7 xgr %r10,%r10 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC stmg %r8,%r9,__PT_PSW(%r11) mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) TSTMSK __LC_CPU_FLAGS,_CIF_IGNORE_IRQ jo .Lio_restore TRACE_IRQS_OFF xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) .Lio_loop: lgr %r2,%r11 # pass pointer to pt_regs lghi %r3,IO_INTERRUPT tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ? jz .Lio_call lghi %r3,THIN_INTERRUPT .Lio_call: brasl %r14,do_IRQ TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPAR jz .Lio_return tpi 0 jz .Lio_return mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID j .Lio_loop .Lio_return: LOCKDEP_SYS_EXIT TRACE_IRQS_ON .Lio_tif: TSTMSK __TI_flags(%r12),_TIF_WORK jnz .Lio_work # there is work to do (signals etc.) TSTMSK __LC_CPU_FLAGS,_CIF_WORK jnz .Lio_work .Lio_restore: lg %r14,__LC_VDSO_PER_CPU lmg %r0,%r10,__PT_R0(%r11) mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) tm __PT_PSW+1(%r11),0x01 # returning to user ? jno .Lio_exit_kernel BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP .Lio_exit_timer: stpt __LC_EXIT_TIMER mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER .Lio_exit_kernel: lmg %r11,%r15,__PT_R11(%r11) lpswe __LC_RETURN_PSW .Lio_done: # # There is work todo, find out in which context we have been interrupted: # 1) if we return to user space we can do all _TIF_WORK work # 2) if we return to kernel code and kvm is enabled check if we need to # modify the psw to leave SIE # 3) if we return to kernel code and preemptive scheduling is enabled check # the preemption counter and if it is zero call preempt_schedule_irq # Before any work can be done, a switch to the kernel stack is required. # .Lio_work: tm __PT_PSW+1(%r11),0x01 # returning to user ? 
jo .Lio_work_user # yes -> do resched & signal #ifdef CONFIG_PREEMPT # check for preemptive scheduling icm %r0,15,__LC_PREEMPT_COUNT jnz .Lio_restore # preemption is disabled TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED jno .Lio_restore # switch to kernel stack lg %r1,__PT_R15(%r11) aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) la %r11,STACK_FRAME_OVERHEAD(%r1) lgr %r15,%r1 # TRACE_IRQS_ON already done at .Lio_return, call # TRACE_IRQS_OFF to keep things symmetrical TRACE_IRQS_OFF brasl %r14,preempt_schedule_irq j .Lio_return #else j .Lio_restore #endif # # Need to do work before returning to userspace, switch to kernel stack # .Lio_work_user: lg %r1,__LC_KERNEL_STACK mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) la %r11,STACK_FRAME_OVERHEAD(%r1) lgr %r15,%r1 # # One of the work bits is on. Find out which one. # .Lio_work_tif: TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING jo .Lio_mcck_pending TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED jo .Lio_reschedule #ifdef CONFIG_LIVEPATCH TSTMSK __TI_flags(%r12),_TIF_PATCH_PENDING jo .Lio_patch_pending #endif TSTMSK __TI_flags(%r12),_TIF_SIGPENDING jo .Lio_sigpending TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME jo .Lio_notify_resume TSTMSK __TI_flags(%r12),_TIF_GUARDED_STORAGE jo .Lio_guarded_storage TSTMSK __LC_CPU_FLAGS,_CIF_FPU jo .Lio_vxrs TSTMSK __LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY) jnz .Lio_asce j .Lio_return # beware of critical section cleanup # # _CIF_MCCK_PENDING is set, call handler # .Lio_mcck_pending: # TRACE_IRQS_ON already done at .Lio_return brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler TRACE_IRQS_OFF j .Lio_return # # _CIF_ASCE_PRIMARY and/or CIF_ASCE_SECONDARY set, load user space asce # .Lio_asce: ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_SECONDARY lctlg %c7,%c7,__LC_VDSO_ASCE # load secondary asce TSTMSK __LC_CPU_FLAGS,_CIF_ASCE_PRIMARY jz .Lio_return #ifndef CONFIG_HAVE_MARCH_Z10_FEATURES tm __LC_STFLE_FAC_LIST+3,0x10 # has MVCOS ? jnz .Lio_set_fs_fixup ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY lctlg %c1,%c1,__LC_USER_ASCE # load primary asce j .Lio_return .Lio_set_fs_fixup: #endif larl %r14,.Lio_return jg set_fs_fixup # # CIF_FPU is set, restore floating-point controls and floating-point registers. # .Lio_vxrs: larl %r14,.Lio_return jg load_fpu_regs # # _TIF_GUARDED_STORAGE is set, call guarded_storage_load # .Lio_guarded_storage: # TRACE_IRQS_ON already done at .Lio_return ssm __LC_SVC_NEW_PSW # reenable interrupts lgr %r2,%r11 # pass pointer to pt_regs brasl %r14,gs_load_bc_cb ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts TRACE_IRQS_OFF j .Lio_return # # _TIF_NEED_RESCHED is set, call schedule # .Lio_reschedule: # TRACE_IRQS_ON already done at .Lio_return ssm __LC_SVC_NEW_PSW # reenable interrupts brasl %r14,schedule # call scheduler ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts TRACE_IRQS_OFF j .Lio_return # # _TIF_PATCH_PENDING is set, call klp_update_patch_state # #ifdef CONFIG_LIVEPATCH .Lio_patch_pending: lg %r2,__LC_CURRENT # pass pointer to task struct larl %r14,.Lio_return jg klp_update_patch_state #endif # # _TIF_SIGPENDING or is set, call do_signal # .Lio_sigpending: # TRACE_IRQS_ON already done at .Lio_return ssm __LC_SVC_NEW_PSW # reenable interrupts lgr %r2,%r11 # pass pointer to pt_regs brasl %r14,do_signal ssm __LC_PGM_NEW_PSW # disable I/O and ext. 
interrupts TRACE_IRQS_OFF j .Lio_return # # _TIF_NOTIFY_RESUME or is set, call do_notify_resume # .Lio_notify_resume: # TRACE_IRQS_ON already done at .Lio_return ssm __LC_SVC_NEW_PSW # reenable interrupts lgr %r2,%r11 # pass pointer to pt_regs brasl %r14,do_notify_resume ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts TRACE_IRQS_OFF j .Lio_return /* * External interrupt handler routine */ ENTRY(ext_int_handler) STCK __LC_INT_CLOCK stpt __LC_ASYNC_ENTER_TIMER BPOFF stmg %r8,%r15,__LC_SAVE_AREA_ASYNC lg %r12,__LC_CURRENT larl %r13,cleanup_critical lmg %r8,%r9,__LC_EXT_OLD_PSW SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER stmg %r0,%r7,__PT_R0(%r11) # clear user controlled registers to prevent speculative use xgr %r0,%r0 xgr %r1,%r1 xgr %r2,%r2 xgr %r3,%r3 xgr %r4,%r4 xgr %r5,%r5 xgr %r6,%r6 xgr %r7,%r7 xgr %r10,%r10 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC stmg %r8,%r9,__PT_PSW(%r11) lghi %r1,__LC_EXT_PARAMS2 mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS mvc __PT_INT_PARM_LONG(8,%r11),0(%r1) xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) TSTMSK __LC_CPU_FLAGS,_CIF_IGNORE_IRQ jo .Lio_restore TRACE_IRQS_OFF xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) lgr %r2,%r11 # pass pointer to pt_regs lghi %r3,EXT_INTERRUPT brasl %r14,do_IRQ j .Lio_return /* * Load idle PSW. The second "half" of this function is in .Lcleanup_idle. */ ENTRY(psw_idle) stg %r14,(__SF_GPRS+8*8)(%r15) stg %r3,__SF_EMPTY(%r15) larl %r1,.Lpsw_idle_lpsw+4 stg %r1,__SF_EMPTY+8(%r15) #ifdef CONFIG_SMP larl %r1,smp_cpu_mtid llgf %r1,0(%r1) ltgr %r1,%r1 jz .Lpsw_idle_stcctm .insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15) .Lpsw_idle_stcctm: #endif oi __LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT BPON STCK __CLOCK_IDLE_ENTER(%r2) stpt __TIMER_IDLE_ENTER(%r2) .Lpsw_idle_lpsw: lpswe __SF_EMPTY(%r15) BR_EX %r14 .Lpsw_idle_end: /* * Store floating-point controls and floating-point or vector register * depending whether the vector facility is available. A critical section * cleanup assures that the registers are stored even if interrupted for * some other work. The CIF_FPU flag is set to trigger a lazy restore * of the register contents at return from io or a system call. */ ENTRY(save_fpu_regs) lg %r2,__LC_CURRENT aghi %r2,__TASK_thread TSTMSK __LC_CPU_FLAGS,_CIF_FPU jo .Lsave_fpu_regs_exit stfpc __THREAD_FPU_fpc(%r2) lg %r3,__THREAD_FPU_regs(%r2) TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX jz .Lsave_fpu_regs_fp # no -> store FP regs VSTM %v0,%v15,0,%r3 # vstm 0,15,0(3) VSTM %v16,%v31,256,%r3 # vstm 16,31,256(3) j .Lsave_fpu_regs_done # -> set CIF_FPU flag .Lsave_fpu_regs_fp: std 0,0(%r3) std 1,8(%r3) std 2,16(%r3) std 3,24(%r3) std 4,32(%r3) std 5,40(%r3) std 6,48(%r3) std 7,56(%r3) std 8,64(%r3) std 9,72(%r3) std 10,80(%r3) std 11,88(%r3) std 12,96(%r3) std 13,104(%r3) std 14,112(%r3) std 15,120(%r3) .Lsave_fpu_regs_done: oi __LC_CPU_FLAGS+7,_CIF_FPU .Lsave_fpu_regs_exit: BR_EX %r14 .Lsave_fpu_regs_end: EXPORT_SYMBOL(save_fpu_regs) /* * Load floating-point controls and floating-point or vector registers. * A critical section cleanup assures that the register contents are * loaded even if interrupted for some other work. 
* * There are special calling conventions to fit into sysc and io return work: * %r15: <kernel stack> * The function requires: * %r4 */ load_fpu_regs: lg %r4,__LC_CURRENT aghi %r4,__TASK_thread TSTMSK __LC_CPU_FLAGS,_CIF_FPU jno .Lload_fpu_regs_exit lfpc __THREAD_FPU_fpc(%r4) TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area jz .Lload_fpu_regs_fp # -> no VX, load FP regs VLM %v0,%v15,0,%r4 VLM %v16,%v31,256,%r4 j .Lload_fpu_regs_done .Lload_fpu_regs_fp: ld 0,0(%r4) ld 1,8(%r4) ld 2,16(%r4) ld 3,24(%r4) ld 4,32(%r4) ld 5,40(%r4) ld 6,48(%r4) ld 7,56(%r4) ld 8,64(%r4) ld 9,72(%r4) ld 10,80(%r4) ld 11,88(%r4) ld 12,96(%r4) ld 13,104(%r4) ld 14,112(%r4) ld 15,120(%r4) .Lload_fpu_regs_done: ni __LC_CPU_FLAGS+7,255-_CIF_FPU .Lload_fpu_regs_exit: BR_EX %r14 .Lload_fpu_regs_end: .L__critical_end: /* * Machine check handler routines */ ENTRY(mcck_int_handler) STCK __LC_MCCK_CLOCK BPOFF la %r1,4095 # validate r1 spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # validate cpu timer sckc __LC_CLOCK_COMPARATOR # validate comparator lam %a0,%a15,__LC_AREGS_SAVE_AREA-4095(%r1) # validate acrs lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs lg %r12,__LC_CURRENT larl %r13,cleanup_critical lmg %r8,%r9,__LC_MCK_OLD_PSW TSTMSK __LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE jo .Lmcck_panic # yes -> rest of mcck code invalid TSTMSK __LC_MCCK_CODE,MCCK_CODE_CR_VALID jno .Lmcck_panic # control registers invalid -> panic la %r14,4095 lctlg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs ptlb lg %r11,__LC_MCESAD-4095(%r14) # extended machine check save area nill %r11,0xfc00 # MCESA_ORIGIN_MASK TSTMSK __LC_CREGS_SAVE_AREA+16-4095(%r14),CR2_GUARDED_STORAGE jno 0f TSTMSK __LC_MCCK_CODE,MCCK_CODE_GS_VALID jno 0f .insn rxy,0xe3000000004d,0,__MCESA_GS_SAVE_AREA(%r11) # LGSC 0: l %r14,__LC_FP_CREG_SAVE_AREA-4095(%r14) TSTMSK __LC_MCCK_CODE,MCCK_CODE_FC_VALID jo 0f sr %r14,%r14 0: sfpc %r14 TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX jo 0f lghi %r14,__LC_FPREGS_SAVE_AREA ld %f0,0(%r14) ld %f1,8(%r14) ld %f2,16(%r14) ld %f3,24(%r14) ld %f4,32(%r14) ld %f5,40(%r14) ld %f6,48(%r14) ld %f7,56(%r14) ld %f8,64(%r14) ld %f9,72(%r14) ld %f10,80(%r14) ld %f11,88(%r14) ld %f12,96(%r14) ld %f13,104(%r14) ld %f14,112(%r14) ld %f15,120(%r14) j 1f 0: VLM %v0,%v15,0,%r11 VLM %v16,%v31,256,%r11 1: lghi %r14,__LC_CPU_TIMER_SAVE_AREA mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) TSTMSK __LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID jo 3f la %r14,__LC_SYNC_ENTER_TIMER clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER jl 0f la %r14,__LC_ASYNC_ENTER_TIMER 0: clc 0(8,%r14),__LC_EXIT_TIMER jl 1f la %r14,__LC_EXIT_TIMER 1: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER jl 2f la %r14,__LC_LAST_UPDATE_TIMER 2: spt 0(%r14) mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) 3: TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID jno .Lmcck_panic tmhh %r8,0x0001 # interrupting from user ? jnz 4f TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID jno .Lmcck_panic 4: SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER .Lmcck_skip: lghi %r14,__LC_GPREGS_SAVE_AREA+64 stmg %r0,%r7,__PT_R0(%r11) # clear user controlled registers to prevent speculative use xgr %r0,%r0 xgr %r1,%r1 xgr %r2,%r2 xgr %r3,%r3 xgr %r4,%r4 xgr %r5,%r5 xgr %r6,%r6 xgr %r7,%r7 xgr %r10,%r10 mvc __PT_R8(64,%r11),0(%r14) stmg %r8,%r9,__PT_PSW(%r11) xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) lgr %r2,%r11 # pass pointer to pt_regs brasl %r14,s390_do_machine_check tm __PT_PSW+1(%r11),0x01 # returning to user ? 
jno .Lmcck_return lg %r1,__LC_KERNEL_STACK # switch to kernel stack mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) la %r11,STACK_FRAME_OVERHEAD(%r1) lgr %r15,%r1 ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING jno .Lmcck_return TRACE_IRQS_OFF brasl %r14,s390_handle_mcck TRACE_IRQS_ON .Lmcck_return: lg %r14,__LC_VDSO_PER_CPU lmg %r0,%r10,__PT_R0(%r11) mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? jno 0f BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP stpt __LC_EXIT_TIMER mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER 0: lmg %r11,%r15,__PT_R11(%r11) lpswe __LC_RETURN_MCCK_PSW .Lmcck_panic: lg %r15,__LC_PANIC_STACK la %r11,STACK_FRAME_OVERHEAD(%r15) j .Lmcck_skip # # PSW restart interrupt handler # ENTRY(restart_int_handler) ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40 stg %r15,__LC_SAVE_AREA_RESTART lg %r15,__LC_RESTART_STACK aghi %r15,-__PT_SIZE # create pt_regs on stack xc 0(__PT_SIZE,%r15),0(%r15) stmg %r0,%r14,__PT_R0(%r15) mvc __PT_R15(8,%r15),__LC_SAVE_AREA_RESTART mvc __PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw aghi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15) lg %r1,__LC_RESTART_FN # load fn, parm & source cpu lg %r2,__LC_RESTART_DATA lg %r3,__LC_RESTART_SOURCE ltgr %r3,%r3 # test source cpu address jm 1f # negative -> skip source stop 0: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu brc 10,0b # wait for status stored 1: basr %r14,%r1 # call function stap __SF_EMPTY(%r15) # store cpu address llgh %r3,__SF_EMPTY(%r15) 2: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu brc 2,2b 3: j 3b .section .kprobes.text, "ax" #ifdef CONFIG_CHECK_STACK /* * The synchronous or the asynchronous stack overflowed. We are dead. * No need to properly save the registers, we are going to panic anyway. * Setup a pt_regs so that show_trace can provide a good call trace. 
*/ stack_overflow: lg %r15,__LC_PANIC_STACK # change to panic stack la %r11,STACK_FRAME_OVERHEAD(%r15) stmg %r0,%r7,__PT_R0(%r11) stmg %r8,%r9,__PT_PSW(%r11) mvc __PT_R8(64,%r11),0(%r14) stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) lgr %r2,%r11 # pass pointer to pt_regs jg kernel_stack_overflow #endif cleanup_critical: #if IS_ENABLED(CONFIG_KVM) clg %r9,BASED(.Lcleanup_table_sie) # .Lsie_gmap jl 0f clg %r9,BASED(.Lcleanup_table_sie+8)# .Lsie_done jl .Lcleanup_sie #endif clg %r9,BASED(.Lcleanup_table) # system_call jl 0f clg %r9,BASED(.Lcleanup_table+8) # .Lsysc_do_svc jl .Lcleanup_system_call clg %r9,BASED(.Lcleanup_table+16) # .Lsysc_tif jl 0f clg %r9,BASED(.Lcleanup_table+24) # .Lsysc_restore jl .Lcleanup_sysc_tif clg %r9,BASED(.Lcleanup_table+32) # .Lsysc_done jl .Lcleanup_sysc_restore clg %r9,BASED(.Lcleanup_table+40) # .Lio_tif jl 0f clg %r9,BASED(.Lcleanup_table+48) # .Lio_restore jl .Lcleanup_io_tif clg %r9,BASED(.Lcleanup_table+56) # .Lio_done jl .Lcleanup_io_restore clg %r9,BASED(.Lcleanup_table+64) # psw_idle jl 0f clg %r9,BASED(.Lcleanup_table+72) # .Lpsw_idle_end jl .Lcleanup_idle clg %r9,BASED(.Lcleanup_table+80) # save_fpu_regs jl 0f clg %r9,BASED(.Lcleanup_table+88) # .Lsave_fpu_regs_end jl .Lcleanup_save_fpu_regs clg %r9,BASED(.Lcleanup_table+96) # load_fpu_regs jl 0f clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end jl .Lcleanup_load_fpu_regs 0: BR_EX %r14,%r11 .align 8 .Lcleanup_table: .quad system_call .quad .Lsysc_do_svc .quad .Lsysc_tif .quad .Lsysc_restore .quad .Lsysc_done .quad .Lio_tif .quad .Lio_restore .quad .Lio_done .quad psw_idle .quad .Lpsw_idle_end .quad save_fpu_regs .quad .Lsave_fpu_regs_end .quad load_fpu_regs .quad .Lload_fpu_regs_end #if IS_ENABLED(CONFIG_KVM) .Lcleanup_table_sie: .quad .Lsie_gmap .quad .Lsie_done .Lcleanup_sie: cghi %r11,__LC_SAVE_AREA_ASYNC #Is this in normal interrupt? 
je 1f slg %r9,BASED(.Lsie_crit_mcck_start) clg %r9,BASED(.Lsie_crit_mcck_length) jh 1f oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST 1: BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST) lg %r9,__SF_SIE_CONTROL(%r15) # get control block pointer ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE lctlg %c1,%c1,__LC_USER_ASCE # load primary asce larl %r9,sie_exit # skip forward to sie_exit BR_EX %r14,%r11 #endif .Lcleanup_system_call: # check if stpt has been executed clg %r9,BASED(.Lcleanup_system_call_insn) jh 0f mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER cghi %r11,__LC_SAVE_AREA_ASYNC je 0f mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER 0: # check if stmg has been executed clg %r9,BASED(.Lcleanup_system_call_insn+8) jh 0f mvc __LC_SAVE_AREA_SYNC(64),0(%r11) 0: # check if base register setup + TIF bit load has been done clg %r9,BASED(.Lcleanup_system_call_insn+16) jhe 0f # set up saved register r12 task struct pointer stg %r12,32(%r11) # set up saved register r13 __TASK_thread offset mvc 40(8,%r11),BASED(.Lcleanup_system_call_const) 0: # check if the user time update has been done clg %r9,BASED(.Lcleanup_system_call_insn+24) jh 0f lg %r15,__LC_EXIT_TIMER slg %r15,__LC_SYNC_ENTER_TIMER alg %r15,__LC_USER_TIMER stg %r15,__LC_USER_TIMER 0: # check if the system time update has been done clg %r9,BASED(.Lcleanup_system_call_insn+32) jh 0f lg %r15,__LC_LAST_UPDATE_TIMER slg %r15,__LC_EXIT_TIMER alg %r15,__LC_SYSTEM_TIMER stg %r15,__LC_SYSTEM_TIMER 0: # update accounting time stamp mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP # set up saved register r11 lg %r15,__LC_KERNEL_STACK la %r9,STACK_FRAME_OVERHEAD(%r15) stg %r9,24(%r11) # r11 pt_regs pointer # fill pt_regs mvc __PT_R8(64,%r9),__LC_SAVE_AREA_SYNC stmg %r0,%r7,__PT_R0(%r9) mvc __PT_PSW(16,%r9),__LC_SVC_OLD_PSW mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC xc __PT_FLAGS(8,%r9),__PT_FLAGS(%r9) mvi __PT_FLAGS+7(%r9),_PIF_SYSCALL # setup saved register r15 stg %r15,56(%r11) # r15 stack pointer # set new psw address and exit larl %r9,.Lsysc_do_svc BR_EX %r14,%r11 .Lcleanup_system_call_insn: .quad system_call .quad .Lsysc_stmg .quad .Lsysc_per .quad .Lsysc_vtime+36 .quad .Lsysc_vtime+42 .Lcleanup_system_call_const: .quad __TASK_thread .Lcleanup_sysc_tif: larl %r9,.Lsysc_tif BR_EX %r14,%r11 .Lcleanup_sysc_restore: # check if stpt has been executed clg %r9,BASED(.Lcleanup_sysc_restore_insn) jh 0f mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER cghi %r11,__LC_SAVE_AREA_ASYNC je 0f mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER 0: clg %r9,BASED(.Lcleanup_sysc_restore_insn+8) je 1f lg %r9,24(%r11) # get saved pointer to pt_regs mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) mvc 0(64,%r11),__PT_R8(%r9) lmg %r0,%r7,__PT_R0(%r9) 1: lmg %r8,%r9,__LC_RETURN_PSW BR_EX %r14,%r11 .Lcleanup_sysc_restore_insn: .quad .Lsysc_exit_timer .quad .Lsysc_done - 4 .Lcleanup_io_tif: larl %r9,.Lio_tif BR_EX %r14,%r11 .Lcleanup_io_restore: # check if stpt has been executed clg %r9,BASED(.Lcleanup_io_restore_insn) jh 0f mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER 0: clg %r9,BASED(.Lcleanup_io_restore_insn+8) je 1f lg %r9,24(%r11) # get saved r11 pointer to pt_regs mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) mvc 0(64,%r11),__PT_R8(%r9) lmg %r0,%r7,__PT_R0(%r9) 1: lmg %r8,%r9,__LC_RETURN_PSW BR_EX %r14,%r11 .Lcleanup_io_restore_insn: .quad .Lio_exit_timer .quad .Lio_done - 4 .Lcleanup_idle: ni __LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT # copy interrupt clock & cpu timer mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK mvc 
__TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER cghi %r11,__LC_SAVE_AREA_ASYNC je 0f mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER 0: # check if stck & stpt have been executed clg %r9,BASED(.Lcleanup_idle_insn) jhe 1f mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2) mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2) 1: # calculate idle cycles #ifdef CONFIG_SMP clg %r9,BASED(.Lcleanup_idle_insn) jl 3f larl %r1,smp_cpu_mtid llgf %r1,0(%r1) ltgr %r1,%r1 jz 3f .insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15) larl %r3,mt_cycles ag %r3,__LC_PERCPU_OFFSET la %r4,__SF_EMPTY+16(%r15) 2: lg %r0,0(%r3) slg %r0,0(%r4) alg %r0,64(%r4) stg %r0,0(%r3) la %r3,8(%r3) la %r4,8(%r4) brct %r1,2b #endif 3: # account system time going idle lg %r9,__LC_STEAL_TIMER alg %r9,__CLOCK_IDLE_ENTER(%r2) slg %r9,__LC_LAST_UPDATE_CLOCK stg %r9,__LC_STEAL_TIMER mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2) lg %r9,__LC_SYSTEM_TIMER alg %r9,__LC_LAST_UPDATE_TIMER slg %r9,__TIMER_IDLE_ENTER(%r2) stg %r9,__LC_SYSTEM_TIMER mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2) # prepare return psw nihh %r8,0xfcfd # clear irq & wait state bits lg %r9,48(%r11) # return from psw_idle BR_EX %r14,%r11 .Lcleanup_idle_insn: .quad .Lpsw_idle_lpsw .Lcleanup_save_fpu_regs: larl %r9,save_fpu_regs BR_EX %r14,%r11 .Lcleanup_load_fpu_regs: larl %r9,load_fpu_regs BR_EX %r14,%r11 /* * Integer constants */ .align 8 .Lcritical_start: .quad .L__critical_start .Lcritical_length: .quad .L__critical_end - .L__critical_start #if IS_ENABLED(CONFIG_KVM) .Lsie_critical_start: .quad .Lsie_gmap .Lsie_critical_length: .quad .Lsie_done - .Lsie_gmap .Lsie_crit_mcck_start: .quad .Lsie_entry .Lsie_crit_mcck_length: .quad .Lsie_skip - .Lsie_entry #endif .section .rodata, "a" #define SYSCALL(esame,emu) .long esame .globl sys_call_table sys_call_table: #include "asm/syscall_table.h" #undef SYSCALL #ifdef CONFIG_COMPAT #define SYSCALL(esame,emu) .long emu .globl sys_call_table_emu sys_call_table_emu: #include "asm/syscall_table.h" #undef SYSCALL #endif
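The cleanup_critical routine above classifies the interrupted instruction address (in %r9) against consecutive boundaries in .Lcleanup_table and branches to the matching fix-up code. A minimal C sketch of that range-dispatch idea follows; the struct, names, and explicit start/end pairs are illustrative only, since the real table is a flat list of label addresses with alternating fix-up and no-op regions.

#include <stddef.h>

typedef void (*fixup_fn)(void);

/* Hypothetical view of one cleanup range: [start, end) plus its fix-up. */
struct cleanup_range {
    unsigned long start;   /* e.g. address of system_call   */
    unsigned long end;     /* e.g. address of .Lsysc_do_svc */
    fixup_fn     fixup;    /* e.g. the .Lcleanup_system_call code */
};

/* Run the fix-up for the range containing addr, if any. */
static void cleanup_critical_sketch(unsigned long addr,
                                    const struct cleanup_range *tbl, int n)
{
    for (int i = 0; i < n; i++) {
        if (addr >= tbl[i].start && addr < tbl[i].end) {
            tbl[i].fixup();
            return;
        }
    }
    /* outside every critical range: nothing to repair */
}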
AirFortressIlikara/LS2K0300-linux-4.19
2,048
arch/s390/kernel/relocate_kernel.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright IBM Corp. 2005 * * Author(s): Rolf Adelsberger, * Heiko Carstens <heiko.carstens@de.ibm.com> * */ #include <linux/linkage.h> #include <asm/page.h> #include <asm/sigp.h> /* * moves the new kernel to its destination... * %r2 = pointer to first kimage_entry_t * %r3 = start address - where to jump to after the job is done... * * %r5 will be used as temp. storage * %r6 holds the destination address * %r7 = PAGE_SIZE * %r8 holds the source address * %r9 = PAGE_SIZE * * 0xf000 is a page_mask */ .text ENTRY(relocate_kernel) basr %r13,0 # base address .base: lghi %r7,PAGE_SIZE # load PAGE_SIZE in r7 lghi %r9,PAGE_SIZE # load PAGE_SIZE in r9 lg %r5,0(%r2) # read another word for indirection page aghi %r2,8 # increment pointer tml %r5,0x1 # is it a destination page? je .indir_check # NO, goto "indir_check" lgr %r6,%r5 # r6 = r5 nill %r6,0xf000 # mask it out and... j .base # ...next iteration .indir_check: tml %r5,0x2 # is it a indirection page? je .done_test # NO, goto "done_test" nill %r5,0xf000 # YES, mask out, lgr %r2,%r5 # move it into the right register, j .base # and read next... .done_test: tml %r5,0x4 # is it the done indicator? je .source_test # NO! Well, then it should be the source indicator... j .done # ok, lets finish it here... .source_test: tml %r5,0x8 # it should be a source indicator... je .base # NO, ignore it... lgr %r8,%r5 # r8 = r5 nill %r8,0xf000 # masking 0: mvcle %r6,%r8,0x0 # copy PAGE_SIZE bytes from r8 to r6 - pad with 0 jo 0b j .base .done: sgr %r0,%r0 # clear register r0 la %r4,load_psw-.base(%r13) # load psw-address into the register o %r3,4(%r4) # or load address into psw st %r3,4(%r4) mvc 0(8,%r0),0(%r4) # copy psw to absolute address 0 diag %r0,%r0,0x308 .align 8 load_psw: .long 0x00080000,0x80000000 relocate_kernel_end: .align 8 .globl relocate_kernel_len relocate_kernel_len: .quad relocate_kernel_end - relocate_kernel
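relocate_kernel above walks a kexec indirection list whose entry flag bits are spelled out in its comments: 0x1 marks a destination page, 0x2 an indirection page, 0x4 the done marker, and 0x8 a source page to copy. A conceptual C replay of that walk is sketched below; the kimage_entry_t typedef, PAGE_SIZE value, and function name are assumptions for the sketch, not part of this file.

#include <stddef.h>
#include <string.h>

typedef unsigned long kimage_entry_t;
#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

static void relocate_kernel_sketch(kimage_entry_t *entry)
{
    unsigned char *dest = NULL;     /* set by the first destination entry */

    for (;;) {
        kimage_entry_t e = *entry++;

        if (e & 0x1) {                              /* destination page */
            dest = (unsigned char *)(e & PAGE_MASK);
        } else if (e & 0x2) {                       /* indirection page */
            entry = (kimage_entry_t *)(e & PAGE_MASK);
        } else if (e & 0x4) {                       /* done indicator */
            return;
        } else if (e & 0x8) {                       /* source page */
            memcpy(dest, (void *)(e & PAGE_MASK), PAGE_SIZE);
            dest += PAGE_SIZE;      /* mvcle leaves r6 past the copied page */
        }
    }
}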
AirFortressIlikara/LS2K0300-linux-4.19
3,234
arch/s390/kernel/base.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/s390/kernel/base.S * * Copyright IBM Corp. 2006, 2007 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> * Michael Holzheu <holzheu@de.ibm.com> */ #include <linux/linkage.h> #include <asm/asm-offsets.h> #include <asm/nospec-insn.h> #include <asm/ptrace.h> #include <asm/sigp.h> GEN_BR_THUNK %r9 GEN_BR_THUNK %r14 ENTRY(s390_base_mcck_handler) basr %r13,0 0: lg %r15,__LC_PANIC_STACK # load panic stack aghi %r15,-STACK_FRAME_OVERHEAD larl %r1,s390_base_mcck_handler_fn lg %r9,0(%r1) ltgr %r9,%r9 jz 1f BASR_EX %r14,%r9 1: la %r1,4095 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1) lpswe __LC_MCK_OLD_PSW .section .bss .align 8 .globl s390_base_mcck_handler_fn s390_base_mcck_handler_fn: .quad 0 .previous ENTRY(s390_base_ext_handler) stmg %r0,%r15,__LC_SAVE_AREA_ASYNC basr %r13,0 0: aghi %r15,-STACK_FRAME_OVERHEAD larl %r1,s390_base_ext_handler_fn lg %r9,0(%r1) ltgr %r9,%r9 jz 1f BASR_EX %r14,%r9 1: lmg %r0,%r15,__LC_SAVE_AREA_ASYNC ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit lpswe __LC_EXT_OLD_PSW .section .bss .align 8 .globl s390_base_ext_handler_fn s390_base_ext_handler_fn: .quad 0 .previous ENTRY(s390_base_pgm_handler) stmg %r0,%r15,__LC_SAVE_AREA_SYNC basr %r13,0 0: aghi %r15,-STACK_FRAME_OVERHEAD larl %r1,s390_base_pgm_handler_fn lg %r9,0(%r1) ltgr %r9,%r9 jz 1f BASR_EX %r14,%r9 lmg %r0,%r15,__LC_SAVE_AREA_SYNC lpswe __LC_PGM_OLD_PSW 1: lpswe disabled_wait_psw-0b(%r13) .align 8 disabled_wait_psw: .quad 0x0002000180000000,0x0000000000000000 + s390_base_pgm_handler .section .bss .align 8 .globl s390_base_pgm_handler_fn s390_base_pgm_handler_fn: .quad 0 .previous # # Calls diag 308 subcode 1 and continues execution # ENTRY(diag308_reset) larl %r4,.Lctlregs # Save control registers stctg %c0,%c15,0(%r4) lg %r2,0(%r4) # Disable lowcore protection nilh %r2,0xefff larl %r4,.Lctlreg0 stg %r2,0(%r4) lctlg %c0,%c0,0(%r4) larl %r4,.Lfpctl # Floating point control register stfpc 0(%r4) larl %r4,.Lprefix # Save prefix register stpx 0(%r4) larl %r4,.Lprefix_zero # Set prefix register to 0 spx 0(%r4) larl %r4,.Lcontinue_psw # Save PSW flags epsw %r2,%r3 stm %r2,%r3,0(%r4) larl %r4,.Lrestart_psw # Setup restart PSW at absolute 0 lghi %r3,0 lg %r4,0(%r4) # Save PSW sturg %r4,%r3 # Use sturg, because of large pages lghi %r1,1 lghi %r0,0 diag %r0,%r1,0x308 .Lrestart_part2: lhi %r0,0 # Load r0 with zero lhi %r1,2 # Use mode 2 = ESAME (dump) sigp %r1,%r0,SIGP_SET_ARCHITECTURE # Switch to ESAME mode sam64 # Switch to 64 bit addressing mode larl %r4,.Lctlregs # Restore control registers lctlg %c0,%c15,0(%r4) larl %r4,.Lfpctl # Restore floating point ctl register lfpc 0(%r4) larl %r4,.Lprefix # Restore prefix register spx 0(%r4) larl %r4,.Lcontinue_psw # Restore PSW flags lpswe 0(%r4) .Lcontinue: BR_EX %r14 .align 16 .Lrestart_psw: .long 0x00080000,0x80000000 + .Lrestart_part2 .section .data..nosave,"aw",@progbits .align 8 .Lcontinue_psw: .quad 0,.Lcontinue .previous .section .bss .align 8 .Lctlreg0: .quad 0 .Lctlregs: .rept 16 .quad 0 .endr .Lfpctl: .long 0 .Lprefix: .long 0 .Lprefix_zero: .long 0 .previous
AirFortressIlikara/LS2K0300-linux-4.19
2,702
arch/s390/kernel/mcount.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright IBM Corp. 2008, 2009 * * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, * */ #include <linux/linkage.h> #include <asm/asm-offsets.h> #include <asm/ftrace.h> #include <asm/nospec-insn.h> #include <asm/ptrace.h> #include <asm/export.h> GEN_BR_THUNK %r1 GEN_BR_THUNK %r14 .section .kprobes.text, "ax" ENTRY(ftrace_stub) BR_EX %r14 #define STACK_FRAME_SIZE (STACK_FRAME_OVERHEAD + __PT_SIZE) #define STACK_PTREGS (STACK_FRAME_OVERHEAD) #define STACK_PTREGS_GPRS (STACK_PTREGS + __PT_GPRS) #define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW) #ifdef __PACK_STACK /* allocate just enough for r14, r15 and backchain */ #define TRACED_FUNC_FRAME_SIZE 24 #else #define TRACED_FUNC_FRAME_SIZE STACK_FRAME_OVERHEAD #endif ENTRY(_mcount) BR_EX %r14 EXPORT_SYMBOL(_mcount) ENTRY(ftrace_caller) .globl ftrace_regs_caller .set ftrace_regs_caller,ftrace_caller stg %r14,(__SF_GPRS+8*8)(%r15) # save traced function caller lgr %r1,%r15 #if !(defined(CC_USING_HOTPATCH) || defined(CC_USING_NOP_MCOUNT)) aghi %r0,MCOUNT_RETURN_FIXUP #endif # allocate stack frame for ftrace_caller to contain traced function aghi %r15,-TRACED_FUNC_FRAME_SIZE stg %r1,__SF_BACKCHAIN(%r15) stg %r0,(__SF_GPRS+8*8)(%r15) stg %r15,(__SF_GPRS+9*8)(%r15) # allocate pt_regs and stack frame for ftrace_trace_function aghi %r15,-STACK_FRAME_SIZE stg %r1,(STACK_PTREGS_GPRS+15*8)(%r15) aghi %r1,-TRACED_FUNC_FRAME_SIZE stg %r1,__SF_BACKCHAIN(%r15) stg %r0,(STACK_PTREGS_PSW+8)(%r15) stmg %r2,%r14,(STACK_PTREGS_GPRS+2*8)(%r15) #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES aghik %r2,%r0,-MCOUNT_INSN_SIZE lgrl %r4,function_trace_op lgrl %r1,ftrace_trace_function #else lgr %r2,%r0 aghi %r2,-MCOUNT_INSN_SIZE larl %r4,function_trace_op lg %r4,0(%r4) larl %r1,ftrace_trace_function lg %r1,0(%r1) #endif lgr %r3,%r14 la %r5,STACK_PTREGS(%r15) BASR_EX %r14,%r1 #ifdef CONFIG_FUNCTION_GRAPH_TRACER # The j instruction gets runtime patched to a nop instruction. # See ftrace_enable_ftrace_graph_caller. ENTRY(ftrace_graph_caller) j ftrace_graph_caller_end lg %r2,(STACK_PTREGS_GPRS+14*8)(%r15) lg %r3,(STACK_PTREGS_PSW+8)(%r15) brasl %r14,prepare_ftrace_return stg %r2,(STACK_PTREGS_GPRS+14*8)(%r15) ftrace_graph_caller_end: .globl ftrace_graph_caller_end #endif lg %r1,(STACK_PTREGS_PSW+8)(%r15) lmg %r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15) BR_EX %r1 #ifdef CONFIG_FUNCTION_GRAPH_TRACER ENTRY(return_to_handler) stmg %r2,%r5,32(%r15) lgr %r1,%r15 aghi %r15,-STACK_FRAME_OVERHEAD stg %r1,__SF_BACKCHAIN(%r15) brasl %r14,ftrace_return_to_handler aghi %r15,STACK_FRAME_OVERHEAD lgr %r14,%r2 lmg %r2,%r5,32(%r15) BR_EX %r14 #endif
AirFortressIlikara/LS2K0300-linux-4.19
3,257
arch/s390/kernel/vmlinux.lds.S
/* SPDX-License-Identifier: GPL-2.0 */ /* ld script to make s390 Linux kernel * Written by Martin Schwidefsky (schwidefsky@de.ibm.com) */ #include <asm/thread_info.h> #include <asm/page.h> /* * Put .bss..swapper_pg_dir as the first thing in .bss. This will * make sure it has 16k alignment. */ #define BSS_FIRST_SECTIONS *(.bss..swapper_pg_dir) /* Handle ro_after_init data on our own. */ #define RO_AFTER_INIT_DATA #include <asm-generic/vmlinux.lds.h> OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390") OUTPUT_ARCH(s390:64-bit) ENTRY(startup_continue) jiffies = jiffies_64; PHDRS { text PT_LOAD FLAGS(5); /* R_E */ data PT_LOAD FLAGS(7); /* RWE */ note PT_NOTE FLAGS(0); /* ___ */ } SECTIONS { . = 0x100000; .text : { _stext = .; /* Start of text section */ _text = .; /* Text and read-only data */ HEAD_TEXT TEXT_TEXT SCHED_TEXT CPUIDLE_TEXT LOCK_TEXT KPROBES_TEXT IRQENTRY_TEXT SOFTIRQENTRY_TEXT *(.text.*_indirect_*) *(.fixup) *(.gnu.warning) . = ALIGN(PAGE_SIZE); _etext = .; /* End of text section */ } :text = 0x0700 NOTES :text :note .dummy : { *(.dummy) } :data RO_DATA_SECTION(PAGE_SIZE) . = ALIGN(PAGE_SIZE); _sdata = .; /* Start of data section */ . = ALIGN(PAGE_SIZE); __start_ro_after_init = .; .data..ro_after_init : { *(.data..ro_after_init) } EXCEPTION_TABLE(16) . = ALIGN(PAGE_SIZE); __end_ro_after_init = .; RW_DATA_SECTION(0x100, PAGE_SIZE, THREAD_SIZE) _edata = .; /* End of data section */ /* will be freed after init */ . = ALIGN(PAGE_SIZE); /* Init code and data */ __init_begin = .; . = ALIGN(PAGE_SIZE); .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { _sinittext = .; INIT_TEXT . = ALIGN(PAGE_SIZE); _einittext = .; } /* * .exit.text is discarded at runtime, not link time, * to deal with references from __bug_table */ .exit.text : { EXIT_TEXT } .exit.data : { EXIT_DATA } /* * struct alt_inst entries. From the header (alternative.h): * "Alternative instructions for different CPU types or capabilities" * Think locking instructions on spinlocks. * Note, that it is a part of __init region. */ . = ALIGN(8); .altinstructions : { __alt_instructions = .; *(.altinstructions) __alt_instructions_end = .; } /* * And here are the replacement instructions. The linker sticks * them as binary blobs. The .altinstructions has enough data to * get the address and the length of them to patch the kernel safely. * Note, that it is a part of __init region. */ .altinstr_replacement : { *(.altinstr_replacement) } /* * Table with the patch locations to undo expolines */ .nospec_call_table : { __nospec_call_start = . ; *(.s390_indirect*) __nospec_call_end = . ; } .nospec_return_table : { __nospec_return_start = . ; *(.s390_return*) __nospec_return_end = . ; } /* early.c uses stsi, which requires page aligned data. */ . = ALIGN(PAGE_SIZE); INIT_DATA_SECTION(0x100) PERCPU_SECTION(0x100) . = ALIGN(PAGE_SIZE); __init_end = .; /* freed after init ends here */ BSS_SECTION(PAGE_SIZE, 4 * PAGE_SIZE, PAGE_SIZE) _end = . ; /* Debugging sections. */ STABS_DEBUG DWARF_DEBUG /* Sections to be discarded */ DISCARDS /DISCARD/ : { *(.eh_frame) } }
AirFortressIlikara/LS2K0300-linux-4.19
6,879
arch/s390/kernel/swsusp.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * S390 64-bit swsusp implementation * * Copyright IBM Corp. 2009 * * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com> * Michael Holzheu <holzheu@linux.vnet.ibm.com> */ #include <linux/linkage.h> #include <asm/page.h> #include <asm/ptrace.h> #include <asm/thread_info.h> #include <asm/asm-offsets.h> #include <asm/nospec-insn.h> #include <asm/sigp.h> /* * Save register context in absolute 0 lowcore and call swsusp_save() to * create in-memory kernel image. The context is saved in the designated * "store status" memory locations (see POP). * We return from this function twice. The first time during the suspend to * disk process. The second time via the swsusp_arch_resume() function * (see below) in the resume process. * This function runs with disabled interrupts. */ GEN_BR_THUNK %r14 .section .text ENTRY(swsusp_arch_suspend) stmg %r6,%r15,__SF_GPRS(%r15) lgr %r1,%r15 aghi %r15,-STACK_FRAME_OVERHEAD stg %r1,__SF_BACKCHAIN(%r15) /* Store FPU registers */ brasl %r14,save_fpu_regs /* Deactivate DAT */ stnsm __SF_EMPTY(%r15),0xfb /* Store prefix register on stack */ stpx __SF_EMPTY(%r15) /* Save prefix register contents for lowcore copy */ llgf %r10,__SF_EMPTY(%r15) /* Get pointer to save area */ lghi %r1,0x1000 /* Save CPU address */ stap __LC_EXT_CPU_ADDR(%r0) /* Store registers */ mvc 0x318(4,%r1),__SF_EMPTY(%r15) /* move prefix to lowcore */ stam %a0,%a15,0x340(%r1) /* store access registers */ stctg %c0,%c15,0x380(%r1) /* store control registers */ stmg %r0,%r15,0x280(%r1) /* store general registers */ stpt 0x328(%r1) /* store timer */ stck __SF_EMPTY(%r15) /* store clock */ stckc 0x330(%r1) /* store clock comparator */ /* Update cputime accounting before going to sleep */ lg %r0,__LC_LAST_UPDATE_TIMER slg %r0,0x328(%r1) alg %r0,__LC_SYSTEM_TIMER stg %r0,__LC_SYSTEM_TIMER mvc __LC_LAST_UPDATE_TIMER(8),0x328(%r1) lg %r0,__LC_LAST_UPDATE_CLOCK slg %r0,__SF_EMPTY(%r15) alg %r0,__LC_STEAL_TIMER stg %r0,__LC_STEAL_TIMER mvc __LC_LAST_UPDATE_CLOCK(8),__SF_EMPTY(%r15) /* Activate DAT */ stosm __SF_EMPTY(%r15),0x04 /* Set prefix page to zero */ xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15) spx __SF_EMPTY(%r15) /* Save absolute zero pages */ larl %r2,suspend_zero_pages lg %r2,0(%r2) lghi %r4,0 lghi %r3,2*PAGE_SIZE lghi %r5,2*PAGE_SIZE 1: mvcle %r2,%r4,0 jo 1b /* Copy lowcore to absolute zero lowcore */ lghi %r2,0 lgr %r4,%r10 lghi %r3,2*PAGE_SIZE lghi %r5,2*PAGE_SIZE 1: mvcle %r2,%r4,0 jo 1b /* Save image */ brasl %r14,swsusp_save /* Restore prefix register and return */ lghi %r1,0x1000 spx 0x318(%r1) lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) lghi %r2,0 BR_EX %r14 /* * Restore saved memory image to correct place and restore register context. * Then we return to the function that called swsusp_arch_suspend(). * swsusp_arch_resume() runs with disabled interrupts. 
*/ ENTRY(swsusp_arch_resume) stmg %r6,%r15,__SF_GPRS(%r15) lgr %r1,%r15 aghi %r15,-STACK_FRAME_OVERHEAD stg %r1,__SF_BACKCHAIN(%r15) /* Make all free pages stable */ lghi %r2,1 brasl %r14,arch_set_page_states /* Deactivate DAT */ stnsm __SF_EMPTY(%r15),0xfb /* Set prefix page to zero */ xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15) spx __SF_EMPTY(%r15) /* Restore saved image */ larl %r1,restore_pblist lg %r1,0(%r1) ltgr %r1,%r1 jz 2f 0: lg %r2,8(%r1) lg %r4,0(%r1) iske %r0,%r4 lghi %r3,PAGE_SIZE lghi %r5,PAGE_SIZE 1: mvcle %r2,%r4,0 jo 1b lg %r2,8(%r1) sske %r0,%r2 lg %r1,16(%r1) ltgr %r1,%r1 jnz 0b 2: ptlb /* flush tlb */ /* Reset System */ larl %r1,restart_entry larl %r2,.Lrestart_diag308_psw og %r1,0(%r2) stg %r1,0(%r0) larl %r1,.Lnew_pgm_check_psw epsw %r2,%r3 stm %r2,%r3,0(%r1) mvc __LC_PGM_NEW_PSW(16,%r0),0(%r1) lghi %r0,0 diag %r0,%r0,0x308 restart_entry: lhi %r1,1 sigp %r1,%r0,SIGP_SET_ARCHITECTURE sam64 #ifdef CONFIG_SMP larl %r1,smp_cpu_mt_shift icm %r1,15,0(%r1) jz smt_done llgfr %r1,%r1 smt_loop: sigp %r1,%r0,SIGP_SET_MULTI_THREADING brc 8,smt_done /* accepted */ brc 2,smt_loop /* busy, try again */ smt_done: #endif larl %r1,.Lnew_pgm_check_psw lpswe 0(%r1) pgm_check_entry: /* Switch to original suspend CPU */ larl %r1,.Lresume_cpu /* Resume CPU address: r2 */ stap 0(%r1) llgh %r2,0(%r1) llgh %r1,__LC_EXT_CPU_ADDR(%r0) /* Suspend CPU address: r1 */ cgr %r1,%r2 je restore_registers /* r1 = r2 -> nothing to do */ larl %r4,.Lrestart_suspend_psw /* Set new restart PSW */ mvc __LC_RST_NEW_PSW(16,%r0),0(%r4) 3: sigp %r9,%r1,SIGP_INITIAL_CPU_RESET /* sigp initial cpu reset */ brc 8,4f /* accepted */ brc 2,3b /* busy, try again */ /* Suspend CPU not available -> panic */ larl %r15,init_thread_union aghi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER) aghi %r15,-STACK_FRAME_OVERHEAD larl %r2,.Lpanic_string brasl %r14,sclp_early_printk_force larl %r3,.Ldisabled_wait_31 lpsw 0(%r3) 4: /* Switch to suspend CPU */ sigp %r9,%r1,SIGP_RESTART /* sigp restart to suspend CPU */ brc 2,4b /* busy, try again */ 5: sigp %r9,%r2,SIGP_STOP /* sigp stop to current resume CPU */ brc 2,5b /* busy, try again */ 6: j 6b restart_suspend: larl %r1,.Lresume_cpu llgh %r2,0(%r1) 7: sigp %r9,%r2,SIGP_SENSE /* sigp sense, wait for resume CPU */ brc 8,7b /* accepted, status 0, still running */ brc 2,7b /* busy, try again */ tmll %r9,0x40 /* Test if resume CPU is stopped */ jz 7b restore_registers: /* Restore registers */ lghi %r13,0x1000 /* %r1 = pointer to save area */ /* Ignore time spent in suspended state. 
*/ llgf %r1,0x318(%r13) stck __LC_LAST_UPDATE_CLOCK(%r1) spt 0x328(%r13) /* reprogram timer */ //sckc 0x330(%r13) /* set clock comparator */ lctlg %c0,%c15,0x380(%r13) /* load control registers */ lam %a0,%a15,0x340(%r13) /* load access registers */ /* Load old stack */ lg %r15,0x2f8(%r13) /* Save prefix register */ mvc __SF_EMPTY(4,%r15),0x318(%r13) /* Restore absolute zero pages */ lghi %r2,0 larl %r4,suspend_zero_pages lg %r4,0(%r4) lghi %r3,2*PAGE_SIZE lghi %r5,2*PAGE_SIZE 1: mvcle %r2,%r4,0 jo 1b /* Restore prefix register */ spx __SF_EMPTY(%r15) /* Activate DAT */ stosm __SF_EMPTY(%r15),0x04 /* Make all free pages unstable */ lghi %r2,0 brasl %r14,arch_set_page_states /* Call arch specific early resume code */ brasl %r14,s390_early_resume /* Return 0 */ lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) lghi %r2,0 BR_EX %r14 .section .data..nosave,"aw",@progbits .align 8 .Ldisabled_wait_31: .long 0x000a0000,0x00000000 .Lpanic_string: .asciz "Resume not possible because suspend CPU is no longer available\n" .align 8 .Lrestart_diag308_psw: .long 0x00080000,0x80000000 .Lrestart_suspend_psw: .quad 0x0000000180000000,restart_suspend .Lnew_pgm_check_psw: .quad 0,pgm_check_entry .Lresume_cpu: .byte 0,0
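The resume path above restores the saved image by walking restore_pblist, copying each saved page back to its original location (and re-applying the storage key with iske/sske, which the sketch omits). The list element layout is inferred from the 0/8/16 offsets used in that loop and matches the kernel's struct pbe; treat the C below as a conceptual analogue under that assumption.

#include <stddef.h>
#include <string.h>

struct pbe {
    void       *address;       /* saved copy of the page (offset 0)  */
    void       *orig_address;  /* where the page belongs (offset 8)  */
    struct pbe *next;          /* next list element (offset 16)      */
};

/* Copy every saved page back to its original frame. */
static void restore_image_sketch(struct pbe *restore_pblist, size_t page_size)
{
    for (struct pbe *p = restore_pblist; p; p = p->next)
        memcpy(p->orig_address, p->address, page_size);
}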
AirFortressIlikara/LS2K0300-linux-4.19
1,828
arch/s390/kernel/reipl.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright IBM Corp 2000, 2011 * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>, * Denis Joseph Barrow, */ #include <linux/linkage.h> #include <asm/asm-offsets.h> #include <asm/nospec-insn.h> #include <asm/sigp.h> GEN_BR_THUNK %r9 # # Issue "store status" for the current CPU to its prefix page # and call passed function afterwards # # r2 = Function to be called after store status # r3 = Parameter for function # ENTRY(store_status) /* Save register one and load save area base */ stg %r1,__LC_SAVE_AREA_RESTART /* General purpose registers */ lghi %r1,__LC_GPREGS_SAVE_AREA stmg %r0,%r15,0(%r1) mvc 8(8,%r1),__LC_SAVE_AREA_RESTART /* Control registers */ lghi %r1,__LC_CREGS_SAVE_AREA stctg %c0,%c15,0(%r1) /* Access registers */ lghi %r1,__LC_AREGS_SAVE_AREA stam %a0,%a15,0(%r1) /* Floating point registers */ lghi %r1,__LC_FPREGS_SAVE_AREA std %f0, 0x00(%r1) std %f1, 0x08(%r1) std %f2, 0x10(%r1) std %f3, 0x18(%r1) std %f4, 0x20(%r1) std %f5, 0x28(%r1) std %f6, 0x30(%r1) std %f7, 0x38(%r1) std %f8, 0x40(%r1) std %f9, 0x48(%r1) std %f10,0x50(%r1) std %f11,0x58(%r1) std %f12,0x60(%r1) std %f13,0x68(%r1) std %f14,0x70(%r1) std %f15,0x78(%r1) /* Floating point control register */ lghi %r1,__LC_FP_CREG_SAVE_AREA stfpc 0(%r1) /* CPU timer */ lghi %r1,__LC_CPU_TIMER_SAVE_AREA stpt 0(%r1) /* Store prefix register */ lghi %r1,__LC_PREFIX_SAVE_AREA stpx 0(%r1) /* Clock comparator - seven bytes */ lghi %r1,__LC_CLOCK_COMP_SAVE_AREA larl %r4,.Lclkcmp stckc 0(%r4) mvc 1(7,%r1),1(%r4) /* Program status word */ lghi %r1,__LC_PSW_SAVE_AREA epsw %r4,%r5 st %r4,0(%r1) st %r5,4(%r1) stg %r2,8(%r1) lgr %r9,%r2 lgr %r2,%r3 BR_EX %r9 .section .bss .align 8 .Lclkcmp: .quad 0x0000000000000000 .previous
AirFortressIlikara/LS2K0300-linux-4.19
6,239
arch/s390/crypto/crc32be-vx.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Hardware-accelerated CRC-32 variants for Linux on z Systems * * Use the z/Architecture Vector Extension Facility to accelerate the * computing of CRC-32 checksums. * * This CRC-32 implementation algorithm processes the most-significant * bit first (BE). * * Copyright IBM Corp. 2015 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> */ #include <linux/linkage.h> #include <asm/nospec-insn.h> #include <asm/vx-insn.h> /* Vector register range containing CRC-32 constants */ #define CONST_R1R2 %v9 #define CONST_R3R4 %v10 #define CONST_R5 %v11 #define CONST_R6 %v12 #define CONST_RU_POLY %v13 #define CONST_CRC_POLY %v14 .data .align 8 /* * The CRC-32 constant block contains reduction constants to fold and * process particular chunks of the input data stream in parallel. * * For the CRC-32 variants, the constants are precomputed according to * these defintions: * * R1 = x4*128+64 mod P(x) * R2 = x4*128 mod P(x) * R3 = x128+64 mod P(x) * R4 = x128 mod P(x) * R5 = x96 mod P(x) * R6 = x64 mod P(x) * * Barret reduction constant, u, is defined as floor(x**64 / P(x)). * * where P(x) is the polynomial in the normal domain and the P'(x) is the * polynomial in the reversed (bitreflected) domain. * * Note that the constant definitions below are extended in order to compute * intermediate results with a single VECTOR GALOIS FIELD MULTIPLY instruction. * The righmost doubleword can be 0 to prevent contribution to the result or * can be multiplied by 1 to perform an XOR without the need for a separate * VECTOR EXCLUSIVE OR instruction. * * CRC-32 (IEEE 802.3 Ethernet, ...) polynomials: * * P(x) = 0x04C11DB7 * P'(x) = 0xEDB88320 */ .Lconstants_CRC_32_BE: .quad 0x08833794c, 0x0e6228b11 # R1, R2 .quad 0x0c5b9cd4c, 0x0e8a45605 # R3, R4 .quad 0x0f200aa66, 1 << 32 # R5, x32 .quad 0x0490d678d, 1 # R6, 1 .quad 0x104d101df, 0 # u .quad 0x104C11DB7, 0 # P(x) .previous GEN_BR_THUNK %r14 .text /* * The CRC-32 function(s) use these calling conventions: * * Parameters: * * %r2: Initial CRC value, typically ~0; and final CRC (return) value. * %r3: Input buffer pointer, performance might be improved if the * buffer is on a doubleword boundary. * %r4: Length of the buffer, must be 64 bytes or greater. * * Register usage: * * %r5: CRC-32 constant pool base pointer. * V0: Initial CRC value and intermediate constants and results. * V1..V4: Data for CRC computation. * V5..V8: Next data chunks that are fetched from the input buffer. * * V9..V14: CRC-32 constants. */ ENTRY(crc32_be_vgfm_16) /* Load CRC-32 constants */ larl %r5,.Lconstants_CRC_32_BE VLM CONST_R1R2,CONST_CRC_POLY,0,%r5 /* Load the initial CRC value into the leftmost word of V0. */ VZERO %v0 VLVGF %v0,%r2,0 /* Load a 64-byte data chunk and XOR with CRC */ VLM %v1,%v4,0,%r3 /* 64-bytes into V1..V4 */ VX %v1,%v0,%v1 /* V1 ^= CRC */ aghi %r3,64 /* BUF = BUF + 64 */ aghi %r4,-64 /* LEN = LEN - 64 */ /* Check remaining buffer size and jump to proper folding method */ cghi %r4,64 jl .Lless_than_64bytes .Lfold_64bytes_loop: /* Load the next 64-byte data chunk into V5 to V8 */ VLM %v5,%v8,0,%r3 /* * Perform a GF(2) multiplication of the doublewords in V1 with * the reduction constants in V0. The intermediate result is * then folded (accumulated) with the next data chunk in V5 and * stored in V1. Repeat this step for the register contents * in V2, V3, and V4 respectively. 
*/ VGFMAG %v1,CONST_R1R2,%v1,%v5 VGFMAG %v2,CONST_R1R2,%v2,%v6 VGFMAG %v3,CONST_R1R2,%v3,%v7 VGFMAG %v4,CONST_R1R2,%v4,%v8 /* Adjust buffer pointer and length for next loop */ aghi %r3,64 /* BUF = BUF + 64 */ aghi %r4,-64 /* LEN = LEN - 64 */ cghi %r4,64 jnl .Lfold_64bytes_loop .Lless_than_64bytes: /* Fold V1 to V4 into a single 128-bit value in V1 */ VGFMAG %v1,CONST_R3R4,%v1,%v2 VGFMAG %v1,CONST_R3R4,%v1,%v3 VGFMAG %v1,CONST_R3R4,%v1,%v4 /* Check whether to continue with 64-bit folding */ cghi %r4,16 jl .Lfinal_fold .Lfold_16bytes_loop: VL %v2,0,,%r3 /* Load next data chunk */ VGFMAG %v1,CONST_R3R4,%v1,%v2 /* Fold next data chunk */ /* Adjust buffer pointer and size for folding next data chunk */ aghi %r3,16 aghi %r4,-16 /* Process remaining data chunks */ cghi %r4,16 jnl .Lfold_16bytes_loop .Lfinal_fold: /* * The R5 constant is used to fold a 128-bit value into an 96-bit value * that is XORed with the next 96-bit input data chunk. To use a single * VGFMG instruction, multiply the rightmost 64-bit with x^32 (1<<32) to * form an intermediate 96-bit value (with appended zeros) which is then * XORed with the intermediate reduction result. */ VGFMG %v1,CONST_R5,%v1 /* * Further reduce the remaining 96-bit value to a 64-bit value using a * single VGFMG, the rightmost doubleword is multiplied with 0x1. The * intermediate result is then XORed with the product of the leftmost * doubleword with R6. The result is a 64-bit value and is subject to * the Barret reduction. */ VGFMG %v1,CONST_R6,%v1 /* * The input values to the Barret reduction are the degree-63 polynomial * in V1 (R(x)), degree-32 generator polynomial, and the reduction * constant u. The Barret reduction result is the CRC value of R(x) mod * P(x). * * The Barret reduction algorithm is defined as: * * 1. T1(x) = floor( R(x) / x^32 ) GF2MUL u * 2. T2(x) = floor( T1(x) / x^32 ) GF2MUL P(x) * 3. C(x) = R(x) XOR T2(x) mod x^32 * * Note: To compensate the division by x^32, use the vector unpack * instruction to move the leftmost word into the leftmost doubleword * of the vector register. The rightmost doubleword is multiplied * with zero to not contribute to the intermedate results. */ /* T1(x) = floor( R(x) / x^32 ) GF2MUL u */ VUPLLF %v2,%v1 VGFMG %v2,CONST_RU_POLY,%v2 /* * Compute the GF(2) product of the CRC polynomial in VO with T1(x) in * V2 and XOR the intermediate result, T2(x), with the value in V1. * The final result is in the rightmost word of V2. */ VUPLLF %v2,%v2 VGFMAG %v2,CONST_CRC_POLY,%v2,%v1 .Ldone: VLGVF %r2,%v2,3 BR_EX %r14 .previous
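The vector routine above folds 64-byte chunks with VGFMAG and finishes with a Barrett reduction, but the value it produces is simply the MSB-first CRC-32 over P(x) = 0x04C11DB7. A bit-serial scalar reference such as the sketch below (not part of this file; the function name is made up) is handy for cross-checking the accelerated path on small buffers.

#include <stdint.h>
#include <stddef.h>

/* Bit-serial, most-significant-bit-first CRC-32 over P(x) = 0x04C11DB7.
 * crc is the running value (typically seeded with ~0); no final inversion
 * is applied here, matching the calling convention described above. */
static uint32_t crc32_be_ref(uint32_t crc, const unsigned char *buf, size_t len)
{
    while (len--) {
        crc ^= (uint32_t)*buf++ << 24;
        for (int i = 0; i < 8; i++)
            crc = (crc & 0x80000000u) ? (crc << 1) ^ 0x04C11DB7u
                                      : crc << 1;
    }
    return crc;
}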
AirFortressIlikara/LS2K0300-linux-4.19
7,876
arch/s390/crypto/crc32le-vx.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Hardware-accelerated CRC-32 variants for Linux on z Systems * * Use the z/Architecture Vector Extension Facility to accelerate the * computing of bitreflected CRC-32 checksums for IEEE 802.3 Ethernet * and Castagnoli. * * This CRC-32 implementation algorithm is bitreflected and processes * the least-significant bit first (Little-Endian). * * Copyright IBM Corp. 2015 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> */ #include <linux/linkage.h> #include <asm/nospec-insn.h> #include <asm/vx-insn.h> /* Vector register range containing CRC-32 constants */ #define CONST_PERM_LE2BE %v9 #define CONST_R2R1 %v10 #define CONST_R4R3 %v11 #define CONST_R5 %v12 #define CONST_RU_POLY %v13 #define CONST_CRC_POLY %v14 .data .align 8 /* * The CRC-32 constant block contains reduction constants to fold and * process particular chunks of the input data stream in parallel. * * For the CRC-32 variants, the constants are precomputed according to * these definitions: * * R1 = [(x4*128+32 mod P'(x) << 32)]' << 1 * R2 = [(x4*128-32 mod P'(x) << 32)]' << 1 * R3 = [(x128+32 mod P'(x) << 32)]' << 1 * R4 = [(x128-32 mod P'(x) << 32)]' << 1 * R5 = [(x64 mod P'(x) << 32)]' << 1 * R6 = [(x32 mod P'(x) << 32)]' << 1 * * The bitreflected Barret reduction constant, u', is defined as * the bit reversal of floor(x**64 / P(x)). * * where P(x) is the polynomial in the normal domain and the P'(x) is the * polynomial in the reversed (bitreflected) domain. * * CRC-32 (IEEE 802.3 Ethernet, ...) polynomials: * * P(x) = 0x04C11DB7 * P'(x) = 0xEDB88320 * * CRC-32C (Castagnoli) polynomials: * * P(x) = 0x1EDC6F41 * P'(x) = 0x82F63B78 */ .Lconstants_CRC_32_LE: .octa 0x0F0E0D0C0B0A09080706050403020100 # BE->LE mask .quad 0x1c6e41596, 0x154442bd4 # R2, R1 .quad 0x0ccaa009e, 0x1751997d0 # R4, R3 .octa 0x163cd6124 # R5 .octa 0x1F7011641 # u' .octa 0x1DB710641 # P'(x) << 1 .Lconstants_CRC_32C_LE: .octa 0x0F0E0D0C0B0A09080706050403020100 # BE->LE mask .quad 0x09e4addf8, 0x740eef02 # R2, R1 .quad 0x14cd00bd6, 0xf20c0dfe # R4, R3 .octa 0x0dd45aab8 # R5 .octa 0x0dea713f1 # u' .octa 0x105ec76f0 # P'(x) << 1 .previous GEN_BR_THUNK %r14 .text /* * The CRC-32 functions use these calling conventions: * * Parameters: * * %r2: Initial CRC value, typically ~0; and final CRC (return) value. * %r3: Input buffer pointer, performance might be improved if the * buffer is on a doubleword boundary. * %r4: Length of the buffer, must be 64 bytes or greater. * * Register usage: * * %r5: CRC-32 constant pool base pointer. * V0: Initial CRC value and intermediate constants and results. * V1..V4: Data for CRC computation. * V5..V8: Next data chunks that are fetched from the input buffer. * V9: Constant for BE->LE conversion and shift operations * * V10..V14: CRC-32 constants. */ ENTRY(crc32_le_vgfm_16) larl %r5,.Lconstants_CRC_32_LE j crc32_le_vgfm_generic ENTRY(crc32c_le_vgfm_16) larl %r5,.Lconstants_CRC_32C_LE j crc32_le_vgfm_generic crc32_le_vgfm_generic: /* Load CRC-32 constants */ VLM CONST_PERM_LE2BE,CONST_CRC_POLY,0,%r5 /* * Load the initial CRC value. * * The CRC value is loaded into the rightmost word of the * vector register and is later XORed with the LSB portion * of the loaded input data. 
*/ VZERO %v0 /* Clear V0 */ VLVGF %v0,%r2,3 /* Load CRC into rightmost word */ /* Load a 64-byte data chunk and XOR with CRC */ VLM %v1,%v4,0,%r3 /* 64-bytes into V1..V4 */ VPERM %v1,%v1,%v1,CONST_PERM_LE2BE VPERM %v2,%v2,%v2,CONST_PERM_LE2BE VPERM %v3,%v3,%v3,CONST_PERM_LE2BE VPERM %v4,%v4,%v4,CONST_PERM_LE2BE VX %v1,%v0,%v1 /* V1 ^= CRC */ aghi %r3,64 /* BUF = BUF + 64 */ aghi %r4,-64 /* LEN = LEN - 64 */ cghi %r4,64 jl .Lless_than_64bytes .Lfold_64bytes_loop: /* Load the next 64-byte data chunk into V5 to V8 */ VLM %v5,%v8,0,%r3 VPERM %v5,%v5,%v5,CONST_PERM_LE2BE VPERM %v6,%v6,%v6,CONST_PERM_LE2BE VPERM %v7,%v7,%v7,CONST_PERM_LE2BE VPERM %v8,%v8,%v8,CONST_PERM_LE2BE /* * Perform a GF(2) multiplication of the doublewords in V1 with * the R1 and R2 reduction constants in V0. The intermediate result * is then folded (accumulated) with the next data chunk in V5 and * stored in V1. Repeat this step for the register contents * in V2, V3, and V4 respectively. */ VGFMAG %v1,CONST_R2R1,%v1,%v5 VGFMAG %v2,CONST_R2R1,%v2,%v6 VGFMAG %v3,CONST_R2R1,%v3,%v7 VGFMAG %v4,CONST_R2R1,%v4,%v8 aghi %r3,64 /* BUF = BUF + 64 */ aghi %r4,-64 /* LEN = LEN - 64 */ cghi %r4,64 jnl .Lfold_64bytes_loop .Lless_than_64bytes: /* * Fold V1 to V4 into a single 128-bit value in V1. Multiply V1 with R3 * and R4 and accumulating the next 128-bit chunk until a single 128-bit * value remains. */ VGFMAG %v1,CONST_R4R3,%v1,%v2 VGFMAG %v1,CONST_R4R3,%v1,%v3 VGFMAG %v1,CONST_R4R3,%v1,%v4 cghi %r4,16 jl .Lfinal_fold .Lfold_16bytes_loop: VL %v2,0,,%r3 /* Load next data chunk */ VPERM %v2,%v2,%v2,CONST_PERM_LE2BE VGFMAG %v1,CONST_R4R3,%v1,%v2 /* Fold next data chunk */ aghi %r3,16 aghi %r4,-16 cghi %r4,16 jnl .Lfold_16bytes_loop .Lfinal_fold: /* * Set up a vector register for byte shifts. The shift value must * be loaded in bits 1-4 in byte element 7 of a vector register. * Shift by 8 bytes: 0x40 * Shift by 4 bytes: 0x20 */ VLEIB %v9,0x40,7 /* * Prepare V0 for the next GF(2) multiplication: shift V0 by 8 bytes * to move R4 into the rightmost doubleword and set the leftmost * doubleword to 0x1. */ VSRLB %v0,CONST_R4R3,%v9 VLEIG %v0,1,0 /* * Compute GF(2) product of V1 and V0. The rightmost doubleword * of V1 is multiplied with R4. The leftmost doubleword of V1 is * multiplied by 0x1 and is then XORed with rightmost product. * Implicitly, the intermediate leftmost product becomes padded */ VGFMG %v1,%v0,%v1 /* * Now do the final 32-bit fold by multiplying the rightmost word * in V1 with R5 and XOR the result with the remaining bits in V1. * * To achieve this by a single VGFMAG, right shift V1 by a word * and store the result in V2 which is then accumulated. Use the * vector unpack instruction to load the rightmost half of the * doubleword into the rightmost doubleword element of V1; the other * half is loaded in the leftmost doubleword. * The vector register with CONST_R5 contains the R5 constant in the * rightmost doubleword and the leftmost doubleword is zero to ignore * the leftmost product of V1. */ VLEIB %v9,0x20,7 /* Shift by words */ VSRLB %v2,%v1,%v9 /* Store remaining bits in V2 */ VUPLLF %v1,%v1 /* Split rightmost doubleword */ VGFMAG %v1,CONST_R5,%v1,%v2 /* V1 = (V1 * R5) XOR V2 */ /* * Apply a Barret reduction to compute the final 32-bit CRC value. * * The input values to the Barret reduction are the degree-63 polynomial * in V1 (R(x)), degree-32 generator polynomial, and the reduction * constant u. The Barret reduction result is the CRC value of R(x) mod * P(x). * * The Barret reduction algorithm is defined as: * * 1. 
T1(x) = floor( R(x) / x^32 ) GF2MUL u * 2. T2(x) = floor( T1(x) / x^32 ) GF2MUL P(x) * 3. C(x) = R(x) XOR T2(x) mod x^32 * * Note: The leftmost doubleword of vector register containing * CONST_RU_POLY is zero and, thus, the intermediate GF(2) product * is zero and does not contribute to the final result. */ /* T1(x) = floor( R(x) / x^32 ) GF2MUL u */ VUPLLF %v2,%v1 VGFMG %v2,CONST_RU_POLY,%v2 /* * Compute the GF(2) product of the CRC polynomial with T1(x) in * V2 and XOR the intermediate result, T2(x), with the value in V1. * The final result is stored in word element 2 of V2. */ VUPLLF %v2,%v2 VGFMAG %v2,CONST_CRC_POLY,%v2,%v1 .Ldone: VLGVF %r2,%v2,2 BR_EX %r14 .previous
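For the bitreflected variants computed above, the scalar reference processes the least-significant bit first and uses the reflected polynomial P'(x) listed in the constant blocks: 0xEDB88320 for CRC-32 (IEEE) and 0x82F63B78 for CRC-32C (Castagnoli). The sketch below is a reference only, with a made-up name and the polynomial passed as a parameter.

#include <stdint.h>
#include <stddef.h>

/* Bit-serial, least-significant-bit-first (bitreflected) CRC-32.
 * poly is P'(x): 0xEDB88320 for IEEE CRC-32, 0x82F63B78 for CRC-32C.
 * As in the vector version, crc is typically seeded with ~0 and no
 * final inversion is applied here. */
static uint32_t crc32_le_ref(uint32_t crc, uint32_t poly,
                             const unsigned char *buf, size_t len)
{
    while (len--) {
        crc ^= *buf++;
        for (int i = 0; i < 8; i++)
            crc = (crc & 1u) ? (crc >> 1) ^ poly : crc >> 1;
    }
    return crc;
}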
AirFortressIlikara/LS2K0300-linux-4.19
3,592
arch/s390/lib/mem.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * String handling functions. * * Copyright IBM Corp. 2012 */ #include <linux/linkage.h> #include <asm/export.h> #include <asm/nospec-insn.h> GEN_BR_THUNK %r14 /* * void *memmove(void *dest, const void *src, size_t n) */ ENTRY(memmove) ltgr %r4,%r4 lgr %r1,%r2 jz .Lmemmove_exit aghi %r4,-1 clgr %r2,%r3 jnh .Lmemmove_forward la %r5,1(%r4,%r3) clgr %r2,%r5 jl .Lmemmove_reverse .Lmemmove_forward: srlg %r0,%r4,8 ltgr %r0,%r0 jz .Lmemmove_forward_remainder .Lmemmove_forward_loop: mvc 0(256,%r1),0(%r3) la %r1,256(%r1) la %r3,256(%r3) brctg %r0,.Lmemmove_forward_loop .Lmemmove_forward_remainder: larl %r5,.Lmemmove_mvc ex %r4,0(%r5) .Lmemmove_exit: BR_EX %r14 .Lmemmove_reverse: ic %r0,0(%r4,%r3) stc %r0,0(%r4,%r1) brctg %r4,.Lmemmove_reverse ic %r0,0(%r4,%r3) stc %r0,0(%r4,%r1) BR_EX %r14 .Lmemmove_mvc: mvc 0(1,%r1),0(%r3) EXPORT_SYMBOL(memmove) /* * memset implementation * * This code corresponds to the C construct below. We do distinguish * between clearing (c == 0) and setting a memory array (c != 0) simply * because nearly all memset invocations in the kernel clear memory and * the xc instruction is preferred in such cases. * * void *memset(void *s, int c, size_t n) * { * if (likely(c == 0)) * return __builtin_memset(s, 0, n); * return __builtin_memset(s, c, n); * } */ ENTRY(memset) ltgr %r4,%r4 jz .Lmemset_exit ltgr %r3,%r3 jnz .Lmemset_fill aghi %r4,-1 srlg %r3,%r4,8 ltgr %r3,%r3 lgr %r1,%r2 jz .Lmemset_clear_remainder .Lmemset_clear_loop: xc 0(256,%r1),0(%r1) la %r1,256(%r1) brctg %r3,.Lmemset_clear_loop .Lmemset_clear_remainder: larl %r3,.Lmemset_xc ex %r4,0(%r3) .Lmemset_exit: BR_EX %r14 .Lmemset_fill: cghi %r4,1 lgr %r1,%r2 je .Lmemset_fill_exit aghi %r4,-2 srlg %r5,%r4,8 ltgr %r5,%r5 jz .Lmemset_fill_remainder .Lmemset_fill_loop: stc %r3,0(%r1) mvc 1(255,%r1),0(%r1) la %r1,256(%r1) brctg %r5,.Lmemset_fill_loop .Lmemset_fill_remainder: stc %r3,0(%r1) larl %r5,.Lmemset_mvc ex %r4,0(%r5) BR_EX %r14 .Lmemset_fill_exit: stc %r3,0(%r1) BR_EX %r14 .Lmemset_xc: xc 0(1,%r1),0(%r1) .Lmemset_mvc: mvc 1(1,%r1),0(%r1) EXPORT_SYMBOL(memset) /* * memcpy implementation * * void *memcpy(void *dest, const void *src, size_t n) */ ENTRY(memcpy) ltgr %r4,%r4 jz .Lmemcpy_exit aghi %r4,-1 srlg %r5,%r4,8 ltgr %r5,%r5 lgr %r1,%r2 jnz .Lmemcpy_loop .Lmemcpy_remainder: larl %r5,.Lmemcpy_mvc ex %r4,0(%r5) .Lmemcpy_exit: BR_EX %r14 .Lmemcpy_loop: mvc 0(256,%r1),0(%r3) la %r1,256(%r1) la %r3,256(%r3) brctg %r5,.Lmemcpy_loop j .Lmemcpy_remainder .Lmemcpy_mvc: mvc 0(1,%r1),0(%r3) EXPORT_SYMBOL(memcpy) /* * __memset16/32/64 * * void *__memset16(uint16_t *s, uint16_t v, size_t count) * void *__memset32(uint32_t *s, uint32_t v, size_t count) * void *__memset64(uint64_t *s, uint64_t v, size_t count) */ .macro __MEMSET bits,bytes,insn ENTRY(__memset\bits) ltgr %r4,%r4 jz .L__memset_exit\bits cghi %r4,\bytes je .L__memset_store\bits aghi %r4,-(\bytes+1) srlg %r5,%r4,8 ltgr %r5,%r5 lgr %r1,%r2 jz .L__memset_remainder\bits .L__memset_loop\bits: \insn %r3,0(%r1) mvc \bytes(256-\bytes,%r1),0(%r1) la %r1,256(%r1) brctg %r5,.L__memset_loop\bits .L__memset_remainder\bits: \insn %r3,0(%r1) larl %r5,.L__memset_mvc\bits ex %r4,0(%r5) BR_EX %r14 .L__memset_store\bits: \insn %r3,0(%r2) .L__memset_exit\bits: BR_EX %r14 .L__memset_mvc\bits: mvc \bytes(1,%r1),0(%r1) .endm __MEMSET 16,2,sth EXPORT_SYMBOL(__memset16) __MEMSET 32,4,st EXPORT_SYMBOL(__memset32) __MEMSET 64,8,stg EXPORT_SYMBOL(__memset64)
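memmove above picks a forward mvc loop or a byte-wise reverse loop depending on whether the destination overlaps the tail of the source. The same decision in portable C looks roughly like the sketch below; it shows only the overlap logic, while the assembly additionally copies 256-byte blocks with mvc and finishes with an ex-patched remainder.

#include <stddef.h>
#include <stdint.h>

/* Reference shape of the overlap decision in memmove: copy forward unless
 * dest lies inside [src, src + n), in which case copy backwards. */
static void *memmove_ref(void *dest, const void *src, size_t n)
{
    unsigned char *d = dest;
    const unsigned char *s = src;

    if ((uintptr_t)d <= (uintptr_t)s || (uintptr_t)d >= (uintptr_t)s + n) {
        while (n--)
            *d++ = *s++;        /* forward copy is safe */
    } else {
        while (n--)
            d[n] = s[n];        /* overlapping tail: copy backwards */
    }
    return dest;
}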
AirFortressIlikara/LS2K0300-linux-4.19
6,288
arch/s390/purgatory/head.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Purgatory setup code * * Copyright IBM Corp. 2018 * * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com> */ #include <linux/linkage.h> #include <asm/asm-offsets.h> #include <asm/page.h> #include <asm/sigp.h> /* The purgatory is the code running between two kernels. It's main purpose * is to verify that the next kernel was not corrupted after load and to * start it. * * If the next kernel is a crash kernel there are some peculiarities to * consider: * * First the purgatory is called twice. Once only to verify the * sha digest. So if the crash kernel got corrupted the old kernel can try * to trigger a stand-alone dumper. And once to actually load the crash kernel. * * Second the purgatory also has to swap the crash memory region with its * destination at address 0. As the purgatory is part of crash memory this * requires some finesse. The tactic here is that the purgatory first copies * itself to the end of the destination and then swaps the rest of the * memory running from there. */ #define bufsz purgatory_end-stack .macro MEMCPY dst,src,len lgr %r0,\dst lgr %r1,\len lgr %r2,\src lgr %r3,\len 20: mvcle %r0,%r2,0 jo 20b .endm .macro MEMSWAP dst,src,buf,len 10: cghi \len,bufsz jh 11f lgr %r4,\len j 12f 11: lghi %r4,bufsz 12: MEMCPY \buf,\dst,%r4 MEMCPY \dst,\src,%r4 MEMCPY \src,\buf,%r4 agr \dst,%r4 agr \src,%r4 sgr \len,%r4 cghi \len,0 jh 10b .endm .macro START_NEXT_KERNEL base subcode lg %r4,kernel_entry-\base(%r13) lg %r5,load_psw_mask-\base(%r13) ogr %r4,%r5 stg %r4,0(%r0) xgr %r0,%r0 lghi %r1,\subcode diag %r0,%r1,0x308 .endm .text .align PAGE_SIZE ENTRY(purgatory_start) /* The purgatory might be called after a diag308 so better set * architecture and addressing mode. */ lhi %r1,1 sigp %r1,%r0,SIGP_SET_ARCHITECTURE sam64 larl %r5,gprregs stmg %r6,%r15,0(%r5) basr %r13,0 .base_crash: /* Setup stack */ larl %r15,purgatory_end aghi %r15,-160 /* If the next kernel is KEXEC_TYPE_CRASH the purgatory is called * directly with a flag passed in %r2 whether the purgatory shall do * checksum verification only (%r2 = 0 -> verification only). * * Check now and preserve over C function call by storing in * %r10 whith * 1 -> checksum verification only * 0 -> load new kernel */ lghi %r10,0 lg %r11,kernel_type-.base_crash(%r13) cghi %r11,1 /* KEXEC_TYPE_CRASH */ jne .do_checksum_verification cghi %r2,0 /* checksum verification only */ jne .do_checksum_verification lghi %r10,1 .do_checksum_verification: brasl %r14,verify_sha256_digest cghi %r10,1 /* checksum verification only */ je .return_old_kernel cghi %r2,0 /* checksum match */ jne .disabled_wait /* If the next kernel is a crash kernel the purgatory has to swap * the mem regions first. */ cghi %r11,1 /* KEXEC_TYPE_CRASH */ je .start_crash_kernel /* start normal kernel */ START_NEXT_KERNEL .base_crash 0 .return_old_kernel: lmg %r6,%r15,gprregs-.base_crash(%r13) br %r14 .disabled_wait: lpswe disabled_wait_psw-.base_crash(%r13) .start_crash_kernel: /* Location of purgatory_start in crash memory */ lgr %r8,%r13 aghi %r8,-(.base_crash-purgatory_start) /* Destination for this code i.e. end of memory to be swapped. */ lg %r9,crash_size-.base_crash(%r13) aghi %r9,-(purgatory_end-purgatory_start) /* Destination in crash memory, i.e. same as r9 but in crash memory. */ lg %r10,crash_start-.base_crash(%r13) agr %r10,%r9 /* Buffer location (in crash memory) and size. As the purgatory is * behind the point of no return it can re-use the stack as buffer. 
*/ lghi %r11,bufsz larl %r12,stack MEMCPY %r12,%r9,%r11 /* dst -> (crash) buf */ MEMCPY %r9,%r8,%r11 /* self -> dst */ /* Jump to new location. */ lgr %r7,%r9 aghi %r7,.jump_to_dst-purgatory_start br %r7 .jump_to_dst: basr %r13,0 .base_dst: /* clear buffer */ MEMCPY %r12,%r10,%r11 /* (crash) buf -> (crash) dst */ /* Load new buffer location after jump */ larl %r7,stack aghi %r10,stack-purgatory_start MEMCPY %r10,%r7,%r11 /* (new) buf -> (crash) buf */ /* Now the code is set up to run from its designated location. Start * swapping the rest of crash memory now. * * The registers will be used as follow: * * %r0-%r4 reserved for macros defined above * %r5-%r6 tmp registers * %r7 pointer to current struct sha region * %r8 index to iterate over all sha regions * %r9 pointer in crash memory * %r10 pointer in old kernel * %r11 total size (still) to be moved * %r12 pointer to buffer */ lgr %r12,%r7 lgr %r11,%r9 lghi %r10,0 lg %r9,crash_start-.base_dst(%r13) lghi %r8,16 /* KEXEC_SEGMENTS_MAX */ larl %r7,purgatory_sha_regions j .loop_first /* Loop over all purgatory_sha_regions. */ .loop_next: aghi %r8,-1 cghi %r8,0 je .loop_out aghi %r7,__KEXEC_SHA_REGION_SIZE .loop_first: lg %r5,__KEXEC_SHA_REGION_START(%r7) cghi %r5,0 je .loop_next /* Copy [end last sha region, start current sha region) */ /* Note: kexec_sha_region->start points in crash memory */ sgr %r5,%r9 MEMCPY %r9,%r10,%r5 agr %r9,%r5 agr %r10,%r5 sgr %r11,%r5 /* Swap sha region */ lg %r6,__KEXEC_SHA_REGION_LEN(%r7) MEMSWAP %r9,%r10,%r12,%r6 sg %r11,__KEXEC_SHA_REGION_LEN(%r7) j .loop_next .loop_out: /* Copy rest of crash memory */ MEMCPY %r9,%r10,%r11 /* start crash kernel */ START_NEXT_KERNEL .base_dst 1 load_psw_mask: .long 0x00080000,0x80000000 .align 8 disabled_wait_psw: .quad 0x0002000180000000 .quad 0x0000000000000000 + .do_checksum_verification gprregs: .rept 10 .quad 0 .endr /* Macro to define a global variable with name and size (in bytes) to be * shared with C code. * * Add the .size and .type attribute to satisfy checks on the Elf_Sym during * purgatory load. */ .macro GLOBAL_VARIABLE name,size \name: .global \name .size \name,\size .type \name,object .skip \size,0 .endm GLOBAL_VARIABLE purgatory_sha256_digest,32 GLOBAL_VARIABLE purgatory_sha_regions,16*__KEXEC_SHA_REGION_SIZE GLOBAL_VARIABLE kernel_entry,8 GLOBAL_VARIABLE kernel_type,8 GLOBAL_VARIABLE crash_start,8 GLOBAL_VARIABLE crash_size,8 .align PAGE_SIZE stack: /* The buffer to move this code must be as big as the code. */ .skip stack-purgatory_start .align PAGE_SIZE purgatory_end:
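The MEMSWAP macro above exchanges two equally-sized regions through a bounce buffer of at most bufsz bytes, which is how the purgatory can swap crash memory with its destination while executing out of that very memory. A C analogue of the chunked swap is sketched below; the function name and the explicit buffer parameter are illustrative, not taken from this file.

#include <stddef.h>
#include <string.h>

/* Swap two regions of len bytes chunk by chunk through buf (bufsz bytes),
 * mirroring the dst/src/buf/len roles of the MEMSWAP macro. */
static void memswap_sketch(unsigned char *dst, unsigned char *src,
                           unsigned char *buf, size_t bufsz, size_t len)
{
    while (len) {
        size_t n = len > bufsz ? bufsz : len;

        memcpy(buf, dst, n);    /* buf <- dst */
        memcpy(dst, src, n);    /* dst <- src */
        memcpy(src, buf, n);    /* src <- buf */
        dst += n;
        src += n;
        len -= n;
    }
}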
AirFortressIlikara/LS2K0300-linux-4.19
8,749
arch/s390/boot/head.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright IBM Corp. 1999, 2010 * * Author(s): Hartmut Penner <hp@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> * Rob van der Heij <rvdhei@iae.nl> * Heiko Carstens <heiko.carstens@de.ibm.com> * * There are 5 different IPL methods * 1) load the image directly into ram at address 0 and do an PSW restart * 2) linload will load the image from address 0x10000 to memory 0x10000 * and start the code thru LPSW 0x0008000080010000 (VM only, deprecated) * 3) generate the tape ipl header, store the generated image on a tape * and ipl from it * In case of SL tape you need to IPL 5 times to get past VOL1 etc * 4) generate the vm reader ipl header, move the generated image to the * VM reader (use option NOH!) and do a ipl from reader (VM only) * 5) direct call of start by the SALIPL loader * We use the cpuid to distinguish between VM and native ipl * params for kernel are pushed to 0x10400 (see setup.h) * */ #include <linux/init.h> #include <linux/linkage.h> #include <asm/asm-offsets.h> #include <asm/thread_info.h> #include <asm/page.h> #include <asm/ptrace.h> #define ARCH_OFFSET 4 __HEAD #define IPL_BS 0x730 .org 0 .long 0x00080000,0x80000000+iplstart # The first 24 bytes are loaded .long 0x02000018,0x60000050 # by ipl to addresses 0-23. .long 0x02000068,0x60000050 # (a PSW and two CCWs). .fill 80-24,1,0x40 # bytes 24-79 are discarded !! .long 0x020000f0,0x60000050 # The next 160 byte are loaded .long 0x02000140,0x60000050 # to addresses 0x18-0xb7 .long 0x02000190,0x60000050 # They form the continuation .long 0x020001e0,0x60000050 # of the CCW program started .long 0x02000230,0x60000050 # by ipl and load the range .long 0x02000280,0x60000050 # 0x0f0-0x730 from the image .long 0x020002d0,0x60000050 # to the range 0x0f0-0x730 .long 0x02000320,0x60000050 # in memory. At the end of .long 0x02000370,0x60000050 # the channel program the PSW .long 0x020003c0,0x60000050 # at location 0 is loaded. .long 0x02000410,0x60000050 # Initial processing starts .long 0x02000460,0x60000050 # at 0x200 = iplstart. .long 0x020004b0,0x60000050 .long 0x02000500,0x60000050 .long 0x02000550,0x60000050 .long 0x020005a0,0x60000050 .long 0x020005f0,0x60000050 .long 0x02000640,0x60000050 .long 0x02000690,0x60000050 .long 0x020006e0,0x20000050 .org 0x200 # # subroutine to wait for end I/O # .Lirqwait: mvc __LC_IO_NEW_PSW(16),.Lnewpsw # set up IO interrupt psw lpsw .Lwaitpsw .Lioint: br %r14 .align 8 .Lnewpsw: .quad 0x0000000080000000,.Lioint .Lwaitpsw: .long 0x020a0000,0x80000000+.Lioint # # subroutine for loading cards from the reader # .Lloader: la %r4,0(%r14) la %r3,.Lorb # r2 = address of orb into r2 la %r5,.Lirb # r4 = address of irb la %r6,.Lccws la %r7,20 .Linit: st %r2,4(%r6) # initialize CCW data addresses la %r2,0x50(%r2) la %r6,8(%r6) bct 7,.Linit lctl %c6,%c6,.Lcr6 # set IO subclass mask slr %r2,%r2 .Lldlp: ssch 0(%r3) # load chunk of 1600 bytes bnz .Llderr .Lwait4irq: bas %r14,.Lirqwait c %r1,__LC_SUBCHANNEL_ID # compare subchannel number bne .Lwait4irq tsch 0(%r5) slr %r0,%r0 ic %r0,8(%r5) # get device status chi %r0,8 # channel end ? be .Lcont chi %r0,12 # channel end + device end ? 
be .Lcont l %r0,4(%r5) s %r0,8(%r3) # r0/8 = number of ccws executed mhi %r0,10 # *10 = number of bytes in ccws lh %r3,10(%r5) # get residual count sr %r0,%r3 # #ccws*80-residual=#bytes read ar %r2,%r0 br %r4 # r2 contains the total size .Lcont: ahi %r2,0x640 # add 0x640 to total size la %r6,.Lccws la %r7,20 .Lincr: l %r0,4(%r6) # update CCW data addresses ahi %r0,0x640 st %r0,4(%r6) ahi %r6,8 bct 7,.Lincr b .Lldlp .Llderr: lpsw .Lcrash .align 8 .Lorb: .long 0x00000000,0x0080ff00,.Lccws .Lirb: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 .Lcr6: .long 0xff000000 .Lloadp:.long 0,0 .align 8 .Lcrash:.long 0x000a0000,0x00000000 .align 8 .Lccws: .rept 19 .long 0x02600050,0x00000000 .endr .long 0x02200050,0x00000000 iplstart: mvi __LC_AR_MODE_ID,1 # set esame flag slr %r0,%r0 # set cpuid to zero lhi %r1,2 # mode 2 = esame (dump) sigp %r1,%r0,0x12 # switch to esame mode bras %r13,0f .fill 16,4,0x0 0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs sam31 # switch to 31 bit addressing mode lh %r1,__LC_SUBCHANNEL_ID # test if subchannel number bct %r1,.Lnoload # is valid l %r1,__LC_SUBCHANNEL_ID # load ipl subchannel number la %r2,IPL_BS # load start address bas %r14,.Lloader # load rest of ipl image l %r12,.Lparm # pointer to parameter area st %r1,IPL_DEVICE+ARCH_OFFSET-PARMAREA(%r12) # save ipl device number # # load parameter file from ipl device # .Lagain1: l %r2,.Linitrd # ramdisk loc. is temp bas %r14,.Lloader # load parameter file ltr %r2,%r2 # got anything ? bz .Lnopf chi %r2,895 bnh .Lnotrunc la %r2,895 .Lnotrunc: l %r4,.Linitrd clc 0(3,%r4),.L_hdr # if it is HDRx bz .Lagain1 # skip dataset header clc 0(3,%r4),.L_eof # if it is EOFx bz .Lagain1 # skip dateset trailer la %r5,0(%r4,%r2) lr %r3,%r2 la %r3,COMMAND_LINE-PARMAREA(%r12) # load adr. of command line mvc 0(256,%r3),0(%r4) mvc 256(256,%r3),256(%r4) mvc 512(256,%r3),512(%r4) mvc 768(122,%r3),768(%r4) slr %r0,%r0 b .Lcntlp .Ldelspc: ic %r0,0(%r2,%r3) chi %r0,0x20 # is it a space ? be .Lcntlp ahi %r2,1 b .Leolp .Lcntlp: brct %r2,.Ldelspc .Leolp: slr %r0,%r0 stc %r0,0(%r2,%r3) # terminate buffer .Lnopf: # # load ramdisk from ipl device # .Lagain2: l %r2,.Linitrd # addr of ramdisk st %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12) bas %r14,.Lloader # load ramdisk st %r2,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r12) # store size of rd ltr %r2,%r2 bnz .Lrdcont st %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12) # no ramdisk found .Lrdcont: l %r2,.Linitrd clc 0(3,%r2),.L_hdr # skip HDRx and EOFx bz .Lagain2 clc 0(3,%r2),.L_eof bz .Lagain2 # # reset files in VM reader # stidp .Lcpuid # store cpuid tm .Lcpuid,0xff # running VM ? 
bno .Lnoreset la %r2,.Lreset lhi %r3,26 diag %r2,%r3,8 la %r5,.Lirb stsch 0(%r5) # check if irq is pending tm 30(%r5),0x0f # by verifying if any of the bnz .Lwaitforirq # activity or status control tm 31(%r5),0xff # bits is set in the schib bz .Lnoreset .Lwaitforirq: bas %r14,.Lirqwait # wait for IO interrupt c %r1,__LC_SUBCHANNEL_ID # compare subchannel number bne .Lwaitforirq la %r5,.Lirb tsch 0(%r5) .Lnoreset: b .Lnoload # # everything loaded, go for it # .Lnoload: l %r1,.Lstartup br %r1 .Linitrd:.long _end # default address of initrd .Lparm: .long PARMAREA .Lstartup: .long startup .Lreset:.byte 0xc3,0xc8,0xc1,0xd5,0xc7,0xc5,0x40,0xd9,0xc4,0xd9,0x40 .byte 0xc1,0xd3,0xd3,0x40,0xd2,0xc5,0xc5,0xd7,0x40,0xd5,0xd6 .byte 0xc8,0xd6,0xd3,0xc4 # "change rdr all keep nohold" .L_eof: .long 0xc5d6c600 /* C'EOF' */ .L_hdr: .long 0xc8c4d900 /* C'HDR' */ .align 8 .Lcpuid:.fill 8,1,0 # # startup-code at 0x10000, running in absolute addressing mode # this is called either by the ipl loader or directly by PSW restart # or linload or SALIPL # .org 0x10000 ENTRY(startup) j .Lep_startup_normal .org EP_OFFSET # # This is a list of s390 kernel entry points. At address 0x1000f the number of # valid entry points is stored. # # IMPORTANT: Do not change this table, it is s390 kernel ABI! # .ascii EP_STRING .byte 0x00,0x01 # # kdump startup-code at 0x10010, running in 64 bit absolute addressing mode # .org 0x10010 ENTRY(startup_kdump) j .Lep_startup_kdump .Lep_startup_normal: mvi __LC_AR_MODE_ID,1 # set esame flag slr %r0,%r0 # set cpuid to zero lhi %r1,2 # mode 2 = esame (dump) sigp %r1,%r0,0x12 # switch to esame mode bras %r13,0f .fill 16,4,0x0 0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs sam64 # switch to 64 bit addressing mode basr %r13,0 # get base .LPG0: xc 0x200(256),0x200 # partially clear lowcore xc 0x300(256),0x300 xc 0xe00(256),0xe00 xc 0xf00(256),0xf00 lctlg %c0,%c15,0x200(%r0) # initialize control registers stcke __LC_BOOT_CLOCK mvc __LC_LAST_UPDATE_CLOCK(8),__LC_BOOT_CLOCK+1 spt 6f-.LPG0(%r13) mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13) l %r15,.Lstack-.LPG0(%r13) ahi %r15,-STACK_FRAME_OVERHEAD brasl %r14,verify_facilities #ifdef CONFIG_KERNEL_UNCOMPRESSED jg startup_continue #else jg startup_decompressor #endif .Lstack: .long 0x8000 + (1<<(PAGE_SHIFT+THREAD_SIZE_ORDER)) .align 8 6: .long 0x7fffffff,0xffffffff #include "head_kdump.S" # # params at 10400 (setup.h) # .org PARMAREA .long 0,0 # IPL_DEVICE .long 0,0 # INITRD_START .long 0,0 # INITRD_SIZE .long 0,0 # OLDMEM_BASE .long 0,0 # OLDMEM_SIZE .org COMMAND_LINE .byte "root=/dev/ram0 ro" .byte 0 .org 0x11000
AirFortressIlikara/LS2K0300-linux-4.19
2,348
arch/s390/boot/head_kdump.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * S390 kdump lowlevel functions (new kernel) * * Copyright IBM Corp. 2011 * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com> */ #include <asm/sigp.h> #define DATAMOVER_ADDR 0x4000 #define COPY_PAGE_ADDR 0x6000 #ifdef CONFIG_CRASH_DUMP # # kdump entry (new kernel - not yet relocated) # # Note: This code has to be position independent # .align 2 .Lep_startup_kdump: lhi %r1,2 # mode 2 = esame (dump) sigp %r1,%r0,SIGP_SET_ARCHITECTURE # Switch to esame mode sam64 # Switch to 64 bit addressing basr %r13,0 .Lbase: larl %r2,.Lbase_addr # Check, if we have been lg %r2,0(%r2) # already relocated: clgr %r2,%r13 # jne .Lrelocate # No : Start data mover lghi %r2,0 # Yes: Start kdump kernel brasl %r14,startup_kdump_relocated .Lrelocate: larl %r4,startup lg %r2,0x418(%r4) # Get kdump base lg %r3,0x420(%r4) # Get kdump size larl %r10,.Lcopy_start # Source of data mover lghi %r8,DATAMOVER_ADDR # Target of data mover mvc 0(256,%r8),0(%r10) # Copy data mover code agr %r8,%r2 # Copy data mover to mvc 0(256,%r8),0(%r10) # reserved mem lghi %r14,DATAMOVER_ADDR # Jump to copied data mover basr %r14,%r14 .Lbase_addr: .quad .Lbase # # kdump data mover code (runs at address DATAMOVER_ADDR) # # r2: kdump base address # r3: kdump size # .Lcopy_start: basr %r13,0 # Base 0: lgr %r11,%r2 # Save kdump base address lgr %r12,%r2 agr %r12,%r3 # Compute kdump end address lghi %r5,0 lghi %r10,COPY_PAGE_ADDR # Load copy page address 1: mvc 0(256,%r10),0(%r5) # Copy old kernel to tmp mvc 0(256,%r5),0(%r11) # Copy new kernel to old mvc 0(256,%r11),0(%r10) # Copy tmp to new aghi %r11,256 aghi %r5,256 clgr %r11,%r12 jl 1b lg %r14,.Lstartup_kdump-0b(%r13) basr %r14,%r14 # Start relocated kernel .Lstartup_kdump: .long 0x00000000,0x00000000 + startup_kdump_relocated .Lcopy_end: # # Startup of kdump (relocated new kernel) # .align 2 startup_kdump_relocated: basr %r13,0 0: lpswe .Lrestart_psw-0b(%r13) # Start new kernel... .align 8 .Lrestart_psw: .quad 0x0000000080000000,0x0000000000000000 + startup #else .align 2 .Lep_startup_kdump: larl %r13,startup_kdump_crash lpswe 0(%r13) .align 8 startup_kdump_crash: .quad 0x0002000080000000,0x0000000000000000 + startup_kdump_crash #endif /* CONFIG_CRASH_DUMP */
AirFortressIlikara/LS2K0300-linux-4.19
4,434
arch/s390/kernel/vdso32/clock_gettime.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Userland implementation of clock_gettime() for 32 bits processes in a * s390 kernel for use in the vDSO * * Copyright IBM Corp. 2008 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) */ #include <asm/vdso.h> #include <asm/asm-offsets.h> #include <asm/unistd.h> #include <asm/dwarf.h> #include <asm/ptrace.h> .text .align 4 .globl __kernel_clock_gettime .type __kernel_clock_gettime,@function __kernel_clock_gettime: CFI_STARTPROC ahi %r15,-16 CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16 CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD basr %r5,0 0: al %r5,21f-0b(%r5) /* get &_vdso_data */ chi %r2,__CLOCK_REALTIME_COARSE je 10f chi %r2,__CLOCK_REALTIME je 11f chi %r2,__CLOCK_MONOTONIC_COARSE je 9f chi %r2,__CLOCK_MONOTONIC jne 19f /* CLOCK_MONOTONIC */ 1: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ tml %r4,0x0001 /* pending update ? loop */ jnz 1b stcke 0(%r15) /* Store TOD clock */ lm %r0,%r1,1(%r15) s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ sl %r1,__VDSO_XTIME_STAMP+4(%r5) brc 3,2f ahi %r0,-1 2: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */ lr %r2,%r0 l %r0,__VDSO_TK_MULT(%r5) ltr %r1,%r1 mr %r0,%r0 jnm 3f a %r0,__VDSO_TK_MULT(%r5) 3: alr %r0,%r2 al %r0,__VDSO_WTOM_NSEC(%r5) al %r1,__VDSO_WTOM_NSEC+4(%r5) brc 12,5f ahi %r0,1 5: l %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ srdl %r0,0(%r2) /* >> tk->shift */ l %r2,__VDSO_WTOM_SEC+4(%r5) cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ jne 1b basr %r5,0 6: ltr %r0,%r0 jnz 7f cl %r1,20f-6b(%r5) jl 8f 7: ahi %r2,1 sl %r1,20f-6b(%r5) brc 3,6b ahi %r0,-1 j 6b 8: st %r2,0(%r3) /* store tp->tv_sec */ st %r1,4(%r3) /* store tp->tv_nsec */ lhi %r2,0 ahi %r15,16 CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD CFI_RESTORE 15 br %r14 /* CLOCK_MONOTONIC_COARSE */ CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16 CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD 9: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ tml %r4,0x0001 /* pending update ? loop */ jnz 9b l %r2,__VDSO_WTOM_CRS_SEC+4(%r5) l %r1,__VDSO_WTOM_CRS_NSEC+4(%r5) cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ jne 9b j 8b /* CLOCK_REALTIME_COARSE */ 10: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ tml %r4,0x0001 /* pending update ? loop */ jnz 10b l %r2,__VDSO_XTIME_CRS_SEC+4(%r5) l %r1,__VDSO_XTIME_CRS_NSEC+4(%r5) cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ jne 10b j 17f /* CLOCK_REALTIME */ 11: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ tml %r4,0x0001 /* pending update ? loop */ jnz 11b stcke 0(%r15) /* Store TOD clock */ lm %r0,%r1,__VDSO_TS_END(%r5) /* TOD steering end time */ s %r0,1(%r15) /* no - ts_steering_end */ sl %r1,5(%r15) brc 3,22f ahi %r0,-1 22: ltr %r0,%r0 /* past end of steering? */ jm 24f srdl %r0,15 /* 1 per 2^16 */ tm __VDSO_TS_DIR+3(%r5),0x01 /* steering direction? 
*/ jz 23f lcr %r0,%r0 /* negative TOD offset */ lcr %r1,%r1 je 23f ahi %r0,-1 23: a %r0,1(%r15) /* add TOD timestamp */ al %r1,5(%r15) brc 12,25f ahi %r0,1 j 25f 24: lm %r0,%r1,1(%r15) /* load TOD timestamp */ 25: s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ sl %r1,__VDSO_XTIME_STAMP+4(%r5) brc 3,12f ahi %r0,-1 12: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */ lr %r2,%r0 l %r0,__VDSO_TK_MULT(%r5) ltr %r1,%r1 mr %r0,%r0 jnm 13f a %r0,__VDSO_TK_MULT(%r5) 13: alr %r0,%r2 al %r0,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */ al %r1,__VDSO_XTIME_NSEC+4(%r5) brc 12,14f ahi %r0,1 14: l %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ srdl %r0,0(%r2) /* >> tk->shift */ l %r2,__VDSO_XTIME_SEC+4(%r5) cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ jne 11b basr %r5,0 15: ltr %r0,%r0 jnz 16f cl %r1,20f-15b(%r5) jl 17f 16: ahi %r2,1 sl %r1,20f-15b(%r5) brc 3,15b ahi %r0,-1 j 15b 17: st %r2,0(%r3) /* store tp->tv_sec */ st %r1,4(%r3) /* store tp->tv_nsec */ lhi %r2,0 ahi %r15,16 CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD CFI_RESTORE 15 br %r14 /* Fallback to system call */ CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16 CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD 19: lhi %r1,__NR_clock_gettime svc 0 ahi %r15,16 CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD CFI_RESTORE 15 br %r14 CFI_ENDPROC 20: .long 1000000000 21: .long _vdso_data - 0b .size __kernel_clock_gettime,.-__kernel_clock_gettime
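Every clock path above follows the same lockless protocol against the vdso data page: spin while the update counter is odd (the kernel is mid-update), read the timekeeper fields, then retry if the counter changed underneath the read. A simplified C sketch of that reader loop (struct vdso_data_sketch and its field names are illustrative stand-ins for the real vdso data layout, and the memory barriers the real code relies on are omitted):

#include <stdint.h>

struct vdso_data_sketch {                       /* stand-in for the vdso data page */
        volatile uint64_t tb_update_count;      /* odd while the kernel updates it */
        uint64_t xtime_sec;
        uint64_t xtime_nsec;
};

/* Lockless reader: never blocks the kernel-side writer, just retries. */
static void read_xtime(const struct vdso_data_sketch *d,
                       uint64_t *sec, uint64_t *nsec)
{
        uint64_t seq;

        for (;;) {
                seq = d->tb_update_count;
                if (seq & 1)                    /* update in progress, try again */
                        continue;
                *sec  = d->xtime_sec;
                *nsec = d->xtime_nsec;
                if (seq == d->tb_update_count)  /* no update raced with us */
                        break;
        }
}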
AirFortressIlikara/LS2K0300-linux-4.19
1,031
arch/s390/kernel/vdso32/clock_getres.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Userland implementation of clock_getres() for 32 bits processes in a * s390 kernel for use in the vDSO * * Copyright IBM Corp. 2008 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) */ #include <asm/vdso.h> #include <asm/asm-offsets.h> #include <asm/unistd.h> #include <asm/dwarf.h> .text .align 4 .globl __kernel_clock_getres .type __kernel_clock_getres,@function __kernel_clock_getres: CFI_STARTPROC basr %r1,0 la %r1,4f-.(%r1) chi %r2,__CLOCK_REALTIME je 0f chi %r2,__CLOCK_MONOTONIC je 0f la %r1,5f-4f(%r1) chi %r2,__CLOCK_REALTIME_COARSE je 0f chi %r2,__CLOCK_MONOTONIC_COARSE jne 3f 0: ltr %r3,%r3 jz 2f /* res == NULL */ 1: l %r0,0(%r1) xc 0(4,%r3),0(%r3) /* set tp->tv_sec to zero */ st %r0,4(%r3) /* store tp->tv_nsec */ 2: lhi %r2,0 br %r14 3: lhi %r1,__NR_clock_getres /* fallback to svc */ svc 0 br %r14 CFI_ENDPROC 4: .long __CLOCK_REALTIME_RES 5: .long __CLOCK_COARSE_RES .size __kernel_clock_getres,.-__kernel_clock_getres
AirFortressIlikara/LS2K0300-linux-4.19
3,516
arch/s390/kernel/vdso32/vdso32.lds.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * This is the infamous ld script for the 32 bits vdso * library */ #include <asm/page.h> #include <asm/vdso.h> OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390") OUTPUT_ARCH(s390:31-bit) ENTRY(_start) SECTIONS { . = VDSO32_LBASE + SIZEOF_HEADERS; .hash : { *(.hash) } :text .gnu.hash : { *(.gnu.hash) } .dynsym : { *(.dynsym) } .dynstr : { *(.dynstr) } .gnu.version : { *(.gnu.version) } .gnu.version_d : { *(.gnu.version_d) } .gnu.version_r : { *(.gnu.version_r) } .note : { *(.note.*) } :text :note . = ALIGN(16); .text : { *(.text .stub .text.* .gnu.linkonce.t.*) } :text PROVIDE(__etext = .); PROVIDE(_etext = .); PROVIDE(etext = .); /* * Other stuff is appended to the text segment: */ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } .rodata1 : { *(.rodata1) } .dynamic : { *(.dynamic) } :text :dynamic .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr .eh_frame : { KEEP (*(.eh_frame)) } :text .gcc_except_table : { *(.gcc_except_table .gcc_except_table.*) } .rela.dyn ALIGN(8) : { *(.rela.dyn) } .got ALIGN(8) : { *(.got .toc) } _end = .; PROVIDE(end = .); /* * Stabs debugging sections are here too. */ .stab 0 : { *(.stab) } .stabstr 0 : { *(.stabstr) } .stab.excl 0 : { *(.stab.excl) } .stab.exclstr 0 : { *(.stab.exclstr) } .stab.index 0 : { *(.stab.index) } .stab.indexstr 0 : { *(.stab.indexstr) } .comment 0 : { *(.comment) } /* * DWARF debug sections. * Symbols in the DWARF debugging sections are relative to the * beginning of the section so we begin them at 0. */ /* DWARF 1 */ .debug 0 : { *(.debug) } .line 0 : { *(.line) } /* GNU DWARF 1 extensions */ .debug_srcinfo 0 : { *(.debug_srcinfo) } .debug_sfnames 0 : { *(.debug_sfnames) } /* DWARF 1.1 and DWARF 2 */ .debug_aranges 0 : { *(.debug_aranges) } .debug_pubnames 0 : { *(.debug_pubnames) } /* DWARF 2 */ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } .debug_abbrev 0 : { *(.debug_abbrev) } .debug_line 0 : { *(.debug_line) } .debug_frame 0 : { *(.debug_frame) } .debug_str 0 : { *(.debug_str) } .debug_loc 0 : { *(.debug_loc) } .debug_macinfo 0 : { *(.debug_macinfo) } /* SGI/MIPS DWARF 2 extensions */ .debug_weaknames 0 : { *(.debug_weaknames) } .debug_funcnames 0 : { *(.debug_funcnames) } .debug_typenames 0 : { *(.debug_typenames) } .debug_varnames 0 : { *(.debug_varnames) } /* DWARF 3 */ .debug_pubtypes 0 : { *(.debug_pubtypes) } .debug_ranges 0 : { *(.debug_ranges) } .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } . = ALIGN(PAGE_SIZE); PROVIDE(_vdso_data = .); /DISCARD/ : { *(.note.GNU-stack) *(.branch_lt) *(.data .data.* .gnu.linkonce.d.* .sdata*) *(.bss .sbss .dynbss .dynsbss) } } /* * Very old versions of ld do not recognize this name token; use the constant. */ #define PT_GNU_EH_FRAME 0x6474e550 /* * We must supply the ELF program headers explicitly to get just one * PT_LOAD segment, and set the flags explicitly to make segments read-only. */ PHDRS { text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ note PT_NOTE FLAGS(4); /* PF_R */ eh_frame_hdr PT_GNU_EH_FRAME; } /* * This controls what symbols we export from the DSO. */ VERSION { VDSO_VERSION_STRING { global: /* * Has to be there for the kernel to find */ __kernel_gettimeofday; __kernel_clock_gettime; __kernel_clock_getres; __kernel_getcpu; local: *; }; }
AirFortressIlikara/LS2K0300-linux-4.19
2,521
arch/s390/kernel/vdso32/gettimeofday.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Userland implementation of gettimeofday() for 32 bits processes in a * s390 kernel for use in the vDSO * * Copyright IBM Corp. 2008 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) */ #include <asm/vdso.h> #include <asm/asm-offsets.h> #include <asm/unistd.h> #include <asm/dwarf.h> #include <asm/ptrace.h> .text .align 4 .globl __kernel_gettimeofday .type __kernel_gettimeofday,@function __kernel_gettimeofday: CFI_STARTPROC ahi %r15,-16 CFI_ADJUST_CFA_OFFSET 16 CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD basr %r5,0 0: al %r5,13f-0b(%r5) /* get &_vdso_data */ 1: ltr %r3,%r3 /* check if tz is NULL */ je 2f mvc 0(8,%r3),__VDSO_TIMEZONE(%r5) 2: ltr %r2,%r2 /* check if tv is NULL */ je 10f l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ tml %r4,0x0001 /* pending update ? loop */ jnz 1b stcke 0(%r15) /* Store TOD clock */ lm %r0,%r1,__VDSO_TS_END(%r5) /* TOD steering end time */ s %r0,1(%r15) sl %r1,5(%r15) brc 3,14f ahi %r0,-1 14: ltr %r0,%r0 /* past end of steering? */ jm 16f srdl %r0,15 /* 1 per 2^16 */ tm __VDSO_TS_DIR+3(%r5),0x01 /* steering direction? */ jz 15f lcr %r0,%r0 /* negative TOD offset */ lcr %r1,%r1 je 15f ahi %r0,-1 15: a %r0,1(%r15) /* add TOD timestamp */ al %r1,5(%r15) brc 12,17f ahi %r0,1 j 17f 16: lm %r0,%r1,1(%r15) /* load TOD timestamp */ 17: s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ sl %r1,__VDSO_XTIME_STAMP+4(%r5) brc 3,3f ahi %r0,-1 3: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */ st %r0,0(%r15) l %r0,__VDSO_TK_MULT(%r5) ltr %r1,%r1 mr %r0,%r0 jnm 4f a %r0,__VDSO_TK_MULT(%r5) 4: al %r0,0(%r15) al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ al %r1,__VDSO_XTIME_NSEC+4(%r5) brc 12,5f ahi %r0,1 5: mvc 0(4,%r15),__VDSO_XTIME_SEC+4(%r5) cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ jne 1b l %r4,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ srdl %r0,0(%r4) /* >> tk->shift */ l %r4,0(%r15) /* get tv_sec from stack */ basr %r5,0 6: ltr %r0,%r0 jnz 7f cl %r1,11f-6b(%r5) jl 8f 7: ahi %r4,1 sl %r1,11f-6b(%r5) brc 3,6b ahi %r0,-1 j 6b 8: st %r4,0(%r2) /* store tv->tv_sec */ ltr %r1,%r1 m %r0,12f-6b(%r5) jnm 9f al %r0,12f-6b(%r5) 9: srl %r0,6 st %r0,4(%r2) /* store tv->tv_usec */ 10: slr %r2,%r2 ahi %r15,16 CFI_ADJUST_CFA_OFFSET -16 CFI_RESTORE 15 br %r14 CFI_ENDPROC 11: .long 1000000000 12: .long 274877907 13: .long _vdso_data - 0b .size __kernel_gettimeofday,.-__kernel_gettimeofday
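The tv_usec conversion above avoids a divide instruction: the remaining nanoseconds are multiplied by 274877907 (2^38 / 1000 rounded up) and the 64-bit product is shifted right by 38 bits in total (the assembly keeps the high word of the 32x32 multiply and then shifts by 6 more). A C sketch of the same trick; ns_to_us is an illustrative name, and the check loop merely spot-tests that the reciprocal multiply matches a true division for values below 10^9:

#include <stdint.h>
#include <assert.h>

/* Divide nanoseconds by 1000 via a scaled reciprocal: 2^38 / 1000 ~= 274877907. */
static uint32_t ns_to_us(uint32_t ns)
{
        return (uint32_t)(((uint64_t)ns * 274877907u) >> 38);
}

int main(void)
{
        for (uint32_t ns = 0; ns < 1000000000u; ns += 997u)
                assert(ns_to_us(ns) == ns / 1000u);
        return 0;
}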
AirFortressIlikara/LS2K0300-linux-4.19
4,543
arch/s390/kernel/vdso64/clock_gettime.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Userland implementation of clock_gettime() for 64 bits processes in a * s390 kernel for use in the vDSO * * Copyright IBM Corp. 2008 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) */ #include <asm/vdso.h> #include <asm/asm-offsets.h> #include <asm/unistd.h> #include <asm/dwarf.h> #include <asm/ptrace.h> .text .align 4 .globl __kernel_clock_gettime .type __kernel_clock_gettime,@function __kernel_clock_gettime: CFI_STARTPROC aghi %r15,-16 CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16 CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD larl %r5,_vdso_data cghi %r2,__CLOCK_REALTIME_COARSE je 4f cghi %r2,__CLOCK_REALTIME je 5f cghi %r2,-3 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */ je 9f cghi %r2,__CLOCK_MONOTONIC_COARSE je 3f cghi %r2,__CLOCK_MONOTONIC jne 12f /* CLOCK_MONOTONIC */ 0: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ tmll %r4,0x0001 /* pending update ? loop */ jnz 0b stcke 0(%r15) /* Store TOD clock */ lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ lg %r0,__VDSO_WTOM_SEC(%r5) lg %r1,1(%r15) sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */ alg %r1,__VDSO_WTOM_NSEC(%r5) srlg %r1,%r1,0(%r2) /* >> tk->shift */ clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ jne 0b larl %r5,13f 1: clg %r1,0(%r5) jl 2f slg %r1,0(%r5) aghi %r0,1 j 1b 2: stg %r0,0(%r3) /* store tp->tv_sec */ stg %r1,8(%r3) /* store tp->tv_nsec */ lghi %r2,0 aghi %r15,16 CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD CFI_RESTORE 15 br %r14 /* CLOCK_MONOTONIC_COARSE */ CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16 CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD 3: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ tmll %r4,0x0001 /* pending update ? loop */ jnz 3b lg %r0,__VDSO_WTOM_CRS_SEC(%r5) lg %r1,__VDSO_WTOM_CRS_NSEC(%r5) clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ jne 3b j 2b /* CLOCK_REALTIME_COARSE */ 4: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ tmll %r4,0x0001 /* pending update ? loop */ jnz 4b lg %r0,__VDSO_XTIME_CRS_SEC(%r5) lg %r1,__VDSO_XTIME_CRS_NSEC(%r5) clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ jne 4b j 7f /* CLOCK_REALTIME */ 5: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ tmll %r4,0x0001 /* pending update ? loop */ jnz 5b stcke 0(%r15) /* Store TOD clock */ lg %r1,1(%r15) lg %r0,__VDSO_TS_END(%r5) /* TOD steering end time */ slgr %r0,%r1 /* now - ts_steering_end */ ltgr %r0,%r0 /* past end of steering ? */ jm 17f srlg %r0,%r0,15 /* 1 per 2^16 */ tm __VDSO_TS_DIR+3(%r5),0x01 /* steering direction? 
*/ jz 18f lcgr %r0,%r0 /* negative TOD offset */ 18: algr %r1,%r0 /* add steering offset */ 17: lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */ alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */ srlg %r1,%r1,0(%r2) /* >> tk->shift */ lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */ clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ jne 5b larl %r5,13f 6: clg %r1,0(%r5) jl 7f slg %r1,0(%r5) aghi %r0,1 j 6b 7: stg %r0,0(%r3) /* store tp->tv_sec */ stg %r1,8(%r3) /* store tp->tv_nsec */ lghi %r2,0 aghi %r15,16 CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD CFI_RESTORE 15 br %r14 /* CPUCLOCK_VIRT for this thread */ CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16 CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD 9: lghi %r4,0 icm %r0,15,__VDSO_ECTG_OK(%r5) jz 12f sacf 256 /* Magic ectg instruction */ .insn ssf,0xc80100000000,__VDSO_ECTG_BASE(4),__VDSO_ECTG_USER(4),4 sacf 0 algr %r1,%r0 /* r1 = cputime as TOD value */ mghi %r1,1000 /* convert to nanoseconds */ srlg %r1,%r1,12 /* r1 = cputime in nanosec */ lgr %r4,%r1 larl %r5,13f srlg %r1,%r1,9 /* divide by 1000000000 */ mlg %r0,8(%r5) srlg %r0,%r0,11 /* r0 = tv_sec */ stg %r0,0(%r3) msg %r0,0(%r5) /* calculate tv_nsec */ slgr %r4,%r0 /* r4 = tv_nsec */ stg %r4,8(%r3) lghi %r2,0 aghi %r15,16 CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD CFI_RESTORE 15 br %r14 /* Fallback to system call */ CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16 CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD 12: lghi %r1,__NR_clock_gettime svc 0 aghi %r15,16 CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD CFI_RESTORE 15 br %r14 CFI_ENDPROC 13: .quad 1000000000 14: .quad 19342813113834067 .size __kernel_clock_gettime,.-__kernel_clock_gettime
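The CLOCK_REALTIME path above first applies TOD steering: while the current TOD value is still before the steering end time, an offset of (end - now) >> 15 clock units is added to or subtracted from the raw timestamp depending on the direction bit, so the correction decays linearly to zero instead of stepping the clock. A rough C sketch of that adjustment (apply_steering and its parameters are illustrative; the exact unit of the shifted offset follows the TOD clock format and is not spelled out here):

#include <stdint.h>

/* negative != 0 mirrors the __VDSO_TS_DIR bit tested above. */
static uint64_t apply_steering(uint64_t tod_now, uint64_t ts_end, int negative)
{
        uint64_t ofs;

        if (tod_now >= ts_end)          /* steering already finished */
                return tod_now;

        ofs = (ts_end - tod_now) >> 15; /* shrinks to 0 as now approaches ts_end */
        return negative ? tod_now - ofs : tod_now + ofs;
}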
AirFortressIlikara/LS2K0300-linux-4.19
3,516
arch/s390/kernel/vdso64/vdso64.lds.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * This is the infamous ld script for the 64 bits vdso * library */ #include <asm/page.h> #include <asm/vdso.h> OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390") OUTPUT_ARCH(s390:64-bit) ENTRY(_start) SECTIONS { . = VDSO64_LBASE + SIZEOF_HEADERS; .hash : { *(.hash) } :text .gnu.hash : { *(.gnu.hash) } .dynsym : { *(.dynsym) } .dynstr : { *(.dynstr) } .gnu.version : { *(.gnu.version) } .gnu.version_d : { *(.gnu.version_d) } .gnu.version_r : { *(.gnu.version_r) } .note : { *(.note.*) } :text :note . = ALIGN(16); .text : { *(.text .stub .text.* .gnu.linkonce.t.*) } :text PROVIDE(__etext = .); PROVIDE(_etext = .); PROVIDE(etext = .); /* * Other stuff is appended to the text segment: */ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } .rodata1 : { *(.rodata1) } .dynamic : { *(.dynamic) } :text :dynamic .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr .eh_frame : { KEEP (*(.eh_frame)) } :text .gcc_except_table : { *(.gcc_except_table .gcc_except_table.*) } .rela.dyn ALIGN(8) : { *(.rela.dyn) } .got ALIGN(8) : { *(.got .toc) } _end = .; PROVIDE(end = .); /* * Stabs debugging sections are here too. */ .stab 0 : { *(.stab) } .stabstr 0 : { *(.stabstr) } .stab.excl 0 : { *(.stab.excl) } .stab.exclstr 0 : { *(.stab.exclstr) } .stab.index 0 : { *(.stab.index) } .stab.indexstr 0 : { *(.stab.indexstr) } .comment 0 : { *(.comment) } /* * DWARF debug sections. * Symbols in the DWARF debugging sections are relative to the * beginning of the section so we begin them at 0. */ /* DWARF 1 */ .debug 0 : { *(.debug) } .line 0 : { *(.line) } /* GNU DWARF 1 extensions */ .debug_srcinfo 0 : { *(.debug_srcinfo) } .debug_sfnames 0 : { *(.debug_sfnames) } /* DWARF 1.1 and DWARF 2 */ .debug_aranges 0 : { *(.debug_aranges) } .debug_pubnames 0 : { *(.debug_pubnames) } /* DWARF 2 */ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } .debug_abbrev 0 : { *(.debug_abbrev) } .debug_line 0 : { *(.debug_line) } .debug_frame 0 : { *(.debug_frame) } .debug_str 0 : { *(.debug_str) } .debug_loc 0 : { *(.debug_loc) } .debug_macinfo 0 : { *(.debug_macinfo) } /* SGI/MIPS DWARF 2 extensions */ .debug_weaknames 0 : { *(.debug_weaknames) } .debug_funcnames 0 : { *(.debug_funcnames) } .debug_typenames 0 : { *(.debug_typenames) } .debug_varnames 0 : { *(.debug_varnames) } /* DWARF 3 */ .debug_pubtypes 0 : { *(.debug_pubtypes) } .debug_ranges 0 : { *(.debug_ranges) } .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } . = ALIGN(PAGE_SIZE); PROVIDE(_vdso_data = .); /DISCARD/ : { *(.note.GNU-stack) *(.branch_lt) *(.data .data.* .gnu.linkonce.d.* .sdata*) *(.bss .sbss .dynbss .dynsbss) } } /* * Very old versions of ld do not recognize this name token; use the constant. */ #define PT_GNU_EH_FRAME 0x6474e550 /* * We must supply the ELF program headers explicitly to get just one * PT_LOAD segment, and set the flags explicitly to make segments read-only. */ PHDRS { text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ note PT_NOTE FLAGS(4); /* PF_R */ eh_frame_hdr PT_GNU_EH_FRAME; } /* * This controls what symbols we export from the DSO. */ VERSION { VDSO_VERSION_STRING { global: /* * Has to be there for the kernel to find */ __kernel_gettimeofday; __kernel_clock_gettime; __kernel_clock_getres; __kernel_getcpu; local: *; }; }
AirFortressIlikara/LS2K0300-linux-4.19
1,201
arch/s390/kernel/vdso64/clock_getres.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Userland implementation of clock_getres() for 64 bits processes in a * s390 kernel for use in the vDSO * * Copyright IBM Corp. 2008 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) */ #include <asm/vdso.h> #include <asm/asm-offsets.h> #include <asm/unistd.h> #include <asm/dwarf.h> .text .align 4 .globl __kernel_clock_getres .type __kernel_clock_getres,@function __kernel_clock_getres: CFI_STARTPROC larl %r1,3f lg %r0,0(%r1) cghi %r2,__CLOCK_REALTIME_COARSE je 0f cghi %r2,__CLOCK_MONOTONIC_COARSE je 0f larl %r1,_vdso_data llgf %r0,__VDSO_CLOCK_REALTIME_RES(%r1) cghi %r2,__CLOCK_REALTIME je 0f cghi %r2,__CLOCK_MONOTONIC je 0f cghi %r2,__CLOCK_THREAD_CPUTIME_ID je 0f cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */ jne 2f larl %r5,_vdso_data icm %r0,15,__LC_ECTG_OK(%r5) jz 2f 0: ltgr %r3,%r3 jz 1f /* res == NULL */ xc 0(8,%r3),0(%r3) /* set tp->tv_sec to zero */ stg %r0,8(%r3) /* store tp->tv_nsec */ 1: lghi %r2,0 br %r14 2: lghi %r1,__NR_clock_getres /* fallback to svc */ svc 0 br %r14 CFI_ENDPROC 3: .quad __CLOCK_COARSE_RES .size __kernel_clock_getres,.-__kernel_clock_getres
AirFortressIlikara/LS2K0300-linux-4.19
2,030
arch/s390/kernel/vdso64/gettimeofday.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Userland implementation of gettimeofday() for 64 bits processes in a * s390 kernel for use in the vDSO * * Copyright IBM Corp. 2008 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) */ #include <asm/vdso.h> #include <asm/asm-offsets.h> #include <asm/unistd.h> #include <asm/dwarf.h> #include <asm/ptrace.h> .text .align 4 .globl __kernel_gettimeofday .type __kernel_gettimeofday,@function __kernel_gettimeofday: CFI_STARTPROC aghi %r15,-16 CFI_ADJUST_CFA_OFFSET 16 CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD larl %r5,_vdso_data 0: ltgr %r3,%r3 /* check if tz is NULL */ je 1f mvc 0(8,%r3),__VDSO_TIMEZONE(%r5) 1: ltgr %r2,%r2 /* check if tv is NULL */ je 4f lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ tmll %r4,0x0001 /* pending update ? loop */ jnz 0b stcke 0(%r15) /* Store TOD clock */ lg %r1,1(%r15) lg %r0,__VDSO_TS_END(%r5) /* TOD steering end time */ slgr %r0,%r1 /* now - ts_steering_end */ ltgr %r0,%r0 /* past end of steering ? */ jm 6f srlg %r0,%r0,15 /* 1 per 2^16 */ tm __VDSO_TS_DIR+3(%r5),0x01 /* steering direction? */ jz 7f lcgr %r0,%r0 /* negative TOD offset */ 7: algr %r1,%r0 /* add steering offset */ 6: sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */ alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */ lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */ clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ jne 0b lgf %r5,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ srlg %r1,%r1,0(%r5) /* >> tk->shift */ larl %r5,5f 2: clg %r1,0(%r5) jl 3f slg %r1,0(%r5) aghi %r0,1 j 2b 3: stg %r0,0(%r2) /* store tv->tv_sec */ slgr %r0,%r0 /* tv_nsec -> tv_usec */ ml %r0,8(%r5) srlg %r0,%r0,6 stg %r0,8(%r2) /* store tv->tv_usec */ 4: lghi %r2,0 aghi %r15,16 CFI_ADJUST_CFA_OFFSET -16 CFI_RESTORE 15 br %r14 CFI_ENDPROC 5: .quad 1000000000 .long 274877907 .size __kernel_gettimeofday,.-__kernel_gettimeofday
AirFortressIlikara/LS2K0300-linux-4.19
1,223
arch/s390/boot/compressed/head.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Startup glue code to uncompress the kernel * * Copyright IBM Corp. 2010 * * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> */ #include <linux/init.h> #include <linux/linkage.h> #include <asm/asm-offsets.h> #include <asm/thread_info.h> #include <asm/page.h> #include "sizes.h" __HEAD ENTRY(startup_decompressor) basr %r13,0 # get base .LPG1: # setup stack lg %r15,.Lstack-.LPG1(%r13) aghi %r15,-160 brasl %r14,decompress_kernel # Set up registers for memory mover. We move the decompressed image to # 0x100000, where startup_continue of the decompressed image is supposed # to be. lgr %r4,%r2 lg %r2,.Loffset-.LPG1(%r13) lg %r3,.Lmvsize-.LPG1(%r13) lgr %r5,%r3 # Move the memory mover someplace safe so it doesn't overwrite itself. la %r1,0x200 mvc 0(mover_end-mover,%r1),mover-.LPG1(%r13) # When the memory mover is done we pass control to # arch/s390/kernel/head64.S:startup_continue which lives at 0x100000 in # the decompressed image. lgr %r6,%r2 br %r1 mover: mvcle %r2,%r4,0 jo mover br %r6 mover_end: .align 8 .Lstack: .quad 0x8000 + (1<<(PAGE_SHIFT+THREAD_SIZE_ORDER)) .Loffset: .quad 0x100000 .Lmvsize: .quad SZ__bss_start
AirFortressIlikara/LS2K0300-linux-4.19
1,038
arch/s390/boot/compressed/vmlinux.lds.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <asm-generic/vmlinux.lds.h> OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390") OUTPUT_ARCH(s390:64-bit) ENTRY(startup) SECTIONS { /* Be careful parts of head_64.S assume startup_32 is at * address 0. */ . = 0; .head.text : { _head = . ; HEAD_TEXT _ehead = . ; } .text : { _text = .; /* Text */ *(.text) *(.text.*) _etext = . ; } .rodata : { _rodata = . ; *(.rodata) /* read-only data */ *(EXCLUDE_FILE (*piggy.o) .rodata.compressed) _erodata = . ; } .data : { _data = . ; *(.data) *(.data.*) _edata = . ; } startup_continue = 0x100000; #ifdef CONFIG_KERNEL_UNCOMPRESSED . = 0x100000; #else . = ALIGN(8); #endif .rodata.compressed : { *(.rodata.compressed) } . = ALIGN(256); .bss : { _bss = . ; *(.bss) *(.bss.*) *(COMMON) . = ALIGN(8); /* For convenience during zeroing */ _ebss = .; } _end = .; /* Sections to be discarded */ /DISCARD/ : { *(.eh_frame) *(__ex_table) *(*__ksymtab*) *(___kcrctab*) } }
AirFortressIlikara/LS2K0300-linux-4.19
3,261
arch/x86/xen/xen-asm.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Asm versions of Xen pv-ops, suitable for direct use. * * We only bother with direct forms (ie, vcpu in percpu data) of the * operations here; the indirect forms are better handled in C. */ #include <asm/asm-offsets.h> #include <asm/percpu.h> #include <asm/processor-flags.h> #include <asm/frame.h> #include <linux/linkage.h> /* * Enable events. This clears the event mask and tests the pending * event status with one and operation. If there are pending events, * then enter the hypervisor to get them handled. */ ENTRY(xen_irq_enable_direct) FRAME_BEGIN /* Unmask events */ movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask /* * Preempt here doesn't matter because that will deal with any * pending interrupts. The pending check may end up being run * on the wrong CPU, but that doesn't hurt. */ /* Test for pending */ testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending jz 1f call check_events 1: FRAME_END ret ENDPROC(xen_irq_enable_direct) /* * Disabling events is simply a matter of making the event mask * non-zero. */ ENTRY(xen_irq_disable_direct) movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask ret ENDPROC(xen_irq_disable_direct) /* * (xen_)save_fl is used to get the current interrupt enable status. * Callers expect the status to be in X86_EFLAGS_IF, and other bits * may be set in the return value. We take advantage of this by * making sure that X86_EFLAGS_IF has the right value (and other bits * in that byte are 0), but other bits in the return value are * undefined. We need to toggle the state of the bit, because Xen and * x86 use opposite senses (mask vs enable). */ ENTRY(xen_save_fl_direct) testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask setz %ah addb %ah, %ah ret ENDPROC(xen_save_fl_direct) /* * In principle the caller should be passing us a value return from * xen_save_fl_direct, but for robustness sake we test only the * X86_EFLAGS_IF flag rather than the whole byte. After setting the * interrupt mask state, it checks for unmasked pending events and * enters the hypervisor to get them delivered if so. */ ENTRY(xen_restore_fl_direct) FRAME_BEGIN #ifdef CONFIG_X86_64 testw $X86_EFLAGS_IF, %di #else testb $X86_EFLAGS_IF>>8, %ah #endif setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask /* * Preempt here doesn't matter because that will deal with any * pending interrupts. The pending check may end up being run * on the wrong CPU, but that doesn't hurt. */ /* check for unmasked and pending */ cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending jnz 1f call check_events 1: FRAME_END ret ENDPROC(xen_restore_fl_direct) /* * Force an event check by making a hypercall, but preserve regs * before making the call. */ ENTRY(check_events) FRAME_BEGIN #ifdef CONFIG_X86_32 push %eax push %ecx push %edx call xen_force_evtchn_callback pop %edx pop %ecx pop %eax #else push %rax push %rcx push %rdx push %rsi push %rdi push %r8 push %r9 push %r10 push %r11 call xen_force_evtchn_callback pop %r11 pop %r10 pop %r9 pop %r8 pop %rdi pop %rsi pop %rdx pop %rcx pop %rax #endif FRAME_END ret ENDPROC(check_events)
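Xen tracks interrupt state as a per-vcpu mask byte (0 = events allowed, non-zero = masked), the opposite sense of X86_EFLAGS_IF; xen_save_fl_direct and xen_restore_fl_direct are essentially the inversion below plus the pending-event check. A trivial C sketch of that conversion (the function names here are illustrative):

#include <stdint.h>

#define X86_EFLAGS_IF 0x200UL   /* interrupt-enable flag */

/* Xen mask byte -> eflags-style value: masked means IF clear. */
static unsigned long save_fl(uint8_t vcpu_info_mask)
{
        return vcpu_info_mask ? 0 : X86_EFLAGS_IF;
}

/* eflags-style value -> Xen mask byte: IF clear means mask events again. */
static uint8_t restore_fl(unsigned long flags)
{
        return (flags & X86_EFLAGS_IF) ? 0 : 1;
}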
AirFortressIlikara/LS2K0300-linux-4.19
6,050
arch/x86/xen/xen-asm_32.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Asm versions of Xen pv-ops, suitable for direct use. * * We only bother with direct forms (ie, vcpu in pda) of the * operations here; the indirect forms are better handled in C. */ #include <asm/thread_info.h> #include <asm/processor-flags.h> #include <asm/segment.h> #include <asm/asm.h> #include <xen/interface/xen.h> #include <linux/linkage.h> /* Pseudo-flag used for virtual NMI, which we don't implement yet */ #define XEN_EFLAGS_NMI 0x80000000 /* * This is run where a normal iret would be run, with the same stack setup: * 8: eflags * 4: cs * esp-> 0: eip * * This attempts to make sure that any pending events are dealt with * on return to usermode, but there is a small window in which an * event can happen just before entering usermode. If the nested * interrupt ends up setting one of the TIF_WORK_MASK pending work * flags, they will not be tested again before returning to * usermode. This means that a process can end up with pending work, * which will be unprocessed until the process enters and leaves the * kernel again, which could be an unbounded amount of time. This * means that a pending signal or reschedule event could be * indefinitely delayed. * * The fix is to notice a nested interrupt in the critical window, and * if one occurs, then fold the nested interrupt into the current * interrupt stack frame, and re-process it iteratively rather than * recursively. This means that it will exit via the normal path, and * all pending work will be dealt with appropriately. * * Because the nested interrupt handler needs to deal with the current * stack state in whatever form its in, we keep things simple by only * using a single register which is pushed/popped on the stack. */ .macro POP_FS 1: popw %fs .pushsection .fixup, "ax" 2: movw $0, (%esp) jmp 1b .popsection _ASM_EXTABLE(1b,2b) .endm ENTRY(xen_iret) /* test eflags for special cases */ testl $(X86_EFLAGS_VM | XEN_EFLAGS_NMI), 8(%esp) jnz hyper_iret push %eax ESP_OFFSET=4 # bytes pushed onto stack /* Store vcpu_info pointer for easy access */ #ifdef CONFIG_SMP pushw %fs movl $(__KERNEL_PERCPU), %eax movl %eax, %fs movl %fs:xen_vcpu, %eax POP_FS #else movl %ss:xen_vcpu, %eax #endif /* check IF state we're restoring */ testb $X86_EFLAGS_IF>>8, 8+1+ESP_OFFSET(%esp) /* * Maybe enable events. Once this happens we could get a * recursive event, so the critical region starts immediately * afterwards. However, if that happens we don't end up * resuming the code, so we don't have to be worried about * being preempted to another CPU. */ setz %ss:XEN_vcpu_info_mask(%eax) xen_iret_start_crit: /* check for unmasked and pending */ cmpw $0x0001, %ss:XEN_vcpu_info_pending(%eax) /* * If there's something pending, mask events again so we can * jump back into xen_hypervisor_callback. Otherwise do not * touch XEN_vcpu_info_mask. */ jne 1f movb $1, %ss:XEN_vcpu_info_mask(%eax) 1: popl %eax /* * From this point on the registers are restored and the stack * updated, so we don't need to worry about it if we're * preempted */ iret_restore_end: /* * Jump to hypervisor_callback after fixing up the stack. * Events are masked, so jumping out of the critical region is * OK. 
*/ je xen_hypervisor_callback 1: iret xen_iret_end_crit: _ASM_EXTABLE(1b, iret_exc) hyper_iret: /* put this out of line since its very rarely used */ jmp hypercall_page + __HYPERVISOR_iret * 32 .globl xen_iret_start_crit, xen_iret_end_crit /* * This is called by xen_hypervisor_callback in entry.S when it sees * that the EIP at the time of interrupt was between * xen_iret_start_crit and xen_iret_end_crit. We're passed the EIP in * %eax so we can do a more refined determination of what to do. * * The stack format at this point is: * ---------------- * ss : (ss/esp may be present if we came from usermode) * esp : * eflags } outer exception info * cs } * eip } * ---------------- <- edi (copy dest) * eax : outer eax if it hasn't been restored * ---------------- * eflags } nested exception info * cs } (no ss/esp because we're nested * eip } from the same ring) * orig_eax }<- esi (copy src) * - - - - - - - - * fs } * es } * ds } SAVE_ALL state * eax } * : : * ebx }<- esp * ---------------- * * In order to deliver the nested exception properly, we need to shift * everything from the return addr up to the error code so it sits * just under the outer exception info. This means that when we * handle the exception, we do it in the context of the outer * exception rather than starting a new one. * * The only caveat is that if the outer eax hasn't been restored yet * (ie, it's still on stack), we need to insert its value into the * SAVE_ALL state before going on, since it's usermode state which we * eventually need to restore. */ ENTRY(xen_iret_crit_fixup) /* * Paranoia: Make sure we're really coming from kernel space. * One could imagine a case where userspace jumps into the * critical range address, but just before the CPU delivers a * GP, it decides to deliver an interrupt instead. Unlikely? * Definitely. Easy to avoid? Yes. The Intel documents * explicitly say that the reported EIP for a bad jump is the * jump instruction itself, not the destination, but some * virtual environments get this wrong. */ movl PT_CS(%esp), %ecx andl $SEGMENT_RPL_MASK, %ecx cmpl $USER_RPL, %ecx je 2f lea PT_ORIG_EAX(%esp), %esi lea PT_EFLAGS(%esp), %edi /* * If eip is before iret_restore_end then stack * hasn't been restored yet. */ cmp $iret_restore_end, %eax jae 1f movl 0+4(%edi), %eax /* copy EAX (just above top of frame) */ movl %eax, PT_EAX(%esp) lea ESP_OFFSET(%edi), %edi /* move dest up over saved regs */ /* set up the copy */ 1: std mov $PT_EIP / 4, %ecx /* saved regs up to orig_eax */ rep movsl cld lea 4(%edi), %esp /* point esp to new frame */ 2: jmp xen_do_upcall
AirFortressIlikara/LS2K0300-linux-4.19
3,839
arch/x86/xen/xen-asm_64.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Asm versions of Xen pv-ops, suitable for direct use. * * We only bother with direct forms (ie, vcpu in pda) of the * operations here; the indirect forms are better handled in C. */ #include <asm/errno.h> #include <asm/percpu.h> #include <asm/processor-flags.h> #include <asm/segment.h> #include <asm/asm-offsets.h> #include <asm/thread_info.h> #include <asm/asm.h> #include <xen/interface/xen.h> #include <linux/init.h> #include <linux/linkage.h> .macro xen_pv_trap name ENTRY(xen_\name) pop %rcx pop %r11 jmp \name END(xen_\name) _ASM_NOKPROBE(xen_\name) .endm xen_pv_trap divide_error xen_pv_trap debug xen_pv_trap xendebug xen_pv_trap int3 xen_pv_trap xennmi xen_pv_trap overflow xen_pv_trap bounds xen_pv_trap invalid_op xen_pv_trap device_not_available xen_pv_trap double_fault xen_pv_trap coprocessor_segment_overrun xen_pv_trap invalid_TSS xen_pv_trap segment_not_present xen_pv_trap stack_segment xen_pv_trap general_protection xen_pv_trap page_fault xen_pv_trap spurious_interrupt_bug xen_pv_trap coprocessor_error xen_pv_trap alignment_check #ifdef CONFIG_X86_MCE xen_pv_trap machine_check #endif /* CONFIG_X86_MCE */ xen_pv_trap simd_coprocessor_error #ifdef CONFIG_IA32_EMULATION xen_pv_trap entry_INT80_compat #endif xen_pv_trap hypervisor_callback __INIT ENTRY(xen_early_idt_handler_array) i = 0 .rept NUM_EXCEPTION_VECTORS pop %rcx pop %r11 jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE i = i + 1 .fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc .endr END(xen_early_idt_handler_array) __FINIT hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32 /* * Xen64 iret frame: * * ss * rsp * rflags * cs * rip <-- standard iret frame * * flags * * rcx } * r11 }<-- pushed by hypercall page * rsp->rax } */ ENTRY(xen_iret) pushq $0 jmp hypercall_iret ENTRY(xen_sysret64) /* * We're already on the usermode stack at this point, but * still with the kernel gs, so we can easily switch back */ movq %rsp, PER_CPU_VAR(rsp_scratch) movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp pushq $__USER_DS pushq PER_CPU_VAR(rsp_scratch) pushq %r11 pushq $__USER_CS pushq %rcx pushq $VGCF_in_syscall jmp hypercall_iret /* * Xen handles syscall callbacks much like ordinary exceptions, which * means we have: * - kernel gs * - kernel rsp * - an iret-like stack frame on the stack (including rcx and r11): * ss * rsp * rflags * cs * rip * r11 * rsp->rcx */ /* Normal 64-bit system call target */ ENTRY(xen_syscall_target) popq %rcx popq %r11 /* * Neither Xen nor the kernel really knows what the old SS and * CS were. The kernel expects __USER_DS and __USER_CS, so * report those values even though Xen will guess its own values. */ movq $__USER_DS, 4*8(%rsp) movq $__USER_CS, 1*8(%rsp) jmp entry_SYSCALL_64_after_hwframe ENDPROC(xen_syscall_target) #ifdef CONFIG_IA32_EMULATION /* 32-bit compat syscall target */ ENTRY(xen_syscall32_target) popq %rcx popq %r11 /* * Neither Xen nor the kernel really knows what the old SS and * CS were. The kernel expects __USER32_DS and __USER32_CS, so * report those values even though Xen will guess its own values. 
*/ movq $__USER32_DS, 4*8(%rsp) movq $__USER32_CS, 1*8(%rsp) jmp entry_SYSCALL_compat_after_hwframe ENDPROC(xen_syscall32_target) /* 32-bit compat sysenter target */ ENTRY(xen_sysenter_target) mov 0*8(%rsp), %rcx mov 1*8(%rsp), %r11 mov 5*8(%rsp), %rsp jmp entry_SYSENTER_compat ENDPROC(xen_sysenter_target) #else /* !CONFIG_IA32_EMULATION */ ENTRY(xen_syscall32_target) ENTRY(xen_sysenter_target) lea 16(%rsp), %rsp /* strip %rcx, %r11 */ mov $-ENOSYS, %rax pushq $0 jmp hypercall_iret ENDPROC(xen_syscall32_target) ENDPROC(xen_sysenter_target) #endif /* CONFIG_IA32_EMULATION */
AirFortressIlikara/LS2K0300-linux-4.19
3,010
arch/x86/xen/xen-head.S
/* SPDX-License-Identifier: GPL-2.0 */ /* Xen-specific pieces of head.S, intended to be included in the right place in head.S */ #ifdef CONFIG_XEN #include <linux/elfnote.h> #include <linux/init.h> #include <asm/boot.h> #include <asm/asm.h> #include <asm/msr.h> #include <asm/page_types.h> #include <asm/percpu.h> #include <asm/unwind_hints.h> #include <xen/interface/elfnote.h> #include <xen/interface/features.h> #include <xen/interface/xen.h> #include <xen/interface/xen-mca.h> #include <asm/xen/interface.h> #ifdef CONFIG_XEN_PV __INIT ENTRY(startup_xen) UNWIND_HINT_EMPTY cld /* Clear .bss */ xor %eax,%eax mov $__bss_start, %_ASM_DI mov $__bss_stop, %_ASM_CX sub %_ASM_DI, %_ASM_CX shr $__ASM_SEL(2, 3), %_ASM_CX rep __ASM_SIZE(stos) mov %_ASM_SI, xen_start_info mov $init_thread_union+THREAD_SIZE, %_ASM_SP #ifdef CONFIG_X86_64 /* Set up %gs. * * The base of %gs always points to the bottom of the irqstack * union. If the stack protector canary is enabled, it is * located at %gs:40. Note that, on SMP, the boot cpu uses * init data section till per cpu areas are set up. */ movl $MSR_GS_BASE,%ecx movq $INIT_PER_CPU_VAR(irq_stack_union),%rax cdq wrmsr #endif jmp xen_start_kernel END(startup_xen) __FINIT #endif .pushsection .text .balign PAGE_SIZE ENTRY(hypercall_page) .rept (PAGE_SIZE / 32) UNWIND_HINT_EMPTY .skip 32 .endr #define HYPERCALL(n) \ .equ xen_hypercall_##n, hypercall_page + __HYPERVISOR_##n * 32; \ .type xen_hypercall_##n, @function; .size xen_hypercall_##n, 32 #include <asm/xen-hypercalls.h> #undef HYPERCALL END(hypercall_page) .popsection ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "linux") ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz "2.6") ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz "xen-3.0") #ifdef CONFIG_X86_32 ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE, _ASM_PTR __PAGE_OFFSET) #else ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE, _ASM_PTR __START_KERNEL_map) /* Map the p2m table to a 512GB-aligned user address. */ ELFNOTE(Xen, XEN_ELFNOTE_INIT_P2M, .quad (PUD_SIZE * PTRS_PER_PUD)) #endif #ifdef CONFIG_XEN_PV ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, _ASM_PTR startup_xen) #endif ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, _ASM_PTR hypercall_page) ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .ascii "!writable_page_tables|pae_pgdir_above_4gb") ELFNOTE(Xen, XEN_ELFNOTE_SUPPORTED_FEATURES, .long (1 << XENFEAT_writable_page_tables) | \ (1 << XENFEAT_dom0) | \ (1 << XENFEAT_linux_rsdp_unrestricted)) ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz "yes") ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz "generic") ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID, .quad _PAGE_PRESENT; .quad _PAGE_PRESENT) ELFNOTE(Xen, XEN_ELFNOTE_SUSPEND_CANCEL, .long 1) ELFNOTE(Xen, XEN_ELFNOTE_MOD_START_PFN, .long 1) ELFNOTE(Xen, XEN_ELFNOTE_HV_START_LOW, _ASM_PTR __HYPERVISOR_VIRT_START) ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, _ASM_PTR 0) #endif /*CONFIG_XEN */
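The hypercall page is a single page the hypervisor fills in at boot; hypercall n is simply the 32-byte slot at offset n * 32, which is what the HYPERCALL() macro above encodes as symbol addresses. A small C sketch of the same arithmetic (hypercall_entry, SLOT and NR_SLOTS are illustrative names, assuming the usual 4 KiB x86 page):

#define SLOT            32
#define NR_SLOTS        (4096 / SLOT)   /* 128 entries on a 4 KiB page */

static inline void *hypercall_entry(void *hypercall_page, unsigned int nr)
{
        if (nr >= NR_SLOTS)
                return (void *)0;
        return (char *)hypercall_page + nr * SLOT;
}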
AirFortressIlikara/LS2K0300-linux-4.19
4,749
arch/x86/xen/xen-pvh.S
/* * Copyright C 2016, Oracle and/or its affiliates. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. */ .code32 .text #define _pa(x) ((x) - __START_KERNEL_map) #include <linux/elfnote.h> #include <linux/init.h> #include <linux/linkage.h> #include <asm/segment.h> #include <asm/asm.h> #include <asm/boot.h> #include <asm/processor-flags.h> #include <asm/msr.h> #include <xen/interface/elfnote.h> __HEAD /* * Entry point for PVH guests. * * Xen ABI specifies the following register state when we come here: * * - `ebx`: contains the physical memory address where the loader has placed * the boot start info structure. * - `cr0`: bit 0 (PE) must be set. All the other writeable bits are cleared. * - `cr4`: all bits are cleared. * - `cs `: must be a 32-bit read/execute code segment with a base of ‘0’ * and a limit of ‘0xFFFFFFFF’. The selector value is unspecified. * - `ds`, `es`: must be a 32-bit read/write data segment with a base of * ‘0’ and a limit of ‘0xFFFFFFFF’. The selector values are all * unspecified. * - `tr`: must be a 32-bit TSS (active) with a base of '0' and a limit * of '0x67'. * - `eflags`: bit 17 (VM) must be cleared. Bit 9 (IF) must be cleared. * Bit 8 (TF) must be cleared. Other bits are all unspecified. * * All other processor registers and flag bits are unspecified. The OS is in * charge of setting up it's own stack, GDT and IDT. */ #define PVH_GDT_ENTRY_CS 1 #define PVH_GDT_ENTRY_DS 2 #define PVH_GDT_ENTRY_CANARY 3 #define PVH_CS_SEL (PVH_GDT_ENTRY_CS * 8) #define PVH_DS_SEL (PVH_GDT_ENTRY_DS * 8) #define PVH_CANARY_SEL (PVH_GDT_ENTRY_CANARY * 8) ENTRY(pvh_start_xen) cld lgdt (_pa(gdt)) mov $PVH_DS_SEL,%eax mov %eax,%ds mov %eax,%es mov %eax,%ss /* Stash hvm_start_info. */ mov $_pa(pvh_start_info), %edi mov %ebx, %esi mov _pa(pvh_start_info_sz), %ecx shr $2,%ecx rep movsl mov $_pa(early_stack_end), %esp /* Enable PAE mode. */ mov %cr4, %eax orl $X86_CR4_PAE, %eax mov %eax, %cr4 #ifdef CONFIG_X86_64 /* Enable Long mode. */ mov $MSR_EFER, %ecx rdmsr btsl $_EFER_LME, %eax wrmsr /* Enable pre-constructed page tables. */ mov $_pa(init_top_pgt), %eax mov %eax, %cr3 mov $(X86_CR0_PG | X86_CR0_PE), %eax mov %eax, %cr0 /* Jump to 64-bit mode. */ ljmp $PVH_CS_SEL, $_pa(1f) /* 64-bit entry point. */ .code64 1: /* Set base address in stack canary descriptor. */ mov $MSR_GS_BASE,%ecx mov $_pa(canary), %eax xor %edx, %edx wrmsr call xen_prepare_pvh /* startup_64 expects boot_params in %rsi. */ mov $_pa(pvh_bootparams), %rsi mov $_pa(startup_64), %rax jmp *%rax #else /* CONFIG_X86_64 */ /* Set base address in stack canary descriptor. 
*/ movl $_pa(gdt_start),%eax movl $_pa(canary),%ecx movw %cx, (PVH_GDT_ENTRY_CANARY * 8) + 2(%eax) shrl $16, %ecx movb %cl, (PVH_GDT_ENTRY_CANARY * 8) + 4(%eax) movb %ch, (PVH_GDT_ENTRY_CANARY * 8) + 7(%eax) mov $PVH_CANARY_SEL,%eax mov %eax,%gs call mk_early_pgtbl_32 mov $_pa(initial_page_table), %eax mov %eax, %cr3 mov %cr0, %eax or $(X86_CR0_PG | X86_CR0_PE), %eax mov %eax, %cr0 ljmp $PVH_CS_SEL, $1f 1: call xen_prepare_pvh mov $_pa(pvh_bootparams), %esi /* startup_32 doesn't expect paging and PAE to be on. */ ljmp $PVH_CS_SEL, $_pa(2f) 2: mov %cr0, %eax and $~X86_CR0_PG, %eax mov %eax, %cr0 mov %cr4, %eax and $~X86_CR4_PAE, %eax mov %eax, %cr4 ljmp $PVH_CS_SEL, $_pa(startup_32) #endif END(pvh_start_xen) .section ".init.data","aw" .balign 8 gdt: .word gdt_end - gdt_start .long _pa(gdt_start) .word 0 gdt_start: .quad 0x0000000000000000 /* NULL descriptor */ #ifdef CONFIG_X86_64 .quad GDT_ENTRY(0xa09a, 0, 0xfffff) /* PVH_CS_SEL */ #else .quad GDT_ENTRY(0xc09a, 0, 0xfffff) /* PVH_CS_SEL */ #endif .quad GDT_ENTRY(0xc092, 0, 0xfffff) /* PVH_DS_SEL */ .quad GDT_ENTRY(0x4090, 0, 0x18) /* PVH_CANARY_SEL */ gdt_end: .balign 16 canary: .fill 48, 1, 0 early_stack: .fill BOOT_STACK_SIZE, 1, 0 early_stack_end: ELFNOTE(Xen, XEN_ELFNOTE_PHYS32_ENTRY, _ASM_PTR (pvh_start_xen - __START_KERNEL_map))
AirFortressIlikara/LS2K0300-linux-4.19
1,761
arch/x86/power/hibernate_asm_32.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * This may not use any stack, nor any variable that is not "NoSave": * * Its rewriting one kernel image with another. What is stack in "old" * image could very well be data page in "new" image, and overwriting * your own stack under you is bad idea. */ #include <linux/linkage.h> #include <asm/segment.h> #include <asm/page_types.h> #include <asm/asm-offsets.h> #include <asm/processor-flags.h> .text ENTRY(swsusp_arch_suspend) movl %esp, saved_context_esp movl %ebx, saved_context_ebx movl %ebp, saved_context_ebp movl %esi, saved_context_esi movl %edi, saved_context_edi pushfl popl saved_context_eflags call swsusp_save ret ENTRY(restore_image) movl mmu_cr4_features, %ecx movl resume_pg_dir, %eax subl $__PAGE_OFFSET, %eax movl %eax, %cr3 jecxz 1f # cr4 Pentium and higher, skip if zero andl $~(X86_CR4_PGE), %ecx movl %ecx, %cr4; # turn off PGE movl %cr3, %eax; # flush TLB movl %eax, %cr3 1: movl restore_pblist, %edx .p2align 4,,7 copy_loop: testl %edx, %edx jz done movl pbe_address(%edx), %esi movl pbe_orig_address(%edx), %edi movl $1024, %ecx rep movsl movl pbe_next(%edx), %edx jmp copy_loop .p2align 4,,7 done: /* go back to the original page tables */ movl $swapper_pg_dir, %eax subl $__PAGE_OFFSET, %eax movl %eax, %cr3 movl mmu_cr4_features, %ecx jecxz 1f # cr4 Pentium and higher, skip if zero movl %ecx, %cr4; # turn PGE back on 1: movl saved_context_esp, %esp movl saved_context_ebp, %ebp movl saved_context_ebx, %ebx movl saved_context_esi, %esi movl saved_context_edi, %edi pushl saved_context_eflags popfl /* Saved in save_processor_state. */ movl $saved_context, %eax lgdt saved_context_gdt_desc(%eax) xorl %eax, %eax ret
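restore_image walks restore_pblist and copies every saved page back over its original location, 1024 longs (one 4 KiB page) per entry, using no stack because the stack itself may be one of the pages being overwritten. A C sketch of that walk (struct pbe_sketch mirrors the pbe_address/pbe_orig_address/pbe_next offsets used above; the struct, field and function names are illustrative):

#include <string.h>

#define PAGE_SIZE_32    4096

struct pbe_sketch {
        void *address;                  /* where the saved copy of the page lives        */
        void *orig_address;             /* where the page belongs in the restored image  */
        struct pbe_sketch *next;
};

static void restore_pages(struct pbe_sketch *pblist)
{
        struct pbe_sketch *p;

        for (p = pblist; p; p = p->next)
                memcpy(p->orig_address, p->address, PAGE_SIZE_32);
}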
AirFortressIlikara/LS2K0300-linux-4.19
3,634
arch/x86/power/hibernate_asm_64.S
/* * Hibernation support for x86-64 * * Distribute under GPLv2. * * Copyright 2007 Rafael J. Wysocki <rjw@sisk.pl> * Copyright 2005 Andi Kleen <ak@suse.de> * Copyright 2004 Pavel Machek <pavel@suse.cz> * * swsusp_arch_resume must not use any stack or any nonlocal variables while * copying pages: * * Its rewriting one kernel image with another. What is stack in "old" * image could very well be data page in "new" image, and overwriting * your own stack under you is bad idea. */ .text #include <linux/linkage.h> #include <asm/segment.h> #include <asm/page_types.h> #include <asm/asm-offsets.h> #include <asm/processor-flags.h> #include <asm/frame.h> ENTRY(swsusp_arch_suspend) movq $saved_context, %rax movq %rsp, pt_regs_sp(%rax) movq %rbp, pt_regs_bp(%rax) movq %rsi, pt_regs_si(%rax) movq %rdi, pt_regs_di(%rax) movq %rbx, pt_regs_bx(%rax) movq %rcx, pt_regs_cx(%rax) movq %rdx, pt_regs_dx(%rax) movq %r8, pt_regs_r8(%rax) movq %r9, pt_regs_r9(%rax) movq %r10, pt_regs_r10(%rax) movq %r11, pt_regs_r11(%rax) movq %r12, pt_regs_r12(%rax) movq %r13, pt_regs_r13(%rax) movq %r14, pt_regs_r14(%rax) movq %r15, pt_regs_r15(%rax) pushfq popq pt_regs_flags(%rax) /* save cr3 */ movq %cr3, %rax movq %rax, restore_cr3(%rip) FRAME_BEGIN call swsusp_save FRAME_END ret ENDPROC(swsusp_arch_suspend) ENTRY(restore_image) /* prepare to jump to the image kernel */ movq restore_jump_address(%rip), %r8 movq restore_cr3(%rip), %r9 /* prepare to switch to temporary page tables */ movq temp_level4_pgt(%rip), %rax movq mmu_cr4_features(%rip), %rbx /* prepare to copy image data to their original locations */ movq restore_pblist(%rip), %rdx /* jump to relocated restore code */ movq relocated_restore_code(%rip), %rcx jmpq *%rcx /* code below has been relocated to a safe page */ ENTRY(core_restore_code) /* switch to temporary page tables */ movq %rax, %cr3 /* flush TLB */ movq %rbx, %rcx andq $~(X86_CR4_PGE), %rcx movq %rcx, %cr4; # turn off PGE movq %cr3, %rcx; # flush TLB movq %rcx, %cr3; movq %rbx, %cr4; # turn PGE back on .Lloop: testq %rdx, %rdx jz .Ldone /* get addresses from the pbe and copy the page */ movq pbe_address(%rdx), %rsi movq pbe_orig_address(%rdx), %rdi movq $(PAGE_SIZE >> 3), %rcx rep movsq /* progress to the next pbe */ movq pbe_next(%rdx), %rdx jmp .Lloop .Ldone: /* jump to the restore_registers address from the image header */ jmpq *%r8 /* code below belongs to the image kernel */ .align PAGE_SIZE ENTRY(restore_registers) /* go back to the original page tables */ movq %r9, %cr3 /* Flush TLB, including "global" things (vmalloc) */ movq mmu_cr4_features(%rip), %rax movq %rax, %rdx andq $~(X86_CR4_PGE), %rdx movq %rdx, %cr4; # turn off PGE movq %cr3, %rcx; # flush TLB movq %rcx, %cr3 movq %rax, %cr4; # turn PGE back on /* We don't restore %rax, it must be 0 anyway */ movq $saved_context, %rax movq pt_regs_sp(%rax), %rsp movq pt_regs_bp(%rax), %rbp movq pt_regs_si(%rax), %rsi movq pt_regs_di(%rax), %rdi movq pt_regs_bx(%rax), %rbx movq pt_regs_cx(%rax), %rcx movq pt_regs_dx(%rax), %rdx movq pt_regs_r8(%rax), %r8 movq pt_regs_r9(%rax), %r9 movq pt_regs_r10(%rax), %r10 movq pt_regs_r11(%rax), %r11 movq pt_regs_r12(%rax), %r12 movq pt_regs_r13(%rax), %r13 movq pt_regs_r14(%rax), %r14 movq pt_regs_r15(%rax), %r15 pushq pt_regs_flags(%rax) popfq /* Saved in save_processor_state. */ lgdt saved_context_gdt_desc(%rax) xorl %eax, %eax /* tell the hibernation core that we've just restored the memory */ movq %rax, in_suspend(%rip) ret ENDPROC(restore_registers)
AirFortressIlikara/LS2K0300-linux-4.19
1,041
arch/x86/um/setjmp_64.S
/* SPDX-License-Identifier: GPL-2.0 */ # # arch/x86_64/setjmp.S # # setjmp/longjmp for the x86-64 architecture # # # The jmp_buf is assumed to contain the following, in order: # %rbx # %rsp (post-return) # %rbp # %r12 # %r13 # %r14 # %r15 # <return address> # .text .align 4 .globl kernel_setjmp .type kernel_setjmp, @function kernel_setjmp: pop %rsi # Return address, and adjust the stack xorl %eax,%eax # Return value movq %rbx,(%rdi) movq %rsp,8(%rdi) # Post-return %rsp! push %rsi # Make the call/return stack happy movq %rbp,16(%rdi) movq %r12,24(%rdi) movq %r13,32(%rdi) movq %r14,40(%rdi) movq %r15,48(%rdi) movq %rsi,56(%rdi) # Return address ret .size kernel_setjmp,.-kernel_setjmp .text .align 4 .globl kernel_longjmp .type kernel_longjmp, @function kernel_longjmp: movl %esi,%eax # Return value (int) movq (%rdi),%rbx movq 8(%rdi),%rsp movq 16(%rdi),%rbp movq 24(%rdi),%r12 movq 32(%rdi),%r13 movq 40(%rdi),%r14 movq 48(%rdi),%r15 jmp *56(%rdi) .size kernel_longjmp,.-kernel_longjmp
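For reference, the jmp_buf layout described in the comment block above, written out as a C struct with the byte offsets kernel_setjmp and kernel_longjmp use (the struct name is illustrative; the assembly only ever works with raw offsets):

#include <stdint.h>

struct kernel_jmp_buf_x86_64 {
        uint64_t rbx;   /* offset  0 */
        uint64_t rsp;   /* offset  8, post-return stack pointer */
        uint64_t rbp;   /* offset 16 */
        uint64_t r12;   /* offset 24 */
        uint64_t r13;   /* offset 32 */
        uint64_t r14;   /* offset 40 */
        uint64_t r15;   /* offset 48 */
        uint64_t rip;   /* offset 56, return address for longjmp */
};

kernel_longjmp restores the first seven slots and then jumps through the saved rip with the value passed in %esi as the return value, which is why the matching kernel_setjmp call appears to return a second time.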
AirFortressIlikara/LS2K0300-linux-4.19
4,903
arch/x86/um/checksum_32.S
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * IP/TCP/UDP checksumming routines * * Authors: Jorge Cwik, <jorge@laser.satlink.net> * Arnt Gulbrandsen, <agulbra@nvg.unit.no> * Tom May, <ftom@netcom.com> * Pentium Pro/II routines: * Alexander Kjeldaas <astor@guardian.no> * Finn Arne Gangstad <finnag@guardian.no> * Lots of code moved from tcp.c and ip.c; see those files * for more names. * * Changes: Ingo Molnar, converted csum_partial_copy() to 2.1 exception * handling. * Andi Kleen, add zeroing on error * converted to pure assembler * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <asm/errno.h> #include <asm/asm.h> #include <asm/export.h> /* * computes a partial checksum, e.g. for TCP/UDP fragments */ /* unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum) */ .text .align 4 .globl csum_partial #ifndef CONFIG_X86_USE_PPRO_CHECKSUM /* * Experiments with Ethernet and SLIP connections show that buff * is aligned on either a 2-byte or 4-byte boundary. We get at * least a twofold speedup on 486 and Pentium if it is 4-byte aligned. * Fortunately, it is easy to convert 2-byte alignment to 4-byte * alignment for the unrolled loop. */ csum_partial: pushl %esi pushl %ebx movl 20(%esp),%eax # Function arg: unsigned int sum movl 16(%esp),%ecx # Function arg: int len movl 12(%esp),%esi # Function arg: unsigned char *buff testl $2, %esi # Check alignment. jz 2f # Jump if alignment is ok. subl $2, %ecx # Alignment uses up two bytes. jae 1f # Jump if we had at least two bytes. addl $2, %ecx # ecx was < 2. Deal with it. 
jmp 4f 1: movw (%esi), %bx addl $2, %esi addw %bx, %ax adcl $0, %eax 2: movl %ecx, %edx shrl $5, %ecx jz 2f testl %esi, %esi 1: movl (%esi), %ebx adcl %ebx, %eax movl 4(%esi), %ebx adcl %ebx, %eax movl 8(%esi), %ebx adcl %ebx, %eax movl 12(%esi), %ebx adcl %ebx, %eax movl 16(%esi), %ebx adcl %ebx, %eax movl 20(%esi), %ebx adcl %ebx, %eax movl 24(%esi), %ebx adcl %ebx, %eax movl 28(%esi), %ebx adcl %ebx, %eax lea 32(%esi), %esi dec %ecx jne 1b adcl $0, %eax 2: movl %edx, %ecx andl $0x1c, %edx je 4f shrl $2, %edx # This clears CF 3: adcl (%esi), %eax lea 4(%esi), %esi dec %edx jne 3b adcl $0, %eax 4: andl $3, %ecx jz 7f cmpl $2, %ecx jb 5f movw (%esi),%cx leal 2(%esi),%esi je 6f shll $16,%ecx 5: movb (%esi),%cl 6: addl %ecx,%eax adcl $0, %eax 7: popl %ebx popl %esi ret #else /* Version for PentiumII/PPro */ csum_partial: pushl %esi pushl %ebx movl 20(%esp),%eax # Function arg: unsigned int sum movl 16(%esp),%ecx # Function arg: int len movl 12(%esp),%esi # Function arg: const unsigned char *buf testl $2, %esi jnz 30f 10: movl %ecx, %edx movl %ecx, %ebx andl $0x7c, %ebx shrl $7, %ecx addl %ebx,%esi shrl $2, %ebx negl %ebx lea 45f(%ebx,%ebx,2), %ebx testl %esi, %esi jmp *%ebx # Handle 2-byte-aligned regions 20: addw (%esi), %ax lea 2(%esi), %esi adcl $0, %eax jmp 10b 30: subl $2, %ecx ja 20b je 32f movzbl (%esi),%ebx # csumming 1 byte, 2-aligned addl %ebx, %eax adcl $0, %eax jmp 80f 32: addw (%esi), %ax # csumming 2 bytes, 2-aligned adcl $0, %eax jmp 80f 40: addl -128(%esi), %eax adcl -124(%esi), %eax adcl -120(%esi), %eax adcl -116(%esi), %eax adcl -112(%esi), %eax adcl -108(%esi), %eax adcl -104(%esi), %eax adcl -100(%esi), %eax adcl -96(%esi), %eax adcl -92(%esi), %eax adcl -88(%esi), %eax adcl -84(%esi), %eax adcl -80(%esi), %eax adcl -76(%esi), %eax adcl -72(%esi), %eax adcl -68(%esi), %eax adcl -64(%esi), %eax adcl -60(%esi), %eax adcl -56(%esi), %eax adcl -52(%esi), %eax adcl -48(%esi), %eax adcl -44(%esi), %eax adcl -40(%esi), %eax adcl -36(%esi), %eax adcl -32(%esi), %eax adcl -28(%esi), %eax adcl -24(%esi), %eax adcl -20(%esi), %eax adcl -16(%esi), %eax adcl -12(%esi), %eax adcl -8(%esi), %eax adcl -4(%esi), %eax 45: lea 128(%esi), %esi adcl $0, %eax dec %ecx jge 40b movl %edx, %ecx 50: andl $3, %ecx jz 80f # Handle the last 1-3 bytes without jumping notl %ecx # 1->2, 2->1, 3->0, higher bits are masked movl $0xffffff,%ebx # by the shll and shrl instructions shll $3,%ecx shrl %cl,%ebx andl -128(%esi),%ebx # esi is 4-aligned so should be ok addl %ebx,%eax adcl $0,%eax 80: popl %ebx popl %esi ret #endif EXPORT_SYMBOL(csum_partial)
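Both csum_partial variants compute the running sum used by the Internet (one's-complement) checksum; the unrolled adc chains are just a fast way to add the buffer with end-around carry. A plain C reference for the same computation (csum_partial_ref is an illustrative name; its 32-bit result matches the assembly's once both are folded down to 16 bits):

#include <stdint.h>
#include <stddef.h>

static uint32_t csum_partial_ref(const uint8_t *buf, size_t len, uint32_t sum)
{
        uint64_t acc = sum;
        size_t i;

        for (i = 0; i + 1 < len; i += 2)        /* little-endian 16-bit words */
                acc += (uint32_t)buf[i] | ((uint32_t)buf[i + 1] << 8);
        if (len & 1)                            /* trailing odd byte */
                acc += buf[len - 1];

        while (acc >> 32)                       /* fold the carries back in */
                acc = (acc & 0xffffffffu) + (acc >> 32);
        return (uint32_t)acc;
}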
AirFortressIlikara/LS2K0300-linux-4.19
1,072
arch/x86/um/setjmp_32.S
/* SPDX-License-Identifier: GPL-2.0 */ # # arch/i386/setjmp.S # # setjmp/longjmp for the i386 architecture # # # The jmp_buf is assumed to contain the following, in order: # %ebx # %esp # %ebp # %esi # %edi # <return address> # .text .align 4 .globl kernel_setjmp .type kernel_setjmp, @function kernel_setjmp: #ifdef _REGPARM movl %eax,%edx #else movl 4(%esp),%edx #endif popl %ecx # Return address, and adjust the stack xorl %eax,%eax # Return value movl %ebx,(%edx) movl %esp,4(%edx) # Post-return %esp! pushl %ecx # Make the call/return stack happy movl %ebp,8(%edx) movl %esi,12(%edx) movl %edi,16(%edx) movl %ecx,20(%edx) # Return address ret .size kernel_setjmp,.-kernel_setjmp .text .align 4 .globl kernel_longjmp .type kernel_longjmp, @function kernel_longjmp: #ifdef _REGPARM xchgl %eax,%edx #else movl 4(%esp),%edx # jmp_ptr address movl 8(%esp),%eax # Return value #endif movl (%edx),%ebx movl 4(%edx),%esp movl 8(%edx),%ebp movl 12(%edx),%esi movl 16(%edx),%edi jmp *20(%edx) .size kernel_longjmp,.-kernel_longjmp
AirFortressIlikara/LS2K0300-linux-4.19
5,177
arch/x86/kernel/ftrace_32.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2017 Steven Rostedt, VMware Inc. */ #include <linux/linkage.h> #include <asm/page_types.h> #include <asm/segment.h> #include <asm/export.h> #include <asm/ftrace.h> #include <asm/nospec-branch.h> #include <asm/frame.h> #ifdef CC_USING_FENTRY # define function_hook __fentry__ EXPORT_SYMBOL(__fentry__) #else # define function_hook mcount EXPORT_SYMBOL(mcount) #endif #ifdef CONFIG_DYNAMIC_FTRACE /* mcount uses a frame pointer even if CONFIG_FRAME_POINTER is not set */ #if !defined(CC_USING_FENTRY) || defined(CONFIG_FRAME_POINTER) # define USING_FRAME_POINTER #endif #ifdef USING_FRAME_POINTER # define MCOUNT_FRAME 1 /* using frame = true */ #else # define MCOUNT_FRAME 0 /* using frame = false */ #endif ENTRY(function_hook) ret END(function_hook) ENTRY(ftrace_caller) #ifdef USING_FRAME_POINTER # ifdef CC_USING_FENTRY /* * Frame pointers are of ip followed by bp. * Since fentry is an immediate jump, we are left with * parent-ip, function-ip. We need to add a frame with * parent-ip followed by ebp. */ pushl 4(%esp) /* parent ip */ pushl %ebp movl %esp, %ebp pushl 2*4(%esp) /* function ip */ # endif /* For mcount, the function ip is directly above */ pushl %ebp movl %esp, %ebp #endif pushl %eax pushl %ecx pushl %edx pushl $0 /* Pass NULL as regs pointer */ #ifdef USING_FRAME_POINTER /* Load parent ebp into edx */ movl 4*4(%esp), %edx #else /* There's no frame pointer, load the appropriate stack addr instead */ lea 4*4(%esp), %edx #endif movl (MCOUNT_FRAME+4)*4(%esp), %eax /* load the rip */ /* Get the parent ip */ movl 4(%edx), %edx /* edx has ebp */ movl function_trace_op, %ecx subl $MCOUNT_INSN_SIZE, %eax .globl ftrace_call ftrace_call: call ftrace_stub addl $4, %esp /* skip NULL pointer */ popl %edx popl %ecx popl %eax #ifdef USING_FRAME_POINTER popl %ebp # ifdef CC_USING_FENTRY addl $4,%esp /* skip function ip */ popl %ebp /* this is the orig bp */ addl $4, %esp /* skip parent ip */ # endif #endif .Lftrace_ret: #ifdef CONFIG_FUNCTION_GRAPH_TRACER .globl ftrace_graph_call ftrace_graph_call: jmp ftrace_stub #endif /* This is weak to keep gas from relaxing the jumps */ WEAK(ftrace_stub) ret END(ftrace_caller) ENTRY(ftrace_regs_caller) /* * i386 does not save SS and ESP when coming from kernel. * Instead, to get sp, &regs->sp is used (see ptrace.h). * Unfortunately, that means eflags must be at the same location * as the current return ip is. We move the return ip into the * regs->ip location, and move flags into the return ip location. 
*/ pushl $__KERNEL_CS pushl 4(%esp) /* Save the return ip */ pushl $0 /* Load 0 into orig_ax */ pushl %gs pushl %fs pushl %es pushl %ds pushl %eax /* Get flags and place them into the return ip slot */ pushf popl %eax movl %eax, 8*4(%esp) pushl %ebp pushl %edi pushl %esi pushl %edx pushl %ecx pushl %ebx ENCODE_FRAME_POINTER movl 12*4(%esp), %eax /* Load ip (1st parameter) */ subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */ #ifdef CC_USING_FENTRY movl 15*4(%esp), %edx /* Load parent ip (2nd parameter) */ #else movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */ #endif movl function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */ pushl %esp /* Save pt_regs as 4th parameter */ GLOBAL(ftrace_regs_call) call ftrace_stub addl $4, %esp /* Skip pt_regs */ /* restore flags */ push 14*4(%esp) popf /* Move return ip back to its original location */ movl 12*4(%esp), %eax movl %eax, 14*4(%esp) popl %ebx popl %ecx popl %edx popl %esi popl %edi popl %ebp popl %eax popl %ds popl %es popl %fs popl %gs /* use lea to not affect flags */ lea 3*4(%esp), %esp /* Skip orig_ax, ip and cs */ jmp .Lftrace_ret #else /* ! CONFIG_DYNAMIC_FTRACE */ ENTRY(function_hook) cmpl $__PAGE_OFFSET, %esp jb ftrace_stub /* Paging not enabled yet? */ cmpl $ftrace_stub, ftrace_trace_function jnz .Ltrace #ifdef CONFIG_FUNCTION_GRAPH_TRACER cmpl $ftrace_stub, ftrace_graph_return jnz ftrace_graph_caller cmpl $ftrace_graph_entry_stub, ftrace_graph_entry jnz ftrace_graph_caller #endif .globl ftrace_stub ftrace_stub: ret /* taken from glibc */ .Ltrace: pushl %eax pushl %ecx pushl %edx movl 0xc(%esp), %eax movl 0x4(%ebp), %edx subl $MCOUNT_INSN_SIZE, %eax movl ftrace_trace_function, %ecx CALL_NOSPEC %ecx popl %edx popl %ecx popl %eax jmp ftrace_stub END(function_hook) #endif /* CONFIG_DYNAMIC_FTRACE */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER ENTRY(ftrace_graph_caller) pushl %eax pushl %ecx pushl %edx movl 3*4(%esp), %eax /* Even with frame pointers, fentry doesn't have one here */ #ifdef CC_USING_FENTRY lea 4*4(%esp), %edx movl $0, %ecx #else lea 0x4(%ebp), %edx movl (%ebp), %ecx #endif subl $MCOUNT_INSN_SIZE, %eax call prepare_ftrace_return popl %edx popl %ecx popl %eax ret END(ftrace_graph_caller) .globl return_to_handler return_to_handler: pushl %eax pushl %edx #ifdef CC_USING_FENTRY movl $0, %eax #else movl %ebp, %eax #endif call ftrace_return_to_handler movl %eax, %ecx popl %edx popl %eax JMP_NOSPEC %ecx #endif
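ftrace_caller and ftrace_regs_caller above only assemble the argument set - the traced instruction pointer (adjusted by MCOUNT_INSN_SIZE), the parent ip, the ftrace_ops loaded from function_trace_op, and a pt_regs pointer (NULL for the plain trampoline, a full frame for the regs variant) - and then call into C. Sketched below as the callback shape this trampoline feeds; the names are illustrative and the exact kernel typedef is an assumption here, not quoted from the source:

struct ftrace_ops;      /* opaque for this sketch */
struct pt_regs;

typedef void (*trace_callback_t)(unsigned long ip, unsigned long parent_ip,
                                 struct ftrace_ops *op, struct pt_regs *regs);

/* A do-nothing tracer body with that shape. */
static void my_tracer(unsigned long ip, unsigned long parent_ip,
                      struct ftrace_ops *op, struct pt_regs *regs)
{
        (void)ip; (void)parent_ip; (void)op; (void)regs;
}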
AirFortressIlikara/LS2K0300-linux-4.19
3,810
arch/x86/kernel/verify_cpu.S
/* * * verify_cpu.S - Code for cpu long mode and SSE verification. This * code has been borrowed from boot/setup.S and was introduced by * Andi Kleen. * * Copyright (c) 2007 Andi Kleen (ak@suse.de) * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com) * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com) * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com) * * This source code is licensed under the GNU General Public License, * Version 2. See the file COPYING for more details. * * This is common code for verifying whether a CPU supports * long mode and SSE. It is not called directly; instead, this * file is included at various places and compiled in that context. * This file is expected to run in 32bit code. Currently: * * arch/x86/boot/compressed/head_64.S: Boot cpu verification * arch/x86/kernel/trampoline_64.S: secondary processor verification * arch/x86/kernel/head_32.S: processor startup * * verify_cpu returns the status of long mode and SSE in register %eax. * 0: Success 1: Failure * * On Intel, the XD_DISABLE flag will be cleared as a side-effect. * * The caller needs to check the error code and act * appropriately: either display a message or halt. */ #include <asm/cpufeatures.h> #include <asm/msr-index.h> ENTRY(verify_cpu) pushf # Save caller passed flags push $0 # Kill any dangerous flags popf #ifndef __x86_64__ pushfl # standard way to check for cpuid popl %eax movl %eax,%ebx xorl $0x200000,%eax pushl %eax popfl pushfl popl %eax cmpl %eax,%ebx jz .Lverify_cpu_no_longmode # cpu has no cpuid #endif movl $0x0,%eax # See if cpuid 1 is implemented cpuid cmpl $0x1,%eax jb .Lverify_cpu_no_longmode # no cpuid 1 xor %di,%di cmpl $0x68747541,%ebx # AuthenticAMD jnz .Lverify_cpu_noamd cmpl $0x69746e65,%edx jnz .Lverify_cpu_noamd cmpl $0x444d4163,%ecx jnz .Lverify_cpu_noamd mov $1,%di # cpu is from AMD jmp .Lverify_cpu_check .Lverify_cpu_noamd: cmpl $0x756e6547,%ebx # GenuineIntel?
jnz .Lverify_cpu_check cmpl $0x49656e69,%edx jnz .Lverify_cpu_check cmpl $0x6c65746e,%ecx jnz .Lverify_cpu_check # only call IA32_MISC_ENABLE when: # family > 6 || (family == 6 && model >= 0xd) movl $0x1, %eax # check CPU family and model cpuid movl %eax, %ecx andl $0x0ff00f00, %eax # mask family and extended family shrl $8, %eax cmpl $6, %eax ja .Lverify_cpu_clear_xd # family > 6, ok jb .Lverify_cpu_check # family < 6, skip andl $0x000f00f0, %ecx # mask model and extended model shrl $4, %ecx cmpl $0xd, %ecx jb .Lverify_cpu_check # family == 6, model < 0xd, skip .Lverify_cpu_clear_xd: movl $MSR_IA32_MISC_ENABLE, %ecx rdmsr btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE jnc .Lverify_cpu_check # only write MSR if bit was changed wrmsr .Lverify_cpu_check: movl $0x1,%eax # Does the cpu have what it takes cpuid andl $REQUIRED_MASK0,%edx xorl $REQUIRED_MASK0,%edx jnz .Lverify_cpu_no_longmode movl $0x80000000,%eax # See if extended cpuid is implemented cpuid cmpl $0x80000001,%eax jb .Lverify_cpu_no_longmode # no extended cpuid movl $0x80000001,%eax # Does the cpu have what it takes cpuid andl $REQUIRED_MASK1,%edx xorl $REQUIRED_MASK1,%edx jnz .Lverify_cpu_no_longmode .Lverify_cpu_sse_test: movl $1,%eax cpuid andl $SSE_MASK,%edx cmpl $SSE_MASK,%edx je .Lverify_cpu_sse_ok test %di,%di jz .Lverify_cpu_no_longmode # only try to force SSE on AMD movl $MSR_K7_HWCR,%ecx rdmsr btr $15,%eax # enable SSE wrmsr xor %di,%di # don't loop jmp .Lverify_cpu_sse_test # try again .Lverify_cpu_no_longmode: popf # Restore caller passed flags movl $1,%eax ret .Lverify_cpu_sse_ok: popf # Restore caller passed flags xorl %eax, %eax ret ENDPROC(verify_cpu)
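The Intel-only gate above (clear XD_DISABLE when family > 6, or family == 6 and model >= 0xd) is plain bit arithmetic on CPUID.1:EAX. The stand-alone C sketch below repeats the same mask-and-shift sequence the assembly uses (andl $0x0ff00f00 / shrl $8 for the family bits, andl $0x000f00f0 / shrl $4 for the model bits); the EAX value is a made-up sample rather than the output of a real cpuid instruction.

#include <stdio.h>

/* Decode family/model the same way the assembly above does. */
static unsigned family_bits(unsigned eax) { return (eax & 0x0ff00f00) >> 8; }
static unsigned model_bits(unsigned eax)  { return (eax & 0x000f00f0) >> 4; }

int main(void)
{
    unsigned eax = 0x000906ea;                 /* sample CPUID.1:EAX value */
    unsigned fam = family_bits(eax);
    unsigned mod = model_bits(eax);

    /* mirrors: family > 6 || (family == 6 && model >= 0xd) */
    int clear_xd = fam > 6 || (fam == 6 && mod >= 0xd);

    printf("family bits %#x, model bits %#x -> %s\n", fam, mod,
           clear_xd ? "clear XD_DISABLE" : "leave MSR alone");
    return 0;
}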
AirFortressIlikara/LS2K0300-linux-4.19
8,186
arch/x86/kernel/ftrace_64.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2014 Steven Rostedt, Red Hat Inc */ #include <linux/linkage.h> #include <asm/ptrace.h> #include <asm/ftrace.h> #include <asm/export.h> #include <asm/nospec-branch.h> #include <asm/unwind_hints.h> #include <asm/frame.h> .code64 .section .entry.text, "ax" #ifdef CC_USING_FENTRY # define function_hook __fentry__ EXPORT_SYMBOL(__fentry__) #else # define function_hook mcount EXPORT_SYMBOL(mcount) #endif #ifdef CONFIG_FRAME_POINTER # ifdef CC_USING_FENTRY /* Save parent and function stack frames (rip and rbp) */ # define MCOUNT_FRAME_SIZE (8+16*2) # else /* Save just function stack frame (rip and rbp) */ # define MCOUNT_FRAME_SIZE (8+16) # endif #else /* No need to save a stack frame */ # define MCOUNT_FRAME_SIZE 0 #endif /* CONFIG_FRAME_POINTER */ /* Size of stack used to save mcount regs in save_mcount_regs */ #define MCOUNT_REG_SIZE (SS+8 + MCOUNT_FRAME_SIZE) /* * gcc -pg option adds a call to 'mcount' in most functions. * When -mfentry is used, the call is to 'fentry' and not 'mcount' * and is done before the function's stack frame is set up. * They both require a set of regs to be saved before calling * any C code and restored before returning to the function. * * On boot up, all these calls are converted into nops. When tracing * is enabled, the call can jump to either ftrace_caller or * ftrace_regs_caller. Callbacks (tracing functions) that require * ftrace_regs_caller (like kprobes) need to have pt_regs passed to * them. For this reason, the size of the pt_regs structure will be * allocated on the stack and the required mcount registers will * be saved in the locations that pt_regs has them in. */ /* * @added: the amount of stack added before calling this * * After this is called, the following registers contain: * * %rdi - holds the address that called the trampoline * %rsi - holds the parent function (traced function's return address) * %rdx - holds the original %rbp */ .macro save_mcount_regs added=0 #ifdef CONFIG_FRAME_POINTER /* Save the original rbp */ pushq %rbp /* * Stack traces will stop at the ftrace trampoline if the frame pointer * is not set up properly. If fentry is used, we need to save a frame * pointer for the parent as well as the function traced, because the * fentry is called before the stack frame is set up, whereas mcount * is called afterward. */ #ifdef CC_USING_FENTRY /* Save the parent pointer (skip orig rbp and our return address) */ pushq \added+8*2(%rsp) pushq %rbp movq %rsp, %rbp /* Save the return address (now skip orig rbp, rbp and parent) */ pushq \added+8*3(%rsp) #else /* Can't assume that rip is before this (unless added was zero) */ pushq \added+8(%rsp) #endif pushq %rbp movq %rsp, %rbp #endif /* CONFIG_FRAME_POINTER */ /* * We add enough stack to save all regs. */ subq $(MCOUNT_REG_SIZE - MCOUNT_FRAME_SIZE), %rsp movq %rax, RAX(%rsp) movq %rcx, RCX(%rsp) movq %rdx, RDX(%rsp) movq %rsi, RSI(%rsp) movq %rdi, RDI(%rsp) movq %r8, R8(%rsp) movq %r9, R9(%rsp) /* * Save the original RBP. Even though the mcount ABI does not * require this, it helps out callers.
*/ #ifdef CONFIG_FRAME_POINTER movq MCOUNT_REG_SIZE-8(%rsp), %rdx #else movq %rbp, %rdx #endif movq %rdx, RBP(%rsp) /* Copy the parent address into %rsi (second parameter) */ #ifdef CC_USING_FENTRY movq MCOUNT_REG_SIZE+8+\added(%rsp), %rsi #else /* %rdx contains original %rbp */ movq 8(%rdx), %rsi #endif /* Move RIP to its proper location */ movq MCOUNT_REG_SIZE+\added(%rsp), %rdi movq %rdi, RIP(%rsp) /* * Now %rdi (the first parameter) has the return address of * where ftrace_call returns. But the callbacks expect the * address of the call itself. */ subq $MCOUNT_INSN_SIZE, %rdi .endm .macro restore_mcount_regs movq R9(%rsp), %r9 movq R8(%rsp), %r8 movq RDI(%rsp), %rdi movq RSI(%rsp), %rsi movq RDX(%rsp), %rdx movq RCX(%rsp), %rcx movq RAX(%rsp), %rax /* ftrace_regs_caller can modify %rbp */ movq RBP(%rsp), %rbp addq $MCOUNT_REG_SIZE, %rsp .endm #ifdef CONFIG_DYNAMIC_FTRACE ENTRY(function_hook) retq ENDPROC(function_hook) ENTRY(ftrace_caller) /* save_mcount_regs fills in first two parameters */ save_mcount_regs GLOBAL(ftrace_caller_op_ptr) /* Load the ftrace_ops into the 3rd parameter */ movq function_trace_op(%rip), %rdx /* regs go into 4th parameter (but make it NULL) */ movq $0, %rcx GLOBAL(ftrace_call) call ftrace_stub restore_mcount_regs /* * The code up to this label is copied into trampolines so * think twice before adding any new code or changing the * layout here. */ GLOBAL(ftrace_epilogue) #ifdef CONFIG_FUNCTION_GRAPH_TRACER GLOBAL(ftrace_graph_call) jmp ftrace_stub #endif /* * This is weak to keep gas from relaxing the jumps. * It is also used to copy the retq for trampolines. */ WEAK(ftrace_stub) retq ENDPROC(ftrace_caller) ENTRY(ftrace_regs_caller) /* Save the current flags before any operations that can change them */ pushfq /* added 8 bytes to save flags */ save_mcount_regs 8 /* save_mcount_regs fills in first two parameters */ GLOBAL(ftrace_regs_caller_op_ptr) /* Load the ftrace_ops into the 3rd parameter */ movq function_trace_op(%rip), %rdx /* Save the rest of pt_regs */ movq %r15, R15(%rsp) movq %r14, R14(%rsp) movq %r13, R13(%rsp) movq %r12, R12(%rsp) movq %r11, R11(%rsp) movq %r10, R10(%rsp) movq %rbx, RBX(%rsp) /* Copy saved flags */ movq MCOUNT_REG_SIZE(%rsp), %rcx movq %rcx, EFLAGS(%rsp) /* Kernel segments */ movq $__KERNEL_DS, %rcx movq %rcx, SS(%rsp) movq $__KERNEL_CS, %rcx movq %rcx, CS(%rsp) /* Stack - skipping return address and flags */ leaq MCOUNT_REG_SIZE+8*2(%rsp), %rcx movq %rcx, RSP(%rsp) ENCODE_FRAME_POINTER /* regs go into 4th parameter */ leaq (%rsp), %rcx GLOBAL(ftrace_regs_call) call ftrace_stub /* Copy flags back to SS, to restore them */ movq EFLAGS(%rsp), %rax movq %rax, MCOUNT_REG_SIZE(%rsp) /* Handlers can change the RIP */ movq RIP(%rsp), %rax movq %rax, MCOUNT_REG_SIZE+8(%rsp) /* restore the rest of pt_regs */ movq R15(%rsp), %r15 movq R14(%rsp), %r14 movq R13(%rsp), %r13 movq R12(%rsp), %r12 movq R10(%rsp), %r10 movq RBX(%rsp), %rbx restore_mcount_regs /* Restore flags */ popfq /* * As this jmp to ftrace_epilogue can be a short jump * it must not be copied into the trampoline. * The trampoline will add the code to jump * to the return. */ GLOBAL(ftrace_regs_caller_end) jmp ftrace_epilogue ENDPROC(ftrace_regs_caller) #else /* ! 
CONFIG_DYNAMIC_FTRACE */ ENTRY(function_hook) cmpq $ftrace_stub, ftrace_trace_function jnz trace fgraph_trace: #ifdef CONFIG_FUNCTION_GRAPH_TRACER cmpq $ftrace_stub, ftrace_graph_return jnz ftrace_graph_caller cmpq $ftrace_graph_entry_stub, ftrace_graph_entry jnz ftrace_graph_caller #endif GLOBAL(ftrace_stub) retq trace: /* save_mcount_regs fills in first two parameters */ save_mcount_regs /* * When DYNAMIC_FTRACE is not defined, ARCH_SUPPORTS_FTRACE_OPS is not * set (see include/asm/ftrace.h and include/linux/ftrace.h). Only the * ip and parent ip are used and the list function is called when * function tracing is enabled. */ movq ftrace_trace_function, %r8 CALL_NOSPEC %r8 restore_mcount_regs jmp fgraph_trace ENDPROC(function_hook) #endif /* CONFIG_DYNAMIC_FTRACE */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER ENTRY(ftrace_graph_caller) /* Saves rbp into %rdx and fills first parameter */ save_mcount_regs #ifdef CC_USING_FENTRY leaq MCOUNT_REG_SIZE+8(%rsp), %rsi movq $0, %rdx /* No framepointers needed */ #else /* Save address of the return address of traced function */ leaq 8(%rdx), %rsi /* ftrace does sanity checks against frame pointers */ movq (%rdx), %rdx #endif call prepare_ftrace_return restore_mcount_regs retq ENDPROC(ftrace_graph_caller) ENTRY(return_to_handler) UNWIND_HINT_EMPTY subq $24, %rsp /* Save the return values */ movq %rax, (%rsp) movq %rdx, 8(%rsp) movq %rbp, %rdi call ftrace_return_to_handler movq %rax, %rdi movq 8(%rsp), %rdx movq (%rsp), %rax addq $24, %rsp JMP_NOSPEC %rdi END(return_to_handler) #endif
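Both trampolines above funnel into a C callback with the same four-argument shape: the traced ip in %rdi, the parent ip in %rsi, the ftrace_ops pointer in %rdx, and a pt_regs pointer (or NULL for the non-regs caller) in %rcx. The sketch below shows just that calling shape; the two structs are opaque stand-ins here, not the kernel's definitions.

#include <stdio.h>

struct ftrace_ops;   /* opaque stand-in */
struct pt_regs;      /* opaque stand-in */

/* Shape of the call made by ftrace_caller/ftrace_regs_caller above. */
static void demo_callback(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *ops, struct pt_regs *regs)
{
    (void)ops;
    printf("traced %#lx, called from %#lx, %s regs\n",
           ip, parent_ip, regs ? "with" : "without");
}

int main(void)
{
    /* ftrace_caller passes a NULL regs pointer; ftrace_regs_caller
     * passes the pt_regs image it just built on the stack. */
    demo_callback(0xffffffff81000000UL, 0xffffffff81000123UL, NULL, NULL);
    return 0;
}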
AirFortressIlikara/LS2K0300-linux-4.19
14,908
arch/x86/kernel/head_32.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * * Copyright (C) 1991, 1992 Linus Torvalds * * Enhanced CPU detection and feature setting code by Mike Jagdis * and Martin Mares, November 1997. */ .text #include <linux/threads.h> #include <linux/init.h> #include <linux/linkage.h> #include <asm/segment.h> #include <asm/page_types.h> #include <asm/pgtable_types.h> #include <asm/cache.h> #include <asm/thread_info.h> #include <asm/asm-offsets.h> #include <asm/setup.h> #include <asm/processor-flags.h> #include <asm/msr-index.h> #include <asm/cpufeatures.h> #include <asm/percpu.h> #include <asm/nops.h> #include <asm/bootparam.h> #include <asm/export.h> #include <asm/pgtable_32.h> /* Physical address */ #define pa(X) ((X) - __PAGE_OFFSET) /* * References to members of the new_cpu_data structure. */ #define X86 new_cpu_data+CPUINFO_x86 #define X86_VENDOR new_cpu_data+CPUINFO_x86_vendor #define X86_MODEL new_cpu_data+CPUINFO_x86_model #define X86_STEPPING new_cpu_data+CPUINFO_x86_stepping #define X86_HARD_MATH new_cpu_data+CPUINFO_hard_math #define X86_CPUID new_cpu_data+CPUINFO_cpuid_level #define X86_CAPABILITY new_cpu_data+CPUINFO_x86_capability #define X86_VENDOR_ID new_cpu_data+CPUINFO_x86_vendor_id #define SIZEOF_PTREGS 17*4 /* * Worst-case size of the kernel mapping we need to make: * a relocatable kernel can live anywhere in lowmem, so we need to be able * to map all of lowmem. */ KERNEL_PAGES = LOWMEM_PAGES INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE RESERVE_BRK(pagetables, INIT_MAP_SIZE) /* * 32-bit kernel entrypoint; only used by the boot CPU. On entry, * %esi points to the real-mode code as a 32-bit pointer. * CS and DS must be 4 GB flat segments, but we don't depend on * any particular GDT layout, because we load our own as soon as we * can. */ __HEAD ENTRY(startup_32) movl pa(initial_stack),%ecx /* test KEEP_SEGMENTS flag to see if the bootloader is asking us to not reload segments */ testb $KEEP_SEGMENTS, BP_loadflags(%esi) jnz 2f /* * Set segments to known values. */ lgdt pa(boot_gdt_descr) movl $(__BOOT_DS),%eax movl %eax,%ds movl %eax,%es movl %eax,%fs movl %eax,%gs movl %eax,%ss 2: leal -__PAGE_OFFSET(%ecx),%esp /* * Clear BSS first so that there are no surprises... */ cld xorl %eax,%eax movl $pa(__bss_start),%edi movl $pa(__bss_stop),%ecx subl %edi,%ecx shrl $2,%ecx rep ; stosl /* * Copy bootup parameters out of the way. * Note: %esi still has the pointer to the real-mode data. * With kexec as the boot loader, the parameter segment might be loaded beyond * the kernel image and might not even be addressable by early boot page tables. * (kexec on panic case). Hence copy out the parameters before initializing * page tables. */ movl $pa(boot_params),%edi movl $(PARAM_SIZE/4),%ecx cld rep movsl movl pa(boot_params) + NEW_CL_POINTER,%esi andl %esi,%esi jz 1f # No command line movl $pa(boot_command_line),%edi movl $(COMMAND_LINE_SIZE/4),%ecx rep movsl 1: #ifdef CONFIG_OLPC /* save OFW's pgdir table for later use when calling into OFW */ movl %cr3, %eax movl %eax, pa(olpc_ofw_pgd) #endif #ifdef CONFIG_MICROCODE /* Early load ucode on BSP. */ call load_ucode_bsp #endif /* Create early pagetables. */ call mk_early_pgtbl_32 /* Do early initialization of the fixmap area */ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax #ifdef CONFIG_X86_PAE #define KPMDS (((-__PAGE_OFFSET) >> 30) & 3) /* Number of kernel PMDs */ movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8) #else movl %eax,pa(initial_page_table+0xffc) #endif #ifdef CONFIG_PARAVIRT /* This can only trip for a broken bootloader...
*/ cmpw $0x207, pa(boot_params + BP_version) jb .Ldefault_entry /* Paravirt-compatible boot parameters. Look to see what architecture we're booting under. */ movl pa(boot_params + BP_hardware_subarch), %eax cmpl $num_subarch_entries, %eax jae .Lbad_subarch movl pa(subarch_entries)(,%eax,4), %eax subl $__PAGE_OFFSET, %eax jmp *%eax .Lbad_subarch: WEAK(xen_entry) /* Unknown implementation; there's really nothing we can do at this point. */ ud2a __INITDATA subarch_entries: .long .Ldefault_entry /* normal x86/PC */ .long xen_entry /* Xen hypervisor */ .long .Ldefault_entry /* Moorestown MID */ num_subarch_entries = (. - subarch_entries) / 4 .previous #else jmp .Ldefault_entry #endif /* CONFIG_PARAVIRT */ #ifdef CONFIG_HOTPLUG_CPU /* * Boot CPU0 entry point. It's called from play_dead(). Everything has been set * up already except stack. We just set up stack here. Then call * start_secondary(). */ ENTRY(start_cpu0) movl initial_stack, %ecx movl %ecx, %esp call *(initial_code) 1: jmp 1b ENDPROC(start_cpu0) #endif /* * Non-boot CPU entry point; entered from trampoline.S * We can't lgdt here, because lgdt itself uses a data segment, but * we know the trampoline has already loaded the boot_gdt for us. * * If cpu hotplug is not supported then this code can go in init section * which will be freed later */ ENTRY(startup_32_smp) cld movl $(__BOOT_DS),%eax movl %eax,%ds movl %eax,%es movl %eax,%fs movl %eax,%gs movl pa(initial_stack),%ecx movl %eax,%ss leal -__PAGE_OFFSET(%ecx),%esp #ifdef CONFIG_MICROCODE /* Early load ucode on AP. */ call load_ucode_ap #endif .Ldefault_entry: movl $(CR0_STATE & ~X86_CR0_PG),%eax movl %eax,%cr0 /* * We want to start out with EFLAGS unambiguously cleared. Some BIOSes leave * bits like NT set. This would confuse the debugger if this code is traced. So * initialize them properly now before switching to protected mode. That means * DF in particular (even though we have cleared it earlier after copying the * command line) because GCC expects it. */ pushl $0 popfl /* * New page tables may be in 4Mbyte page mode and may be using the global pages. * * NOTE! If we are on a 486 we may have no cr4 at all! Specifically, cr4 exists * if and only if CPUID exists and has flags other than the FPU flag set. */ movl $-1,pa(X86_CPUID) # preset CPUID level movl $X86_EFLAGS_ID,%ecx pushl %ecx popfl # set EFLAGS=ID pushfl popl %eax # get EFLAGS testl $X86_EFLAGS_ID,%eax # did EFLAGS.ID remain set? jz .Lenable_paging # hw disallowed setting of ID bit # which means no CPUID and no CR4 xorl %eax,%eax cpuid movl %eax,pa(X86_CPUID) # save largest std CPUID function movl $1,%eax cpuid andl $~1,%edx # Ignore CPUID.FPU jz .Lenable_paging # No flags or only CPUID.FPU = no CR4 movl pa(mmu_cr4_features),%eax movl %eax,%cr4 testb $X86_CR4_PAE, %al # check if PAE is enabled jz .Lenable_paging /* Check if extended functions are implemented */ movl $0x80000000, %eax cpuid /* Value must be in the range 0x80000001 to 0x8000ffff */ subl $0x80000001, %eax cmpl $(0x8000ffff-0x80000001), %eax ja .Lenable_paging /* Clear bogus XD_DISABLE bits */ call verify_cpu mov $0x80000001, %eax cpuid /* Execute Disable bit supported? */ btl $(X86_FEATURE_NX & 31), %edx jnc .Lenable_paging /* Setup EFER (Extended Feature Enable Register) */ movl $MSR_EFER, %ecx rdmsr btsl $_EFER_NX, %eax /* Make changes effective */ wrmsr .Lenable_paging: /* * Enable paging */ movl $pa(initial_page_table), %eax movl %eax,%cr3 /* set the page table pointer..
*/ movl $CR0_STATE,%eax movl %eax,%cr0 /* ..and set paging (PG) bit */ ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */ 1: /* Shift the stack pointer to a virtual address */ addl $__PAGE_OFFSET, %esp /* * start system 32-bit setup. We need to re-do some of the things done * in 16-bit mode for the "real" operations. */ movl setup_once_ref,%eax andl %eax,%eax jz 1f # Did we do this already? call *%eax 1: /* * Check if it is 486 */ movb $4,X86 # at least 486 cmpl $-1,X86_CPUID je .Lis486 /* get vendor info */ xorl %eax,%eax # call CPUID with 0 -> return vendor ID cpuid movl %eax,X86_CPUID # save CPUID level movl %ebx,X86_VENDOR_ID # lo 4 chars movl %edx,X86_VENDOR_ID+4 # next 4 chars movl %ecx,X86_VENDOR_ID+8 # last 4 chars orl %eax,%eax # do we have processor info as well? je .Lis486 movl $1,%eax # Use the CPUID instruction to get CPU type cpuid movb %al,%cl # save reg for future use andb $0x0f,%ah # mask processor family movb %ah,X86 andb $0xf0,%al # mask model shrb $4,%al movb %al,X86_MODEL andb $0x0f,%cl # mask revision movb %cl,X86_STEPPING movl %edx,X86_CAPABILITY .Lis486: movl $0x50022,%ecx # set AM, WP, NE and MP movl %cr0,%eax andl $0x80000011,%eax # Save PG,PE,ET orl %ecx,%eax movl %eax,%cr0 lgdt early_gdt_descr ljmp $(__KERNEL_CS),$1f 1: movl $(__KERNEL_DS),%eax # reload all the segment registers movl %eax,%ss # after changing gdt. movl $(__USER_DS),%eax # DS/ES contains default USER segment movl %eax,%ds movl %eax,%es movl $(__KERNEL_PERCPU), %eax movl %eax,%fs # set this cpu's percpu movl $(__KERNEL_STACK_CANARY),%eax movl %eax,%gs xorl %eax,%eax # Clear LDT lldt %ax call *(initial_code) 1: jmp 1b ENDPROC(startup_32_smp) #include "verify_cpu.S" /* * setup_once * * The setup work we only want to run on the BSP. * * Warning: %esi is live across this function. */ __INIT setup_once: #ifdef CONFIG_STACKPROTECTOR /* * Configure the stack canary. The linker can't handle this by * relocation. Manually set base address in stack canary * segment descriptor. */ movl $gdt_page,%eax movl $stack_canary,%ecx movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax) shrl $16, %ecx movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax) movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax) #endif andl $0,setup_once_ref /* Once is enough, thanks */ ret ENTRY(early_idt_handler_array) # 36(%esp) %eflags # 32(%esp) %cs # 28(%esp) %eip # 24(%esp) error code i = 0 .rept NUM_EXCEPTION_VECTORS .if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0 pushl $0 # Dummy error code, to make stack frame uniform .endif pushl $i # 20(%esp) Vector number jmp early_idt_handler_common i = i + 1 .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc .endr ENDPROC(early_idt_handler_array) early_idt_handler_common: /* * The stack is the hardware frame, an error code or zero, and the * vector number.
*/ cld incl %ss:early_recursion_flag /* The vector number is in pt_regs->gs */ cld pushl %fs /* pt_regs->fs (__fsh varies by model) */ pushl %es /* pt_regs->es (__esh varies by model) */ pushl %ds /* pt_regs->ds (__dsh varies by model) */ pushl %eax /* pt_regs->ax */ pushl %ebp /* pt_regs->bp */ pushl %edi /* pt_regs->di */ pushl %esi /* pt_regs->si */ pushl %edx /* pt_regs->dx */ pushl %ecx /* pt_regs->cx */ pushl %ebx /* pt_regs->bx */ /* Fix up DS and ES */ movl $(__KERNEL_DS), %ecx movl %ecx, %ds movl %ecx, %es /* Load the vector number into EDX */ movl PT_GS(%esp), %edx /* Load GS into pt_regs->gs (and maybe clobber __gsh) */ movw %gs, PT_GS(%esp) movl %esp, %eax /* args are pt_regs (EAX), trapnr (EDX) */ call early_fixup_exception popl %ebx /* pt_regs->bx */ popl %ecx /* pt_regs->cx */ popl %edx /* pt_regs->dx */ popl %esi /* pt_regs->si */ popl %edi /* pt_regs->di */ popl %ebp /* pt_regs->bp */ popl %eax /* pt_regs->ax */ popl %ds /* pt_regs->ds (always ignores __dsh) */ popl %es /* pt_regs->es (always ignores __esh) */ popl %fs /* pt_regs->fs (always ignores __fsh) */ popl %gs /* pt_regs->gs (always ignores __gsh) */ decl %ss:early_recursion_flag addl $4, %esp /* pop pt_regs->orig_ax */ iret ENDPROC(early_idt_handler_common) /* This is the default interrupt "handler" :-) */ ENTRY(early_ignore_irq) cld #ifdef CONFIG_PRINTK pushl %eax pushl %ecx pushl %edx pushl %es pushl %ds movl $(__KERNEL_DS),%eax movl %eax,%ds movl %eax,%es cmpl $2,early_recursion_flag je hlt_loop incl early_recursion_flag pushl 16(%esp) pushl 24(%esp) pushl 32(%esp) pushl 40(%esp) pushl $int_msg call printk call dump_stack addl $(5*4),%esp popl %ds popl %es popl %edx popl %ecx popl %eax #endif iret hlt_loop: hlt jmp hlt_loop ENDPROC(early_ignore_irq) __INITDATA .align 4 GLOBAL(early_recursion_flag) .long 0 __REFDATA .align 4 ENTRY(initial_code) .long i386_start_kernel ENTRY(setup_once_ref) .long setup_once #ifdef CONFIG_PAGE_TABLE_ISOLATION #define PGD_ALIGN (2 * PAGE_SIZE) #define PTI_USER_PGD_FILL 1024 #else #define PGD_ALIGN (PAGE_SIZE) #define PTI_USER_PGD_FILL 0 #endif /* * BSS section */ __PAGE_ALIGNED_BSS .align PGD_ALIGN #ifdef CONFIG_X86_PAE .globl initial_pg_pmd initial_pg_pmd: .fill 1024*KPMDS,4,0 #else .globl initial_page_table initial_page_table: .fill 1024,4,0 #endif .align PGD_ALIGN initial_pg_fixmap: .fill 1024,4,0 .globl swapper_pg_dir .align PGD_ALIGN swapper_pg_dir: .fill 1024,4,0 .fill PTI_USER_PGD_FILL,4,0 .globl empty_zero_page empty_zero_page: .fill 4096,1,0 EXPORT_SYMBOL(empty_zero_page) /* * This starts the data section. */ #ifdef CONFIG_X86_PAE __PAGE_ALIGNED_DATA /* Page-aligned for the benefit of paravirt? */ .align PGD_ALIGN ENTRY(initial_page_table) .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */ # if KPMDS == 3 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 .long pa(initial_pg_pmd+PGD_IDENT_ATTR+0x1000),0 .long pa(initial_pg_pmd+PGD_IDENT_ATTR+0x2000),0 # elif KPMDS == 2 .long 0,0 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 .long pa(initial_pg_pmd+PGD_IDENT_ATTR+0x1000),0 # elif KPMDS == 1 .long 0,0 .long 0,0 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 # else # error "Kernel PMDs should be 1, 2 or 3" # endif .align PAGE_SIZE /* needs to be page-sized too */ #endif .data .balign 4 ENTRY(initial_stack) /* * The SIZEOF_PTREGS gap is a convention which helps the in-kernel * unwinder reliably detect the end of the stack. 
.long init_thread_union + THREAD_SIZE - SIZEOF_PTREGS - \ TOP_OF_KERNEL_STACK_PADDING; __INITRODATA int_msg: .asciz "Unknown interrupt or fault at: %p %p %p\n" #include "../../x86/xen/xen-head.S" /* * The IDT and GDT 'descriptors' are strange 48-bit objects * only used by the lidt and lgdt instructions. They are not * like usual segment descriptors - they consist of a 16-bit * segment size and a 32-bit linear address value: */ .data .globl boot_gdt_descr ALIGN # early boot GDT descriptor (must use 1:1 address mapping) .word 0 # 32 bit align gdt_desc.address boot_gdt_descr: .word __BOOT_DS+7 .long boot_gdt - __PAGE_OFFSET # boot GDT descriptor (later on used by CPU#0): .word 0 # 32 bit align gdt_desc.address ENTRY(early_gdt_descr) .word GDT_ENTRIES*8-1 .long gdt_page /* Overwritten for secondary CPUs */ /* * The boot_gdt must mirror the equivalent in setup.S and is * used only for booting. */ .align L1_CACHE_BYTES ENTRY(boot_gdt) .fill GDT_ENTRY_BOOT_CS,8,0 .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */ .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
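The 48-bit descriptor layout described in the comment above maps directly onto a packed C struct: a 16-bit table limit followed by a 32-bit linear base. This stand-alone sketch only demonstrates the layout and sizing; the table it points at is a dummy zeroed array, not a usable GDT.

#include <stdint.h>
#include <stdio.h>

/* The 48-bit lgdt/lidt operand: limit then base, no padding. */
struct desc_ptr32 {
    uint16_t limit;                 /* size of the table minus one */
    uint32_t base;                  /* linear address of the table */
} __attribute__((packed));

int main(void)
{
    static uint64_t fake_gdt[8];    /* 8 zeroed descriptor slots */
    struct desc_ptr32 gdtr = {
        .limit = sizeof(fake_gdt) - 1,
        .base  = (uint32_t)(uintptr_t)fake_gdt,  /* truncated on 64-bit hosts */
    };

    printf("descriptor is %zu bytes (expect 6)\n", sizeof(gdtr));
    printf("limit=%#x base=%#x\n", gdtr.limit, (unsigned)gdtr.base);
    return 0;
}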
AirFortressIlikara/LS2K0300-linux-4.19
5,697
arch/x86/kernel/relocate_kernel_32.S
/* * relocate_kernel.S - put the kernel image in place to boot * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com> * * This source code is licensed under the GNU General Public License, * Version 2. See the file COPYING for more details. */ #include <linux/linkage.h> #include <asm/page_types.h> #include <asm/kexec.h> #include <asm/processor-flags.h> /* * Must be relocatable PIC code callable as a C function */ #define PTR(x) (x << 2) /* * control_page + KEXEC_CONTROL_CODE_MAX_SIZE * ~ control_page + PAGE_SIZE are used as data storage and stack for * jumping back */ #define DATA(offset) (KEXEC_CONTROL_CODE_MAX_SIZE+(offset)) /* Minimal CPU state */ #define ESP DATA(0x0) #define CR0 DATA(0x4) #define CR3 DATA(0x8) #define CR4 DATA(0xc) /* other data */ #define CP_VA_CONTROL_PAGE DATA(0x10) #define CP_PA_PGD DATA(0x14) #define CP_PA_SWAP_PAGE DATA(0x18) #define CP_PA_BACKUP_PAGES_MAP DATA(0x1c) .text .globl relocate_kernel relocate_kernel: /* Save the CPU context, used for jumping back */ pushl %ebx pushl %esi pushl %edi pushl %ebp pushf movl 20+8(%esp), %ebp /* list of pages */ movl PTR(VA_CONTROL_PAGE)(%ebp), %edi movl %esp, ESP(%edi) movl %cr0, %eax movl %eax, CR0(%edi) movl %cr3, %eax movl %eax, CR3(%edi) movl %cr4, %eax movl %eax, CR4(%edi) /* read the arguments and say goodbye to the stack */ movl 20+4(%esp), %ebx /* page_list */ movl 20+8(%esp), %ebp /* list of pages */ movl 20+12(%esp), %edx /* start address */ movl 20+16(%esp), %ecx /* cpu_has_pae */ movl 20+20(%esp), %esi /* preserve_context */ /* zero out flags, and disable interrupts */ pushl $0 popfl /* save some information for jumping back */ movl PTR(VA_CONTROL_PAGE)(%ebp), %edi movl %edi, CP_VA_CONTROL_PAGE(%edi) movl PTR(PA_PGD)(%ebp), %eax movl %eax, CP_PA_PGD(%edi) movl PTR(PA_SWAP_PAGE)(%ebp), %eax movl %eax, CP_PA_SWAP_PAGE(%edi) movl %ebx, CP_PA_BACKUP_PAGES_MAP(%edi) /* * get physical address of control page now * this is impossible after page table switch */ movl PTR(PA_CONTROL_PAGE)(%ebp), %edi /* switch to new set of page tables */ movl PTR(PA_PGD)(%ebp), %eax movl %eax, %cr3 /* setup a new stack at the end of the physical control page */ lea PAGE_SIZE(%edi), %esp /* jump to identity mapped page */ movl %edi, %eax addl $(identity_mapped - relocate_kernel), %eax pushl %eax ret identity_mapped: /* set return address to 0 if not preserving context */ pushl $0 /* store the start address on the stack */ pushl %edx /* * Set cr0 to a known state: * - Paging disabled * - Alignment check disabled * - Write protect disabled * - No task switch * - Don't do FP software emulation. * - Protected mode enabled */ movl %cr0, %eax andl $~(X86_CR0_PG | X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %eax orl $(X86_CR0_PE), %eax movl %eax, %cr0 /* clear cr4 if applicable */ testl %ecx, %ecx jz 1f /* * Set cr4 to a known state: * Setting everything to zero seems safe. */ xorl %eax, %eax movl %eax, %cr4 jmp 1f 1: /* Flush the TLB (needed?) */ xorl %eax, %eax movl %eax, %cr3 movl CP_PA_SWAP_PAGE(%edi), %eax pushl %eax pushl %ebx call swap_pages addl $8, %esp /* * To be certain of avoiding problems with self-modifying code * I need to execute a serializing instruction here. * So I flush the TLB, it's handy, and not processor dependent.
*/ xorl %eax, %eax movl %eax, %cr3 /* * set all of the registers to known values * leave %esp alone */ testl %esi, %esi jnz 1f xorl %edi, %edi xorl %eax, %eax xorl %ebx, %ebx xorl %ecx, %ecx xorl %edx, %edx xorl %esi, %esi xorl %ebp, %ebp ret 1: popl %edx movl CP_PA_SWAP_PAGE(%edi), %esp addl $PAGE_SIZE, %esp 2: call *%edx /* get the re-entry point of the peer system */ movl 0(%esp), %ebp call 1f 1: popl %ebx subl $(1b - relocate_kernel), %ebx movl CP_VA_CONTROL_PAGE(%ebx), %edi lea PAGE_SIZE(%ebx), %esp movl CP_PA_SWAP_PAGE(%ebx), %eax movl CP_PA_BACKUP_PAGES_MAP(%ebx), %edx pushl %eax pushl %edx call swap_pages addl $8, %esp movl CP_PA_PGD(%ebx), %eax movl %eax, %cr3 movl %cr0, %eax orl $X86_CR0_PG, %eax movl %eax, %cr0 lea PAGE_SIZE(%edi), %esp movl %edi, %eax addl $(virtual_mapped - relocate_kernel), %eax pushl %eax ret virtual_mapped: movl CR4(%edi), %eax movl %eax, %cr4 movl CR3(%edi), %eax movl %eax, %cr3 movl CR0(%edi), %eax movl %eax, %cr0 movl ESP(%edi), %esp movl %ebp, %eax popf popl %ebp popl %edi popl %esi popl %ebx ret /* Do the copies */ swap_pages: movl 8(%esp), %edx movl 4(%esp), %ecx pushl %ebp pushl %ebx pushl %edi pushl %esi movl %ecx, %ebx jmp 1f 0: /* top, read another word from the indirection page */ movl (%ebx), %ecx addl $4, %ebx 1: testb $0x1, %cl /* is it a destination page */ jz 2f movl %ecx, %edi andl $0xfffff000, %edi jmp 0b 2: testb $0x2, %cl /* is it an indirection page */ jz 2f movl %ecx, %ebx andl $0xfffff000, %ebx jmp 0b 2: testb $0x4, %cl /* is it the done indicator */ jz 2f jmp 3f 2: testb $0x8, %cl /* is it the source indicator */ jz 0b /* Ignore it otherwise */ movl %ecx, %esi /* For every source page do a copy */ andl $0xfffff000, %esi movl %edi, %eax movl %esi, %ebp movl %edx, %edi movl $1024, %ecx rep ; movsl movl %ebp, %edi movl %eax, %esi movl $1024, %ecx rep ; movsl movl %eax, %edi movl %edx, %esi movl $1024, %ecx rep ; movsl lea PAGE_SIZE(%ebp), %esi jmp 0b 3: popl %esi popl %edi popl %ebx popl %ebp ret .globl kexec_control_code_size .set kexec_control_code_size, . - relocate_kernel
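The indirection list that swap_pages walks above can be rendered as a small interpreter. The low-bit tags are the kernel's IND_* encoding (the testb $0x1/$0x2/$0x4/$0x8 checks in the assembly); the page addresses in main() are invented, and this sketch prints the copy actions instead of touching real pages or handling the preserve_context swap path.

#include <stdio.h>

#define IND_DESTINATION 0x1
#define IND_INDIRECTION 0x2
#define IND_DONE        0x4
#define IND_SOURCE      0x8
#define PAGE_ADDR_MASK  (~0xfffUL)

static void walk(const unsigned long *entry)
{
    unsigned long dest = 0;

    for (;;) {
        unsigned long e = *entry++;

        if (e & IND_DESTINATION) {
            dest = e & PAGE_ADDR_MASK;      /* set the copy target */
        } else if (e & IND_INDIRECTION) {
            /* continue reading entries from another page */
            entry = (const unsigned long *)(e & PAGE_ADDR_MASK);
        } else if (e & IND_DONE) {
            return;
        } else if (e & IND_SOURCE) {
            printf("copy page %#lx -> %#lx\n", e & PAGE_ADDR_MASK, dest);
            dest += 0x1000;                 /* next source fills the next page */
        }
    }
}

int main(void)
{
    unsigned long list[] = {
        0x100000 | IND_DESTINATION,
        0x200000 | IND_SOURCE,
        0x201000 | IND_SOURCE,
        IND_DONE,
    };
    walk(list);
    return 0;
}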
AirFortressIlikara/LS2K0300-linux-4.19
10,744
arch/x86/kernel/vmlinux.lds.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * ld script for the x86 kernel * * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz> * * Modernisation, unification and other changes and fixes: * Copyright (C) 2007-2009 Sam Ravnborg <sam@ravnborg.org> * * * Don't define absolute symbols until and unless you know that the symbol * value should remain constant even if the kernel image is relocated * at run time. Absolute symbols are not relocated. If the symbol value should * change when the kernel is relocated, make the symbol section relative and * put it inside the section definition. */ #ifdef CONFIG_X86_32 #define LOAD_OFFSET __PAGE_OFFSET #else #define LOAD_OFFSET __START_KERNEL_map #endif #include <asm-generic/vmlinux.lds.h> #include <asm/asm-offsets.h> #include <asm/thread_info.h> #include <asm/page_types.h> #include <asm/orc_lookup.h> #include <asm/cache.h> #include <asm/boot.h> #undef i386 /* in case the preprocessor is a 32bit one */ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT) #ifdef CONFIG_X86_32 OUTPUT_ARCH(i386) ENTRY(phys_startup_32) #else OUTPUT_ARCH(i386:x86-64) ENTRY(phys_startup_64) #endif jiffies = jiffies_64; #if defined(CONFIG_X86_64) /* * On 64-bit, align RODATA to 2MB so we retain large page mappings for * boundaries spanning kernel text, rodata and data sections. * * However, kernel identity mappings will have different RWX permissions * from the pages mapping the text and from the (freed) pages padding the * text section. Hence kernel identity mappings will be broken to smaller * pages. For 64-bit, kernel text and kernel identity mappings are different, * so we can enable protection checks as well as retain 2MB large page * mappings for kernel text. */ #define X86_ALIGN_RODATA_BEGIN . = ALIGN(HPAGE_SIZE); #define X86_ALIGN_RODATA_END \ . = ALIGN(HPAGE_SIZE); \ __end_rodata_hpage_align = .; \ __end_rodata_aligned = .; #define ALIGN_ENTRY_TEXT_BEGIN . = ALIGN(PMD_SIZE); #define ALIGN_ENTRY_TEXT_END . = ALIGN(PMD_SIZE); /* * This section contains data which will be mapped as decrypted. Memory * encryption operates on a page basis. Make this section PMD-aligned * to avoid splitting the pages while mapping the section early. * * Note: We use a separate section so that only this section gets * decrypted to avoid exposing more than we wish. */ #define BSS_DECRYPTED \ . = ALIGN(PMD_SIZE); \ __start_bss_decrypted = .; \ *(.bss..decrypted); \ . = ALIGN(PAGE_SIZE); \ __start_bss_decrypted_unused = .; \ . = ALIGN(PMD_SIZE); \ __end_bss_decrypted = .; \ #else #define X86_ALIGN_RODATA_BEGIN #define X86_ALIGN_RODATA_END \ . = ALIGN(PAGE_SIZE); \ __end_rodata_aligned = .; #define ALIGN_ENTRY_TEXT_BEGIN #define ALIGN_ENTRY_TEXT_END #define BSS_DECRYPTED #endif PHDRS { text PT_LOAD FLAGS(5); /* R_E */ data PT_LOAD FLAGS(6); /* RW_ */ #ifdef CONFIG_X86_64 #ifdef CONFIG_SMP percpu PT_LOAD FLAGS(6); /* RW_ */ #endif init PT_LOAD FLAGS(7); /* RWE */ #endif note PT_NOTE FLAGS(0); /* ___ */ } SECTIONS { #ifdef CONFIG_X86_32 . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR; phys_startup_32 = ABSOLUTE(startup_32 - LOAD_OFFSET); #else . = __START_KERNEL; phys_startup_64 = ABSOLUTE(startup_64 - LOAD_OFFSET); #endif /* Text and read-only data */ .text : AT(ADDR(.text) - LOAD_OFFSET) { _text = .; _stext = .; /* bootstrapping code */ HEAD_TEXT TEXT_TEXT SCHED_TEXT CPUIDLE_TEXT LOCK_TEXT KPROBES_TEXT ALIGN_ENTRY_TEXT_BEGIN ENTRY_TEXT IRQENTRY_TEXT ALIGN_ENTRY_TEXT_END SOFTIRQENTRY_TEXT *(.fixup) *(.gnu.warning) #ifdef CONFIG_X86_64 .
= ALIGN(PAGE_SIZE); __entry_trampoline_start = .; _entry_trampoline = .; *(.entry_trampoline) . = ALIGN(PAGE_SIZE); __entry_trampoline_end = .; ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big"); #endif #ifdef CONFIG_RETPOLINE __indirect_thunk_start = .; *(.text.__x86.indirect_thunk) __indirect_thunk_end = .; #endif /* End of text section */ _etext = .; } :text = 0x9090 NOTES :text :note EXCEPTION_TABLE(16) :text = 0x9090 /* .text should occupy whole number of pages */ . = ALIGN(PAGE_SIZE); X86_ALIGN_RODATA_BEGIN RO_DATA(PAGE_SIZE) X86_ALIGN_RODATA_END /* Data */ .data : AT(ADDR(.data) - LOAD_OFFSET) { /* Start of data section */ _sdata = .; /* init_task */ INIT_TASK_DATA(THREAD_SIZE) #ifdef CONFIG_X86_32 /* 32 bit has nosave before _edata */ NOSAVE_DATA #endif PAGE_ALIGNED_DATA(PAGE_SIZE) CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES) DATA_DATA CONSTRUCTORS /* rarely changed data like cpu maps */ READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES) /* End of data section */ _edata = .; } :data BUG_TABLE ORC_UNWIND_TABLE . = ALIGN(PAGE_SIZE); __vvar_page = .; .vvar : AT(ADDR(.vvar) - LOAD_OFFSET) { /* work around gold bug 13023 */ __vvar_beginning_hack = .; /* Place all vvars at the offsets in asm/vvar.h. */ #define EMIT_VVAR(name, offset) \ . = __vvar_beginning_hack + offset; \ *(.vvar_ ## name) #define __VVAR_KERNEL_LDS #include <asm/vvar.h> #undef __VVAR_KERNEL_LDS #undef EMIT_VVAR /* * Pad the rest of the page with zeros. Otherwise the loader * can leave garbage here. */ . = __vvar_beginning_hack + PAGE_SIZE; } :data . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE); /* Init code and data - will be freed after init */ . = ALIGN(PAGE_SIZE); .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) { __init_begin = .; /* paired with __init_end */ } #if defined(CONFIG_X86_64) && defined(CONFIG_SMP) /* * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the * output PHDR, so the next output section - .init.text - should * start another segment - init. */ PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu) ASSERT(SIZEOF(.data..percpu) < CONFIG_PHYSICAL_START, "per-CPU data too large - increase CONFIG_PHYSICAL_START") #endif INIT_TEXT_SECTION(PAGE_SIZE) #ifdef CONFIG_X86_64 :init #endif /* * Section for code used exclusively before alternatives are run. All * references to such code must be patched out by alternatives, normally * by using X86_FEATURE_ALWAYS CPU feature bit. * * See static_cpu_has() for an example. */ .altinstr_aux : AT(ADDR(.altinstr_aux) - LOAD_OFFSET) { *(.altinstr_aux) } INIT_DATA_SECTION(16) .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) { __x86_cpu_dev_start = .; *(.x86_cpu_dev.init) __x86_cpu_dev_end = .; } #ifdef CONFIG_X86_INTEL_MID .x86_intel_mid_dev.init : AT(ADDR(.x86_intel_mid_dev.init) - \ LOAD_OFFSET) { __x86_intel_mid_dev_start = .; *(.x86_intel_mid_dev.init) __x86_intel_mid_dev_end = .; } #endif /* * start address and size of operations which during runtime * can be patched with virtualization friendly instructions or * baremetal native ones. Think page table operations. * Details in paravirt_types.h */ . = ALIGN(8); .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) { __parainstructions = .; *(.parainstructions) __parainstructions_end = .; } /* * struct alt_inst entries. From the header (alternative.h): * "Alternative instructions for different CPU types or capabilities" * Think locking instructions on spinlocks. */ . 
= ALIGN(8); .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) { __alt_instructions = .; *(.altinstructions) __alt_instructions_end = .; } /* * And here are the replacement instructions. The linker sticks * them in as binary blobs. The .altinstructions section has enough data to * get the address and the length of them to patch the kernel safely. */ .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) { *(.altinstr_replacement) } /* * struct iommu_table_entry entries are injected in this section. * It is an array of IOMMUs which during run time gets sorted depending * on its dependency order. After rootfs_initcall is complete * this section can be safely removed. */ .iommu_table : AT(ADDR(.iommu_table) - LOAD_OFFSET) { __iommu_table = .; *(.iommu_table) __iommu_table_end = .; } . = ALIGN(8); .apicdrivers : AT(ADDR(.apicdrivers) - LOAD_OFFSET) { __apicdrivers = .; *(.apicdrivers); __apicdrivers_end = .; } . = ALIGN(8); /* * .exit.text is discarded at runtime, not link time, to deal with * references from .altinstructions and .eh_frame */ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { EXIT_TEXT } .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { EXIT_DATA } #if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP) PERCPU_SECTION(INTERNODE_CACHE_BYTES) #endif . = ALIGN(PAGE_SIZE); /* freed after init ends here */ .init.end : AT(ADDR(.init.end) - LOAD_OFFSET) { __init_end = .; } /* * smp_locks might be freed after init * start/end must be page aligned */ . = ALIGN(PAGE_SIZE); .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) { __smp_locks = .; *(.smp_locks) . = ALIGN(PAGE_SIZE); __smp_locks_end = .; } #ifdef CONFIG_X86_64 .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { NOSAVE_DATA } #endif /* BSS */ . = ALIGN(PAGE_SIZE); .bss : AT(ADDR(.bss) - LOAD_OFFSET) { __bss_start = .; *(.bss..page_aligned) . = ALIGN(PAGE_SIZE); *(BSS_MAIN) BSS_DECRYPTED . = ALIGN(PAGE_SIZE); __bss_stop = .; } . = ALIGN(PAGE_SIZE); .brk : AT(ADDR(.brk) - LOAD_OFFSET) { __brk_base = .; . += 64 * 1024; /* 64k alignment slop space */ *(.brk_reservation) /* areas brk users have reserved */ __brk_limit = .; } . = ALIGN(PAGE_SIZE); /* keep VO_INIT_SIZE page aligned */ _end = .; STABS_DEBUG DWARF_DEBUG /* Sections to be discarded */ DISCARDS /DISCARD/ : { *(.eh_frame) } } #ifdef CONFIG_X86_32 /* * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility: */ . = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE), "kernel image bigger than KERNEL_IMAGE_SIZE"); #else /* * Per-cpu symbols which need to be offset from __per_cpu_load * for the boot processor. */ #define INIT_PER_CPU(x) init_per_cpu__##x = ABSOLUTE(x) + __per_cpu_load INIT_PER_CPU(gdt_page); INIT_PER_CPU(irq_stack_union); /* * Build-time check on the image size: */ . = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE), "kernel image bigger than KERNEL_IMAGE_SIZE"); #ifdef CONFIG_SMP . = ASSERT((irq_stack_union == 0), "irq_stack_union is not at start of per-cpu area"); #endif #endif /* CONFIG_X86_32 */ #ifdef CONFIG_KEXEC_CORE #include <asm/kexec.h> . = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE, "kexec control code size is too big"); #endif
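Every output section in the script above follows the AT(ADDR(section) - LOAD_OFFSET) pattern: link-time virtual addresses (VMAs) sit in the kernel mapping, and the load address (LMA) is obtained by subtracting LOAD_OFFSET. The C sketch below just performs that subtraction, using the conventional x86_64 __START_KERNEL_map value as LOAD_OFFSET and a made-up _text address as the sample VMA.

#include <stdio.h>

#define LOAD_OFFSET 0xffffffff80000000UL   /* x86_64 __START_KERNEL_map */

/* LMA = VMA - LOAD_OFFSET, exactly as AT(ADDR(sect) - LOAD_OFFSET). */
static unsigned long lma(unsigned long vma)
{
    return vma - LOAD_OFFSET;
}

int main(void)
{
    unsigned long text_vma = 0xffffffff81000000UL;   /* sample _text VMA */
    printf(".text VMA %#lx -> LMA %#lx\n", text_vma, lma(text_vma));
    return 0;
}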
AirFortressIlikara/LS2K0300-linux-4.19
5,846
arch/x86/kernel/relocate_kernel_64.S
/* * relocate_kernel.S - put the kernel image in place to boot * Copyright (C) 2002-2005 Eric Biederman <ebiederm@xmission.com> * * This source code is licensed under the GNU General Public License, * Version 2. See the file COPYING for more details. */ #include <linux/linkage.h> #include <asm/page_types.h> #include <asm/kexec.h> #include <asm/processor-flags.h> #include <asm/pgtable_types.h> /* * Must be relocatable PIC code callable as a C function */ #define PTR(x) (x << 3) #define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) /* * control_page + KEXEC_CONTROL_CODE_MAX_SIZE * ~ control_page + PAGE_SIZE are used as data storage and stack for * jumping back */ #define DATA(offset) (KEXEC_CONTROL_CODE_MAX_SIZE+(offset)) /* Minimal CPU state */ #define RSP DATA(0x0) #define CR0 DATA(0x8) #define CR3 DATA(0x10) #define CR4 DATA(0x18) /* other data */ #define CP_PA_TABLE_PAGE DATA(0x20) #define CP_PA_SWAP_PAGE DATA(0x28) #define CP_PA_BACKUP_PAGES_MAP DATA(0x30) .text .align PAGE_SIZE .code64 .globl relocate_kernel relocate_kernel: /* * %rdi indirection_page * %rsi page_list * %rdx start address * %rcx preserve_context * %r8 sme_active */ /* Save the CPU context, used for jumping back */ pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 pushf movq PTR(VA_CONTROL_PAGE)(%rsi), %r11 movq %rsp, RSP(%r11) movq %cr0, %rax movq %rax, CR0(%r11) movq %cr3, %rax movq %rax, CR3(%r11) movq %cr4, %rax movq %rax, CR4(%r11) /* Save CR4. Required to enable the right paging mode later. */ movq %rax, %r13 /* zero out flags, and disable interrupts */ pushq $0 popfq /* Save SME active flag */ movq %r8, %r12 /* * get physical address of control page now * this is impossible after page table switch */ movq PTR(PA_CONTROL_PAGE)(%rsi), %r8 /* get physical address of page table now too */ movq PTR(PA_TABLE_PAGE)(%rsi), %r9 /* get physical address of swap page now */ movq PTR(PA_SWAP_PAGE)(%rsi), %r10 /* save some information for jumping back */ movq %r9, CP_PA_TABLE_PAGE(%r11) movq %r10, CP_PA_SWAP_PAGE(%r11) movq %rdi, CP_PA_BACKUP_PAGES_MAP(%r11) /* Switch to the identity mapped page tables */ movq %r9, %cr3 /* setup a new stack at the end of the physical control page */ lea PAGE_SIZE(%r8), %rsp /* jump to identity mapped page */ addq $(identity_mapped - relocate_kernel), %r8 pushq %r8 ret identity_mapped: /* set return address to 0 if not preserving context */ pushq $0 /* store the start address on the stack */ pushq %rdx /* * Set cr0 to a known state: * - Paging enabled * - Alignment check disabled * - Write protect disabled * - No task switch * - Don't do FP software emulation. * - Protected mode enabled */ movq %cr0, %rax andq $~(X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %rax orl $(X86_CR0_PG | X86_CR0_PE), %eax movq %rax, %cr0 /* * Set cr4 to a known state: * - physical address extension enabled * - 5-level paging, if it was enabled before */ movl $X86_CR4_PAE, %eax testq $X86_CR4_LA57, %r13 jz 1f orl $X86_CR4_LA57, %eax 1: movq %rax, %cr4 jmp 1f 1: /* Flush the TLB (needed?) */ movq %r9, %cr3 /* * If SME is active, there could be old encrypted cache line * entries that will conflict with the now unencrypted memory * used by kexec. Flush the caches before copying the kernel. */ testq %r12, %r12 jz 1f wbinvd 1: movq %rcx, %r11 call swap_pages /* * To be certain of avoiding problems with self-modifying code * I need to execute a serializing instruction here. * So I flush the TLB by reloading %cr3 here, it's handy, * and not processor dependent.
*/ movq %cr3, %rax movq %rax, %cr3 /* * set all of the registers to known values * leave %rsp alone */ testq %r11, %r11 jnz 1f xorl %eax, %eax xorl %ebx, %ebx xorl %ecx, %ecx xorl %edx, %edx xorl %esi, %esi xorl %edi, %edi xorl %ebp, %ebp xorl %r8d, %r8d xorl %r9d, %r9d xorl %r10d, %r10d xorl %r11d, %r11d xorl %r12d, %r12d xorl %r13d, %r13d xorl %r14d, %r14d xorl %r15d, %r15d ret 1: popq %rdx leaq PAGE_SIZE(%r10), %rsp call *%rdx /* get the re-entry point of the peer system */ movq 0(%rsp), %rbp call 1f 1: popq %r8 subq $(1b - relocate_kernel), %r8 movq CP_PA_SWAP_PAGE(%r8), %r10 movq CP_PA_BACKUP_PAGES_MAP(%r8), %rdi movq CP_PA_TABLE_PAGE(%r8), %rax movq %rax, %cr3 lea PAGE_SIZE(%r8), %rsp call swap_pages movq $virtual_mapped, %rax pushq %rax ret virtual_mapped: movq RSP(%r8), %rsp movq CR4(%r8), %rax movq %rax, %cr4 movq CR3(%r8), %rax movq CR0(%r8), %r8 movq %rax, %cr3 movq %r8, %cr0 movq %rbp, %rax popf popq %r15 popq %r14 popq %r13 popq %r12 popq %rbp popq %rbx ret /* Do the copies */ swap_pages: movq %rdi, %rcx /* Put the page_list in %rcx */ xorl %edi, %edi xorl %esi, %esi jmp 1f 0: /* top, read another word from the indirection page */ movq (%rbx), %rcx addq $8, %rbx 1: testb $0x1, %cl /* is it a destination page? */ jz 2f movq %rcx, %rdi andq $0xfffffffffffff000, %rdi jmp 0b 2: testb $0x2, %cl /* is it an indirection page? */ jz 2f movq %rcx, %rbx andq $0xfffffffffffff000, %rbx jmp 0b 2: testb $0x4, %cl /* is it the done indicator? */ jz 2f jmp 3f 2: testb $0x8, %cl /* is it the source indicator? */ jz 0b /* Ignore it otherwise */ movq %rcx, %rsi /* For every source page do a copy */ andq $0xfffffffffffff000, %rsi movq %rdi, %rdx movq %rsi, %rax movq %r10, %rdi movl $512, %ecx rep ; movsq movq %rax, %rdi movq %rdx, %rsi movl $512, %ecx rep ; movsq movq %rdx, %rdi movq %r10, %rsi movl $512, %ecx rep ; movsq lea PAGE_SIZE(%rax), %rsi jmp 0b 3: ret .globl kexec_control_code_size .set kexec_control_code_size, . - relocate_kernel
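The triple rep;movsq sequence in swap_pages above exchanges a destination page and a source page through the scratch swap page held in %r10, which is what lets kexec jump back into the old kernel when preserve_context is set. Here is the same rotation in plain C; the buffers and fill bytes are arbitrary stand-ins for real pages.

#include <stdio.h>
#include <string.h>

#define PAGE_SZ 4096

/* Same copy order as the assembly: source -> swap, dest -> source,
 * swap -> dest; the net effect is an exchange of the two pages. */
static void swap_through(unsigned char *dst, unsigned char *src,
                         unsigned char *swap)
{
    memcpy(swap, src, PAGE_SZ);     /* source -> swap page */
    memcpy(src, dst, PAGE_SZ);      /* dest   -> source    */
    memcpy(dst, swap, PAGE_SZ);     /* swap   -> dest      */
}

int main(void)
{
    static unsigned char a[PAGE_SZ], b[PAGE_SZ], scratch[PAGE_SZ];

    memset(a, 0xaa, PAGE_SZ);       /* pretend destination page */
    memset(b, 0xbb, PAGE_SZ);       /* pretend source page */
    swap_through(a, b, scratch);
    printf("a[0]=%#x b[0]=%#x (exchanged)\n", a[0], b[0]);
    return 0;
}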
AirFortressIlikara/LS2K0300-linux-4.19
13,249
arch/x86/kernel/head_64.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit * * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE * Copyright (C) 2000 Pavel Machek <pavel@suse.cz> * Copyright (C) 2000 Karsten Keil <kkeil@suse.de> * Copyright (C) 2001,2002 Andi Kleen <ak@suse.de> * Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com> */ #include <linux/linkage.h> #include <linux/threads.h> #include <linux/init.h> #include <asm/segment.h> #include <asm/pgtable.h> #include <asm/page.h> #include <asm/msr.h> #include <asm/cache.h> #include <asm/processor-flags.h> #include <asm/percpu.h> #include <asm/nops.h> #include "../entry/calling.h" #include <asm/export.h> #include <asm/nospec-branch.h> #include <asm/fixmap.h> #ifdef CONFIG_PARAVIRT #include <asm/asm-offsets.h> #include <asm/paravirt.h> #define GET_CR2_INTO(reg) GET_CR2_INTO_RAX ; movq %rax, reg #else #define GET_CR2_INTO(reg) movq %cr2, reg #define INTERRUPT_RETURN iretq #endif /* we are not able to switch in one step to the final KERNEL ADDRESS SPACE * because we need identity-mapped pages. * */ #define l4_index(x) (((x) >> 39) & 511) #define pud_index(x) (((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) L4_PAGE_OFFSET = l4_index(__PAGE_OFFSET_BASE_L4) L4_START_KERNEL = l4_index(__START_KERNEL_map) L3_START_KERNEL = pud_index(__START_KERNEL_map) .text __HEAD .code64 .globl startup_64 startup_64: UNWIND_HINT_EMPTY /* * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0, * and someone has loaded an identity mapped page table * for us. These identity mapped page tables map all of the * kernel pages and possibly all of memory. * * %rsi holds a physical pointer to real_mode_data. * * We come here either directly from a 64bit bootloader, or from * arch/x86/boot/compressed/head_64.S. * * We only come here initially at boot nothing else comes here. * * Since we may be loaded at an address different from what we were * compiled to run at we first fixup the physical addresses in our page * tables and then reload them. */ /* Set up the stack for verify_cpu(), similar to initial_stack below */ leaq (__end_init_task - SIZEOF_PTREGS)(%rip), %rsp /* Sanitize CPU configuration */ call verify_cpu /* * Perform pagetable fixups. Additionally, if SME is active, encrypt * the kernel and retrieve the modifier (SME encryption mask if SME * is active) to be added to the initial pgdir entry that will be * programmed into CR3. */ leaq _text(%rip), %rdi pushq %rsi call __startup_64 popq %rsi /* Form the CR3 value being sure to include the CR3 modifier */ addq $(early_top_pgt - __START_KERNEL_map), %rax jmp 1f ENTRY(secondary_startup_64) UNWIND_HINT_EMPTY /* * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0, * and someone has loaded a mapped page table. * * %rsi holds a physical pointer to real_mode_data. * * We come here either from startup_64 (using physical addresses) * or from trampoline.S (using virtual addresses). * * Using virtual addresses from trampoline.S removes the need * to have any identity mapped pages in the kernel page table * after the boot processor executes this code. */ /* Sanitize CPU configuration */ call verify_cpu /* * Retrieve the modifier (SME encryption mask if SME is active) to be * added to the initial pgdir entry that will be programmed into CR3. 
*/ pushq %rsi call __startup_secondary_64 popq %rsi /* Form the CR3 value being sure to include the CR3 modifier */ addq $(init_top_pgt - __START_KERNEL_map), %rax 1: /* Enable PAE mode, PGE and LA57 */ movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx #ifdef CONFIG_X86_5LEVEL testl $1, __pgtable_l5_enabled(%rip) jz 1f orl $X86_CR4_LA57, %ecx 1: #endif movq %rcx, %cr4 /* Setup early boot stage 4-/5-level pagetables. */ addq phys_base(%rip), %rax movq %rax, %cr3 /* Ensure I am executing from virtual addresses */ movq $1f, %rax ANNOTATE_RETPOLINE_SAFE jmp *%rax 1: UNWIND_HINT_EMPTY /* Check if nx is implemented */ movl $0x80000001, %eax cpuid movl %edx,%edi /* Setup EFER (Extended Feature Enable Register) */ movl $MSR_EFER, %ecx rdmsr btsl $_EFER_SCE, %eax /* Enable System Call */ btl $20,%edi /* No Execute supported? */ jnc 1f btsl $_EFER_NX, %eax btsq $_PAGE_BIT_NX,early_pmd_flags(%rip) 1: wrmsr /* Make changes effective */ /* Setup cr0 */ movl $CR0_STATE, %eax /* Make changes effective */ movq %rax, %cr0 /* Setup a boot time stack */ movq initial_stack(%rip), %rsp /* zero EFLAGS after setting rsp */ pushq $0 popfq /* * We must switch to a new descriptor in kernel space for the GDT * because soon the kernel won't have access anymore to the userspace * addresses where we're currently running on. We have to do that here * because in 32bit we couldn't load a 64bit linear address. */ lgdt early_gdt_descr(%rip) /* set up data segments */ xorl %eax,%eax movl %eax,%ds movl %eax,%ss movl %eax,%es /* * We don't really need to load %fs or %gs, but load them anyway * to kill any stale realmode selectors. This allows execution * under VT hardware. */ movl %eax,%fs movl %eax,%gs /* Set up %gs. * * The base of %gs always points to the bottom of the irqstack * union. If the stack protector canary is enabled, it is * located at %gs:40. Note that, on SMP, the boot cpu uses * init data section till per cpu areas are set up. */ movl $MSR_GS_BASE,%ecx movl initial_gs(%rip),%eax movl initial_gs+4(%rip),%edx wrmsr /* rsi is pointer to real mode structure with interesting info. pass it to C */ movq %rsi, %rdi .Ljump_to_C_code: /* * Jump to run C code and to be on a real kernel address. * Since we are running on identity-mapped space we have to jump * to the full 64bit address, this is only possible as indirect * jump. In addition we need to ensure %cs is set so we make this * a far return. * * Note: do not change to far jump indirect with 64bit offset. * * AMD does not support far jump indirect with 64bit offset. * AMD64 Architecture Programmer's Manual, Volume 3: states only * JMP FAR mem16:16 FF /5 Far jump indirect, * with the target specified by a far pointer in memory. * JMP FAR mem16:32 FF /5 Far jump indirect, * with the target specified by a far pointer in memory. * * Intel64 does support 64bit offset. * Software Developer Manual Vol 2: states: * FF /5 JMP m16:16 Jump far, absolute indirect, * address given in m16:16 * FF /5 JMP m16:32 Jump far, absolute indirect, * address given in m16:32. * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect, * address given in m16:64. */ pushq $.Lafter_lret # put return address on stack for unwinder xorl %ebp, %ebp # clear frame pointer movq initial_code(%rip), %rax pushq $__KERNEL_CS # set correct cs pushq %rax # target address in negative space lretq .Lafter_lret: END(secondary_startup_64) #include "verify_cpu.S" #ifdef CONFIG_HOTPLUG_CPU /* * Boot CPU0 entry point. It's called from play_dead(). Everything has been set * up already except stack. We just set up stack here. 
Then call * start_secondary() via .Ljump_to_C_code. */ ENTRY(start_cpu0) movq initial_stack(%rip), %rsp UNWIND_HINT_EMPTY jmp .Ljump_to_C_code ENDPROC(start_cpu0) #endif /* Both SMP bootup and ACPI suspend change these variables */ __REFDATA .balign 8 GLOBAL(initial_code) .quad x86_64_start_kernel GLOBAL(initial_gs) .quad INIT_PER_CPU_VAR(irq_stack_union) GLOBAL(initial_stack) /* * The SIZEOF_PTREGS gap is a convention which helps the in-kernel * unwinder reliably detect the end of the stack. */ .quad init_thread_union + THREAD_SIZE - SIZEOF_PTREGS __FINITDATA __INIT ENTRY(early_idt_handler_array) i = 0 .rept NUM_EXCEPTION_VECTORS .if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0 UNWIND_HINT_IRET_REGS pushq $0 # Dummy error code, to make stack frame uniform .else UNWIND_HINT_IRET_REGS offset=8 .endif pushq $i # 72(%rsp) Vector number jmp early_idt_handler_common UNWIND_HINT_IRET_REGS i = i + 1 .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc .endr UNWIND_HINT_IRET_REGS offset=16 END(early_idt_handler_array) early_idt_handler_common: /* * The stack is the hardware frame, an error code or zero, and the * vector number. */ cld incl early_recursion_flag(%rip) /* The vector number is currently in the pt_regs->di slot. */ pushq %rsi /* pt_regs->si */ movq 8(%rsp), %rsi /* RSI = vector number */ movq %rdi, 8(%rsp) /* pt_regs->di = RDI */ pushq %rdx /* pt_regs->dx */ pushq %rcx /* pt_regs->cx */ pushq %rax /* pt_regs->ax */ pushq %r8 /* pt_regs->r8 */ pushq %r9 /* pt_regs->r9 */ pushq %r10 /* pt_regs->r10 */ pushq %r11 /* pt_regs->r11 */ pushq %rbx /* pt_regs->bx */ pushq %rbp /* pt_regs->bp */ pushq %r12 /* pt_regs->r12 */ pushq %r13 /* pt_regs->r13 */ pushq %r14 /* pt_regs->r14 */ pushq %r15 /* pt_regs->r15 */ UNWIND_HINT_REGS cmpq $14,%rsi /* Page fault? */ jnz 10f GET_CR2_INTO(%rdi) /* Can clobber any volatile register if pv */ call early_make_pgtable andl %eax,%eax jz 20f /* All good */ 10: movq %rsp,%rdi /* RDI = pt_regs; RSI is already trapnr */ call early_fixup_exception 20: decl early_recursion_flag(%rip) jmp restore_regs_and_return_to_kernel END(early_idt_handler_common) __INITDATA .balign 4 GLOBAL(early_recursion_flag) .long 0 #define NEXT_PAGE(name) \ .balign PAGE_SIZE; \ GLOBAL(name) #ifdef CONFIG_PAGE_TABLE_ISOLATION /* * Each PGD needs to be 8k long and 8k aligned. We do not * ever go out to userspace with these, so we do not * strictly *need* the second page, but this allows us to * have a single set_pgd() implementation that does not * need to worry about whether it has 4k or 8k to work * with. 
* * This ensures PGDs are 8k long: */ #define PTI_USER_PGD_FILL 512 /* This ensures they are 8k-aligned: */ #define NEXT_PGD_PAGE(name) \ .balign 2 * PAGE_SIZE; \ GLOBAL(name) #else #define NEXT_PGD_PAGE(name) NEXT_PAGE(name) #define PTI_USER_PGD_FILL 0 #endif /* Automate the creation of 1 to 1 mapping pmd entries */ #define PMDS(START, PERM, COUNT) \ i = 0 ; \ .rept (COUNT) ; \ .quad (START) + (i << PMD_SHIFT) + (PERM) ; \ i = i + 1 ; \ .endr __INITDATA NEXT_PGD_PAGE(early_top_pgt) .fill 512,8,0 .fill PTI_USER_PGD_FILL,8,0 NEXT_PAGE(early_dynamic_pgts) .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0 .data #if defined(CONFIG_XEN_PV) || defined(CONFIG_XEN_PVH) NEXT_PGD_PAGE(init_top_pgt) .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC .org init_top_pgt + L4_PAGE_OFFSET*8, 0 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC .org init_top_pgt + L4_START_KERNEL*8, 0 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */ .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC .fill PTI_USER_PGD_FILL,8,0 NEXT_PAGE(level3_ident_pgt) .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC .fill 511, 8, 0 NEXT_PAGE(level2_ident_pgt) /* * Since I easily can, map the first 1G. * Don't set NX because code runs from these pages. * * Note: This sets _PAGE_GLOBAL regardless of whether * the CPU supports it or whether it is enabled; * the CPU should simply ignore the bit. */ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD) #else NEXT_PGD_PAGE(init_top_pgt) .fill 512,8,0 .fill PTI_USER_PGD_FILL,8,0 #endif #ifdef CONFIG_X86_5LEVEL NEXT_PAGE(level4_kernel_pgt) .fill 511,8,0 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC #endif NEXT_PAGE(level3_kernel_pgt) .fill L3_START_KERNEL,8,0 /* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */ .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC NEXT_PAGE(level2_kernel_pgt) /* * 512 MB kernel mapping. We spend a full page on this pagetable * anyway. * * The kernel code+data+bss must not be bigger than that. * * (NOTE: at +512MB starts the module area, see MODULES_VADDR. * If you want to increase this then increase MODULES_VADDR * too.) * * This table is eventually used by the kernel during normal * runtime. Care must be taken to clear out undesired bits * later, like _PAGE_RW or _PAGE_GLOBAL in some cases. */ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE) NEXT_PAGE(level2_fixmap_pgt) .fill (512 - 4 - FIXMAP_PMD_NUM),8,0 pgtno = 0 .rept (FIXMAP_PMD_NUM) .quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \ + _PAGE_TABLE_NOENC; pgtno = pgtno + 1 .endr /* 6 MB reserved space + a 2MB hole */ .fill 4,8,0 NEXT_PAGE(level1_fixmap_pgt) .rept (FIXMAP_PMD_NUM) .fill 512,8,0 .endr #undef PMDS .data .align 16 .globl early_gdt_descr early_gdt_descr: .word GDT_ENTRIES*8-1 early_gdt_descr_base: .quad INIT_PER_CPU_VAR(gdt_page) ENTRY(phys_base) /* This must match the first entry in level2_kernel_pgt */ .quad 0x0000000000000000 EXPORT_SYMBOL(phys_base) #include "../../x86/xen/xen-head.S" __PAGE_ALIGNED_BSS NEXT_PAGE(empty_zero_page) .skip PAGE_SIZE EXPORT_SYMBOL(empty_zero_page)
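The l4_index()/pud_index() macros near the top of this file reduce to "shift, then keep 9 bits", since each paging level decodes 9 bits of the virtual address. The C sketch below reproduces the 511/510 numbers quoted in the page-table comments above; __START_KERNEL_map is the conventional x86_64 value, used here only as input data.

#include <stdio.h>

#define L4_SHIFT  39
#define PUD_SHIFT 30

/* 9 bits of index per level, same as l4_index()/pud_index() above. */
static unsigned l4_index(unsigned long va)  { return (va >> L4_SHIFT) & 511; }
static unsigned pud_index(unsigned long va) { return (va >> PUD_SHIFT) & 511; }

int main(void)
{
    unsigned long start_kernel_map = 0xffffffff80000000UL;

    printf("L4_START_KERNEL = %u\n", l4_index(start_kernel_map));   /* 511 */
    printf("L3_START_KERNEL = %u\n", pud_index(start_kernel_map));  /* 510 */
    return 0;
}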
AirFortressIlikara/LS2K0300-linux-4.19
16,236
arch/x86/crypto/crct10dif-pcl-asm_64.S
######################################################################## # Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions # # Copyright (c) 2013, Intel Corporation # # Authors: # Erdinc Ozturk <erdinc.ozturk@intel.com> # Vinodh Gopal <vinodh.gopal@intel.com> # James Guilford <james.guilford@intel.com> # Tim Chen <tim.c.chen@linux.intel.com> # # This software is available to you under a choice of one of two # licenses. You may choose to be licensed under the terms of the GNU # General Public License (GPL) Version 2, available from the file # COPYING in the main directory of this source tree, or the # OpenIB.org BSD license below: # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the # distribution. # # * Neither the name of the Intel Corporation nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # # THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ######################################################################## # Function API: # UINT16 crc_t10dif_pcl( # UINT16 init_crc, //initial CRC value, 16 bits # const unsigned char *buf, //buffer pointer to calculate CRC on # UINT64 len //buffer length in bytes (64-bit data) # ); # # Reference paper titled "Fast CRC Computation for Generic # Polynomials Using PCLMULQDQ Instruction" # URL: http://www.intel.com/content/dam/www/public/us/en/documents # /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf # # #include <linux/linkage.h> .text #define arg1 %rdi #define arg2 %rsi #define arg3 %rdx #define arg1_low32 %edi ENTRY(crc_t10dif_pcl) .align 16 # adjust the 16-bit initial_crc value, scale it to 32 bits shl $16, arg1_low32 # Allocate Stack Space mov %rsp, %rcx sub $16*2, %rsp # align stack to 16 byte boundary and $~(0x10 - 1), %rsp # check if smaller than 256 cmp $256, arg3 # for sizes less than 256, we can't fold 128B at a time... jl _less_than_128 # load the initial crc value movd arg1_low32, %xmm10 # initial crc # crc value does not need to be byte-reflected, but it needs # to be moved to the high part of the register. # because data will be byte-reflected and will align with # initial crc at correct place.
pslldq $12, %xmm10 movdqa SHUF_MASK(%rip), %xmm11 # receive the initial 128B data, xor the initial crc value movdqu 16*0(arg2), %xmm0 movdqu 16*1(arg2), %xmm1 movdqu 16*2(arg2), %xmm2 movdqu 16*3(arg2), %xmm3 movdqu 16*4(arg2), %xmm4 movdqu 16*5(arg2), %xmm5 movdqu 16*6(arg2), %xmm6 movdqu 16*7(arg2), %xmm7 pshufb %xmm11, %xmm0 # XOR the initial_crc value pxor %xmm10, %xmm0 pshufb %xmm11, %xmm1 pshufb %xmm11, %xmm2 pshufb %xmm11, %xmm3 pshufb %xmm11, %xmm4 pshufb %xmm11, %xmm5 pshufb %xmm11, %xmm6 pshufb %xmm11, %xmm7 movdqa rk3(%rip), %xmm10 #xmm10 has rk3 and rk4 #imm value of pclmulqdq instruction #will determine which constant to use ################################################################# # we subtract 256 instead of 128 to save one instruction from the loop sub $256, arg3 # at this section of the code, there is 128*x+y (0<=y<128) bytes of # buffer. The _fold_64_B_loop will fold 128B at a time # until we have 128+y Bytes of buffer # fold 128B at a time. This section of the code folds 8 xmm # registers in parallel _fold_64_B_loop: # update the buffer pointer add $128, arg2 # buf += 128 movdqu 16*0(arg2), %xmm9 movdqu 16*1(arg2), %xmm12 pshufb %xmm11, %xmm9 pshufb %xmm11, %xmm12 movdqa %xmm0, %xmm8 movdqa %xmm1, %xmm13 pclmulqdq $0x0 , %xmm10, %xmm0 pclmulqdq $0x11, %xmm10, %xmm8 pclmulqdq $0x0 , %xmm10, %xmm1 pclmulqdq $0x11, %xmm10, %xmm13 pxor %xmm9 , %xmm0 xorps %xmm8 , %xmm0 pxor %xmm12, %xmm1 xorps %xmm13, %xmm1 movdqu 16*2(arg2), %xmm9 movdqu 16*3(arg2), %xmm12 pshufb %xmm11, %xmm9 pshufb %xmm11, %xmm12 movdqa %xmm2, %xmm8 movdqa %xmm3, %xmm13 pclmulqdq $0x0, %xmm10, %xmm2 pclmulqdq $0x11, %xmm10, %xmm8 pclmulqdq $0x0, %xmm10, %xmm3 pclmulqdq $0x11, %xmm10, %xmm13 pxor %xmm9 , %xmm2 xorps %xmm8 , %xmm2 pxor %xmm12, %xmm3 xorps %xmm13, %xmm3 movdqu 16*4(arg2), %xmm9 movdqu 16*5(arg2), %xmm12 pshufb %xmm11, %xmm9 pshufb %xmm11, %xmm12 movdqa %xmm4, %xmm8 movdqa %xmm5, %xmm13 pclmulqdq $0x0, %xmm10, %xmm4 pclmulqdq $0x11, %xmm10, %xmm8 pclmulqdq $0x0, %xmm10, %xmm5 pclmulqdq $0x11, %xmm10, %xmm13 pxor %xmm9 , %xmm4 xorps %xmm8 , %xmm4 pxor %xmm12, %xmm5 xorps %xmm13, %xmm5 movdqu 16*6(arg2), %xmm9 movdqu 16*7(arg2), %xmm12 pshufb %xmm11, %xmm9 pshufb %xmm11, %xmm12 movdqa %xmm6 , %xmm8 movdqa %xmm7 , %xmm13 pclmulqdq $0x0 , %xmm10, %xmm6 pclmulqdq $0x11, %xmm10, %xmm8 pclmulqdq $0x0 , %xmm10, %xmm7 pclmulqdq $0x11, %xmm10, %xmm13 pxor %xmm9 , %xmm6 xorps %xmm8 , %xmm6 pxor %xmm12, %xmm7 xorps %xmm13, %xmm7 sub $128, arg3 # check if there is another 128B in the buffer to be able to fold jge _fold_64_B_loop ################################################################## add $128, arg2 # at this point, the buffer pointer is pointing at the last y Bytes # of the buffer; the 128B of folded data is in 8 of the xmm # registers: xmm0-xmm7 # fold the 8 xmm registers to 1 xmm register with different constants movdqa rk9(%rip), %xmm10 movdqa %xmm0, %xmm8 pclmulqdq $0x11, %xmm10, %xmm0 pclmulqdq $0x0 , %xmm10, %xmm8 pxor %xmm8, %xmm7 xorps %xmm0, %xmm7 movdqa rk11(%rip), %xmm10 movdqa %xmm1, %xmm8 pclmulqdq $0x11, %xmm10, %xmm1 pclmulqdq $0x0 , %xmm10, %xmm8 pxor %xmm8, %xmm7 xorps %xmm1, %xmm7 movdqa rk13(%rip), %xmm10 movdqa %xmm2, %xmm8 pclmulqdq $0x11, %xmm10, %xmm2 pclmulqdq $0x0 , %xmm10, %xmm8 pxor %xmm8, %xmm7 pxor %xmm2, %xmm7 movdqa rk15(%rip), %xmm10 movdqa %xmm3, %xmm8 pclmulqdq $0x11, %xmm10, %xmm3 pclmulqdq $0x0 , %xmm10, %xmm8 pxor %xmm8, %xmm7 xorps %xmm3, %xmm7 movdqa rk17(%rip), %xmm10 movdqa %xmm4, %xmm8 pclmulqdq $0x11, %xmm10, %xmm4 pclmulqdq $0x0 , %xmm10,
%xmm8 pxor %xmm8, %xmm7 pxor %xmm4, %xmm7 movdqa rk19(%rip), %xmm10 movdqa %xmm5, %xmm8 pclmulqdq $0x11, %xmm10, %xmm5 pclmulqdq $0x0 , %xmm10, %xmm8 pxor %xmm8, %xmm7 xorps %xmm5, %xmm7 movdqa rk1(%rip), %xmm10 #xmm10 has rk1 and rk2 #imm value of pclmulqdq instruction #will determine which constant to use movdqa %xmm6, %xmm8 pclmulqdq $0x11, %xmm10, %xmm6 pclmulqdq $0x0 , %xmm10, %xmm8 pxor %xmm8, %xmm7 pxor %xmm6, %xmm7 # instead of 64, we add 48 to the loop counter to save 1 instruction # from the loop instead of a cmp instruction, we use the negative # flag with the jl instruction add $128-16, arg3 jl _final_reduction_for_128 # now we have 16+y bytes left to reduce. 16 Bytes is in register xmm7 # and the rest is in memory. We can fold 16 bytes at a time if y>=16 # continue folding 16B at a time _16B_reduction_loop: movdqa %xmm7, %xmm8 pclmulqdq $0x11, %xmm10, %xmm7 pclmulqdq $0x0 , %xmm10, %xmm8 pxor %xmm8, %xmm7 movdqu (arg2), %xmm0 pshufb %xmm11, %xmm0 pxor %xmm0 , %xmm7 add $16, arg2 sub $16, arg3 # instead of a cmp instruction, we utilize the flags with the # jge instruction equivalent of: cmp arg3, 16-16 # check if there is any more 16B in the buffer to be able to fold jge _16B_reduction_loop #now we have 16+z bytes left to reduce, where 0<= z < 16. #first, we reduce the data in the xmm7 register _final_reduction_for_128: # check if any more data to fold. If not, compute the CRC of # the final 128 bits add $16, arg3 je _128_done # here we are getting data that is less than 16 bytes. # since we know that there was data before the pointer, we can # offset the input pointer before the actual point, to receive # exactly 16 bytes. after that the registers need to be adjusted. _get_last_two_xmms: movdqa %xmm7, %xmm2 movdqu -16(arg2, arg3), %xmm1 pshufb %xmm11, %xmm1 # get rid of the extra data that was loaded before # load the shift constant lea pshufb_shf_table+16(%rip), %rax sub arg3, %rax movdqu (%rax), %xmm0 # shift xmm2 to the left by arg3 bytes pshufb %xmm0, %xmm2 # shift xmm7 to the right by 16-arg3 bytes pxor mask1(%rip), %xmm0 pshufb %xmm0, %xmm7 pblendvb %xmm2, %xmm1 #xmm0 is implicit # fold 16 Bytes movdqa %xmm1, %xmm2 movdqa %xmm7, %xmm8 pclmulqdq $0x11, %xmm10, %xmm7 pclmulqdq $0x0 , %xmm10, %xmm8 pxor %xmm8, %xmm7 pxor %xmm2, %xmm7 _128_done: # compute crc of a 128-bit value movdqa rk5(%rip), %xmm10 # rk5 and rk6 in xmm10 movdqa %xmm7, %xmm0 #64b fold pclmulqdq $0x1, %xmm10, %xmm7 pslldq $8 , %xmm0 pxor %xmm0, %xmm7 #32b fold movdqa %xmm7, %xmm0 pand mask2(%rip), %xmm0 psrldq $12, %xmm7 pclmulqdq $0x10, %xmm10, %xmm7 pxor %xmm0, %xmm7 #barrett reduction _barrett: movdqa rk7(%rip), %xmm10 # rk7 and rk8 in xmm10 movdqa %xmm7, %xmm0 pclmulqdq $0x01, %xmm10, %xmm7 pslldq $4, %xmm7 pclmulqdq $0x11, %xmm10, %xmm7 pslldq $4, %xmm7 pxor %xmm0, %xmm7 pextrd $1, %xmm7, %eax _cleanup: # scale the result back to 16 bits shr $16, %eax mov %rcx, %rsp ret ######################################################################## .align 16 _less_than_128: # check if there is enough buffer to be able to fold 16B at a time cmp $32, arg3 jl _less_than_32 movdqa SHUF_MASK(%rip), %xmm11 # now if there is, load the constants movdqa rk1(%rip), %xmm10 # rk1 and rk2 in xmm10 movd arg1_low32, %xmm0 # get the initial crc value pslldq $12, %xmm0 # align it to its correct place movdqu (arg2), %xmm7 # load the plaintext pshufb %xmm11, %xmm7 # byte-reflect the plaintext pxor %xmm0, %xmm7 # update the buffer pointer add $16, arg2 # update the counter. 
subtract 32 instead of 16 to save one # instruction from the loop sub $32, arg3 jmp _16B_reduction_loop .align 16 _less_than_32: # mov initial crc to the return value. this is necessary for # zero-length buffers. mov arg1_low32, %eax test arg3, arg3 je _cleanup movdqa SHUF_MASK(%rip), %xmm11 movd arg1_low32, %xmm0 # get the initial crc value pslldq $12, %xmm0 # align it to its correct place cmp $16, arg3 je _exact_16_left jl _less_than_16_left movdqu (arg2), %xmm7 # load the plaintext pshufb %xmm11, %xmm7 # byte-reflect the plaintext pxor %xmm0 , %xmm7 # xor the initial crc value add $16, arg2 sub $16, arg3 movdqa rk1(%rip), %xmm10 # rk1 and rk2 in xmm10 jmp _get_last_two_xmms .align 16 _less_than_16_left: # use stack space to load data less than 16 bytes, zero-out # the 16B in memory first. pxor %xmm1, %xmm1 mov %rsp, %r11 movdqa %xmm1, (%r11) cmp $4, arg3 jl _only_less_than_4 # backup the counter value mov arg3, %r9 cmp $8, arg3 jl _less_than_8_left # load 8 Bytes mov (arg2), %rax mov %rax, (%r11) add $8, %r11 sub $8, arg3 add $8, arg2 _less_than_8_left: cmp $4, arg3 jl _less_than_4_left # load 4 Bytes mov (arg2), %eax mov %eax, (%r11) add $4, %r11 sub $4, arg3 add $4, arg2 _less_than_4_left: cmp $2, arg3 jl _less_than_2_left # load 2 Bytes mov (arg2), %ax mov %ax, (%r11) add $2, %r11 sub $2, arg3 add $2, arg2 _less_than_2_left: cmp $1, arg3 jl _zero_left # load 1 Byte mov (arg2), %al mov %al, (%r11) _zero_left: movdqa (%rsp), %xmm7 pshufb %xmm11, %xmm7 pxor %xmm0 , %xmm7 # xor the initial crc value # shl r9, 4 lea pshufb_shf_table+16(%rip), %rax sub %r9, %rax movdqu (%rax), %xmm0 pxor mask1(%rip), %xmm0 pshufb %xmm0, %xmm7 jmp _128_done .align 16 _exact_16_left: movdqu (arg2), %xmm7 pshufb %xmm11, %xmm7 pxor %xmm0 , %xmm7 # xor the initial crc value jmp _128_done _only_less_than_4: cmp $3, arg3 jl _only_less_than_3 # load 3 Bytes mov (arg2), %al mov %al, (%r11) mov 1(arg2), %al mov %al, 1(%r11) mov 2(arg2), %al mov %al, 2(%r11) movdqa (%rsp), %xmm7 pshufb %xmm11, %xmm7 pxor %xmm0 , %xmm7 # xor the initial crc value psrldq $5, %xmm7 jmp _barrett _only_less_than_3: cmp $2, arg3 jl _only_less_than_2 # load 2 Bytes mov (arg2), %al mov %al, (%r11) mov 1(arg2), %al mov %al, 1(%r11) movdqa (%rsp), %xmm7 pshufb %xmm11, %xmm7 pxor %xmm0 , %xmm7 # xor the initial crc value psrldq $6, %xmm7 jmp _barrett _only_less_than_2: # load 1 Byte mov (arg2), %al mov %al, (%r11) movdqa (%rsp), %xmm7 pshufb %xmm11, %xmm7 pxor %xmm0 , %xmm7 # xor the initial crc value psrldq $7, %xmm7 jmp _barrett ENDPROC(crc_t10dif_pcl) .section .rodata, "a", @progbits .align 16 # precomputed constants # these constants are precomputed from the poly: # 0x8bb70000 (0x8bb7 scaled to 32 bits) # Q = 0x18BB70000 # rk1 = 2^(32*3) mod Q << 32 # rk2 = 2^(32*5) mod Q << 32 # rk3 = 2^(32*15) mod Q << 32 # rk4 = 2^(32*17) mod Q << 32 # rk5 = 2^(32*3) mod Q << 32 # rk6 = 2^(32*2) mod Q << 32 # rk7 = floor(2^64/Q) # rk8 = Q rk1: .quad 0x2d56000000000000 rk2: .quad 0x06df000000000000 rk3: .quad 0x9d9d000000000000 rk4: .quad 0x7cf5000000000000 rk5: .quad 0x2d56000000000000 rk6: .quad 0x1368000000000000 rk7: .quad 0x00000001f65a57f8 rk8: .quad 0x000000018bb70000 rk9: .quad 0xceae000000000000 rk10: .quad 0xbfd6000000000000 rk11: .quad 0x1e16000000000000 rk12: .quad 0x713c000000000000 rk13: .quad 0xf7f9000000000000 rk14: .quad 0x80a6000000000000 rk15: .quad 0x044c000000000000 rk16: .quad 0xe658000000000000 rk17: .quad 0xad18000000000000 rk18: .quad 0xa497000000000000 rk19: .quad 0x6ee3000000000000 rk20: .quad 0xe7b5000000000000 .section 
.rodata.cst16.mask1, "aM", @progbits, 16 .align 16 mask1: .octa 0x80808080808080808080808080808080 .section .rodata.cst16.mask2, "aM", @progbits, 16 .align 16 mask2: .octa 0x00000000FFFFFFFFFFFFFFFFFFFFFFFF .section .rodata.cst16.SHUF_MASK, "aM", @progbits, 16 .align 16 SHUF_MASK: .octa 0x000102030405060708090A0B0C0D0E0F .section .rodata.cst32.pshufb_shf_table, "aM", @progbits, 32 .align 32 pshufb_shf_table: # use these values for shift constants for the pshufb instruction # different alignments result in values as shown: # DDQ 0x008f8e8d8c8b8a898887868584838281 # shl 15 (16-1) / shr1 # DDQ 0x01008f8e8d8c8b8a8988878685848382 # shl 14 (16-3) / shr2 # DDQ 0x0201008f8e8d8c8b8a89888786858483 # shl 13 (16-4) / shr3 # DDQ 0x030201008f8e8d8c8b8a898887868584 # shl 12 (16-4) / shr4 # DDQ 0x04030201008f8e8d8c8b8a8988878685 # shl 11 (16-5) / shr5 # DDQ 0x0504030201008f8e8d8c8b8a89888786 # shl 10 (16-6) / shr6 # DDQ 0x060504030201008f8e8d8c8b8a898887 # shl 9 (16-7) / shr7 # DDQ 0x07060504030201008f8e8d8c8b8a8988 # shl 8 (16-8) / shr8 # DDQ 0x0807060504030201008f8e8d8c8b8a89 # shl 7 (16-9) / shr9 # DDQ 0x090807060504030201008f8e8d8c8b8a # shl 6 (16-10) / shr10 # DDQ 0x0a090807060504030201008f8e8d8c8b # shl 5 (16-11) / shr11 # DDQ 0x0b0a090807060504030201008f8e8d8c # shl 4 (16-12) / shr12 # DDQ 0x0c0b0a090807060504030201008f8e8d # shl 3 (16-13) / shr13 # DDQ 0x0d0c0b0a090807060504030201008f8e # shl 2 (16-14) / shr14 # DDQ 0x0e0d0c0b0a090807060504030201008f # shl 1 (16-15) / shr15 .octa 0x8f8e8d8c8b8a89888786858483828100 .octa 0x000e0d0c0b0a09080706050403020100
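A bit-at-a-time reference for the same CRC is handy when sanity-checking the folded implementation above. This is an illustrative userspace sketch (names are ours, not the kernel's), using the generator polynomial 0x8bb7 MSB-first with no reflection, matching the constants documented above:

#include <stddef.h>
#include <stdint.h>

static uint16_t crc_t10dif_ref(uint16_t crc, const uint8_t *buf, size_t len)
{
	for (size_t i = 0; i < len; i++) {
		crc ^= (uint16_t)buf[i] << 8;	/* feed the next byte MSB-first */
		for (int bit = 0; bit < 8; bit++)
			crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x8bb7)
					     : (uint16_t)(crc << 1);
	}
	return crc;
}

/* crc_t10dif_ref(0, (const uint8_t *)"123456789", 9) == 0xd0db,
 * the standard CRC-16/T10-DIF check value. */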
AirFortressIlikara/LS2K0300-linux-4.19
11,064
arch/x86/crypto/camellia-x86_64-asm_64.S
/* * Camellia Cipher Algorithm (x86_64) * * Copyright (C) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * USA * */ #include <linux/linkage.h> .file "camellia-x86_64-asm_64.S" .text .extern camellia_sp10011110; .extern camellia_sp22000222; .extern camellia_sp03303033; .extern camellia_sp00444404; .extern camellia_sp02220222; .extern camellia_sp30333033; .extern camellia_sp44044404; .extern camellia_sp11101110; #define sp10011110 camellia_sp10011110 #define sp22000222 camellia_sp22000222 #define sp03303033 camellia_sp03303033 #define sp00444404 camellia_sp00444404 #define sp02220222 camellia_sp02220222 #define sp30333033 camellia_sp30333033 #define sp44044404 camellia_sp44044404 #define sp11101110 camellia_sp11101110 #define CAMELLIA_TABLE_BYTE_LEN 272 /* struct camellia_ctx: */ #define key_table 0 #define key_length CAMELLIA_TABLE_BYTE_LEN /* register macros */ #define CTX %rdi #define RIO %rsi #define RIOd %esi #define RAB0 %rax #define RCD0 %rcx #define RAB1 %rbx #define RCD1 %rdx #define RAB0d %eax #define RCD0d %ecx #define RAB1d %ebx #define RCD1d %edx #define RAB0bl %al #define RCD0bl %cl #define RAB1bl %bl #define RCD1bl %dl #define RAB0bh %ah #define RCD0bh %ch #define RAB1bh %bh #define RCD1bh %dh #define RT0 %rsi #define RT1 %r12 #define RT2 %r8 #define RT0d %esi #define RT1d %r12d #define RT2d %r8d #define RT2bl %r8b #define RXOR %r9 #define RR12 %r10 #define RDST %r11 #define RXORd %r9d #define RXORbl %r9b #define xor2ror16(T0, T1, tmp1, tmp2, ab, dst) \ movzbl ab ## bl, tmp2 ## d; \ movzbl ab ## bh, tmp1 ## d; \ rorq $16, ab; \ xorq T0(, tmp2, 8), dst; \ xorq T1(, tmp1, 8), dst; /********************************************************************** 1-way camellia **********************************************************************/ #define roundsm(ab, subkey, cd) \ movq (key_table + ((subkey) * 2) * 4)(CTX), RT2; \ \ xor2ror16(sp00444404, sp03303033, RT0, RT1, ab ## 0, cd ## 0); \ xor2ror16(sp22000222, sp10011110, RT0, RT1, ab ## 0, RT2); \ xor2ror16(sp11101110, sp44044404, RT0, RT1, ab ## 0, cd ## 0); \ xor2ror16(sp30333033, sp02220222, RT0, RT1, ab ## 0, RT2); \ \ xorq RT2, cd ## 0; #define fls(l, r, kl, kr) \ movl (key_table + ((kl) * 2) * 4)(CTX), RT0d; \ andl l ## 0d, RT0d; \ roll $1, RT0d; \ shlq $32, RT0; \ xorq RT0, l ## 0; \ movq (key_table + ((kr) * 2) * 4)(CTX), RT1; \ orq r ## 0, RT1; \ shrq $32, RT1; \ xorq RT1, r ## 0; \ \ movq (key_table + ((kl) * 2) * 4)(CTX), RT2; \ orq l ## 0, RT2; \ shrq $32, RT2; \ xorq RT2, l ## 0; \ movl (key_table + ((kr) * 2) * 4)(CTX), RT0d; \ andl r ## 0d, RT0d; \ roll $1, RT0d; \ shlq $32, RT0; \ xorq RT0, r ## 0; #define enc_rounds(i) \ roundsm(RAB, i + 2, RCD); \ roundsm(RCD, i + 3, RAB); \ roundsm(RAB, i + 4, RCD); \ roundsm(RCD, i + 5, RAB); \ roundsm(RAB, i + 6, RCD); \ roundsm(RCD, i + 7, RAB); #define enc_fls(i) \ fls(RAB, RCD, i + 0, i + 
1); #define enc_inpack() \ movq (RIO), RAB0; \ bswapq RAB0; \ rolq $32, RAB0; \ movq 4*2(RIO), RCD0; \ bswapq RCD0; \ rorq $32, RCD0; \ xorq key_table(CTX), RAB0; #define enc_outunpack(op, max) \ xorq key_table(CTX, max, 8), RCD0; \ rorq $32, RCD0; \ bswapq RCD0; \ op ## q RCD0, (RIO); \ rolq $32, RAB0; \ bswapq RAB0; \ op ## q RAB0, 4*2(RIO); #define dec_rounds(i) \ roundsm(RAB, i + 7, RCD); \ roundsm(RCD, i + 6, RAB); \ roundsm(RAB, i + 5, RCD); \ roundsm(RCD, i + 4, RAB); \ roundsm(RAB, i + 3, RCD); \ roundsm(RCD, i + 2, RAB); #define dec_fls(i) \ fls(RAB, RCD, i + 1, i + 0); #define dec_inpack(max) \ movq (RIO), RAB0; \ bswapq RAB0; \ rolq $32, RAB0; \ movq 4*2(RIO), RCD0; \ bswapq RCD0; \ rorq $32, RCD0; \ xorq key_table(CTX, max, 8), RAB0; #define dec_outunpack() \ xorq key_table(CTX), RCD0; \ rorq $32, RCD0; \ bswapq RCD0; \ movq RCD0, (RIO); \ rolq $32, RAB0; \ bswapq RAB0; \ movq RAB0, 4*2(RIO); ENTRY(__camellia_enc_blk) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src * %rcx: bool xor */ movq %r12, RR12; movq %rcx, RXOR; movq %rsi, RDST; movq %rdx, RIO; enc_inpack(); enc_rounds(0); enc_fls(8); enc_rounds(8); enc_fls(16); enc_rounds(16); movl $24, RT1d; /* max */ cmpb $16, key_length(CTX); je .L__enc_done; enc_fls(24); enc_rounds(24); movl $32, RT1d; /* max */ .L__enc_done: testb RXORbl, RXORbl; movq RDST, RIO; jnz .L__enc_xor; enc_outunpack(mov, RT1); movq RR12, %r12; ret; .L__enc_xor: enc_outunpack(xor, RT1); movq RR12, %r12; ret; ENDPROC(__camellia_enc_blk) ENTRY(camellia_dec_blk) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src */ cmpl $16, key_length(CTX); movl $32, RT2d; movl $24, RXORd; cmovel RXORd, RT2d; /* max */ movq %r12, RR12; movq %rsi, RDST; movq %rdx, RIO; dec_inpack(RT2); cmpb $24, RT2bl; je .L__dec_rounds16; dec_rounds(24); dec_fls(24); .L__dec_rounds16: dec_rounds(16); dec_fls(16); dec_rounds(8); dec_fls(8); dec_rounds(0); movq RDST, RIO; dec_outunpack(); movq RR12, %r12; ret; ENDPROC(camellia_dec_blk) /********************************************************************** 2-way camellia **********************************************************************/ #define roundsm2(ab, subkey, cd) \ movq (key_table + ((subkey) * 2) * 4)(CTX), RT2; \ xorq RT2, cd ## 1; \ \ xor2ror16(sp00444404, sp03303033, RT0, RT1, ab ## 0, cd ## 0); \ xor2ror16(sp22000222, sp10011110, RT0, RT1, ab ## 0, RT2); \ xor2ror16(sp11101110, sp44044404, RT0, RT1, ab ## 0, cd ## 0); \ xor2ror16(sp30333033, sp02220222, RT0, RT1, ab ## 0, RT2); \ \ xor2ror16(sp00444404, sp03303033, RT0, RT1, ab ## 1, cd ## 1); \ xorq RT2, cd ## 0; \ xor2ror16(sp22000222, sp10011110, RT0, RT1, ab ## 1, cd ## 1); \ xor2ror16(sp11101110, sp44044404, RT0, RT1, ab ## 1, cd ## 1); \ xor2ror16(sp30333033, sp02220222, RT0, RT1, ab ## 1, cd ## 1); #define fls2(l, r, kl, kr) \ movl (key_table + ((kl) * 2) * 4)(CTX), RT0d; \ andl l ## 0d, RT0d; \ roll $1, RT0d; \ shlq $32, RT0; \ xorq RT0, l ## 0; \ movq (key_table + ((kr) * 2) * 4)(CTX), RT1; \ orq r ## 0, RT1; \ shrq $32, RT1; \ xorq RT1, r ## 0; \ \ movl (key_table + ((kl) * 2) * 4)(CTX), RT2d; \ andl l ## 1d, RT2d; \ roll $1, RT2d; \ shlq $32, RT2; \ xorq RT2, l ## 1; \ movq (key_table + ((kr) * 2) * 4)(CTX), RT0; \ orq r ## 1, RT0; \ shrq $32, RT0; \ xorq RT0, r ## 1; \ \ movq (key_table + ((kl) * 2) * 4)(CTX), RT1; \ orq l ## 0, RT1; \ shrq $32, RT1; \ xorq RT1, l ## 0; \ movl (key_table + ((kr) * 2) * 4)(CTX), RT2d; \ andl r ## 0d, RT2d; \ roll $1, RT2d; \ shlq $32, RT2; \ xorq RT2, r ## 0; \ \ movq (key_table + ((kl) * 2) * 4)(CTX), RT0; \ orq l ## 1, RT0; \ 
shrq $32, RT0; \ xorq RT0, l ## 1; \ movl (key_table + ((kr) * 2) * 4)(CTX), RT1d; \ andl r ## 1d, RT1d; \ roll $1, RT1d; \ shlq $32, RT1; \ xorq RT1, r ## 1; #define enc_rounds2(i) \ roundsm2(RAB, i + 2, RCD); \ roundsm2(RCD, i + 3, RAB); \ roundsm2(RAB, i + 4, RCD); \ roundsm2(RCD, i + 5, RAB); \ roundsm2(RAB, i + 6, RCD); \ roundsm2(RCD, i + 7, RAB); #define enc_fls2(i) \ fls2(RAB, RCD, i + 0, i + 1); #define enc_inpack2() \ movq (RIO), RAB0; \ bswapq RAB0; \ rorq $32, RAB0; \ movq 4*2(RIO), RCD0; \ bswapq RCD0; \ rolq $32, RCD0; \ xorq key_table(CTX), RAB0; \ \ movq 8*2(RIO), RAB1; \ bswapq RAB1; \ rorq $32, RAB1; \ movq 12*2(RIO), RCD1; \ bswapq RCD1; \ rolq $32, RCD1; \ xorq key_table(CTX), RAB1; #define enc_outunpack2(op, max) \ xorq key_table(CTX, max, 8), RCD0; \ rolq $32, RCD0; \ bswapq RCD0; \ op ## q RCD0, (RIO); \ rorq $32, RAB0; \ bswapq RAB0; \ op ## q RAB0, 4*2(RIO); \ \ xorq key_table(CTX, max, 8), RCD1; \ rolq $32, RCD1; \ bswapq RCD1; \ op ## q RCD1, 8*2(RIO); \ rorq $32, RAB1; \ bswapq RAB1; \ op ## q RAB1, 12*2(RIO); #define dec_rounds2(i) \ roundsm2(RAB, i + 7, RCD); \ roundsm2(RCD, i + 6, RAB); \ roundsm2(RAB, i + 5, RCD); \ roundsm2(RCD, i + 4, RAB); \ roundsm2(RAB, i + 3, RCD); \ roundsm2(RCD, i + 2, RAB); #define dec_fls2(i) \ fls2(RAB, RCD, i + 1, i + 0); #define dec_inpack2(max) \ movq (RIO), RAB0; \ bswapq RAB0; \ rorq $32, RAB0; \ movq 4*2(RIO), RCD0; \ bswapq RCD0; \ rolq $32, RCD0; \ xorq key_table(CTX, max, 8), RAB0; \ \ movq 8*2(RIO), RAB1; \ bswapq RAB1; \ rorq $32, RAB1; \ movq 12*2(RIO), RCD1; \ bswapq RCD1; \ rolq $32, RCD1; \ xorq key_table(CTX, max, 8), RAB1; #define dec_outunpack2() \ xorq key_table(CTX), RCD0; \ rolq $32, RCD0; \ bswapq RCD0; \ movq RCD0, (RIO); \ rorq $32, RAB0; \ bswapq RAB0; \ movq RAB0, 4*2(RIO); \ \ xorq key_table(CTX), RCD1; \ rolq $32, RCD1; \ bswapq RCD1; \ movq RCD1, 8*2(RIO); \ rorq $32, RAB1; \ bswapq RAB1; \ movq RAB1, 12*2(RIO); ENTRY(__camellia_enc_blk_2way) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src * %rcx: bool xor */ pushq %rbx; movq %r12, RR12; movq %rcx, RXOR; movq %rsi, RDST; movq %rdx, RIO; enc_inpack2(); enc_rounds2(0); enc_fls2(8); enc_rounds2(8); enc_fls2(16); enc_rounds2(16); movl $24, RT2d; /* max */ cmpb $16, key_length(CTX); je .L__enc2_done; enc_fls2(24); enc_rounds2(24); movl $32, RT2d; /* max */ .L__enc2_done: test RXORbl, RXORbl; movq RDST, RIO; jnz .L__enc2_xor; enc_outunpack2(mov, RT2); movq RR12, %r12; popq %rbx; ret; .L__enc2_xor: enc_outunpack2(xor, RT2); movq RR12, %r12; popq %rbx; ret; ENDPROC(__camellia_enc_blk_2way) ENTRY(camellia_dec_blk_2way) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src */ cmpl $16, key_length(CTX); movl $32, RT2d; movl $24, RXORd; cmovel RXORd, RT2d; /* max */ movq %rbx, RXOR; movq %r12, RR12; movq %rsi, RDST; movq %rdx, RIO; dec_inpack2(RT2); cmpb $24, RT2bl; je .L__dec2_rounds16; dec_rounds2(24); dec_fls2(24); .L__dec2_rounds16: dec_rounds2(16); dec_fls2(16); dec_rounds2(8); dec_fls2(8); dec_rounds2(0); movq RDST, RIO; dec_outunpack2(); movq RR12, %r12; movq RXOR, %rbx; ret; ENDPROC(camellia_dec_blk_2way)
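The fls/fls2 macros above are Camellia's FL and FL^-1 layers, interleaved over the rotated 64-bit register halves. Written out plainly per RFC 3713, as a sketch with illustrative names (the asm keeps the halves word-rotated in registers, so it does not look like this literally):

#include <stdint.h>

static uint32_t rol32(uint32_t v, int n)
{
	return (v << n) | (v >> (32 - n));
}

/* FL(x, ke) per RFC 3713: x and ke are 64-bit, split into 32-bit halves */
static uint64_t camellia_fl(uint64_t x, uint64_t ke)
{
	uint32_t xl = x >> 32, xr = (uint32_t)x;
	uint32_t k1 = ke >> 32, k2 = (uint32_t)ke;

	xr ^= rol32(xl & k1, 1);	/* right half mixes in masked left half */
	xl ^= (xr | k2);		/* left half mixes in the *new* right half */
	return ((uint64_t)xl << 32) | xr;
}

/* FLINV(y, ke): the two FL steps undone in reverse order */
static uint64_t camellia_flinv(uint64_t y, uint64_t ke)
{
	uint32_t yl = y >> 32, yr = (uint32_t)y;
	uint32_t k1 = ke >> 32, k2 = (uint32_t)ke;

	yl ^= (yr | k2);
	yr ^= rol32(yl & k1, 1);
	return ((uint64_t)yl << 32) | yr;
}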
AirFortressIlikara/LS2K0300-linux-4.19
9,831
arch/x86/crypto/poly1305-avx2-x86_64.S
/* * Poly1305 authenticator algorithm, RFC7539, x64 AVX2 functions * * Copyright (C) 2015 Martin Willi * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/linkage.h> .section .rodata.cst32.ANMASK, "aM", @progbits, 32 .align 32 ANMASK: .octa 0x0000000003ffffff0000000003ffffff .octa 0x0000000003ffffff0000000003ffffff .section .rodata.cst32.ORMASK, "aM", @progbits, 32 .align 32 ORMASK: .octa 0x00000000010000000000000001000000 .octa 0x00000000010000000000000001000000 .text #define h0 0x00(%rdi) #define h1 0x04(%rdi) #define h2 0x08(%rdi) #define h3 0x0c(%rdi) #define h4 0x10(%rdi) #define r0 0x00(%rdx) #define r1 0x04(%rdx) #define r2 0x08(%rdx) #define r3 0x0c(%rdx) #define r4 0x10(%rdx) #define u0 0x00(%r8) #define u1 0x04(%r8) #define u2 0x08(%r8) #define u3 0x0c(%r8) #define u4 0x10(%r8) #define w0 0x14(%r8) #define w1 0x18(%r8) #define w2 0x1c(%r8) #define w3 0x20(%r8) #define w4 0x24(%r8) #define y0 0x28(%r8) #define y1 0x2c(%r8) #define y2 0x30(%r8) #define y3 0x34(%r8) #define y4 0x38(%r8) #define m %rsi #define hc0 %ymm0 #define hc1 %ymm1 #define hc2 %ymm2 #define hc3 %ymm3 #define hc4 %ymm4 #define hc0x %xmm0 #define hc1x %xmm1 #define hc2x %xmm2 #define hc3x %xmm3 #define hc4x %xmm4 #define t1 %ymm5 #define t2 %ymm6 #define t1x %xmm5 #define t2x %xmm6 #define ruwy0 %ymm7 #define ruwy1 %ymm8 #define ruwy2 %ymm9 #define ruwy3 %ymm10 #define ruwy4 %ymm11 #define ruwy0x %xmm7 #define ruwy1x %xmm8 #define ruwy2x %xmm9 #define ruwy3x %xmm10 #define ruwy4x %xmm11 #define svxz1 %ymm12 #define svxz2 %ymm13 #define svxz3 %ymm14 #define svxz4 %ymm15 #define d0 %r9 #define d1 %r10 #define d2 %r11 #define d3 %r12 #define d4 %r13 ENTRY(poly1305_4block_avx2) # %rdi: Accumulator h[5] # %rsi: 64 byte input block m # %rdx: Poly1305 key r[5] # %rcx: Quadblock count # %r8: Poly1305 derived key r^2 u[5], r^3 w[5], r^4 y[5], # This four-block variant uses loop unrolled block processing. 
It # requires 4 Poly1305 keys: r, r^2, r^3 and r^4: # h = (h + m) * r => h = (h + m1) * r^4 + m2 * r^3 + m3 * r^2 + m4 * r vzeroupper push %rbx push %r12 push %r13 # combine r0,u0,w0,y0 vmovd y0,ruwy0x vmovd w0,t1x vpunpcklqdq t1,ruwy0,ruwy0 vmovd u0,t1x vmovd r0,t2x vpunpcklqdq t2,t1,t1 vperm2i128 $0x20,t1,ruwy0,ruwy0 # combine r1,u1,w1,y1 and s1=r1*5,v1=u1*5,x1=w1*5,z1=y1*5 vmovd y1,ruwy1x vmovd w1,t1x vpunpcklqdq t1,ruwy1,ruwy1 vmovd u1,t1x vmovd r1,t2x vpunpcklqdq t2,t1,t1 vperm2i128 $0x20,t1,ruwy1,ruwy1 vpslld $2,ruwy1,svxz1 vpaddd ruwy1,svxz1,svxz1 # combine r2,u2,w2,y2 and s2=r2*5,v2=u2*5,x2=w2*5,z2=y2*5 vmovd y2,ruwy2x vmovd w2,t1x vpunpcklqdq t1,ruwy2,ruwy2 vmovd u2,t1x vmovd r2,t2x vpunpcklqdq t2,t1,t1 vperm2i128 $0x20,t1,ruwy2,ruwy2 vpslld $2,ruwy2,svxz2 vpaddd ruwy2,svxz2,svxz2 # combine r3,u3,w3,y3 and s3=r3*5,v3=u3*5,x3=w3*5,z3=y3*5 vmovd y3,ruwy3x vmovd w3,t1x vpunpcklqdq t1,ruwy3,ruwy3 vmovd u3,t1x vmovd r3,t2x vpunpcklqdq t2,t1,t1 vperm2i128 $0x20,t1,ruwy3,ruwy3 vpslld $2,ruwy3,svxz3 vpaddd ruwy3,svxz3,svxz3 # combine r4,u4,w4,y4 and s4=r4*5,v4=u4*5,x4=w4*5,z4=y4*5 vmovd y4,ruwy4x vmovd w4,t1x vpunpcklqdq t1,ruwy4,ruwy4 vmovd u4,t1x vmovd r4,t2x vpunpcklqdq t2,t1,t1 vperm2i128 $0x20,t1,ruwy4,ruwy4 vpslld $2,ruwy4,svxz4 vpaddd ruwy4,svxz4,svxz4 .Ldoblock4: # hc0 = [m[48-51] & 0x3ffffff, m[32-35] & 0x3ffffff, # m[16-19] & 0x3ffffff, m[ 0- 3] & 0x3ffffff + h0] vmovd 0x00(m),hc0x vmovd 0x10(m),t1x vpunpcklqdq t1,hc0,hc0 vmovd 0x20(m),t1x vmovd 0x30(m),t2x vpunpcklqdq t2,t1,t1 vperm2i128 $0x20,t1,hc0,hc0 vpand ANMASK(%rip),hc0,hc0 vmovd h0,t1x vpaddd t1,hc0,hc0 # hc1 = [(m[51-54] >> 2) & 0x3ffffff, (m[35-38] >> 2) & 0x3ffffff, # (m[19-22] >> 2) & 0x3ffffff, (m[ 3- 6] >> 2) & 0x3ffffff + h1] vmovd 0x03(m),hc1x vmovd 0x13(m),t1x vpunpcklqdq t1,hc1,hc1 vmovd 0x23(m),t1x vmovd 0x33(m),t2x vpunpcklqdq t2,t1,t1 vperm2i128 $0x20,t1,hc1,hc1 vpsrld $2,hc1,hc1 vpand ANMASK(%rip),hc1,hc1 vmovd h1,t1x vpaddd t1,hc1,hc1 # hc2 = [(m[54-57] >> 4) & 0x3ffffff, (m[38-41] >> 4) & 0x3ffffff, # (m[22-25] >> 4) & 0x3ffffff, (m[ 6- 9] >> 4) & 0x3ffffff + h2] vmovd 0x06(m),hc2x vmovd 0x16(m),t1x vpunpcklqdq t1,hc2,hc2 vmovd 0x26(m),t1x vmovd 0x36(m),t2x vpunpcklqdq t2,t1,t1 vperm2i128 $0x20,t1,hc2,hc2 vpsrld $4,hc2,hc2 vpand ANMASK(%rip),hc2,hc2 vmovd h2,t1x vpaddd t1,hc2,hc2 # hc3 = [(m[57-60] >> 6) & 0x3ffffff, (m[41-44] >> 6) & 0x3ffffff, # (m[25-28] >> 6) & 0x3ffffff, (m[ 9-12] >> 6) & 0x3ffffff + h3] vmovd 0x09(m),hc3x vmovd 0x19(m),t1x vpunpcklqdq t1,hc3,hc3 vmovd 0x29(m),t1x vmovd 0x39(m),t2x vpunpcklqdq t2,t1,t1 vperm2i128 $0x20,t1,hc3,hc3 vpsrld $6,hc3,hc3 vpand ANMASK(%rip),hc3,hc3 vmovd h3,t1x vpaddd t1,hc3,hc3 # hc4 = [(m[60-63] >> 8) | (1<<24), (m[44-47] >> 8) | (1<<24), # (m[28-31] >> 8) | (1<<24), (m[12-15] >> 8) | (1<<24) + h4] vmovd 0x0c(m),hc4x vmovd 0x1c(m),t1x vpunpcklqdq t1,hc4,hc4 vmovd 0x2c(m),t1x vmovd 0x3c(m),t2x vpunpcklqdq t2,t1,t1 vperm2i128 $0x20,t1,hc4,hc4 vpsrld $8,hc4,hc4 vpor ORMASK(%rip),hc4,hc4 vmovd h4,t1x vpaddd t1,hc4,hc4 # t1 = [ hc0[3] * r0, hc0[2] * u0, hc0[1] * w0, hc0[0] * y0 ] vpmuludq hc0,ruwy0,t1 # t1 += [ hc1[3] * s4, hc1[2] * v4, hc1[1] * x4, hc1[0] * z4 ] vpmuludq hc1,svxz4,t2 vpaddq t2,t1,t1 # t1 += [ hc2[3] * s3, hc2[2] * v3, hc2[1] * x3, hc2[0] * z3 ] vpmuludq hc2,svxz3,t2 vpaddq t2,t1,t1 # t1 += [ hc3[3] * s2, hc3[2] * v2, hc3[1] * x2, hc3[0] * z2 ] vpmuludq hc3,svxz2,t2 vpaddq t2,t1,t1 # t1 += [ hc4[3] * s1, hc4[2] * v1, hc4[1] * x1, hc4[0] * z1 ] vpmuludq hc4,svxz1,t2 vpaddq t2,t1,t1 # d0 = t1[0] + t1[1] + t[2] + t[3] vpermq $0xee,t1,t2 vpaddq 
t2,t1,t1 vpsrldq $8,t1,t2 vpaddq t2,t1,t1 vmovq t1x,d0 # t1 = [ hc0[3] * r1, hc0[2] * u1,hc0[1] * w1, hc0[0] * y1 ] vpmuludq hc0,ruwy1,t1 # t1 += [ hc1[3] * r0, hc1[2] * u0, hc1[1] * w0, hc1[0] * y0 ] vpmuludq hc1,ruwy0,t2 vpaddq t2,t1,t1 # t1 += [ hc2[3] * s4, hc2[2] * v4, hc2[1] * x4, hc2[0] * z4 ] vpmuludq hc2,svxz4,t2 vpaddq t2,t1,t1 # t1 += [ hc3[3] * s3, hc3[2] * v3, hc3[1] * x3, hc3[0] * z3 ] vpmuludq hc3,svxz3,t2 vpaddq t2,t1,t1 # t1 += [ hc4[3] * s2, hc4[2] * v2, hc4[1] * x2, hc4[0] * z2 ] vpmuludq hc4,svxz2,t2 vpaddq t2,t1,t1 # d1 = t1[0] + t1[1] + t1[3] + t1[4] vpermq $0xee,t1,t2 vpaddq t2,t1,t1 vpsrldq $8,t1,t2 vpaddq t2,t1,t1 vmovq t1x,d1 # t1 = [ hc0[3] * r2, hc0[2] * u2, hc0[1] * w2, hc0[0] * y2 ] vpmuludq hc0,ruwy2,t1 # t1 += [ hc1[3] * r1, hc1[2] * u1, hc1[1] * w1, hc1[0] * y1 ] vpmuludq hc1,ruwy1,t2 vpaddq t2,t1,t1 # t1 += [ hc2[3] * r0, hc2[2] * u0, hc2[1] * w0, hc2[0] * y0 ] vpmuludq hc2,ruwy0,t2 vpaddq t2,t1,t1 # t1 += [ hc3[3] * s4, hc3[2] * v4, hc3[1] * x4, hc3[0] * z4 ] vpmuludq hc3,svxz4,t2 vpaddq t2,t1,t1 # t1 += [ hc4[3] * s3, hc4[2] * v3, hc4[1] * x3, hc4[0] * z3 ] vpmuludq hc4,svxz3,t2 vpaddq t2,t1,t1 # d2 = t1[0] + t1[1] + t1[2] + t1[3] vpermq $0xee,t1,t2 vpaddq t2,t1,t1 vpsrldq $8,t1,t2 vpaddq t2,t1,t1 vmovq t1x,d2 # t1 = [ hc0[3] * r3, hc0[2] * u3, hc0[1] * w3, hc0[0] * y3 ] vpmuludq hc0,ruwy3,t1 # t1 += [ hc1[3] * r2, hc1[2] * u2, hc1[1] * w2, hc1[0] * y2 ] vpmuludq hc1,ruwy2,t2 vpaddq t2,t1,t1 # t1 += [ hc2[3] * r1, hc2[2] * u1, hc2[1] * w1, hc2[0] * y1 ] vpmuludq hc2,ruwy1,t2 vpaddq t2,t1,t1 # t1 += [ hc3[3] * r0, hc3[2] * u0, hc3[1] * w0, hc3[0] * y0 ] vpmuludq hc3,ruwy0,t2 vpaddq t2,t1,t1 # t1 += [ hc4[3] * s4, hc4[2] * v4, hc4[1] * x4, hc4[0] * z4 ] vpmuludq hc4,svxz4,t2 vpaddq t2,t1,t1 # d3 = t1[0] + t1[1] + t1[2] + t1[3] vpermq $0xee,t1,t2 vpaddq t2,t1,t1 vpsrldq $8,t1,t2 vpaddq t2,t1,t1 vmovq t1x,d3 # t1 = [ hc0[3] * r4, hc0[2] * u4, hc0[1] * w4, hc0[0] * y4 ] vpmuludq hc0,ruwy4,t1 # t1 += [ hc1[3] * r3, hc1[2] * u3, hc1[1] * w3, hc1[0] * y3 ] vpmuludq hc1,ruwy3,t2 vpaddq t2,t1,t1 # t1 += [ hc2[3] * r2, hc2[2] * u2, hc2[1] * w2, hc2[0] * y2 ] vpmuludq hc2,ruwy2,t2 vpaddq t2,t1,t1 # t1 += [ hc3[3] * r1, hc3[2] * u1, hc3[1] * w1, hc3[0] * y1 ] vpmuludq hc3,ruwy1,t2 vpaddq t2,t1,t1 # t1 += [ hc4[3] * r0, hc4[2] * u0, hc4[1] * w0, hc4[0] * y0 ] vpmuludq hc4,ruwy0,t2 vpaddq t2,t1,t1 # d4 = t1[0] + t1[1] + t1[2] + t1[3] vpermq $0xee,t1,t2 vpaddq t2,t1,t1 vpsrldq $8,t1,t2 vpaddq t2,t1,t1 vmovq t1x,d4 # Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 -> # h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small # amount. Careful: we must not assume the carry bits 'd0 >> 26', # 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit # integers. It's true in a single-block implementation, but not here. 
# d1 += d0 >> 26 mov d0,%rax shr $26,%rax add %rax,d1 # h0 = d0 & 0x3ffffff mov d0,%rbx and $0x3ffffff,%ebx # d2 += d1 >> 26 mov d1,%rax shr $26,%rax add %rax,d2 # h1 = d1 & 0x3ffffff mov d1,%rax and $0x3ffffff,%eax mov %eax,h1 # d3 += d2 >> 26 mov d2,%rax shr $26,%rax add %rax,d3 # h2 = d2 & 0x3ffffff mov d2,%rax and $0x3ffffff,%eax mov %eax,h2 # d4 += d3 >> 26 mov d3,%rax shr $26,%rax add %rax,d4 # h3 = d3 & 0x3ffffff mov d3,%rax and $0x3ffffff,%eax mov %eax,h3 # h0 += (d4 >> 26) * 5 mov d4,%rax shr $26,%rax lea (%rax,%rax,4),%rax add %rax,%rbx # h4 = d4 & 0x3ffffff mov d4,%rax and $0x3ffffff,%eax mov %eax,h4 # h1 += h0 >> 26 mov %rbx,%rax shr $26,%rax add %eax,h1 # h0 = h0 & 0x3ffffff andl $0x3ffffff,%ebx mov %ebx,h0 add $0x40,m dec %rcx jnz .Ldoblock4 vzeroupper pop %r13 pop %r12 pop %rbx ret ENDPROC(poly1305_4block_avx2)
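The scalar tail of .Ldoblock4 above is a carry chain over five 26-bit limbs. The same steps in C, as an illustrative sketch: h0 stays in a 64-bit temporary because, as the comment above notes, the carries in this 4-block variant can exceed 32 bits.

#include <stdint.h>

static void poly1305_partial_reduce(uint64_t d[5], uint32_t h[5])
{
	uint64_t h0;

	d[1] += d[0] >> 26;
	h0    = d[0] & 0x3ffffff;
	d[2] += d[1] >> 26;  h[1] = d[1] & 0x3ffffff;
	d[3] += d[2] >> 26;  h[2] = d[2] & 0x3ffffff;
	d[4] += d[3] >> 26;  h[3] = d[3] & 0x3ffffff;
	/* 2^130 == 5 (mod 2^130 - 5), so the top carry folds back times 5 */
	h0  += (d[4] >> 26) * 5;
	h[4] = d[4] & 0x3ffffff;
	h[1] += (uint32_t)(h0 >> 26);
	h[0]  = (uint32_t)(h0 & 0x3ffffff);
}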
AirFortressIlikara/LS2K0300-linux-4.19
14,256
arch/x86/crypto/sha1_avx2_x86_64_asm.S
/* * Implement fast SHA-1 with AVX2 instructions. (x86_64) * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * Contact Information: * Ilya Albrekht <ilya.albrekht@intel.com> * Maxim Locktyukhin <maxim.locktyukhin@intel.com> * Ronen Zohar <ronen.zohar@intel.com> * Chandramouli Narayanan <mouli@linux.intel.com> * * BSD LICENSE * * Copyright(c) 2014 Intel Corporation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ /* * SHA-1 implementation with Intel(R) AVX2 instruction set extensions. 
* *This implementation is based on the previous SSSE3 release: *Visit http://software.intel.com/en-us/articles/ *and refer to improving-the-performance-of-the-secure-hash-algorithm-1/ * *Updates 20-byte SHA-1 record in 'hash' for even number of *'num_blocks' consecutive 64-byte blocks * *extern "C" void sha1_transform_avx2( * int *hash, const char* input, size_t num_blocks ); */ #include <linux/linkage.h> #define CTX %rdi /* arg1 */ #define BUF %rsi /* arg2 */ #define CNT %rdx /* arg3 */ #define REG_A %ecx #define REG_B %esi #define REG_C %edi #define REG_D %eax #define REG_E %edx #define REG_TB %ebx #define REG_TA %r12d #define REG_RA %rcx #define REG_RB %rsi #define REG_RC %rdi #define REG_RD %rax #define REG_RE %rdx #define REG_RTA %r12 #define REG_RTB %rbx #define REG_T1 %r11d #define xmm_mov vmovups #define avx2_zeroupper vzeroupper #define RND_F1 1 #define RND_F2 2 #define RND_F3 3 .macro REGALLOC .set A, REG_A .set B, REG_B .set C, REG_C .set D, REG_D .set E, REG_E .set TB, REG_TB .set TA, REG_TA .set RA, REG_RA .set RB, REG_RB .set RC, REG_RC .set RD, REG_RD .set RE, REG_RE .set RTA, REG_RTA .set RTB, REG_RTB .set T1, REG_T1 .endm #define HASH_PTR %r9 #define BLOCKS_CTR %r8 #define BUFFER_PTR %r10 #define BUFFER_PTR2 %r13 #define PRECALC_BUF %r14 #define WK_BUF %r15 #define W_TMP %xmm0 #define WY_TMP %ymm0 #define WY_TMP2 %ymm9 # AVX2 variables #define WY0 %ymm3 #define WY4 %ymm5 #define WY08 %ymm7 #define WY12 %ymm8 #define WY16 %ymm12 #define WY20 %ymm13 #define WY24 %ymm14 #define WY28 %ymm15 #define YMM_SHUFB_BSWAP %ymm10 /* * Keep 2 iterations precalculated at a time: * - 80 DWORDs per iteration * 2 */ #define W_SIZE (80*2*2 +16) #define WK(t) ((((t) % 80) / 4)*32 + ( (t) % 4)*4 + ((t)/80)*16 )(WK_BUF) #define PRECALC_WK(t) ((t)*2*2)(PRECALC_BUF) .macro UPDATE_HASH hash, val add \hash, \val mov \val, \hash .endm .macro PRECALC_RESET_WY .set WY_00, WY0 .set WY_04, WY4 .set WY_08, WY08 .set WY_12, WY12 .set WY_16, WY16 .set WY_20, WY20 .set WY_24, WY24 .set WY_28, WY28 .set WY_32, WY_00 .endm .macro PRECALC_ROTATE_WY /* Rotate macros */ .set WY_32, WY_28 .set WY_28, WY_24 .set WY_24, WY_20 .set WY_20, WY_16 .set WY_16, WY_12 .set WY_12, WY_08 .set WY_08, WY_04 .set WY_04, WY_00 .set WY_00, WY_32 /* Define register aliases */ .set WY, WY_00 .set WY_minus_04, WY_04 .set WY_minus_08, WY_08 .set WY_minus_12, WY_12 .set WY_minus_16, WY_16 .set WY_minus_20, WY_20 .set WY_minus_24, WY_24 .set WY_minus_28, WY_28 .set WY_minus_32, WY .endm .macro PRECALC_00_15 .if (i == 0) # Initialize and rotate registers PRECALC_RESET_WY PRECALC_ROTATE_WY .endif /* message scheduling pre-compute for rounds 0-15 */ .if ((i & 7) == 0) /* * blended AVX2 and ALU instruction scheduling * 1 vector iteration per 8 rounds */ vmovdqu (i * 2)(BUFFER_PTR), W_TMP .elseif ((i & 7) == 1) vinsertf128 $1, ((i-1) * 2)(BUFFER_PTR2),\ WY_TMP, WY_TMP .elseif ((i & 7) == 2) vpshufb YMM_SHUFB_BSWAP, WY_TMP, WY .elseif ((i & 7) == 4) vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP .elseif ((i & 7) == 7) vmovdqu WY_TMP, PRECALC_WK(i&~7) PRECALC_ROTATE_WY .endif .endm .macro PRECALC_16_31 /* * message scheduling pre-compute for rounds 16-31 * calculating last 32 w[i] values in 8 XMM registers * pre-calculate K+w[i] values and store to mem * for later load by ALU add instruction * * "brute force" vectorization for rounds 16-31 only * due to w[i]->w[i-3] dependency */ .if ((i & 7) == 0) /* * blended AVX2 and ALU instruction scheduling * 1 vector iteration per 8 rounds */ /* w[i-14] */ vpalignr $8, WY_minus_16, WY_minus_12, WY vpsrldq 
$4, WY_minus_04, WY_TMP /* w[i-3] */ .elseif ((i & 7) == 1) vpxor WY_minus_08, WY, WY vpxor WY_minus_16, WY_TMP, WY_TMP .elseif ((i & 7) == 2) vpxor WY_TMP, WY, WY vpslldq $12, WY, WY_TMP2 .elseif ((i & 7) == 3) vpslld $1, WY, WY_TMP vpsrld $31, WY, WY .elseif ((i & 7) == 4) vpor WY, WY_TMP, WY_TMP vpslld $2, WY_TMP2, WY .elseif ((i & 7) == 5) vpsrld $30, WY_TMP2, WY_TMP2 vpxor WY, WY_TMP, WY_TMP .elseif ((i & 7) == 7) vpxor WY_TMP2, WY_TMP, WY vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP vmovdqu WY_TMP, PRECALC_WK(i&~7) PRECALC_ROTATE_WY .endif .endm .macro PRECALC_32_79 /* * in SHA-1 specification: * w[i] = (w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16]) rol 1 * instead we do equal: * w[i] = (w[i-6] ^ w[i-16] ^ w[i-28] ^ w[i-32]) rol 2 * allows more efficient vectorization * since w[i]=>w[i-3] dependency is broken */ .if ((i & 7) == 0) /* * blended AVX2 and ALU instruction scheduling * 1 vector iteration per 8 rounds */ vpalignr $8, WY_minus_08, WY_minus_04, WY_TMP .elseif ((i & 7) == 1) /* W is W_minus_32 before xor */ vpxor WY_minus_28, WY, WY .elseif ((i & 7) == 2) vpxor WY_minus_16, WY_TMP, WY_TMP .elseif ((i & 7) == 3) vpxor WY_TMP, WY, WY .elseif ((i & 7) == 4) vpslld $2, WY, WY_TMP .elseif ((i & 7) == 5) vpsrld $30, WY, WY vpor WY, WY_TMP, WY .elseif ((i & 7) == 7) vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP vmovdqu WY_TMP, PRECALC_WK(i&~7) PRECALC_ROTATE_WY .endif .endm .macro PRECALC r, s .set i, \r .if (i < 40) .set K_XMM, 32*0 .elseif (i < 80) .set K_XMM, 32*1 .elseif (i < 120) .set K_XMM, 32*2 .else .set K_XMM, 32*3 .endif .if (i<32) PRECALC_00_15 \s .elseif (i<64) PRECALC_16_31 \s .elseif (i < 160) PRECALC_32_79 \s .endif .endm .macro ROTATE_STATE .set T_REG, E .set E, D .set D, C .set C, B .set B, TB .set TB, A .set A, T_REG .set T_REG, RE .set RE, RD .set RD, RC .set RC, RB .set RB, RTB .set RTB, RA .set RA, T_REG .endm /* Macro relies on saved ROUND_Fx */ .macro RND_FUN f, r .if (\f == RND_F1) ROUND_F1 \r .elseif (\f == RND_F2) ROUND_F2 \r .elseif (\f == RND_F3) ROUND_F3 \r .endif .endm .macro RR r .set round_id, (\r % 80) .if (round_id == 0) /* Precalculate F for first round */ .set ROUND_FUNC, RND_F1 mov B, TB rorx $(32-30), B, B /* b>>>2 */ andn D, TB, T1 and C, TB xor T1, TB .endif RND_FUN ROUND_FUNC, \r ROTATE_STATE .if (round_id == 18) .set ROUND_FUNC, RND_F2 .elseif (round_id == 38) .set ROUND_FUNC, RND_F3 .elseif (round_id == 58) .set ROUND_FUNC, RND_F2 .endif .set round_id, ( (\r+1) % 80) RND_FUN ROUND_FUNC, (\r+1) ROTATE_STATE .endm .macro ROUND_F1 r add WK(\r), E andn C, A, T1 /* ~b&d */ lea (RE,RTB), E /* Add F from the previous round */ rorx $(32-5), A, TA /* T2 = A >>> 5 */ rorx $(32-30),A, TB /* b>>>2 for next round */ PRECALC (\r) /* msg scheduling for next 2 blocks */ /* * Calculate F for the next round * (b & c) ^ andn[b, d] */ and B, A /* b&c */ xor T1, A /* F1 = (b&c) ^ (~b&d) */ lea (RE,RTA), E /* E += A >>> 5 */ .endm .macro ROUND_F2 r add WK(\r), E lea (RE,RTB), E /* Add F from the previous round */ /* Calculate F for the next round */ rorx $(32-5), A, TA /* T2 = A >>> 5 */ .if ((round_id) < 79) rorx $(32-30), A, TB /* b>>>2 for next round */ .endif PRECALC (\r) /* msg scheduling for next 2 blocks */ .if ((round_id) < 79) xor B, A .endif add TA, E /* E += A >>> 5 */ .if ((round_id) < 79) xor C, A .endif .endm .macro ROUND_F3 r add WK(\r), E PRECALC (\r) /* msg scheduling for next 2 blocks */ lea (RE,RTB), E /* Add F from the previous round */ mov B, T1 or A, T1 rorx $(32-5), A, TA /* T2 = A >>> 5 */ rorx $(32-30), A, TB /* b>>>2 for next round */ /* Calculate F 
for the next round * (b and c) or (d and (b or c)) */ and C, T1 and B, A or T1, A add TA, E /* E += A >>> 5 */ .endm /* Add constant only if (%2 > %3) condition met (uses RTA as temp) * %1 + %2 >= %3 ? %4 : 0 */ .macro ADD_IF_GE a, b, c, d mov \a, RTA add $\d, RTA cmp $\c, \b cmovge RTA, \a .endm /* * macro implements 80 rounds of SHA-1, for multiple blocks with s/w pipelining */ .macro SHA1_PIPELINED_MAIN_BODY REGALLOC mov (HASH_PTR), A mov 4(HASH_PTR), B mov 8(HASH_PTR), C mov 12(HASH_PTR), D mov 16(HASH_PTR), E mov %rsp, PRECALC_BUF lea (2*4*80+32)(%rsp), WK_BUF # Precalc WK for first 2 blocks ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 2, 64 .set i, 0 .rept 160 PRECALC i .set i, i + 1 .endr /* Go to next block if needed */ ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 3, 128 ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128 xchg WK_BUF, PRECALC_BUF .align 32 _loop: /* * code loops through more than one block * we use K_BASE value as a signal of a last block, * it is set below by: cmovae BUFFER_PTR, K_BASE */ test BLOCKS_CTR, BLOCKS_CTR jnz _begin .align 32 jmp _end .align 32 _begin: /* * Do first block * rounds: 0,2,4,6,8 */ .set j, 0 .rept 5 RR j .set j, j+2 .endr jmp _loop0 _loop0: /* * rounds: * 10,12,14,16,18 * 20,22,24,26,28 * 30,32,34,36,38 * 40,42,44,46,48 * 50,52,54,56,58 */ .rept 25 RR j .set j, j+2 .endr /* Update Counter */ sub $1, BLOCKS_CTR /* Move to the next block only if needed*/ ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 4, 128 /* * rounds * 60,62,64,66,68 * 70,72,74,76,78 */ .rept 10 RR j .set j, j+2 .endr UPDATE_HASH (HASH_PTR), A UPDATE_HASH 4(HASH_PTR), TB UPDATE_HASH 8(HASH_PTR), C UPDATE_HASH 12(HASH_PTR), D UPDATE_HASH 16(HASH_PTR), E test BLOCKS_CTR, BLOCKS_CTR jz _loop mov TB, B /* Process second block */ /* * rounds * 0+80, 2+80, 4+80, 6+80, 8+80 * 10+80,12+80,14+80,16+80,18+80 */ .set j, 0 .rept 10 RR j+80 .set j, j+2 .endr jmp _loop1 _loop1: /* * rounds * 20+80,22+80,24+80,26+80,28+80 * 30+80,32+80,34+80,36+80,38+80 */ .rept 10 RR j+80 .set j, j+2 .endr jmp _loop2 _loop2: /* * rounds * 40+80,42+80,44+80,46+80,48+80 * 50+80,52+80,54+80,56+80,58+80 */ .rept 10 RR j+80 .set j, j+2 .endr /* update counter */ sub $1, BLOCKS_CTR /* Move to the next block only if needed*/ ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128 jmp _loop3 _loop3: /* * rounds * 60+80,62+80,64+80,66+80,68+80 * 70+80,72+80,74+80,76+80,78+80 */ .rept 10 RR j+80 .set j, j+2 .endr UPDATE_HASH (HASH_PTR), A UPDATE_HASH 4(HASH_PTR), TB UPDATE_HASH 8(HASH_PTR), C UPDATE_HASH 12(HASH_PTR), D UPDATE_HASH 16(HASH_PTR), E /* Reset state for AVX2 reg permutation */ mov A, TA mov TB, A mov C, TB mov E, C mov D, B mov TA, D REGALLOC xchg WK_BUF, PRECALC_BUF jmp _loop .align 32 _end: .endm /* * macro implements SHA-1 function's body for several 64-byte blocks * param: function's name */ .macro SHA1_VECTOR_ASM name ENTRY(\name) push %rbx push %r12 push %r13 push %r14 push %r15 RESERVE_STACK = (W_SIZE*4 + 8+24) /* Align stack */ mov %rsp, %rbx and $~(0x20-1), %rsp push %rbx sub $RESERVE_STACK, %rsp avx2_zeroupper /* Setup initial values */ mov CTX, HASH_PTR mov BUF, BUFFER_PTR mov BUF, BUFFER_PTR2 mov CNT, BLOCKS_CTR xmm_mov BSWAP_SHUFB_CTL(%rip), YMM_SHUFB_BSWAP SHA1_PIPELINED_MAIN_BODY avx2_zeroupper add $RESERVE_STACK, %rsp pop %rsp pop %r15 pop %r14 pop %r13 pop %r12 pop %rbx ret ENDPROC(\name) .endm .section .rodata #define K1 0x5a827999 #define K2 0x6ed9eba1 #define K3 0x8f1bbcdc #define K4 0xca62c1d6 .align 128 K_XMM_AR: .long K1, K1, K1, K1 .long K1, K1, K1, K1 .long K2, K2, K2, K2 .long K2, K2, K2, K2 .long K3, K3, K3, K3 .long K3, K3, K3, K3 
.long K4, K4, K4, K4 .long K4, K4, K4, K4 BSWAP_SHUFB_CTL: .long 0x00010203 .long 0x04050607 .long 0x08090a0b .long 0x0c0d0e0f .long 0x00010203 .long 0x04050607 .long 0x08090a0b .long 0x0c0d0e0f .text SHA1_VECTOR_ASM sha1_transform_avx2
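The PRECALC_32_79 comment relies on the identity w[i] = (w[i-6] ^ w[i-16] ^ w[i-28] ^ w[i-32]) rol 2 holding for i >= 32. A standalone C check of that identity against the FIPS 180 recurrence (a sketch, not part of the build):

#include <assert.h>
#include <stdint.h>

static uint32_t rol32(uint32_t v, int n)
{
	return (v << n) | (v >> (32 - n));
}

static void sha1_schedule_check(const uint32_t block[16])
{
	uint32_t w[80];
	int i;

	for (i = 0; i < 16; i++)
		w[i] = block[i];
	for (; i < 80; i++)		/* the standard FIPS 180 recurrence */
		w[i] = rol32(w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16], 1);
	for (i = 32; i < 80; i++)	/* the vector-friendly rewritten form */
		assert(w[i] == rol32(w[i-6] ^ w[i-16] ^ w[i-28] ^ w[i-32], 2));
}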
AirFortressIlikara/LS2K0300-linux-4.19
12,590
arch/x86/crypto/morus1280-avx2-asm.S
/* * AVX2 implementation of MORUS-1280 * * Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com> * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. */ #include <linux/linkage.h> #include <asm/frame.h> #define SHUFFLE_MASK(i0, i1, i2, i3) \ (i0 | (i1 << 2) | (i2 << 4) | (i3 << 6)) #define MASK1 SHUFFLE_MASK(3, 0, 1, 2) #define MASK2 SHUFFLE_MASK(2, 3, 0, 1) #define MASK3 SHUFFLE_MASK(1, 2, 3, 0) #define STATE0 %ymm0 #define STATE0_LOW %xmm0 #define STATE1 %ymm1 #define STATE2 %ymm2 #define STATE3 %ymm3 #define STATE4 %ymm4 #define KEY %ymm5 #define MSG %ymm5 #define MSG_LOW %xmm5 #define T0 %ymm6 #define T0_LOW %xmm6 #define T1 %ymm7 .section .rodata.cst32.morus1280_const, "aM", @progbits, 32 .align 32 .Lmorus1280_const: .byte 0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x08, 0x0d .byte 0x15, 0x22, 0x37, 0x59, 0x90, 0xe9, 0x79, 0x62 .byte 0xdb, 0x3d, 0x18, 0x55, 0x6d, 0xc2, 0x2f, 0xf1 .byte 0x20, 0x11, 0x31, 0x42, 0x73, 0xb5, 0x28, 0xdd .section .rodata.cst32.morus1280_counter, "aM", @progbits, 32 .align 32 .Lmorus1280_counter: .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 .byte 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f .byte 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17 .byte 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f .text .macro morus1280_round s0, s1, s2, s3, s4, b, w vpand \s1, \s2, T0 vpxor T0, \s0, \s0 vpxor \s3, \s0, \s0 vpsllq $\b, \s0, T0 vpsrlq $(64 - \b), \s0, \s0 vpxor T0, \s0, \s0 vpermq $\w, \s3, \s3 .endm /* * __morus1280_update: internal ABI * input: * STATE[0-4] - input state * MSG - message block * output: * STATE[0-4] - output state * changed: * T0 */ __morus1280_update: morus1280_round STATE0, STATE1, STATE2, STATE3, STATE4, 13, MASK1 vpxor MSG, STATE1, STATE1 morus1280_round STATE1, STATE2, STATE3, STATE4, STATE0, 46, MASK2 vpxor MSG, STATE2, STATE2 morus1280_round STATE2, STATE3, STATE4, STATE0, STATE1, 38, MASK3 vpxor MSG, STATE3, STATE3 morus1280_round STATE3, STATE4, STATE0, STATE1, STATE2, 7, MASK2 vpxor MSG, STATE4, STATE4 morus1280_round STATE4, STATE0, STATE1, STATE2, STATE3, 4, MASK1 ret ENDPROC(__morus1280_update) /* * __morus1280_update_zero: internal ABI * input: * STATE[0-4] - input state * output: * STATE[0-4] - output state * changed: * T0 */ __morus1280_update_zero: morus1280_round STATE0, STATE1, STATE2, STATE3, STATE4, 13, MASK1 morus1280_round STATE1, STATE2, STATE3, STATE4, STATE0, 46, MASK2 morus1280_round STATE2, STATE3, STATE4, STATE0, STATE1, 38, MASK3 morus1280_round STATE3, STATE4, STATE0, STATE1, STATE2, 7, MASK2 morus1280_round STATE4, STATE0, STATE1, STATE2, STATE3, 4, MASK1 ret ENDPROC(__morus1280_update_zero) /* * __load_partial: internal ABI * input: * %rsi - src * %rcx - bytes * output: * MSG - message block * changed: * %r8 * %r9 */ __load_partial: xor %r9d, %r9d vpxor MSG, MSG, MSG mov %rcx, %r8 and $0x1, %r8 jz .Lld_partial_1 mov %rcx, %r8 and $0x1E, %r8 add %rsi, %r8 mov (%r8), %r9b .Lld_partial_1: mov %rcx, %r8 and $0x2, %r8 jz .Lld_partial_2 mov %rcx, %r8 and $0x1C, %r8 add %rsi, %r8 shl $16, %r9 mov (%r8), %r9w .Lld_partial_2: mov %rcx, %r8 and $0x4, %r8 jz .Lld_partial_4 mov %rcx, %r8 and $0x18, %r8 add %rsi, %r8 shl $32, %r9 mov (%r8), %r8d xor %r8, %r9 .Lld_partial_4: movq %r9, MSG_LOW mov %rcx, %r8 and $0x8, %r8 jz .Lld_partial_8 mov %rcx, %r8 and $0x10, %r8 add %rsi, %r8 pshufd $MASK2, MSG_LOW, MSG_LOW pinsrq $0, 
(%r8), MSG_LOW .Lld_partial_8: mov %rcx, %r8 and $0x10, %r8 jz .Lld_partial_16 vpermq $MASK2, MSG, MSG movdqu (%rsi), MSG_LOW .Lld_partial_16: ret ENDPROC(__load_partial) /* * __store_partial: internal ABI * input: * %rdx - dst * %rcx - bytes * output: * T0 - message block * changed: * %r8 * %r9 * %r10 */ __store_partial: mov %rcx, %r8 mov %rdx, %r9 cmp $16, %r8 jl .Lst_partial_16 movdqu T0_LOW, (%r9) vpermq $MASK2, T0, T0 sub $16, %r8 add $16, %r9 .Lst_partial_16: movq T0_LOW, %r10 cmp $8, %r8 jl .Lst_partial_8 mov %r10, (%r9) pextrq $1, T0_LOW, %r10 sub $8, %r8 add $8, %r9 .Lst_partial_8: cmp $4, %r8 jl .Lst_partial_4 mov %r10d, (%r9) shr $32, %r10 sub $4, %r8 add $4, %r9 .Lst_partial_4: cmp $2, %r8 jl .Lst_partial_2 mov %r10w, (%r9) shr $16, %r10 sub $2, %r8 add $2, %r9 .Lst_partial_2: cmp $1, %r8 jl .Lst_partial_1 mov %r10b, (%r9) .Lst_partial_1: ret ENDPROC(__store_partial) /* * void crypto_morus1280_avx2_init(void *state, const void *key, * const void *iv); */ ENTRY(crypto_morus1280_avx2_init) FRAME_BEGIN /* load IV: */ vpxor STATE0, STATE0, STATE0 movdqu (%rdx), STATE0_LOW /* load key: */ vmovdqu (%rsi), KEY vmovdqa KEY, STATE1 /* load all ones: */ vpcmpeqd STATE2, STATE2, STATE2 /* load all zeros: */ vpxor STATE3, STATE3, STATE3 /* load the constant: */ vmovdqa .Lmorus1280_const, STATE4 /* update 16 times with zero: */ call __morus1280_update_zero call __morus1280_update_zero call __morus1280_update_zero call __morus1280_update_zero call __morus1280_update_zero call __morus1280_update_zero call __morus1280_update_zero call __morus1280_update_zero call __morus1280_update_zero call __morus1280_update_zero call __morus1280_update_zero call __morus1280_update_zero call __morus1280_update_zero call __morus1280_update_zero call __morus1280_update_zero call __morus1280_update_zero /* xor-in the key again after updates: */ vpxor KEY, STATE1, STATE1 /* store the state: */ vmovdqu STATE0, (0 * 32)(%rdi) vmovdqu STATE1, (1 * 32)(%rdi) vmovdqu STATE2, (2 * 32)(%rdi) vmovdqu STATE3, (3 * 32)(%rdi) vmovdqu STATE4, (4 * 32)(%rdi) FRAME_END ret ENDPROC(crypto_morus1280_avx2_init) /* * void crypto_morus1280_avx2_ad(void *state, const void *data, * unsigned int length); */ ENTRY(crypto_morus1280_avx2_ad) FRAME_BEGIN cmp $32, %rdx jb .Lad_out /* load the state: */ vmovdqu (0 * 32)(%rdi), STATE0 vmovdqu (1 * 32)(%rdi), STATE1 vmovdqu (2 * 32)(%rdi), STATE2 vmovdqu (3 * 32)(%rdi), STATE3 vmovdqu (4 * 32)(%rdi), STATE4 mov %rsi, %r8 and $0x1F, %r8 jnz .Lad_u_loop .align 4 .Lad_a_loop: vmovdqa (%rsi), MSG call __morus1280_update sub $32, %rdx add $32, %rsi cmp $32, %rdx jge .Lad_a_loop jmp .Lad_cont .align 4 .Lad_u_loop: vmovdqu (%rsi), MSG call __morus1280_update sub $32, %rdx add $32, %rsi cmp $32, %rdx jge .Lad_u_loop .Lad_cont: /* store the state: */ vmovdqu STATE0, (0 * 32)(%rdi) vmovdqu STATE1, (1 * 32)(%rdi) vmovdqu STATE2, (2 * 32)(%rdi) vmovdqu STATE3, (3 * 32)(%rdi) vmovdqu STATE4, (4 * 32)(%rdi) .Lad_out: FRAME_END ret ENDPROC(crypto_morus1280_avx2_ad) /* * void crypto_morus1280_avx2_enc(void *state, const void *src, void *dst, * unsigned int length); */ ENTRY(crypto_morus1280_avx2_enc) FRAME_BEGIN cmp $32, %rcx jb .Lenc_out /* load the state: */ vmovdqu (0 * 32)(%rdi), STATE0 vmovdqu (1 * 32)(%rdi), STATE1 vmovdqu (2 * 32)(%rdi), STATE2 vmovdqu (3 * 32)(%rdi), STATE3 vmovdqu (4 * 32)(%rdi), STATE4 mov %rsi, %r8 or %rdx, %r8 and $0x1F, %r8 jnz .Lenc_u_loop .align 4 .Lenc_a_loop: vmovdqa (%rsi), MSG vmovdqa MSG, T0 vpxor STATE0, T0, T0 vpermq $MASK3, STATE1, T1 vpxor T1, T0, T0 vpand STATE2, 
STATE3, T1 vpxor T1, T0, T0 vmovdqa T0, (%rdx) call __morus1280_update sub $32, %rcx add $32, %rsi add $32, %rdx cmp $32, %rcx jge .Lenc_a_loop jmp .Lenc_cont .align 4 .Lenc_u_loop: vmovdqu (%rsi), MSG vmovdqa MSG, T0 vpxor STATE0, T0, T0 vpermq $MASK3, STATE1, T1 vpxor T1, T0, T0 vpand STATE2, STATE3, T1 vpxor T1, T0, T0 vmovdqu T0, (%rdx) call __morus1280_update sub $32, %rcx add $32, %rsi add $32, %rdx cmp $32, %rcx jge .Lenc_u_loop .Lenc_cont: /* store the state: */ vmovdqu STATE0, (0 * 32)(%rdi) vmovdqu STATE1, (1 * 32)(%rdi) vmovdqu STATE2, (2 * 32)(%rdi) vmovdqu STATE3, (3 * 32)(%rdi) vmovdqu STATE4, (4 * 32)(%rdi) .Lenc_out: FRAME_END ret ENDPROC(crypto_morus1280_avx2_enc) /* * void crypto_morus1280_avx2_enc_tail(void *state, const void *src, void *dst, * unsigned int length); */ ENTRY(crypto_morus1280_avx2_enc_tail) FRAME_BEGIN /* load the state: */ vmovdqu (0 * 32)(%rdi), STATE0 vmovdqu (1 * 32)(%rdi), STATE1 vmovdqu (2 * 32)(%rdi), STATE2 vmovdqu (3 * 32)(%rdi), STATE3 vmovdqu (4 * 32)(%rdi), STATE4 /* encrypt message: */ call __load_partial vmovdqa MSG, T0 vpxor STATE0, T0, T0 vpermq $MASK3, STATE1, T1 vpxor T1, T0, T0 vpand STATE2, STATE3, T1 vpxor T1, T0, T0 call __store_partial call __morus1280_update /* store the state: */ vmovdqu STATE0, (0 * 32)(%rdi) vmovdqu STATE1, (1 * 32)(%rdi) vmovdqu STATE2, (2 * 32)(%rdi) vmovdqu STATE3, (3 * 32)(%rdi) vmovdqu STATE4, (4 * 32)(%rdi) FRAME_END ret ENDPROC(crypto_morus1280_avx2_enc_tail) /* * void crypto_morus1280_avx2_dec(void *state, const void *src, void *dst, * unsigned int length); */ ENTRY(crypto_morus1280_avx2_dec) FRAME_BEGIN cmp $32, %rcx jb .Ldec_out /* load the state: */ vmovdqu (0 * 32)(%rdi), STATE0 vmovdqu (1 * 32)(%rdi), STATE1 vmovdqu (2 * 32)(%rdi), STATE2 vmovdqu (3 * 32)(%rdi), STATE3 vmovdqu (4 * 32)(%rdi), STATE4 mov %rsi, %r8 or %rdx, %r8 and $0x1F, %r8 jnz .Ldec_u_loop .align 4 .Ldec_a_loop: vmovdqa (%rsi), MSG vpxor STATE0, MSG, MSG vpermq $MASK3, STATE1, T0 vpxor T0, MSG, MSG vpand STATE2, STATE3, T0 vpxor T0, MSG, MSG vmovdqa MSG, (%rdx) call __morus1280_update sub $32, %rcx add $32, %rsi add $32, %rdx cmp $32, %rcx jge .Ldec_a_loop jmp .Ldec_cont .align 4 .Ldec_u_loop: vmovdqu (%rsi), MSG vpxor STATE0, MSG, MSG vpermq $MASK3, STATE1, T0 vpxor T0, MSG, MSG vpand STATE2, STATE3, T0 vpxor T0, MSG, MSG vmovdqu MSG, (%rdx) call __morus1280_update sub $32, %rcx add $32, %rsi add $32, %rdx cmp $32, %rcx jge .Ldec_u_loop .Ldec_cont: /* store the state: */ vmovdqu STATE0, (0 * 32)(%rdi) vmovdqu STATE1, (1 * 32)(%rdi) vmovdqu STATE2, (2 * 32)(%rdi) vmovdqu STATE3, (3 * 32)(%rdi) vmovdqu STATE4, (4 * 32)(%rdi) .Ldec_out: FRAME_END ret ENDPROC(crypto_morus1280_avx2_dec) /* * void crypto_morus1280_avx2_dec_tail(void *state, const void *src, void *dst, * unsigned int length); */ ENTRY(crypto_morus1280_avx2_dec_tail) FRAME_BEGIN /* load the state: */ vmovdqu (0 * 32)(%rdi), STATE0 vmovdqu (1 * 32)(%rdi), STATE1 vmovdqu (2 * 32)(%rdi), STATE2 vmovdqu (3 * 32)(%rdi), STATE3 vmovdqu (4 * 32)(%rdi), STATE4 /* decrypt message: */ call __load_partial vpxor STATE0, MSG, MSG vpermq $MASK3, STATE1, T0 vpxor T0, MSG, MSG vpand STATE2, STATE3, T0 vpxor T0, MSG, MSG vmovdqa MSG, T0 call __store_partial /* mask with byte count: */ movq %rcx, T0_LOW vpbroadcastb T0_LOW, T0 vmovdqa .Lmorus1280_counter, T1 vpcmpgtb T1, T0, T0 vpand T0, MSG, MSG call __morus1280_update /* store the state: */ vmovdqu STATE0, (0 * 32)(%rdi) vmovdqu STATE1, (1 * 32)(%rdi) vmovdqu STATE2, (2 * 32)(%rdi) vmovdqu STATE3, (3 * 32)(%rdi) vmovdqu STATE4, (4 * 
32)(%rdi) FRAME_END ret ENDPROC(crypto_morus1280_avx2_dec_tail) /* * void crypto_morus1280_avx2_final(void *state, void *tag_xor, * u64 assoclen, u64 cryptlen); */ ENTRY(crypto_morus1280_avx2_final) FRAME_BEGIN /* load the state: */ vmovdqu (0 * 32)(%rdi), STATE0 vmovdqu (1 * 32)(%rdi), STATE1 vmovdqu (2 * 32)(%rdi), STATE2 vmovdqu (3 * 32)(%rdi), STATE3 vmovdqu (4 * 32)(%rdi), STATE4 /* xor state[0] into state[4]: */ vpxor STATE0, STATE4, STATE4 /* prepare length block: */ vpxor MSG, MSG, MSG vpinsrq $0, %rdx, MSG_LOW, MSG_LOW vpinsrq $1, %rcx, MSG_LOW, MSG_LOW vpsllq $3, MSG, MSG /* multiply by 8 (to get bit count) */ /* update state: */ call __morus1280_update call __morus1280_update call __morus1280_update call __morus1280_update call __morus1280_update call __morus1280_update call __morus1280_update call __morus1280_update call __morus1280_update call __morus1280_update /* xor tag: */ vmovdqu (%rsi), MSG vpxor STATE0, MSG, MSG vpermq $MASK3, STATE1, T0 vpxor T0, MSG, MSG vpand STATE2, STATE3, T0 vpxor T0, MSG, MSG vmovdqu MSG, (%rsi) FRAME_END ret ENDPROC(crypto_morus1280_avx2_final)
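The enc, enc_tail, dec, dec_tail and final routines above all derive their keystream the same way: the message/tag block is XORed with STATE0, with a vpermq-rotated copy of STATE1, and with STATE2 & STATE3. Below is a minimal scalar C sketch of that output step; the names (struct u256, rotl_lanes, morus1280_output) are mine, and the assumption that $MASK3 rotates STATE1 by one 64-bit lane is only illustrative, since the MASK3 immediate itself is defined earlier in the file.

#include <stdint.h>

/* Scalar model of one 256-bit MORUS-1280 output step, mirroring the
 * vpxor/vpermq/vpand sequence used by the enc/dec/final paths above.
 * All names here are illustrative, not part of the file. */
struct u256 { uint64_t w[4]; };

/* Stand-in for "vpermq $MASK3, STATE1, T1": assumed to rotate the four
 * 64-bit lanes of STATE1 by one position (the real lane order comes
 * from the MASK3 immediate defined earlier in the file). */
static struct u256 rotl_lanes(struct u256 v)
{
	struct u256 r;
	int i;

	for (i = 0; i < 4; i++)
		r.w[i] = v.w[(i + 1) % 4];
	return r;
}

/* C = M ^ S0 ^ rot(S1) ^ (S2 & S3) */
static struct u256 morus1280_output(struct u256 m, const struct u256 s[5])
{
	struct u256 rot = rotl_lanes(s[1]);
	struct u256 c;
	int i;

	for (i = 0; i < 4; i++)
		c.w[i] = m.w[i] ^ s[0].w[i] ^ rot.w[i] ^ (s[2].w[i] & s[3].w[i]);
	return c;
}

Decryption combines the same three terms, which is why the dec loops XOR the ciphertext with STATE0, the rotated STATE1 and STATE2 & STATE3 before calling __morus1280_update.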
AirFortressIlikara/LS2K0300-linux-4.19
17,615
arch/x86/crypto/sha256-ssse3-asm.S
######################################################################## # Implement fast SHA-256 with SSSE3 instructions. (x86_64) # # Copyright (C) 2013 Intel Corporation. # # Authors: # James Guilford <james.guilford@intel.com> # Kirk Yap <kirk.s.yap@intel.com> # Tim Chen <tim.c.chen@linux.intel.com> # # This software is available to you under a choice of one of two # licenses. You may choose to be licensed under the terms of the GNU # General Public License (GPL) Version 2, available from the file # COPYING in the main directory of this source tree, or the # OpenIB.org BSD license below: # # Redistribution and use in source and binary forms, with or # without modification, are permitted provided that the following # conditions are met: # # - Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # - Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials # provided with the distribution. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # ######################################################################## # # This code is described in an Intel White-Paper: # "Fast SHA-256 Implementations on Intel Architecture Processors" # # To find it, surf to http://www.intel.com/p/en_US/embedded # and search for that title.
# ######################################################################## #include <linux/linkage.h> ## assume buffers not aligned #define MOVDQ movdqu ################################ Define Macros # addm [mem], reg # Add reg to mem using reg-mem add and store .macro addm p1 p2 add \p1, \p2 mov \p2, \p1 .endm ################################ # COPY_XMM_AND_BSWAP xmm, [mem], byte_flip_mask # Load xmm with mem and byte swap each dword .macro COPY_XMM_AND_BSWAP p1 p2 p3 MOVDQ \p2, \p1 pshufb \p3, \p1 .endm ################################ X0 = %xmm4 X1 = %xmm5 X2 = %xmm6 X3 = %xmm7 XTMP0 = %xmm0 XTMP1 = %xmm1 XTMP2 = %xmm2 XTMP3 = %xmm3 XTMP4 = %xmm8 XFER = %xmm9 SHUF_00BA = %xmm10 # shuffle xBxA -> 00BA SHUF_DC00 = %xmm11 # shuffle xDxC -> DC00 BYTE_FLIP_MASK = %xmm12 NUM_BLKS = %rdx # 3rd arg INP = %rsi # 2nd arg CTX = %rdi # 1st arg SRND = %rsi # clobbers INP c = %ecx d = %r8d e = %edx TBL = %r12 a = %eax b = %ebx f = %r9d g = %r10d h = %r11d y0 = %r13d y1 = %r14d y2 = %r15d _INP_END_SIZE = 8 _INP_SIZE = 8 _XFER_SIZE = 16 _XMM_SAVE_SIZE = 0 _INP_END = 0 _INP = _INP_END + _INP_END_SIZE _XFER = _INP + _INP_SIZE _XMM_SAVE = _XFER + _XFER_SIZE STACK_SIZE = _XMM_SAVE + _XMM_SAVE_SIZE # rotate_Xs # Rotate values of symbols X0...X3 .macro rotate_Xs X_ = X0 X0 = X1 X1 = X2 X2 = X3 X3 = X_ .endm # ROTATE_ARGS # Rotate values of symbols a...h .macro ROTATE_ARGS TMP_ = h h = g g = f f = e e = d d = c c = b b = a a = TMP_ .endm .macro FOUR_ROUNDS_AND_SCHED ## compute s0 four at a time and s1 two at a time ## compute W[-16] + W[-7] 4 at a time movdqa X3, XTMP0 mov e, y0 # y0 = e ror $(25-11), y0 # y0 = e >> (25-11) mov a, y1 # y1 = a palignr $4, X2, XTMP0 # XTMP0 = W[-7] ror $(22-13), y1 # y1 = a >> (22-13) xor e, y0 # y0 = e ^ (e >> (25-11)) mov f, y2 # y2 = f ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6)) movdqa X1, XTMP1 xor a, y1 # y1 = a ^ (a >> (22-13) xor g, y2 # y2 = f^g paddd X0, XTMP0 # XTMP0 = W[-7] + W[-16] xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) and e, y2 # y2 = (f^g)&e ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2)) ## compute s0 palignr $4, X0, XTMP1 # XTMP1 = W[-15] xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) ror $6, y0 # y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) xor g, y2 # y2 = CH = ((f^g)&e)^g movdqa XTMP1, XTMP2 # XTMP2 = W[-15] ror $2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) add y0, y2 # y2 = S1 + CH add _XFER(%rsp) , y2 # y2 = k + w + S1 + CH movdqa XTMP1, XTMP3 # XTMP3 = W[-15] mov a, y0 # y0 = a add y2, h # h = h + S1 + CH + k + w mov a, y2 # y2 = a pslld $(32-7), XTMP1 # or c, y0 # y0 = a|c add h, d # d = d + h + S1 + CH + k + w and c, y2 # y2 = a&c psrld $7, XTMP2 # and b, y0 # y0 = (a|c)&b add y1, h # h = h + S1 + CH + k + w + S0 por XTMP2, XTMP1 # XTMP1 = W[-15] ror 7 or y2, y0 # y0 = MAJ = (a|c)&b)|(a&c) add y0, h # h = h + S1 + CH + k + w + S0 + MAJ # ROTATE_ARGS # movdqa XTMP3, XTMP2 # XTMP2 = W[-15] mov e, y0 # y0 = e mov a, y1 # y1 = a movdqa XTMP3, XTMP4 # XTMP4 = W[-15] ror $(25-11), y0 # y0 = e >> (25-11) xor e, y0 # y0 = e ^ (e >> (25-11)) mov f, y2 # y2 = f ror $(22-13), y1 # y1 = a >> (22-13) pslld $(32-18), XTMP3 # xor a, y1 # y1 = a ^ (a >> (22-13) ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6)) xor g, y2 # y2 = f^g psrld $18, XTMP2 # ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2)) xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) and e, y2 # y2 = (f^g)&e ror $6, y0 # y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) pxor XTMP3, XTMP1 xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) xor g, y2 # y2 = CH = ((f^g)&e)^g psrld $3, XTMP4 # 
XTMP4 = W[-15] >> 3 add y0, y2 # y2 = S1 + CH add (1*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH ror $2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) pxor XTMP2, XTMP1 # XTMP1 = W[-15] ror 7 ^ W[-15] ror 18 mov a, y0 # y0 = a add y2, h # h = h + S1 + CH + k + w mov a, y2 # y2 = a pxor XTMP4, XTMP1 # XTMP1 = s0 or c, y0 # y0 = a|c add h, d # d = d + h + S1 + CH + k + w and c, y2 # y2 = a&c ## compute low s1 pshufd $0b11111010, X3, XTMP2 # XTMP2 = W[-2] {BBAA} and b, y0 # y0 = (a|c)&b add y1, h # h = h + S1 + CH + k + w + S0 paddd XTMP1, XTMP0 # XTMP0 = W[-16] + W[-7] + s0 or y2, y0 # y0 = MAJ = (a|c)&b)|(a&c) add y0, h # h = h + S1 + CH + k + w + S0 + MAJ ROTATE_ARGS movdqa XTMP2, XTMP3 # XTMP3 = W[-2] {BBAA} mov e, y0 # y0 = e mov a, y1 # y1 = a ror $(25-11), y0 # y0 = e >> (25-11) movdqa XTMP2, XTMP4 # XTMP4 = W[-2] {BBAA} xor e, y0 # y0 = e ^ (e >> (25-11)) ror $(22-13), y1 # y1 = a >> (22-13) mov f, y2 # y2 = f xor a, y1 # y1 = a ^ (a >> (22-13) ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6)) psrlq $17, XTMP2 # XTMP2 = W[-2] ror 17 {xBxA} xor g, y2 # y2 = f^g psrlq $19, XTMP3 # XTMP3 = W[-2] ror 19 {xBxA} xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) and e, y2 # y2 = (f^g)&e psrld $10, XTMP4 # XTMP4 = W[-2] >> 10 {BBAA} ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2)) xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) xor g, y2 # y2 = CH = ((f^g)&e)^g ror $6, y0 # y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) pxor XTMP3, XTMP2 add y0, y2 # y2 = S1 + CH ror $2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) add (2*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH pxor XTMP2, XTMP4 # XTMP4 = s1 {xBxA} mov a, y0 # y0 = a add y2, h # h = h + S1 + CH + k + w mov a, y2 # y2 = a pshufb SHUF_00BA, XTMP4 # XTMP4 = s1 {00BA} or c, y0 # y0 = a|c add h, d # d = d + h + S1 + CH + k + w and c, y2 # y2 = a&c paddd XTMP4, XTMP0 # XTMP0 = {..., ..., W[1], W[0]} and b, y0 # y0 = (a|c)&b add y1, h # h = h + S1 + CH + k + w + S0 ## compute high s1 pshufd $0b01010000, XTMP0, XTMP2 # XTMP2 = W[-2] {BBAA} or y2, y0 # y0 = MAJ = (a|c)&b)|(a&c) add y0, h # h = h + S1 + CH + k + w + S0 + MAJ # ROTATE_ARGS # movdqa XTMP2, XTMP3 # XTMP3 = W[-2] {DDCC} mov e, y0 # y0 = e ror $(25-11), y0 # y0 = e >> (25-11) mov a, y1 # y1 = a movdqa XTMP2, X0 # X0 = W[-2] {DDCC} ror $(22-13), y1 # y1 = a >> (22-13) xor e, y0 # y0 = e ^ (e >> (25-11)) mov f, y2 # y2 = f ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6)) psrlq $17, XTMP2 # XTMP2 = W[-2] ror 17 {xDxC} xor a, y1 # y1 = a ^ (a >> (22-13) xor g, y2 # y2 = f^g psrlq $19, XTMP3 # XTMP3 = W[-2] ror 19 {xDxC} xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25 and e, y2 # y2 = (f^g)&e ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2)) psrld $10, X0 # X0 = W[-2] >> 10 {DDCC} xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22 ror $6, y0 # y0 = S1 = (e>>6) & (e>>11) ^ (e>>2 xor g, y2 # y2 = CH = ((f^g)&e)^g pxor XTMP3, XTMP2 # ror $2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>2 add y0, y2 # y2 = S1 + CH add (3*4 + _XFER)(%rsp), y2 # y2 = k + w + S1 + CH pxor XTMP2, X0 # X0 = s1 {xDxC} mov a, y0 # y0 = a add y2, h # h = h + S1 + CH + k + w mov a, y2 # y2 = a pshufb SHUF_DC00, X0 # X0 = s1 {DC00} or c, y0 # y0 = a|c add h, d # d = d + h + S1 + CH + k + w and c, y2 # y2 = a&c paddd XTMP0, X0 # X0 = {W[3], W[2], W[1], W[0]} and b, y0 # y0 = (a|c)&b add y1, h # h = h + S1 + CH + k + w + S0 or y2, y0 # y0 = MAJ = (a|c)&b)|(a&c) add y0, h # h = h + S1 + CH + k + w + S0 + MAJ ROTATE_ARGS rotate_Xs .endm ## input is [rsp + _XFER + %1 * 4] .macro DO_ROUND round mov e, y0 # y0 = e ror $(25-11), y0 # y0 = e 
>> (25-11) mov a, y1 # y1 = a xor e, y0 # y0 = e ^ (e >> (25-11)) ror $(22-13), y1 # y1 = a >> (22-13) mov f, y2 # y2 = f xor a, y1 # y1 = a ^ (a >> (22-13) ror $(11-6), y0 # y0 = (e >> (11-6)) ^ (e >> (25-6)) xor g, y2 # y2 = f^g xor e, y0 # y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) ror $(13-2), y1 # y1 = (a >> (13-2)) ^ (a >> (22-2)) and e, y2 # y2 = (f^g)&e xor a, y1 # y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) ror $6, y0 # y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) xor g, y2 # y2 = CH = ((f^g)&e)^g add y0, y2 # y2 = S1 + CH ror $2, y1 # y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) offset = \round * 4 + _XFER add offset(%rsp), y2 # y2 = k + w + S1 + CH mov a, y0 # y0 = a add y2, h # h = h + S1 + CH + k + w mov a, y2 # y2 = a or c, y0 # y0 = a|c add h, d # d = d + h + S1 + CH + k + w and c, y2 # y2 = a&c and b, y0 # y0 = (a|c)&b add y1, h # h = h + S1 + CH + k + w + S0 or y2, y0 # y0 = MAJ = (a|c)&b)|(a&c) add y0, h # h = h + S1 + CH + k + w + S0 + MAJ ROTATE_ARGS .endm ######################################################################## ## void sha256_transform_ssse3(void *input_data, UINT32 digest[8], UINT64 num_blks) ## arg 1 : pointer to digest ## arg 2 : pointer to input data ## arg 3 : Num blocks ######################################################################## .text ENTRY(sha256_transform_ssse3) .align 32 pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 pushq %rbp mov %rsp, %rbp subq $STACK_SIZE, %rsp and $~15, %rsp shl $6, NUM_BLKS # convert to bytes jz done_hash add INP, NUM_BLKS mov NUM_BLKS, _INP_END(%rsp) # pointer to end of data ## load initial digest mov 4*0(CTX), a mov 4*1(CTX), b mov 4*2(CTX), c mov 4*3(CTX), d mov 4*4(CTX), e mov 4*5(CTX), f mov 4*6(CTX), g mov 4*7(CTX), h movdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK movdqa _SHUF_00BA(%rip), SHUF_00BA movdqa _SHUF_DC00(%rip), SHUF_DC00 loop0: lea K256(%rip), TBL ## byte swap first 16 dwords COPY_XMM_AND_BSWAP X0, 0*16(INP), BYTE_FLIP_MASK COPY_XMM_AND_BSWAP X1, 1*16(INP), BYTE_FLIP_MASK COPY_XMM_AND_BSWAP X2, 2*16(INP), BYTE_FLIP_MASK COPY_XMM_AND_BSWAP X3, 3*16(INP), BYTE_FLIP_MASK mov INP, _INP(%rsp) ## schedule 48 input dwords, by doing 3 rounds of 16 each mov $3, SRND .align 16 loop1: movdqa (TBL), XFER paddd X0, XFER movdqa XFER, _XFER(%rsp) FOUR_ROUNDS_AND_SCHED movdqa 1*16(TBL), XFER paddd X0, XFER movdqa XFER, _XFER(%rsp) FOUR_ROUNDS_AND_SCHED movdqa 2*16(TBL), XFER paddd X0, XFER movdqa XFER, _XFER(%rsp) FOUR_ROUNDS_AND_SCHED movdqa 3*16(TBL), XFER paddd X0, XFER movdqa XFER, _XFER(%rsp) add $4*16, TBL FOUR_ROUNDS_AND_SCHED sub $1, SRND jne loop1 mov $2, SRND loop2: paddd (TBL), X0 movdqa X0, _XFER(%rsp) DO_ROUND 0 DO_ROUND 1 DO_ROUND 2 DO_ROUND 3 paddd 1*16(TBL), X1 movdqa X1, _XFER(%rsp) add $2*16, TBL DO_ROUND 0 DO_ROUND 1 DO_ROUND 2 DO_ROUND 3 movdqa X2, X0 movdqa X3, X1 sub $1, SRND jne loop2 addm (4*0)(CTX),a addm (4*1)(CTX),b addm (4*2)(CTX),c addm (4*3)(CTX),d addm (4*4)(CTX),e addm (4*5)(CTX),f addm (4*6)(CTX),g addm (4*7)(CTX),h mov _INP(%rsp), INP add $64, INP cmp _INP_END(%rsp), INP jne loop0 done_hash: mov %rbp, %rsp popq %rbp popq %r15 popq %r14 popq %r13 popq %r12 popq %rbx ret ENDPROC(sha256_transform_ssse3) .section .rodata.cst256.K256, "aM", @progbits, 256 .align 64 K256: .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 
0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .section .rodata.cst16.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 16 .align 16 PSHUFFLE_BYTE_FLIP_MASK: .octa 0x0c0d0e0f08090a0b0405060700010203 .section .rodata.cst16._SHUF_00BA, "aM", @progbits, 16 .align 16 # shuffle xBxA -> 00BA _SHUF_00BA: .octa 0xFFFFFFFFFFFFFFFF0b0a090803020100 .section .rodata.cst16._SHUF_DC00, "aM", @progbits, 16 .align 16 # shuffle xDxC -> DC00 _SHUF_DC00: .octa 0x0b0a090803020100FFFFFFFFFFFFFFFF
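The DO_ROUND and FOUR_ROUNDS_AND_SCHED macros above interleave the SSSE3 message schedule with the compression rounds, but their per-register comments spell out the standard SHA-256 round (S1, CH, S0, MAJ, then ROTATE_ARGS). A plain C sketch of one such round follows, with my own helper names and without the vector scheduling, purely for comparison with those comments.

#include <stdint.h>

static inline uint32_t ror32(uint32_t x, unsigned int n)
{
	return (x >> n) | (x << (32 - n));
}

/* One SHA-256 round as described by the DO_ROUND comments:
 * h += S1 + CH + k + w; d += h; h += S0 + MAJ; then a..h rotate
 * (ROTATE_ARGS).  k is the K256 constant, w the scheduled word. */
static void sha256_round(uint32_t st[8], uint32_t k, uint32_t w)
{
	uint32_t a = st[0], b = st[1], c = st[2], d = st[3];
	uint32_t e = st[4], f = st[5], g = st[6], h = st[7];
	uint32_t s1  = ror32(e, 6) ^ ror32(e, 11) ^ ror32(e, 25);
	uint32_t ch  = ((f ^ g) & e) ^ g;	/* same form as the asm comments */
	uint32_t s0  = ror32(a, 2) ^ ror32(a, 13) ^ ror32(a, 22);
	uint32_t maj = ((a | c) & b) | (a & c);	/* same form as the asm comments */
	uint32_t t1  = h + s1 + ch + k + w;

	st[7] = g; st[6] = f; st[5] = e; st[4] = d + t1;
	st[3] = c; st[2] = b; st[1] = a; st[0] = t1 + s0 + maj;
}

The split into "h = h + S1 + CH + k + w", "d = d + h" and "h = h + S0 + MAJ" in the assembly corresponds to computing t1 first, adding it into d, and then folding S0 + MAJ into the new a.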
AirFortressIlikara/LS2K0300-linux-4.19
23,631
arch/x86/crypto/serpent-avx2-asm_64.S
/* * x86_64/AVX2 assembler optimized version of Serpent * * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> * * Based on AVX assembler implementation of Serpent by: * Copyright © 2012 Johannes Goetzfried * <Johannes.Goetzfried@informatik.stud.uni-erlangen.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ #include <linux/linkage.h> #include <asm/frame.h> #include "glue_helper-asm-avx2.S" .file "serpent-avx2-asm_64.S" .section .rodata.cst16.bswap128_mask, "aM", @progbits, 16 .align 16 .Lbswap128_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 .section .rodata.cst16.xts_gf128mul_and_shl1_mask_0, "aM", @progbits, 16 .align 16 .Lxts_gf128mul_and_shl1_mask_0: .byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0 .section .rodata.cst16.xts_gf128mul_and_shl1_mask_1, "aM", @progbits, 16 .align 16 .Lxts_gf128mul_and_shl1_mask_1: .byte 0x0e, 1, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0 .text #define CTX %rdi #define RNOT %ymm0 #define tp %ymm1 #define RA1 %ymm2 #define RA2 %ymm3 #define RB1 %ymm4 #define RB2 %ymm5 #define RC1 %ymm6 #define RC2 %ymm7 #define RD1 %ymm8 #define RD2 %ymm9 #define RE1 %ymm10 #define RE2 %ymm11 #define RK0 %ymm12 #define RK1 %ymm13 #define RK2 %ymm14 #define RK3 %ymm15 #define RK0x %xmm12 #define RK1x %xmm13 #define RK2x %xmm14 #define RK3x %xmm15 #define S0_1(x0, x1, x2, x3, x4) \ vpor x0, x3, tp; \ vpxor x3, x0, x0; \ vpxor x2, x3, x4; \ vpxor RNOT, x4, x4; \ vpxor x1, tp, x3; \ vpand x0, x1, x1; \ vpxor x4, x1, x1; \ vpxor x0, x2, x2; #define S0_2(x0, x1, x2, x3, x4) \ vpxor x3, x0, x0; \ vpor x0, x4, x4; \ vpxor x2, x0, x0; \ vpand x1, x2, x2; \ vpxor x2, x3, x3; \ vpxor RNOT, x1, x1; \ vpxor x4, x2, x2; \ vpxor x2, x1, x1; #define S1_1(x0, x1, x2, x3, x4) \ vpxor x0, x1, tp; \ vpxor x3, x0, x0; \ vpxor RNOT, x3, x3; \ vpand tp, x1, x4; \ vpor tp, x0, x0; \ vpxor x2, x3, x3; \ vpxor x3, x0, x0; \ vpxor x3, tp, x1; #define S1_2(x0, x1, x2, x3, x4) \ vpxor x4, x3, x3; \ vpor x4, x1, x1; \ vpxor x2, x4, x4; \ vpand x0, x2, x2; \ vpxor x1, x2, x2; \ vpor x0, x1, x1; \ vpxor RNOT, x0, x0; \ vpxor x2, x0, x0; \ vpxor x1, x4, x4; #define S2_1(x0, x1, x2, x3, x4) \ vpxor RNOT, x3, x3; \ vpxor x0, x1, x1; \ vpand x2, x0, tp; \ vpxor x3, tp, tp; \ vpor x0, x3, x3; \ vpxor x1, x2, x2; \ vpxor x1, x3, x3; \ vpand tp, x1, x1; #define S2_2(x0, x1, x2, x3, x4) \ vpxor x2, tp, tp; \ vpand x3, x2, x2; \ vpor x1, x3, x3; \ vpxor RNOT, tp, tp; \ vpxor tp, x3, x3; \ vpxor tp, x0, x4; \ vpxor x2, tp, x0; \ vpor x2, x1, x1; #define S3_1(x0, x1, x2, x3, x4) \ vpxor x3, x1, tp; \ vpor x0, x3, x3; \ vpand x0, x1, x4; \ vpxor x2, x0, x0; \ vpxor tp, x2, x2; \ vpand x3, tp, x1; \ vpxor x3, x2, x2; \ vpor x4, x0, x0; \ vpxor x3, x4, x4; #define S3_2(x0, x1, x2, x3, x4) \ vpxor x0, x1, x1; \ vpand x3, x0, x0; \ vpand x4, x3, x3; \ vpxor x2, x3, x3; \ vpor x1, x4, x4; \ vpand x1, x2, x2; \ vpxor x3, x4, x4; \ vpxor x3, x0, x0; \ vpxor x2, x3, x3; #define S4_1(x0, x1, x2, x3, x4) \ vpand x0, x3, tp; \ vpxor x3, x0, x0; \ vpxor x2, tp, tp; \ vpor x3, x2, x2; \ vpxor x1, x0, x0; \ vpxor tp, x3, x4; \ vpor x0, x2, x2; \ vpxor x1, x2, x2; #define S4_2(x0, x1, x2, x3, x4) \ vpand x0, x1, x1; \ vpxor x4, x1, x1; \ vpand x2, x4, x4; \ vpxor tp, x2, x2; \ vpxor x0, x4, x4; \ vpor x1, tp, x3; \ vpxor RNOT, x1, x1; \ vpxor x0, x3, x3; #define S5_1(x0, x1, x2, x3, x4) \ 
vpor x0, x1, tp; \ vpxor tp, x2, x2; \ vpxor RNOT, x3, x3; \ vpxor x0, x1, x4; \ vpxor x2, x0, x0; \ vpand x4, tp, x1; \ vpor x3, x4, x4; \ vpxor x0, x4, x4; #define S5_2(x0, x1, x2, x3, x4) \ vpand x3, x0, x0; \ vpxor x3, x1, x1; \ vpxor x2, x3, x3; \ vpxor x1, x0, x0; \ vpand x4, x2, x2; \ vpxor x2, x1, x1; \ vpand x0, x2, x2; \ vpxor x2, x3, x3; #define S6_1(x0, x1, x2, x3, x4) \ vpxor x0, x3, x3; \ vpxor x2, x1, tp; \ vpxor x0, x2, x2; \ vpand x3, x0, x0; \ vpor x3, tp, tp; \ vpxor RNOT, x1, x4; \ vpxor tp, x0, x0; \ vpxor x2, tp, x1; #define S6_2(x0, x1, x2, x3, x4) \ vpxor x4, x3, x3; \ vpxor x0, x4, x4; \ vpand x0, x2, x2; \ vpxor x1, x4, x4; \ vpxor x3, x2, x2; \ vpand x1, x3, x3; \ vpxor x0, x3, x3; \ vpxor x2, x1, x1; #define S7_1(x0, x1, x2, x3, x4) \ vpxor RNOT, x1, tp; \ vpxor RNOT, x0, x0; \ vpand x2, tp, x1; \ vpxor x3, x1, x1; \ vpor tp, x3, x3; \ vpxor x2, tp, x4; \ vpxor x3, x2, x2; \ vpxor x0, x3, x3; \ vpor x1, x0, x0; #define S7_2(x0, x1, x2, x3, x4) \ vpand x0, x2, x2; \ vpxor x4, x0, x0; \ vpxor x3, x4, x4; \ vpand x0, x3, x3; \ vpxor x1, x4, x4; \ vpxor x4, x2, x2; \ vpxor x1, x3, x3; \ vpor x0, x4, x4; \ vpxor x1, x4, x4; #define SI0_1(x0, x1, x2, x3, x4) \ vpxor x0, x1, x1; \ vpor x1, x3, tp; \ vpxor x1, x3, x4; \ vpxor RNOT, x0, x0; \ vpxor tp, x2, x2; \ vpxor x0, tp, x3; \ vpand x1, x0, x0; \ vpxor x2, x0, x0; #define SI0_2(x0, x1, x2, x3, x4) \ vpand x3, x2, x2; \ vpxor x4, x3, x3; \ vpxor x3, x2, x2; \ vpxor x3, x1, x1; \ vpand x0, x3, x3; \ vpxor x0, x1, x1; \ vpxor x2, x0, x0; \ vpxor x3, x4, x4; #define SI1_1(x0, x1, x2, x3, x4) \ vpxor x3, x1, x1; \ vpxor x2, x0, tp; \ vpxor RNOT, x2, x2; \ vpor x1, x0, x4; \ vpxor x3, x4, x4; \ vpand x1, x3, x3; \ vpxor x2, x1, x1; \ vpand x4, x2, x2; #define SI1_2(x0, x1, x2, x3, x4) \ vpxor x1, x4, x4; \ vpor x3, x1, x1; \ vpxor tp, x3, x3; \ vpxor tp, x2, x2; \ vpor x4, tp, x0; \ vpxor x4, x2, x2; \ vpxor x0, x1, x1; \ vpxor x1, x4, x4; #define SI2_1(x0, x1, x2, x3, x4) \ vpxor x1, x2, x2; \ vpxor RNOT, x3, tp; \ vpor x2, tp, tp; \ vpxor x3, x2, x2; \ vpxor x0, x3, x4; \ vpxor x1, tp, x3; \ vpor x2, x1, x1; \ vpxor x0, x2, x2; #define SI2_2(x0, x1, x2, x3, x4) \ vpxor x4, x1, x1; \ vpor x3, x4, x4; \ vpxor x3, x2, x2; \ vpxor x2, x4, x4; \ vpand x1, x2, x2; \ vpxor x3, x2, x2; \ vpxor x4, x3, x3; \ vpxor x0, x4, x4; #define SI3_1(x0, x1, x2, x3, x4) \ vpxor x1, x2, x2; \ vpand x2, x1, tp; \ vpxor x0, tp, tp; \ vpor x1, x0, x0; \ vpxor x3, x1, x4; \ vpxor x3, x0, x0; \ vpor tp, x3, x3; \ vpxor x2, tp, x1; #define SI3_2(x0, x1, x2, x3, x4) \ vpxor x3, x1, x1; \ vpxor x2, x0, x0; \ vpxor x3, x2, x2; \ vpand x1, x3, x3; \ vpxor x0, x1, x1; \ vpand x2, x0, x0; \ vpxor x3, x4, x4; \ vpxor x0, x3, x3; \ vpxor x1, x0, x0; #define SI4_1(x0, x1, x2, x3, x4) \ vpxor x3, x2, x2; \ vpand x1, x0, tp; \ vpxor x2, tp, tp; \ vpor x3, x2, x2; \ vpxor RNOT, x0, x4; \ vpxor tp, x1, x1; \ vpxor x2, tp, x0; \ vpand x4, x2, x2; #define SI4_2(x0, x1, x2, x3, x4) \ vpxor x0, x2, x2; \ vpor x4, x0, x0; \ vpxor x3, x0, x0; \ vpand x2, x3, x3; \ vpxor x3, x4, x4; \ vpxor x1, x3, x3; \ vpand x0, x1, x1; \ vpxor x1, x4, x4; \ vpxor x3, x0, x0; #define SI5_1(x0, x1, x2, x3, x4) \ vpor x2, x1, tp; \ vpxor x1, x2, x2; \ vpxor x3, tp, tp; \ vpand x1, x3, x3; \ vpxor x3, x2, x2; \ vpor x0, x3, x3; \ vpxor RNOT, x0, x0; \ vpxor x2, x3, x3; \ vpor x0, x2, x2; #define SI5_2(x0, x1, x2, x3, x4) \ vpxor tp, x1, x4; \ vpxor x4, x2, x2; \ vpand x0, x4, x4; \ vpxor tp, x0, x0; \ vpxor x3, tp, x1; \ vpand x2, x0, x0; \ vpxor x3, x2, x2; \ vpxor x2, x0, x0; \ 
vpxor x4, x2, x2; \ vpxor x3, x4, x4; #define SI6_1(x0, x1, x2, x3, x4) \ vpxor x2, x0, x0; \ vpand x3, x0, tp; \ vpxor x3, x2, x2; \ vpxor x2, tp, tp; \ vpxor x1, x3, x3; \ vpor x0, x2, x2; \ vpxor x3, x2, x2; \ vpand tp, x3, x3; #define SI6_2(x0, x1, x2, x3, x4) \ vpxor RNOT, tp, tp; \ vpxor x1, x3, x3; \ vpand x2, x1, x1; \ vpxor tp, x0, x4; \ vpxor x4, x3, x3; \ vpxor x2, x4, x4; \ vpxor x1, tp, x0; \ vpxor x0, x2, x2; #define SI7_1(x0, x1, x2, x3, x4) \ vpand x0, x3, tp; \ vpxor x2, x0, x0; \ vpor x3, x2, x2; \ vpxor x1, x3, x4; \ vpxor RNOT, x0, x0; \ vpor tp, x1, x1; \ vpxor x0, x4, x4; \ vpand x2, x0, x0; \ vpxor x1, x0, x0; #define SI7_2(x0, x1, x2, x3, x4) \ vpand x2, x1, x1; \ vpxor x2, tp, x3; \ vpxor x3, x4, x4; \ vpand x3, x2, x2; \ vpor x0, x3, x3; \ vpxor x4, x1, x1; \ vpxor x4, x3, x3; \ vpand x0, x4, x4; \ vpxor x2, x4, x4; #define get_key(i,j,t) \ vpbroadcastd (4*(i)+(j))*4(CTX), t; #define K2(x0, x1, x2, x3, x4, i) \ get_key(i, 0, RK0); \ get_key(i, 1, RK1); \ get_key(i, 2, RK2); \ get_key(i, 3, RK3); \ vpxor RK0, x0 ## 1, x0 ## 1; \ vpxor RK1, x1 ## 1, x1 ## 1; \ vpxor RK2, x2 ## 1, x2 ## 1; \ vpxor RK3, x3 ## 1, x3 ## 1; \ vpxor RK0, x0 ## 2, x0 ## 2; \ vpxor RK1, x1 ## 2, x1 ## 2; \ vpxor RK2, x2 ## 2, x2 ## 2; \ vpxor RK3, x3 ## 2, x3 ## 2; #define LK2(x0, x1, x2, x3, x4, i) \ vpslld $13, x0 ## 1, x4 ## 1; \ vpsrld $(32 - 13), x0 ## 1, x0 ## 1; \ vpor x4 ## 1, x0 ## 1, x0 ## 1; \ vpxor x0 ## 1, x1 ## 1, x1 ## 1; \ vpslld $3, x2 ## 1, x4 ## 1; \ vpsrld $(32 - 3), x2 ## 1, x2 ## 1; \ vpor x4 ## 1, x2 ## 1, x2 ## 1; \ vpxor x2 ## 1, x1 ## 1, x1 ## 1; \ vpslld $13, x0 ## 2, x4 ## 2; \ vpsrld $(32 - 13), x0 ## 2, x0 ## 2; \ vpor x4 ## 2, x0 ## 2, x0 ## 2; \ vpxor x0 ## 2, x1 ## 2, x1 ## 2; \ vpslld $3, x2 ## 2, x4 ## 2; \ vpsrld $(32 - 3), x2 ## 2, x2 ## 2; \ vpor x4 ## 2, x2 ## 2, x2 ## 2; \ vpxor x2 ## 2, x1 ## 2, x1 ## 2; \ vpslld $1, x1 ## 1, x4 ## 1; \ vpsrld $(32 - 1), x1 ## 1, x1 ## 1; \ vpor x4 ## 1, x1 ## 1, x1 ## 1; \ vpslld $3, x0 ## 1, x4 ## 1; \ vpxor x2 ## 1, x3 ## 1, x3 ## 1; \ vpxor x4 ## 1, x3 ## 1, x3 ## 1; \ get_key(i, 1, RK1); \ vpslld $1, x1 ## 2, x4 ## 2; \ vpsrld $(32 - 1), x1 ## 2, x1 ## 2; \ vpor x4 ## 2, x1 ## 2, x1 ## 2; \ vpslld $3, x0 ## 2, x4 ## 2; \ vpxor x2 ## 2, x3 ## 2, x3 ## 2; \ vpxor x4 ## 2, x3 ## 2, x3 ## 2; \ get_key(i, 3, RK3); \ vpslld $7, x3 ## 1, x4 ## 1; \ vpsrld $(32 - 7), x3 ## 1, x3 ## 1; \ vpor x4 ## 1, x3 ## 1, x3 ## 1; \ vpslld $7, x1 ## 1, x4 ## 1; \ vpxor x1 ## 1, x0 ## 1, x0 ## 1; \ vpxor x3 ## 1, x0 ## 1, x0 ## 1; \ vpxor x3 ## 1, x2 ## 1, x2 ## 1; \ vpxor x4 ## 1, x2 ## 1, x2 ## 1; \ get_key(i, 0, RK0); \ vpslld $7, x3 ## 2, x4 ## 2; \ vpsrld $(32 - 7), x3 ## 2, x3 ## 2; \ vpor x4 ## 2, x3 ## 2, x3 ## 2; \ vpslld $7, x1 ## 2, x4 ## 2; \ vpxor x1 ## 2, x0 ## 2, x0 ## 2; \ vpxor x3 ## 2, x0 ## 2, x0 ## 2; \ vpxor x3 ## 2, x2 ## 2, x2 ## 2; \ vpxor x4 ## 2, x2 ## 2, x2 ## 2; \ get_key(i, 2, RK2); \ vpxor RK1, x1 ## 1, x1 ## 1; \ vpxor RK3, x3 ## 1, x3 ## 1; \ vpslld $5, x0 ## 1, x4 ## 1; \ vpsrld $(32 - 5), x0 ## 1, x0 ## 1; \ vpor x4 ## 1, x0 ## 1, x0 ## 1; \ vpslld $22, x2 ## 1, x4 ## 1; \ vpsrld $(32 - 22), x2 ## 1, x2 ## 1; \ vpor x4 ## 1, x2 ## 1, x2 ## 1; \ vpxor RK0, x0 ## 1, x0 ## 1; \ vpxor RK2, x2 ## 1, x2 ## 1; \ vpxor RK1, x1 ## 2, x1 ## 2; \ vpxor RK3, x3 ## 2, x3 ## 2; \ vpslld $5, x0 ## 2, x4 ## 2; \ vpsrld $(32 - 5), x0 ## 2, x0 ## 2; \ vpor x4 ## 2, x0 ## 2, x0 ## 2; \ vpslld $22, x2 ## 2, x4 ## 2; \ vpsrld $(32 - 22), x2 ## 2, x2 ## 2; \ vpor x4 ## 2, x2 ## 2, x2 ## 2; \ vpxor RK0, x0 ## 2, x0 ## 2; \ 
vpxor RK2, x2 ## 2, x2 ## 2; #define KL2(x0, x1, x2, x3, x4, i) \ vpxor RK0, x0 ## 1, x0 ## 1; \ vpxor RK2, x2 ## 1, x2 ## 1; \ vpsrld $5, x0 ## 1, x4 ## 1; \ vpslld $(32 - 5), x0 ## 1, x0 ## 1; \ vpor x4 ## 1, x0 ## 1, x0 ## 1; \ vpxor RK3, x3 ## 1, x3 ## 1; \ vpxor RK1, x1 ## 1, x1 ## 1; \ vpsrld $22, x2 ## 1, x4 ## 1; \ vpslld $(32 - 22), x2 ## 1, x2 ## 1; \ vpor x4 ## 1, x2 ## 1, x2 ## 1; \ vpxor x3 ## 1, x2 ## 1, x2 ## 1; \ vpxor RK0, x0 ## 2, x0 ## 2; \ vpxor RK2, x2 ## 2, x2 ## 2; \ vpsrld $5, x0 ## 2, x4 ## 2; \ vpslld $(32 - 5), x0 ## 2, x0 ## 2; \ vpor x4 ## 2, x0 ## 2, x0 ## 2; \ vpxor RK3, x3 ## 2, x3 ## 2; \ vpxor RK1, x1 ## 2, x1 ## 2; \ vpsrld $22, x2 ## 2, x4 ## 2; \ vpslld $(32 - 22), x2 ## 2, x2 ## 2; \ vpor x4 ## 2, x2 ## 2, x2 ## 2; \ vpxor x3 ## 2, x2 ## 2, x2 ## 2; \ vpxor x3 ## 1, x0 ## 1, x0 ## 1; \ vpslld $7, x1 ## 1, x4 ## 1; \ vpxor x1 ## 1, x0 ## 1, x0 ## 1; \ vpxor x4 ## 1, x2 ## 1, x2 ## 1; \ vpsrld $1, x1 ## 1, x4 ## 1; \ vpslld $(32 - 1), x1 ## 1, x1 ## 1; \ vpor x4 ## 1, x1 ## 1, x1 ## 1; \ vpxor x3 ## 2, x0 ## 2, x0 ## 2; \ vpslld $7, x1 ## 2, x4 ## 2; \ vpxor x1 ## 2, x0 ## 2, x0 ## 2; \ vpxor x4 ## 2, x2 ## 2, x2 ## 2; \ vpsrld $1, x1 ## 2, x4 ## 2; \ vpslld $(32 - 1), x1 ## 2, x1 ## 2; \ vpor x4 ## 2, x1 ## 2, x1 ## 2; \ vpsrld $7, x3 ## 1, x4 ## 1; \ vpslld $(32 - 7), x3 ## 1, x3 ## 1; \ vpor x4 ## 1, x3 ## 1, x3 ## 1; \ vpxor x0 ## 1, x1 ## 1, x1 ## 1; \ vpslld $3, x0 ## 1, x4 ## 1; \ vpxor x4 ## 1, x3 ## 1, x3 ## 1; \ vpsrld $7, x3 ## 2, x4 ## 2; \ vpslld $(32 - 7), x3 ## 2, x3 ## 2; \ vpor x4 ## 2, x3 ## 2, x3 ## 2; \ vpxor x0 ## 2, x1 ## 2, x1 ## 2; \ vpslld $3, x0 ## 2, x4 ## 2; \ vpxor x4 ## 2, x3 ## 2, x3 ## 2; \ vpsrld $13, x0 ## 1, x4 ## 1; \ vpslld $(32 - 13), x0 ## 1, x0 ## 1; \ vpor x4 ## 1, x0 ## 1, x0 ## 1; \ vpxor x2 ## 1, x1 ## 1, x1 ## 1; \ vpxor x2 ## 1, x3 ## 1, x3 ## 1; \ vpsrld $3, x2 ## 1, x4 ## 1; \ vpslld $(32 - 3), x2 ## 1, x2 ## 1; \ vpor x4 ## 1, x2 ## 1, x2 ## 1; \ vpsrld $13, x0 ## 2, x4 ## 2; \ vpslld $(32 - 13), x0 ## 2, x0 ## 2; \ vpor x4 ## 2, x0 ## 2, x0 ## 2; \ vpxor x2 ## 2, x1 ## 2, x1 ## 2; \ vpxor x2 ## 2, x3 ## 2, x3 ## 2; \ vpsrld $3, x2 ## 2, x4 ## 2; \ vpslld $(32 - 3), x2 ## 2, x2 ## 2; \ vpor x4 ## 2, x2 ## 2, x2 ## 2; #define S(SBOX, x0, x1, x2, x3, x4) \ SBOX ## _1(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \ SBOX ## _2(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \ SBOX ## _1(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2); \ SBOX ## _2(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2); #define SP(SBOX, x0, x1, x2, x3, x4, i) \ get_key(i, 0, RK0); \ SBOX ## _1(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \ get_key(i, 2, RK2); \ SBOX ## _2(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \ get_key(i, 3, RK3); \ SBOX ## _1(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2); \ get_key(i, 1, RK1); \ SBOX ## _2(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2); \ #define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \ vpunpckldq x1, x0, t0; \ vpunpckhdq x1, x0, t2; \ vpunpckldq x3, x2, t1; \ vpunpckhdq x3, x2, x3; \ \ vpunpcklqdq t1, t0, x0; \ vpunpckhqdq t1, t0, x1; \ vpunpcklqdq x3, t2, x2; \ vpunpckhqdq x3, t2, x3; #define read_blocks(x0, x1, x2, x3, t0, t1, t2) \ transpose_4x4(x0, x1, x2, x3, t0, t1, t2) #define write_blocks(x0, x1, x2, x3, t0, t1, t2) \ transpose_4x4(x0, x1, x2, x3, t0, t1, t2) .align 8 __serpent_enc_blk16: /* input: * %rdi: ctx, CTX * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: plaintext * output: * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: ciphertext */ vpcmpeqd RNOT, RNOT, RNOT; read_blocks(RA1, RB1, RC1, RD1, 
RK0, RK1, RK2); read_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2); K2(RA, RB, RC, RD, RE, 0); S(S0, RA, RB, RC, RD, RE); LK2(RC, RB, RD, RA, RE, 1); S(S1, RC, RB, RD, RA, RE); LK2(RE, RD, RA, RC, RB, 2); S(S2, RE, RD, RA, RC, RB); LK2(RB, RD, RE, RC, RA, 3); S(S3, RB, RD, RE, RC, RA); LK2(RC, RA, RD, RB, RE, 4); S(S4, RC, RA, RD, RB, RE); LK2(RA, RD, RB, RE, RC, 5); S(S5, RA, RD, RB, RE, RC); LK2(RC, RA, RD, RE, RB, 6); S(S6, RC, RA, RD, RE, RB); LK2(RD, RB, RA, RE, RC, 7); S(S7, RD, RB, RA, RE, RC); LK2(RC, RA, RE, RD, RB, 8); S(S0, RC, RA, RE, RD, RB); LK2(RE, RA, RD, RC, RB, 9); S(S1, RE, RA, RD, RC, RB); LK2(RB, RD, RC, RE, RA, 10); S(S2, RB, RD, RC, RE, RA); LK2(RA, RD, RB, RE, RC, 11); S(S3, RA, RD, RB, RE, RC); LK2(RE, RC, RD, RA, RB, 12); S(S4, RE, RC, RD, RA, RB); LK2(RC, RD, RA, RB, RE, 13); S(S5, RC, RD, RA, RB, RE); LK2(RE, RC, RD, RB, RA, 14); S(S6, RE, RC, RD, RB, RA); LK2(RD, RA, RC, RB, RE, 15); S(S7, RD, RA, RC, RB, RE); LK2(RE, RC, RB, RD, RA, 16); S(S0, RE, RC, RB, RD, RA); LK2(RB, RC, RD, RE, RA, 17); S(S1, RB, RC, RD, RE, RA); LK2(RA, RD, RE, RB, RC, 18); S(S2, RA, RD, RE, RB, RC); LK2(RC, RD, RA, RB, RE, 19); S(S3, RC, RD, RA, RB, RE); LK2(RB, RE, RD, RC, RA, 20); S(S4, RB, RE, RD, RC, RA); LK2(RE, RD, RC, RA, RB, 21); S(S5, RE, RD, RC, RA, RB); LK2(RB, RE, RD, RA, RC, 22); S(S6, RB, RE, RD, RA, RC); LK2(RD, RC, RE, RA, RB, 23); S(S7, RD, RC, RE, RA, RB); LK2(RB, RE, RA, RD, RC, 24); S(S0, RB, RE, RA, RD, RC); LK2(RA, RE, RD, RB, RC, 25); S(S1, RA, RE, RD, RB, RC); LK2(RC, RD, RB, RA, RE, 26); S(S2, RC, RD, RB, RA, RE); LK2(RE, RD, RC, RA, RB, 27); S(S3, RE, RD, RC, RA, RB); LK2(RA, RB, RD, RE, RC, 28); S(S4, RA, RB, RD, RE, RC); LK2(RB, RD, RE, RC, RA, 29); S(S5, RB, RD, RE, RC, RA); LK2(RA, RB, RD, RC, RE, 30); S(S6, RA, RB, RD, RC, RE); LK2(RD, RE, RB, RC, RA, 31); S(S7, RD, RE, RB, RC, RA); K2(RA, RB, RC, RD, RE, 32); write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2); write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2); ret; ENDPROC(__serpent_enc_blk16) .align 8 __serpent_dec_blk16: /* input: * %rdi: ctx, CTX * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: ciphertext * output: * RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2: plaintext */ vpcmpeqd RNOT, RNOT, RNOT; read_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2); read_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2); K2(RA, RB, RC, RD, RE, 32); SP(SI7, RA, RB, RC, RD, RE, 31); KL2(RB, RD, RA, RE, RC, 31); SP(SI6, RB, RD, RA, RE, RC, 30); KL2(RA, RC, RE, RB, RD, 30); SP(SI5, RA, RC, RE, RB, RD, 29); KL2(RC, RD, RA, RE, RB, 29); SP(SI4, RC, RD, RA, RE, RB, 28); KL2(RC, RA, RB, RE, RD, 28); SP(SI3, RC, RA, RB, RE, RD, 27); KL2(RB, RC, RD, RE, RA, 27); SP(SI2, RB, RC, RD, RE, RA, 26); KL2(RC, RA, RE, RD, RB, 26); SP(SI1, RC, RA, RE, RD, RB, 25); KL2(RB, RA, RE, RD, RC, 25); SP(SI0, RB, RA, RE, RD, RC, 24); KL2(RE, RC, RA, RB, RD, 24); SP(SI7, RE, RC, RA, RB, RD, 23); KL2(RC, RB, RE, RD, RA, 23); SP(SI6, RC, RB, RE, RD, RA, 22); KL2(RE, RA, RD, RC, RB, 22); SP(SI5, RE, RA, RD, RC, RB, 21); KL2(RA, RB, RE, RD, RC, 21); SP(SI4, RA, RB, RE, RD, RC, 20); KL2(RA, RE, RC, RD, RB, 20); SP(SI3, RA, RE, RC, RD, RB, 19); KL2(RC, RA, RB, RD, RE, 19); SP(SI2, RC, RA, RB, RD, RE, 18); KL2(RA, RE, RD, RB, RC, 18); SP(SI1, RA, RE, RD, RB, RC, 17); KL2(RC, RE, RD, RB, RA, 17); SP(SI0, RC, RE, RD, RB, RA, 16); KL2(RD, RA, RE, RC, RB, 16); SP(SI7, RD, RA, RE, RC, RB, 15); KL2(RA, RC, RD, RB, RE, 15); SP(SI6, RA, RC, RD, RB, RE, 14); KL2(RD, RE, RB, RA, RC, 14); SP(SI5, RD, RE, RB, RA, RC, 13); KL2(RE, RC, RD, RB, RA, 13); SP(SI4, RE, RC, RD, RB, RA, 12); KL2(RE, RD, 
RA, RB, RC, 12); SP(SI3, RE, RD, RA, RB, RC, 11); KL2(RA, RE, RC, RB, RD, 11); SP(SI2, RA, RE, RC, RB, RD, 10); KL2(RE, RD, RB, RC, RA, 10); SP(SI1, RE, RD, RB, RC, RA, 9); KL2(RA, RD, RB, RC, RE, 9); SP(SI0, RA, RD, RB, RC, RE, 8); KL2(RB, RE, RD, RA, RC, 8); SP(SI7, RB, RE, RD, RA, RC, 7); KL2(RE, RA, RB, RC, RD, 7); SP(SI6, RE, RA, RB, RC, RD, 6); KL2(RB, RD, RC, RE, RA, 6); SP(SI5, RB, RD, RC, RE, RA, 5); KL2(RD, RA, RB, RC, RE, 5); SP(SI4, RD, RA, RB, RC, RE, 4); KL2(RD, RB, RE, RC, RA, 4); SP(SI3, RD, RB, RE, RC, RA, 3); KL2(RE, RD, RA, RC, RB, 3); SP(SI2, RE, RD, RA, RC, RB, 2); KL2(RD, RB, RC, RA, RE, 2); SP(SI1, RD, RB, RC, RA, RE, 1); KL2(RE, RB, RC, RA, RD, 1); S(SI0, RE, RB, RC, RA, RD); K2(RC, RD, RB, RE, RA, 0); write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2); write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2); ret; ENDPROC(__serpent_dec_blk16) ENTRY(serpent_ecb_enc_16way) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src */ FRAME_BEGIN vzeroupper; load_16way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); call __serpent_enc_blk16; store_16way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); vzeroupper; FRAME_END ret; ENDPROC(serpent_ecb_enc_16way) ENTRY(serpent_ecb_dec_16way) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src */ FRAME_BEGIN vzeroupper; load_16way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); call __serpent_dec_blk16; store_16way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2); vzeroupper; FRAME_END ret; ENDPROC(serpent_ecb_dec_16way) ENTRY(serpent_cbc_dec_16way) /* input: * %rdi: ctx, CTX * %rsi: dst * %rdx: src */ FRAME_BEGIN vzeroupper; load_16way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); call __serpent_dec_blk16; store_cbc_16way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2, RK0); vzeroupper; FRAME_END ret; ENDPROC(serpent_cbc_dec_16way) ENTRY(serpent_ctr_16way) /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: iv (little endian, 128bit) */ FRAME_BEGIN vzeroupper; load_ctr_16way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2, RK0, RK0x, RK1, RK1x, RK2, RK2x, RK3, RK3x, RNOT, tp); call __serpent_enc_blk16; store_ctr_16way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); vzeroupper; FRAME_END ret; ENDPROC(serpent_ctr_16way) ENTRY(serpent_xts_enc_16way) /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) */ FRAME_BEGIN vzeroupper; load_xts_16way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2, RK0, RK0x, RK1, RK1x, RK2, RK2x, RK3, RK3x, RNOT, .Lxts_gf128mul_and_shl1_mask_0, .Lxts_gf128mul_and_shl1_mask_1); call __serpent_enc_blk16; store_xts_16way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); vzeroupper; FRAME_END ret; ENDPROC(serpent_xts_enc_16way) ENTRY(serpent_xts_dec_16way) /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) */ FRAME_BEGIN vzeroupper; load_xts_16way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2, RK0, RK0x, RK1, RK1x, RK2, RK2x, RK3, RK3x, RNOT, .Lxts_gf128mul_and_shl1_mask_0, .Lxts_gf128mul_and_shl1_mask_1); call __serpent_dec_blk16; store_xts_16way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2); vzeroupper; FRAME_END ret; ENDPROC(serpent_xts_dec_16way)
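For reference, the rotation and shift amounts hard-coded in LK2/KL2 above (13, 3, 1, 7, then 5 and 22, plus the left shifts by 3 and 7) are Serpent's linear transform, applied here to sixteen blocks at once and fused with the round-key XOR that K2 performs on its own. A scalar sketch for a single 4-word block is below; the function and helper names are hypothetical, and k is assumed to point at the four subkey words of round i (byte offset 16*i from CTX, matching what get_key broadcasts).

#include <stdint.h>

static inline uint32_t rol32(uint32_t x, unsigned int n)
{
	return (x << n) | (x >> (32 - n));
}

/* What one LK2(..., i) does to a single block: Serpent's linear
 * transform followed by XOR with round key i.  The asm applies this
 * to two groups of eight blocks held in ymm registers. */
static void serpent_lt_and_addkey(uint32_t x[4], const uint32_t k[4])
{
	int i;

	x[0] = rol32(x[0], 13);
	x[2] = rol32(x[2], 3);
	x[1] ^= x[0] ^ x[2];
	x[3] ^= x[2] ^ (x[0] << 3);
	x[1] = rol32(x[1], 1);
	x[3] = rol32(x[3], 7);
	x[0] ^= x[1] ^ x[3];
	x[2] ^= x[3] ^ (x[1] << 7);
	x[0] = rol32(x[0], 5);
	x[2] = rol32(x[2], 22);

	for (i = 0; i < 4; i++)		/* K2()-style key mixing */
		x[i] ^= k[i];
}

KL2 performs the inverse sequence (key XOR first, then the rotations undone in reverse order), which is why the decryption path interleaves SP/KL2 where encryption uses S/LK2.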