repo_id: string (length 5–115)
size: int64 (590–5.01M)
file_path: string (length 4–212)
content: string (length 590–5.01M)
repo_id: AirFortressIlikara/LS2K0300-linux-4.19 | size: 10,512 | file_path: arch/sparc/lib/copy_user.S
/* SPDX-License-Identifier: GPL-2.0 */ /* copy_user.S: Sparc optimized copy_from_user and copy_to_user code. * * Copyright(C) 1995 Linus Torvalds * Copyright(C) 1996 David S. Miller * Copyright(C) 1996 Eddie C. Dost * Copyright(C) 1996,1998 Jakub Jelinek * * derived from: * e-mail between David and Eddie. * * Returns 0 if successful, otherwise count of bytes not copied yet */ #include <asm/ptrace.h> #include <asm/asmmacro.h> #include <asm/page.h> #include <asm/thread_info.h> #include <asm/export.h> /* Work around cpp -rob */ #define ALLOC #alloc #define EXECINSTR #execinstr #define EX(x,y,a,b) \ 98: x,y; \ .section .fixup,ALLOC,EXECINSTR; \ .align 4; \ 99: ba fixupretl; \ a, b, %g3; \ .section __ex_table,ALLOC; \ .align 4; \ .word 98b, 99b; \ .text; \ .align 4 #define EX2(x,y,c,d,e,a,b) \ 98: x,y; \ .section .fixup,ALLOC,EXECINSTR; \ .align 4; \ 99: c, d, e; \ ba fixupretl; \ a, b, %g3; \ .section __ex_table,ALLOC; \ .align 4; \ .word 98b, 99b; \ .text; \ .align 4 #define EXO2(x,y) \ 98: x, y; \ .section __ex_table,ALLOC; \ .align 4; \ .word 98b, 97f; \ .text; \ .align 4 #define EXT(start,end,handler) \ .section __ex_table,ALLOC; \ .align 4; \ .word start, 0, end, handler; \ .text; \ .align 4 /* Please do not change following macros unless you change logic used * in .fixup at the end of this file as well */ /* Both these macros have to start with exactly the same insn */ #define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ ldd [%src + (offset) + 0x00], %t0; \ ldd [%src + (offset) + 0x08], %t2; \ ldd [%src + (offset) + 0x10], %t4; \ ldd [%src + (offset) + 0x18], %t6; \ st %t0, [%dst + (offset) + 0x00]; \ st %t1, [%dst + (offset) + 0x04]; \ st %t2, [%dst + (offset) + 0x08]; \ st %t3, [%dst + (offset) + 0x0c]; \ st %t4, [%dst + (offset) + 0x10]; \ st %t5, [%dst + (offset) + 0x14]; \ st %t6, [%dst + (offset) + 0x18]; \ st %t7, [%dst + (offset) + 0x1c]; #define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ ldd [%src + (offset) + 0x00], %t0; \ ldd [%src + (offset) + 0x08], %t2; \ ldd [%src + (offset) + 0x10], %t4; \ ldd [%src + (offset) + 0x18], %t6; \ std %t0, [%dst + (offset) + 0x00]; \ std %t2, [%dst + (offset) + 0x08]; \ std %t4, [%dst + (offset) + 0x10]; \ std %t6, [%dst + (offset) + 0x18]; #define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \ ldd [%src - (offset) - 0x10], %t0; \ ldd [%src - (offset) - 0x08], %t2; \ st %t0, [%dst - (offset) - 0x10]; \ st %t1, [%dst - (offset) - 0x0c]; \ st %t2, [%dst - (offset) - 0x08]; \ st %t3, [%dst - (offset) - 0x04]; #define MOVE_HALFCHUNK(src, dst, offset, t0, t1, t2, t3) \ lduh [%src + (offset) + 0x00], %t0; \ lduh [%src + (offset) + 0x02], %t1; \ lduh [%src + (offset) + 0x04], %t2; \ lduh [%src + (offset) + 0x06], %t3; \ sth %t0, [%dst + (offset) + 0x00]; \ sth %t1, [%dst + (offset) + 0x02]; \ sth %t2, [%dst + (offset) + 0x04]; \ sth %t3, [%dst + (offset) + 0x06]; #define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \ ldub [%src - (offset) - 0x02], %t0; \ ldub [%src - (offset) - 0x01], %t1; \ stb %t0, [%dst - (offset) - 0x02]; \ stb %t1, [%dst - (offset) - 0x01]; .text .align 4 .globl __copy_user_begin __copy_user_begin: .globl __copy_user EXPORT_SYMBOL(__copy_user) dword_align: andcc %o1, 1, %g0 be 4f andcc %o1, 2, %g0 EXO2(ldub [%o1], %g2) add %o1, 1, %o1 EXO2(stb %g2, [%o0]) sub %o2, 1, %o2 bne 3f add %o0, 1, %o0 EXO2(lduh [%o1], %g2) add %o1, 2, %o1 EXO2(sth %g2, [%o0]) sub %o2, 2, %o2 b 3f add %o0, 2, %o0 4: EXO2(lduh [%o1], %g2) add %o1, 2, %o1 EXO2(sth %g2, [%o0]) sub %o2, 2, %o2 b 3f add %o0, 
2, %o0 __copy_user: /* %o0=dst %o1=src %o2=len */ xor %o0, %o1, %o4 1: andcc %o4, 3, %o5 2: bne cannot_optimize cmp %o2, 15 bleu short_aligned_end andcc %o1, 3, %g0 bne dword_align 3: andcc %o1, 4, %g0 be 2f mov %o2, %g1 EXO2(ld [%o1], %o4) sub %g1, 4, %g1 EXO2(st %o4, [%o0]) add %o1, 4, %o1 add %o0, 4, %o0 2: andcc %g1, 0xffffff80, %g7 be 3f andcc %o0, 4, %g0 be ldd_std + 4 5: MOVE_BIGCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5) MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5) MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5) MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5) 80: EXT(5b, 80b, 50f) subcc %g7, 128, %g7 add %o1, 128, %o1 bne 5b add %o0, 128, %o0 3: andcc %g1, 0x70, %g7 be copy_user_table_end andcc %g1, 8, %g0 sethi %hi(copy_user_table_end), %o5 srl %g7, 1, %o4 add %g7, %o4, %o4 add %o1, %g7, %o1 sub %o5, %o4, %o5 jmpl %o5 + %lo(copy_user_table_end), %g0 add %o0, %g7, %o0 copy_user_table: MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5) MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5) MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5) MOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g4, g5) MOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g4, g5) MOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5) MOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5) copy_user_table_end: EXT(copy_user_table, copy_user_table_end, 51f) be copy_user_last7 andcc %g1, 4, %g0 EX(ldd [%o1], %g2, and %g1, 0xf) add %o0, 8, %o0 add %o1, 8, %o1 EX(st %g2, [%o0 - 0x08], and %g1, 0xf) EX2(st %g3, [%o0 - 0x04], and %g1, 0xf, %g1, sub %g1, 4) copy_user_last7: be 1f andcc %g1, 2, %g0 EX(ld [%o1], %g2, and %g1, 7) add %o1, 4, %o1 EX(st %g2, [%o0], and %g1, 7) add %o0, 4, %o0 1: be 1f andcc %g1, 1, %g0 EX(lduh [%o1], %g2, and %g1, 3) add %o1, 2, %o1 EX(sth %g2, [%o0], and %g1, 3) add %o0, 2, %o0 1: be 1f nop EX(ldub [%o1], %g2, add %g0, 1) EX(stb %g2, [%o0], add %g0, 1) 1: retl clr %o0 ldd_std: MOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5) MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5) MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5) MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5) 81: EXT(ldd_std, 81b, 52f) subcc %g7, 128, %g7 add %o1, 128, %o1 bne ldd_std add %o0, 128, %o0 andcc %g1, 0x70, %g7 be copy_user_table_end andcc %g1, 8, %g0 sethi %hi(copy_user_table_end), %o5 srl %g7, 1, %o4 add %g7, %o4, %o4 add %o1, %g7, %o1 sub %o5, %o4, %o5 jmpl %o5 + %lo(copy_user_table_end), %g0 add %o0, %g7, %o0 cannot_optimize: bleu short_end cmp %o5, 2 bne byte_chunk and %o2, 0xfffffff0, %o3 andcc %o1, 1, %g0 be 10f nop EXO2(ldub [%o1], %g2) add %o1, 1, %o1 EXO2(stb %g2, [%o0]) sub %o2, 1, %o2 andcc %o2, 0xfffffff0, %o3 be short_end add %o0, 1, %o0 10: MOVE_HALFCHUNK(o1, o0, 0x00, g2, g3, g4, g5) MOVE_HALFCHUNK(o1, o0, 0x08, g2, g3, g4, g5) 82: EXT(10b, 82b, 53f) subcc %o3, 0x10, %o3 add %o1, 0x10, %o1 bne 10b add %o0, 0x10, %o0 b 2f and %o2, 0xe, %o3 byte_chunk: MOVE_SHORTCHUNK(o1, o0, -0x02, g2, g3) MOVE_SHORTCHUNK(o1, o0, -0x04, g2, g3) MOVE_SHORTCHUNK(o1, o0, -0x06, g2, g3) MOVE_SHORTCHUNK(o1, o0, -0x08, g2, g3) MOVE_SHORTCHUNK(o1, o0, -0x0a, g2, g3) MOVE_SHORTCHUNK(o1, o0, -0x0c, g2, g3) MOVE_SHORTCHUNK(o1, o0, -0x0e, g2, g3) MOVE_SHORTCHUNK(o1, o0, -0x10, g2, g3) 83: EXT(byte_chunk, 83b, 54f) subcc %o3, 0x10, %o3 add %o1, 0x10, %o1 bne byte_chunk add %o0, 0x10, %o0 short_end: and %o2, 0xe, %o3 2: sethi %hi(short_table_end), %o5 sll %o3, 3, %o4 add %o0, %o3, %o0 sub %o5, %o4, %o5 add %o1, %o3, %o1 jmpl %o5 + %lo(short_table_end), %g0 andcc %o2, 1, %g0 84: 
MOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3) MOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3) MOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3) MOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3) MOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3) MOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3) MOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3) short_table_end: EXT(84b, short_table_end, 55f) be 1f nop EX(ldub [%o1], %g2, add %g0, 1) EX(stb %g2, [%o0], add %g0, 1) 1: retl clr %o0 short_aligned_end: bne short_end andcc %o2, 8, %g0 be 1f andcc %o2, 4, %g0 EXO2(ld [%o1 + 0x00], %g2) EXO2(ld [%o1 + 0x04], %g3) add %o1, 8, %o1 EXO2(st %g2, [%o0 + 0x00]) EX(st %g3, [%o0 + 0x04], sub %o2, 4) add %o0, 8, %o0 1: b copy_user_last7 mov %o2, %g1 .section .fixup,#alloc,#execinstr .align 4 97: mov %o2, %g3 fixupretl: retl mov %g3, %o0 /* exception routine sets %g2 to (broken_insn - first_insn)>>2 */ 50: /* This magic counts how many bytes are left when crash in MOVE_BIGCHUNK * happens. This is derived from the amount ldd reads, st stores, etc. * x = g2 % 12; * g3 = g1 + g7 - ((g2 / 12) * 32 + (x < 4) ? 0 : (x - 4) * 4); * o0 += (g2 / 12) * 32; */ cmp %g2, 12 add %o0, %g7, %o0 bcs 1f cmp %g2, 24 bcs 2f cmp %g2, 36 bcs 3f nop sub %g2, 12, %g2 sub %g7, 32, %g7 3: sub %g2, 12, %g2 sub %g7, 32, %g7 2: sub %g2, 12, %g2 sub %g7, 32, %g7 1: cmp %g2, 4 bcs,a 60f clr %g2 sub %g2, 4, %g2 sll %g2, 2, %g2 60: and %g1, 0x7f, %g3 sub %o0, %g7, %o0 add %g3, %g7, %g3 ba fixupretl sub %g3, %g2, %g3 51: /* i = 41 - g2; j = i % 6; * g3 = (g1 & 15) + (i / 6) * 16 + (j < 4) ? (j + 1) * 4 : 16; * o0 -= (i / 6) * 16 + 16; */ neg %g2 and %g1, 0xf, %g1 add %g2, 41, %g2 add %o0, %g1, %o0 1: cmp %g2, 6 bcs,a 2f cmp %g2, 4 add %g1, 16, %g1 b 1b sub %g2, 6, %g2 2: bcc,a 2f mov 16, %g2 inc %g2 sll %g2, 2, %g2 2: add %g1, %g2, %g3 ba fixupretl sub %o0, %g3, %o0 52: /* g3 = g1 + g7 - (g2 / 8) * 32 + (g2 & 4) ? (g2 & 3) * 8 : 0; o0 += (g2 / 8) * 32 */ andn %g2, 7, %g4 add %o0, %g7, %o0 andcc %g2, 4, %g0 and %g2, 3, %g2 sll %g4, 2, %g4 sll %g2, 3, %g2 bne 60b sub %g7, %g4, %g7 ba 60b clr %g2 53: /* g3 = o3 + (o2 & 15) - (g2 & 8) - (g2 & 4) ? (g2 & 3) * 2 : 0; o0 += (g2 & 8) */ and %g2, 3, %g4 andcc %g2, 4, %g0 and %g2, 8, %g2 sll %g4, 1, %g4 be 1f add %o0, %g2, %o0 add %g2, %g4, %g2 1: and %o2, 0xf, %g3 add %g3, %o3, %g3 ba fixupretl sub %g3, %g2, %g3 54: /* g3 = o3 + (o2 & 15) - (g2 / 4) * 2 - (g2 & 2) ? (g2 & 1) : 0; o0 += (g2 / 4) * 2 */ srl %g2, 2, %o4 and %g2, 1, %o5 srl %g2, 1, %g2 add %o4, %o4, %o4 and %o5, %g2, %o5 and %o2, 0xf, %o2 add %o0, %o4, %o0 sub %o3, %o5, %o3 sub %o2, %o4, %o2 ba fixupretl add %o2, %o3, %g3 55: /* i = 27 - g2; g3 = (o2 & 1) + i / 4 * 2 + !(i & 3); o0 -= i / 4 * 2 + 1 */ neg %g2 and %o2, 1, %o2 add %g2, 27, %g2 srl %g2, 2, %o5 andcc %g2, 3, %g0 mov 1, %g2 add %o5, %o5, %o5 be,a 1f clr %g2 1: add %g2, %o5, %g3 sub %o0, %g3, %o0 ba fixupretl add %g3, %o2, %g3 .globl __copy_user_end __copy_user_end:
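The header comment of copy_user.S above fixes the calling contract: the routine returns 0 on success, otherwise the count of bytes not yet copied, and every EX()/EX2()/EXO2() fixup exists only to reconstruct that residual count when a user access faults mid-copy. A minimal C reference model of that contract (hypothetical names; the fault callback stands in for the __ex_table machinery and is not the kernel API):

```c
#include <stddef.h>

/*
 * Reference model of the __copy_user contract: copy up to len bytes and
 * return 0 on success, or the number of bytes NOT copied if an access
 * "faults". copy_byte() is a hypothetical stand-in for a single user
 * access that may fail; the real code uses __ex_table fixup entries.
 */
typedef int (*copy_byte_fn)(unsigned char *dst, const unsigned char *src);

static size_t copy_user_model(unsigned char *dst, const unsigned char *src,
                              size_t len, copy_byte_fn copy_byte)
{
    size_t done = 0;

    while (done < len) {
        if (copy_byte(dst + done, src + done) != 0)
            return len - done;      /* bytes not copied yet */
        done++;
    }
    return 0;                       /* full copy succeeded */
}
```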
repo_id: AirFortressIlikara/LS2K0300-linux-4.19 | size: 1,884 | file_path: arch/sparc/lib/VISsave.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * VISsave.S: Code for saving FPU register state for * VIS routines. One should not call this directly, * but use macros provided in <asm/visasm.h>. * * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz) */ #include <linux/linkage.h> #include <asm/asi.h> #include <asm/page.h> #include <asm/ptrace.h> #include <asm/visasm.h> #include <asm/thread_info.h> #include <asm/export.h> /* On entry: %o5=current FPRS value, %g7 is callers address */ /* May clobber %o5, %g1, %g2, %g3, %g7, %icc, %xcc */ /* Nothing special need be done here to handle pre-emption, this * FPU save/restore mechanism is already preemption safe. */ .text .align 32 ENTRY(VISenter) ldub [%g6 + TI_FPDEPTH], %g1 brnz,a,pn %g1, 1f cmp %g1, 1 stb %g0, [%g6 + TI_FPSAVED] stx %fsr, [%g6 + TI_XFSR] 9: jmpl %g7 + %g0, %g0 nop 1: bne,pn %icc, 2f srl %g1, 1, %g1 vis1: ldub [%g6 + TI_FPSAVED], %g3 stx %fsr, [%g6 + TI_XFSR] or %g3, %o5, %g3 stb %g3, [%g6 + TI_FPSAVED] rd %gsr, %g3 clr %g1 ba,pt %xcc, 3f stx %g3, [%g6 + TI_GSR] 2: add %g6, %g1, %g3 mov FPRS_DU | FPRS_DL | FPRS_FEF, %o5 sll %g1, 3, %g1 stb %o5, [%g3 + TI_FPSAVED] rd %gsr, %g2 add %g6, %g1, %g3 stx %g2, [%g3 + TI_GSR] add %g6, %g1, %g2 stx %fsr, [%g2 + TI_XFSR] sll %g1, 5, %g1 3: andcc %o5, FPRS_DL|FPRS_DU, %g0 be,pn %icc, 9b add %g6, TI_FPREGS, %g2 andcc %o5, FPRS_DL, %g0 be,pn %icc, 4f add %g6, TI_FPREGS+0x40, %g3 membar #Sync stda %f0, [%g2 + %g1] ASI_BLK_P stda %f16, [%g3 + %g1] ASI_BLK_P membar #Sync andcc %o5, FPRS_DU, %g0 be,pn %icc, 5f 4: add %g1, 128, %g1 membar #Sync stda %f32, [%g2 + %g1] ASI_BLK_P stda %f48, [%g3 + %g1] ASI_BLK_P 5: membar #Sync ba,pt %xcc, 80f nop .align 32 80: jmpl %g7 + %g0, %g0 nop ENDPROC(VISenter) EXPORT_SYMBOL(VISenter)
repo_id: AirFortressIlikara/LS2K0300-linux-4.19 | size: 7,136 | file_path: arch/sparc/lib/csum_copy.S
/* SPDX-License-Identifier: GPL-2.0 */ /* csum_copy.S: Checksum+copy code for sparc64 * * Copyright (C) 2005 David S. Miller <davem@davemloft.net> */ #include <asm/export.h> #ifdef __KERNEL__ #define GLOBAL_SPARE %g7 #else #define GLOBAL_SPARE %g5 #endif #ifndef EX_LD #define EX_LD(x) x #endif #ifndef EX_ST #define EX_ST(x) x #endif #ifndef EX_RETVAL #define EX_RETVAL(x) x #endif #ifndef LOAD #define LOAD(type,addr,dest) type [addr], dest #endif #ifndef STORE #define STORE(type,src,addr) type src, [addr] #endif #ifndef FUNC_NAME #define FUNC_NAME csum_partial_copy_nocheck #endif .register %g2, #scratch .register %g3, #scratch .text 90: /* We checked for zero length already, so there must be * at least one byte. */ be,pt %icc, 1f nop EX_LD(LOAD(ldub, %o0 + 0x00, %o4)) add %o0, 1, %o0 sub %o2, 1, %o2 EX_ST(STORE(stb, %o4, %o1 + 0x00)) add %o1, 1, %o1 1: andcc %o0, 0x2, %g0 be,pn %icc, 80f cmp %o2, 2 blu,pn %icc, 60f nop EX_LD(LOAD(lduh, %o0 + 0x00, %o5)) add %o0, 2, %o0 sub %o2, 2, %o2 EX_ST(STORE(sth, %o5, %o1 + 0x00)) add %o1, 2, %o1 ba,pt %xcc, 80f add %o5, %o4, %o4 .globl FUNC_NAME .type FUNC_NAME,#function EXPORT_SYMBOL(FUNC_NAME) FUNC_NAME: /* %o0=src, %o1=dst, %o2=len, %o3=sum */ LOAD(prefetch, %o0 + 0x000, #n_reads) xor %o0, %o1, %g1 clr %o4 andcc %g1, 0x3, %g0 bne,pn %icc, 95f LOAD(prefetch, %o0 + 0x040, #n_reads) brz,pn %o2, 70f andcc %o0, 0x3, %g0 /* We "remember" whether the lowest bit in the address * was set in GLOBAL_SPARE. Because if it is, we have to swap * upper and lower 8 bit fields of the sum we calculate. */ bne,pn %icc, 90b andcc %o0, 0x1, GLOBAL_SPARE 80: LOAD(prefetch, %o0 + 0x080, #n_reads) andncc %o2, 0x3f, %g3 LOAD(prefetch, %o0 + 0x0c0, #n_reads) sub %o2, %g3, %o2 brz,pn %g3, 2f LOAD(prefetch, %o0 + 0x100, #n_reads) /* So that we don't need to use the non-pairing * add-with-carry instructions we accumulate 32-bit * values into a 64-bit register. At the end of the * loop we fold it down to 32-bits and so on. 
*/ ba,pt %xcc, 1f LOAD(prefetch, %o0 + 0x140, #n_reads) .align 32 1: EX_LD(LOAD(lduw, %o0 + 0x00, %o5)) EX_LD(LOAD(lduw, %o0 + 0x04, %g1)) EX_LD(LOAD(lduw, %o0 + 0x08, %g2)) add %o4, %o5, %o4 EX_ST(STORE(stw, %o5, %o1 + 0x00)) EX_LD(LOAD(lduw, %o0 + 0x0c, %o5)) add %o4, %g1, %o4 EX_ST(STORE(stw, %g1, %o1 + 0x04)) EX_LD(LOAD(lduw, %o0 + 0x10, %g1)) add %o4, %g2, %o4 EX_ST(STORE(stw, %g2, %o1 + 0x08)) EX_LD(LOAD(lduw, %o0 + 0x14, %g2)) add %o4, %o5, %o4 EX_ST(STORE(stw, %o5, %o1 + 0x0c)) EX_LD(LOAD(lduw, %o0 + 0x18, %o5)) add %o4, %g1, %o4 EX_ST(STORE(stw, %g1, %o1 + 0x10)) EX_LD(LOAD(lduw, %o0 + 0x1c, %g1)) add %o4, %g2, %o4 EX_ST(STORE(stw, %g2, %o1 + 0x14)) EX_LD(LOAD(lduw, %o0 + 0x20, %g2)) add %o4, %o5, %o4 EX_ST(STORE(stw, %o5, %o1 + 0x18)) EX_LD(LOAD(lduw, %o0 + 0x24, %o5)) add %o4, %g1, %o4 EX_ST(STORE(stw, %g1, %o1 + 0x1c)) EX_LD(LOAD(lduw, %o0 + 0x28, %g1)) add %o4, %g2, %o4 EX_ST(STORE(stw, %g2, %o1 + 0x20)) EX_LD(LOAD(lduw, %o0 + 0x2c, %g2)) add %o4, %o5, %o4 EX_ST(STORE(stw, %o5, %o1 + 0x24)) EX_LD(LOAD(lduw, %o0 + 0x30, %o5)) add %o4, %g1, %o4 EX_ST(STORE(stw, %g1, %o1 + 0x28)) EX_LD(LOAD(lduw, %o0 + 0x34, %g1)) add %o4, %g2, %o4 EX_ST(STORE(stw, %g2, %o1 + 0x2c)) EX_LD(LOAD(lduw, %o0 + 0x38, %g2)) add %o4, %o5, %o4 EX_ST(STORE(stw, %o5, %o1 + 0x30)) EX_LD(LOAD(lduw, %o0 + 0x3c, %o5)) add %o4, %g1, %o4 EX_ST(STORE(stw, %g1, %o1 + 0x34)) LOAD(prefetch, %o0 + 0x180, #n_reads) add %o4, %g2, %o4 EX_ST(STORE(stw, %g2, %o1 + 0x38)) subcc %g3, 0x40, %g3 add %o0, 0x40, %o0 add %o4, %o5, %o4 EX_ST(STORE(stw, %o5, %o1 + 0x3c)) bne,pt %icc, 1b add %o1, 0x40, %o1 2: and %o2, 0x3c, %g3 brz,pn %g3, 2f sub %o2, %g3, %o2 1: EX_LD(LOAD(lduw, %o0 + 0x00, %o5)) subcc %g3, 0x4, %g3 add %o0, 0x4, %o0 add %o4, %o5, %o4 EX_ST(STORE(stw, %o5, %o1 + 0x00)) bne,pt %icc, 1b add %o1, 0x4, %o1 2: /* fold 64-->32 */ srlx %o4, 32, %o5 srl %o4, 0, %o4 add %o4, %o5, %o4 srlx %o4, 32, %o5 srl %o4, 0, %o4 add %o4, %o5, %o4 /* fold 32-->16 */ sethi %hi(0xffff0000), %g1 srl %o4, 16, %o5 andn %o4, %g1, %g2 add %o5, %g2, %o4 srl %o4, 16, %o5 andn %o4, %g1, %g2 add %o5, %g2, %o4 60: /* %o4 has the 16-bit sum we have calculated so-far. */ cmp %o2, 2 blu,pt %icc, 1f nop EX_LD(LOAD(lduh, %o0 + 0x00, %o5)) sub %o2, 2, %o2 add %o0, 2, %o0 add %o4, %o5, %o4 EX_ST(STORE(sth, %o5, %o1 + 0x00)) add %o1, 0x2, %o1 1: brz,pt %o2, 1f nop EX_LD(LOAD(ldub, %o0 + 0x00, %o5)) sub %o2, 1, %o2 add %o0, 1, %o0 EX_ST(STORE(stb, %o5, %o1 + 0x00)) sllx %o5, 8, %o5 add %o1, 1, %o1 add %o4, %o5, %o4 1: /* fold 32-->16 */ sethi %hi(0xffff0000), %g1 srl %o4, 16, %o5 andn %o4, %g1, %g2 add %o5, %g2, %o4 srl %o4, 16, %o5 andn %o4, %g1, %g2 add %o5, %g2, %o4 1: brz,pt GLOBAL_SPARE, 1f nop /* We started with an odd byte, byte-swap the result. 
*/ srl %o4, 8, %o5 and %o4, 0xff, %g1 sll %g1, 8, %g1 or %o5, %g1, %o4 1: addcc %o3, %o4, %o3 addc %g0, %o3, %o3 70: retl srl %o3, 0, %o0 95: mov 0, GLOBAL_SPARE brlez,pn %o2, 4f andcc %o0, 1, %o5 be,a,pt %icc, 1f srl %o2, 1, %g1 sub %o2, 1, %o2 EX_LD(LOAD(ldub, %o0, GLOBAL_SPARE)) add %o0, 1, %o0 EX_ST(STORE(stb, GLOBAL_SPARE, %o1)) srl %o2, 1, %g1 add %o1, 1, %o1 1: brz,a,pn %g1, 3f andcc %o2, 1, %g0 andcc %o0, 2, %g0 be,a,pt %icc, 1f srl %g1, 1, %g1 EX_LD(LOAD(lduh, %o0, %o4)) sub %o2, 2, %o2 srl %o4, 8, %g2 sub %g1, 1, %g1 EX_ST(STORE(stb, %g2, %o1)) add %o4, GLOBAL_SPARE, GLOBAL_SPARE EX_ST(STORE(stb, %o4, %o1 + 1)) add %o0, 2, %o0 srl %g1, 1, %g1 add %o1, 2, %o1 1: brz,a,pn %g1, 2f andcc %o2, 2, %g0 EX_LD(LOAD(lduw, %o0, %o4)) 5: srl %o4, 24, %g2 srl %o4, 16, %g3 EX_ST(STORE(stb, %g2, %o1)) srl %o4, 8, %g2 EX_ST(STORE(stb, %g3, %o1 + 1)) add %o0, 4, %o0 EX_ST(STORE(stb, %g2, %o1 + 2)) addcc %o4, GLOBAL_SPARE, GLOBAL_SPARE EX_ST(STORE(stb, %o4, %o1 + 3)) addc GLOBAL_SPARE, %g0, GLOBAL_SPARE add %o1, 4, %o1 subcc %g1, 1, %g1 bne,a,pt %icc, 5b EX_LD(LOAD(lduw, %o0, %o4)) sll GLOBAL_SPARE, 16, %g2 srl GLOBAL_SPARE, 16, GLOBAL_SPARE srl %g2, 16, %g2 andcc %o2, 2, %g0 add %g2, GLOBAL_SPARE, GLOBAL_SPARE 2: be,a,pt %icc, 3f andcc %o2, 1, %g0 EX_LD(LOAD(lduh, %o0, %o4)) andcc %o2, 1, %g0 srl %o4, 8, %g2 add %o0, 2, %o0 EX_ST(STORE(stb, %g2, %o1)) add GLOBAL_SPARE, %o4, GLOBAL_SPARE EX_ST(STORE(stb, %o4, %o1 + 1)) add %o1, 2, %o1 3: be,a,pt %icc, 1f sll GLOBAL_SPARE, 16, %o4 EX_LD(LOAD(ldub, %o0, %g2)) sll %g2, 8, %o4 EX_ST(STORE(stb, %g2, %o1)) add GLOBAL_SPARE, %o4, GLOBAL_SPARE sll GLOBAL_SPARE, 16, %o4 1: addcc %o4, GLOBAL_SPARE, GLOBAL_SPARE srl GLOBAL_SPARE, 16, %o4 addc %g0, %o4, GLOBAL_SPARE brz,pt %o5, 4f srl GLOBAL_SPARE, 8, %o4 and GLOBAL_SPARE, 0xff, %g2 and %o4, 0xff, %o4 sll %g2, 8, %g2 or %g2, %o4, GLOBAL_SPARE 4: addcc %o3, GLOBAL_SPARE, %o3 addc %g0, %o3, %o0 retl srl %o0, 0, %o0 .size FUNC_NAME, .-FUNC_NAME
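The copy loop above accumulates 32-bit words into a 64-bit register, then folds the result ("fold 64-->32", "fold 32-->16") and byte-swaps it when the source buffer started on an odd address (the flag remembered in GLOBAL_SPARE). A small C sketch of just that folding step, assuming an already-computed 64-bit accumulator:

```c
#include <stdint.h>

/*
 * Fold a 64-bit accumulator of 32-bit word sums down to a 16-bit partial
 * checksum, mirroring the "fold 64-->32" and "fold 32-->16" sequences
 * above. 'odd_start' models GLOBAL_SPARE: if the source began on an odd
 * address, the two halves of the 16-bit result must be swapped.
 */
static uint16_t fold_csum(uint64_t sum, int odd_start)
{
    /* fold 64 -> 32: add high and low halves, twice to absorb the carry */
    sum = (sum >> 32) + (uint32_t)sum;
    sum = (sum >> 32) + (uint32_t)sum;

    /* fold 32 -> 16: add high and low 16-bit halves, twice */
    sum = (sum >> 16) + (sum & 0xffff);
    sum = (sum >> 16) + (sum & 0xffff);

    if (odd_start)                  /* started on an odd byte: swap halves */
        sum = ((sum & 0xff) << 8) | ((sum >> 8) & 0xff);

    return (uint16_t)sum;
}
```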
repo_id: AirFortressIlikara/LS2K0300-linux-4.19 | size: 2,633 | file_path: arch/sparc/lib/mcount.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com) * * This file implements mcount(), which is used to collect profiling data. * This can also be tweaked for kernel stack overflow detection. */ #include <linux/linkage.h> #include <asm/export.h> /* * This is the main variant and is called by C code. GCC's -pg option * automatically instruments every C function with a call to this. */ .text .align 32 .globl _mcount .type _mcount,#function EXPORT_SYMBOL(_mcount) .globl mcount .type mcount,#function _mcount: mcount: #ifdef CONFIG_FUNCTION_TRACER #ifdef CONFIG_DYNAMIC_FTRACE /* Do nothing, the retl/nop below is all we need. */ #else sethi %hi(ftrace_trace_function), %g1 sethi %hi(ftrace_stub), %g2 ldx [%g1 + %lo(ftrace_trace_function)], %g1 or %g2, %lo(ftrace_stub), %g2 cmp %g1, %g2 be,pn %icc, 1f mov %i7, %g3 save %sp, -176, %sp mov %g3, %o1 jmpl %g1, %o7 mov %i7, %o0 ret restore /* not reached */ 1: #ifdef CONFIG_FUNCTION_GRAPH_TRACER sethi %hi(ftrace_graph_return), %g1 ldx [%g1 + %lo(ftrace_graph_return)], %g3 cmp %g2, %g3 bne,pn %xcc, 5f sethi %hi(ftrace_graph_entry_stub), %g2 sethi %hi(ftrace_graph_entry), %g1 or %g2, %lo(ftrace_graph_entry_stub), %g2 ldx [%g1 + %lo(ftrace_graph_entry)], %g1 cmp %g1, %g2 be,pt %xcc, 2f nop 5: mov %i7, %g2 mov %fp, %g3 save %sp, -176, %sp mov %g2, %l0 ba,pt %xcc, ftrace_graph_caller mov %g3, %l1 #endif 2: #endif #endif retl nop .size _mcount,.-_mcount .size mcount,.-mcount #ifdef CONFIG_FUNCTION_TRACER .globl ftrace_stub .type ftrace_stub,#function ftrace_stub: retl nop .size ftrace_stub,.-ftrace_stub #ifdef CONFIG_DYNAMIC_FTRACE .globl ftrace_caller .type ftrace_caller,#function ftrace_caller: mov %i7, %g2 mov %fp, %g3 save %sp, -176, %sp mov %g2, %o1 mov %g2, %l0 mov %g3, %l1 .globl ftrace_call ftrace_call: call ftrace_stub mov %i7, %o0 #ifdef CONFIG_FUNCTION_GRAPH_TRACER .globl ftrace_graph_call ftrace_graph_call: call ftrace_stub nop #endif ret restore #ifdef CONFIG_FUNCTION_GRAPH_TRACER .size ftrace_graph_call,.-ftrace_graph_call #endif .size ftrace_call,.-ftrace_call .size ftrace_caller,.-ftrace_caller #endif #endif #ifdef CONFIG_FUNCTION_GRAPH_TRACER ENTRY(ftrace_graph_caller) mov %l0, %o0 mov %i7, %o1 call prepare_ftrace_return mov %l1, %o2 ret restore %o0, -8, %i7 END(ftrace_graph_caller) ENTRY(return_to_handler) save %sp, -176, %sp call ftrace_return_to_handler mov %fp, %o0 jmpl %o0 + 8, %g0 restore END(return_to_handler) #endif
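In the non-CONFIG_DYNAMIC_FTRACE path above, _mcount loads the global ftrace_trace_function pointer, compares it against ftrace_stub, and only builds a register window and calls out when a real tracer is installed. A simplified C sketch of that dispatch; the two-argument callback signature and all names here are illustrative only, and the real ftrace callback takes additional arguments:

```c
/*
 * Simplified model of the dispatch in _mcount: call the registered tracer
 * only if it is not the no-op stub, otherwise fall through (the retl/nop
 * fast path). Names and signature are illustrative, not the kernel's.
 */
typedef void (*trace_fn_t)(unsigned long ip, unsigned long parent_ip);

static void trace_stub(unsigned long ip, unsigned long parent_ip) { }

static trace_fn_t trace_function = trace_stub;

static inline void mcount_model(unsigned long ip, unsigned long parent_ip)
{
    if (trace_function != trace_stub)
        trace_function(ip, parent_ip);   /* a tracer is installed: call it */
}
```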
repo_id: AirFortressIlikara/LS2K0300-linux-4.19 | size: 4,076 | file_path: arch/sparc/lib/atomic_64.S
/* SPDX-License-Identifier: GPL-2.0 */ /* atomic.S: These things are too big to do inline. * * Copyright (C) 1999, 2007 2012 David S. Miller (davem@davemloft.net) */ #include <linux/linkage.h> #include <asm/asi.h> #include <asm/backoff.h> #include <asm/export.h> .text /* Three versions of the atomic routines, one that * does not return a value and does not perform * memory barriers, and a two which return * a value, the new and old value resp. and does the * barriers. */ #define ATOMIC_OP(op) \ ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ BACKOFF_SETUP(%o2); \ 1: lduw [%o1], %g1; \ op %g1, %o0, %g7; \ cas [%o1], %g1, %g7; \ cmp %g1, %g7; \ bne,pn %icc, BACKOFF_LABEL(2f, 1b); \ nop; \ retl; \ nop; \ 2: BACKOFF_SPIN(%o2, %o3, 1b); \ ENDPROC(atomic_##op); \ EXPORT_SYMBOL(atomic_##op); #define ATOMIC_OP_RETURN(op) \ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \ BACKOFF_SETUP(%o2); \ 1: lduw [%o1], %g1; \ op %g1, %o0, %g7; \ cas [%o1], %g1, %g7; \ cmp %g1, %g7; \ bne,pn %icc, BACKOFF_LABEL(2f, 1b); \ op %g1, %o0, %g1; \ retl; \ sra %g1, 0, %o0; \ 2: BACKOFF_SPIN(%o2, %o3, 1b); \ ENDPROC(atomic_##op##_return); \ EXPORT_SYMBOL(atomic_##op##_return); #define ATOMIC_FETCH_OP(op) \ ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ BACKOFF_SETUP(%o2); \ 1: lduw [%o1], %g1; \ op %g1, %o0, %g7; \ cas [%o1], %g1, %g7; \ cmp %g1, %g7; \ bne,pn %icc, BACKOFF_LABEL(2f, 1b); \ nop; \ retl; \ sra %g1, 0, %o0; \ 2: BACKOFF_SPIN(%o2, %o3, 1b); \ ENDPROC(atomic_fetch_##op); \ EXPORT_SYMBOL(atomic_fetch_##op); ATOMIC_OP(add) ATOMIC_OP_RETURN(add) ATOMIC_FETCH_OP(add) ATOMIC_OP(sub) ATOMIC_OP_RETURN(sub) ATOMIC_FETCH_OP(sub) ATOMIC_OP(and) ATOMIC_FETCH_OP(and) ATOMIC_OP(or) ATOMIC_FETCH_OP(or) ATOMIC_OP(xor) ATOMIC_FETCH_OP(xor) #undef ATOMIC_FETCH_OP #undef ATOMIC_OP_RETURN #undef ATOMIC_OP #define ATOMIC64_OP(op) \ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ BACKOFF_SETUP(%o2); \ 1: ldx [%o1], %g1; \ op %g1, %o0, %g7; \ casx [%o1], %g1, %g7; \ cmp %g1, %g7; \ bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \ nop; \ retl; \ nop; \ 2: BACKOFF_SPIN(%o2, %o3, 1b); \ ENDPROC(atomic64_##op); \ EXPORT_SYMBOL(atomic64_##op); #define ATOMIC64_OP_RETURN(op) \ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \ BACKOFF_SETUP(%o2); \ 1: ldx [%o1], %g1; \ op %g1, %o0, %g7; \ casx [%o1], %g1, %g7; \ cmp %g1, %g7; \ bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \ nop; \ retl; \ op %g1, %o0, %o0; \ 2: BACKOFF_SPIN(%o2, %o3, 1b); \ ENDPROC(atomic64_##op##_return); \ EXPORT_SYMBOL(atomic64_##op##_return); #define ATOMIC64_FETCH_OP(op) \ ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ BACKOFF_SETUP(%o2); \ 1: ldx [%o1], %g1; \ op %g1, %o0, %g7; \ casx [%o1], %g1, %g7; \ cmp %g1, %g7; \ bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \ nop; \ retl; \ mov %g1, %o0; \ 2: BACKOFF_SPIN(%o2, %o3, 1b); \ ENDPROC(atomic64_fetch_##op); \ EXPORT_SYMBOL(atomic64_fetch_##op); ATOMIC64_OP(add) ATOMIC64_OP_RETURN(add) ATOMIC64_FETCH_OP(add) ATOMIC64_OP(sub) ATOMIC64_OP_RETURN(sub) ATOMIC64_FETCH_OP(sub) ATOMIC64_OP(and) ATOMIC64_FETCH_OP(and) ATOMIC64_OP(or) ATOMIC64_FETCH_OP(or) ATOMIC64_OP(xor) ATOMIC64_FETCH_OP(xor) #undef ATOMIC64_FETCH_OP #undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */ BACKOFF_SETUP(%o2) 1: ldx [%o0], %g1 brlez,pn %g1, 3f sub %g1, 1, %g7 casx [%o0], %g1, %g7 cmp %g1, %g7 bne,pn %xcc, BACKOFF_LABEL(2f, 1b) nop 3: retl sub %g1, 1, %o0 2: BACKOFF_SPIN(%o2, %o3, 1b) 
ENDPROC(atomic64_dec_if_positive) EXPORT_SYMBOL(atomic64_dec_if_positive)
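Every routine generated above follows the same shape: load the old value, compute the new one, try to install it with cas/casx, and retry (with backoff) if another CPU changed the word in between. A minimal C sketch of that retry loop for the 64-bit add-and-return case, using the GCC/Clang __atomic builtins as a stand-in for casx (an assumption; this is not the kernel's atomic API, and the backoff spin is omitted):

```c
#include <stdint.h>

/*
 * Sketch of the ldx / op / casx / retry pattern used by the ATOMIC64_*
 * macros above, shown for "add". __atomic_compare_exchange_n stands in
 * for the casx instruction; BACKOFF_SPIN is left out for brevity.
 */
static int64_t atomic64_add_return_model(int64_t increment, int64_t *ptr)
{
    int64_t old, newval;

    do {
        old = __atomic_load_n(ptr, __ATOMIC_RELAXED);   /* ldx  [%o1], %g1    */
        newval = old + increment;                       /* add  %g1, %o0, %g7 */
        /* casx [%o1], %g1, %g7: succeeds only if *ptr still equals old */
    } while (!__atomic_compare_exchange_n(ptr, &old, newval, 0,
                                          __ATOMIC_SEQ_CST, __ATOMIC_RELAXED));

    return newval;   /* the *_return variant hands back the new value */
}
```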
repo_id: AirFortressIlikara/LS2K0300-linux-4.19 | size: 2,332 | file_path: arch/sparc/lib/NG4memset.S
/* SPDX-License-Identifier: GPL-2.0 */ /* NG4memset.S: Niagara-4 optimized memset/bzero. * * Copyright (C) 2012 David S. Miller (davem@davemloft.net) */ #include <asm/asi.h> .register %g2, #scratch .register %g3, #scratch .text .align 32 .globl NG4memset NG4memset: andcc %o1, 0xff, %o4 be,pt %icc, 1f mov %o2, %o1 sllx %o4, 8, %g1 or %g1, %o4, %o2 sllx %o2, 16, %g1 or %g1, %o2, %o2 sllx %o2, 32, %g1 ba,pt %icc, 1f or %g1, %o2, %o4 .size NG4memset,.-NG4memset .align 32 .globl NG4bzero NG4bzero: clr %o4 1: cmp %o1, 16 ble %icc, .Ltiny mov %o0, %o3 sub %g0, %o0, %g1 and %g1, 0x7, %g1 brz,pt %g1, .Laligned8 sub %o1, %g1, %o1 1: stb %o4, [%o0 + 0x00] subcc %g1, 1, %g1 bne,pt %icc, 1b add %o0, 1, %o0 .Laligned8: cmp %o1, 64 + (64 - 8) ble .Lmedium sub %g0, %o0, %g1 andcc %g1, (64 - 1), %g1 brz,pn %g1, .Laligned64 sub %o1, %g1, %o1 1: stx %o4, [%o0 + 0x00] subcc %g1, 8, %g1 bne,pt %icc, 1b add %o0, 0x8, %o0 .Laligned64: andn %o1, 64 - 1, %g1 sub %o1, %g1, %o1 brnz,pn %o4, .Lnon_bzero_loop mov 0x20, %g2 1: stxa %o4, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P subcc %g1, 0x40, %g1 stxa %o4, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P bne,pt %icc, 1b add %o0, 0x40, %o0 .Lpostloop: cmp %o1, 8 bl,pn %icc, .Ltiny membar #StoreStore|#StoreLoad .Lmedium: andn %o1, 0x7, %g1 sub %o1, %g1, %o1 1: stx %o4, [%o0 + 0x00] subcc %g1, 0x8, %g1 bne,pt %icc, 1b add %o0, 0x08, %o0 andcc %o1, 0x4, %g1 be,pt %icc, .Ltiny sub %o1, %g1, %o1 stw %o4, [%o0 + 0x00] add %o0, 0x4, %o0 .Ltiny: cmp %o1, 0 be,pn %icc, .Lexit 1: subcc %o1, 1, %o1 stb %o4, [%o0 + 0x00] bne,pt %icc, 1b add %o0, 1, %o0 .Lexit: retl mov %o3, %o0 .Lnon_bzero_loop: mov 0x08, %g3 mov 0x28, %o5 1: stxa %o4, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P subcc %g1, 0x40, %g1 stxa %o4, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P stxa %o4, [%o0 + %g3] ASI_BLK_INIT_QUAD_LDD_P stxa %o4, [%o0 + %o5] ASI_BLK_INIT_QUAD_LDD_P add %o0, 0x10, %o0 stxa %o4, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P stxa %o4, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P stxa %o4, [%o0 + %g3] ASI_BLK_INIT_QUAD_LDD_P stxa %o4, [%o0 + %o5] ASI_BLK_INIT_QUAD_LDD_P bne,pt %icc, 1b add %o0, 0x30, %o0 ba,a,pt %icc, .Lpostloop nop .size NG4bzero,.-NG4bzero
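NG4memset's entry code replicates the low 8 bits of the fill value across all eight byte lanes of a 64-bit register with a shift/or chain before entering the store loops. A small C sketch of that replication step:

```c
#include <stdint.h>

/*
 * Byte replication as done at the top of NG4memset: duplicate the fill
 * byte into every byte of a 64-bit word, doubling the filled width on
 * each step (8 -> 16 -> 32 -> 64 bits).
 */
static uint64_t replicate_fill_byte(unsigned char c)
{
    uint64_t v = c;

    v |= v << 8;    /* sllx %o4, 8,  %g1 ; or %g1, %o4, %o2 */
    v |= v << 16;   /* sllx %o2, 16, %g1 ; or %g1, %o2, %o2 */
    v |= v << 32;   /* sllx %o2, 32, %g1 ; or %g1, %o2, %o4 */
    return v;       /* e.g. c = 0xab gives 0xabababababababab */
}
```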
repo_id: AirFortressIlikara/LS2K0300-linux-4.19 | size: 1,109 | file_path: arch/sparc/lib/NG2copy_to_user.S
/* SPDX-License-Identifier: GPL-2.0 */ /* NG2copy_to_user.S: Niagara-2 optimized copy to userspace. * * Copyright (C) 2007 David S. Miller (davem@davemloft.net) */ #define EX_ST(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ .word 98b, y; \ .text; \ .align 4; #define EX_ST_FP(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ .word 98b, y##_fp; \ .text; \ .align 4; #ifndef ASI_AIUS #define ASI_AIUS 0x11 #endif #ifndef ASI_BLK_AIUS_4V #define ASI_BLK_AIUS_4V 0x17 #endif #ifndef ASI_BLK_INIT_QUAD_LDD_AIUS #define ASI_BLK_INIT_QUAD_LDD_AIUS 0x23 #endif #define FUNC_NAME NG2copy_to_user #define STORE(type,src,addr) type##a src, [addr] ASI_AIUS #define STORE_ASI ASI_BLK_INIT_QUAD_LDD_AIUS #define STORE_BLK(src,addr) stda src, [addr] ASI_BLK_AIUS_4V #define EX_RETVAL(x) 0 #ifdef __KERNEL__ /* Writing to %asi is _expensive_ so we hardcode it. * Reading %asi to check for KERNEL_DS is comparatively * cheap. */ #define PREAMBLE \ rd %asi, %g1; \ cmp %g1, ASI_AIUS; \ bne,pn %icc, raw_copy_in_user; \ nop #endif #include "NG2memcpy.S"
repo_id: AirFortressIlikara/LS2K0300-linux-4.19 | size: 31,360 | file_path: arch/sparc/lib/M7memcpy.S
/* * M7memcpy: Optimized SPARC M7 memcpy * * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. */ .file "M7memcpy.S" /* * memcpy(s1, s2, len) * * Copy s2 to s1, always copy n bytes. * Note: this C code does not work for overlapped copies. * * Fast assembler language version of the following C-program for memcpy * which represents the `standard' for the C-library. * * void * * memcpy(void *s, const void *s0, size_t n) * { * if (n != 0) { * char *s1 = s; * const char *s2 = s0; * do { * *s1++ = *s2++; * } while (--n != 0); * } * return (s); * } * * * SPARC T7/M7 Flow : * * if (count < SMALL_MAX) { * if count < SHORTCOPY (SHORTCOPY=3) * copy bytes; exit with dst addr * if src & dst aligned on word boundary but not long word boundary, * copy with ldw/stw; branch to finish_up * if src & dst aligned on long word boundary * copy with ldx/stx; branch to finish_up * if src & dst not aligned and length <= SHORTCHECK (SHORTCHECK=14) * copy bytes; exit with dst addr * move enough bytes to get src to word boundary * if dst now on word boundary * move_words: * copy words; branch to finish_up * if dst now on half word boundary * load words, shift half words, store words; branch to finish_up * if dst on byte 1 * load words, shift 3 bytes, store words; branch to finish_up * if dst on byte 3 * load words, shift 1 byte, store words; branch to finish_up * finish_up: * copy bytes; exit with dst addr * } else { More than SMALL_MAX bytes * move bytes until dst is on long word boundary * if( src is on long word boundary ) { * if (count < MED_MAX) { * finish_long: src/dst aligned on 8 bytes * copy with ldx/stx in 8-way unrolled loop; * copy final 0-63 bytes; exit with dst addr * } else { src/dst aligned; count > MED_MAX * align dst on 64 byte boundary; for main data movement: * prefetch src data to L2 cache; let HW prefetch move data to L1 cache * Use BIS (block initializing store) to avoid copying store cache * lines from memory. But pre-store first element of each cache line * ST_CHUNK lines in advance of the rest of that cache line. That * gives time for replacement cache lines to be written back without * excess STQ and Miss Buffer filling. Repeat until near the end, * then finish up storing before going to finish_long. * } * } else { src/dst not aligned on 8 bytes * if src is word aligned and count < MED_WMAX * move words in 8-way unrolled loop * move final 0-31 bytes; exit with dst addr * if count < MED_UMAX * use alignaddr/faligndata combined with ldd/std in 8-way * unrolled loop to move data. * go to unalign_done * else * setup alignaddr for faligndata instructions * align dst on 64 byte boundary; prefetch src data to L1 cache * loadx8, falign, block-store, prefetch loop * (only use block-init-store when src/dst on 8 byte boundaries.) * unalign_done: * move remaining bytes for unaligned cases. exit with dst addr. 
* } * */ #include <asm/visasm.h> #include <asm/asi.h> #if !defined(EX_LD) && !defined(EX_ST) #define NON_USER_COPY #endif #ifndef EX_LD #define EX_LD(x,y) x #endif #ifndef EX_LD_FP #define EX_LD_FP(x,y) x #endif #ifndef EX_ST #define EX_ST(x,y) x #endif #ifndef EX_ST_FP #define EX_ST_FP(x,y) x #endif #ifndef EX_RETVAL #define EX_RETVAL(x) x #endif #ifndef LOAD #define LOAD(type,addr,dest) type [addr], dest #endif #ifndef STORE #define STORE(type,src,addr) type src, [addr] #endif /* * ASI_BLK_INIT_QUAD_LDD_P/ASI_BLK_INIT_QUAD_LDD_S marks the cache * line as "least recently used" which means if many threads are * active, it has a high probability of being pushed out of the cache * between the first initializing store and the final stores. * Thus, we use ASI_ST_BLKINIT_MRU_P/ASI_ST_BLKINIT_MRU_S which * marks the cache line as "most recently used" for all * but the last cache line */ #ifndef STORE_ASI #ifndef SIMULATE_NIAGARA_ON_NON_NIAGARA #define STORE_ASI ASI_BLK_INIT_QUAD_LDD_P #else #define STORE_ASI 0x80 /* ASI_P */ #endif #endif #ifndef STORE_MRU_ASI #ifndef SIMULATE_NIAGARA_ON_NON_NIAGARA #define STORE_MRU_ASI ASI_ST_BLKINIT_MRU_P #else #define STORE_MRU_ASI 0x80 /* ASI_P */ #endif #endif #ifndef STORE_INIT #define STORE_INIT(src,addr) stxa src, [addr] STORE_ASI #endif #ifndef STORE_INIT_MRU #define STORE_INIT_MRU(src,addr) stxa src, [addr] STORE_MRU_ASI #endif #ifndef FUNC_NAME #define FUNC_NAME M7memcpy #endif #ifndef PREAMBLE #define PREAMBLE #endif #define BLOCK_SIZE 64 #define SHORTCOPY 3 #define SHORTCHECK 14 #define SHORT_LONG 64 /* max copy for short longword-aligned case */ /* must be at least 64 */ #define SMALL_MAX 128 #define MED_UMAX 1024 /* max copy for medium un-aligned case */ #define MED_WMAX 1024 /* max copy for medium word-aligned case */ #define MED_MAX 1024 /* max copy for medium longword-aligned case */ #define ST_CHUNK 24 /* ST_CHUNK - block of values for BIS Store */ #define ALIGN_PRE 24 /* distance for aligned prefetch loop */ .register %g2,#scratch .section ".text" .global FUNC_NAME .type FUNC_NAME, #function .align 16 FUNC_NAME: srlx %o2, 31, %g2 cmp %g2, 0 tne %xcc, 5 PREAMBLE mov %o0, %g1 ! save %o0 brz,pn %o2, .Lsmallx cmp %o2, 3 ble,pn %icc, .Ltiny_cp cmp %o2, 19 ble,pn %icc, .Lsmall_cp or %o0, %o1, %g2 cmp %o2, SMALL_MAX bl,pn %icc, .Lmedium_cp nop .Lmedium: neg %o0, %o5 andcc %o5, 7, %o5 ! bytes till DST 8 byte aligned brz,pt %o5, .Ldst_aligned_on_8 ! %o5 has the bytes to be written in partial store. sub %o2, %o5, %o2 sub %o1, %o0, %o1 ! %o1 gets the difference 7: ! dst aligning loop add %o1, %o0, %o4 EX_LD(LOAD(ldub, %o4, %o4), memcpy_retl_o2_plus_o5) ! load one byte subcc %o5, 1, %o5 EX_ST(STORE(stb, %o4, %o0), memcpy_retl_o2_plus_o5_plus_1) bgu,pt %xcc, 7b add %o0, 1, %o0 ! advance dst add %o1, %o0, %o1 ! restore %o1 .Ldst_aligned_on_8: andcc %o1, 7, %o5 brnz,pt %o5, .Lsrc_dst_unaligned_on_8 nop .Lsrc_dst_aligned_on_8: ! check if we are copying MED_MAX or more bytes set MED_MAX, %o3 cmp %o2, %o3 ! limit to store buffer size bgu,pn %xcc, .Llarge_align8_copy nop /* * Special case for handling when src and dest are both long word aligned * and total data to move is less than MED_MAX bytes */ .Lmedlong: subcc %o2, 63, %o2 ! adjust length to allow cc test ble,pn %xcc, .Lmedl63 ! skip big loop if less than 64 bytes nop .Lmedl64: EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2_plus_63) ! load subcc %o2, 64, %o2 ! decrement length count EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_63_64) ! and store EX_LD(LOAD(ldx, %o1+8, %o3), memcpy_retl_o2_plus_63_56) ! 
a block of 64 EX_ST(STORE(stx, %o3, %o0+8), memcpy_retl_o2_plus_63_56) EX_LD(LOAD(ldx, %o1+16, %o4), memcpy_retl_o2_plus_63_48) EX_ST(STORE(stx, %o4, %o0+16), memcpy_retl_o2_plus_63_48) EX_LD(LOAD(ldx, %o1+24, %o3), memcpy_retl_o2_plus_63_40) EX_ST(STORE(stx, %o3, %o0+24), memcpy_retl_o2_plus_63_40) EX_LD(LOAD(ldx, %o1+32, %o4), memcpy_retl_o2_plus_63_32)! load and store EX_ST(STORE(stx, %o4, %o0+32), memcpy_retl_o2_plus_63_32) EX_LD(LOAD(ldx, %o1+40, %o3), memcpy_retl_o2_plus_63_24)! a block of 64 add %o1, 64, %o1 ! increase src ptr by 64 EX_ST(STORE(stx, %o3, %o0+40), memcpy_retl_o2_plus_63_24) EX_LD(LOAD(ldx, %o1-16, %o4), memcpy_retl_o2_plus_63_16) add %o0, 64, %o0 ! increase dst ptr by 64 EX_ST(STORE(stx, %o4, %o0-16), memcpy_retl_o2_plus_63_16) EX_LD(LOAD(ldx, %o1-8, %o3), memcpy_retl_o2_plus_63_8) bgu,pt %xcc, .Lmedl64 ! repeat if at least 64 bytes left EX_ST(STORE(stx, %o3, %o0-8), memcpy_retl_o2_plus_63_8) .Lmedl63: addcc %o2, 32, %o2 ! adjust remaining count ble,pt %xcc, .Lmedl31 ! to skip if 31 or fewer bytes left nop EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2_plus_31) ! load sub %o2, 32, %o2 ! decrement length count EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_31_32) ! and store EX_LD(LOAD(ldx, %o1+8, %o3), memcpy_retl_o2_plus_31_24) ! a block of 32 add %o1, 32, %o1 ! increase src ptr by 32 EX_ST(STORE(stx, %o3, %o0+8), memcpy_retl_o2_plus_31_24) EX_LD(LOAD(ldx, %o1-16, %o4), memcpy_retl_o2_plus_31_16) add %o0, 32, %o0 ! increase dst ptr by 32 EX_ST(STORE(stx, %o4, %o0-16), memcpy_retl_o2_plus_31_16) EX_LD(LOAD(ldx, %o1-8, %o3), memcpy_retl_o2_plus_31_8) EX_ST(STORE(stx, %o3, %o0-8), memcpy_retl_o2_plus_31_8) .Lmedl31: addcc %o2, 16, %o2 ! adjust remaining count ble,pt %xcc, .Lmedl15 ! skip if 15 or fewer bytes left nop ! EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2_plus_15) add %o1, 16, %o1 ! increase src ptr by 16 EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_15) sub %o2, 16, %o2 ! decrease count by 16 EX_LD(LOAD(ldx, %o1-8, %o3), memcpy_retl_o2_plus_15_8) add %o0, 16, %o0 ! increase dst ptr by 16 EX_ST(STORE(stx, %o3, %o0-8), memcpy_retl_o2_plus_15_8) .Lmedl15: addcc %o2, 15, %o2 ! restore count bz,pt %xcc, .Lsmallx ! exit if finished cmp %o2, 8 blt,pt %xcc, .Lmedw7 ! skip if 7 or fewer bytes left tst %o2 EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2) ! load 8 bytes add %o1, 8, %o1 ! increase src ptr by 8 add %o0, 8, %o0 ! increase dst ptr by 8 subcc %o2, 8, %o2 ! decrease count by 8 bnz,pn %xcc, .Lmedw7 EX_ST(STORE(stx, %o4, %o0-8), memcpy_retl_o2_plus_8) ! and store 8 retl mov EX_RETVAL(%g1), %o0 ! restore %o0 .align 16 .Lsrc_dst_unaligned_on_8: ! DST is 8-byte aligned, src is not 2: andcc %o1, 0x3, %o5 ! test word alignment bnz,pt %xcc, .Lunalignsetup ! branch to skip if not word aligned nop /* * Handle all cases where src and dest are aligned on word * boundaries. Use unrolled loops for better performance. * This option wins over standard large data move when * source and destination is in cache for.Lmedium * to short data moves. */ set MED_WMAX, %o3 cmp %o2, %o3 ! limit to store buffer size bge,pt %xcc, .Lunalignrejoin ! otherwise rejoin main loop nop subcc %o2, 31, %o2 ! adjust length to allow cc test ! for end of loop ble,pt %xcc, .Lmedw31 ! skip big loop if less than 16 .Lmedw32: EX_LD(LOAD(ld, %o1, %o4), memcpy_retl_o2_plus_31)! move a block of 32 sllx %o4, 32, %o5 EX_LD(LOAD(ld, %o1+4, %o4), memcpy_retl_o2_plus_31) or %o4, %o5, %o5 EX_ST(STORE(stx, %o5, %o0), memcpy_retl_o2_plus_31) subcc %o2, 32, %o2 ! 
decrement length count EX_LD(LOAD(ld, %o1+8, %o4), memcpy_retl_o2_plus_31_24) sllx %o4, 32, %o5 EX_LD(LOAD(ld, %o1+12, %o4), memcpy_retl_o2_plus_31_24) or %o4, %o5, %o5 EX_ST(STORE(stx, %o5, %o0+8), memcpy_retl_o2_plus_31_24) add %o1, 32, %o1 ! increase src ptr by 32 EX_LD(LOAD(ld, %o1-16, %o4), memcpy_retl_o2_plus_31_16) sllx %o4, 32, %o5 EX_LD(LOAD(ld, %o1-12, %o4), memcpy_retl_o2_plus_31_16) or %o4, %o5, %o5 EX_ST(STORE(stx, %o5, %o0+16), memcpy_retl_o2_plus_31_16) add %o0, 32, %o0 ! increase dst ptr by 32 EX_LD(LOAD(ld, %o1-8, %o4), memcpy_retl_o2_plus_31_8) sllx %o4, 32, %o5 EX_LD(LOAD(ld, %o1-4, %o4), memcpy_retl_o2_plus_31_8) or %o4, %o5, %o5 bgu,pt %xcc, .Lmedw32 ! repeat if at least 32 bytes left EX_ST(STORE(stx, %o5, %o0-8), memcpy_retl_o2_plus_31_8) .Lmedw31: addcc %o2, 31, %o2 ! restore count bz,pt %xcc, .Lsmallx ! exit if finished nop cmp %o2, 16 blt,pt %xcc, .Lmedw15 nop EX_LD(LOAD(ld, %o1, %o4), memcpy_retl_o2)! move a block of 16 bytes sllx %o4, 32, %o5 subcc %o2, 16, %o2 ! decrement length count EX_LD(LOAD(ld, %o1+4, %o4), memcpy_retl_o2_plus_16) or %o4, %o5, %o5 EX_ST(STORE(stx, %o5, %o0), memcpy_retl_o2_plus_16) add %o1, 16, %o1 ! increase src ptr by 16 EX_LD(LOAD(ld, %o1-8, %o4), memcpy_retl_o2_plus_8) add %o0, 16, %o0 ! increase dst ptr by 16 sllx %o4, 32, %o5 EX_LD(LOAD(ld, %o1-4, %o4), memcpy_retl_o2_plus_8) or %o4, %o5, %o5 EX_ST(STORE(stx, %o5, %o0-8), memcpy_retl_o2_plus_8) .Lmedw15: bz,pt %xcc, .Lsmallx ! exit if finished cmp %o2, 8 blt,pn %xcc, .Lmedw7 ! skip if 7 or fewer bytes left tst %o2 EX_LD(LOAD(ld, %o1, %o4), memcpy_retl_o2) ! load 4 bytes subcc %o2, 8, %o2 ! decrease count by 8 EX_ST(STORE(stw, %o4, %o0), memcpy_retl_o2_plus_8)! and store 4 bytes add %o1, 8, %o1 ! increase src ptr by 8 EX_LD(LOAD(ld, %o1-4, %o3), memcpy_retl_o2_plus_4) ! load 4 bytes add %o0, 8, %o0 ! increase dst ptr by 8 EX_ST(STORE(stw, %o3, %o0-4), memcpy_retl_o2_plus_4)! and store 4 bytes bz,pt %xcc, .Lsmallx ! exit if finished .Lmedw7: ! count is ge 1, less than 8 cmp %o2, 4 ! check for 4 bytes left blt,pn %xcc, .Lsmallleft3 ! skip if 3 or fewer bytes left nop ! EX_LD(LOAD(ld, %o1, %o4), memcpy_retl_o2) ! load 4 bytes add %o1, 4, %o1 ! increase src ptr by 4 add %o0, 4, %o0 ! increase dst ptr by 4 subcc %o2, 4, %o2 ! decrease count by 4 bnz .Lsmallleft3 EX_ST(STORE(stw, %o4, %o0-4), memcpy_retl_o2_plus_4)! and store 4 bytes retl mov EX_RETVAL(%g1), %o0 .align 16 .Llarge_align8_copy: ! Src and dst share 8 byte alignment ! align dst to 64 byte boundary andcc %o0, 0x3f, %o3 ! %o3 == 0 means dst is 64 byte aligned brz,pn %o3, .Laligned_to_64 andcc %o0, 8, %o3 ! odd long words to move? brz,pt %o3, .Laligned_to_16 nop EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2) sub %o2, 8, %o2 add %o1, 8, %o1 ! increment src ptr add %o0, 8, %o0 ! increment dst ptr EX_ST(STORE(stx, %o4, %o0-8), memcpy_retl_o2_plus_8) .Laligned_to_16: andcc %o0, 16, %o3 ! pair of long words to move? brz,pt %o3, .Laligned_to_32 nop EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2) sub %o2, 16, %o2 EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_16) add %o1, 16, %o1 ! increment src ptr EX_LD(LOAD(ldx, %o1-8, %o4), memcpy_retl_o2_plus_8) add %o0, 16, %o0 ! increment dst ptr EX_ST(STORE(stx, %o4, %o0-8), memcpy_retl_o2_plus_8) .Laligned_to_32: andcc %o0, 32, %o3 ! four long words to move? 
brz,pt %o3, .Laligned_to_64 nop EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2) sub %o2, 32, %o2 EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_32) EX_LD(LOAD(ldx, %o1+8, %o4), memcpy_retl_o2_plus_24) EX_ST(STORE(stx, %o4, %o0+8), memcpy_retl_o2_plus_24) EX_LD(LOAD(ldx, %o1+16, %o4), memcpy_retl_o2_plus_16) EX_ST(STORE(stx, %o4, %o0+16), memcpy_retl_o2_plus_16) add %o1, 32, %o1 ! increment src ptr EX_LD(LOAD(ldx, %o1-8, %o4), memcpy_retl_o2_plus_8) add %o0, 32, %o0 ! increment dst ptr EX_ST(STORE(stx, %o4, %o0-8), memcpy_retl_o2_plus_8) .Laligned_to_64: ! ! Using block init store (BIS) instructions to avoid fetching cache ! lines from memory. Use ST_CHUNK stores to first element of each cache ! line (similar to prefetching) to avoid overfilling STQ or miss buffers. ! Gives existing cache lines time to be moved out of L1/L2/L3 cache. ! Initial stores using MRU version of BIS to keep cache line in ! cache until we are ready to store final element of cache line. ! Then store last element using the LRU version of BIS. ! andn %o2, 0x3f, %o5 ! %o5 is multiple of block size and %o2, 0x3f, %o2 ! residue bytes in %o2 ! ! We use STORE_MRU_ASI for the first seven stores to each cache line ! followed by STORE_ASI (mark as LRU) for the last store. That ! mixed approach reduces the probability that the cache line is removed ! before we finish setting it, while minimizing the effects on ! other cached values during a large memcpy ! ! ST_CHUNK batches up initial BIS operations for several cache lines ! to allow multiple requests to not be blocked by overflowing the ! the store miss buffer. Then the matching stores for all those ! BIS operations are executed. ! sub %o0, 8, %o0 ! adjust %o0 for ASI alignment .Lalign_loop: cmp %o5, ST_CHUNK*64 blu,pt %xcc, .Lalign_loop_fin mov ST_CHUNK,%o3 .Lalign_loop_start: prefetch [%o1 + (ALIGN_PRE * BLOCK_SIZE)], 21 subcc %o3, 1, %o3 EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2_plus_o5) add %o1, 64, %o1 add %o0, 8, %o0 EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) bgu %xcc,.Lalign_loop_start add %o0, 56, %o0 mov ST_CHUNK,%o3 sllx %o3, 6, %o4 ! ST_CHUNK*64 sub %o1, %o4, %o1 ! reset %o1 sub %o0, %o4, %o0 ! reset %o0 .Lalign_loop_rest: EX_LD(LOAD(ldx, %o1+8, %o4), memcpy_retl_o2_plus_o5) add %o0, 16, %o0 EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) EX_LD(LOAD(ldx, %o1+16, %o4), memcpy_retl_o2_plus_o5) add %o0, 8, %o0 EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) subcc %o3, 1, %o3 EX_LD(LOAD(ldx, %o1+24, %o4), memcpy_retl_o2_plus_o5) add %o0, 8, %o0 EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) EX_LD(LOAD(ldx, %o1+32, %o4), memcpy_retl_o2_plus_o5) add %o0, 8, %o0 EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) EX_LD(LOAD(ldx, %o1+40, %o4), memcpy_retl_o2_plus_o5) add %o0, 8, %o0 EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) EX_LD(LOAD(ldx, %o1+48, %o4), memcpy_retl_o2_plus_o5) add %o1, 64, %o1 add %o0, 8, %o0 EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) add %o0, 8, %o0 EX_LD(LOAD(ldx, %o1-8, %o4), memcpy_retl_o2_plus_o5) sub %o5, 64, %o5 bgu %xcc,.Lalign_loop_rest ! 
mark cache line as LRU EX_ST(STORE_INIT(%o4, %o0), memcpy_retl_o2_plus_o5_plus_64) cmp %o5, ST_CHUNK*64 bgu,pt %xcc, .Lalign_loop_start mov ST_CHUNK,%o3 cmp %o5, 0 beq .Lalign_done nop .Lalign_loop_fin: EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2_plus_o5) EX_ST(STORE(stx, %o4, %o0+8), memcpy_retl_o2_plus_o5) EX_LD(LOAD(ldx, %o1+8, %o4), memcpy_retl_o2_plus_o5) EX_ST(STORE(stx, %o4, %o0+8+8), memcpy_retl_o2_plus_o5) EX_LD(LOAD(ldx, %o1+16, %o4), memcpy_retl_o2_plus_o5) EX_ST(STORE(stx, %o4, %o0+8+16), memcpy_retl_o2_plus_o5) subcc %o5, 64, %o5 EX_LD(LOAD(ldx, %o1+24, %o4), memcpy_retl_o2_plus_o5_64) EX_ST(STORE(stx, %o4, %o0+8+24), memcpy_retl_o2_plus_o5_64) EX_LD(LOAD(ldx, %o1+32, %o4), memcpy_retl_o2_plus_o5_64) EX_ST(STORE(stx, %o4, %o0+8+32), memcpy_retl_o2_plus_o5_64) EX_LD(LOAD(ldx, %o1+40, %o4), memcpy_retl_o2_plus_o5_64) EX_ST(STORE(stx, %o4, %o0+8+40), memcpy_retl_o2_plus_o5_64) EX_LD(LOAD(ldx, %o1+48, %o4), memcpy_retl_o2_plus_o5_64) add %o1, 64, %o1 EX_ST(STORE(stx, %o4, %o0+8+48), memcpy_retl_o2_plus_o5_64) add %o0, 64, %o0 EX_LD(LOAD(ldx, %o1-8, %o4), memcpy_retl_o2_plus_o5_64) bgu %xcc,.Lalign_loop_fin EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_o5_64) .Lalign_done: add %o0, 8, %o0 ! restore %o0 from ASI alignment membar #StoreStore sub %o2, 63, %o2 ! adjust length to allow cc test ba .Lmedl63 ! in .Lmedl63 nop .align 16 ! Dst is on 8 byte boundary; src is not; remaining count > SMALL_MAX .Lunalignsetup: .Lunalignrejoin: mov %g1, %o3 ! save %g1 as VISEntryHalf clobbers it #ifdef NON_USER_COPY VISEntryHalfFast(.Lmedium_vis_entry_fail_cp) #else VISEntryHalf #endif mov %o3, %g1 ! restore %g1 set MED_UMAX, %o3 cmp %o2, %o3 ! check for.Lmedium unaligned limit bge,pt %xcc,.Lunalign_large prefetch [%o1 + (4 * BLOCK_SIZE)], 20 andn %o2, 0x3f, %o5 ! %o5 is multiple of block size and %o2, 0x3f, %o2 ! residue bytes in %o2 cmp %o2, 8 ! Insure we do not load beyond bgt .Lunalign_adjust ! end of source buffer andn %o1, 0x7, %o4 ! %o4 has long word aligned src address add %o2, 64, %o2 ! adjust to leave loop sub %o5, 64, %o5 ! early if necessary .Lunalign_adjust: alignaddr %o1, %g0, %g0 ! generate %gsr add %o1, %o5, %o1 ! 
advance %o1 to after blocks EX_LD_FP(LOAD(ldd, %o4, %f0), memcpy_retl_o2_plus_o5) .Lunalign_loop: EX_LD_FP(LOAD(ldd, %o4+8, %f2), memcpy_retl_o2_plus_o5) faligndata %f0, %f2, %f16 EX_LD_FP(LOAD(ldd, %o4+16, %f4), memcpy_retl_o2_plus_o5) subcc %o5, BLOCK_SIZE, %o5 EX_ST_FP(STORE(std, %f16, %o0), memcpy_retl_o2_plus_o5_plus_64) faligndata %f2, %f4, %f18 EX_LD_FP(LOAD(ldd, %o4+24, %f6), memcpy_retl_o2_plus_o5_plus_56) EX_ST_FP(STORE(std, %f18, %o0+8), memcpy_retl_o2_plus_o5_plus_56) faligndata %f4, %f6, %f20 EX_LD_FP(LOAD(ldd, %o4+32, %f8), memcpy_retl_o2_plus_o5_plus_48) EX_ST_FP(STORE(std, %f20, %o0+16), memcpy_retl_o2_plus_o5_plus_48) faligndata %f6, %f8, %f22 EX_LD_FP(LOAD(ldd, %o4+40, %f10), memcpy_retl_o2_plus_o5_plus_40) EX_ST_FP(STORE(std, %f22, %o0+24), memcpy_retl_o2_plus_o5_plus_40) faligndata %f8, %f10, %f24 EX_LD_FP(LOAD(ldd, %o4+48, %f12), memcpy_retl_o2_plus_o5_plus_32) EX_ST_FP(STORE(std, %f24, %o0+32), memcpy_retl_o2_plus_o5_plus_32) faligndata %f10, %f12, %f26 EX_LD_FP(LOAD(ldd, %o4+56, %f14), memcpy_retl_o2_plus_o5_plus_24) add %o4, BLOCK_SIZE, %o4 EX_ST_FP(STORE(std, %f26, %o0+40), memcpy_retl_o2_plus_o5_plus_24) faligndata %f12, %f14, %f28 EX_LD_FP(LOAD(ldd, %o4, %f0), memcpy_retl_o2_plus_o5_plus_16) EX_ST_FP(STORE(std, %f28, %o0+48), memcpy_retl_o2_plus_o5_plus_16) faligndata %f14, %f0, %f30 EX_ST_FP(STORE(std, %f30, %o0+56), memcpy_retl_o2_plus_o5_plus_8) add %o0, BLOCK_SIZE, %o0 bgu,pt %xcc, .Lunalign_loop prefetch [%o4 + (5 * BLOCK_SIZE)], 20 ba .Lunalign_done nop .Lunalign_large: andcc %o0, 0x3f, %o3 ! is dst 64-byte block aligned? bz %xcc, .Lunalignsrc sub %o3, 64, %o3 ! %o3 will be multiple of 8 neg %o3 ! bytes until dest is 64 byte aligned sub %o2, %o3, %o2 ! update cnt with bytes to be moved ! Move bytes according to source alignment andcc %o1, 0x1, %o5 bnz %xcc, .Lunalignbyte ! check for byte alignment nop andcc %o1, 2, %o5 ! check for half word alignment bnz %xcc, .Lunalignhalf nop ! Src is word aligned .Lunalignword: EX_LD_FP(LOAD(ld, %o1, %o4), memcpy_retl_o2_plus_o3) ! load 4 bytes add %o1, 8, %o1 ! increase src ptr by 8 EX_ST_FP(STORE(stw, %o4, %o0), memcpy_retl_o2_plus_o3) ! and store 4 subcc %o3, 8, %o3 ! decrease count by 8 EX_LD_FP(LOAD(ld, %o1-4, %o4), memcpy_retl_o2_plus_o3_plus_4)! load 4 add %o0, 8, %o0 ! increase dst ptr by 8 bnz %xcc, .Lunalignword EX_ST_FP(STORE(stw, %o4, %o0-4), memcpy_retl_o2_plus_o3_plus_4) ba .Lunalignsrc nop ! Src is half-word aligned .Lunalignhalf: EX_LD_FP(LOAD(lduh, %o1, %o4), memcpy_retl_o2_plus_o3) ! load 2 bytes sllx %o4, 32, %o5 ! shift left EX_LD_FP(LOAD(lduw, %o1+2, %o4), memcpy_retl_o2_plus_o3) or %o4, %o5, %o5 sllx %o5, 16, %o5 EX_LD_FP(LOAD(lduh, %o1+6, %o4), memcpy_retl_o2_plus_o3) or %o4, %o5, %o5 EX_ST_FP(STORE(stx, %o5, %o0), memcpy_retl_o2_plus_o3) add %o1, 8, %o1 subcc %o3, 8, %o3 bnz %xcc, .Lunalignhalf add %o0, 8, %o0 ba .Lunalignsrc nop ! Src is Byte aligned .Lunalignbyte: sub %o0, %o1, %o0 ! 
share pointer advance .Lunalignbyte_loop: EX_LD_FP(LOAD(ldub, %o1, %o4), memcpy_retl_o2_plus_o3) sllx %o4, 56, %o5 EX_LD_FP(LOAD(lduh, %o1+1, %o4), memcpy_retl_o2_plus_o3) sllx %o4, 40, %o4 or %o4, %o5, %o5 EX_LD_FP(LOAD(lduh, %o1+3, %o4), memcpy_retl_o2_plus_o3) sllx %o4, 24, %o4 or %o4, %o5, %o5 EX_LD_FP(LOAD(lduh, %o1+5, %o4), memcpy_retl_o2_plus_o3) sllx %o4, 8, %o4 or %o4, %o5, %o5 EX_LD_FP(LOAD(ldub, %o1+7, %o4), memcpy_retl_o2_plus_o3) or %o4, %o5, %o5 add %o0, %o1, %o0 EX_ST_FP(STORE(stx, %o5, %o0), memcpy_retl_o2_plus_o3) sub %o0, %o1, %o0 subcc %o3, 8, %o3 bnz %xcc, .Lunalignbyte_loop add %o1, 8, %o1 add %o0,%o1, %o0 ! restore pointer ! Destination is now block (64 byte aligned) .Lunalignsrc: andn %o2, 0x3f, %o5 ! %o5 is multiple of block size and %o2, 0x3f, %o2 ! residue bytes in %o2 add %o2, 64, %o2 ! Insure we do not load beyond sub %o5, 64, %o5 ! end of source buffer andn %o1, 0x7, %o4 ! %o4 has long word aligned src address alignaddr %o1, %g0, %g0 ! generate %gsr add %o1, %o5, %o1 ! advance %o1 to after blocks EX_LD_FP(LOAD(ldd, %o4, %f14), memcpy_retl_o2_plus_o5) add %o4, 8, %o4 .Lunalign_sloop: EX_LD_FP(LOAD(ldd, %o4, %f16), memcpy_retl_o2_plus_o5) faligndata %f14, %f16, %f0 EX_LD_FP(LOAD(ldd, %o4+8, %f18), memcpy_retl_o2_plus_o5) faligndata %f16, %f18, %f2 EX_LD_FP(LOAD(ldd, %o4+16, %f20), memcpy_retl_o2_plus_o5) faligndata %f18, %f20, %f4 EX_ST_FP(STORE(std, %f0, %o0), memcpy_retl_o2_plus_o5) subcc %o5, 64, %o5 EX_LD_FP(LOAD(ldd, %o4+24, %f22), memcpy_retl_o2_plus_o5_plus_56) faligndata %f20, %f22, %f6 EX_ST_FP(STORE(std, %f2, %o0+8), memcpy_retl_o2_plus_o5_plus_56) EX_LD_FP(LOAD(ldd, %o4+32, %f24), memcpy_retl_o2_plus_o5_plus_48) faligndata %f22, %f24, %f8 EX_ST_FP(STORE(std, %f4, %o0+16), memcpy_retl_o2_plus_o5_plus_48) EX_LD_FP(LOAD(ldd, %o4+40, %f26), memcpy_retl_o2_plus_o5_plus_40) faligndata %f24, %f26, %f10 EX_ST_FP(STORE(std, %f6, %o0+24), memcpy_retl_o2_plus_o5_plus_40) EX_LD_FP(LOAD(ldd, %o4+48, %f28), memcpy_retl_o2_plus_o5_plus_40) faligndata %f26, %f28, %f12 EX_ST_FP(STORE(std, %f8, %o0+32), memcpy_retl_o2_plus_o5_plus_40) add %o4, 64, %o4 EX_LD_FP(LOAD(ldd, %o4-8, %f30), memcpy_retl_o2_plus_o5_plus_40) faligndata %f28, %f30, %f14 EX_ST_FP(STORE(std, %f10, %o0+40), memcpy_retl_o2_plus_o5_plus_40) EX_ST_FP(STORE(std, %f12, %o0+48), memcpy_retl_o2_plus_o5_plus_40) add %o0, 64, %o0 EX_ST_FP(STORE(std, %f14, %o0-8), memcpy_retl_o2_plus_o5_plus_40) fsrc2 %f30, %f14 bgu,pt %xcc, .Lunalign_sloop prefetch [%o4 + (8 * BLOCK_SIZE)], 20 .Lunalign_done: ! Handle trailing bytes, 64 to 127 ! Dest long word aligned, Src not long word aligned cmp %o2, 15 bleu %xcc, .Lunalign_short andn %o2, 0x7, %o5 ! %o5 is multiple of 8 and %o2, 0x7, %o2 ! residue bytes in %o2 add %o2, 8, %o2 sub %o5, 8, %o5 ! insure we do not load past end of src andn %o1, 0x7, %o4 ! %o4 has long word aligned src address add %o1, %o5, %o1 ! advance %o1 to after multiple of 8 EX_LD_FP(LOAD(ldd, %o4, %f0), memcpy_retl_o2_plus_o5)! fetch partialword .Lunalign_by8: EX_LD_FP(LOAD(ldd, %o4+8, %f2), memcpy_retl_o2_plus_o5) add %o4, 8, %o4 faligndata %f0, %f2, %f16 subcc %o5, 8, %o5 EX_ST_FP(STORE(std, %f16, %o0), memcpy_retl_o2_plus_o5) fsrc2 %f2, %f0 bgu,pt %xcc, .Lunalign_by8 add %o0, 8, %o0 .Lunalign_short: #ifdef NON_USER_COPY VISExitHalfFast #else VISExitHalf #endif ba .Lsmallrest nop /* * This is a special case of nested memcpy. This can happen when kernel * calls unaligned memcpy back to back without saving FP registers. We need * traps(context switch) to save/restore FP registers. 
If the kernel calls * memcpy without this trap sequence we will hit FP corruption. Let's use * the normal integer load/store method in this case. */ #ifdef NON_USER_COPY .Lmedium_vis_entry_fail_cp: or %o0, %o1, %g2 #endif .Lmedium_cp: LOAD(prefetch, %o1 + 0x40, #n_reads_strong) andcc %g2, 0x7, %g0 bne,pn %xcc, .Lmedium_unaligned_cp nop .Lmedium_noprefetch_cp: andncc %o2, 0x20 - 1, %o5 be,pn %xcc, 2f sub %o2, %o5, %o2 1: EX_LD(LOAD(ldx, %o1 + 0x00, %o3), memcpy_retl_o2_plus_o5) EX_LD(LOAD(ldx, %o1 + 0x08, %g2), memcpy_retl_o2_plus_o5) EX_LD(LOAD(ldx, %o1 + 0x10, %g7), memcpy_retl_o2_plus_o5) EX_LD(LOAD(ldx, %o1 + 0x18, %o4), memcpy_retl_o2_plus_o5) add %o1, 0x20, %o1 subcc %o5, 0x20, %o5 EX_ST(STORE(stx, %o3, %o0 + 0x00), memcpy_retl_o2_plus_o5_plus_32) EX_ST(STORE(stx, %g2, %o0 + 0x08), memcpy_retl_o2_plus_o5_plus_24) EX_ST(STORE(stx, %g7, %o0 + 0x10), memcpy_retl_o2_plus_o5_plus_24) EX_ST(STORE(stx, %o4, %o0 + 0x18), memcpy_retl_o2_plus_o5_plus_8) bne,pt %xcc, 1b add %o0, 0x20, %o0 2: andcc %o2, 0x18, %o5 be,pt %xcc, 3f sub %o2, %o5, %o2 1: EX_LD(LOAD(ldx, %o1 + 0x00, %o3), memcpy_retl_o2_plus_o5) add %o1, 0x08, %o1 add %o0, 0x08, %o0 subcc %o5, 0x08, %o5 bne,pt %xcc, 1b EX_ST(STORE(stx, %o3, %o0 - 0x08), memcpy_retl_o2_plus_o5_plus_8) 3: brz,pt %o2, .Lexit_cp cmp %o2, 0x04 bl,pn %xcc, .Ltiny_cp nop EX_LD(LOAD(lduw, %o1 + 0x00, %o3), memcpy_retl_o2) add %o1, 0x04, %o1 add %o0, 0x04, %o0 subcc %o2, 0x04, %o2 bne,pn %xcc, .Ltiny_cp EX_ST(STORE(stw, %o3, %o0 - 0x04), memcpy_retl_o2_plus_4) ba,a,pt %xcc, .Lexit_cp .Lmedium_unaligned_cp: /* First get dest 8 byte aligned. */ sub %g0, %o0, %o3 and %o3, 0x7, %o3 brz,pt %o3, 2f sub %o2, %o3, %o2 1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), memcpy_retl_o2_plus_g1) add %o1, 1, %o1 subcc %o3, 1, %o3 add %o0, 1, %o0 bne,pt %xcc, 1b EX_ST(STORE(stb, %g2, %o0 - 0x01), memcpy_retl_o2_plus_g1_plus_1) 2: and %o1, 0x7, %o3 brz,pn %o3, .Lmedium_noprefetch_cp sll %o3, 3, %o3 mov 64, %g2 sub %g2, %o3, %g2 andn %o1, 0x7, %o1 EX_LD(LOAD(ldx, %o1 + 0x00, %o4), memcpy_retl_o2) sllx %o4, %o3, %o4 andn %o2, 0x08 - 1, %o5 sub %o2, %o5, %o2 1: EX_LD(LOAD(ldx, %o1 + 0x08, %g3), memcpy_retl_o2_plus_o5) add %o1, 0x08, %o1 subcc %o5, 0x08, %o5 srlx %g3, %g2, %g7 or %g7, %o4, %g7 EX_ST(STORE(stx, %g7, %o0 + 0x00), memcpy_retl_o2_plus_o5_plus_8) add %o0, 0x08, %o0 bne,pt %xcc, 1b sllx %g3, %o3, %o4 srl %o3, 3, %o3 add %o1, %o3, %o1 brz,pn %o2, .Lexit_cp nop ba,pt %xcc, .Lsmall_unaligned_cp .Ltiny_cp: EX_LD(LOAD(ldub, %o1 + 0x00, %o3), memcpy_retl_o2) subcc %o2, 1, %o2 be,pn %xcc, .Lexit_cp EX_ST(STORE(stb, %o3, %o0 + 0x00), memcpy_retl_o2_plus_1) EX_LD(LOAD(ldub, %o1 + 0x01, %o3), memcpy_retl_o2) subcc %o2, 1, %o2 be,pn %xcc, .Lexit_cp EX_ST(STORE(stb, %o3, %o0 + 0x01), memcpy_retl_o2_plus_1) EX_LD(LOAD(ldub, %o1 + 0x02, %o3), memcpy_retl_o2) ba,pt %xcc, .Lexit_cp EX_ST(STORE(stb, %o3, %o0 + 0x02), memcpy_retl_o2) .Lsmall_cp: andcc %g2, 0x3, %g0 bne,pn %xcc, .Lsmall_unaligned_cp andn %o2, 0x4 - 1, %o5 sub %o2, %o5, %o2 1: EX_LD(LOAD(lduw, %o1 + 0x00, %o3), memcpy_retl_o2_plus_o5) add %o1, 0x04, %o1 subcc %o5, 0x04, %o5 add %o0, 0x04, %o0 bne,pt %xcc, 1b EX_ST(STORE(stw, %o3, %o0 - 0x04), memcpy_retl_o2_plus_o5_plus_4) brz,pt %o2, .Lexit_cp nop ba,a,pt %xcc, .Ltiny_cp .Lsmall_unaligned_cp: 1: EX_LD(LOAD(ldub, %o1 + 0x00, %o3), memcpy_retl_o2) add %o1, 1, %o1 add %o0, 1, %o0 subcc %o2, 1, %o2 bne,pt %xcc, 1b EX_ST(STORE(stb, %o3, %o0 - 0x01), memcpy_retl_o2_plus_1) ba,a,pt %xcc, .Lexit_cp .Lsmallrest: tst %o2 bz,pt %xcc, .Lsmallx cmp %o2, 4 blt,pn %xcc, .Lsmallleft3 nop sub %o2, 3, 
%o2 .Lsmallnotalign4: EX_LD(LOAD(ldub, %o1, %o3), memcpy_retl_o2_plus_3)! read byte subcc %o2, 4, %o2 ! reduce count by 4 EX_ST(STORE(stb, %o3, %o0), memcpy_retl_o2_plus_7)! write byte & repeat EX_LD(LOAD(ldub, %o1+1, %o3), memcpy_retl_o2_plus_6)! for total of 4 add %o1, 4, %o1 ! advance SRC by 4 EX_ST(STORE(stb, %o3, %o0+1), memcpy_retl_o2_plus_6) EX_LD(LOAD(ldub, %o1-2, %o3), memcpy_retl_o2_plus_5) add %o0, 4, %o0 ! advance DST by 4 EX_ST(STORE(stb, %o3, %o0-2), memcpy_retl_o2_plus_5) EX_LD(LOAD(ldub, %o1-1, %o3), memcpy_retl_o2_plus_4) bgu,pt %xcc, .Lsmallnotalign4 ! loop til 3 or fewer bytes remain EX_ST(STORE(stb, %o3, %o0-1), memcpy_retl_o2_plus_4) addcc %o2, 3, %o2 ! restore count bz,pt %xcc, .Lsmallx .Lsmallleft3: ! 1, 2, or 3 bytes remain subcc %o2, 1, %o2 EX_LD(LOAD(ldub, %o1, %o3), memcpy_retl_o2_plus_1) ! load one byte bz,pt %xcc, .Lsmallx EX_ST(STORE(stb, %o3, %o0), memcpy_retl_o2_plus_1) ! store one byte EX_LD(LOAD(ldub, %o1+1, %o3), memcpy_retl_o2) ! load second byte subcc %o2, 1, %o2 bz,pt %xcc, .Lsmallx EX_ST(STORE(stb, %o3, %o0+1), memcpy_retl_o2_plus_1)! store second byte EX_LD(LOAD(ldub, %o1+2, %o3), memcpy_retl_o2) ! load third byte EX_ST(STORE(stb, %o3, %o0+2), memcpy_retl_o2) ! store third byte .Lsmallx: retl mov EX_RETVAL(%g1), %o0 .Lsmallfin: tst %o2 bnz,pn %xcc, .Lsmallleft3 nop retl mov EX_RETVAL(%g1), %o0 ! restore %o0 .Lexit_cp: retl mov EX_RETVAL(%g1), %o0 .size FUNC_NAME, .-FUNC_NAME
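The .Lmedium path above first computes how many bytes are needed to bring the destination up to an 8-byte boundary, copies those one at a time, and only then switches to 8-byte loads and stores. A C sketch of that alignment prologue; the function name is hypothetical, and all of M7memcpy's prefetch, BIS and VIS refinements are deliberately left out:

```c
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/*
 * Sketch of the destination-alignment prologue in .Lmedium: copy
 * (-dst) & 7 bytes so dst becomes 8-byte aligned, move the bulk with
 * 8-byte words, then finish the tail byte by byte.
 */
static void *memcpy_align_model(void *dst, const void *src, size_t len)
{
    unsigned char *d = dst;
    const unsigned char *s = src;
    size_t head = (size_t)(-(uintptr_t)d & 7);   /* neg %o0 ; andcc %o5, 7 */

    if (head > len)
        head = len;
    len -= head;
    while (head--)                 /* byte loop until dst is 8-byte aligned */
        *d++ = *s++;

    while (len >= 8) {             /* aligned 8-byte moves (ldx/stx)        */
        uint64_t w;
        memcpy(&w, s, 8);          /* src may still be unaligned            */
        memcpy(d, &w, 8);
        d += 8; s += 8; len -= 8;
    }
    while (len--)                  /* remaining tail bytes                  */
        *d++ = *s++;
    return dst;
}
```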
AirFortressIlikara/LS2K0300-linux-4.19
1,608
arch/sparc/lib/strlen.S
/* SPDX-License-Identifier: GPL-2.0 */ /* strlen.S: Sparc optimized strlen code * Hand optimized from GNU libc's strlen * Copyright (C) 1991,1996 Free Software Foundation * Copyright (C) 1996,2008 David S. Miller (davem@davemloft.net) * Copyright (C) 1996, 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) */ #include <linux/linkage.h> #include <asm/asm.h> #include <asm/export.h> #define LO_MAGIC 0x01010101 #define HI_MAGIC 0x80808080 .text ENTRY(strlen) mov %o0, %o1 andcc %o0, 3, %g0 BRANCH32(be, pt, 9f) sethi %hi(HI_MAGIC), %o4 ldub [%o0], %o5 BRANCH_REG_ZERO(pn, %o5, 11f) add %o0, 1, %o0 andcc %o0, 3, %g0 BRANCH32(be, pn, 4f) or %o4, %lo(HI_MAGIC), %o3 ldub [%o0], %o5 BRANCH_REG_ZERO(pn, %o5, 12f) add %o0, 1, %o0 andcc %o0, 3, %g0 BRANCH32(be, pt, 5f) sethi %hi(LO_MAGIC), %o4 ldub [%o0], %o5 BRANCH_REG_ZERO(pn, %o5, 13f) add %o0, 1, %o0 BRANCH32(ba, pt, 8f) or %o4, %lo(LO_MAGIC), %o2 9: or %o4, %lo(HI_MAGIC), %o3 4: sethi %hi(LO_MAGIC), %o4 5: or %o4, %lo(LO_MAGIC), %o2 8: ld [%o0], %o5 2: sub %o5, %o2, %o4 andcc %o4, %o3, %g0 BRANCH32(be, pt, 8b) add %o0, 4, %o0 /* Check every byte. */ srl %o5, 24, %g7 andcc %g7, 0xff, %g0 BRANCH32(be, pn, 1f) add %o0, -4, %o4 srl %o5, 16, %g7 andcc %g7, 0xff, %g0 BRANCH32(be, pn, 1f) add %o4, 1, %o4 srl %o5, 8, %g7 andcc %g7, 0xff, %g0 BRANCH32(be, pn, 1f) add %o4, 1, %o4 andcc %o5, 0xff, %g0 BRANCH32_ANNUL(bne, pt, 2b) ld [%o0], %o5 add %o4, 1, %o4 1: retl sub %o4, %o1, %o0 11: retl mov 0, %o0 12: retl mov 1, %o0 13: retl mov 2, %o0 ENDPROC(strlen) EXPORT_SYMBOL(strlen)
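The LO_MAGIC/HI_MAGIC pair in strlen.S implements the classic word-at-a-time zero-byte probe: the loop keeps reading 4 bytes while (word - 0x01010101) & 0x80808080 stays zero, then falls back to checking each byte, because the probe can also trigger on bytes with the top bit set. An illustrative C rendering of that scan (not the kernel's generic strlen, just the idea behind the magic constants):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define LO_MAGIC 0x01010101u
#define HI_MAGIC 0x80808080u

/* Illustrative C version of the word-at-a-time scan used above. */
static size_t strlen_word_at_a_time(const char *s)
{
	const char *p = s;

	/* Align to a 4-byte boundary one byte at a time, as the prologue does. */
	while ((uintptr_t)p & 3) {
		if (*p == '\0')
			return p - s;
		p++;
	}

	for (;;) {
		uint32_t w;

		memcpy(&w, p, sizeof(w));
		/* May also fire on 0x80..0xff bytes, hence the byte check below. */
		if ((w - LO_MAGIC) & HI_MAGIC) {
			for (int i = 0; i < 4; i++)
				if (p[i] == '\0')
					return (p + i) - s;
		}
		p += 4;
	}
}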
AirFortressIlikara/LS2K0300-linux-4.19
16,228
arch/sparc/lib/xor.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/sparc64/lib/xor.S * * High speed xor_block operation for RAID4/5 utilizing the * UltraSparc Visual Instruction Set and Niagara store-init/twin-load. * * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz) * Copyright (C) 2006 David S. Miller <davem@davemloft.net> */ #include <linux/linkage.h> #include <asm/visasm.h> #include <asm/asi.h> #include <asm/dcu.h> #include <asm/spitfire.h> #include <asm/export.h> /* * Requirements: * !(((long)dest | (long)sourceN) & (64 - 1)) && * !(len & 127) && len >= 256 */ .text /* VIS versions. */ ENTRY(xor_vis_2) rd %fprs, %o5 andcc %o5, FPRS_FEF|FPRS_DU, %g0 be,pt %icc, 0f sethi %hi(VISenter), %g1 jmpl %g1 + %lo(VISenter), %g7 add %g7, 8, %g7 0: wr %g0, FPRS_FEF, %fprs rd %asi, %g1 wr %g0, ASI_BLK_P, %asi membar #LoadStore|#StoreLoad|#StoreStore sub %o0, 128, %o0 ldda [%o1] %asi, %f0 ldda [%o2] %asi, %f16 2: ldda [%o1 + 64] %asi, %f32 fxor %f0, %f16, %f16 fxor %f2, %f18, %f18 fxor %f4, %f20, %f20 fxor %f6, %f22, %f22 fxor %f8, %f24, %f24 fxor %f10, %f26, %f26 fxor %f12, %f28, %f28 fxor %f14, %f30, %f30 stda %f16, [%o1] %asi ldda [%o2 + 64] %asi, %f48 ldda [%o1 + 128] %asi, %f0 fxor %f32, %f48, %f48 fxor %f34, %f50, %f50 add %o1, 128, %o1 fxor %f36, %f52, %f52 add %o2, 128, %o2 fxor %f38, %f54, %f54 subcc %o0, 128, %o0 fxor %f40, %f56, %f56 fxor %f42, %f58, %f58 fxor %f44, %f60, %f60 fxor %f46, %f62, %f62 stda %f48, [%o1 - 64] %asi bne,pt %xcc, 2b ldda [%o2] %asi, %f16 ldda [%o1 + 64] %asi, %f32 fxor %f0, %f16, %f16 fxor %f2, %f18, %f18 fxor %f4, %f20, %f20 fxor %f6, %f22, %f22 fxor %f8, %f24, %f24 fxor %f10, %f26, %f26 fxor %f12, %f28, %f28 fxor %f14, %f30, %f30 stda %f16, [%o1] %asi ldda [%o2 + 64] %asi, %f48 membar #Sync fxor %f32, %f48, %f48 fxor %f34, %f50, %f50 fxor %f36, %f52, %f52 fxor %f38, %f54, %f54 fxor %f40, %f56, %f56 fxor %f42, %f58, %f58 fxor %f44, %f60, %f60 fxor %f46, %f62, %f62 stda %f48, [%o1 + 64] %asi membar #Sync|#StoreStore|#StoreLoad wr %g1, %g0, %asi retl wr %g0, 0, %fprs ENDPROC(xor_vis_2) EXPORT_SYMBOL(xor_vis_2) ENTRY(xor_vis_3) rd %fprs, %o5 andcc %o5, FPRS_FEF|FPRS_DU, %g0 be,pt %icc, 0f sethi %hi(VISenter), %g1 jmpl %g1 + %lo(VISenter), %g7 add %g7, 8, %g7 0: wr %g0, FPRS_FEF, %fprs rd %asi, %g1 wr %g0, ASI_BLK_P, %asi membar #LoadStore|#StoreLoad|#StoreStore sub %o0, 64, %o0 ldda [%o1] %asi, %f0 ldda [%o2] %asi, %f16 3: ldda [%o3] %asi, %f32 fxor %f0, %f16, %f48 fxor %f2, %f18, %f50 add %o1, 64, %o1 fxor %f4, %f20, %f52 fxor %f6, %f22, %f54 add %o2, 64, %o2 fxor %f8, %f24, %f56 fxor %f10, %f26, %f58 fxor %f12, %f28, %f60 fxor %f14, %f30, %f62 ldda [%o1] %asi, %f0 fxor %f48, %f32, %f48 fxor %f50, %f34, %f50 fxor %f52, %f36, %f52 fxor %f54, %f38, %f54 add %o3, 64, %o3 fxor %f56, %f40, %f56 fxor %f58, %f42, %f58 subcc %o0, 64, %o0 fxor %f60, %f44, %f60 fxor %f62, %f46, %f62 stda %f48, [%o1 - 64] %asi bne,pt %xcc, 3b ldda [%o2] %asi, %f16 ldda [%o3] %asi, %f32 fxor %f0, %f16, %f48 fxor %f2, %f18, %f50 fxor %f4, %f20, %f52 fxor %f6, %f22, %f54 fxor %f8, %f24, %f56 fxor %f10, %f26, %f58 fxor %f12, %f28, %f60 fxor %f14, %f30, %f62 membar #Sync fxor %f48, %f32, %f48 fxor %f50, %f34, %f50 fxor %f52, %f36, %f52 fxor %f54, %f38, %f54 fxor %f56, %f40, %f56 fxor %f58, %f42, %f58 fxor %f60, %f44, %f60 fxor %f62, %f46, %f62 stda %f48, [%o1] %asi membar #Sync|#StoreStore|#StoreLoad wr %g1, %g0, %asi retl wr %g0, 0, %fprs ENDPROC(xor_vis_3) EXPORT_SYMBOL(xor_vis_3) ENTRY(xor_vis_4) rd %fprs, %o5 andcc %o5, FPRS_FEF|FPRS_DU, %g0 be,pt %icc, 0f sethi %hi(VISenter), %g1 jmpl %g1 + 
%lo(VISenter), %g7 add %g7, 8, %g7 0: wr %g0, FPRS_FEF, %fprs rd %asi, %g1 wr %g0, ASI_BLK_P, %asi membar #LoadStore|#StoreLoad|#StoreStore sub %o0, 64, %o0 ldda [%o1] %asi, %f0 ldda [%o2] %asi, %f16 4: ldda [%o3] %asi, %f32 fxor %f0, %f16, %f16 fxor %f2, %f18, %f18 add %o1, 64, %o1 fxor %f4, %f20, %f20 fxor %f6, %f22, %f22 add %o2, 64, %o2 fxor %f8, %f24, %f24 fxor %f10, %f26, %f26 fxor %f12, %f28, %f28 fxor %f14, %f30, %f30 ldda [%o4] %asi, %f48 fxor %f16, %f32, %f32 fxor %f18, %f34, %f34 fxor %f20, %f36, %f36 fxor %f22, %f38, %f38 add %o3, 64, %o3 fxor %f24, %f40, %f40 fxor %f26, %f42, %f42 fxor %f28, %f44, %f44 fxor %f30, %f46, %f46 ldda [%o1] %asi, %f0 fxor %f32, %f48, %f48 fxor %f34, %f50, %f50 fxor %f36, %f52, %f52 add %o4, 64, %o4 fxor %f38, %f54, %f54 fxor %f40, %f56, %f56 fxor %f42, %f58, %f58 subcc %o0, 64, %o0 fxor %f44, %f60, %f60 fxor %f46, %f62, %f62 stda %f48, [%o1 - 64] %asi bne,pt %xcc, 4b ldda [%o2] %asi, %f16 ldda [%o3] %asi, %f32 fxor %f0, %f16, %f16 fxor %f2, %f18, %f18 fxor %f4, %f20, %f20 fxor %f6, %f22, %f22 fxor %f8, %f24, %f24 fxor %f10, %f26, %f26 fxor %f12, %f28, %f28 fxor %f14, %f30, %f30 ldda [%o4] %asi, %f48 fxor %f16, %f32, %f32 fxor %f18, %f34, %f34 fxor %f20, %f36, %f36 fxor %f22, %f38, %f38 fxor %f24, %f40, %f40 fxor %f26, %f42, %f42 fxor %f28, %f44, %f44 fxor %f30, %f46, %f46 membar #Sync fxor %f32, %f48, %f48 fxor %f34, %f50, %f50 fxor %f36, %f52, %f52 fxor %f38, %f54, %f54 fxor %f40, %f56, %f56 fxor %f42, %f58, %f58 fxor %f44, %f60, %f60 fxor %f46, %f62, %f62 stda %f48, [%o1] %asi membar #Sync|#StoreStore|#StoreLoad wr %g1, %g0, %asi retl wr %g0, 0, %fprs ENDPROC(xor_vis_4) EXPORT_SYMBOL(xor_vis_4) ENTRY(xor_vis_5) save %sp, -192, %sp rd %fprs, %o5 andcc %o5, FPRS_FEF|FPRS_DU, %g0 be,pt %icc, 0f sethi %hi(VISenter), %g1 jmpl %g1 + %lo(VISenter), %g7 add %g7, 8, %g7 0: wr %g0, FPRS_FEF, %fprs rd %asi, %g1 wr %g0, ASI_BLK_P, %asi membar #LoadStore|#StoreLoad|#StoreStore sub %i0, 64, %i0 ldda [%i1] %asi, %f0 ldda [%i2] %asi, %f16 5: ldda [%i3] %asi, %f32 fxor %f0, %f16, %f48 fxor %f2, %f18, %f50 add %i1, 64, %i1 fxor %f4, %f20, %f52 fxor %f6, %f22, %f54 add %i2, 64, %i2 fxor %f8, %f24, %f56 fxor %f10, %f26, %f58 fxor %f12, %f28, %f60 fxor %f14, %f30, %f62 ldda [%i4] %asi, %f16 fxor %f48, %f32, %f48 fxor %f50, %f34, %f50 fxor %f52, %f36, %f52 fxor %f54, %f38, %f54 add %i3, 64, %i3 fxor %f56, %f40, %f56 fxor %f58, %f42, %f58 fxor %f60, %f44, %f60 fxor %f62, %f46, %f62 ldda [%i5] %asi, %f32 fxor %f48, %f16, %f48 fxor %f50, %f18, %f50 add %i4, 64, %i4 fxor %f52, %f20, %f52 fxor %f54, %f22, %f54 add %i5, 64, %i5 fxor %f56, %f24, %f56 fxor %f58, %f26, %f58 fxor %f60, %f28, %f60 fxor %f62, %f30, %f62 ldda [%i1] %asi, %f0 fxor %f48, %f32, %f48 fxor %f50, %f34, %f50 fxor %f52, %f36, %f52 fxor %f54, %f38, %f54 fxor %f56, %f40, %f56 fxor %f58, %f42, %f58 subcc %i0, 64, %i0 fxor %f60, %f44, %f60 fxor %f62, %f46, %f62 stda %f48, [%i1 - 64] %asi bne,pt %xcc, 5b ldda [%i2] %asi, %f16 ldda [%i3] %asi, %f32 fxor %f0, %f16, %f48 fxor %f2, %f18, %f50 fxor %f4, %f20, %f52 fxor %f6, %f22, %f54 fxor %f8, %f24, %f56 fxor %f10, %f26, %f58 fxor %f12, %f28, %f60 fxor %f14, %f30, %f62 ldda [%i4] %asi, %f16 fxor %f48, %f32, %f48 fxor %f50, %f34, %f50 fxor %f52, %f36, %f52 fxor %f54, %f38, %f54 fxor %f56, %f40, %f56 fxor %f58, %f42, %f58 fxor %f60, %f44, %f60 fxor %f62, %f46, %f62 ldda [%i5] %asi, %f32 fxor %f48, %f16, %f48 fxor %f50, %f18, %f50 fxor %f52, %f20, %f52 fxor %f54, %f22, %f54 fxor %f56, %f24, %f56 fxor %f58, %f26, %f58 fxor %f60, %f28, %f60 fxor %f62, %f30, %f62 membar 
#Sync fxor %f48, %f32, %f48 fxor %f50, %f34, %f50 fxor %f52, %f36, %f52 fxor %f54, %f38, %f54 fxor %f56, %f40, %f56 fxor %f58, %f42, %f58 fxor %f60, %f44, %f60 fxor %f62, %f46, %f62 stda %f48, [%i1] %asi membar #Sync|#StoreStore|#StoreLoad wr %g1, %g0, %asi wr %g0, 0, %fprs ret restore ENDPROC(xor_vis_5) EXPORT_SYMBOL(xor_vis_5) /* Niagara versions. */ ENTRY(xor_niagara_2) /* %o0=bytes, %o1=dest, %o2=src */ save %sp, -192, %sp prefetch [%i1], #n_writes prefetch [%i2], #one_read rd %asi, %g7 wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi srlx %i0, 6, %g1 mov %i1, %i0 mov %i2, %i1 1: ldda [%i1 + 0x00] %asi, %i2 /* %i2/%i3 = src + 0x00 */ ldda [%i1 + 0x10] %asi, %i4 /* %i4/%i5 = src + 0x10 */ ldda [%i1 + 0x20] %asi, %g2 /* %g2/%g3 = src + 0x20 */ ldda [%i1 + 0x30] %asi, %l0 /* %l0/%l1 = src + 0x30 */ prefetch [%i1 + 0x40], #one_read ldda [%i0 + 0x00] %asi, %o0 /* %o0/%o1 = dest + 0x00 */ ldda [%i0 + 0x10] %asi, %o2 /* %o2/%o3 = dest + 0x10 */ ldda [%i0 + 0x20] %asi, %o4 /* %o4/%o5 = dest + 0x20 */ ldda [%i0 + 0x30] %asi, %l2 /* %l2/%l3 = dest + 0x30 */ prefetch [%i0 + 0x40], #n_writes xor %o0, %i2, %o0 xor %o1, %i3, %o1 stxa %o0, [%i0 + 0x00] %asi stxa %o1, [%i0 + 0x08] %asi xor %o2, %i4, %o2 xor %o3, %i5, %o3 stxa %o2, [%i0 + 0x10] %asi stxa %o3, [%i0 + 0x18] %asi xor %o4, %g2, %o4 xor %o5, %g3, %o5 stxa %o4, [%i0 + 0x20] %asi stxa %o5, [%i0 + 0x28] %asi xor %l2, %l0, %l2 xor %l3, %l1, %l3 stxa %l2, [%i0 + 0x30] %asi stxa %l3, [%i0 + 0x38] %asi add %i0, 0x40, %i0 subcc %g1, 1, %g1 bne,pt %xcc, 1b add %i1, 0x40, %i1 membar #Sync wr %g7, 0x0, %asi ret restore ENDPROC(xor_niagara_2) EXPORT_SYMBOL(xor_niagara_2) ENTRY(xor_niagara_3) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2 */ save %sp, -192, %sp prefetch [%i1], #n_writes prefetch [%i2], #one_read prefetch [%i3], #one_read rd %asi, %g7 wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi srlx %i0, 6, %g1 mov %i1, %i0 mov %i2, %i1 mov %i3, %l7 1: ldda [%i1 + 0x00] %asi, %i2 /* %i2/%i3 = src1 + 0x00 */ ldda [%i1 + 0x10] %asi, %i4 /* %i4/%i5 = src1 + 0x10 */ ldda [%l7 + 0x00] %asi, %g2 /* %g2/%g3 = src2 + 0x00 */ ldda [%l7 + 0x10] %asi, %l0 /* %l0/%l1 = src2 + 0x10 */ ldda [%i0 + 0x00] %asi, %o0 /* %o0/%o1 = dest + 0x00 */ ldda [%i0 + 0x10] %asi, %o2 /* %o2/%o3 = dest + 0x10 */ xor %g2, %i2, %g2 xor %g3, %i3, %g3 xor %o0, %g2, %o0 xor %o1, %g3, %o1 stxa %o0, [%i0 + 0x00] %asi stxa %o1, [%i0 + 0x08] %asi ldda [%i1 + 0x20] %asi, %i2 /* %i2/%i3 = src1 + 0x20 */ ldda [%l7 + 0x20] %asi, %g2 /* %g2/%g3 = src2 + 0x20 */ ldda [%i0 + 0x20] %asi, %o0 /* %o0/%o1 = dest + 0x20 */ xor %l0, %i4, %l0 xor %l1, %i5, %l1 xor %o2, %l0, %o2 xor %o3, %l1, %o3 stxa %o2, [%i0 + 0x10] %asi stxa %o3, [%i0 + 0x18] %asi ldda [%i1 + 0x30] %asi, %i4 /* %i4/%i5 = src1 + 0x30 */ ldda [%l7 + 0x30] %asi, %l0 /* %l0/%l1 = src2 + 0x30 */ ldda [%i0 + 0x30] %asi, %o2 /* %o2/%o3 = dest + 0x30 */ prefetch [%i1 + 0x40], #one_read prefetch [%l7 + 0x40], #one_read prefetch [%i0 + 0x40], #n_writes xor %g2, %i2, %g2 xor %g3, %i3, %g3 xor %o0, %g2, %o0 xor %o1, %g3, %o1 stxa %o0, [%i0 + 0x20] %asi stxa %o1, [%i0 + 0x28] %asi xor %l0, %i4, %l0 xor %l1, %i5, %l1 xor %o2, %l0, %o2 xor %o3, %l1, %o3 stxa %o2, [%i0 + 0x30] %asi stxa %o3, [%i0 + 0x38] %asi add %i0, 0x40, %i0 add %i1, 0x40, %i1 subcc %g1, 1, %g1 bne,pt %xcc, 1b add %l7, 0x40, %l7 membar #Sync wr %g7, 0x0, %asi ret restore ENDPROC(xor_niagara_3) EXPORT_SYMBOL(xor_niagara_3) ENTRY(xor_niagara_4) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */ save %sp, -192, %sp prefetch [%i1], #n_writes prefetch [%i2], #one_read prefetch [%i3], #one_read prefetch 
[%i4], #one_read rd %asi, %g7 wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi srlx %i0, 6, %g1 mov %i1, %i0 mov %i2, %i1 mov %i3, %l7 mov %i4, %l6 1: ldda [%i1 + 0x00] %asi, %i2 /* %i2/%i3 = src1 + 0x00 */ ldda [%l7 + 0x00] %asi, %i4 /* %i4/%i5 = src2 + 0x00 */ ldda [%l6 + 0x00] %asi, %g2 /* %g2/%g3 = src3 + 0x00 */ ldda [%i0 + 0x00] %asi, %l0 /* %l0/%l1 = dest + 0x00 */ xor %i4, %i2, %i4 xor %i5, %i3, %i5 ldda [%i1 + 0x10] %asi, %i2 /* %i2/%i3 = src1 + 0x10 */ xor %g2, %i4, %g2 xor %g3, %i5, %g3 ldda [%l7 + 0x10] %asi, %i4 /* %i4/%i5 = src2 + 0x10 */ xor %l0, %g2, %l0 xor %l1, %g3, %l1 stxa %l0, [%i0 + 0x00] %asi stxa %l1, [%i0 + 0x08] %asi ldda [%l6 + 0x10] %asi, %g2 /* %g2/%g3 = src3 + 0x10 */ ldda [%i0 + 0x10] %asi, %l0 /* %l0/%l1 = dest + 0x10 */ xor %i4, %i2, %i4 xor %i5, %i3, %i5 ldda [%i1 + 0x20] %asi, %i2 /* %i2/%i3 = src1 + 0x20 */ xor %g2, %i4, %g2 xor %g3, %i5, %g3 ldda [%l7 + 0x20] %asi, %i4 /* %i4/%i5 = src2 + 0x20 */ xor %l0, %g2, %l0 xor %l1, %g3, %l1 stxa %l0, [%i0 + 0x10] %asi stxa %l1, [%i0 + 0x18] %asi ldda [%l6 + 0x20] %asi, %g2 /* %g2/%g3 = src3 + 0x20 */ ldda [%i0 + 0x20] %asi, %l0 /* %l0/%l1 = dest + 0x20 */ xor %i4, %i2, %i4 xor %i5, %i3, %i5 ldda [%i1 + 0x30] %asi, %i2 /* %i2/%i3 = src1 + 0x30 */ xor %g2, %i4, %g2 xor %g3, %i5, %g3 ldda [%l7 + 0x30] %asi, %i4 /* %i4/%i5 = src2 + 0x30 */ xor %l0, %g2, %l0 xor %l1, %g3, %l1 stxa %l0, [%i0 + 0x20] %asi stxa %l1, [%i0 + 0x28] %asi ldda [%l6 + 0x30] %asi, %g2 /* %g2/%g3 = src3 + 0x30 */ ldda [%i0 + 0x30] %asi, %l0 /* %l0/%l1 = dest + 0x30 */ prefetch [%i1 + 0x40], #one_read prefetch [%l7 + 0x40], #one_read prefetch [%l6 + 0x40], #one_read prefetch [%i0 + 0x40], #n_writes xor %i4, %i2, %i4 xor %i5, %i3, %i5 xor %g2, %i4, %g2 xor %g3, %i5, %g3 xor %l0, %g2, %l0 xor %l1, %g3, %l1 stxa %l0, [%i0 + 0x30] %asi stxa %l1, [%i0 + 0x38] %asi add %i0, 0x40, %i0 add %i1, 0x40, %i1 add %l7, 0x40, %l7 subcc %g1, 1, %g1 bne,pt %xcc, 1b add %l6, 0x40, %l6 membar #Sync wr %g7, 0x0, %asi ret restore ENDPROC(xor_niagara_4) EXPORT_SYMBOL(xor_niagara_4) ENTRY(xor_niagara_5) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3, %o5=src4 */ save %sp, -192, %sp prefetch [%i1], #n_writes prefetch [%i2], #one_read prefetch [%i3], #one_read prefetch [%i4], #one_read prefetch [%i5], #one_read rd %asi, %g7 wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi srlx %i0, 6, %g1 mov %i1, %i0 mov %i2, %i1 mov %i3, %l7 mov %i4, %l6 mov %i5, %l5 1: ldda [%i1 + 0x00] %asi, %i2 /* %i2/%i3 = src1 + 0x00 */ ldda [%l7 + 0x00] %asi, %i4 /* %i4/%i5 = src2 + 0x00 */ ldda [%l6 + 0x00] %asi, %g2 /* %g2/%g3 = src3 + 0x00 */ ldda [%l5 + 0x00] %asi, %l0 /* %l0/%l1 = src4 + 0x00 */ ldda [%i0 + 0x00] %asi, %l2 /* %l2/%l3 = dest + 0x00 */ xor %i4, %i2, %i4 xor %i5, %i3, %i5 ldda [%i1 + 0x10] %asi, %i2 /* %i2/%i3 = src1 + 0x10 */ xor %g2, %i4, %g2 xor %g3, %i5, %g3 ldda [%l7 + 0x10] %asi, %i4 /* %i4/%i5 = src2 + 0x10 */ xor %l0, %g2, %l0 xor %l1, %g3, %l1 ldda [%l6 + 0x10] %asi, %g2 /* %g2/%g3 = src3 + 0x10 */ xor %l2, %l0, %l2 xor %l3, %l1, %l3 stxa %l2, [%i0 + 0x00] %asi stxa %l3, [%i0 + 0x08] %asi ldda [%l5 + 0x10] %asi, %l0 /* %l0/%l1 = src4 + 0x10 */ ldda [%i0 + 0x10] %asi, %l2 /* %l2/%l3 = dest + 0x10 */ xor %i4, %i2, %i4 xor %i5, %i3, %i5 ldda [%i1 + 0x20] %asi, %i2 /* %i2/%i3 = src1 + 0x20 */ xor %g2, %i4, %g2 xor %g3, %i5, %g3 ldda [%l7 + 0x20] %asi, %i4 /* %i4/%i5 = src2 + 0x20 */ xor %l0, %g2, %l0 xor %l1, %g3, %l1 ldda [%l6 + 0x20] %asi, %g2 /* %g2/%g3 = src3 + 0x20 */ xor %l2, %l0, %l2 xor %l3, %l1, %l3 stxa %l2, [%i0 + 0x10] %asi stxa %l3, [%i0 + 0x18] %asi ldda [%l5 + 0x20] %asi, 
%l0 /* %l0/%l1 = src4 + 0x20 */ ldda [%i0 + 0x20] %asi, %l2 /* %l2/%l3 = dest + 0x20 */ xor %i4, %i2, %i4 xor %i5, %i3, %i5 ldda [%i1 + 0x30] %asi, %i2 /* %i2/%i3 = src1 + 0x30 */ xor %g2, %i4, %g2 xor %g3, %i5, %g3 ldda [%l7 + 0x30] %asi, %i4 /* %i4/%i5 = src2 + 0x30 */ xor %l0, %g2, %l0 xor %l1, %g3, %l1 ldda [%l6 + 0x30] %asi, %g2 /* %g2/%g3 = src3 + 0x30 */ xor %l2, %l0, %l2 xor %l3, %l1, %l3 stxa %l2, [%i0 + 0x20] %asi stxa %l3, [%i0 + 0x28] %asi ldda [%l5 + 0x30] %asi, %l0 /* %l0/%l1 = src4 + 0x30 */ ldda [%i0 + 0x30] %asi, %l2 /* %l2/%l3 = dest + 0x30 */ prefetch [%i1 + 0x40], #one_read prefetch [%l7 + 0x40], #one_read prefetch [%l6 + 0x40], #one_read prefetch [%l5 + 0x40], #one_read prefetch [%i0 + 0x40], #n_writes xor %i4, %i2, %i4 xor %i5, %i3, %i5 xor %g2, %i4, %g2 xor %g3, %i5, %g3 xor %l0, %g2, %l0 xor %l1, %g3, %l1 xor %l2, %l0, %l2 xor %l3, %l1, %l3 stxa %l2, [%i0 + 0x30] %asi stxa %l3, [%i0 + 0x38] %asi add %i0, 0x40, %i0 add %i1, 0x40, %i1 add %l7, 0x40, %l7 add %l6, 0x40, %l6 subcc %g1, 1, %g1 bne,pt %xcc, 1b add %l5, 0x40, %l5 membar #Sync wr %g7, 0x0, %asi ret restore ENDPROC(xor_niagara_5) EXPORT_SYMBOL(xor_niagara_5)
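The header of xor.S states the contract that lets both the VIS and Niagara loops use 64-byte block loads/stores and unroll across 128-byte chunks: destination and every source must be 64-byte aligned, and the length must be a multiple of 128 and at least 256. A hedged plain-C reference for the two-source case, useful mainly to restate that contract (this is not the kernel's xor_block API, just an equivalent of what xor_vis_2/xor_niagara_2 compute):

#include <stdint.h>
#include <stddef.h>
#include <assert.h>

/* Plain C equivalent of the two-source routines: dst ^= src, with the
 * same alignment/length requirements asserted up front. */
static void xor_2_reference(size_t bytes, uint64_t *dst, const uint64_t *src)
{
	assert((((uintptr_t)dst | (uintptr_t)src) & 63) == 0);
	assert((bytes & 127) == 0 && bytes >= 256);

	for (size_t i = 0; i < bytes / sizeof(uint64_t); i++)
		dst[i] ^= src[i];
}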
AirFortressIlikara/LS2K0300-linux-4.19
1,059
arch/sparc/lib/fls64.S
/* fls64.S: SPARC default __fls definition.
 *
 * SPARC default __fls definition, which follows the same algorithm as
 * in generic __fls(). This function will be boot time patched on T4
 * and onward.
 */

#include <linux/linkage.h>
#include <asm/export.h>

	.text
	.register	%g2, #scratch
	.register	%g3, #scratch

ENTRY(__fls)
	mov	-1, %g2
	sllx	%g2, 32, %g2
	and	%o0, %g2, %g2
	brnz,pt	%g2, 1f
	mov	63, %g1
	sllx	%o0, 32, %o0
	mov	31, %g1
1:	mov	-1, %g2
	sllx	%g2, 48, %g2
	and	%o0, %g2, %g2
	brnz,pt	%g2, 2f
	mov	-1, %g2
	sllx	%o0, 16, %o0
	add	%g1, -16, %g1
2:	mov	-1, %g2
	sllx	%g2, 56, %g2
	and	%o0, %g2, %g2
	brnz,pt	%g2, 3f
	mov	-1, %g2
	sllx	%o0, 8, %o0
	add	%g1, -8, %g1
3:	sllx	%g2, 60, %g2
	and	%o0, %g2, %g2
	brnz,pt	%g2, 4f
	mov	-1, %g2
	sllx	%o0, 4, %o0
	add	%g1, -4, %g1
4:	sllx	%g2, 62, %g2
	and	%o0, %g2, %g2
	brnz,pt	%g2, 5f
	mov	-1, %g3
	sllx	%o0, 2, %o0
	add	%g1, -2, %g1
5:	mov	0, %g2
	sllx	%g3, 63, %g3
	and	%o0, %g3, %o0
	movre	%o0, 1, %g2
	sub	%g1, %g2, %g1
	jmp	%o7+8
	sra	%g1, 0, %o0
ENDPROC(__fls)
EXPORT_SYMBOL(__fls)
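As the header says, the routine follows the generic __fls() shape: start with candidate bit 63 and, for each window (upper 32, 16, 8, 4, 2 bits), shift the argument left and drop the candidate when the window is empty, with the final movre handling the last bit. A small C sketch of that binary search (illustrative only; it is not the boot-time-patched T4 version):

#include <stdint.h>

/* Illustrative binary-search __fls(): index of the most significant set
 * bit. The result is undefined for word == 0, as with the usual __fls(). */
static int fls64_sketch(uint64_t word)
{
	int bit = 63;

	if (!(word & 0xffffffff00000000ull)) { word <<= 32; bit -= 32; }
	if (!(word & 0xffff000000000000ull)) { word <<= 16; bit -= 16; }
	if (!(word & 0xff00000000000000ull)) { word <<= 8;  bit -= 8;  }
	if (!(word & 0xf000000000000000ull)) { word <<= 4;  bit -= 4;  }
	if (!(word & 0xc000000000000000ull)) { word <<= 2;  bit -= 2;  }
	if (!(word & 0x8000000000000000ull)) { bit -= 1; }

	return bit;
}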
AirFortressIlikara/LS2K0300-linux-4.19
1,211
arch/sparc/lib/M7patch.S
/*
 * M7patch.S: Patch generic routines with M7 variant.
 *
 * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
 */

#include <linux/linkage.h>

#define BRANCH_ALWAYS	0x10680000
#define NOP		0x01000000

#define NG_DO_PATCH(OLD, NEW)	\
	sethi	%hi(NEW), %g1; \
	or	%g1, %lo(NEW), %g1; \
	sethi	%hi(OLD), %g2; \
	or	%g2, %lo(OLD), %g2; \
	sub	%g1, %g2, %g1; \
	sethi	%hi(BRANCH_ALWAYS), %g3; \
	sll	%g1, 11, %g1; \
	srl	%g1, 11 + 2, %g1; \
	or	%g3, %lo(BRANCH_ALWAYS), %g3; \
	or	%g3, %g1, %g3; \
	stw	%g3, [%g2]; \
	sethi	%hi(NOP), %g3; \
	or	%g3, %lo(NOP), %g3; \
	stw	%g3, [%g2 + 0x4]; \
	flush	%g2;

ENTRY(m7_patch_copyops)
	NG_DO_PATCH(memcpy, M7memcpy)
	NG_DO_PATCH(raw_copy_from_user, M7copy_from_user)
	NG_DO_PATCH(raw_copy_to_user, M7copy_to_user)
	retl
	nop
ENDPROC(m7_patch_copyops)

ENTRY(m7_patch_bzero)
	NG_DO_PATCH(memset, M7memset)
	NG_DO_PATCH(__bzero, M7bzero)
	NG_DO_PATCH(__clear_user, NGclear_user)
	NG_DO_PATCH(tsb_init, NGtsb_init)
	retl
	nop
ENDPROC(m7_patch_bzero)

ENTRY(m7_patch_pageops)
	NG_DO_PATCH(copy_user_page, NG4copy_user_page)
	NG_DO_PATCH(_clear_page, M7clear_page)
	NG_DO_PATCH(clear_user_page, M7clear_user_page)
	retl
	nop
ENDPROC(m7_patch_pageops)
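NG_DO_PATCH overwrites the first two instructions of a generic routine with a "branch always" to the M7 variant followed by a nop: it takes the byte offset NEW - OLD, converts it to a word displacement, masks it to the 19-bit displacement field (that is what the sll %g1, 11 / srl %g1, 11 + 2 pair does), and ORs it into the 0x10680000 opcode. A hypothetical C rendering of the same encoding:

#include <stdint.h>

#define BRANCH_ALWAYS	0x10680000u
#define NOP		0x01000000u

/* Hypothetical C sketch of NG_DO_PATCH: make the first instruction of the
 * generic routine at 'old_addr' branch to the M7 routine at 'new_addr'.
 * The word displacement lives in the low 19 bits of the branch opcode. */
static void ng_do_patch_sketch(unsigned long old_addr, unsigned long new_addr)
{
	uint32_t delta = (uint32_t)(new_addr - old_addr);
	uint32_t disp19 = (delta << 11) >> 13;	/* == (delta >> 2) & 0x7ffff */
	uint32_t *site = (uint32_t *)old_addr;

	site[0] = BRANCH_ALWAYS | disp19;
	site[1] = NOP;
	/* The assembly then runs "flush %g2" so the I-cache picks up
	 * the rewritten instructions. */
}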
AirFortressIlikara/LS2K0300-linux-4.19
3,984
arch/sparc/lib/checksum_64.S
/* SPDX-License-Identifier: GPL-2.0 */ /* checksum.S: Sparc V9 optimized checksum code. * * Copyright(C) 1995 Linus Torvalds * Copyright(C) 1995 Miguel de Icaza * Copyright(C) 1996, 2000 David S. Miller * Copyright(C) 1997 Jakub Jelinek * * derived from: * Linux/Alpha checksum c-code * Linux/ix86 inline checksum assembly * RFC1071 Computing the Internet Checksum (esp. Jacobsons m68k code) * David Mosberger-Tang for optimized reference c-code * BSD4.4 portable checksum routine */ #include <asm/export.h> .text csum_partial_fix_alignment: /* We checked for zero length already, so there must be * at least one byte. */ be,pt %icc, 1f nop ldub [%o0 + 0x00], %o4 add %o0, 1, %o0 sub %o1, 1, %o1 1: andcc %o0, 0x2, %g0 be,pn %icc, csum_partial_post_align cmp %o1, 2 blu,pn %icc, csum_partial_end_cruft nop lduh [%o0 + 0x00], %o5 add %o0, 2, %o0 sub %o1, 2, %o1 ba,pt %xcc, csum_partial_post_align add %o5, %o4, %o4 .align 32 .globl csum_partial .type csum_partial,#function EXPORT_SYMBOL(csum_partial) csum_partial: /* %o0=buff, %o1=len, %o2=sum */ prefetch [%o0 + 0x000], #n_reads clr %o4 prefetch [%o0 + 0x040], #n_reads brz,pn %o1, csum_partial_finish andcc %o0, 0x3, %g0 /* We "remember" whether the lowest bit in the address * was set in %g7. Because if it is, we have to swap * upper and lower 8 bit fields of the sum we calculate. */ bne,pn %icc, csum_partial_fix_alignment andcc %o0, 0x1, %g7 csum_partial_post_align: prefetch [%o0 + 0x080], #n_reads andncc %o1, 0x3f, %o3 prefetch [%o0 + 0x0c0], #n_reads sub %o1, %o3, %o1 brz,pn %o3, 2f prefetch [%o0 + 0x100], #n_reads /* So that we don't need to use the non-pairing * add-with-carry instructions we accumulate 32-bit * values into a 64-bit register. At the end of the * loop we fold it down to 32-bits and so on. */ prefetch [%o0 + 0x140], #n_reads 1: lduw [%o0 + 0x00], %o5 lduw [%o0 + 0x04], %g1 lduw [%o0 + 0x08], %g2 add %o4, %o5, %o4 lduw [%o0 + 0x0c], %g3 add %o4, %g1, %o4 lduw [%o0 + 0x10], %o5 add %o4, %g2, %o4 lduw [%o0 + 0x14], %g1 add %o4, %g3, %o4 lduw [%o0 + 0x18], %g2 add %o4, %o5, %o4 lduw [%o0 + 0x1c], %g3 add %o4, %g1, %o4 lduw [%o0 + 0x20], %o5 add %o4, %g2, %o4 lduw [%o0 + 0x24], %g1 add %o4, %g3, %o4 lduw [%o0 + 0x28], %g2 add %o4, %o5, %o4 lduw [%o0 + 0x2c], %g3 add %o4, %g1, %o4 lduw [%o0 + 0x30], %o5 add %o4, %g2, %o4 lduw [%o0 + 0x34], %g1 add %o4, %g3, %o4 lduw [%o0 + 0x38], %g2 add %o4, %o5, %o4 lduw [%o0 + 0x3c], %g3 add %o4, %g1, %o4 prefetch [%o0 + 0x180], #n_reads add %o4, %g2, %o4 subcc %o3, 0x40, %o3 add %o0, 0x40, %o0 bne,pt %icc, 1b add %o4, %g3, %o4 2: and %o1, 0x3c, %o3 brz,pn %o3, 2f sub %o1, %o3, %o1 1: lduw [%o0 + 0x00], %o5 subcc %o3, 0x4, %o3 add %o0, 0x4, %o0 bne,pt %icc, 1b add %o4, %o5, %o4 2: /* fold 64-->32 */ srlx %o4, 32, %o5 srl %o4, 0, %o4 add %o4, %o5, %o4 srlx %o4, 32, %o5 srl %o4, 0, %o4 add %o4, %o5, %o4 /* fold 32-->16 */ sethi %hi(0xffff0000), %g1 srl %o4, 16, %o5 andn %o4, %g1, %g2 add %o5, %g2, %o4 srl %o4, 16, %o5 andn %o4, %g1, %g2 add %o5, %g2, %o4 csum_partial_end_cruft: /* %o4 has the 16-bit sum we have calculated so-far. */ cmp %o1, 2 blu,pt %icc, 1f nop lduh [%o0 + 0x00], %o5 sub %o1, 2, %o1 add %o0, 2, %o0 add %o4, %o5, %o4 1: brz,pt %o1, 1f nop ldub [%o0 + 0x00], %o5 sub %o1, 1, %o1 add %o0, 1, %o0 sllx %o5, 8, %o5 add %o4, %o5, %o4 1: /* fold 32-->16 */ sethi %hi(0xffff0000), %g1 srl %o4, 16, %o5 andn %o4, %g1, %g2 add %o5, %g2, %o4 srl %o4, 16, %o5 andn %o4, %g1, %g2 add %o5, %g2, %o4 1: brz,pt %g7, 1f nop /* We started with an odd byte, byte-swap the result. 
*/ srl %o4, 8, %o5 and %o4, 0xff, %g1 sll %g1, 8, %g1 or %o5, %g1, %o4 1: addcc %o2, %o4, %o2 addc %g0, %o2, %o2 csum_partial_finish: retl srl %o2, 0, %o0
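The checksum main loop accumulates 32-bit words into a 64-bit register, so the tail has to fold the carries back down: first 64 to 32 bits (done twice so the carry of the first fold is absorbed), then 32 to 16 bits, then a byte swap of the result if the buffer started on an odd address, and finally the incoming seed is added with end-around carry. A C sketch of those folding steps (illustrative, not the csum_partial calling convention):

#include <stdint.h>

/* Fold a 64-bit accumulator down to a 16-bit ones-complement sum,
 * mirroring the "fold 64-->32" / "fold 32-->16" steps above. */
static uint32_t csum_fold_sketch(uint64_t acc, int started_on_odd_byte)
{
	/* 64 -> 32: add upper and lower halves, twice to absorb the carry. */
	acc = (acc >> 32) + (acc & 0xffffffffull);
	acc = (acc >> 32) + (acc & 0xffffffffull);

	/* 32 -> 16, again twice. */
	uint32_t sum = (uint32_t)acc;
	sum = (sum >> 16) + (sum & 0xffff);
	sum = (sum >> 16) + (sum & 0xffff);

	/* Buffer started on an odd address: swap the two bytes of the sum. */
	if (started_on_odd_byte)
		sum = ((sum >> 8) & 0xff) | ((sum & 0xff) << 8);

	return sum;
}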
AirFortressIlikara/LS2K0300-linux-4.19
4,271
arch/sparc/net/bpf_jit_asm_32.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <asm/ptrace.h> #include "bpf_jit_32.h" #define SAVE_SZ 96 #define SCRATCH_OFF 72 #define BE_PTR(label) be label #define SIGN_EXTEND(reg) #define SKF_MAX_NEG_OFF (-0x200000) /* SKF_LL_OFF from filter.h */ .text .globl bpf_jit_load_word bpf_jit_load_word: cmp r_OFF, 0 bl bpf_slow_path_word_neg nop .globl bpf_jit_load_word_positive_offset bpf_jit_load_word_positive_offset: sub r_HEADLEN, r_OFF, r_TMP cmp r_TMP, 3 ble bpf_slow_path_word add r_SKB_DATA, r_OFF, r_TMP andcc r_TMP, 3, %g0 bne load_word_unaligned nop retl ld [r_TMP], r_A load_word_unaligned: ldub [r_TMP + 0x0], r_OFF ldub [r_TMP + 0x1], r_TMP2 sll r_OFF, 8, r_OFF or r_OFF, r_TMP2, r_OFF ldub [r_TMP + 0x2], r_TMP2 sll r_OFF, 8, r_OFF or r_OFF, r_TMP2, r_OFF ldub [r_TMP + 0x3], r_TMP2 sll r_OFF, 8, r_OFF retl or r_OFF, r_TMP2, r_A .globl bpf_jit_load_half bpf_jit_load_half: cmp r_OFF, 0 bl bpf_slow_path_half_neg nop .globl bpf_jit_load_half_positive_offset bpf_jit_load_half_positive_offset: sub r_HEADLEN, r_OFF, r_TMP cmp r_TMP, 1 ble bpf_slow_path_half add r_SKB_DATA, r_OFF, r_TMP andcc r_TMP, 1, %g0 bne load_half_unaligned nop retl lduh [r_TMP], r_A load_half_unaligned: ldub [r_TMP + 0x0], r_OFF ldub [r_TMP + 0x1], r_TMP2 sll r_OFF, 8, r_OFF retl or r_OFF, r_TMP2, r_A .globl bpf_jit_load_byte bpf_jit_load_byte: cmp r_OFF, 0 bl bpf_slow_path_byte_neg nop .globl bpf_jit_load_byte_positive_offset bpf_jit_load_byte_positive_offset: cmp r_OFF, r_HEADLEN bge bpf_slow_path_byte nop retl ldub [r_SKB_DATA + r_OFF], r_A .globl bpf_jit_load_byte_msh bpf_jit_load_byte_msh: cmp r_OFF, 0 bl bpf_slow_path_byte_msh_neg nop .globl bpf_jit_load_byte_msh_positive_offset bpf_jit_load_byte_msh_positive_offset: cmp r_OFF, r_HEADLEN bge bpf_slow_path_byte_msh nop ldub [r_SKB_DATA + r_OFF], r_OFF and r_OFF, 0xf, r_OFF retl sll r_OFF, 2, r_X #define bpf_slow_path_common(LEN) \ save %sp, -SAVE_SZ, %sp; \ mov %i0, %o0; \ mov r_OFF, %o1; \ add %fp, SCRATCH_OFF, %o2; \ call skb_copy_bits; \ mov (LEN), %o3; \ cmp %o0, 0; \ restore; bpf_slow_path_word: bpf_slow_path_common(4) bl bpf_error ld [%sp + SCRATCH_OFF], r_A retl nop bpf_slow_path_half: bpf_slow_path_common(2) bl bpf_error lduh [%sp + SCRATCH_OFF], r_A retl nop bpf_slow_path_byte: bpf_slow_path_common(1) bl bpf_error ldub [%sp + SCRATCH_OFF], r_A retl nop bpf_slow_path_byte_msh: bpf_slow_path_common(1) bl bpf_error ldub [%sp + SCRATCH_OFF], r_A and r_OFF, 0xf, r_OFF retl sll r_OFF, 2, r_X #define bpf_negative_common(LEN) \ save %sp, -SAVE_SZ, %sp; \ mov %i0, %o0; \ mov r_OFF, %o1; \ SIGN_EXTEND(%o1); \ call bpf_internal_load_pointer_neg_helper; \ mov (LEN), %o2; \ mov %o0, r_TMP; \ cmp %o0, 0; \ BE_PTR(bpf_error); \ restore; bpf_slow_path_word_neg: sethi %hi(SKF_MAX_NEG_OFF), r_TMP cmp r_OFF, r_TMP bl bpf_error nop .globl bpf_jit_load_word_negative_offset bpf_jit_load_word_negative_offset: bpf_negative_common(4) andcc r_TMP, 3, %g0 bne load_word_unaligned nop retl ld [r_TMP], r_A bpf_slow_path_half_neg: sethi %hi(SKF_MAX_NEG_OFF), r_TMP cmp r_OFF, r_TMP bl bpf_error nop .globl bpf_jit_load_half_negative_offset bpf_jit_load_half_negative_offset: bpf_negative_common(2) andcc r_TMP, 1, %g0 bne load_half_unaligned nop retl lduh [r_TMP], r_A bpf_slow_path_byte_neg: sethi %hi(SKF_MAX_NEG_OFF), r_TMP cmp r_OFF, r_TMP bl bpf_error nop .globl bpf_jit_load_byte_negative_offset bpf_jit_load_byte_negative_offset: bpf_negative_common(1) retl ldub [r_TMP], r_A bpf_slow_path_byte_msh_neg: sethi %hi(SKF_MAX_NEG_OFF), r_TMP cmp r_OFF, r_TMP bl bpf_error nop .globl 
bpf_jit_load_byte_msh_negative_offset
bpf_jit_load_byte_msh_negative_offset:
	bpf_negative_common(1)
	ldub	[r_TMP], r_OFF
	and	r_OFF, 0xf, r_OFF
	retl
	sll	r_OFF, 2, r_X

bpf_error:
	/* Make the JIT program return zero. The JIT epilogue
	 * stores away the original %o7 into r_saved_O7. The
	 * normal leaf function return is to use "retl" which
	 * would evaluate to "jmpl %o7 + 8, %g0" but we want to
	 * use the saved value, thus the sequence you see here.
	 */
	jmpl	r_saved_O7 + 8, %g0
	clr	%o0
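bpf_slow_path_common builds a small stack frame, copies LEN bytes from the skb into a scratch slot with skb_copy_bits(), and branches to bpf_error when the copy reports failure; bpf_error then returns through the saved %o7 with %o0 cleared, so any failed load makes the whole filter reject the packet. Roughly, in C (the control flow is the sketch; only skb_copy_bits() is the real kernel helper):

#include <linux/skbuff.h>

/* Sketch of the bpf_slow_path_common() contract above: fetch 'len' bytes
 * at 'off' into a scratch slot; any failure makes the filter return 0. */
static int bpf_slow_path_load_sketch(const struct sk_buff *skb, int off,
				     void *scratch, int len)
{
	/* skb_copy_bits() returns 0 on success, negative on failure. */
	if (skb_copy_bits(skb, off, scratch, len) < 0)
		return -1;	/* assembly branches to bpf_error -> program returns 0 */

	return 0;		/* loaded value now sits in the scratch slot */
}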
AirFortressIlikara/LS2K0300-linux-4.19
9,751
arch/sparc/mm/hypersparc.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * hypersparc.S: High speed Hypersparc mmu/cache operations. * * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) */ #include <asm/ptrace.h> #include <asm/psr.h> #include <asm/asm-offsets.h> #include <asm/asi.h> #include <asm/page.h> #include <asm/pgtsrmmu.h> #include <linux/init.h> .text .align 4 .globl hypersparc_flush_cache_all, hypersparc_flush_cache_mm .globl hypersparc_flush_cache_range, hypersparc_flush_cache_page .globl hypersparc_flush_page_to_ram .globl hypersparc_flush_page_for_dma, hypersparc_flush_sig_insns .globl hypersparc_flush_tlb_all, hypersparc_flush_tlb_mm .globl hypersparc_flush_tlb_range, hypersparc_flush_tlb_page hypersparc_flush_cache_all: WINDOW_FLUSH(%g4, %g5) sethi %hi(vac_cache_size), %g4 ld [%g4 + %lo(vac_cache_size)], %g5 sethi %hi(vac_line_size), %g1 ld [%g1 + %lo(vac_line_size)], %g2 1: subcc %g5, %g2, %g5 ! hyper_flush_unconditional_combined bne 1b sta %g0, [%g5] ASI_M_FLUSH_CTX retl sta %g0, [%g0] ASI_M_FLUSH_IWHOLE ! hyper_flush_whole_icache /* We expand the window flush to get maximum performance. */ hypersparc_flush_cache_mm: #ifndef CONFIG_SMP ld [%o0 + AOFF_mm_context], %g1 cmp %g1, -1 be hypersparc_flush_cache_mm_out #endif WINDOW_FLUSH(%g4, %g5) sethi %hi(vac_line_size), %g1 ld [%g1 + %lo(vac_line_size)], %o1 sethi %hi(vac_cache_size), %g2 ld [%g2 + %lo(vac_cache_size)], %o0 add %o1, %o1, %g1 add %o1, %g1, %g2 add %o1, %g2, %g3 add %o1, %g3, %g4 add %o1, %g4, %g5 add %o1, %g5, %o4 add %o1, %o4, %o5 /* BLAMMO! */ 1: subcc %o0, %o5, %o0 ! hyper_flush_cache_user sta %g0, [%o0 + %g0] ASI_M_FLUSH_USER sta %g0, [%o0 + %o1] ASI_M_FLUSH_USER sta %g0, [%o0 + %g1] ASI_M_FLUSH_USER sta %g0, [%o0 + %g2] ASI_M_FLUSH_USER sta %g0, [%o0 + %g3] ASI_M_FLUSH_USER sta %g0, [%o0 + %g4] ASI_M_FLUSH_USER sta %g0, [%o0 + %g5] ASI_M_FLUSH_USER bne 1b sta %g0, [%o0 + %o4] ASI_M_FLUSH_USER hypersparc_flush_cache_mm_out: retl nop /* The things we do for performance... */ hypersparc_flush_cache_range: ld [%o0 + VMA_VM_MM], %o0 #ifndef CONFIG_SMP ld [%o0 + AOFF_mm_context], %g1 cmp %g1, -1 be hypersparc_flush_cache_range_out #endif WINDOW_FLUSH(%g4, %g5) sethi %hi(vac_line_size), %g1 ld [%g1 + %lo(vac_line_size)], %o4 sethi %hi(vac_cache_size), %g2 ld [%g2 + %lo(vac_cache_size)], %o3 /* Here comes the fun part... */ add %o2, (PAGE_SIZE - 1), %o2 andn %o1, (PAGE_SIZE - 1), %o1 add %o4, %o4, %o5 andn %o2, (PAGE_SIZE - 1), %o2 add %o4, %o5, %g1 sub %o2, %o1, %g4 add %o4, %g1, %g2 sll %o3, 2, %g5 add %o4, %g2, %g3 cmp %g4, %g5 add %o4, %g3, %g4 blu 0f add %o4, %g4, %g5 add %o4, %g5, %g7 /* Flush entire user space, believe it or not this is quicker * than page at a time flushings for range > (cache_size<<2). */ 1: subcc %o3, %g7, %o3 sta %g0, [%o3 + %g0] ASI_M_FLUSH_USER sta %g0, [%o3 + %o4] ASI_M_FLUSH_USER sta %g0, [%o3 + %o5] ASI_M_FLUSH_USER sta %g0, [%o3 + %g1] ASI_M_FLUSH_USER sta %g0, [%o3 + %g2] ASI_M_FLUSH_USER sta %g0, [%o3 + %g3] ASI_M_FLUSH_USER sta %g0, [%o3 + %g4] ASI_M_FLUSH_USER bne 1b sta %g0, [%o3 + %g5] ASI_M_FLUSH_USER retl nop /* Below our threshold, flush one page at a time. 
*/ 0: ld [%o0 + AOFF_mm_context], %o0 mov SRMMU_CTX_REG, %g7 lda [%g7] ASI_M_MMUREGS, %o3 sta %o0, [%g7] ASI_M_MMUREGS add %o2, -PAGE_SIZE, %o0 1: or %o0, 0x400, %g7 lda [%g7] ASI_M_FLUSH_PROBE, %g7 orcc %g7, 0, %g0 be,a 3f mov %o0, %o2 add %o4, %g5, %g7 2: sub %o2, %g7, %o2 sta %g0, [%o2 + %g0] ASI_M_FLUSH_PAGE sta %g0, [%o2 + %o4] ASI_M_FLUSH_PAGE sta %g0, [%o2 + %o5] ASI_M_FLUSH_PAGE sta %g0, [%o2 + %g1] ASI_M_FLUSH_PAGE sta %g0, [%o2 + %g2] ASI_M_FLUSH_PAGE sta %g0, [%o2 + %g3] ASI_M_FLUSH_PAGE andcc %o2, 0xffc, %g0 sta %g0, [%o2 + %g4] ASI_M_FLUSH_PAGE bne 2b sta %g0, [%o2 + %g5] ASI_M_FLUSH_PAGE 3: cmp %o2, %o1 bne 1b add %o2, -PAGE_SIZE, %o0 mov SRMMU_FAULT_STATUS, %g5 lda [%g5] ASI_M_MMUREGS, %g0 mov SRMMU_CTX_REG, %g7 sta %o3, [%g7] ASI_M_MMUREGS hypersparc_flush_cache_range_out: retl nop /* HyperSparc requires a valid mapping where we are about to flush * in order to check for a physical tag match during the flush. */ /* Verified, my ass... */ hypersparc_flush_cache_page: ld [%o0 + VMA_VM_MM], %o0 ld [%o0 + AOFF_mm_context], %g2 #ifndef CONFIG_SMP cmp %g2, -1 be hypersparc_flush_cache_page_out #endif WINDOW_FLUSH(%g4, %g5) sethi %hi(vac_line_size), %g1 ld [%g1 + %lo(vac_line_size)], %o4 mov SRMMU_CTX_REG, %o3 andn %o1, (PAGE_SIZE - 1), %o1 lda [%o3] ASI_M_MMUREGS, %o2 sta %g2, [%o3] ASI_M_MMUREGS or %o1, 0x400, %o5 lda [%o5] ASI_M_FLUSH_PROBE, %g1 orcc %g0, %g1, %g0 be 2f add %o4, %o4, %o5 sub %o1, -PAGE_SIZE, %o1 add %o4, %o5, %g1 add %o4, %g1, %g2 add %o4, %g2, %g3 add %o4, %g3, %g4 add %o4, %g4, %g5 add %o4, %g5, %g7 /* BLAMMO! */ 1: sub %o1, %g7, %o1 sta %g0, [%o1 + %g0] ASI_M_FLUSH_PAGE sta %g0, [%o1 + %o4] ASI_M_FLUSH_PAGE sta %g0, [%o1 + %o5] ASI_M_FLUSH_PAGE sta %g0, [%o1 + %g1] ASI_M_FLUSH_PAGE sta %g0, [%o1 + %g2] ASI_M_FLUSH_PAGE sta %g0, [%o1 + %g3] ASI_M_FLUSH_PAGE andcc %o1, 0xffc, %g0 sta %g0, [%o1 + %g4] ASI_M_FLUSH_PAGE bne 1b sta %g0, [%o1 + %g5] ASI_M_FLUSH_PAGE 2: mov SRMMU_FAULT_STATUS, %g7 mov SRMMU_CTX_REG, %g4 lda [%g7] ASI_M_MMUREGS, %g0 sta %o2, [%g4] ASI_M_MMUREGS hypersparc_flush_cache_page_out: retl nop hypersparc_flush_sig_insns: flush %o1 retl flush %o1 + 4 /* HyperSparc is copy-back. */ hypersparc_flush_page_to_ram: sethi %hi(vac_line_size), %g1 ld [%g1 + %lo(vac_line_size)], %o4 andn %o0, (PAGE_SIZE - 1), %o0 add %o4, %o4, %o5 or %o0, 0x400, %g7 lda [%g7] ASI_M_FLUSH_PROBE, %g5 add %o4, %o5, %g1 orcc %g5, 0, %g0 be 2f add %o4, %g1, %g2 add %o4, %g2, %g3 sub %o0, -PAGE_SIZE, %o0 add %o4, %g3, %g4 add %o4, %g4, %g5 add %o4, %g5, %g7 /* BLAMMO! */ 1: sub %o0, %g7, %o0 sta %g0, [%o0 + %g0] ASI_M_FLUSH_PAGE sta %g0, [%o0 + %o4] ASI_M_FLUSH_PAGE sta %g0, [%o0 + %o5] ASI_M_FLUSH_PAGE sta %g0, [%o0 + %g1] ASI_M_FLUSH_PAGE sta %g0, [%o0 + %g2] ASI_M_FLUSH_PAGE sta %g0, [%o0 + %g3] ASI_M_FLUSH_PAGE andcc %o0, 0xffc, %g0 sta %g0, [%o0 + %g4] ASI_M_FLUSH_PAGE bne 1b sta %g0, [%o0 + %g5] ASI_M_FLUSH_PAGE 2: mov SRMMU_FAULT_STATUS, %g1 retl lda [%g1] ASI_M_MMUREGS, %g0 /* HyperSparc is IO cache coherent. */ hypersparc_flush_page_for_dma: retl nop /* It was noted that at boot time a TLB flush all in a delay slot * can deliver an illegal instruction to the processor if the timing * is just right... 
*/ hypersparc_flush_tlb_all: mov 0x400, %g1 sta %g0, [%g1] ASI_M_FLUSH_PROBE retl nop hypersparc_flush_tlb_mm: mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o1 lda [%g1] ASI_M_MMUREGS, %g5 #ifndef CONFIG_SMP cmp %o1, -1 be hypersparc_flush_tlb_mm_out #endif mov 0x300, %g2 sta %o1, [%g1] ASI_M_MMUREGS sta %g0, [%g2] ASI_M_FLUSH_PROBE hypersparc_flush_tlb_mm_out: retl sta %g5, [%g1] ASI_M_MMUREGS hypersparc_flush_tlb_range: ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 lda [%g1] ASI_M_MMUREGS, %g5 #ifndef CONFIG_SMP cmp %o3, -1 be hypersparc_flush_tlb_range_out #endif sethi %hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4 sta %o3, [%g1] ASI_M_MMUREGS and %o1, %o4, %o1 add %o1, 0x200, %o1 sta %g0, [%o1] ASI_M_FLUSH_PROBE 1: sub %o1, %o4, %o1 cmp %o1, %o2 blu,a 1b sta %g0, [%o1] ASI_M_FLUSH_PROBE hypersparc_flush_tlb_range_out: retl sta %g5, [%g1] ASI_M_MMUREGS hypersparc_flush_tlb_page: ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 andn %o1, (PAGE_SIZE - 1), %o1 #ifndef CONFIG_SMP cmp %o3, -1 be hypersparc_flush_tlb_page_out #endif lda [%g1] ASI_M_MMUREGS, %g5 sta %o3, [%g1] ASI_M_MMUREGS sta %g0, [%o1] ASI_M_FLUSH_PROBE hypersparc_flush_tlb_page_out: retl sta %g5, [%g1] ASI_M_MMUREGS __INIT /* High speed page clear/copy. */ hypersparc_bzero_1page: /* NOTE: This routine has to be shorter than 40insns --jj */ clr %g1 mov 32, %g2 mov 64, %g3 mov 96, %g4 mov 128, %g5 mov 160, %g7 mov 192, %o2 mov 224, %o3 mov 16, %o1 1: stda %g0, [%o0 + %g0] ASI_M_BFILL stda %g0, [%o0 + %g2] ASI_M_BFILL stda %g0, [%o0 + %g3] ASI_M_BFILL stda %g0, [%o0 + %g4] ASI_M_BFILL stda %g0, [%o0 + %g5] ASI_M_BFILL stda %g0, [%o0 + %g7] ASI_M_BFILL stda %g0, [%o0 + %o2] ASI_M_BFILL stda %g0, [%o0 + %o3] ASI_M_BFILL subcc %o1, 1, %o1 bne 1b add %o0, 256, %o0 retl nop hypersparc_copy_1page: /* NOTE: This routine has to be shorter than 70insns --jj */ sub %o1, %o0, %o2 ! difference mov 16, %g1 1: sta %o0, [%o0 + %o2] ASI_M_BCOPY add %o0, 32, %o0 sta %o0, [%o0 + %o2] ASI_M_BCOPY add %o0, 32, %o0 sta %o0, [%o0 + %o2] ASI_M_BCOPY add %o0, 32, %o0 sta %o0, [%o0 + %o2] ASI_M_BCOPY add %o0, 32, %o0 sta %o0, [%o0 + %o2] ASI_M_BCOPY add %o0, 32, %o0 sta %o0, [%o0 + %o2] ASI_M_BCOPY add %o0, 32, %o0 sta %o0, [%o0 + %o2] ASI_M_BCOPY add %o0, 32, %o0 sta %o0, [%o0 + %o2] ASI_M_BCOPY subcc %g1, 1, %g1 bne 1b add %o0, 32, %o0 retl nop .globl hypersparc_setup_blockops hypersparc_setup_blockops: sethi %hi(bzero_1page), %o0 or %o0, %lo(bzero_1page), %o0 sethi %hi(hypersparc_bzero_1page), %o1 or %o1, %lo(hypersparc_bzero_1page), %o1 sethi %hi(hypersparc_copy_1page), %o2 or %o2, %lo(hypersparc_copy_1page), %o2 ld [%o1], %o4 1: add %o1, 4, %o1 st %o4, [%o0] add %o0, 4, %o0 cmp %o1, %o2 bne 1b ld [%o1], %o4 sethi %hi(__copy_1page), %o0 or %o0, %lo(__copy_1page), %o0 sethi %hi(hypersparc_setup_blockops), %o2 or %o2, %lo(hypersparc_setup_blockops), %o2 ld [%o1], %o4 1: add %o1, 4, %o1 st %o4, [%o0] add %o0, 4, %o0 cmp %o1, %o2 bne 1b ld [%o1], %o4 sta %g0, [%g0] ASI_M_FLUSH_IWHOLE retl nop
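hypersparc_flush_cache_range chooses between two strategies: when the page-rounded span exceeds four times the cache size it simply dumps the whole user cache with the unrolled ASI_M_FLUSH_USER ladder, otherwise it switches to the target context and flushes page by page, probing each page first so only mapped pages are touched. A hedged C sketch of that policy decision (vac_cache_size is the variable the assembly loads; the flush helpers here are hypothetical stand-ins for the ASI ladders):

#define PAGE_SIZE_SK	4096UL			/* assumption for the sketch */

extern unsigned long vac_cache_size;		/* probed at boot, loaded via %hi/%lo */

/* Hypothetical helpers standing in for the ASI_M_FLUSH_USER/_PAGE loops. */
void flush_whole_user_cache(void);
void flush_user_page_if_mapped(unsigned long vaddr);

static void flush_cache_range_sketch(unsigned long start, unsigned long end)
{
	start &= ~(PAGE_SIZE_SK - 1);
	end = (end + PAGE_SIZE_SK - 1) & ~(PAGE_SIZE_SK - 1);

	if (end - start >= (vac_cache_size << 2)) {
		/* Large range: cheaper to dump the whole user cache. */
		flush_whole_user_cache();
	} else {
		/* Small range: probe and flush one page at a time. */
		for (unsigned long va = start; va < end; va += PAGE_SIZE_SK)
			flush_user_page_if_mapped(va);
	}
}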
AirFortressIlikara/LS2K0300-linux-4.19
25,179
arch/sparc/mm/ultra.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * ultra.S: Don't expand these all over the place... * * Copyright (C) 1997, 2000, 2008 David S. Miller (davem@davemloft.net) */ #include <asm/asi.h> #include <asm/pgtable.h> #include <asm/page.h> #include <asm/spitfire.h> #include <asm/mmu_context.h> #include <asm/mmu.h> #include <asm/pil.h> #include <asm/head.h> #include <asm/thread_info.h> #include <asm/cacheflush.h> #include <asm/hypervisor.h> #include <asm/cpudata.h> /* Basically, most of the Spitfire vs. Cheetah madness * has to do with the fact that Cheetah does not support * IMMU flushes out of the secondary context. Someone needs * to throw a south lake birthday party for the folks * in Microelectronics who refused to fix this shit. */ /* This file is meant to be read efficiently by the CPU, not humans. * Staraj sie tego nikomu nie pierdolnac... */ .text .align 32 .globl __flush_tlb_mm __flush_tlb_mm: /* 19 insns */ /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */ ldxa [%o1] ASI_DMMU, %g2 cmp %g2, %o0 bne,pn %icc, __spitfire_flush_tlb_mm_slow mov 0x50, %g3 stxa %g0, [%g3] ASI_DMMU_DEMAP stxa %g0, [%g3] ASI_IMMU_DEMAP sethi %hi(KERNBASE), %g3 flush %g3 retl nop nop nop nop nop nop nop nop nop nop .align 32 .globl __flush_tlb_page __flush_tlb_page: /* 22 insns */ /* %o0 = context, %o1 = vaddr */ rdpr %pstate, %g7 andn %g7, PSTATE_IE, %g2 wrpr %g2, %pstate mov SECONDARY_CONTEXT, %o4 ldxa [%o4] ASI_DMMU, %g2 stxa %o0, [%o4] ASI_DMMU andcc %o1, 1, %g0 andn %o1, 1, %o3 be,pn %icc, 1f or %o3, 0x10, %o3 stxa %g0, [%o3] ASI_IMMU_DEMAP 1: stxa %g0, [%o3] ASI_DMMU_DEMAP membar #Sync stxa %g2, [%o4] ASI_DMMU sethi %hi(KERNBASE), %o4 flush %o4 retl wrpr %g7, 0x0, %pstate nop nop nop nop .align 32 .globl __flush_tlb_pending __flush_tlb_pending: /* 27 insns */ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ rdpr %pstate, %g7 sllx %o1, 3, %o1 andn %g7, PSTATE_IE, %g2 wrpr %g2, %pstate mov SECONDARY_CONTEXT, %o4 ldxa [%o4] ASI_DMMU, %g2 stxa %o0, [%o4] ASI_DMMU 1: sub %o1, (1 << 3), %o1 ldx [%o2 + %o1], %o3 andcc %o3, 1, %g0 andn %o3, 1, %o3 be,pn %icc, 2f or %o3, 0x10, %o3 stxa %g0, [%o3] ASI_IMMU_DEMAP 2: stxa %g0, [%o3] ASI_DMMU_DEMAP membar #Sync brnz,pt %o1, 1b nop stxa %g2, [%o4] ASI_DMMU sethi %hi(KERNBASE), %o4 flush %o4 retl wrpr %g7, 0x0, %pstate nop nop nop nop .align 32 .globl __flush_tlb_kernel_range __flush_tlb_kernel_range: /* 31 insns */ /* %o0=start, %o1=end */ cmp %o0, %o1 be,pn %xcc, 2f sub %o1, %o0, %o3 srlx %o3, 18, %o4 brnz,pn %o4, __spitfire_flush_tlb_kernel_range_slow sethi %hi(PAGE_SIZE), %o4 sub %o3, %o4, %o3 or %o0, 0x20, %o0 ! 
Nucleus 1: stxa %g0, [%o0 + %o3] ASI_DMMU_DEMAP stxa %g0, [%o0 + %o3] ASI_IMMU_DEMAP membar #Sync brnz,pt %o3, 1b sub %o3, %o4, %o3 2: sethi %hi(KERNBASE), %o3 flush %o3 retl nop nop nop nop nop nop nop nop nop nop nop nop nop nop nop __spitfire_flush_tlb_kernel_range_slow: mov 63 * 8, %o4 1: ldxa [%o4] ASI_ITLB_DATA_ACCESS, %o3 andcc %o3, 0x40, %g0 /* _PAGE_L_4U */ bne,pn %xcc, 2f mov TLB_TAG_ACCESS, %o3 stxa %g0, [%o3] ASI_IMMU stxa %g0, [%o4] ASI_ITLB_DATA_ACCESS membar #Sync 2: ldxa [%o4] ASI_DTLB_DATA_ACCESS, %o3 andcc %o3, 0x40, %g0 bne,pn %xcc, 2f mov TLB_TAG_ACCESS, %o3 stxa %g0, [%o3] ASI_DMMU stxa %g0, [%o4] ASI_DTLB_DATA_ACCESS membar #Sync 2: sub %o4, 8, %o4 brgez,pt %o4, 1b nop retl nop __spitfire_flush_tlb_mm_slow: rdpr %pstate, %g1 wrpr %g1, PSTATE_IE, %pstate stxa %o0, [%o1] ASI_DMMU stxa %g0, [%g3] ASI_DMMU_DEMAP stxa %g0, [%g3] ASI_IMMU_DEMAP flush %g6 stxa %g2, [%o1] ASI_DMMU sethi %hi(KERNBASE), %o1 flush %o1 retl wrpr %g1, 0, %pstate /* * The following code flushes one page_size worth. */ .section .kprobes.text, "ax" .align 32 .globl __flush_icache_page __flush_icache_page: /* %o0 = phys_page */ srlx %o0, PAGE_SHIFT, %o0 sethi %hi(PAGE_OFFSET), %g1 sllx %o0, PAGE_SHIFT, %o0 sethi %hi(PAGE_SIZE), %g2 ldx [%g1 + %lo(PAGE_OFFSET)], %g1 add %o0, %g1, %o0 1: subcc %g2, 32, %g2 bne,pt %icc, 1b flush %o0 + %g2 retl nop #ifdef DCACHE_ALIASING_POSSIBLE #if (PAGE_SHIFT != 13) #error only page shift of 13 is supported by dcache flush #endif #define DTAG_MASK 0x3 /* This routine is Spitfire specific so the hardcoded * D-cache size and line-size are OK. */ .align 64 .globl __flush_dcache_page __flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */ sethi %hi(PAGE_OFFSET), %g1 ldx [%g1 + %lo(PAGE_OFFSET)], %g1 sub %o0, %g1, %o0 ! physical address srlx %o0, 11, %o0 ! make D-cache TAG sethi %hi(1 << 14), %o2 ! D-cache size sub %o2, (1 << 5), %o2 ! D-cache line size 1: ldxa [%o2] ASI_DCACHE_TAG, %o3 ! load D-cache TAG andcc %o3, DTAG_MASK, %g0 ! Valid? be,pn %xcc, 2f ! Nope, branch andn %o3, DTAG_MASK, %o3 ! Clear valid bits cmp %o3, %o0 ! TAG match? bne,pt %xcc, 2f ! Nope, branch nop stxa %g0, [%o2] ASI_DCACHE_TAG ! Invalidate TAG membar #Sync 2: brnz,pt %o2, 1b sub %o2, (1 << 5), %o2 ! D-cache line size /* The I-cache does not snoop local stores so we * better flush that too when necessary. */ brnz,pt %o1, __flush_icache_page sllx %o0, 11, %o0 retl nop #endif /* DCACHE_ALIASING_POSSIBLE */ .previous /* Cheetah specific versions, patched at boot time. 
*/ __cheetah_flush_tlb_mm: /* 19 insns */ rdpr %pstate, %g7 andn %g7, PSTATE_IE, %g2 wrpr %g2, 0x0, %pstate wrpr %g0, 1, %tl mov PRIMARY_CONTEXT, %o2 mov 0x40, %g3 ldxa [%o2] ASI_DMMU, %g2 srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o1 sllx %o1, CTX_PGSZ1_NUC_SHIFT, %o1 or %o0, %o1, %o0 /* Preserve nucleus page size fields */ stxa %o0, [%o2] ASI_DMMU stxa %g0, [%g3] ASI_DMMU_DEMAP stxa %g0, [%g3] ASI_IMMU_DEMAP stxa %g2, [%o2] ASI_DMMU sethi %hi(KERNBASE), %o2 flush %o2 wrpr %g0, 0, %tl retl wrpr %g7, 0x0, %pstate __cheetah_flush_tlb_page: /* 22 insns */ /* %o0 = context, %o1 = vaddr */ rdpr %pstate, %g7 andn %g7, PSTATE_IE, %g2 wrpr %g2, 0x0, %pstate wrpr %g0, 1, %tl mov PRIMARY_CONTEXT, %o4 ldxa [%o4] ASI_DMMU, %g2 srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o3 sllx %o3, CTX_PGSZ1_NUC_SHIFT, %o3 or %o0, %o3, %o0 /* Preserve nucleus page size fields */ stxa %o0, [%o4] ASI_DMMU andcc %o1, 1, %g0 be,pn %icc, 1f andn %o1, 1, %o3 stxa %g0, [%o3] ASI_IMMU_DEMAP 1: stxa %g0, [%o3] ASI_DMMU_DEMAP membar #Sync stxa %g2, [%o4] ASI_DMMU sethi %hi(KERNBASE), %o4 flush %o4 wrpr %g0, 0, %tl retl wrpr %g7, 0x0, %pstate __cheetah_flush_tlb_pending: /* 27 insns */ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ rdpr %pstate, %g7 sllx %o1, 3, %o1 andn %g7, PSTATE_IE, %g2 wrpr %g2, 0x0, %pstate wrpr %g0, 1, %tl mov PRIMARY_CONTEXT, %o4 ldxa [%o4] ASI_DMMU, %g2 srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o3 sllx %o3, CTX_PGSZ1_NUC_SHIFT, %o3 or %o0, %o3, %o0 /* Preserve nucleus page size fields */ stxa %o0, [%o4] ASI_DMMU 1: sub %o1, (1 << 3), %o1 ldx [%o2 + %o1], %o3 andcc %o3, 1, %g0 be,pn %icc, 2f andn %o3, 1, %o3 stxa %g0, [%o3] ASI_IMMU_DEMAP 2: stxa %g0, [%o3] ASI_DMMU_DEMAP membar #Sync brnz,pt %o1, 1b nop stxa %g2, [%o4] ASI_DMMU sethi %hi(KERNBASE), %o4 flush %o4 wrpr %g0, 0, %tl retl wrpr %g7, 0x0, %pstate __cheetah_flush_tlb_kernel_range: /* 31 insns */ /* %o0=start, %o1=end */ cmp %o0, %o1 be,pn %xcc, 2f sub %o1, %o0, %o3 srlx %o3, 18, %o4 brnz,pn %o4, 3f sethi %hi(PAGE_SIZE), %o4 sub %o3, %o4, %o3 or %o0, 0x20, %o0 ! Nucleus 1: stxa %g0, [%o0 + %o3] ASI_DMMU_DEMAP stxa %g0, [%o0 + %o3] ASI_IMMU_DEMAP membar #Sync brnz,pt %o3, 1b sub %o3, %o4, %o3 2: sethi %hi(KERNBASE), %o3 flush %o3 retl nop 3: mov 0x80, %o4 stxa %g0, [%o4] ASI_DMMU_DEMAP membar #Sync stxa %g0, [%o4] ASI_IMMU_DEMAP membar #Sync retl nop nop nop nop nop nop nop nop #ifdef DCACHE_ALIASING_POSSIBLE __cheetah_flush_dcache_page: /* 11 insns */ sethi %hi(PAGE_OFFSET), %g1 ldx [%g1 + %lo(PAGE_OFFSET)], %g1 sub %o0, %g1, %o0 sethi %hi(PAGE_SIZE), %o4 1: subcc %o4, (1 << 5), %o4 stxa %g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE membar #Sync bne,pt %icc, 1b nop retl /* I-cache flush never needed on Cheetah, see callers. */ nop #endif /* DCACHE_ALIASING_POSSIBLE */ /* Hypervisor specific versions, patched at boot time. 
*/ __hypervisor_tlb_tl0_error: save %sp, -192, %sp mov %i0, %o0 call hypervisor_tlbop_error mov %i1, %o1 ret restore __hypervisor_flush_tlb_mm: /* 19 insns */ mov %o0, %o2 /* ARG2: mmu context */ mov 0, %o0 /* ARG0: CPU lists unimplemented */ mov 0, %o1 /* ARG1: CPU lists unimplemented */ mov HV_MMU_ALL, %o3 /* ARG3: flags */ mov HV_FAST_MMU_DEMAP_CTX, %o5 ta HV_FAST_TRAP brnz,pn %o0, 1f mov HV_FAST_MMU_DEMAP_CTX, %o1 retl nop 1: sethi %hi(__hypervisor_tlb_tl0_error), %o5 jmpl %o5 + %lo(__hypervisor_tlb_tl0_error), %g0 nop nop nop nop nop nop nop __hypervisor_flush_tlb_page: /* 22 insns */ /* %o0 = context, %o1 = vaddr */ mov %o0, %g2 mov %o1, %o0 /* ARG0: vaddr + IMMU-bit */ mov %g2, %o1 /* ARG1: mmu context */ mov HV_MMU_ALL, %o2 /* ARG2: flags */ srlx %o0, PAGE_SHIFT, %o0 sllx %o0, PAGE_SHIFT, %o0 ta HV_MMU_UNMAP_ADDR_TRAP brnz,pn %o0, 1f mov HV_MMU_UNMAP_ADDR_TRAP, %o1 retl nop 1: sethi %hi(__hypervisor_tlb_tl0_error), %o2 jmpl %o2 + %lo(__hypervisor_tlb_tl0_error), %g0 nop nop nop nop nop nop nop nop nop __hypervisor_flush_tlb_pending: /* 27 insns */ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ sllx %o1, 3, %g1 mov %o2, %g2 mov %o0, %g3 1: sub %g1, (1 << 3), %g1 ldx [%g2 + %g1], %o0 /* ARG0: vaddr + IMMU-bit */ mov %g3, %o1 /* ARG1: mmu context */ mov HV_MMU_ALL, %o2 /* ARG2: flags */ srlx %o0, PAGE_SHIFT, %o0 sllx %o0, PAGE_SHIFT, %o0 ta HV_MMU_UNMAP_ADDR_TRAP brnz,pn %o0, 1f mov HV_MMU_UNMAP_ADDR_TRAP, %o1 brnz,pt %g1, 1b nop retl nop 1: sethi %hi(__hypervisor_tlb_tl0_error), %o2 jmpl %o2 + %lo(__hypervisor_tlb_tl0_error), %g0 nop nop nop nop nop nop nop nop nop __hypervisor_flush_tlb_kernel_range: /* 31 insns */ /* %o0=start, %o1=end */ cmp %o0, %o1 be,pn %xcc, 2f sub %o1, %o0, %g2 srlx %g2, 18, %g3 brnz,pn %g3, 4f mov %o0, %g1 sethi %hi(PAGE_SIZE), %g3 sub %g2, %g3, %g2 1: add %g1, %g2, %o0 /* ARG0: virtual address */ mov 0, %o1 /* ARG1: mmu context */ mov HV_MMU_ALL, %o2 /* ARG2: flags */ ta HV_MMU_UNMAP_ADDR_TRAP brnz,pn %o0, 3f mov HV_MMU_UNMAP_ADDR_TRAP, %o1 brnz,pt %g2, 1b sub %g2, %g3, %g2 2: retl nop 3: sethi %hi(__hypervisor_tlb_tl0_error), %o2 jmpl %o2 + %lo(__hypervisor_tlb_tl0_error), %g0 nop 4: mov 0, %o0 /* ARG0: CPU lists unimplemented */ mov 0, %o1 /* ARG1: CPU lists unimplemented */ mov 0, %o2 /* ARG2: mmu context == nucleus */ mov HV_MMU_ALL, %o3 /* ARG3: flags */ mov HV_FAST_MMU_DEMAP_CTX, %o5 ta HV_FAST_TRAP brnz,pn %o0, 3b mov HV_FAST_MMU_DEMAP_CTX, %o1 retl nop #ifdef DCACHE_ALIASING_POSSIBLE /* XXX Niagara and friends have an 8K cache, so no aliasing is * XXX possible, but nothing explicit in the Hypervisor API * XXX guarantees this. */ __hypervisor_flush_dcache_page: /* 2 insns */ retl nop #endif tlb_patch_one: 1: lduw [%o1], %g1 stw %g1, [%o0] flush %o0 subcc %o2, 1, %o2 add %o1, 4, %o1 bne,pt %icc, 1b add %o0, 4, %o0 retl nop #ifdef CONFIG_SMP /* These are all called by the slaves of a cross call, at * trap level 1, with interrupts fully disabled. 
* * Register usage: * %g5 mm->context (all tlb flushes) * %g1 address arg 1 (tlb page and range flushes) * %g7 address arg 2 (tlb range flush only) * * %g6 scratch 1 * %g2 scratch 2 * %g3 scratch 3 * %g4 scratch 4 */ .align 32 .globl xcall_flush_tlb_mm xcall_flush_tlb_mm: /* 24 insns */ mov PRIMARY_CONTEXT, %g2 ldxa [%g2] ASI_DMMU, %g3 srlx %g3, CTX_PGSZ1_NUC_SHIFT, %g4 sllx %g4, CTX_PGSZ1_NUC_SHIFT, %g4 or %g5, %g4, %g5 /* Preserve nucleus page size fields */ stxa %g5, [%g2] ASI_DMMU mov 0x40, %g4 stxa %g0, [%g4] ASI_DMMU_DEMAP stxa %g0, [%g4] ASI_IMMU_DEMAP stxa %g3, [%g2] ASI_DMMU retry nop nop nop nop nop nop nop nop nop nop nop nop nop .globl xcall_flush_tlb_page xcall_flush_tlb_page: /* 20 insns */ /* %g5=context, %g1=vaddr */ mov PRIMARY_CONTEXT, %g4 ldxa [%g4] ASI_DMMU, %g2 srlx %g2, CTX_PGSZ1_NUC_SHIFT, %g4 sllx %g4, CTX_PGSZ1_NUC_SHIFT, %g4 or %g5, %g4, %g5 mov PRIMARY_CONTEXT, %g4 stxa %g5, [%g4] ASI_DMMU andcc %g1, 0x1, %g0 be,pn %icc, 2f andn %g1, 0x1, %g5 stxa %g0, [%g5] ASI_IMMU_DEMAP 2: stxa %g0, [%g5] ASI_DMMU_DEMAP membar #Sync stxa %g2, [%g4] ASI_DMMU retry nop nop nop nop nop .globl xcall_flush_tlb_kernel_range xcall_flush_tlb_kernel_range: /* 44 insns */ sethi %hi(PAGE_SIZE - 1), %g2 or %g2, %lo(PAGE_SIZE - 1), %g2 andn %g1, %g2, %g1 andn %g7, %g2, %g7 sub %g7, %g1, %g3 srlx %g3, 18, %g2 brnz,pn %g2, 2f sethi %hi(PAGE_SIZE), %g2 sub %g3, %g2, %g3 or %g1, 0x20, %g1 ! Nucleus 1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP stxa %g0, [%g1 + %g3] ASI_IMMU_DEMAP membar #Sync brnz,pt %g3, 1b sub %g3, %g2, %g3 retry 2: mov 63 * 8, %g1 1: ldxa [%g1] ASI_ITLB_DATA_ACCESS, %g2 andcc %g2, 0x40, %g0 /* _PAGE_L_4U */ bne,pn %xcc, 2f mov TLB_TAG_ACCESS, %g2 stxa %g0, [%g2] ASI_IMMU stxa %g0, [%g1] ASI_ITLB_DATA_ACCESS membar #Sync 2: ldxa [%g1] ASI_DTLB_DATA_ACCESS, %g2 andcc %g2, 0x40, %g0 bne,pn %xcc, 2f mov TLB_TAG_ACCESS, %g2 stxa %g0, [%g2] ASI_DMMU stxa %g0, [%g1] ASI_DTLB_DATA_ACCESS membar #Sync 2: sub %g1, 8, %g1 brgez,pt %g1, 1b nop retry nop nop nop nop nop nop nop nop nop /* This runs in a very controlled environment, so we do * not need to worry about BH races etc. */ .globl xcall_sync_tick xcall_sync_tick: 661: rdpr %pstate, %g2 wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate .section .sun4v_2insn_patch, "ax" .word 661b nop nop .previous rdpr %pil, %g2 wrpr %g0, PIL_NORMAL_MAX, %pil sethi %hi(109f), %g7 b,pt %xcc, etrap_irq 109: or %g7, %lo(109b), %g7 #ifdef CONFIG_TRACE_IRQFLAGS call trace_hardirqs_off nop #endif call smp_synchronize_tick_client nop b rtrap_xcall ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1 .globl xcall_fetch_glob_regs xcall_fetch_glob_regs: sethi %hi(global_cpu_snapshot), %g1 or %g1, %lo(global_cpu_snapshot), %g1 __GET_CPUID(%g2) sllx %g2, 6, %g3 add %g1, %g3, %g1 rdpr %tstate, %g7 stx %g7, [%g1 + GR_SNAP_TSTATE] rdpr %tpc, %g7 stx %g7, [%g1 + GR_SNAP_TPC] rdpr %tnpc, %g7 stx %g7, [%g1 + GR_SNAP_TNPC] stx %o7, [%g1 + GR_SNAP_O7] stx %i7, [%g1 + GR_SNAP_I7] /* Don't try this at home kids... 
*/ rdpr %cwp, %g3 sub %g3, 1, %g7 wrpr %g7, %cwp mov %i7, %g7 wrpr %g3, %cwp stx %g7, [%g1 + GR_SNAP_RPC] sethi %hi(trap_block), %g7 or %g7, %lo(trap_block), %g7 sllx %g2, TRAP_BLOCK_SZ_SHIFT, %g2 add %g7, %g2, %g7 ldx [%g7 + TRAP_PER_CPU_THREAD], %g3 stx %g3, [%g1 + GR_SNAP_THREAD] retry .globl xcall_fetch_glob_pmu xcall_fetch_glob_pmu: sethi %hi(global_cpu_snapshot), %g1 or %g1, %lo(global_cpu_snapshot), %g1 __GET_CPUID(%g2) sllx %g2, 6, %g3 add %g1, %g3, %g1 rd %pic, %g7 stx %g7, [%g1 + (4 * 8)] rd %pcr, %g7 stx %g7, [%g1 + (0 * 8)] retry .globl xcall_fetch_glob_pmu_n4 xcall_fetch_glob_pmu_n4: sethi %hi(global_cpu_snapshot), %g1 or %g1, %lo(global_cpu_snapshot), %g1 __GET_CPUID(%g2) sllx %g2, 6, %g3 add %g1, %g3, %g1 ldxa [%g0] ASI_PIC, %g7 stx %g7, [%g1 + (4 * 8)] mov 0x08, %g3 ldxa [%g3] ASI_PIC, %g7 stx %g7, [%g1 + (5 * 8)] mov 0x10, %g3 ldxa [%g3] ASI_PIC, %g7 stx %g7, [%g1 + (6 * 8)] mov 0x18, %g3 ldxa [%g3] ASI_PIC, %g7 stx %g7, [%g1 + (7 * 8)] mov %o0, %g2 mov %o1, %g3 mov %o5, %g7 mov HV_FAST_VT_GET_PERFREG, %o5 mov 3, %o0 ta HV_FAST_TRAP stx %o1, [%g1 + (3 * 8)] mov HV_FAST_VT_GET_PERFREG, %o5 mov 2, %o0 ta HV_FAST_TRAP stx %o1, [%g1 + (2 * 8)] mov HV_FAST_VT_GET_PERFREG, %o5 mov 1, %o0 ta HV_FAST_TRAP stx %o1, [%g1 + (1 * 8)] mov HV_FAST_VT_GET_PERFREG, %o5 mov 0, %o0 ta HV_FAST_TRAP stx %o1, [%g1 + (0 * 8)] mov %g2, %o0 mov %g3, %o1 mov %g7, %o5 retry __cheetah_xcall_flush_tlb_kernel_range: /* 44 insns */ sethi %hi(PAGE_SIZE - 1), %g2 or %g2, %lo(PAGE_SIZE - 1), %g2 andn %g1, %g2, %g1 andn %g7, %g2, %g7 sub %g7, %g1, %g3 srlx %g3, 18, %g2 brnz,pn %g2, 2f sethi %hi(PAGE_SIZE), %g2 sub %g3, %g2, %g3 or %g1, 0x20, %g1 ! Nucleus 1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP stxa %g0, [%g1 + %g3] ASI_IMMU_DEMAP membar #Sync brnz,pt %g3, 1b sub %g3, %g2, %g3 retry 2: mov 0x80, %g2 stxa %g0, [%g2] ASI_DMMU_DEMAP membar #Sync stxa %g0, [%g2] ASI_IMMU_DEMAP membar #Sync retry nop nop nop nop nop nop nop nop nop nop nop nop nop nop nop nop nop nop nop nop nop nop #ifdef DCACHE_ALIASING_POSSIBLE .align 32 .globl xcall_flush_dcache_page_cheetah xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */ sethi %hi(PAGE_SIZE), %g3 1: subcc %g3, (1 << 5), %g3 stxa %g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE membar #Sync bne,pt %icc, 1b nop retry nop #endif /* DCACHE_ALIASING_POSSIBLE */ .globl xcall_flush_dcache_page_spitfire xcall_flush_dcache_page_spitfire: /* %g1 == physical page address %g7 == kernel page virtual address %g5 == (page->mapping != NULL) */ #ifdef DCACHE_ALIASING_POSSIBLE srlx %g1, (13 - 2), %g1 ! Form tag comparitor sethi %hi(L1DCACHE_SIZE), %g3 ! D$ size == 16K sub %g3, (1 << 5), %g3 ! 
D$ linesize == 32 1: ldxa [%g3] ASI_DCACHE_TAG, %g2 andcc %g2, 0x3, %g0 be,pn %xcc, 2f andn %g2, 0x3, %g2 cmp %g2, %g1 bne,pt %xcc, 2f nop stxa %g0, [%g3] ASI_DCACHE_TAG membar #Sync 2: cmp %g3, 0 bne,pt %xcc, 1b sub %g3, (1 << 5), %g3 brz,pn %g5, 2f #endif /* DCACHE_ALIASING_POSSIBLE */ sethi %hi(PAGE_SIZE), %g3 1: flush %g7 subcc %g3, (1 << 5), %g3 bne,pt %icc, 1b add %g7, (1 << 5), %g7 2: retry nop nop /* %g5: error * %g6: tlb op */ __hypervisor_tlb_xcall_error: mov %g5, %g4 mov %g6, %g5 ba,pt %xcc, etrap rd %pc, %g7 mov %l4, %o0 call hypervisor_tlbop_error_xcall mov %l5, %o1 ba,a,pt %xcc, rtrap .globl __hypervisor_xcall_flush_tlb_mm __hypervisor_xcall_flush_tlb_mm: /* 24 insns */ /* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */ mov %o0, %g2 mov %o1, %g3 mov %o2, %g4 mov %o3, %g1 mov %o5, %g7 clr %o0 /* ARG0: CPU lists unimplemented */ clr %o1 /* ARG1: CPU lists unimplemented */ mov %g5, %o2 /* ARG2: mmu context */ mov HV_MMU_ALL, %o3 /* ARG3: flags */ mov HV_FAST_MMU_DEMAP_CTX, %o5 ta HV_FAST_TRAP mov HV_FAST_MMU_DEMAP_CTX, %g6 brnz,pn %o0, 1f mov %o0, %g5 mov %g2, %o0 mov %g3, %o1 mov %g4, %o2 mov %g1, %o3 mov %g7, %o5 membar #Sync retry 1: sethi %hi(__hypervisor_tlb_xcall_error), %g4 jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0 nop .globl __hypervisor_xcall_flush_tlb_page __hypervisor_xcall_flush_tlb_page: /* 20 insns */ /* %g5=ctx, %g1=vaddr */ mov %o0, %g2 mov %o1, %g3 mov %o2, %g4 mov %g1, %o0 /* ARG0: virtual address */ mov %g5, %o1 /* ARG1: mmu context */ mov HV_MMU_ALL, %o2 /* ARG2: flags */ srlx %o0, PAGE_SHIFT, %o0 sllx %o0, PAGE_SHIFT, %o0 ta HV_MMU_UNMAP_ADDR_TRAP mov HV_MMU_UNMAP_ADDR_TRAP, %g6 brnz,a,pn %o0, 1f mov %o0, %g5 mov %g2, %o0 mov %g3, %o1 mov %g4, %o2 membar #Sync retry 1: sethi %hi(__hypervisor_tlb_xcall_error), %g4 jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0 nop .globl __hypervisor_xcall_flush_tlb_kernel_range __hypervisor_xcall_flush_tlb_kernel_range: /* 44 insns */ /* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */ sethi %hi(PAGE_SIZE - 1), %g2 or %g2, %lo(PAGE_SIZE - 1), %g2 andn %g1, %g2, %g1 andn %g7, %g2, %g7 sub %g7, %g1, %g3 srlx %g3, 18, %g7 add %g2, 1, %g2 sub %g3, %g2, %g3 mov %o0, %g2 mov %o1, %g4 brnz,pn %g7, 2f mov %o2, %g7 1: add %g1, %g3, %o0 /* ARG0: virtual address */ mov 0, %o1 /* ARG1: mmu context */ mov HV_MMU_ALL, %o2 /* ARG2: flags */ ta HV_MMU_UNMAP_ADDR_TRAP mov HV_MMU_UNMAP_ADDR_TRAP, %g6 brnz,pn %o0, 1f mov %o0, %g5 sethi %hi(PAGE_SIZE), %o2 brnz,pt %g3, 1b sub %g3, %o2, %g3 5: mov %g2, %o0 mov %g4, %o1 mov %g7, %o2 membar #Sync retry 1: sethi %hi(__hypervisor_tlb_xcall_error), %g4 jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0 nop 2: mov %o3, %g1 mov %o5, %g3 mov 0, %o0 /* ARG0: CPU lists unimplemented */ mov 0, %o1 /* ARG1: CPU lists unimplemented */ mov 0, %o2 /* ARG2: mmu context == nucleus */ mov HV_MMU_ALL, %o3 /* ARG3: flags */ mov HV_FAST_MMU_DEMAP_CTX, %o5 ta HV_FAST_TRAP mov %g1, %o3 brz,pt %o0, 5b mov %g3, %o5 mov HV_FAST_MMU_DEMAP_CTX, %g6 ba,pt %xcc, 1b clr %g5 /* These just get rescheduled to PIL vectors. 
*/ .globl xcall_call_function xcall_call_function: wr %g0, (1 << PIL_SMP_CALL_FUNC), %set_softint retry .globl xcall_call_function_single xcall_call_function_single: wr %g0, (1 << PIL_SMP_CALL_FUNC_SNGL), %set_softint retry .globl xcall_receive_signal xcall_receive_signal: wr %g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint retry .globl xcall_capture xcall_capture: wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint retry #ifdef CONFIG_KGDB .globl xcall_kgdb_capture xcall_kgdb_capture: wr %g0, (1 << PIL_KGDB_CAPTURE), %set_softint retry #endif #endif /* CONFIG_SMP */ .globl cheetah_patch_cachetlbops cheetah_patch_cachetlbops: save %sp, -128, %sp sethi %hi(__flush_tlb_mm), %o0 or %o0, %lo(__flush_tlb_mm), %o0 sethi %hi(__cheetah_flush_tlb_mm), %o1 or %o1, %lo(__cheetah_flush_tlb_mm), %o1 call tlb_patch_one mov 19, %o2 sethi %hi(__flush_tlb_page), %o0 or %o0, %lo(__flush_tlb_page), %o0 sethi %hi(__cheetah_flush_tlb_page), %o1 or %o1, %lo(__cheetah_flush_tlb_page), %o1 call tlb_patch_one mov 22, %o2 sethi %hi(__flush_tlb_pending), %o0 or %o0, %lo(__flush_tlb_pending), %o0 sethi %hi(__cheetah_flush_tlb_pending), %o1 or %o1, %lo(__cheetah_flush_tlb_pending), %o1 call tlb_patch_one mov 27, %o2 sethi %hi(__flush_tlb_kernel_range), %o0 or %o0, %lo(__flush_tlb_kernel_range), %o0 sethi %hi(__cheetah_flush_tlb_kernel_range), %o1 or %o1, %lo(__cheetah_flush_tlb_kernel_range), %o1 call tlb_patch_one mov 31, %o2 #ifdef DCACHE_ALIASING_POSSIBLE sethi %hi(__flush_dcache_page), %o0 or %o0, %lo(__flush_dcache_page), %o0 sethi %hi(__cheetah_flush_dcache_page), %o1 or %o1, %lo(__cheetah_flush_dcache_page), %o1 call tlb_patch_one mov 11, %o2 #endif /* DCACHE_ALIASING_POSSIBLE */ #ifdef CONFIG_SMP sethi %hi(xcall_flush_tlb_kernel_range), %o0 or %o0, %lo(xcall_flush_tlb_kernel_range), %o0 sethi %hi(__cheetah_xcall_flush_tlb_kernel_range), %o1 or %o1, %lo(__cheetah_xcall_flush_tlb_kernel_range), %o1 call tlb_patch_one mov 44, %o2 #endif /* CONFIG_SMP */ ret restore .globl hypervisor_patch_cachetlbops hypervisor_patch_cachetlbops: save %sp, -128, %sp sethi %hi(__flush_tlb_mm), %o0 or %o0, %lo(__flush_tlb_mm), %o0 sethi %hi(__hypervisor_flush_tlb_mm), %o1 or %o1, %lo(__hypervisor_flush_tlb_mm), %o1 call tlb_patch_one mov 19, %o2 sethi %hi(__flush_tlb_page), %o0 or %o0, %lo(__flush_tlb_page), %o0 sethi %hi(__hypervisor_flush_tlb_page), %o1 or %o1, %lo(__hypervisor_flush_tlb_page), %o1 call tlb_patch_one mov 22, %o2 sethi %hi(__flush_tlb_pending), %o0 or %o0, %lo(__flush_tlb_pending), %o0 sethi %hi(__hypervisor_flush_tlb_pending), %o1 or %o1, %lo(__hypervisor_flush_tlb_pending), %o1 call tlb_patch_one mov 27, %o2 sethi %hi(__flush_tlb_kernel_range), %o0 or %o0, %lo(__flush_tlb_kernel_range), %o0 sethi %hi(__hypervisor_flush_tlb_kernel_range), %o1 or %o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1 call tlb_patch_one mov 31, %o2 #ifdef DCACHE_ALIASING_POSSIBLE sethi %hi(__flush_dcache_page), %o0 or %o0, %lo(__flush_dcache_page), %o0 sethi %hi(__hypervisor_flush_dcache_page), %o1 or %o1, %lo(__hypervisor_flush_dcache_page), %o1 call tlb_patch_one mov 2, %o2 #endif /* DCACHE_ALIASING_POSSIBLE */ #ifdef CONFIG_SMP sethi %hi(xcall_flush_tlb_mm), %o0 or %o0, %lo(xcall_flush_tlb_mm), %o0 sethi %hi(__hypervisor_xcall_flush_tlb_mm), %o1 or %o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1 call tlb_patch_one mov 24, %o2 sethi %hi(xcall_flush_tlb_page), %o0 or %o0, %lo(xcall_flush_tlb_page), %o0 sethi %hi(__hypervisor_xcall_flush_tlb_page), %o1 or %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1 call tlb_patch_one mov 20, %o2 sethi 
%hi(xcall_flush_tlb_kernel_range), %o0 or %o0, %lo(xcall_flush_tlb_kernel_range), %o0 sethi %hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1 or %o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1 call tlb_patch_one mov 44, %o2 #endif /* CONFIG_SMP */ ret restore
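The cheetah/hypervisor patch routines above hand tlb_patch_one a target routine, a replacement body, and an instruction count (the operands set up with "mov 44, %o2" and friends). As a rough, illustrative C sketch of what such run-time instruction patching amounts to, here is a minimal loop; patch_insns() and flushi() are hypothetical stand-ins, not the kernel's actual helpers.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical stand-in for flushing one patched instruction word out of
 * the instruction cache (on sparc64 this would be a "flush" instruction). */
static void flushi(uint32_t *addr)
{
        (void)addr;
}

/* Copy 'count' 32-bit instruction words from the replacement body over the
 * original routine, flushing each word as it is written. */
static void patch_insns(uint32_t *old, const uint32_t *replacement, size_t count)
{
        for (size_t i = 0; i < count; i++) {
                old[i] = replacement[i];
                flushi(&old[i]);
        }
}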
AirFortressIlikara/LS2K0300-linux-4.19
6,147
arch/sparc/mm/viking.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * viking.S: High speed Viking cache/mmu operations * * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be) * Copyright (C) 1997,1998,1999 Jakub Jelinek (jj@ultra.linux.cz) * Copyright (C) 1999 Pavel Semerad (semerad@ss1000.ms.mff.cuni.cz) */ #include <asm/ptrace.h> #include <asm/psr.h> #include <asm/asm-offsets.h> #include <asm/asi.h> #include <asm/mxcc.h> #include <asm/page.h> #include <asm/pgtsrmmu.h> #include <asm/viking.h> #ifdef CONFIG_SMP .data .align 4 sun4dsmp_flush_tlb_spin: .word 0 #endif .text .align 4 .globl viking_flush_cache_all, viking_flush_cache_mm .globl viking_flush_cache_range, viking_flush_cache_page .globl viking_flush_page, viking_mxcc_flush_page .globl viking_flush_page_for_dma, viking_flush_page_to_ram .globl viking_flush_sig_insns .globl viking_flush_tlb_all, viking_flush_tlb_mm .globl viking_flush_tlb_range, viking_flush_tlb_page viking_flush_page: sethi %hi(PAGE_OFFSET), %g2 sub %o0, %g2, %g3 srl %g3, 12, %g1 ! ppage >> 12 clr %o1 ! set counter, 0 - 127 sethi %hi(PAGE_OFFSET + PAGE_SIZE - 0x80000000), %o3 sethi %hi(0x80000000), %o4 sethi %hi(VIKING_PTAG_VALID), %o5 sethi %hi(2*PAGE_SIZE), %o0 sethi %hi(PAGE_SIZE), %g7 clr %o2 ! block counter, 0 - 3 5: sll %o1, 5, %g4 or %g4, %o4, %g4 ! 0x80000000 | (set << 5) sll %o2, 26, %g5 ! block << 26 6: or %g5, %g4, %g5 ldda [%g5] ASI_M_DATAC_TAG, %g2 cmp %g3, %g1 ! ptag == ppage? bne 7f inc %o2 andcc %g2, %o5, %g0 ! ptag VALID? be 7f add %g4, %o3, %g2 ! (PAGE_OFFSET + PAGE_SIZE) | (set << 5) ld [%g2], %g3 ld [%g2 + %g7], %g3 add %g2, %o0, %g2 ld [%g2], %g3 ld [%g2 + %g7], %g3 add %g2, %o0, %g2 ld [%g2], %g3 ld [%g2 + %g7], %g3 add %g2, %o0, %g2 ld [%g2], %g3 b 8f ld [%g2 + %g7], %g3 7: cmp %o2, 3 ble 6b sll %o2, 26, %g5 ! block << 26 8: inc %o1 cmp %o1, 0x7f ble 5b clr %o2 9: retl nop viking_mxcc_flush_page: sethi %hi(PAGE_OFFSET), %g2 sub %o0, %g2, %g3 sub %g3, -PAGE_SIZE, %g3 ! ppage + PAGE_SIZE sethi %hi(MXCC_SRCSTREAM), %o3 ! assume %hi(MXCC_SRCSTREAM) == %hi(MXCC_DESTSTREAM) mov 0x10, %g2 ! 
set cacheable bit or %o3, %lo(MXCC_SRCSTREAM), %o2 or %o3, %lo(MXCC_DESSTREAM), %o3 sub %g3, MXCC_STREAM_SIZE, %g3 6: stda %g2, [%o2] ASI_M_MXCC stda %g2, [%o3] ASI_M_MXCC andncc %g3, PAGE_MASK, %g0 bne 6b sub %g3, MXCC_STREAM_SIZE, %g3 9: retl nop viking_flush_cache_page: viking_flush_cache_range: #ifndef CONFIG_SMP ld [%o0 + VMA_VM_MM], %o0 #endif viking_flush_cache_mm: #ifndef CONFIG_SMP ld [%o0 + AOFF_mm_context], %g1 cmp %g1, -1 bne viking_flush_cache_all nop b,a viking_flush_cache_out #endif viking_flush_cache_all: WINDOW_FLUSH(%g4, %g5) viking_flush_cache_out: retl nop viking_flush_tlb_all: mov 0x400, %g1 retl sta %g0, [%g1] ASI_M_FLUSH_PROBE viking_flush_tlb_mm: mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o1 lda [%g1] ASI_M_MMUREGS, %g5 #ifndef CONFIG_SMP cmp %o1, -1 be 1f #endif mov 0x300, %g2 sta %o1, [%g1] ASI_M_MMUREGS sta %g0, [%g2] ASI_M_FLUSH_PROBE retl sta %g5, [%g1] ASI_M_MMUREGS #ifndef CONFIG_SMP 1: retl nop #endif viking_flush_tlb_range: ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 lda [%g1] ASI_M_MMUREGS, %g5 #ifndef CONFIG_SMP cmp %o3, -1 be 2f #endif sethi %hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4 sta %o3, [%g1] ASI_M_MMUREGS and %o1, %o4, %o1 add %o1, 0x200, %o1 sta %g0, [%o1] ASI_M_FLUSH_PROBE 1: sub %o1, %o4, %o1 cmp %o1, %o2 blu,a 1b sta %g0, [%o1] ASI_M_FLUSH_PROBE retl sta %g5, [%g1] ASI_M_MMUREGS #ifndef CONFIG_SMP 2: retl nop #endif viking_flush_tlb_page: ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 lda [%g1] ASI_M_MMUREGS, %g5 #ifndef CONFIG_SMP cmp %o3, -1 be 1f #endif and %o1, PAGE_MASK, %o1 sta %o3, [%g1] ASI_M_MMUREGS sta %g0, [%o1] ASI_M_FLUSH_PROBE retl sta %g5, [%g1] ASI_M_MMUREGS #ifndef CONFIG_SMP 1: retl nop #endif viking_flush_page_to_ram: viking_flush_page_for_dma: viking_flush_sig_insns: retl nop #ifdef CONFIG_SMP .globl sun4dsmp_flush_tlb_all, sun4dsmp_flush_tlb_mm .globl sun4dsmp_flush_tlb_range, sun4dsmp_flush_tlb_page sun4dsmp_flush_tlb_all: sethi %hi(sun4dsmp_flush_tlb_spin), %g3 1: ldstub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5 tst %g5 bne 2f mov 0x400, %g1 sta %g0, [%g1] ASI_M_FLUSH_PROBE retl stb %g0, [%g3 + %lo(sun4dsmp_flush_tlb_spin)] 2: tst %g5 bne,a 2b ldub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5 b,a 1b sun4dsmp_flush_tlb_mm: sethi %hi(sun4dsmp_flush_tlb_spin), %g3 1: ldstub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5 tst %g5 bne 2f mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o1 lda [%g1] ASI_M_MMUREGS, %g5 mov 0x300, %g2 sta %o1, [%g1] ASI_M_MMUREGS sta %g0, [%g2] ASI_M_FLUSH_PROBE sta %g5, [%g1] ASI_M_MMUREGS retl stb %g0, [%g3 + %lo(sun4dsmp_flush_tlb_spin)] 2: tst %g5 bne,a 2b ldub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5 b,a 1b sun4dsmp_flush_tlb_range: sethi %hi(sun4dsmp_flush_tlb_spin), %g3 1: ldstub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5 tst %g5 bne 3f mov SRMMU_CTX_REG, %g1 ld [%o0 + VMA_VM_MM], %o0 ld [%o0 + AOFF_mm_context], %o3 lda [%g1] ASI_M_MMUREGS, %g5 sethi %hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4 sta %o3, [%g1] ASI_M_MMUREGS and %o1, %o4, %o1 add %o1, 0x200, %o1 sta %g0, [%o1] ASI_M_FLUSH_PROBE 2: sub %o1, %o4, %o1 cmp %o1, %o2 blu,a 2b sta %g0, [%o1] ASI_M_FLUSH_PROBE sta %g5, [%g1] ASI_M_MMUREGS retl stb %g0, [%g3 + %lo(sun4dsmp_flush_tlb_spin)] 3: tst %g5 bne,a 3b ldub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5 b,a 1b sun4dsmp_flush_tlb_page: sethi %hi(sun4dsmp_flush_tlb_spin), %g3 1: ldstub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5 tst %g5 bne 2f mov SRMMU_CTX_REG, %g1 ld [%o0 + VMA_VM_MM], %o0 ld [%o0 + 
AOFF_mm_context], %o3 lda [%g1] ASI_M_MMUREGS, %g5 and %o1, PAGE_MASK, %o1 sta %o3, [%g1] ASI_M_MMUREGS sta %g0, [%o1] ASI_M_FLUSH_PROBE sta %g5, [%g1] ASI_M_MMUREGS retl stb %g0, [%g3 + %lo(sun4dsmp_flush_tlb_spin)] 2: tst %g5 bne,a 2b ldub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5 b,a 1b nop #endif
AirFortressIlikara/LS2K0300-linux-4.19
3,219
arch/sparc/mm/tsunami.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * tsunami.S: High speed MicroSparc-I mmu/cache operations. * * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) */ #include <asm/ptrace.h> #include <asm/asm-offsets.h> #include <asm/psr.h> #include <asm/asi.h> #include <asm/page.h> #include <asm/pgtsrmmu.h> .text .align 4 .globl tsunami_flush_cache_all, tsunami_flush_cache_mm .globl tsunami_flush_cache_range, tsunami_flush_cache_page .globl tsunami_flush_page_to_ram, tsunami_flush_page_for_dma .globl tsunami_flush_sig_insns .globl tsunami_flush_tlb_all, tsunami_flush_tlb_mm .globl tsunami_flush_tlb_range, tsunami_flush_tlb_page /* Sliiick... */ tsunami_flush_cache_page: tsunami_flush_cache_range: ld [%o0 + VMA_VM_MM], %o0 tsunami_flush_cache_mm: ld [%o0 + AOFF_mm_context], %g2 cmp %g2, -1 be tsunami_flush_cache_out tsunami_flush_cache_all: WINDOW_FLUSH(%g4, %g5) tsunami_flush_page_for_dma: sta %g0, [%g0] ASI_M_IC_FLCLEAR sta %g0, [%g0] ASI_M_DC_FLCLEAR tsunami_flush_cache_out: tsunami_flush_page_to_ram: retl nop tsunami_flush_sig_insns: flush %o1 retl flush %o1 + 4 /* More slick stuff... */ tsunami_flush_tlb_range: ld [%o0 + VMA_VM_MM], %o0 tsunami_flush_tlb_mm: ld [%o0 + AOFF_mm_context], %g2 cmp %g2, -1 be tsunami_flush_tlb_out tsunami_flush_tlb_all: mov 0x400, %o1 sta %g0, [%o1] ASI_M_FLUSH_PROBE nop nop nop nop nop tsunami_flush_tlb_out: retl nop /* This one can be done in a fine grained manner... */ tsunami_flush_tlb_page: ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 andn %o1, (PAGE_SIZE - 1), %o1 cmp %o3, -1 be tsunami_flush_tlb_page_out lda [%g1] ASI_M_MMUREGS, %g5 sta %o3, [%g1] ASI_M_MMUREGS sta %g0, [%o1] ASI_M_FLUSH_PROBE nop nop nop nop nop tsunami_flush_tlb_page_out: retl sta %g5, [%g1] ASI_M_MMUREGS #define MIRROR_BLOCK(dst, src, offset, t0, t1, t2, t3) \ ldd [src + offset + 0x18], t0; \ std t0, [dst + offset + 0x18]; \ ldd [src + offset + 0x10], t2; \ std t2, [dst + offset + 0x10]; \ ldd [src + offset + 0x08], t0; \ std t0, [dst + offset + 0x08]; \ ldd [src + offset + 0x00], t2; \ std t2, [dst + offset + 0x00]; tsunami_copy_1page: /* NOTE: This routine has to be shorter than 70insns --jj */ or %g0, (PAGE_SIZE >> 8), %g1 1: MIRROR_BLOCK(%o0, %o1, 0x00, %o2, %o3, %o4, %o5) MIRROR_BLOCK(%o0, %o1, 0x20, %o2, %o3, %o4, %o5) MIRROR_BLOCK(%o0, %o1, 0x40, %o2, %o3, %o4, %o5) MIRROR_BLOCK(%o0, %o1, 0x60, %o2, %o3, %o4, %o5) MIRROR_BLOCK(%o0, %o1, 0x80, %o2, %o3, %o4, %o5) MIRROR_BLOCK(%o0, %o1, 0xa0, %o2, %o3, %o4, %o5) MIRROR_BLOCK(%o0, %o1, 0xc0, %o2, %o3, %o4, %o5) MIRROR_BLOCK(%o0, %o1, 0xe0, %o2, %o3, %o4, %o5) subcc %g1, 1, %g1 add %o0, 0x100, %o0 bne 1b add %o1, 0x100, %o1 .globl tsunami_setup_blockops tsunami_setup_blockops: sethi %hi(__copy_1page), %o0 or %o0, %lo(__copy_1page), %o0 sethi %hi(tsunami_copy_1page), %o1 or %o1, %lo(tsunami_copy_1page), %o1 sethi %hi(tsunami_setup_blockops), %o2 or %o2, %lo(tsunami_setup_blockops), %o2 ld [%o1], %o4 1: add %o1, 4, %o1 st %o4, [%o0] add %o0, 4, %o0 cmp %o1, %o2 bne 1b ld [%o1], %o4 sta %g0, [%g0] ASI_M_IC_FLCLEAR sta %g0, [%g0] ASI_M_DC_FLCLEAR retl nop
AirFortressIlikara/LS2K0300-linux-4.19
5,349
arch/sparc/mm/swift.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * swift.S: MicroSparc-II mmu/cache operations. * * Copyright (C) 1999 David S. Miller (davem@redhat.com) */ #include <asm/psr.h> #include <asm/asi.h> #include <asm/page.h> #include <asm/pgtsrmmu.h> #include <asm/asm-offsets.h> .text .align 4 #if 1 /* XXX screw this, I can't get the VAC flushes working * XXX reliably... -DaveM */ .globl swift_flush_cache_all, swift_flush_cache_mm .globl swift_flush_cache_range, swift_flush_cache_page .globl swift_flush_page_for_dma .globl swift_flush_page_to_ram swift_flush_cache_all: swift_flush_cache_mm: swift_flush_cache_range: swift_flush_cache_page: swift_flush_page_for_dma: swift_flush_page_to_ram: sethi %hi(0x2000), %o0 1: subcc %o0, 0x10, %o0 add %o0, %o0, %o1 sta %g0, [%o0] ASI_M_DATAC_TAG bne 1b sta %g0, [%o1] ASI_M_TXTC_TAG retl nop #else .globl swift_flush_cache_all swift_flush_cache_all: WINDOW_FLUSH(%g4, %g5) /* Just clear out all the tags. */ sethi %hi(16 * 1024), %o0 1: subcc %o0, 16, %o0 sta %g0, [%o0] ASI_M_TXTC_TAG bne 1b sta %g0, [%o0] ASI_M_DATAC_TAG retl nop .globl swift_flush_cache_mm swift_flush_cache_mm: ld [%o0 + AOFF_mm_context], %g2 cmp %g2, -1 be swift_flush_cache_mm_out WINDOW_FLUSH(%g4, %g5) rd %psr, %g1 andn %g1, PSR_ET, %g3 wr %g3, 0x0, %psr nop nop mov SRMMU_CTX_REG, %g7 lda [%g7] ASI_M_MMUREGS, %g5 sta %g2, [%g7] ASI_M_MMUREGS #if 1 sethi %hi(0x2000), %o0 1: subcc %o0, 0x10, %o0 sta %g0, [%o0] ASI_M_FLUSH_CTX bne 1b nop #else clr %o0 or %g0, 2048, %g7 or %g0, 2048, %o1 add %o1, 2048, %o2 add %o2, 2048, %o3 mov 16, %o4 add %o4, 2048, %o5 add %o5, 2048, %g2 add %g2, 2048, %g3 1: sta %g0, [%o0 ] ASI_M_FLUSH_CTX sta %g0, [%o0 + %o1] ASI_M_FLUSH_CTX sta %g0, [%o0 + %o2] ASI_M_FLUSH_CTX sta %g0, [%o0 + %o3] ASI_M_FLUSH_CTX sta %g0, [%o0 + %o4] ASI_M_FLUSH_CTX sta %g0, [%o0 + %o5] ASI_M_FLUSH_CTX sta %g0, [%o0 + %g2] ASI_M_FLUSH_CTX sta %g0, [%o0 + %g3] ASI_M_FLUSH_CTX subcc %g7, 32, %g7 bne 1b add %o0, 32, %o0 #endif mov SRMMU_CTX_REG, %g7 sta %g5, [%g7] ASI_M_MMUREGS wr %g1, 0x0, %psr nop nop swift_flush_cache_mm_out: retl nop .globl swift_flush_cache_range swift_flush_cache_range: ld [%o0 + VMA_VM_MM], %o0 sub %o2, %o1, %o2 sethi %hi(4096), %o3 cmp %o2, %o3 bgu swift_flush_cache_mm nop b 70f nop .globl swift_flush_cache_page swift_flush_cache_page: ld [%o0 + VMA_VM_MM], %o0 70: ld [%o0 + AOFF_mm_context], %g2 cmp %g2, -1 be swift_flush_cache_page_out WINDOW_FLUSH(%g4, %g5) rd %psr, %g1 andn %g1, PSR_ET, %g3 wr %g3, 0x0, %psr nop nop mov SRMMU_CTX_REG, %g7 lda [%g7] ASI_M_MMUREGS, %g5 sta %g2, [%g7] ASI_M_MMUREGS andn %o1, (PAGE_SIZE - 1), %o1 #if 1 sethi %hi(0x1000), %o0 1: subcc %o0, 0x10, %o0 sta %g0, [%o1 + %o0] ASI_M_FLUSH_PAGE bne 1b nop #else or %g0, 512, %g7 or %g0, 512, %o0 add %o0, 512, %o2 add %o2, 512, %o3 add %o3, 512, %o4 add %o4, 512, %o5 add %o5, 512, %g3 add %g3, 512, %g4 1: sta %g0, [%o1 ] ASI_M_FLUSH_PAGE sta %g0, [%o1 + %o0] ASI_M_FLUSH_PAGE sta %g0, [%o1 + %o2] ASI_M_FLUSH_PAGE sta %g0, [%o1 + %o3] ASI_M_FLUSH_PAGE sta %g0, [%o1 + %o4] ASI_M_FLUSH_PAGE sta %g0, [%o1 + %o5] ASI_M_FLUSH_PAGE sta %g0, [%o1 + %g3] ASI_M_FLUSH_PAGE sta %g0, [%o1 + %g4] ASI_M_FLUSH_PAGE subcc %g7, 16, %g7 bne 1b add %o1, 16, %o1 #endif mov SRMMU_CTX_REG, %g7 sta %g5, [%g7] ASI_M_MMUREGS wr %g1, 0x0, %psr nop nop swift_flush_cache_page_out: retl nop /* Swift is write-thru, however it is not * I/O nor TLB-walk coherent. Also it has * caches which are virtually indexed and tagged. 
*/ .globl swift_flush_page_for_dma .globl swift_flush_page_to_ram swift_flush_page_for_dma: swift_flush_page_to_ram: andn %o0, (PAGE_SIZE - 1), %o1 #if 1 sethi %hi(0x1000), %o0 1: subcc %o0, 0x10, %o0 sta %g0, [%o1 + %o0] ASI_M_FLUSH_PAGE bne 1b nop #else or %g0, 512, %g7 or %g0, 512, %o0 add %o0, 512, %o2 add %o2, 512, %o3 add %o3, 512, %o4 add %o4, 512, %o5 add %o5, 512, %g3 add %g3, 512, %g4 1: sta %g0, [%o1 ] ASI_M_FLUSH_PAGE sta %g0, [%o1 + %o0] ASI_M_FLUSH_PAGE sta %g0, [%o1 + %o2] ASI_M_FLUSH_PAGE sta %g0, [%o1 + %o3] ASI_M_FLUSH_PAGE sta %g0, [%o1 + %o4] ASI_M_FLUSH_PAGE sta %g0, [%o1 + %o5] ASI_M_FLUSH_PAGE sta %g0, [%o1 + %g3] ASI_M_FLUSH_PAGE sta %g0, [%o1 + %g4] ASI_M_FLUSH_PAGE subcc %g7, 16, %g7 bne 1b add %o1, 16, %o1 #endif retl nop #endif .globl swift_flush_sig_insns swift_flush_sig_insns: flush %o1 retl flush %o1 + 4 .globl swift_flush_tlb_mm .globl swift_flush_tlb_range .globl swift_flush_tlb_all swift_flush_tlb_range: ld [%o0 + VMA_VM_MM], %o0 swift_flush_tlb_mm: ld [%o0 + AOFF_mm_context], %g2 cmp %g2, -1 be swift_flush_tlb_all_out swift_flush_tlb_all: mov 0x400, %o1 sta %g0, [%o1] ASI_M_FLUSH_PROBE swift_flush_tlb_all_out: retl nop .globl swift_flush_tlb_page swift_flush_tlb_page: ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 andn %o1, (PAGE_SIZE - 1), %o1 cmp %o3, -1 be swift_flush_tlb_page_out nop #if 1 mov 0x400, %o1 sta %g0, [%o1] ASI_M_FLUSH_PROBE #else lda [%g1] ASI_M_MMUREGS, %g5 sta %o3, [%g1] ASI_M_MMUREGS sta %g0, [%o1] ASI_M_FLUSH_PAGE /* rem. virt. cache. prot. */ sta %g0, [%o1] ASI_M_FLUSH_PROBE sta %g5, [%g1] ASI_M_MMUREGS #endif swift_flush_tlb_page_out: retl nop
AirFortressIlikara/LS2K0300-linux-4.19
1,937
arch/sparc/mm/srmmu_access.S
/* SPDX-License-Identifier: GPL-2.0 */ /* Assembler variants of srmmu access functions. * Implemented in assembler to allow run-time patching. * LEON uses a different ASI for MMUREGS than SUN. * * The leon_1insn_patch infrastructure is used * for the run-time patching. */ #include <linux/linkage.h> #include <asm/asmmacro.h> #include <asm/pgtsrmmu.h> #include <asm/asi.h> /* unsigned int srmmu_get_mmureg(void) */ ENTRY(srmmu_get_mmureg) LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %o0) SUN_PI_(lda [%g0] ASI_M_MMUREGS, %o0) retl nop ENDPROC(srmmu_get_mmureg) /* void srmmu_set_mmureg(unsigned long regval) */ ENTRY(srmmu_set_mmureg) LEON_PI(sta %o0, [%g0] ASI_LEON_MMUREGS) SUN_PI_(sta %o0, [%g0] ASI_M_MMUREGS) retl nop ENDPROC(srmmu_set_mmureg) /* void srmmu_set_ctable_ptr(unsigned long paddr) */ ENTRY(srmmu_set_ctable_ptr) /* paddr = ((paddr >> 4) & SRMMU_CTX_PMASK); */ srl %o0, 4, %g1 and %g1, SRMMU_CTX_PMASK, %g1 mov SRMMU_CTXTBL_PTR, %g2 LEON_PI(sta %g1, [%g2] ASI_LEON_MMUREGS) SUN_PI_(sta %g1, [%g2] ASI_M_MMUREGS) retl nop ENDPROC(srmmu_set_ctable_ptr) /* void srmmu_set_context(int context) */ ENTRY(srmmu_set_context) mov SRMMU_CTX_REG, %g1 LEON_PI(sta %o0, [%g1] ASI_LEON_MMUREGS) SUN_PI_(sta %o0, [%g1] ASI_M_MMUREGS) retl nop ENDPROC(srmmu_set_context) /* int srmmu_get_context(void) */ ENTRY(srmmu_get_context) mov SRMMU_CTX_REG, %o0 LEON_PI(lda [%o0] ASI_LEON_MMUREGS, %o0) SUN_PI_(lda [%o0] ASI_M_MMUREGS, %o0) retl nop ENDPROC(srmmu_get_context) /* unsigned int srmmu_get_fstatus(void) */ ENTRY(srmmu_get_fstatus) mov SRMMU_FAULT_STATUS, %o0 LEON_PI(lda [%o0] ASI_LEON_MMUREGS, %o0) SUN_PI_(lda [%o0] ASI_M_MMUREGS, %o0) retl nop ENDPROC(srmmu_get_fstatus) /* unsigned int srmmu_get_faddr(void) */ ENTRY(srmmu_get_faddr) mov SRMMU_FAULT_ADDR, %o0 LEON_PI(lda [%o0] ASI_LEON_MMUREGS, %o0) SUN_PI_(lda [%o0] ASI_M_MMUREGS, %o0) retl nop ENDPROC(srmmu_get_faddr)
AirFortressIlikara/LS2K0300-linux-4.19
1,047
arch/h8300/kernel/head_ram.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <linux/sys.h> #include <linux/init.h> #include <asm/unistd.h> #include <asm/setup.h> #include <asm/segment.h> #include <asm/linkage.h> #include <asm/asm-offsets.h> #include <asm/thread_info.h> #include <asm/errno.h> #if defined(CONFIG_CPU_H8300H) .h8300h #define SYSCR 0xfee012 #define IRAMTOP 0xffff20 #endif #if defined(CONFIG_CPU_H8S) .h8300s #define INTCR 0xffff31 #define IRAMTOP 0xffc000 #endif __HEAD .global _start _start: mov.l #IRAMTOP,sp /* .bss clear */ mov.l #_sbss,er5 mov.l #_ebss,er4 sub.l er5,er4 shlr er4 shlr er4 sub.l er2,er2 1: mov.l er2,@er5 adds #4,er5 dec.l #1,er4 bne 1b jsr @h8300_fdt_init /* linux kernel start */ #if defined(CONFIG_CPU_H8300H) ldc #0xd0,ccr /* running kernel */ mov.l #SYSCR,er0 bclr #3,@er0 #endif #if defined(CONFIG_CPU_H8S) ldc #0x07,exr bclr #4,@INTCR:8 bset #5,@INTCR:8 /* Interrupt mode 2 */ ldc #0x90,ccr /* running kernel */ #endif mov.l #init_thread_union,sp add.l #0x2000,sp jsr @start_kernel 1: bra 1b .end
AirFortressIlikara/LS2K0300-linux-4.19
8,271
arch/h8300/kernel/entry.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * * linux/arch/h8300/kernel/entry.S * * Yoshinori Sato <ysato@users.sourceforge.jp> * David McCullough <davidm@snapgear.com> * */ /* * entry.S * include exception/interrupt gateway * system call entry */ #include <linux/sys.h> #include <asm/unistd.h> #include <asm/setup.h> #include <asm/segment.h> #include <asm/linkage.h> #include <asm/asm-offsets.h> #include <asm/thread_info.h> #include <asm/errno.h> #if defined(CONFIG_CPU_H8300H) #define USERRET 8 INTERRUPTS = 64 .h8300h .macro SHLL2 reg shll.l \reg shll.l \reg .endm .macro SHLR2 reg shlr.l \reg shlr.l \reg .endm .macro SAVEREGS mov.l er0,@-sp mov.l er1,@-sp mov.l er2,@-sp mov.l er3,@-sp .endm .macro RESTOREREGS mov.l @sp+,er3 mov.l @sp+,er2 .endm .macro SAVEEXR .endm .macro RESTOREEXR .endm #endif #if defined(CONFIG_CPU_H8S) #define USERRET 10 #define USEREXR 8 INTERRUPTS = 128 .h8300s .macro SHLL2 reg shll.l #2,\reg .endm .macro SHLR2 reg shlr.l #2,\reg .endm .macro SAVEREGS stm.l er0-er3,@-sp .endm .macro RESTOREREGS ldm.l @sp+,er2-er3 .endm .macro SAVEEXR mov.w @(USEREXR:16,er0),r1 mov.w r1,@(LEXR-LER3:16,sp) /* copy EXR */ .endm .macro RESTOREEXR mov.w @(LEXR-LER1:16,sp),r1 /* restore EXR */ mov.b r1l,r1h mov.w r1,@(USEREXR:16,er0) .endm #endif /* CPU context save/restore macros. */ .macro SAVE_ALL mov.l er0,@-sp stc ccr,r0l /* check kernel mode */ btst #4,r0l bne 5f /* user mode */ mov.l sp,@_sw_usp mov.l @sp,er0 /* restore saved er0 */ orc #0x10,ccr /* switch kernel stack */ mov.l @_sw_ksp,sp sub.l #(LRET-LORIG),sp /* allocate LORIG - LRET */ SAVEREGS mov.l @_sw_usp,er0 mov.l @(USERRET:16,er0),er1 /* copy the RET addr */ mov.l er1,@(LRET-LER3:16,sp) SAVEEXR mov.l @(LORIG-LER3:16,sp),er0 mov.l er0,@(LER0-LER3:16,sp) /* copy ER0 */ mov.w e1,r1 /* e1 highbyte = ccr */ and #0xef,r1h /* mask mode? 
flag */ bra 6f 5: /* kernel mode */ mov.l @sp,er0 /* restore saved er0 */ subs #2,sp /* set dummy ccr */ subs #4,sp /* set dummp sp */ SAVEREGS mov.w @(LRET-LER3:16,sp),r1 /* copy old ccr */ 6: mov.b r1h,r1l mov.b #0,r1h mov.w r1,@(LCCR-LER3:16,sp) /* set ccr */ mov.l @_sw_usp,er2 mov.l er2,@(LSP-LER3:16,sp) /* set usp */ mov.l er6,@-sp /* syscall arg #6 */ mov.l er5,@-sp /* syscall arg #5 */ mov.l er4,@-sp /* syscall arg #4 */ .endm /* r1 = ccr */ .macro RESTORE_ALL mov.l @sp+,er4 mov.l @sp+,er5 mov.l @sp+,er6 RESTOREREGS mov.w @(LCCR-LER1:16,sp),r0 /* check kernel mode */ btst #4,r0l bne 7f orc #0xc0,ccr mov.l @(LSP-LER1:16,sp),er0 mov.l @(LER0-LER1:16,sp),er1 /* restore ER0 */ mov.l er1,@er0 RESTOREEXR mov.w @(LCCR-LER1:16,sp),r1 /* restore the RET addr */ mov.b r1l,r1h mov.b @(LRET+1-LER1:16,sp),r1l mov.w r1,e1 mov.w @(LRET+2-LER1:16,sp),r1 mov.l er1,@(USERRET:16,er0) mov.l @sp+,er1 add.l #(LRET-LER1),sp /* remove LORIG - LRET */ mov.l sp,@_sw_ksp andc #0xef,ccr /* switch to user mode */ mov.l er0,sp bra 8f 7: mov.l @sp+,er1 add.l #10,sp 8: mov.l @sp+,er0 adds #4,sp /* remove the sw created LVEC */ rte .endm .globl _system_call .globl ret_from_exception .globl ret_from_fork .globl ret_from_kernel_thread .globl ret_from_interrupt .globl _interrupt_redirect_table .globl _sw_ksp,_sw_usp .globl _resume .globl _interrupt_entry .globl _trace_break .globl _nmi #if defined(CONFIG_ROMKERNEL) .section .int_redirect,"ax" _interrupt_redirect_table: #if defined(CONFIG_CPU_H8300H) .rept 7 .long 0 .endr #endif #if defined(CONFIG_CPU_H8S) .rept 5 .long 0 .endr jmp @_trace_break .long 0 #endif jsr @_interrupt_entry /* NMI */ jmp @_system_call /* TRAPA #0 (System call) */ .long 0 #if defined(CONFIG_KGDB) jmp @_kgdb_trap #else .long 0 #endif jmp @_trace_break /* TRAPA #3 (breakpoint) */ .rept INTERRUPTS-12 jsr @_interrupt_entry .endr #endif #if defined(CONFIG_RAMKERNEL) .globl _interrupt_redirect_table .section .bss _interrupt_redirect_table: .space 4 #endif .section .text .align 2 _interrupt_entry: SAVE_ALL /* r1l is saved ccr */ mov.l sp,er0 add.l #LVEC,er0 btst #4,r1l bne 1f /* user LVEC */ mov.l @_sw_usp,er0 adds #4,er0 1: mov.l @er0,er0 /* LVEC address */ #if defined(CONFIG_ROMKERNEL) sub.l #_interrupt_redirect_table,er0 #endif #if defined(CONFIG_RAMKERNEL) mov.l @_interrupt_redirect_table,er1 sub.l er1,er0 #endif SHLR2 er0 dec.l #1,er0 mov.l sp,er1 subs #4,er1 /* adjust ret_pc */ #if defined(CONFIG_CPU_H8S) orc #7,exr #endif jsr @do_IRQ jmp @ret_from_interrupt _system_call: subs #4,sp /* dummy LVEC */ SAVE_ALL /* er0: syscall nr */ andc #0xbf,ccr mov.l er0,er4 /* save top of frame */ mov.l sp,er0 jsr @set_esp0 andc #0x3f,ccr mov.l sp,er2 and.w #0xe000,r2 mov.l @(TI_FLAGS:16,er2),er2 and.w #_TIF_WORK_SYSCALL_MASK,r2 beq 1f mov.l sp,er0 jsr @do_syscall_trace_enter 1: cmp.l #__NR_syscalls,er4 bcc badsys SHLL2 er4 mov.l #_sys_call_table,er0 add.l er4,er0 mov.l @er0,er4 beq ret_from_exception:16 mov.l @(LER1:16,sp),er0 mov.l @(LER2:16,sp),er1 mov.l @(LER3:16,sp),er2 jsr @er4 mov.l er0,@(LER0:16,sp) /* save the return value */ mov.l sp,er2 and.w #0xe000,r2 mov.l @(TI_FLAGS:16,er2),er2 and.w #_TIF_WORK_SYSCALL_MASK,r2 beq 2f mov.l sp,er0 jsr @do_syscall_trace_leave 2: orc #0xc0,ccr bra resume_userspace badsys: mov.l #-ENOSYS,er0 mov.l er0,@(LER0:16,sp) bra resume_userspace #if !defined(CONFIG_PREEMPT) #define resume_kernel restore_all #endif ret_from_exception: #if defined(CONFIG_PREEMPT) orc #0xc0,ccr #endif ret_from_interrupt: mov.b @(LCCR+1:16,sp),r0l btst #4,r0l bne resume_kernel:16 /* return from 
kernel */ resume_userspace: andc #0xbf,ccr mov.l sp,er4 and.w #0xe000,r4 /* er4 <- current thread info */ mov.l @(TI_FLAGS:16,er4),er1 and.l #_TIF_WORK_MASK,er1 beq restore_all:8 work_pending: btst #TIF_NEED_RESCHED,r1l bne work_resched:8 /* work notifysig */ mov.l sp,er0 subs #4,er0 /* er0: pt_regs */ jsr @do_notify_resume bra resume_userspace:8 work_resched: mov.l sp,er0 jsr @set_esp0 jsr @schedule bra resume_userspace:8 restore_all: RESTORE_ALL /* Does RTE */ #if defined(CONFIG_PREEMPT) resume_kernel: mov.l @(TI_PRE_COUNT:16,er4),er0 bne restore_all:8 need_resched: mov.l @(TI_FLAGS:16,er4),er0 btst #TIF_NEED_RESCHED,r0l beq restore_all:8 mov.b @(LCCR+1:16,sp),r0l /* Interrupt Enabled? */ bmi restore_all:8 mov.l sp,er0 jsr @set_esp0 jsr @preempt_schedule_irq bra need_resched:8 #endif ret_from_fork: mov.l er2,er0 jsr @schedule_tail jmp @ret_from_exception ret_from_kernel_thread: mov.l er2,er0 jsr @schedule_tail mov.l @(LER4:16,sp),er0 mov.l @(LER5:16,sp),er1 jsr @er1 jmp @ret_from_exception _resume: /* * Beware - when entering resume, offset of tss is in d1, * prev (the current task) is in a0, next (the new task) * is in a1 and d2.b is non-zero if the mm structure is * shared between the tasks, so don't change these * registers until their contents are no longer needed. */ /* save sr */ sub.w r3,r3 stc ccr,r3l mov.w r3,@(THREAD_CCR+2:16,er0) /* disable interrupts */ orc #0xc0,ccr mov.l @_sw_usp,er3 mov.l er3,@(THREAD_USP:16,er0) mov.l sp,@(THREAD_KSP:16,er0) /* Skip address space switching if they are the same. */ /* FIXME: what did we hack out of here, this does nothing! */ mov.l @(THREAD_USP:16,er1),er0 mov.l er0,@_sw_usp mov.l @(THREAD_KSP:16,er1),sp /* restore status register */ mov.w @(THREAD_CCR+2:16,er1),r3 ldc r3l,ccr rts _trace_break: subs #4,sp SAVE_ALL sub.l er1,er1 dec.l #1,er1 mov.l er1,@(LORIG,sp) mov.l sp,er0 jsr @set_esp0 mov.l @_sw_usp,er0 mov.l @er0,er1 mov.w @(-2:16,er1),r2 cmp.w #0x5730,r2 beq 1f subs #2,er1 mov.l er1,@er0 1: and.w #0xff,e1 mov.l er1,er0 jsr @trace_trap jmp @ret_from_exception _nmi: subs #4, sp mov.l er0, @-sp mov.l @_interrupt_redirect_table, er0 add.l #8*4, er0 mov.l er0, @(4,sp) mov.l @sp+, er0 jmp @_interrupt_entry #if defined(CONFIG_KGDB) _kgdb_trap: subs #4,sp SAVE_ALL mov.l sp,er0 add.l #LRET,er0 mov.l er0,@(LSP,sp) jsr @set_esp0 mov.l sp,er0 subs #4,er0 jsr @h8300_kgdb_trap jmp @ret_from_exception #endif .section .bss _sw_ksp: .space 4 _sw_usp: .space 4 .end
AirFortressIlikara/LS2K0300-linux-4.19
1,756
arch/h8300/kernel/head_rom.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <linux/init.h> #include <asm/thread_info.h> #if defined(CONFIG_CPU_H8300H) .h8300h #define SYSCR 0xfee012 #define IRAMTOP 0xffff20 #define NR_INT 64 #endif #if defined(CONFIG_CPU_H8S) .h8300s #define INTCR 0xffff31 #define IRAMTOP 0xffc000 #define NR_INT 128 #endif __HEAD .global _start _start: mov.l #IRAMTOP,sp #if !defined(CONFIG_H8300H_SIM) && \ !defined(CONFIG_H8S_SIM) jsr @lowlevel_init /* copy .data */ mov.l #_begin_data,er5 mov.l #_sdata,er6 mov.l #_edata,er4 sub.l er6,er4 shlr.l er4 shlr.l er4 1: mov.l @er5+,er0 mov.l er0,@er6 adds #4,er6 dec.l #1,er4 bne 1b /* .bss clear */ mov.l #_sbss,er5 mov.l #_ebss,er4 sub.l er5,er4 shlr er4 shlr er4 sub.l er0,er0 1: mov.l er0,@er5 adds #4,er5 dec.l #1,er4 bne 1b #else /* get cmdline from gdb */ jsr @0xcc ;; er0 - argc ;; er1 - argv mov.l #command_line,er3 adds #4,er1 dec.l #1,er0 beq 4f 1: mov.l @er1+,er2 2: mov.b @er2+,r4l beq 3f mov.b r4l,@er3 adds #1,er3 bra 2b 3: mov.b #' ',r4l mov.b r4l,@er3 adds #1,er3 dec.l #1,er0 bne 1b subs #1,er3 mov.b #0,r4l mov.b r4l,@er3 4: #endif sub.l er0,er0 jsr @h8300_fdt_init /* linux kernel start */ #if defined(CONFIG_CPU_H8300H) ldc #0xd0,ccr /* running kernel */ mov.l #SYSCR,er0 bclr #3,@er0 #endif #if defined(CONFIG_CPU_H8S) ldc #0x07,exr bclr #4,@INTCR:8 bset #5,@INTCR:8 /* Interrupt mode 2 */ ldc #0x90,ccr /* running kernel */ #endif mov.l #init_thread_union,sp add.l #0x2000,sp jsr @start_kernel 1: bra 1b #if defined(CONFIG_ROMKERNEL) /* interrupt vector */ .section .vectors,"ax" .long _start .long _start vector = 2 .rept NR_INT - 2 .long _interrupt_redirect_table+vector*4 vector = vector + 1 .endr #endif .end
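The startup code above copies .data from its ROM load address to RAM and zeroes .bss using the linker-provided symbols _begin_data, _sdata, _edata, _sbss and _ebss (defined in the vmlinux.lds.S record later in this dump). A rough C equivalent of that early-boot loop is sketched below for illustration only; early_init_sections() is a made-up name, and the real work has to be done in assembly before any C runtime exists.

#include <stddef.h>
#include <string.h>

extern char _begin_data[];  /* load (ROM) address of .data   */
extern char _sdata[];       /* runtime start of .data in RAM */
extern char _edata[];       /* runtime end of .data          */
extern char _sbss[];        /* start of .bss                 */
extern char _ebss[];        /* end of .bss                   */

/* Illustrative only: the assembly version runs before main()/start_kernel. */
static void early_init_sections(void)
{
        /* Copy initialised data from ROM to its runtime address in RAM. */
        memcpy(_sdata, _begin_data, (size_t)(_edata - _sdata));
        /* Zero .bss so C code sees cleared static storage. */
        memset(_sbss, 0, (size_t)(_ebss - _sbss));
}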
AirFortressIlikara/LS2K0300-linux-4.19
1,084
arch/h8300/kernel/vmlinux.lds.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <asm-generic/vmlinux.lds.h> #include <asm/page.h> #include <asm/thread_info.h> #define ROMTOP 0x000000 #define RAMTOP 0x400000 jiffies = jiffies_64 + 4; ENTRY(_start) SECTIONS { #if defined(CONFIG_ROMKERNEL) . = ROMTOP; .vectors : { _vector = . ; *(.vector*) } #else . = RAMTOP; _ramstart = .; . = . + CONFIG_OFFSET; #endif _text = .; HEAD_TEXT_SECTION .text : { _stext = . ; TEXT_TEXT SCHED_TEXT CPUIDLE_TEXT LOCK_TEXT #if defined(CONFIG_ROMKERNEL) *(.int_redirect) #endif _etext = . ; } EXCEPTION_TABLE(16) NOTES RO_DATA_SECTION(4) ROMEND = .; #if defined(CONFIG_ROMKERNEL) . = RAMTOP; _ramstart = .; #define ADDR(x) ROMEND #endif _sdata = . ; __data_start = . ; RW_DATA_SECTION(0, PAGE_SIZE, THREAD_SIZE) #if defined(CONFIG_ROMKERNEL) #undef ADDR #endif . = ALIGN(0x4) ; __init_begin = .; INIT_TEXT_SECTION(4) INIT_DATA_SECTION(4) SECURITY_INIT __init_end = .; _edata = . ; _begin_data = LOADADDR(.data); _sbss =.; BSS_SECTION(0, 0 ,0) _ebss =.; _ramend = .; _end = .; DISCARDS }
AirFortressIlikara/LS2K0300-linux-4.19
1,124
arch/h8300/lib/memcpy.S
;;; SPDX-License-Identifier: GPL-2.0 ;;; memcpy.S #include <asm/linkage.h> #if defined(CONFIG_CPU_H8300H) .h8300h #endif #if defined(CONFIG_CPU_H8S) .h8300s #endif .text .global memcpy ;;; void *memcpy(void *to, void *from, size_t n) memcpy: mov.l er2,er2 bne 1f rts 1: ;; address check bld #0,r0l bxor #0,r1l bcs 4f mov.l er4,@-sp mov.l er0,@-sp btst #0,r0l beq 1f ;; (aligned even) odd address mov.b @er1,r3l mov.b r3l,@er0 adds #1,er1 adds #1,er0 dec.l #1,er2 beq 3f 1: ;; n < sizeof(unsigned long) check sub.l er4,er4 adds #4,er4 ; loop count check value cmp.l er4,er2 blo 2f ;; unsigned long copy 1: mov.l @er1,er3 mov.l er3,@er0 adds #4,er0 adds #4,er1 subs #4,er2 cmp.l er4,er2 bcc 1b ;; rest 2: mov.l er2,er2 beq 3f 1: mov.b @er1,r3l mov.b r3l,@er0 adds #1,er1 adds #1,er0 dec.l #1,er2 bne 1b 3: mov.l @sp+,er0 mov.l @sp+,er4 rts ;; odd <- even / even <- odd 4: mov.l er4,er3 mov.l er2,er4 mov.l er5,er2 mov.l er1,er5 mov.l er6,er1 mov.l er0,er6 1: eepmov.w mov.w r4,r4 bne 1b dec.w #1,e4 bpl 1b mov.l er1,er6 mov.l er2,er5 mov.l er3,er4 rts .end
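When source and destination share the same parity, the routine above copies one leading byte to even-align the destination, then whole 32-bit words, then the remaining tail bytes. A plain C sketch of that three-phase strategy follows (memcpy_sketch is a hypothetical name, and the eepmov path used when the parities differ is not shown).

#include <stddef.h>
#include <stdint.h>
#include <string.h>

void *memcpy_sketch(void *to, const void *from, size_t n)
{
        unsigned char *d = to;
        const unsigned char *s = from;

        /* Phase 1: one byte copy to even-align the destination. */
        if (n && ((uintptr_t)d & 1)) {
                *d++ = *s++;
                n--;
        }
        /* Phase 2: bulk copy in 32-bit words. */
        while (n >= 4) {
                memcpy(d, s, 4);
                d += 4;
                s += 4;
                n -= 4;
        }
        /* Phase 3: trailing bytes. */
        while (n) {
                *d++ = *s++;
                n--;
        }
        return to;
}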
AirFortressIlikara/LS2K0300-linux-4.19
1,583
arch/h8300/lib/udivsi3.S
/* SPDX-License-Identifier: GPL-2.0 */ #include "libgcc.h" ;; This function also computes the remainder and stores it in er3. .global __udivsi3 __udivsi3: mov.w A1E,A1E ; denominator top word 0? bne DenHighNonZero ; do it the easy way, see page 107 in manual mov.w A0E,A2 extu.l A2P divxu.w A1,A2P mov.w A2E,A0E divxu.w A1,A0P mov.w A0E,A3 mov.w A2,A0E extu.l A3P rts ; er0 = er0 / er1 ; er3 = er0 % er1 ; trashes er1 er2 ; expects er1 >= 2^16 DenHighNonZero: mov.l er0,er3 mov.l er1,er2 #ifdef CONFIG_CPU_H8300H divmod_L21: shlr.l er0 shlr.l er2 ; make divisor < 2^16 mov.w e2,e2 bne divmod_L21 #else shlr.l #2,er2 ; make divisor < 2^16 mov.w e2,e2 beq divmod_L22A divmod_L21: shlr.l #2,er0 divmod_L22: shlr.l #2,er2 ; make divisor < 2^16 mov.w e2,e2 bne divmod_L21 divmod_L22A: rotxl.w r2 bcs divmod_L23 shlr.l er0 bra divmod_L24 divmod_L23: rotxr.w r2 shlr.l #2,er0 divmod_L24: #endif ;; At this point, ;; er0 contains shifted dividend ;; er1 contains divisor ;; er2 contains shifted divisor ;; er3 contains dividend, later remainder divxu.w r2,er0 ; r0 now contains the approximate quotient (AQ) extu.l er0 beq divmod_L25 subs #1,er0 ; er0 = AQ - 1 mov.w e1,r2 mulxu.w r0,er2 ; er2 = upper (AQ - 1) * divisor sub.w r2,e3 ; dividend - 65536 * er2 mov.w r1,r2 mulxu.w r0,er2 ; compute er3 = remainder (tentative) sub.l er2,er3 ; er3 = dividend - (AQ - 1) * divisor divmod_L25: cmp.l er1,er3 ; is divisor < remainder? blo divmod_L26 adds #1,er0 sub.l er1,er3 ; correct the remainder divmod_L26: rts .end
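The "easy way" branch above handles divisors that fit in 16 bits by doing two hardware 32/16 divides, high halfword first, and leaves the remainder in er3. The same fast path is sketched in C below; udiv_small_den() is a hypothetical name, and it assumes den is non-zero and below 2^16 (the DenHighNonZero path is the separate normalise-and-correct algorithm).

#include <stdint.h>

/* Unsigned 32/32 divide, fast path for a non-zero divisor below 2^16.
 * Returns the quotient; *rem receives the remainder (the asm keeps it in er3). */
static uint32_t udiv_small_den(uint32_t num, uint16_t den, uint32_t *rem)
{
        uint32_t hi = num >> 16;
        uint32_t lo = num & 0xffffu;

        uint32_t q_hi = hi / den;          /* first 32/16 divide  */
        uint32_t r_hi = hi % den;

        uint32_t mid = (r_hi << 16) | lo;  /* fold remainder into the low half */
        uint32_t q_lo = mid / den;         /* second 32/16 divide */

        *rem = mid % den;
        return (q_hi << 16) | q_lo;
}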
AirFortressIlikara/LS2K0300-linux-4.19
1,075
arch/h8300/lib/moddivsi3.S
/* SPDX-License-Identifier: GPL-2.0 */ #include "libgcc.h" ; numerator in A0/A1 ; denominator in A2/A3 .global __modsi3 __modsi3: PUSHP S2P bsr modnorm bsr __divsi3 mov.l er3,er0 bra exitdiv .global __umodsi3 __umodsi3: bsr __udivsi3:16 mov.l er3,er0 rts .global __divsi3 __divsi3: PUSHP S2P bsr divnorm bsr __udivsi3:16 ; examine what the sign should be exitdiv: btst #3,S2L beq reti ; should be -ve neg.l A0P reti: POPP S2P rts divnorm: mov.l A0P,A0P ; is the numerator -ve stc ccr,S2L ; keep the sign in bit 3 of S2L bge postive neg.l A0P ; negate arg postive: mov.l A1P,A1P ; is the denominator -ve bge postive2 neg.l A1P ; negate arg xor.b #0x08,S2L ; toggle the result sign postive2: rts ;; Basically the same, except that the sign of the divisor determines ;; the sign. modnorm: mov.l A0P,A0P ; is the numerator -ve stc ccr,S2L ; keep the sign in bit 3 of S2L bge mpostive neg.l A0P ; negate arg mpostive: mov.l A1P,A1P ; is the denominator -ve bge mpostive2 neg.l A1P ; negate arg mpostive2: rts .end
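__divsi3 and __modsi3 above reduce signed division to the unsigned routine: record the result sign in a flag bit, negate negative operands, divide, then negate the quotient if exactly one operand was negative, while the remainder keeps the numerator's sign. A small C sketch of that sign handling is below; udivmod() and divsi3_sketch() are hypothetical names, and INT_MIN edge cases are not handled.

#include <stdint.h>

/* Assumed helper: unsigned divide that also returns the remainder. */
static uint32_t udivmod(uint32_t n, uint32_t d, uint32_t *r)
{
        *r = n % d;      /* d must be non-zero */
        return n / d;
}

static int32_t divsi3_sketch(int32_t num, int32_t den, int32_t *rem)
{
        int neg_q = (num < 0) ^ (den < 0);   /* quotient sign            */
        int neg_r = (num < 0);               /* remainder follows num    */
        uint32_t un = num < 0 ? 0u - (uint32_t)num : (uint32_t)num;
        uint32_t ud = den < 0 ? 0u - (uint32_t)den : (uint32_t)den;
        uint32_t ur;
        uint32_t uq = udivmod(un, ud, &ur);

        /* INT_MIN / -1 and similar overflow cases are not handled here. */
        *rem = neg_r ? -(int32_t)ur : (int32_t)ur;
        return neg_q ? -(int32_t)uq : (int32_t)uq;
}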
AirFortressIlikara/LS2K0300-linux-4.19
1,069
arch/h8300/lib/modsi3.S
/* SPDX-License-Identifier: GPL-2.0 */ #include "libgcc.h" ; numerator in A0/A1 ; denominator in A2/A3 .global __modsi3 __modsi3: PUSHP S2P bsr modnorm bsr __divsi3 mov.l er3,er0 bra exitdiv .global __umodsi3 __umodsi3: bsr __udivsi3 mov.l er3,er0 rts .global __divsi3 __divsi3: PUSHP S2P jsr divnorm bsr __udivsi3 ; examine what the sign should be exitdiv: btst #3,S2L beq reti ; should be -ve neg.l A0P reti: POPP S2P rts divnorm: mov.l A0P,A0P ; is the numerator -ve stc ccr,S2L ; keep the sign in bit 3 of S2L bge postive neg.l A0P ; negate arg postive: mov.l A1P,A1P ; is the denominator -ve bge postive2 neg.l A1P ; negate arg xor.b #0x08,S2L ; toggle the result sign postive2: rts ;; Basically the same, except that the sign of the divisor determines ;; the sign. modnorm: mov.l A0P,A0P ; is the numerator -ve stc ccr,S2L ; keep the sign in bit 3 of S2L bge mpostive neg.l A0P ; negate arg mpostive: mov.l A1P,A1P ; is the denominator -ve bge mpostive2 neg.l A1P ; negate arg mpostive2: rts .end
AirFortressIlikara/LS2K0300-linux-4.19
1,039
arch/h8300/boot/compressed/head.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/arch/h8300/boot/compressed/head.S * * Copyright (C) 2006 Yoshinori Sato */ #include <linux/linkage.h> .section .text..startup,"ax" .global startup startup: mov.l #startup, sp mov.l er0, er4 mov.l #__sbss, er0 mov.l #__ebss, er1 sub.l er0, er1 shlr er1 shlr er1 sub.l er2, er2 1: mov.l er2, @er0 adds #4, er0 dec.l #1, er1 bne 1b jsr @decompress_kernel mov.l er4, er0 jmp @output .align 9 fake_headers_as_bzImage: .word 0 .ascii "HdrS" ; header signature .word 0x0202 ; header version number (>= 0x0105) ; or else old loadlin-1.5 will fail) .word 0 ; default_switch .word 0 ; SETUPSEG .word 0x1000 .word 0 ; pointing to kernel version string .byte 0 ; = 0, old one (LILO, Loadlin, ; 0xTV: T=0 for LILO ; V = version .byte 1 ; Load flags bzImage=1 .word 0x8000 ; size to move, when setup is not .long 0x100000 ; 0x100000 = default for big kernel .long 0 ; address of loaded ramdisk image .long 0 ; its size in bytes .end
AirFortressIlikara/LS2K0300-linux-4.19
5,649
arch/nds32/kernel/head.S
// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2005-2017 Andes Technology Corporation #include <linux/linkage.h> #include <linux/init.h> #include <asm/ptrace.h> #include <asm/asm-offsets.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/sizes.h> #include <asm/thread_info.h> #ifdef CONFIG_CPU_BIG_ENDIAN #define OF_DT_MAGIC 0xd00dfeed #else #define OF_DT_MAGIC 0xedfe0dd0 #endif .globl swapper_pg_dir .equ swapper_pg_dir, TEXTADDR - 0x4000 /* * Kernel startup entry point. */ .section ".head.text", "ax" .type _stext, %function ENTRY(_stext) setgie.d ! Disable interrupt isb /* * Disable I/D-cache and enable it at a proper time */ mfsr $r0, $mr8 li $r1, #~(CACHE_CTL_mskIC_EN|CACHE_CTL_mskDC_EN) and $r0, $r0, $r1 mtsr $r0, $mr8 /* * Process device tree blob */ andi $r0,$r2,#0x3 li $r10, 0 bne $r0, $r10, _nodtb lwi $r0, [$r2] li $r1, OF_DT_MAGIC bne $r0, $r1, _nodtb move $r10, $r2 _nodtb: /* * Create a temporary mapping area for booting, before start_kernel */ sethi $r4, hi20(swapper_pg_dir) li $p0, (PAGE_OFFSET - PHYS_OFFSET) sub $r4, $r4, $p0 tlbop FlushAll ! invalidate TLB\n" isb mtsr $r4, $L1_PPTB ! load page table pointer\n" #ifdef CONFIG_CPU_DCACHE_DISABLE #define MMU_CTL_NTCC MMU_CTL_CACHEABLE_NON #else #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH #define MMU_CTL_NTCC MMU_CTL_CACHEABLE_WT #else #define MMU_CTL_NTCC MMU_CTL_CACHEABLE_WB #endif #endif /* set NTC cacheability, mutliple page size in use */ mfsr $r3, $MMU_CTL #if CONFIG_MEMORY_START >= 0xc0000000 ori $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC3) #elif CONFIG_MEMORY_START >= 0x80000000 ori $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC2) #elif CONFIG_MEMORY_START >= 0x40000000 ori $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC1) #else ori $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC0) #endif #ifdef CONFIG_ANDES_PAGE_SIZE_4KB ori $r3, $r3, #(MMU_CTL_mskMPZIU) #else ori $r3, $r3, #(MMU_CTL_mskMPZIU|MMU_CTL_D8KB) #endif #ifdef CONFIG_HW_SUPPORT_UNALIGNMENT_ACCESS li $r0, #MMU_CTL_UNA or $r3, $r3, $r0 #endif mtsr $r3, $MMU_CTL isb /* set page size and size of kernel image */ mfsr $r0, $MMU_CFG srli $r3, $r0, MMU_CFG_offfEPSZ zeb $r3, $r3 bnez $r3, _extra_page_size_support #ifdef CONFIG_ANDES_PAGE_SIZE_4KB li $r5, #SZ_4K ! Use 4KB page size #else li $r5, #SZ_8K ! Use 8KB page size li $r3, #1 #endif mtsr $r3, $TLB_MISC b _image_size_check _extra_page_size_support: ! Use epzs pages size clz $r6, $r3 subri $r2, $r6, #31 li $r3, #1 sll $r3, $r3, $r2 /* MMU_CFG.EPSZ value -> meaning */ mul $r5, $r3, $r3 slli $r5, $r5, #14 /* MMU_CFG.EPSZ -> TLB_MISC.ACC_PSZ */ addi $r3, $r2, #0x2 mtsr $r3, $TLB_MISC _image_size_check: /* calculate the image maximum size accepted by TLB config */ andi $r6, $r0, MMU_CFG_mskTBW andi $r0, $r0, MMU_CFG_mskTBS srli $r6, $r6, MMU_CFG_offTBW srli $r0, $r0, MMU_CFG_offTBS /* * we just map the kernel to the maximum way - 1 of tlb * reserver one way for UART VA mapping * it will cause page fault if UART mapping cover the kernel mapping * * direct mapping is not supported now. */ li $r2, 't' beqz $r6, __error ! MMU_CFG.TBW = 0 is direct mappin addi $r0, $r0, #0x2 ! MMU_CFG.TBS value -> meaning sll $r0, $r6, $r0 ! entries = k-way * n-set mul $r6, $r0, $r5 ! max size = entries * page size /* check kernel image size */ la $r3, (_end - PAGE_OFFSET) li $r2, 's' bgt $r3, $r6, __error li $r2, #(PHYS_OFFSET + TLB_DATA_kernel_text_attr) li $r3, PAGE_OFFSET add $r6, $r6, $r3 _tlb: mtsr $r3, $TLB_VPN dsb tlbop $r2, RWR isb add $r3, $r3, $r5 add $r2, $r2, $r5 bgt $r6, $r3, _tlb mfsr $r3, $TLB_MISC ! 
setup access page size li $r2, #~0xf and $r3, $r3, $r2 #ifdef CONFIG_ANDES_PAGE_SIZE_8KB ori $r3, $r3, #0x1 #endif mtsr $r3, $TLB_MISC mfsr $r0, $MISC_CTL ! Enable BTB and RTP and shadow sp ori $r0, $r0, #MISC_init mtsr $r0, $MISC_CTL mfsr $p1, $PSW li $r15, #~PSW_clr ! clear WBNA|DME|IME|DT|IT|POM|INTL|GIE and $p1, $p1, $r15 ori $p1, $p1, #PSW_init mtsr $p1, $IPSW ! when iret, it will automatically enable MMU la $lp, __mmap_switched mtsr $lp, $IPC iret nop .type __switch_data, %object __switch_data: .long __bss_start ! $r6 .long _end ! $r7 .long __atags_pointer ! $atag_pointer .long init_task ! $r9, move to $r25 .long init_thread_union + THREAD_SIZE ! $sp /* * The following fragment of code is executed with the MMU on in MMU mode, * and uses absolute addresses; this is not position independent. */ .align .type __mmap_switched, %function __mmap_switched: la $r3, __switch_data lmw.bim $r6, [$r3], $r9, #0b0001 move $r25, $r9 move $fp, #0 ! Clear BSS (and zero $fp) beq $r7, $r6, _RRT 1: swi.bi $fp, [$r6], #4 bne $r7, $r6, 1b swi $r10, [$r8] _RRT: b start_kernel __error: b __error
AirFortressIlikara/LS2K0300-linux-4.19
3,770
arch/nds32/kernel/ex-exit.S
// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2005-2017 Andes Technology Corporation #include <linux/linkage.h> #include <asm/unistd.h> #include <asm/assembler.h> #include <asm/nds32.h> #include <asm/asm-offsets.h> #include <asm/thread_info.h> #include <asm/current.h> #ifdef CONFIG_HWZOL .macro pop_zol mtusr $r14, $LB mtusr $r15, $LE mtusr $r16, $LC .endm #endif .macro restore_user_regs_first setgie.d isb addi $sp, $sp, FUCOP_CTL_OFFSET lmw.adm $r12, [$sp], $r24, #0x0 mtsr $r12, $SP_USR mtsr $r13, $IPC #ifdef CONFIG_HWZOL pop_zol #endif mtsr $r19, $PSW mtsr $r20, $IPSW mtsr $r21, $P_IPSW mtsr $r22, $P_IPC mtsr $r23, $P_P0 mtsr $r24, $P_P1 lmw.adm $sp, [$sp], $sp, #0xe .endm .macro restore_user_regs_last pop $p0 cmovn $sp, $p0, $p0 iret nop .endm .macro restore_user_regs restore_user_regs_first lmw.adm $r0, [$sp], $r25, #0x0 addi $sp, $sp, OSP_OFFSET restore_user_regs_last .endm .macro fast_restore_user_regs restore_user_regs_first lmw.adm $r1, [$sp], $r25, #0x0 addi $sp, $sp, OSP_OFFSET-4 restore_user_regs_last .endm #ifdef CONFIG_PREEMPT .macro preempt_stop .endm #else .macro preempt_stop setgie.d isb .endm #define resume_kernel no_work_pending #endif ENTRY(ret_from_exception) preempt_stop ENTRY(ret_from_intr) /* * judge Kernel or user mode * */ lwi $p0, [$sp+(#IPSW_OFFSET)] ! Check if in nested interrupt andi $p0, $p0, #PSW_mskINTL bnez $p0, resume_kernel ! done with iret j resume_userspace /* * This is the fast syscall return path. We do as little as * possible here, and this includes saving $r0 back into the SVC * stack. * fixed: tsk - $r25, syscall # - $r7, syscall table pointer - $r8 */ ENTRY(ret_fast_syscall) gie_disable lwi $r1, [tsk+#TSK_TI_FLAGS] andi $p1, $r1, #_TIF_WORK_MASK bnez $p1, fast_work_pending fast_restore_user_regs ! iret /* * Ok, we need to do extra processing, * enter the slow path returning from syscall, while pending work. */ fast_work_pending: swi $r0, [$sp+(#R0_OFFSET)] ! what is different from ret_from_exception work_pending: andi $p1, $r1, #_TIF_NEED_RESCHED bnez $p1, work_resched andi $p1, $r1, #_TIF_SIGPENDING|#_TIF_NOTIFY_RESUME beqz $p1, no_work_pending move $r0, $sp ! 'regs' gie_enable bal do_notify_resume b ret_slow_syscall work_resched: bal schedule ! path, return to user mode /* * "slow" syscall return path. */ ENTRY(resume_userspace) ENTRY(ret_slow_syscall) gie_disable lwi $p0, [$sp+(#IPSW_OFFSET)] ! Check if in nested interrupt andi $p0, $p0, #PSW_mskINTL bnez $p0, no_work_pending ! done with iret lwi $r1, [tsk+#TSK_TI_FLAGS] andi $p1, $r1, #_TIF_WORK_MASK bnez $p1, work_pending ! handle work_resched, sig_pend no_work_pending: #ifdef CONFIG_TRACE_IRQFLAGS lwi $p0, [$sp+(#IPSW_OFFSET)] andi $p0, $p0, #0x1 la $r10, __trace_hardirqs_off la $r9, __trace_hardirqs_on cmovz $r9, $p0, $r10 jral $r9 #endif restore_user_regs ! return from iret /* * preemptive kernel */ #ifdef CONFIG_PREEMPT resume_kernel: gie_disable lwi $t0, [tsk+#TSK_TI_PREEMPT] bnez $t0, no_work_pending need_resched: lwi $t0, [tsk+#TSK_TI_FLAGS] andi $p1, $t0, #_TIF_NEED_RESCHED beqz $p1, no_work_pending lwi $t0, [$sp+(#IPSW_OFFSET)] ! Interrupts off? andi $t0, $t0, #1 beqz $t0, no_work_pending jal preempt_schedule_irq b need_resched #endif /* * This is how we return from a fork. */ ENTRY(ret_from_fork) bal schedule_tail beqz $r6, 1f ! r6 stores fn for kernel thread move $r0, $r7 ! prepare kernel thread arg jral $r6 1: lwi $r1, [tsk+#TSK_TI_FLAGS] ! check for syscall tracing andi $p1, $r1, #_TIF_WORK_SYSCALL_LEAVE ! are we tracing syscalls? 
beqz $p1, ret_slow_syscall move $r0, $sp bal syscall_trace_leave b ret_slow_syscall
AirFortressIlikara/LS2K0300-linux-4.19
2,248
arch/nds32/kernel/ex-scall.S
// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2005-2017 Andes Technology Corporation #include <linux/linkage.h> #include <asm/unistd.h> #include <asm/assembler.h> #include <asm/nds32.h> #include <asm/asm-offsets.h> #include <asm/thread_info.h> #include <asm/current.h> /* * $r0 = previous task_struct, * $r1 = next task_struct, * previous and next are guaranteed not to be the same. */ ENTRY(__switch_to) la $p0, __entry_task sw $r1, [$p0] move $p1, $r0 addi $p1, $p1, #THREAD_CPU_CONTEXT smw.bi $r6, [$p1], $r14, #0xb ! push r6~r14, fp, lp, sp move $r25, $r1 addi $r1, $r1, #THREAD_CPU_CONTEXT lmw.bi $r6, [$r1], $r14, #0xb ! pop r6~r14, fp, lp, sp ret #define tbl $r8 /* * $r7 will be writen as syscall nr */ .macro get_scno lwi $r7, [$sp + R15_OFFSET] swi $r7, [$sp + SYSCALLNO_OFFSET] .endm .macro updateipc addi $r17, $r13, #4 ! $r13 is $IPC swi $r17, [$sp + IPC_OFFSET] .endm ENTRY(eh_syscall) updateipc get_scno gie_enable lwi $p0, [tsk+#TSK_TI_FLAGS] ! check for syscall tracing andi $p1, $p0, #_TIF_WORK_SYSCALL_ENTRY ! are we tracing syscalls? bnez $p1, __sys_trace la $lp, ret_fast_syscall ! return address jmp_systbl: addi $p1, $r7, #-__NR_syscalls ! syscall number of syscall instruction is guarded by addembler bgez $p1, _SCNO_EXCEED ! call sys_* routine la tbl, sys_call_table ! load syscall table pointer slli $p1, $r7, #2 add $p1, tbl, $p1 lwi $p1, [$p1] jr $p1 ! no return _SCNO_EXCEED: ori $r0, $r7, #0 ori $r1, $sp, #0 b bad_syscall /* * This is the really slow path. We're going to be doing * context switches, and waiting for our parent to respond. */ __sys_trace: move $r0, $sp bal syscall_trace_enter move $r7, $r0 la $lp, __sys_trace_return ! return address addi $p1, $r7, #1 beqz $p1, ret_slow_syscall ! fatal signal is pending addi $p1, $sp, #R0_OFFSET ! pointer to regs lmw.bi $r0, [$p1], $r5 ! have to reload $r0 - $r5 b jmp_systbl __sys_trace_return: swi $r0, [$sp+#R0_OFFSET] ! T: save returned $r0 move $r0, $sp ! set pt_regs for syscall_trace_leave bal syscall_trace_leave b ret_slow_syscall ENTRY(sys_rt_sigreturn_wrapper) addi $r0, $sp, #0 b sys_rt_sigreturn ENDPROC(sys_rt_sigreturn_wrapper)
AirFortressIlikara/LS2K0300-linux-4.19
1,254
arch/nds32/kernel/vmlinux.lds.S
// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2005-2017 Andes Technology Corporation #include <asm/page.h> #include <asm/thread_info.h> #include <asm/cache.h> #include <asm/memory.h> #define LOAD_OFFSET (PAGE_OFFSET - PHYS_OFFSET) #include <asm-generic/vmlinux.lds.h> OUTPUT_ARCH(nds32) ENTRY(_stext_lma) jiffies = jiffies_64; #if defined(CONFIG_GCOV_KERNEL) #define NDS32_EXIT_KEEP(x) x #else #define NDS32_EXIT_KEEP(x) #endif SECTIONS { _stext_lma = TEXTADDR - LOAD_OFFSET; . = TEXTADDR; __init_begin = .; HEAD_TEXT_SECTION .exit.text : { NDS32_EXIT_KEEP(EXIT_TEXT) } INIT_TEXT_SECTION(PAGE_SIZE) INIT_DATA_SECTION(16) .exit.data : { NDS32_EXIT_KEEP(EXIT_DATA) } PERCPU_SECTION(L1_CACHE_BYTES) __init_end = .; . = ALIGN(PAGE_SIZE); _stext = .; /* Real text segment */ .text : AT(ADDR(.text) - LOAD_OFFSET) { _text = .; /* Text and read-only data */ TEXT_TEXT SCHED_TEXT CPUIDLE_TEXT LOCK_TEXT KPROBES_TEXT IRQENTRY_TEXT *(.fixup) } _etext = .; /* End of text and rodata section */ _sdata = .; RO_DATA_SECTION(PAGE_SIZE) RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) _edata = .; EXCEPTION_TABLE(16) NOTES BSS_SECTION(4, 4, 4) _end = .; STABS_DEBUG DWARF_DEBUG DISCARDS }
AirFortressIlikara/LS2K0300-linux-4.19
3,378
arch/nds32/kernel/ex-entry.S
// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2005-2017 Andes Technology Corporation #include <linux/linkage.h> #include <asm/memory.h> #include <asm/nds32.h> #include <asm/errno.h> #include <asm/asm-offsets.h> #include <asm/page.h> #ifdef CONFIG_HWZOL .macro push_zol mfusr $r14, $LB mfusr $r15, $LE mfusr $r16, $LC .endm #endif .macro save_user_regs smw.adm $sp, [$sp], $sp, #0x1 /* move $SP to the bottom of pt_regs */ addi $sp, $sp, -OSP_OFFSET /* push $r0 ~ $r25 */ smw.bim $r0, [$sp], $r25 /* push $fp, $gp, $lp */ smw.bim $sp, [$sp], $sp, #0xe mfsr $r12, $SP_USR mfsr $r13, $IPC #ifdef CONFIG_HWZOL push_zol #endif movi $r17, -1 move $r18, $r0 mfsr $r19, $PSW mfsr $r20, $IPSW mfsr $r21, $P_IPSW mfsr $r22, $P_IPC mfsr $r23, $P_P0 mfsr $r24, $P_P1 smw.bim $r12, [$sp], $r24, #0 addi $sp, $sp, -FUCOP_CTL_OFFSET /* Initialize kernel space $fp */ andi $p0, $r20, #PSW_mskPOM movi $p1, #0x0 cmovz $fp, $p1, $p0 andi $r16, $r19, #PSW_mskINTL slti $r17, $r16, #4 bnez $r17, 1f addi $r17, $r19, #-2 mtsr $r17, $PSW isb 1: /* If it was superuser mode, we don't need to update $r25 */ bnez $p0, 2f la $p0, __entry_task lw $r25, [$p0] 2: .endm .text /* * Exception Vector */ exception_handlers: .long unhandled_exceptions !Reset/NMI .long unhandled_exceptions !TLB fill .long do_page_fault !PTE not present .long do_dispatch_tlb_misc !TLB misc .long unhandled_exceptions !TLB VLPT .long unhandled_exceptions !Machine Error .long do_debug_trap !Debug related .long do_dispatch_general !General exception .long eh_syscall !Syscall .long asm_do_IRQ !IRQ common_exception_handler: save_user_regs mfsr $p0, $ITYPE andi $p0, $p0, #ITYPE_mskVECTOR srli $p0, $p0, #ITYPE_offVECTOR andi $p1, $p0, #NDS32_VECTOR_mskNONEXCEPTION bnez $p1, 1f sethi $lp, hi20(ret_from_exception) ori $lp, $lp, lo12(ret_from_exception) sethi $p1, hi20(exception_handlers) ori $p1, $p1, lo12(exception_handlers) lw $p1, [$p1+$p0<<2] move $r0, $p0 mfsr $r1, $EVA mfsr $r2, $ITYPE move $r3, $sp mfsr $r4, $OIPC /* enable gie if it is enabled in IPSW. */ mfsr $r21, $PSW andi $r20, $r20, #PSW_mskGIE /* r20 is $IPSW*/ or $r21, $r21, $r20 mtsr $r21, $PSW dsb jr $p1 /* syscall */ 1: addi $p1, $p0, #-NDS32_VECTOR_offEXCEPTION bnez $p1, 2f sethi $lp, hi20(ret_from_exception) ori $lp, $lp, lo12(ret_from_exception) sethi $p1, hi20(exception_handlers) ori $p1, $p1, lo12(exception_handlers) lwi $p1, [$p1+#NDS32_VECTOR_offEXCEPTION<<2] jr $p1 /* interrupt */ 2: #ifdef CONFIG_TRACE_IRQFLAGS jal __trace_hardirqs_off #endif move $r0, $sp sethi $lp, hi20(ret_from_intr) ori $lp, $lp, lo12(ret_from_intr) sethi $p0, hi20(exception_handlers) ori $p0, $p0, lo12(exception_handlers) lwi $p0, [$p0+#NDS32_VECTOR_offINTERRUPT<<2] jr $p0 .macro EXCEPTION_VECTOR_DEBUG .align 4 mfsr $p0, $EDM_CTL andi $p0, $p0, EDM_CTL_mskV3_EDM_MODE tnez $p0, SWID_RAISE_INTERRUPT_LEVEL .endm .macro EXCEPTION_VECTOR .align 4 sethi $p0, hi20(common_exception_handler) ori $p0, $p0, lo12(common_exception_handler) jral.ton $p0, $p0 .endm .section ".text.init", #alloc, #execinstr .global exception_vector exception_vector: .rept 6 EXCEPTION_VECTOR .endr EXCEPTION_VECTOR_DEBUG .rept 121 EXCEPTION_VECTOR .endr .align 4 .global exception_vector_end exception_vector_end:
AirFortressIlikara/LS2K0300-linux-4.19
1,253
arch/nds32/lib/clear_user.S
// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2005-2017 Andes Technology Corporation #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/errno.h> /* Prototype: int __arch_clear_user(void *addr, size_t sz) * Purpose : clear some user memory * Params : addr - user memory address to clear * : sz - number of bytes to clear * Returns : number of bytes NOT cleared */ .text .align 5 ENTRY(__arch_clear_user) add $r5, $r0, $r1 beqz $r1, clear_exit xor $p1, $p1, $p1 ! Use $p1=0 to clear mem srli $p0, $r1, #2 ! $p0 = number of word to clear andi $r1, $r1, #3 ! Bytes less than a word to copy beqz $p0, byte_clear ! Only less than a word to clear word_clear: USER( smw.bim,$p1, [$r0], $p1) ! Clear the word addi $p0, $p0, #-1 ! Decrease word count bnez $p0, word_clear ! Continue looping to clear all words beqz $r1, clear_exit ! No left bytes to copy byte_clear: USER( sbi.bi, $p1, [$r0], #1) ! Clear the byte addi $r1, $r1, #-1 ! Decrease byte count bnez $r1, byte_clear ! Continue looping to clear all left bytes clear_exit: move $r0, $r1 ! Set return value ret .section .fixup,"ax" .align 0 9001: sub $r0, $r5, $r0 ! Bytes left to copy ret .previous ENDPROC(__arch_clear_user)
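__arch_clear_user above zeroes user memory a word at a time, then byte by byte, and its .fixup path reports how many bytes were NOT cleared. A fault-free C sketch of that contract follows; clear_user_sketch() is a hypothetical name, and the interesting non-zero return can only come from the exception-table handling that the assembly provides.

#include <stddef.h>
#include <string.h>

/* Zero sz bytes at addr and return the number of bytes NOT cleared.
 * Without fault handling this is always 0; in the kernel a faulting store
 * jumps to the fixup code, which computes the leftover count. */
static size_t clear_user_sketch(unsigned char *addr, size_t sz)
{
        size_t left = sz;

        while (left >= 4) {        /* word-sized stores, like smw.bim */
                memset(addr, 0, 4);
                addr += 4;
                left -= 4;
        }
        while (left) {             /* trailing bytes, like sbi.bi */
                *addr++ = 0;
                left--;
        }
        return left;
}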
AirFortressIlikara/LS2K0300-linux-4.19
1,421
arch/nds32/lib/copy_template.S
// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2005-2017 Andes Technology Corporation beq $r1, $r0, quit_memcpy beqz $r2, quit_memcpy srli $r3, $r2, #5 ! check if len < cache-line size 32 beqz $r3, word_copy_entry andi $r4, $r0, #0x3 ! check byte-align beqz $r4, unalign_word_copy_entry addi $r4, $r4,#-4 abs $r4, $r4 ! check how many un-align byte to copy sub $r2, $r2, $r4 ! update $R2 unalign_byte_copy: lbi1 $r3, $r1, #1 addi $r4, $r4, #-1 sbi1 $r3, $r0, #1 bnez $r4, unalign_byte_copy beqz $r2, quit_memcpy unalign_word_copy_entry: andi $r3, $r0, 0x1f ! check cache-line unaligncount beqz $r3, cache_copy addi $r3, $r3, #-32 abs $r3, $r3 sub $r2, $r2, $r3 ! update $R2 unalign_word_copy: lmw1 $r4, $r1, $r4 addi $r3, $r3, #-4 smw1 $r4, $r0, $r4 bnez $r3, unalign_word_copy beqz $r2, quit_memcpy addi $r3, $r2, #-32 ! to check $r2< cache_line , than go to word_copy bltz $r3, word_copy_entry cache_copy: srli $r3, $r2, #5 beqz $r3, word_copy_entry 3: lmw1 $r17, $r1, $r24 addi $r3, $r3, #-1 smw1 $r17, $r0, $r24 bnez $r3, 3b word_copy_entry: andi $r2, $r2, #31 beqz $r2, quit_memcpy 5: srli $r3, $r2, #2 beqz $r3, byte_copy word_copy: lmw1 $r4, $r1, $r4 addi $r3, $r3, #-1 smw1 $r4, $r0, $r4 bnez $r3, word_copy andi $r2, $r2, #3 beqz $r2, quit_memcpy byte_copy: lbi1 $r3, $r1, #1 addi $r2, $r2, #-1 sbi1 $r3, $r0, #1 bnez $r2, byte_copy quit_memcpy:
AirFortressIlikara/LS2K0300-linux-4.19
2,028
arch/nds32/lib/memmove.S
// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2005-2017 Andes Technology Corporation #include <linux/linkage.h> /* void *memmove(void *dst, const void *src, int n); dst: $r0 src: $r1 n : $r2 ret: $r0 - pointer to the memory area dst. */ .text ENTRY(memmove) move $r5, $r0 ! Set return value = det beq $r0, $r1, exit_memcpy ! Exit when det = src beqz $r2, exit_memcpy ! Exit when n = 0 pushm $t0, $t1 ! Save reg srli $p1, $r2, #2 ! $p1 is how many words to copy ! Avoid data lost when memory overlap ! Copy data reversely when src < dst slt $p0, $r0, $r1 ! check if $r0 < $r1 beqz $p0, do_reverse ! branch if dst > src ! No reverse, dst < src andi $r2, $r2, #3 ! How many bytes are less than a word li $t0, #1 ! Determining copy direction in byte_cpy beqz $p1, byte_cpy ! When n is less than a word word_cpy: lmw.bim $p0, [$r1], $p0 ! Read a word from src addi $p1, $p1, #-1 ! How many words left to copy smw.bim $p0, [$r0], $p0 ! Copy the word to det bnez $p1, word_cpy ! If remained words > 0 beqz $r2, end_memcpy ! No left bytes to copy b byte_cpy do_reverse: add $r0, $r0, $r2 ! Start with the end of $r0 add $r1, $r1, $r2 ! Start with the end of $r1 andi $r2, $r2, #3 ! How many bytes are less than a word li $t0, #-1 ! Determining copy direction in byte_cpy beqz $p1, reverse_byte_cpy ! When n is less than a word reverse_word_cpy: lmw.adm $p0, [$r1], $p0 ! Read a word from src addi $p1, $p1, #-1 ! How many words left to copy smw.adm $p0, [$r0], $p0 ! Copy the word to det bnez $p1, reverse_word_cpy ! If remained words > 0 beqz $r2, end_memcpy ! No left bytes to copy reverse_byte_cpy: addi $r0, $r0, #-1 addi $r1, $r1, #-1 byte_cpy: ! Less than 4 bytes to copy now lb.bi $p0, [$r1], $t0 ! Read a byte from src addi $r2, $r2, #-1 ! How many bytes left to copy sb.bi $p0, [$r0], $t0 ! copy the byte to det bnez $r2, byte_cpy ! If remained bytes > 0 end_memcpy: popm $t0, $t1 exit_memcpy: move $r0, $r5 ret ENDPROC(memmove)
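memmove above chooses a copy direction, forward when dst < src and backward (starting from the end) otherwise, so overlapping regions are handled safely. The same decision expressed in portable C, as a sketch (memmove_sketch is a hypothetical name):

#include <stddef.h>

void *memmove_sketch(void *dst, const void *src, size_t n)
{
        unsigned char *d = dst;
        const unsigned char *s = src;

        if (d == s || n == 0)
                return dst;

        if (d < s) {
                /* No overlap hazard when copying forward. */
                while (n--)
                        *d++ = *s++;
        } else {
                /* dst is above src: copy backwards so bytes that still
                 * need to be read are never overwritten first. */
                d += n;
                s += n;
                while (n--)
                        *--d = *--s;
        }
        return dst;
}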
AirFortressIlikara/LS2K0300-linux-4.19
1,056
arch/nds32/lib/memset.S
// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2005-2017 Andes Technology Corporation #include <linux/linkage.h> .text ENTRY(memset) move $r5, $r0 ! Return value beqz $r2, end_memset ! Exit when len = 0 srli $p1, $r2, 2 ! $p1 is how many words to copy andi $r2, $r2, 3 ! How many bytes are less than a word beqz $p1, byte_set ! When n is less than a word ! set $r1 from ??????ab to abababab andi $r1, $r1, #0x00ff ! $r1 = 000000ab slli $p0, $r1, #8 ! $p0 = 0000ab00 or $r1, $r1, $p0 ! $r1 = 0000abab slli $p0, $r1, #16 ! $p0 = abab0000 or $r1, $r1, $p0 ! $r1 = abababab word_set: addi $p1, $p1, #-1 ! How many words left to copy smw.bim $r1, [$r0], $r1 ! Copy the word to det bnez $p1, word_set ! Still words to set, continue looping beqz $r2, end_memset ! No left byte to set byte_set: ! Less than 4 bytes left to set addi $r2, $r2, #-1 ! Decrease len by 1 sbi.bi $r1, [$r0], #1 ! Set data of the next byte to $r1 bnez $r2, byte_set ! Still bytes left to set end_memset: move $r0, $r5 ret ENDPROC(memset)
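memset above widens the fill byte from 0x000000ab to 0xabababab so it can store a word at a time, then finishes the last few bytes individually. The byte-replication trick in C, as a sketch (memset_sketch is a hypothetical name):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

void *memset_sketch(void *dst, int c, size_t n)
{
        unsigned char *d = dst;
        uint32_t pat = (uint8_t)c;

        pat |= pat << 8;     /* 0x000000ab -> 0x0000abab */
        pat |= pat << 16;    /* 0x0000abab -> 0xabababab */

        /* Word-at-a-time fill; memcpy sidesteps alignment concerns. */
        while (n >= 4) {
                memcpy(d, &pat, 4);
                d += 4;
                n -= 4;
        }
        /* Remaining 0-3 bytes. */
        while (n--)
                *d++ = (unsigned char)c;
        return dst;
}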
AirFortressIlikara/LS2K0300-linux-4.19
1,472
arch/nds32/kernel/vdso/vdso.lds.S
/* * SPDX-License-Identifier: GPL-2.0 * Copyright (C) 2005-2017 Andes Technology Corporation */ #include <linux/const.h> #include <asm/page.h> #include <asm/vdso.h> OUTPUT_ARCH(nds32) SECTIONS { . = SIZEOF_HEADERS; .hash : { *(.hash) } :text .gnu.hash : { *(.gnu.hash) } .dynsym : { *(.dynsym) } .dynstr : { *(.dynstr) } .gnu.version : { *(.gnu.version) } .gnu.version_d : { *(.gnu.version_d) } .gnu.version_r : { *(.gnu.version_r) } .note : { *(.note.*) } :text :note .text : { *(.text*) } :text .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr .eh_frame : { KEEP (*(.eh_frame)) } :text .dynamic : { *(.dynamic) } :text :dynamic .rodata : { *(.rodata*) } :text /DISCARD/ : { *(.note.GNU-stack) *(.data .data.* .gnu.linkonce.d.* .sdata*) *(.bss .sbss .dynbss .dynsbss) } } /* * We must supply the ELF program headers explicitly to get just one * PT_LOAD segment, and set the flags explicitly to make segments read-only. */ PHDRS { text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ note PT_NOTE FLAGS(4); /* PF_R */ eh_frame_hdr PT_GNU_EH_FRAME; } /* * This controls what symbols we export from the DSO. */ VERSION { LINUX_4 { global: __kernel_rt_sigreturn; __vdso_gettimeofday; __vdso_clock_getres; __vdso_clock_gettime; local: *; }; } /* * Make the rt_sigreturn code visible to the kernel. */ VDSO_rt_sigtramp = __kernel_rt_sigreturn;
AirFortressIlikara/LS2K0300-linux-4.19
28,260
arch/ia64/kernel/head.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Here is where the ball gets rolling as far as the kernel is concerned. * When control is transferred to _start, the bootload has already * loaded us to the correct address. All that's left to do here is * to set up the kernel's global pointer and jump to the kernel * entry point. * * Copyright (C) 1998-2001, 2003, 2005 Hewlett-Packard Co * David Mosberger-Tang <davidm@hpl.hp.com> * Stephane Eranian <eranian@hpl.hp.com> * Copyright (C) 1999 VA Linux Systems * Copyright (C) 1999 Walt Drummond <drummond@valinux.com> * Copyright (C) 1999 Intel Corp. * Copyright (C) 1999 Asit Mallick <Asit.K.Mallick@intel.com> * Copyright (C) 1999 Don Dugger <Don.Dugger@intel.com> * Copyright (C) 2002 Fenghua Yu <fenghua.yu@intel.com> * -Optimize __ia64_save_fpu() and __ia64_load_fpu() for Itanium 2. * Copyright (C) 2004 Ashok Raj <ashok.raj@intel.com> * Support for CPU Hotplug */ #include <asm/asmmacro.h> #include <asm/fpu.h> #include <asm/kregs.h> #include <asm/mmu_context.h> #include <asm/asm-offsets.h> #include <asm/pal.h> #include <asm/pgtable.h> #include <asm/processor.h> #include <asm/ptrace.h> #include <asm/mca_asm.h> #include <linux/init.h> #include <linux/linkage.h> #include <asm/export.h> #ifdef CONFIG_HOTPLUG_CPU #define SAL_PSR_BITS_TO_SET \ (IA64_PSR_AC | IA64_PSR_BN | IA64_PSR_MFH | IA64_PSR_MFL) #define SAVE_FROM_REG(src, ptr, dest) \ mov dest=src;; \ st8 [ptr]=dest,0x08 #define RESTORE_REG(reg, ptr, _tmp) \ ld8 _tmp=[ptr],0x08;; \ mov reg=_tmp #define SAVE_BREAK_REGS(ptr, _idx, _breg, _dest)\ mov ar.lc=IA64_NUM_DBG_REGS-1;; \ mov _idx=0;; \ 1: \ SAVE_FROM_REG(_breg[_idx], ptr, _dest);; \ add _idx=1,_idx;; \ br.cloop.sptk.many 1b #define RESTORE_BREAK_REGS(ptr, _idx, _breg, _tmp, _lbl)\ mov ar.lc=IA64_NUM_DBG_REGS-1;; \ mov _idx=0;; \ _lbl: RESTORE_REG(_breg[_idx], ptr, _tmp);; \ add _idx=1, _idx;; \ br.cloop.sptk.many _lbl #define SAVE_ONE_RR(num, _reg, _tmp) \ movl _tmp=(num<<61);; \ mov _reg=rr[_tmp] #define SAVE_REGION_REGS(_tmp, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7) \ SAVE_ONE_RR(0,_r0, _tmp);; \ SAVE_ONE_RR(1,_r1, _tmp);; \ SAVE_ONE_RR(2,_r2, _tmp);; \ SAVE_ONE_RR(3,_r3, _tmp);; \ SAVE_ONE_RR(4,_r4, _tmp);; \ SAVE_ONE_RR(5,_r5, _tmp);; \ SAVE_ONE_RR(6,_r6, _tmp);; \ SAVE_ONE_RR(7,_r7, _tmp);; #define STORE_REGION_REGS(ptr, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7) \ st8 [ptr]=_r0, 8;; \ st8 [ptr]=_r1, 8;; \ st8 [ptr]=_r2, 8;; \ st8 [ptr]=_r3, 8;; \ st8 [ptr]=_r4, 8;; \ st8 [ptr]=_r5, 8;; \ st8 [ptr]=_r6, 8;; \ st8 [ptr]=_r7, 8;; #define RESTORE_REGION_REGS(ptr, _idx1, _idx2, _tmp) \ mov ar.lc=0x08-1;; \ movl _idx1=0x00;; \ RestRR: \ dep.z _idx2=_idx1,61,3;; \ ld8 _tmp=[ptr],8;; \ mov rr[_idx2]=_tmp;; \ srlz.d;; \ add _idx1=1,_idx1;; \ br.cloop.sptk.few RestRR #define SET_AREA_FOR_BOOTING_CPU(reg1, reg2) \ movl reg1=sal_state_for_booting_cpu;; \ ld8 reg2=[reg1];; /* * Adjust region registers saved before starting to save * break regs and rest of the states that need to be preserved. 
*/ #define SAL_TO_OS_BOOT_HANDOFF_STATE_SAVE(_reg1,_reg2,_pred) \ SAVE_FROM_REG(b0,_reg1,_reg2);; \ SAVE_FROM_REG(b1,_reg1,_reg2);; \ SAVE_FROM_REG(b2,_reg1,_reg2);; \ SAVE_FROM_REG(b3,_reg1,_reg2);; \ SAVE_FROM_REG(b4,_reg1,_reg2);; \ SAVE_FROM_REG(b5,_reg1,_reg2);; \ st8 [_reg1]=r1,0x08;; \ st8 [_reg1]=r12,0x08;; \ st8 [_reg1]=r13,0x08;; \ SAVE_FROM_REG(ar.fpsr,_reg1,_reg2);; \ SAVE_FROM_REG(ar.pfs,_reg1,_reg2);; \ SAVE_FROM_REG(ar.rnat,_reg1,_reg2);; \ SAVE_FROM_REG(ar.unat,_reg1,_reg2);; \ SAVE_FROM_REG(ar.bspstore,_reg1,_reg2);; \ SAVE_FROM_REG(cr.dcr,_reg1,_reg2);; \ SAVE_FROM_REG(cr.iva,_reg1,_reg2);; \ SAVE_FROM_REG(cr.pta,_reg1,_reg2);; \ SAVE_FROM_REG(cr.itv,_reg1,_reg2);; \ SAVE_FROM_REG(cr.pmv,_reg1,_reg2);; \ SAVE_FROM_REG(cr.cmcv,_reg1,_reg2);; \ SAVE_FROM_REG(cr.lrr0,_reg1,_reg2);; \ SAVE_FROM_REG(cr.lrr1,_reg1,_reg2);; \ st8 [_reg1]=r4,0x08;; \ st8 [_reg1]=r5,0x08;; \ st8 [_reg1]=r6,0x08;; \ st8 [_reg1]=r7,0x08;; \ st8 [_reg1]=_pred,0x08;; \ SAVE_FROM_REG(ar.lc, _reg1, _reg2);; \ stf.spill.nta [_reg1]=f2,16;; \ stf.spill.nta [_reg1]=f3,16;; \ stf.spill.nta [_reg1]=f4,16;; \ stf.spill.nta [_reg1]=f5,16;; \ stf.spill.nta [_reg1]=f16,16;; \ stf.spill.nta [_reg1]=f17,16;; \ stf.spill.nta [_reg1]=f18,16;; \ stf.spill.nta [_reg1]=f19,16;; \ stf.spill.nta [_reg1]=f20,16;; \ stf.spill.nta [_reg1]=f21,16;; \ stf.spill.nta [_reg1]=f22,16;; \ stf.spill.nta [_reg1]=f23,16;; \ stf.spill.nta [_reg1]=f24,16;; \ stf.spill.nta [_reg1]=f25,16;; \ stf.spill.nta [_reg1]=f26,16;; \ stf.spill.nta [_reg1]=f27,16;; \ stf.spill.nta [_reg1]=f28,16;; \ stf.spill.nta [_reg1]=f29,16;; \ stf.spill.nta [_reg1]=f30,16;; \ stf.spill.nta [_reg1]=f31,16;; #else #define SET_AREA_FOR_BOOTING_CPU(a1, a2) #define SAL_TO_OS_BOOT_HANDOFF_STATE_SAVE(a1,a2, a3) #define SAVE_REGION_REGS(_tmp, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7) #define STORE_REGION_REGS(ptr, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7) #endif #define SET_ONE_RR(num, pgsize, _tmp1, _tmp2, vhpt) \ movl _tmp1=(num << 61);; \ mov _tmp2=((ia64_rid(IA64_REGION_ID_KERNEL, (num<<61)) << 8) | (pgsize << 2) | vhpt);; \ mov rr[_tmp1]=_tmp2 __PAGE_ALIGNED_DATA .global empty_zero_page EXPORT_DATA_SYMBOL_GPL(empty_zero_page) empty_zero_page: .skip PAGE_SIZE .global swapper_pg_dir swapper_pg_dir: .skip PAGE_SIZE .rodata halt_msg: stringz "Halting kernel\n" __REF .global start_ap /* * Start the kernel. When the bootloader passes control to _start(), r28 * points to the address of the boot parameter area. Execution reaches * here in physical mode. */ GLOBAL_ENTRY(_start) start_ap: .prologue .save rp, r0 // terminate unwind chain with a NULL rp .body rsm psr.i | psr.ic ;; srlz.i ;; { flushrs // must be first insn in group srlz.i } ;; /* * Save the region registers, predicate before they get clobbered */ SAVE_REGION_REGS(r2, r8,r9,r10,r11,r12,r13,r14,r15); mov r25=pr;; /* * Initialize kernel region registers: * rr[0]: VHPT enabled, page size = PAGE_SHIFT * rr[1]: VHPT enabled, page size = PAGE_SHIFT * rr[2]: VHPT enabled, page size = PAGE_SHIFT * rr[3]: VHPT enabled, page size = PAGE_SHIFT * rr[4]: VHPT enabled, page size = PAGE_SHIFT * rr[5]: VHPT enabled, page size = PAGE_SHIFT * rr[6]: VHPT disabled, page size = IA64_GRANULE_SHIFT * rr[7]: VHPT disabled, page size = IA64_GRANULE_SHIFT * We initialize all of them to prevent inadvertently assuming * something about the state of address translation early in boot. 
*/ SET_ONE_RR(0, PAGE_SHIFT, r2, r16, 1);; SET_ONE_RR(1, PAGE_SHIFT, r2, r16, 1);; SET_ONE_RR(2, PAGE_SHIFT, r2, r16, 1);; SET_ONE_RR(3, PAGE_SHIFT, r2, r16, 1);; SET_ONE_RR(4, PAGE_SHIFT, r2, r16, 1);; SET_ONE_RR(5, PAGE_SHIFT, r2, r16, 1);; SET_ONE_RR(6, IA64_GRANULE_SHIFT, r2, r16, 0);; SET_ONE_RR(7, IA64_GRANULE_SHIFT, r2, r16, 0);; /* * Now pin mappings into the TLB for kernel text and data */ mov r18=KERNEL_TR_PAGE_SHIFT<<2 movl r17=KERNEL_START ;; mov cr.itir=r18 mov cr.ifa=r17 mov r16=IA64_TR_KERNEL mov r3=ip movl r18=PAGE_KERNEL ;; dep r2=0,r3,0,KERNEL_TR_PAGE_SHIFT ;; or r18=r2,r18 ;; srlz.i ;; itr.i itr[r16]=r18 ;; itr.d dtr[r16]=r18 ;; srlz.i /* * Switch into virtual mode: */ movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN \ |IA64_PSR_DI) ;; mov cr.ipsr=r16 movl r17=1f ;; mov cr.iip=r17 mov cr.ifs=r0 ;; rfi ;; 1: // now we are in virtual mode SET_AREA_FOR_BOOTING_CPU(r2, r16); STORE_REGION_REGS(r16, r8,r9,r10,r11,r12,r13,r14,r15); SAL_TO_OS_BOOT_HANDOFF_STATE_SAVE(r16,r17,r25) ;; // set IVT entry point---can't access I/O ports without it movl r3=ia64_ivt ;; mov cr.iva=r3 movl r2=FPSR_DEFAULT ;; srlz.i movl gp=__gp mov ar.fpsr=r2 ;; #define isAP p2 // are we an Application Processor? #define isBP p3 // are we the Bootstrap Processor? #ifdef CONFIG_SMP /* * Find the init_task for the currently booting CPU. At poweron, and in * UP mode, task_for_booting_cpu is NULL. */ movl r3=task_for_booting_cpu ;; ld8 r3=[r3] movl r2=init_task ;; cmp.eq isBP,isAP=r3,r0 ;; (isAP) mov r2=r3 #else movl r2=init_task cmp.eq isBP,isAP=r0,r0 #endif ;; tpa r3=r2 // r3 == phys addr of task struct mov r16=-1 (isBP) br.cond.dpnt .load_current // BP stack is on region 5 --- no need to map it // load mapping for stack (virtaddr in r2, physaddr in r3) rsm psr.ic movl r17=PAGE_KERNEL ;; srlz.d dep r18=0,r3,0,12 ;; or r18=r17,r18 dep r2=-1,r3,61,3 // IMVA of task ;; mov r17=rr[r2] shr.u r16=r3,IA64_GRANULE_SHIFT ;; dep r17=0,r17,8,24 ;; mov cr.itir=r17 mov cr.ifa=r2 mov r19=IA64_TR_CURRENT_STACK ;; itr.d dtr[r19]=r18 ;; ssm psr.ic srlz.d ;; .load_current: // load the "current" pointer (r13) and ar.k6 with the current task mov IA64_KR(CURRENT)=r2 // virtual address mov IA64_KR(CURRENT_STACK)=r16 mov r13=r2 /* * Reserve space at the top of the stack for "struct pt_regs". Kernel * threads don't store interesting values in that structure, but the space * still needs to be there because time-critical stuff such as the context * switching can be implemented more efficiently (for example, __switch_to() * always sets the psr.dfh bit of the task it is switching to). 
*/ addl r12=IA64_STK_OFFSET-IA64_PT_REGS_SIZE-16,r2 addl r2=IA64_RBS_OFFSET,r2 // initialize the RSE mov ar.rsc=0 // place RSE in enforced lazy mode ;; loadrs // clear the dirty partition movl r19=__phys_per_cpu_start mov r18=PERCPU_PAGE_SIZE ;; #ifndef CONFIG_SMP add r19=r19,r18 ;; #else (isAP) br.few 2f movl r20=__cpu0_per_cpu ;; shr.u r18=r18,3 1: ld8 r21=[r19],8;; st8[r20]=r21,8 adds r18=-1,r18;; cmp4.lt p7,p6=0,r18 (p7) br.cond.dptk.few 1b mov r19=r20 ;; 2: #endif tpa r19=r19 ;; .pred.rel.mutex isBP,isAP (isBP) mov IA64_KR(PER_CPU_DATA)=r19 // per-CPU base for cpu0 (isAP) mov IA64_KR(PER_CPU_DATA)=r0 // clear physical per-CPU base ;; mov ar.bspstore=r2 // establish the new RSE stack ;; mov ar.rsc=0x3 // place RSE in eager mode (isBP) dep r28=-1,r28,61,3 // make address virtual (isBP) movl r2=ia64_boot_param ;; (isBP) st8 [r2]=r28 // save the address of the boot param area passed by the bootloader #ifdef CONFIG_SMP (isAP) br.call.sptk.many rp=start_secondary .ret0: (isAP) br.cond.sptk self #endif // This is executed by the bootstrap processor (bsp) only: #ifdef CONFIG_IA64_FW_EMU // initialize PAL & SAL emulator: br.call.sptk.many rp=sys_fw_init .ret1: #endif br.call.sptk.many rp=start_kernel .ret2: addl r3=@ltoff(halt_msg),gp ;; alloc r2=ar.pfs,8,0,2,0 ;; ld8 out0=[r3] br.call.sptk.many b0=console_print self: hint @pause br.sptk.many self // endless loop END(_start) .text GLOBAL_ENTRY(ia64_save_debug_regs) alloc r16=ar.pfs,1,0,0,0 mov r20=ar.lc // preserve ar.lc mov ar.lc=IA64_NUM_DBG_REGS-1 mov r18=0 add r19=IA64_NUM_DBG_REGS*8,in0 ;; 1: mov r16=dbr[r18] #ifdef CONFIG_ITANIUM ;; srlz.d #endif mov r17=ibr[r18] add r18=1,r18 ;; st8.nta [in0]=r16,8 st8.nta [r19]=r17,8 br.cloop.sptk.many 1b ;; mov ar.lc=r20 // restore ar.lc br.ret.sptk.many rp END(ia64_save_debug_regs) GLOBAL_ENTRY(ia64_load_debug_regs) alloc r16=ar.pfs,1,0,0,0 lfetch.nta [in0] mov r20=ar.lc // preserve ar.lc add r19=IA64_NUM_DBG_REGS*8,in0 mov ar.lc=IA64_NUM_DBG_REGS-1 mov r18=-1 ;; 1: ld8.nta r16=[in0],8 ld8.nta r17=[r19],8 add r18=1,r18 ;; mov dbr[r18]=r16 #ifdef CONFIG_ITANIUM ;; srlz.d // Errata 132 (NoFix status) #endif mov ibr[r18]=r17 br.cloop.sptk.many 1b ;; mov ar.lc=r20 // restore ar.lc br.ret.sptk.many rp END(ia64_load_debug_regs) GLOBAL_ENTRY(__ia64_save_fpu) alloc r2=ar.pfs,1,4,0,0 adds loc0=96*16-16,in0 adds loc1=96*16-16-128,in0 ;; stf.spill.nta [loc0]=f127,-256 stf.spill.nta [loc1]=f119,-256 ;; stf.spill.nta [loc0]=f111,-256 stf.spill.nta [loc1]=f103,-256 ;; stf.spill.nta [loc0]=f95,-256 stf.spill.nta [loc1]=f87,-256 ;; stf.spill.nta [loc0]=f79,-256 stf.spill.nta [loc1]=f71,-256 ;; stf.spill.nta [loc0]=f63,-256 stf.spill.nta [loc1]=f55,-256 adds loc2=96*16-32,in0 ;; stf.spill.nta [loc0]=f47,-256 stf.spill.nta [loc1]=f39,-256 adds loc3=96*16-32-128,in0 ;; stf.spill.nta [loc2]=f126,-256 stf.spill.nta [loc3]=f118,-256 ;; stf.spill.nta [loc2]=f110,-256 stf.spill.nta [loc3]=f102,-256 ;; stf.spill.nta [loc2]=f94,-256 stf.spill.nta [loc3]=f86,-256 ;; stf.spill.nta [loc2]=f78,-256 stf.spill.nta [loc3]=f70,-256 ;; stf.spill.nta [loc2]=f62,-256 stf.spill.nta [loc3]=f54,-256 adds loc0=96*16-48,in0 ;; stf.spill.nta [loc2]=f46,-256 stf.spill.nta [loc3]=f38,-256 adds loc1=96*16-48-128,in0 ;; stf.spill.nta [loc0]=f125,-256 stf.spill.nta [loc1]=f117,-256 ;; stf.spill.nta [loc0]=f109,-256 stf.spill.nta [loc1]=f101,-256 ;; stf.spill.nta [loc0]=f93,-256 stf.spill.nta [loc1]=f85,-256 ;; stf.spill.nta [loc0]=f77,-256 stf.spill.nta [loc1]=f69,-256 ;; stf.spill.nta [loc0]=f61,-256 stf.spill.nta [loc1]=f53,-256 adds 
loc2=96*16-64,in0 ;; stf.spill.nta [loc0]=f45,-256 stf.spill.nta [loc1]=f37,-256 adds loc3=96*16-64-128,in0 ;; stf.spill.nta [loc2]=f124,-256 stf.spill.nta [loc3]=f116,-256 ;; stf.spill.nta [loc2]=f108,-256 stf.spill.nta [loc3]=f100,-256 ;; stf.spill.nta [loc2]=f92,-256 stf.spill.nta [loc3]=f84,-256 ;; stf.spill.nta [loc2]=f76,-256 stf.spill.nta [loc3]=f68,-256 ;; stf.spill.nta [loc2]=f60,-256 stf.spill.nta [loc3]=f52,-256 adds loc0=96*16-80,in0 ;; stf.spill.nta [loc2]=f44,-256 stf.spill.nta [loc3]=f36,-256 adds loc1=96*16-80-128,in0 ;; stf.spill.nta [loc0]=f123,-256 stf.spill.nta [loc1]=f115,-256 ;; stf.spill.nta [loc0]=f107,-256 stf.spill.nta [loc1]=f99,-256 ;; stf.spill.nta [loc0]=f91,-256 stf.spill.nta [loc1]=f83,-256 ;; stf.spill.nta [loc0]=f75,-256 stf.spill.nta [loc1]=f67,-256 ;; stf.spill.nta [loc0]=f59,-256 stf.spill.nta [loc1]=f51,-256 adds loc2=96*16-96,in0 ;; stf.spill.nta [loc0]=f43,-256 stf.spill.nta [loc1]=f35,-256 adds loc3=96*16-96-128,in0 ;; stf.spill.nta [loc2]=f122,-256 stf.spill.nta [loc3]=f114,-256 ;; stf.spill.nta [loc2]=f106,-256 stf.spill.nta [loc3]=f98,-256 ;; stf.spill.nta [loc2]=f90,-256 stf.spill.nta [loc3]=f82,-256 ;; stf.spill.nta [loc2]=f74,-256 stf.spill.nta [loc3]=f66,-256 ;; stf.spill.nta [loc2]=f58,-256 stf.spill.nta [loc3]=f50,-256 adds loc0=96*16-112,in0 ;; stf.spill.nta [loc2]=f42,-256 stf.spill.nta [loc3]=f34,-256 adds loc1=96*16-112-128,in0 ;; stf.spill.nta [loc0]=f121,-256 stf.spill.nta [loc1]=f113,-256 ;; stf.spill.nta [loc0]=f105,-256 stf.spill.nta [loc1]=f97,-256 ;; stf.spill.nta [loc0]=f89,-256 stf.spill.nta [loc1]=f81,-256 ;; stf.spill.nta [loc0]=f73,-256 stf.spill.nta [loc1]=f65,-256 ;; stf.spill.nta [loc0]=f57,-256 stf.spill.nta [loc1]=f49,-256 adds loc2=96*16-128,in0 ;; stf.spill.nta [loc0]=f41,-256 stf.spill.nta [loc1]=f33,-256 adds loc3=96*16-128-128,in0 ;; stf.spill.nta [loc2]=f120,-256 stf.spill.nta [loc3]=f112,-256 ;; stf.spill.nta [loc2]=f104,-256 stf.spill.nta [loc3]=f96,-256 ;; stf.spill.nta [loc2]=f88,-256 stf.spill.nta [loc3]=f80,-256 ;; stf.spill.nta [loc2]=f72,-256 stf.spill.nta [loc3]=f64,-256 ;; stf.spill.nta [loc2]=f56,-256 stf.spill.nta [loc3]=f48,-256 ;; stf.spill.nta [loc2]=f40 stf.spill.nta [loc3]=f32 br.ret.sptk.many rp END(__ia64_save_fpu) GLOBAL_ENTRY(__ia64_load_fpu) alloc r2=ar.pfs,1,2,0,0 adds r3=128,in0 adds r14=256,in0 adds r15=384,in0 mov loc0=512 mov loc1=-1024+16 ;; ldf.fill.nta f32=[in0],loc0 ldf.fill.nta f40=[ r3],loc0 ldf.fill.nta f48=[r14],loc0 ldf.fill.nta f56=[r15],loc0 ;; ldf.fill.nta f64=[in0],loc0 ldf.fill.nta f72=[ r3],loc0 ldf.fill.nta f80=[r14],loc0 ldf.fill.nta f88=[r15],loc0 ;; ldf.fill.nta f96=[in0],loc1 ldf.fill.nta f104=[ r3],loc1 ldf.fill.nta f112=[r14],loc1 ldf.fill.nta f120=[r15],loc1 ;; ldf.fill.nta f33=[in0],loc0 ldf.fill.nta f41=[ r3],loc0 ldf.fill.nta f49=[r14],loc0 ldf.fill.nta f57=[r15],loc0 ;; ldf.fill.nta f65=[in0],loc0 ldf.fill.nta f73=[ r3],loc0 ldf.fill.nta f81=[r14],loc0 ldf.fill.nta f89=[r15],loc0 ;; ldf.fill.nta f97=[in0],loc1 ldf.fill.nta f105=[ r3],loc1 ldf.fill.nta f113=[r14],loc1 ldf.fill.nta f121=[r15],loc1 ;; ldf.fill.nta f34=[in0],loc0 ldf.fill.nta f42=[ r3],loc0 ldf.fill.nta f50=[r14],loc0 ldf.fill.nta f58=[r15],loc0 ;; ldf.fill.nta f66=[in0],loc0 ldf.fill.nta f74=[ r3],loc0 ldf.fill.nta f82=[r14],loc0 ldf.fill.nta f90=[r15],loc0 ;; ldf.fill.nta f98=[in0],loc1 ldf.fill.nta f106=[ r3],loc1 ldf.fill.nta f114=[r14],loc1 ldf.fill.nta f122=[r15],loc1 ;; ldf.fill.nta f35=[in0],loc0 ldf.fill.nta f43=[ r3],loc0 ldf.fill.nta f51=[r14],loc0 ldf.fill.nta f59=[r15],loc0 ;; 
ldf.fill.nta f67=[in0],loc0 ldf.fill.nta f75=[ r3],loc0 ldf.fill.nta f83=[r14],loc0 ldf.fill.nta f91=[r15],loc0 ;; ldf.fill.nta f99=[in0],loc1 ldf.fill.nta f107=[ r3],loc1 ldf.fill.nta f115=[r14],loc1 ldf.fill.nta f123=[r15],loc1 ;; ldf.fill.nta f36=[in0],loc0 ldf.fill.nta f44=[ r3],loc0 ldf.fill.nta f52=[r14],loc0 ldf.fill.nta f60=[r15],loc0 ;; ldf.fill.nta f68=[in0],loc0 ldf.fill.nta f76=[ r3],loc0 ldf.fill.nta f84=[r14],loc0 ldf.fill.nta f92=[r15],loc0 ;; ldf.fill.nta f100=[in0],loc1 ldf.fill.nta f108=[ r3],loc1 ldf.fill.nta f116=[r14],loc1 ldf.fill.nta f124=[r15],loc1 ;; ldf.fill.nta f37=[in0],loc0 ldf.fill.nta f45=[ r3],loc0 ldf.fill.nta f53=[r14],loc0 ldf.fill.nta f61=[r15],loc0 ;; ldf.fill.nta f69=[in0],loc0 ldf.fill.nta f77=[ r3],loc0 ldf.fill.nta f85=[r14],loc0 ldf.fill.nta f93=[r15],loc0 ;; ldf.fill.nta f101=[in0],loc1 ldf.fill.nta f109=[ r3],loc1 ldf.fill.nta f117=[r14],loc1 ldf.fill.nta f125=[r15],loc1 ;; ldf.fill.nta f38 =[in0],loc0 ldf.fill.nta f46 =[ r3],loc0 ldf.fill.nta f54 =[r14],loc0 ldf.fill.nta f62 =[r15],loc0 ;; ldf.fill.nta f70 =[in0],loc0 ldf.fill.nta f78 =[ r3],loc0 ldf.fill.nta f86 =[r14],loc0 ldf.fill.nta f94 =[r15],loc0 ;; ldf.fill.nta f102=[in0],loc1 ldf.fill.nta f110=[ r3],loc1 ldf.fill.nta f118=[r14],loc1 ldf.fill.nta f126=[r15],loc1 ;; ldf.fill.nta f39 =[in0],loc0 ldf.fill.nta f47 =[ r3],loc0 ldf.fill.nta f55 =[r14],loc0 ldf.fill.nta f63 =[r15],loc0 ;; ldf.fill.nta f71 =[in0],loc0 ldf.fill.nta f79 =[ r3],loc0 ldf.fill.nta f87 =[r14],loc0 ldf.fill.nta f95 =[r15],loc0 ;; ldf.fill.nta f103=[in0] ldf.fill.nta f111=[ r3] ldf.fill.nta f119=[r14] ldf.fill.nta f127=[r15] br.ret.sptk.many rp END(__ia64_load_fpu) GLOBAL_ENTRY(__ia64_init_fpu) stf.spill [sp]=f0 // M3 mov f32=f0 // F nop.b 0 ldfps f33,f34=[sp] // M0 ldfps f35,f36=[sp] // M1 mov f37=f0 // F ;; setf.s f38=r0 // M2 setf.s f39=r0 // M3 mov f40=f0 // F ldfps f41,f42=[sp] // M0 ldfps f43,f44=[sp] // M1 mov f45=f0 // F setf.s f46=r0 // M2 setf.s f47=r0 // M3 mov f48=f0 // F ldfps f49,f50=[sp] // M0 ldfps f51,f52=[sp] // M1 mov f53=f0 // F setf.s f54=r0 // M2 setf.s f55=r0 // M3 mov f56=f0 // F ldfps f57,f58=[sp] // M0 ldfps f59,f60=[sp] // M1 mov f61=f0 // F setf.s f62=r0 // M2 setf.s f63=r0 // M3 mov f64=f0 // F ldfps f65,f66=[sp] // M0 ldfps f67,f68=[sp] // M1 mov f69=f0 // F setf.s f70=r0 // M2 setf.s f71=r0 // M3 mov f72=f0 // F ldfps f73,f74=[sp] // M0 ldfps f75,f76=[sp] // M1 mov f77=f0 // F setf.s f78=r0 // M2 setf.s f79=r0 // M3 mov f80=f0 // F ldfps f81,f82=[sp] // M0 ldfps f83,f84=[sp] // M1 mov f85=f0 // F setf.s f86=r0 // M2 setf.s f87=r0 // M3 mov f88=f0 // F /* * When the instructions are cached, it would be faster to initialize * the remaining registers with simply mov instructions (F-unit). * This gets the time down to ~29 cycles. However, this would use up * 33 bundles, whereas continuing with the above pattern yields * 10 bundles and ~30 cycles. 
*/ ldfps f89,f90=[sp] // M0 ldfps f91,f92=[sp] // M1 mov f93=f0 // F setf.s f94=r0 // M2 setf.s f95=r0 // M3 mov f96=f0 // F ldfps f97,f98=[sp] // M0 ldfps f99,f100=[sp] // M1 mov f101=f0 // F setf.s f102=r0 // M2 setf.s f103=r0 // M3 mov f104=f0 // F ldfps f105,f106=[sp] // M0 ldfps f107,f108=[sp] // M1 mov f109=f0 // F setf.s f110=r0 // M2 setf.s f111=r0 // M3 mov f112=f0 // F ldfps f113,f114=[sp] // M0 ldfps f115,f116=[sp] // M1 mov f117=f0 // F setf.s f118=r0 // M2 setf.s f119=r0 // M3 mov f120=f0 // F ldfps f121,f122=[sp] // M0 ldfps f123,f124=[sp] // M1 mov f125=f0 // F setf.s f126=r0 // M2 setf.s f127=r0 // M3 br.ret.sptk.many rp // F END(__ia64_init_fpu) /* * Switch execution mode from virtual to physical * * Inputs: * r16 = new psr to establish * Output: * r19 = old virtual address of ar.bsp * r20 = old virtual address of sp * * Note: RSE must already be in enforced lazy mode */ GLOBAL_ENTRY(ia64_switch_mode_phys) { rsm psr.i | psr.ic // disable interrupts and interrupt collection mov r15=ip } ;; { flushrs // must be first insn in group srlz.i } ;; mov cr.ipsr=r16 // set new PSR add r3=1f-ia64_switch_mode_phys,r15 mov r19=ar.bsp mov r20=sp mov r14=rp // get return address into a general register ;; // going to physical mode, use tpa to translate virt->phys tpa r17=r19 tpa r3=r3 tpa sp=sp tpa r14=r14 ;; mov r18=ar.rnat // save ar.rnat mov ar.bspstore=r17 // this steps on ar.rnat mov cr.iip=r3 mov cr.ifs=r0 ;; mov ar.rnat=r18 // restore ar.rnat rfi // must be last insn in group ;; 1: mov rp=r14 br.ret.sptk.many rp END(ia64_switch_mode_phys) /* * Switch execution mode from physical to virtual * * Inputs: * r16 = new psr to establish * r19 = new bspstore to establish * r20 = new sp to establish * * Note: RSE must already be in enforced lazy mode */ GLOBAL_ENTRY(ia64_switch_mode_virt) { rsm psr.i | psr.ic // disable interrupts and interrupt collection mov r15=ip } ;; { flushrs // must be first insn in group srlz.i } ;; mov cr.ipsr=r16 // set new PSR add r3=1f-ia64_switch_mode_virt,r15 mov r14=rp // get return address into a general register ;; // going to virtual // - for code addresses, set upper bits of addr to KERNEL_START // - for stack addresses, copy from input argument movl r18=KERNEL_START dep r3=0,r3,KERNEL_TR_PAGE_SHIFT,64-KERNEL_TR_PAGE_SHIFT dep r14=0,r14,KERNEL_TR_PAGE_SHIFT,64-KERNEL_TR_PAGE_SHIFT mov sp=r20 ;; or r3=r3,r18 or r14=r14,r18 ;; mov r18=ar.rnat // save ar.rnat mov ar.bspstore=r19 // this steps on ar.rnat mov cr.iip=r3 mov cr.ifs=r0 ;; mov ar.rnat=r18 // restore ar.rnat rfi // must be last insn in group ;; 1: mov rp=r14 br.ret.sptk.many rp END(ia64_switch_mode_virt) GLOBAL_ENTRY(ia64_delay_loop) .prologue { nop 0 // work around GAS unwind info generation bug... .save ar.lc,r2 mov r2=ar.lc .body ;; mov ar.lc=r32 } ;; // force loop to be 32-byte aligned (GAS bug means we cannot use .align // inside function body without corrupting unwind info). { nop 0 } 1: br.cloop.sptk.few 1b ;; mov ar.lc=r2 br.ret.sptk.many rp END(ia64_delay_loop) /* * Return a CPU-local timestamp in nano-seconds. This timestamp is * NOT synchronized across CPUs its return value must never be * compared against the values returned on another CPU. The usage in * kernel/sched/core.c ensures that. * * The return-value of sched_clock() is NOT supposed to wrap-around. * If it did, it would cause some scheduling hiccups (at the worst). * Fortunately, with a 64-bit cycle-counter ticking at 100GHz, even * that would happen only once every 5+ years. 
* * The code below basically calculates: * * (ia64_get_itc() * local_cpu_data->nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT * * except that the multiplication and the shift are done with 128-bit * intermediate precision so that we can produce a full 64-bit result. */ GLOBAL_ENTRY(ia64_native_sched_clock) addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0 mov.m r9=ar.itc // fetch cycle-counter (35 cyc) ;; ldf8 f8=[r8] ;; setf.sig f9=r9 // certain to stall, so issue it _after_ ldf8... ;; xmpy.lu f10=f9,f8 // calculate low 64 bits of 128-bit product (4 cyc) xmpy.hu f11=f9,f8 // calculate high 64 bits of 128-bit product ;; getf.sig r8=f10 // (5 cyc) getf.sig r9=f11 ;; shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT br.ret.sptk.many rp END(ia64_native_sched_clock) #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE GLOBAL_ENTRY(cycle_to_nsec) alloc r16=ar.pfs,1,0,0,0 addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0 ;; ldf8 f8=[r8] ;; setf.sig f9=r32 ;; xmpy.lu f10=f9,f8 // calculate low 64 bits of 128-bit product (4 cyc) xmpy.hu f11=f9,f8 // calculate high 64 bits of 128-bit product ;; getf.sig r8=f10 // (5 cyc) getf.sig r9=f11 ;; shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT br.ret.sptk.many rp END(cycle_to_nsec) #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ #ifdef CONFIG_IA64_BRL_EMU /* * Assembly routines used by brl_emu.c to set preserved register state. */ #define SET_REG(reg) \ GLOBAL_ENTRY(ia64_set_##reg); \ alloc r16=ar.pfs,1,0,0,0; \ mov reg=r32; \ ;; \ br.ret.sptk.many rp; \ END(ia64_set_##reg) SET_REG(b1); SET_REG(b2); SET_REG(b3); SET_REG(b4); SET_REG(b5); #endif /* CONFIG_IA64_BRL_EMU */ #ifdef CONFIG_SMP #ifdef CONFIG_HOTPLUG_CPU GLOBAL_ENTRY(ia64_jump_to_sal) alloc r16=ar.pfs,1,0,0,0;; rsm psr.i | psr.ic { flushrs srlz.i } tpa r25=in0 movl r18=tlb_purge_done;; DATA_VA_TO_PA(r18);; mov b1=r18 // Return location movl r18=ia64_do_tlb_purge;; DATA_VA_TO_PA(r18);; mov b2=r18 // doing tlb_flush work mov ar.rsc=0 // Put RSE in enforced lazy, LE mode movl r17=1f;; DATA_VA_TO_PA(r17);; mov cr.iip=r17 movl r16=SAL_PSR_BITS_TO_SET;; mov cr.ipsr=r16 mov cr.ifs=r0;; rfi;; // note: this unmask MCA/INIT (psr.mc) 1: /* * Invalidate all TLB data/inst */ br.sptk.many b2;; // jump to tlb purge code tlb_purge_done: RESTORE_REGION_REGS(r25, r17,r18,r19);; RESTORE_REG(b0, r25, r17);; RESTORE_REG(b1, r25, r17);; RESTORE_REG(b2, r25, r17);; RESTORE_REG(b3, r25, r17);; RESTORE_REG(b4, r25, r17);; RESTORE_REG(b5, r25, r17);; ld8 r1=[r25],0x08;; ld8 r12=[r25],0x08;; ld8 r13=[r25],0x08;; RESTORE_REG(ar.fpsr, r25, r17);; RESTORE_REG(ar.pfs, r25, r17);; RESTORE_REG(ar.rnat, r25, r17);; RESTORE_REG(ar.unat, r25, r17);; RESTORE_REG(ar.bspstore, r25, r17);; RESTORE_REG(cr.dcr, r25, r17);; RESTORE_REG(cr.iva, r25, r17);; RESTORE_REG(cr.pta, r25, r17);; srlz.d;; // required not to violate RAW dependency RESTORE_REG(cr.itv, r25, r17);; RESTORE_REG(cr.pmv, r25, r17);; RESTORE_REG(cr.cmcv, r25, r17);; RESTORE_REG(cr.lrr0, r25, r17);; RESTORE_REG(cr.lrr1, r25, r17);; ld8 r4=[r25],0x08;; ld8 r5=[r25],0x08;; ld8 r6=[r25],0x08;; ld8 r7=[r25],0x08;; ld8 r17=[r25],0x08;; mov pr=r17,-1;; RESTORE_REG(ar.lc, r25, r17);; /* * Now Restore floating point regs */ ldf.fill.nta f2=[r25],16;; ldf.fill.nta f3=[r25],16;; ldf.fill.nta f4=[r25],16;; ldf.fill.nta f5=[r25],16;; ldf.fill.nta f16=[r25],16;; ldf.fill.nta f17=[r25],16;; ldf.fill.nta f18=[r25],16;; ldf.fill.nta f19=[r25],16;; ldf.fill.nta f20=[r25],16;; ldf.fill.nta f21=[r25],16;; ldf.fill.nta f22=[r25],16;; ldf.fill.nta f23=[r25],16;; ldf.fill.nta f24=[r25],16;; 
ldf.fill.nta f25=[r25],16;; ldf.fill.nta f26=[r25],16;; ldf.fill.nta f27=[r25],16;; ldf.fill.nta f28=[r25],16;; ldf.fill.nta f29=[r25],16;; ldf.fill.nta f30=[r25],16;; ldf.fill.nta f31=[r25],16;; /* * Now that we have done all the register restores * we are now ready for the big DIVE to SAL Land */ ssm psr.ic;; srlz.d;; br.ret.sptk.many b0;; END(ia64_jump_to_sal) #endif /* CONFIG_HOTPLUG_CPU */ #endif /* CONFIG_SMP */
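The ia64_native_sched_clock comment above boils down to one fixed-point step: multiply the cycle counter by nsec_per_cyc with 128-bit intermediate precision, then shift the result right by IA64_NSEC_PER_CYC_SHIFT. The following C sketch mirrors that computation; it is not kernel code, it assumes a compiler that provides unsigned __int128, and the numbers in main() are made up purely for the demo.

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the xmpy.lu/xmpy.hu + shrp sequence in ia64_native_sched_clock:
 * do the 64x64 multiply at full 128-bit width, then shift the high:low pair
 * right so the scaled 64-bit result survives intact.
 */
static uint64_t cycles_to_ns(uint64_t itc, uint64_t nsec_per_cyc, unsigned shift)
{
	unsigned __int128 prod = (unsigned __int128)itc * nsec_per_cyc;	/* low/high halves, as in f10/f11 */

	return (uint64_t)(prod >> shift);					/* the shrp step */
}

int main(void)
{
	/* Illustrative values only; the kernel reads them from ia64_cpu_info. */
	uint64_t itc = 123456789ULL;
	uint64_t nsec_per_cyc = 1ULL << 16;	/* pretend 1 cycle == 1 ns at a shift of 16 */

	printf("%llu ns\n", (unsigned long long)cycles_to_ns(itc, nsec_per_cyc, 16));
	return 0;
}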
AirFortressIlikara/LS2K0300-linux-4.19
7,877
arch/ia64/kernel/pal.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * PAL Firmware support * IA-64 Processor Programmers Reference Vol 2 * * Copyright (C) 1999 Don Dugger <don.dugger@intel.com> * Copyright (C) 1999 Walt Drummond <drummond@valinux.com> * Copyright (C) 1999-2001, 2003 Hewlett-Packard Co * David Mosberger <davidm@hpl.hp.com> * Stephane Eranian <eranian@hpl.hp.com> * * 05/22/2000 eranian Added support for stacked register calls * 05/24/2000 eranian Added support for physical mode static calls */ #include <asm/asmmacro.h> #include <asm/processor.h> #include <asm/export.h> .data pal_entry_point: data8 ia64_pal_default_handler .text /* * Set the PAL entry point address. This could be written in C code, but we * do it here to keep it all in one module (besides, it's so trivial that it's * not a big deal). * * in0 Address of the PAL entry point (text address, NOT a function * descriptor). */ GLOBAL_ENTRY(ia64_pal_handler_init) alloc r3=ar.pfs,1,0,0,0 movl r2=pal_entry_point ;; st8 [r2]=in0 br.ret.sptk.many rp END(ia64_pal_handler_init) /* * Default PAL call handler. This needs to be coded in assembly because it * uses the static calling convention, i.e., the RSE may not be used and * calls are done via "br.cond" (not "br.call"). */ GLOBAL_ENTRY(ia64_pal_default_handler) mov r8=-1 br.cond.sptk.many rp END(ia64_pal_default_handler) /* * Make a PAL call using the static calling convention. * * in0 Index of PAL service * in1 - in3 Remaining PAL arguments */ GLOBAL_ENTRY(ia64_pal_call_static) .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(4) alloc loc1 = ar.pfs,4,5,0,0 movl loc2 = pal_entry_point 1: { mov r28 = in0 mov r29 = in1 mov r8 = ip } ;; ld8 loc2 = [loc2] // loc2 <- entry point adds r8 = 1f-1b,r8 mov loc4=ar.rsc // save RSE configuration ;; mov ar.rsc=0 // put RSE in enforced lazy, LE mode mov loc3 = psr mov loc0 = rp .body mov r30 = in2 mov r31 = in3 mov b7 = loc2 rsm psr.i ;; mov rp = r8 br.cond.sptk.many b7 1: mov psr.l = loc3 mov ar.rsc = loc4 // restore RSE configuration mov ar.pfs = loc1 mov rp = loc0 ;; srlz.d // seralize restoration of psr.l br.ret.sptk.many b0 END(ia64_pal_call_static) EXPORT_SYMBOL(ia64_pal_call_static) /* * Make a PAL call using the stacked registers calling convention. * * Inputs: * in0 Index of PAL service * in2 - in3 Remaining PAL arguments */ GLOBAL_ENTRY(ia64_pal_call_stacked) .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(4) alloc loc1 = ar.pfs,4,4,4,0 movl loc2 = pal_entry_point mov r28 = in0 // Index MUST be copied to r28 mov out0 = in0 // AND in0 of PAL function mov loc0 = rp .body ;; ld8 loc2 = [loc2] // loc2 <- entry point mov out1 = in1 mov out2 = in2 mov out3 = in3 mov loc3 = psr ;; rsm psr.i mov b7 = loc2 ;; br.call.sptk.many rp=b7 // now make the call .ret0: mov psr.l = loc3 mov ar.pfs = loc1 mov rp = loc0 ;; srlz.d // serialize restoration of psr.l br.ret.sptk.many b0 END(ia64_pal_call_stacked) EXPORT_SYMBOL(ia64_pal_call_stacked) /* * Make a physical mode PAL call using the static registers calling convention. * * Inputs: * in0 Index of PAL service * in2 - in3 Remaining PAL arguments * * PSR_LP, PSR_TB, PSR_ID, PSR_DA are never set by the kernel. * So we don't need to clear them. 
*/ #define PAL_PSR_BITS_TO_CLEAR \ (IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_DB | IA64_PSR_RT |\ IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | \ IA64_PSR_DFL | IA64_PSR_DFH) #define PAL_PSR_BITS_TO_SET \ (IA64_PSR_BN) GLOBAL_ENTRY(ia64_pal_call_phys_static) .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(4) alloc loc1 = ar.pfs,4,7,0,0 movl loc2 = pal_entry_point 1: { mov r28 = in0 // copy procedure index mov r8 = ip // save ip to compute branch mov loc0 = rp // save rp } .body ;; ld8 loc2 = [loc2] // loc2 <- entry point mov r29 = in1 // first argument mov r30 = in2 // copy arg2 mov r31 = in3 // copy arg3 ;; mov loc3 = psr // save psr adds r8 = 1f-1b,r8 // calculate return address for call ;; mov loc4=ar.rsc // save RSE configuration dep.z loc2=loc2,0,61 // convert pal entry point to physical tpa r8=r8 // convert rp to physical ;; mov b7 = loc2 // install target to branch reg mov ar.rsc=0 // put RSE in enforced lazy, LE mode movl r16=PAL_PSR_BITS_TO_CLEAR movl r17=PAL_PSR_BITS_TO_SET ;; or loc3=loc3,r17 // add in psr the bits to set ;; andcm r16=loc3,r16 // removes bits to clear from psr br.call.sptk.many rp=ia64_switch_mode_phys mov rp = r8 // install return address (physical) mov loc5 = r19 mov loc6 = r20 br.cond.sptk.many b7 1: mov ar.rsc=0 // put RSE in enforced lazy, LE mode mov r16=loc3 // r16= original psr mov r19=loc5 mov r20=loc6 br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode mov psr.l = loc3 // restore init PSR mov ar.pfs = loc1 mov rp = loc0 ;; mov ar.rsc=loc4 // restore RSE configuration srlz.d // seralize restoration of psr.l br.ret.sptk.many b0 END(ia64_pal_call_phys_static) EXPORT_SYMBOL(ia64_pal_call_phys_static) /* * Make a PAL call using the stacked registers in physical mode. * * Inputs: * in0 Index of PAL service * in2 - in3 Remaining PAL arguments */ GLOBAL_ENTRY(ia64_pal_call_phys_stacked) .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5) alloc loc1 = ar.pfs,5,7,4,0 movl loc2 = pal_entry_point 1: { mov r28 = in0 // copy procedure index mov loc0 = rp // save rp } .body ;; ld8 loc2 = [loc2] // loc2 <- entry point mov loc3 = psr // save psr ;; mov loc4=ar.rsc // save RSE configuration dep.z loc2=loc2,0,61 // convert pal entry point to physical ;; mov ar.rsc=0 // put RSE in enforced lazy, LE mode movl r16=PAL_PSR_BITS_TO_CLEAR movl r17=PAL_PSR_BITS_TO_SET ;; or loc3=loc3,r17 // add in psr the bits to set mov b7 = loc2 // install target to branch reg ;; andcm r16=loc3,r16 // removes bits to clear from psr br.call.sptk.many rp=ia64_switch_mode_phys mov out0 = in0 // first argument mov out1 = in1 // copy arg2 mov out2 = in2 // copy arg3 mov out3 = in3 // copy arg3 mov loc5 = r19 mov loc6 = r20 br.call.sptk.many rp=b7 // now make the call mov ar.rsc=0 // put RSE in enforced lazy, LE mode mov r16=loc3 // r16= original psr mov r19=loc5 mov r20=loc6 br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode mov psr.l = loc3 // restore init PSR mov ar.pfs = loc1 mov rp = loc0 ;; mov ar.rsc=loc4 // restore RSE configuration srlz.d // seralize restoration of psr.l br.ret.sptk.many b0 END(ia64_pal_call_phys_stacked) EXPORT_SYMBOL(ia64_pal_call_phys_stacked) /* * Save scratch fp scratch regs which aren't saved in pt_regs already * (fp10-fp15). * * NOTE: We need to do this since firmware (SAL and PAL) may use any of the * scratch regs fp-low partition. 
* * Inputs: * in0 Address of stack storage for fp regs */ GLOBAL_ENTRY(ia64_save_scratch_fpregs) alloc r3=ar.pfs,1,0,0,0 add r2=16,in0 ;; stf.spill [in0] = f10,32 stf.spill [r2] = f11,32 ;; stf.spill [in0] = f12,32 stf.spill [r2] = f13,32 ;; stf.spill [in0] = f14,32 stf.spill [r2] = f15,32 br.ret.sptk.many rp END(ia64_save_scratch_fpregs) EXPORT_SYMBOL(ia64_save_scratch_fpregs) /* * Load scratch fp scratch regs (fp10-fp15) * * Inputs: * in0 Address of stack storage for fp regs */ GLOBAL_ENTRY(ia64_load_scratch_fpregs) alloc r3=ar.pfs,1,0,0,0 add r2=16,in0 ;; ldf.fill f10 = [in0],32 ldf.fill f11 = [r2],32 ;; ldf.fill f12 = [in0],32 ldf.fill f13 = [r2],32 ;; ldf.fill f14 = [in0],32 ldf.fill f15 = [r2],32 br.ret.sptk.many rp END(ia64_load_scratch_fpregs) EXPORT_SYMBOL(ia64_load_scratch_fpregs)
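The comments above spell out the static calling convention: the caller hands PAL a procedure index plus up to three arguments and must avoid the RSE, and a status word comes back in r8 (the default handler above simply returns -1 there), with any further return values following it. A hedged C-level sketch of how such a call is consumed follows; the struct, the stub and the procedure index are invented for illustration and are not the kernel's actual pal.h definitions.

#include <stdint.h>
#include <stdio.h>

/* Assumed shape of the status-plus-values return bundle; illustration only. */
struct pal_retval {
	int64_t  status;	/* 0 on success, negative PAL error otherwise */
	uint64_t v0, v1, v2;	/* procedure-specific return values */
};

/*
 * Stand-in for the assembly stub so the sketch is self-contained. It mimics
 * ia64_pal_default_handler, which just reports failure (status -1) when no
 * PAL entry point has been registered yet.
 */
static struct pal_retval pal_call_static(uint64_t index, uint64_t a1,
					 uint64_t a2, uint64_t a3)
{
	(void)index; (void)a1; (void)a2; (void)a3;
	return (struct pal_retval){ .status = -1 };
}

int main(void)
{
	struct pal_retval r = pal_call_static(/* hypothetical index */ 14, 0, 0, 0);

	if (r.status != 0)
		printf("PAL call failed, status %lld\n", (long long)r.status);
	return r.status ? 1 : 0;
}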
AirFortressIlikara/LS2K0300-linux-4.19
47,508
arch/ia64/kernel/entry.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/ia64/kernel/entry.S * * Kernel entry points. * * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co * David Mosberger-Tang <davidm@hpl.hp.com> * Copyright (C) 1999, 2002-2003 * Asit Mallick <Asit.K.Mallick@intel.com> * Don Dugger <Don.Dugger@intel.com> * Suresh Siddha <suresh.b.siddha@intel.com> * Fenghua Yu <fenghua.yu@intel.com> * Copyright (C) 1999 VA Linux Systems * Copyright (C) 1999 Walt Drummond <drummond@valinux.com> */ /* * ia64_switch_to now places correct virtual mapping in in TR2 for * kernel stack. This allows us to handle interrupts without changing * to physical mode. * * Jonathan Nicklin <nicklin@missioncriticallinux.com> * Patrick O'Rourke <orourke@missioncriticallinux.com> * 11/07/2000 */ /* * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> * VA Linux Systems Japan K.K. * pv_ops. */ /* * Global (preserved) predicate usage on syscall entry/exit path: * * pKStk: See entry.h. * pUStk: See entry.h. * pSys: See entry.h. * pNonSys: !pSys */ #include <asm/asmmacro.h> #include <asm/cache.h> #include <asm/errno.h> #include <asm/kregs.h> #include <asm/asm-offsets.h> #include <asm/pgtable.h> #include <asm/percpu.h> #include <asm/processor.h> #include <asm/thread_info.h> #include <asm/unistd.h> #include <asm/ftrace.h> #include <asm/export.h> #include "minstate.h" /* * execve() is special because in case of success, we need to * setup a null register window frame. */ ENTRY(ia64_execve) /* * Allocate 8 input registers since ptrace() may clobber them */ .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8) alloc loc1=ar.pfs,8,2,3,0 mov loc0=rp .body mov out0=in0 // filename ;; // stop bit between alloc and call mov out1=in1 // argv mov out2=in2 // envp br.call.sptk.many rp=sys_execve .ret0: cmp4.ge p6,p7=r8,r0 mov ar.pfs=loc1 // restore ar.pfs sxt4 r8=r8 // return 64-bit result ;; stf.spill [sp]=f0 mov rp=loc0 (p6) mov ar.pfs=r0 // clear ar.pfs on success (p7) br.ret.sptk.many rp /* * In theory, we'd have to zap this state only to prevent leaking of * security sensitive state (e.g., if current->mm->dumpable is zero). However, * this executes in less than 20 cycles even on Itanium, so it's not worth * optimizing for...). 
*/ mov ar.unat=0; mov ar.lc=0 mov r4=0; mov f2=f0; mov b1=r0 mov r5=0; mov f3=f0; mov b2=r0 mov r6=0; mov f4=f0; mov b3=r0 mov r7=0; mov f5=f0; mov b4=r0 ldf.fill f12=[sp]; mov f13=f0; mov b5=r0 ldf.fill f14=[sp]; ldf.fill f15=[sp]; mov f16=f0 ldf.fill f17=[sp]; ldf.fill f18=[sp]; mov f19=f0 ldf.fill f20=[sp]; ldf.fill f21=[sp]; mov f22=f0 ldf.fill f23=[sp]; ldf.fill f24=[sp]; mov f25=f0 ldf.fill f26=[sp]; ldf.fill f27=[sp]; mov f28=f0 ldf.fill f29=[sp]; ldf.fill f30=[sp]; mov f31=f0 br.ret.sptk.many rp END(ia64_execve) /* * sys_clone2(u64 flags, u64 ustack_base, u64 ustack_size, u64 parent_tidptr, u64 child_tidptr, * u64 tls) */ GLOBAL_ENTRY(sys_clone2) /* * Allocate 8 input registers since ptrace() may clobber them */ .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8) alloc r16=ar.pfs,8,2,6,0 DO_SAVE_SWITCH_STACK adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp mov loc0=rp mov loc1=r16 // save ar.pfs across do_fork .body mov out1=in1 mov out2=in2 tbit.nz p6,p0=in0,CLONE_SETTLS_BIT mov out3=in3 // parent_tidptr: valid only w/CLONE_PARENT_SETTID ;; (p6) st8 [r2]=in5 // store TLS in r16 for copy_thread() mov out4=in4 // child_tidptr: valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID mov out0=in0 // out0 = clone_flags br.call.sptk.many rp=do_fork .ret1: .restore sp adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack mov ar.pfs=loc1 mov rp=loc0 br.ret.sptk.many rp END(sys_clone2) /* * sys_clone(u64 flags, u64 ustack_base, u64 parent_tidptr, u64 child_tidptr, u64 tls) * Deprecated. Use sys_clone2() instead. */ GLOBAL_ENTRY(sys_clone) /* * Allocate 8 input registers since ptrace() may clobber them */ .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8) alloc r16=ar.pfs,8,2,6,0 DO_SAVE_SWITCH_STACK adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp mov loc0=rp mov loc1=r16 // save ar.pfs across do_fork .body mov out1=in1 mov out2=16 // stacksize (compensates for 16-byte scratch area) tbit.nz p6,p0=in0,CLONE_SETTLS_BIT mov out3=in2 // parent_tidptr: valid only w/CLONE_PARENT_SETTID ;; (p6) st8 [r2]=in4 // store TLS in r13 (tp) mov out4=in3 // child_tidptr: valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID mov out0=in0 // out0 = clone_flags br.call.sptk.many rp=do_fork .ret2: .restore sp adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack mov ar.pfs=loc1 mov rp=loc0 br.ret.sptk.many rp END(sys_clone) /* * prev_task <- ia64_switch_to(struct task_struct *next) * With Ingo's new scheduler, interrupts are disabled when this routine gets * called. The code starting at .map relies on this. The rest of the code * doesn't care about the interrupt masking status. */ GLOBAL_ENTRY(ia64_switch_to) .prologue alloc r16=ar.pfs,1,0,0,0 DO_SAVE_SWITCH_STACK .body adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13 movl r25=init_task mov r27=IA64_KR(CURRENT_STACK) adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0 dep r20=0,in0,61,3 // physical address of "next" ;; st8 [r22]=sp // save kernel stack pointer of old task shr.u r26=r20,IA64_GRANULE_SHIFT cmp.eq p7,p6=r25,in0 ;; /* * If we've already mapped this task's page, we can skip doing it again. 
*/ (p6) cmp.eq p7,p6=r26,r27 (p6) br.cond.dpnt .map ;; .done: ld8 sp=[r21] // load kernel stack pointer of new task MOV_TO_KR(CURRENT, in0, r8, r9) // update "current" application register mov r8=r13 // return pointer to previously running task mov r13=in0 // set "current" pointer ;; DO_LOAD_SWITCH_STACK #ifdef CONFIG_SMP sync.i // ensure "fc"s done by this CPU are visible on other CPUs #endif br.ret.sptk.many rp // boogie on out in new context .map: RSM_PSR_IC(r25) // interrupts (psr.i) are already disabled here movl r25=PAGE_KERNEL ;; srlz.d or r23=r25,r20 // construct PA | page properties mov r25=IA64_GRANULE_SHIFT<<2 ;; MOV_TO_ITIR(p0, r25, r8) MOV_TO_IFA(in0, r8) // VA of next task... ;; mov r25=IA64_TR_CURRENT_STACK MOV_TO_KR(CURRENT_STACK, r26, r8, r9) // remember last page we mapped... ;; itr.d dtr[r25]=r23 // wire in new mapping... SSM_PSR_IC_AND_SRLZ_D(r8, r9) // reenable the psr.ic bit br.cond.sptk .done END(ia64_switch_to) /* * Note that interrupts are enabled during save_switch_stack and load_switch_stack. This * means that we may get an interrupt with "sp" pointing to the new kernel stack while * ar.bspstore is still pointing to the old kernel backing store area. Since ar.rsc, * ar.rnat, ar.bsp, and ar.bspstore are all preserved by interrupts, this is not a * problem. Also, we don't need to specify unwind information for preserved registers * that are not modified in save_switch_stack as the right unwind information is already * specified at the call-site of save_switch_stack. */ /* * save_switch_stack: * - r16 holds ar.pfs * - b7 holds address to return to * - rp (b0) holds return address to save */ GLOBAL_ENTRY(save_switch_stack) .prologue .altrp b7 flushrs // flush dirty regs to backing store (must be first in insn group) .save @priunat,r17 mov r17=ar.unat // preserve caller's .body #ifdef CONFIG_ITANIUM adds r2=16+128,sp adds r3=16+64,sp adds r14=SW(R4)+16,sp ;; st8.spill [r14]=r4,16 // spill r4 lfetch.fault.excl.nt1 [r3],128 ;; lfetch.fault.excl.nt1 [r2],128 lfetch.fault.excl.nt1 [r3],128 ;; lfetch.fault.excl [r2] lfetch.fault.excl [r3] adds r15=SW(R5)+16,sp #else add r2=16+3*128,sp add r3=16,sp add r14=SW(R4)+16,sp ;; st8.spill [r14]=r4,SW(R6)-SW(R4) // spill r4 and prefetch offset 0x1c0 lfetch.fault.excl.nt1 [r3],128 // prefetch offset 0x010 ;; lfetch.fault.excl.nt1 [r3],128 // prefetch offset 0x090 lfetch.fault.excl.nt1 [r2],128 // prefetch offset 0x190 ;; lfetch.fault.excl.nt1 [r3] // prefetch offset 0x110 lfetch.fault.excl.nt1 [r2] // prefetch offset 0x210 adds r15=SW(R5)+16,sp #endif ;; st8.spill [r15]=r5,SW(R7)-SW(R5) // spill r5 mov.m ar.rsc=0 // put RSE in mode: enforced lazy, little endian, pl 0 add r2=SW(F2)+16,sp // r2 = &sw->f2 ;; st8.spill [r14]=r6,SW(B0)-SW(R6) // spill r6 mov.m r18=ar.fpsr // preserve fpsr add r3=SW(F3)+16,sp // r3 = &sw->f3 ;; stf.spill [r2]=f2,32 mov.m r19=ar.rnat mov r21=b0 stf.spill [r3]=f3,32 st8.spill [r15]=r7,SW(B2)-SW(R7) // spill r7 mov r22=b1 ;; // since we're done with the spills, read and save ar.unat: mov.m r29=ar.unat mov.m r20=ar.bspstore mov r23=b2 stf.spill [r2]=f4,32 stf.spill [r3]=f5,32 mov r24=b3 ;; st8 [r14]=r21,SW(B1)-SW(B0) // save b0 st8 [r15]=r23,SW(B3)-SW(B2) // save b2 mov r25=b4 mov r26=b5 ;; st8 [r14]=r22,SW(B4)-SW(B1) // save b1 st8 [r15]=r24,SW(AR_PFS)-SW(B3) // save b3 mov r21=ar.lc // I-unit stf.spill [r2]=f12,32 stf.spill [r3]=f13,32 ;; st8 [r14]=r25,SW(B5)-SW(B4) // save b4 st8 [r15]=r16,SW(AR_LC)-SW(AR_PFS) // save ar.pfs stf.spill [r2]=f14,32 stf.spill [r3]=f15,32 ;; st8 [r14]=r26 // save b5 st8 
[r15]=r21 // save ar.lc stf.spill [r2]=f16,32 stf.spill [r3]=f17,32 ;; stf.spill [r2]=f18,32 stf.spill [r3]=f19,32 ;; stf.spill [r2]=f20,32 stf.spill [r3]=f21,32 ;; stf.spill [r2]=f22,32 stf.spill [r3]=f23,32 ;; stf.spill [r2]=f24,32 stf.spill [r3]=f25,32 ;; stf.spill [r2]=f26,32 stf.spill [r3]=f27,32 ;; stf.spill [r2]=f28,32 stf.spill [r3]=f29,32 ;; stf.spill [r2]=f30,SW(AR_UNAT)-SW(F30) stf.spill [r3]=f31,SW(PR)-SW(F31) add r14=SW(CALLER_UNAT)+16,sp ;; st8 [r2]=r29,SW(AR_RNAT)-SW(AR_UNAT) // save ar.unat st8 [r14]=r17,SW(AR_FPSR)-SW(CALLER_UNAT) // save caller_unat mov r21=pr ;; st8 [r2]=r19,SW(AR_BSPSTORE)-SW(AR_RNAT) // save ar.rnat st8 [r3]=r21 // save predicate registers ;; st8 [r2]=r20 // save ar.bspstore st8 [r14]=r18 // save fpsr mov ar.rsc=3 // put RSE back into eager mode, pl 0 br.cond.sptk.many b7 END(save_switch_stack) /* * load_switch_stack: * - "invala" MUST be done at call site (normally in DO_LOAD_SWITCH_STACK) * - b7 holds address to return to * - must not touch r8-r11 */ GLOBAL_ENTRY(load_switch_stack) .prologue .altrp b7 .body lfetch.fault.nt1 [sp] adds r2=SW(AR_BSPSTORE)+16,sp adds r3=SW(AR_UNAT)+16,sp mov ar.rsc=0 // put RSE into enforced lazy mode adds r14=SW(CALLER_UNAT)+16,sp adds r15=SW(AR_FPSR)+16,sp ;; ld8 r27=[r2],(SW(B0)-SW(AR_BSPSTORE)) // bspstore ld8 r29=[r3],(SW(B1)-SW(AR_UNAT)) // unat ;; ld8 r21=[r2],16 // restore b0 ld8 r22=[r3],16 // restore b1 ;; ld8 r23=[r2],16 // restore b2 ld8 r24=[r3],16 // restore b3 ;; ld8 r25=[r2],16 // restore b4 ld8 r26=[r3],16 // restore b5 ;; ld8 r16=[r2],(SW(PR)-SW(AR_PFS)) // ar.pfs ld8 r17=[r3],(SW(AR_RNAT)-SW(AR_LC)) // ar.lc ;; ld8 r28=[r2] // restore pr ld8 r30=[r3] // restore rnat ;; ld8 r18=[r14],16 // restore caller's unat ld8 r19=[r15],24 // restore fpsr ;; ldf.fill f2=[r14],32 ldf.fill f3=[r15],32 ;; ldf.fill f4=[r14],32 ldf.fill f5=[r15],32 ;; ldf.fill f12=[r14],32 ldf.fill f13=[r15],32 ;; ldf.fill f14=[r14],32 ldf.fill f15=[r15],32 ;; ldf.fill f16=[r14],32 ldf.fill f17=[r15],32 ;; ldf.fill f18=[r14],32 ldf.fill f19=[r15],32 mov b0=r21 ;; ldf.fill f20=[r14],32 ldf.fill f21=[r15],32 mov b1=r22 ;; ldf.fill f22=[r14],32 ldf.fill f23=[r15],32 mov b2=r23 ;; mov ar.bspstore=r27 mov ar.unat=r29 // establish unat holding the NaT bits for r4-r7 mov b3=r24 ;; ldf.fill f24=[r14],32 ldf.fill f25=[r15],32 mov b4=r25 ;; ldf.fill f26=[r14],32 ldf.fill f27=[r15],32 mov b5=r26 ;; ldf.fill f28=[r14],32 ldf.fill f29=[r15],32 mov ar.pfs=r16 ;; ldf.fill f30=[r14],32 ldf.fill f31=[r15],24 mov ar.lc=r17 ;; ld8.fill r4=[r14],16 ld8.fill r5=[r15],16 mov pr=r28,-1 ;; ld8.fill r6=[r14],16 ld8.fill r7=[r15],16 mov ar.unat=r18 // restore caller's unat mov ar.rnat=r30 // must restore after bspstore but before rsc! mov ar.fpsr=r19 // restore fpsr mov ar.rsc=3 // put RSE back into eager mode, pl 0 br.cond.sptk.many b7 END(load_switch_stack) /* * Invoke a system call, but do some tracing before and after the call. * We MUST preserve the current register frame throughout this routine * because some system calls (such as ia64_execve) directly * manipulate ar.pfs. */ GLOBAL_ENTRY(ia64_trace_syscall) PT_REGS_UNWIND_INFO(0) /* * We need to preserve the scratch registers f6-f11 in case the system * call is sigreturn. 
*/ adds r16=PT(F6)+16,sp adds r17=PT(F7)+16,sp ;; stf.spill [r16]=f6,32 stf.spill [r17]=f7,32 ;; stf.spill [r16]=f8,32 stf.spill [r17]=f9,32 ;; stf.spill [r16]=f10 stf.spill [r17]=f11 br.call.sptk.many rp=syscall_trace_enter // give parent a chance to catch syscall args cmp.lt p6,p0=r8,r0 // check tracehook adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8 adds r3=PT(R10)+16,sp // r3 = &pt_regs.r10 mov r10=0 (p6) br.cond.sptk strace_error // syscall failed -> adds r16=PT(F6)+16,sp adds r17=PT(F7)+16,sp ;; ldf.fill f6=[r16],32 ldf.fill f7=[r17],32 ;; ldf.fill f8=[r16],32 ldf.fill f9=[r17],32 ;; ldf.fill f10=[r16] ldf.fill f11=[r17] // the syscall number may have changed, so re-load it and re-calculate the // syscall entry-point: adds r15=PT(R15)+16,sp // r15 = &pt_regs.r15 (syscall #) ;; ld8 r15=[r15] mov r3=NR_syscalls - 1 ;; adds r15=-1024,r15 movl r16=sys_call_table ;; shladd r20=r15,3,r16 // r20 = sys_call_table + 8*(syscall-1024) cmp.leu p6,p7=r15,r3 ;; (p6) ld8 r20=[r20] // load address of syscall entry point (p7) movl r20=sys_ni_syscall ;; mov b6=r20 br.call.sptk.many rp=b6 // do the syscall .strace_check_retval: cmp.lt p6,p0=r8,r0 // syscall failed? adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8 adds r3=PT(R10)+16,sp // r3 = &pt_regs.r10 mov r10=0 (p6) br.cond.sptk strace_error // syscall failed -> ;; // avoid RAW on r10 .strace_save_retval: .mem.offset 0,0; st8.spill [r2]=r8 // store return value in slot for r8 .mem.offset 8,0; st8.spill [r3]=r10 // clear error indication in slot for r10 br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value .ret3: (pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk (pUStk) rsm psr.i // disable interrupts br.cond.sptk ia64_work_pending_syscall_end strace_error: ld8 r3=[r2] // load pt_regs.r8 sub r9=0,r8 // negate return value to get errno value ;; cmp.ne p6,p0=r3,r0 // is pt_regs.r8!=0? adds r3=16,r2 // r3=&pt_regs.r10 ;; (p6) mov r10=-1 (p6) mov r8=r9 br.cond.sptk .strace_save_retval END(ia64_trace_syscall) /* * When traced and returning from sigreturn, we invoke syscall_trace but then * go straight to ia64_leave_kernel rather than ia64_leave_syscall. */ GLOBAL_ENTRY(ia64_strace_leave_kernel) PT_REGS_UNWIND_INFO(0) { /* * Some versions of gas generate bad unwind info if the first instruction of a * procedure doesn't go into the first slot of a bundle. This is a workaround. */ nop.m 0 nop.i 0 br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value } .ret4: br.cond.sptk ia64_leave_kernel END(ia64_strace_leave_kernel) ENTRY(call_payload) .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(0) /* call the kernel_thread payload; fn is in r4, arg - in r5 */ alloc loc1=ar.pfs,0,3,1,0 mov loc0=rp mov loc2=gp mov out0=r5 // arg ld8 r14 = [r4], 8 // fn.address ;; mov b6 = r14 ld8 gp = [r4] // fn.gp ;; br.call.sptk.many rp=b6 // fn(arg) .ret12: mov gp=loc2 mov rp=loc0 mov ar.pfs=loc1 /* ... and if it has returned, we are going to userland */ cmp.ne pKStk,pUStk=r0,r0 br.ret.sptk.many rp END(call_payload) GLOBAL_ENTRY(ia64_ret_from_clone) PT_REGS_UNWIND_INFO(0) { /* * Some versions of gas generate bad unwind info if the first instruction of a * procedure doesn't go into the first slot of a bundle. This is a workaround. */ nop.m 0 nop.i 0 /* * We need to call schedule_tail() to complete the scheduling process. * Called by ia64_switch_to() after do_fork()->copy_thread(). r8 contains the * address of the previously executing task. 
*/ br.call.sptk.many rp=ia64_invoke_schedule_tail } .ret8: (pKStk) br.call.sptk.many rp=call_payload adds r2=TI_FLAGS+IA64_TASK_SIZE,r13 ;; ld4 r2=[r2] ;; mov r8=0 and r2=_TIF_SYSCALL_TRACEAUDIT,r2 ;; cmp.ne p6,p0=r2,r0 (p6) br.cond.spnt .strace_check_retval ;; // added stop bits to prevent r8 dependency END(ia64_ret_from_clone) // fall through GLOBAL_ENTRY(ia64_ret_from_syscall) PT_REGS_UNWIND_INFO(0) cmp.ge p6,p7=r8,r0 // syscall executed successfully? adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8 mov r10=r0 // clear error indication in r10 (p7) br.cond.spnt handle_syscall_error // handle potential syscall failure END(ia64_ret_from_syscall) // fall through /* * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't * need to switch to bank 0 and doesn't restore the scratch registers. * To avoid leaking kernel bits, the scratch registers are set to * the following known-to-be-safe values: * * r1: restored (global pointer) * r2: cleared * r3: 1 (when returning to user-level) * r8-r11: restored (syscall return value(s)) * r12: restored (user-level stack pointer) * r13: restored (user-level thread pointer) * r14: set to __kernel_syscall_via_epc * r15: restored (syscall #) * r16-r17: cleared * r18: user-level b6 * r19: cleared * r20: user-level ar.fpsr * r21: user-level b0 * r22: cleared * r23: user-level ar.bspstore * r24: user-level ar.rnat * r25: user-level ar.unat * r26: user-level ar.pfs * r27: user-level ar.rsc * r28: user-level ip * r29: user-level psr * r30: user-level cfm * r31: user-level pr * f6-f11: cleared * pr: restored (user-level pr) * b0: restored (user-level rp) * b6: restored * b7: set to __kernel_syscall_via_epc * ar.unat: restored (user-level ar.unat) * ar.pfs: restored (user-level ar.pfs) * ar.rsc: restored (user-level ar.rsc) * ar.rnat: restored (user-level ar.rnat) * ar.bspstore: restored (user-level ar.bspstore) * ar.fpsr: restored (user-level ar.fpsr) * ar.ccv: cleared * ar.csd: cleared * ar.ssd: cleared */ GLOBAL_ENTRY(ia64_leave_syscall) PT_REGS_UNWIND_INFO(0) /* * work.need_resched etc. mustn't get changed by this CPU before it returns to * user- or fsys-mode, hence we disable interrupts early on. * * p6 controls whether current_thread_info()->flags needs to be check for * extra work. We always check for extra work when returning to user-level. * With CONFIG_PREEMPT, we also check for extra work when the preempt_count * is 0. After extra work processing has been completed, execution * resumes at ia64_work_processed_syscall with p6 set to 1 if the extra-work-check * needs to be redone. 
*/ #ifdef CONFIG_PREEMPT RSM_PSR_I(p0, r2, r18) // disable interrupts cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall (pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13 ;; .pred.rel.mutex pUStk,pKStk (pKStk) ld4 r21=[r20] // r21 <- preempt_count (pUStk) mov r21=0 // r21 <- 0 ;; cmp.eq p6,p0=r21,r0 // p6 <- pUStk || (preempt_count == 0) #else /* !CONFIG_PREEMPT */ RSM_PSR_I(pUStk, r2, r18) cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall (pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk #endif .global ia64_work_processed_syscall; ia64_work_processed_syscall: #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE adds r2=PT(LOADRS)+16,r12 MOV_FROM_ITC(pUStk, p9, r22, r19) // fetch time at leave adds r18=TI_FLAGS+IA64_TASK_SIZE,r13 ;; (p6) ld4 r31=[r18] // load current_thread_info()->flags ld8 r19=[r2],PT(B6)-PT(LOADRS) // load ar.rsc value for "loadrs" adds r3=PT(AR_BSPSTORE)+16,r12 // deferred ;; #else adds r2=PT(LOADRS)+16,r12 adds r3=PT(AR_BSPSTORE)+16,r12 adds r18=TI_FLAGS+IA64_TASK_SIZE,r13 ;; (p6) ld4 r31=[r18] // load current_thread_info()->flags ld8 r19=[r2],PT(B6)-PT(LOADRS) // load ar.rsc value for "loadrs" nop.i 0 ;; #endif mov r16=ar.bsp // M2 get existing backing store pointer ld8 r18=[r2],PT(R9)-PT(B6) // load b6 (p6) and r15=TIF_WORK_MASK,r31 // any work other than TIF_SYSCALL_TRACE? ;; ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE) // load ar.bspstore (may be garbage) (p6) cmp4.ne.unc p6,p0=r15, r0 // any special work pending? (p6) br.cond.spnt .work_pending_syscall ;; // start restoring the state saved on the kernel stack (struct pt_regs): ld8 r9=[r2],PT(CR_IPSR)-PT(R9) ld8 r11=[r3],PT(CR_IIP)-PT(R11) (pNonSys) break 0 // bug check: we shouldn't be here if pNonSys is TRUE! ;; invala // M0|1 invalidate ALAT RSM_PSR_I_IC(r28, r29, r30) // M2 turn off interrupts and interruption collection cmp.eq p9,p0=r0,r0 // A set p9 to indicate that we should restore cr.ifs ld8 r29=[r2],16 // M0|1 load cr.ipsr ld8 r28=[r3],16 // M0|1 load cr.iip #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE (pUStk) add r14=TI_AC_LEAVE+IA64_TASK_SIZE,r13 ;; ld8 r30=[r2],16 // M0|1 load cr.ifs ld8 r25=[r3],16 // M0|1 load ar.unat (pUStk) add r15=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13 ;; #else mov r22=r0 // A clear r22 ;; ld8 r30=[r2],16 // M0|1 load cr.ifs ld8 r25=[r3],16 // M0|1 load ar.unat (pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13 ;; #endif ld8 r26=[r2],PT(B0)-PT(AR_PFS) // M0|1 load ar.pfs MOV_FROM_PSR(pKStk, r22, r21) // M2 read PSR now that interrupts are disabled nop 0 ;; ld8 r21=[r2],PT(AR_RNAT)-PT(B0) // M0|1 load b0 ld8 r27=[r3],PT(PR)-PT(AR_RSC) // M0|1 load ar.rsc mov f6=f0 // F clear f6 ;; ld8 r24=[r2],PT(AR_FPSR)-PT(AR_RNAT) // M0|1 load ar.rnat (may be garbage) ld8 r31=[r3],PT(R1)-PT(PR) // M0|1 load predicates mov f7=f0 // F clear f7 ;; ld8 r20=[r2],PT(R12)-PT(AR_FPSR) // M0|1 load ar.fpsr ld8.fill r1=[r3],16 // M0|1 load r1 (pUStk) mov r17=1 // A ;; #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE (pUStk) st1 [r15]=r17 // M2|3 #else (pUStk) st1 [r14]=r17 // M2|3 #endif ld8.fill r13=[r3],16 // M0|1 mov f8=f0 // F clear f8 ;; ld8.fill r12=[r2] // M0|1 restore r12 (sp) ld8.fill r15=[r3] // M0|1 restore r15 mov b6=r18 // I0 restore b6 LOAD_PHYS_STACK_REG_SIZE(r17) mov f9=f0 // F clear f9 (pKStk) br.cond.dpnt.many skip_rbs_switch // B srlz.d // M0 ensure interruption collection is off (for cover) shr.u r18=r19,16 // I0|1 get byte size of existing "dirty" partition COVER // B add current frame into dirty partition & set cr.ifs ;; #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE mov r19=ar.bsp // M2 get new backing store 
pointer st8 [r14]=r22 // M save time at leave mov f10=f0 // F clear f10 mov r22=r0 // A clear r22 movl r14=__kernel_syscall_via_epc // X ;; #else mov r19=ar.bsp // M2 get new backing store pointer mov f10=f0 // F clear f10 nop.m 0 movl r14=__kernel_syscall_via_epc // X ;; #endif mov.m ar.csd=r0 // M2 clear ar.csd mov.m ar.ccv=r0 // M2 clear ar.ccv mov b7=r14 // I0 clear b7 (hint with __kernel_syscall_via_epc) mov.m ar.ssd=r0 // M2 clear ar.ssd mov f11=f0 // F clear f11 br.cond.sptk.many rbs_switch // B END(ia64_leave_syscall) GLOBAL_ENTRY(ia64_leave_kernel) PT_REGS_UNWIND_INFO(0) /* * work.need_resched etc. mustn't get changed by this CPU before it returns to * user- or fsys-mode, hence we disable interrupts early on. * * p6 controls whether current_thread_info()->flags needs to be check for * extra work. We always check for extra work when returning to user-level. * With CONFIG_PREEMPT, we also check for extra work when the preempt_count * is 0. After extra work processing has been completed, execution * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check * needs to be redone. */ #ifdef CONFIG_PREEMPT RSM_PSR_I(p0, r17, r31) // disable interrupts cmp.eq p0,pLvSys=r0,r0 // pLvSys=0: leave from kernel (pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13 ;; .pred.rel.mutex pUStk,pKStk (pKStk) ld4 r21=[r20] // r21 <- preempt_count (pUStk) mov r21=0 // r21 <- 0 ;; cmp.eq p6,p0=r21,r0 // p6 <- pUStk || (preempt_count == 0) #else RSM_PSR_I(pUStk, r17, r31) cmp.eq p0,pLvSys=r0,r0 // pLvSys=0: leave from kernel (pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk #endif .work_processed_kernel: adds r17=TI_FLAGS+IA64_TASK_SIZE,r13 ;; (p6) ld4 r31=[r17] // load current_thread_info()->flags adds r21=PT(PR)+16,r12 ;; lfetch [r21],PT(CR_IPSR)-PT(PR) adds r2=PT(B6)+16,r12 adds r3=PT(R16)+16,r12 ;; lfetch [r21] ld8 r28=[r2],8 // load b6 adds r29=PT(R24)+16,r12 ld8.fill r16=[r3],PT(AR_CSD)-PT(R16) adds r30=PT(AR_CCV)+16,r12 (p6) and r19=TIF_WORK_MASK,r31 // any work other than TIF_SYSCALL_TRACE? ;; ld8.fill r24=[r29] ld8 r15=[r30] // load ar.ccv (p6) cmp4.ne.unc p6,p0=r19, r0 // any special work pending? ;; ld8 r29=[r2],16 // load b7 ld8 r30=[r3],16 // load ar.csd (p6) br.cond.spnt .work_pending ;; ld8 r31=[r2],16 // load ar.ssd ld8.fill r8=[r3],16 ;; ld8.fill r9=[r2],16 ld8.fill r10=[r3],PT(R17)-PT(R10) ;; ld8.fill r11=[r2],PT(R18)-PT(R11) ld8.fill r17=[r3],16 ;; ld8.fill r18=[r2],16 ld8.fill r19=[r3],16 ;; ld8.fill r20=[r2],16 ld8.fill r21=[r3],16 mov ar.csd=r30 mov ar.ssd=r31 ;; RSM_PSR_I_IC(r23, r22, r25) // initiate turning off of interrupt and interruption collection invala // invalidate ALAT ;; ld8.fill r22=[r2],24 ld8.fill r23=[r3],24 mov b6=r28 ;; ld8.fill r25=[r2],16 ld8.fill r26=[r3],16 mov b7=r29 ;; ld8.fill r27=[r2],16 ld8.fill r28=[r3],16 ;; ld8.fill r29=[r2],16 ld8.fill r30=[r3],24 ;; ld8.fill r31=[r2],PT(F9)-PT(R31) adds r3=PT(F10)-PT(F6),r3 ;; ldf.fill f9=[r2],PT(F6)-PT(F9) ldf.fill f10=[r3],PT(F8)-PT(F10) ;; ldf.fill f6=[r2],PT(F7)-PT(F6) ;; ldf.fill f7=[r2],PT(F11)-PT(F7) ldf.fill f8=[r3],32 ;; srlz.d // ensure that inter. collection is off (VHPT is don't care, since text is pinned) mov ar.ccv=r15 ;; ldf.fill f11=[r2] BSW_0(r2, r3, r15) // switch back to bank 0 (no stop bit required beforehand...) 
;; (pUStk) mov r18=IA64_KR(CURRENT)// M2 (12 cycle read latency) adds r16=PT(CR_IPSR)+16,r12 adds r17=PT(CR_IIP)+16,r12 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE .pred.rel.mutex pUStk,pKStk MOV_FROM_PSR(pKStk, r22, r29) // M2 read PSR now that interrupts are disabled MOV_FROM_ITC(pUStk, p9, r22, r29) // M fetch time at leave nop.i 0 ;; #else MOV_FROM_PSR(pKStk, r22, r29) // M2 read PSR now that interrupts are disabled nop.i 0 nop.i 0 ;; #endif ld8 r29=[r16],16 // load cr.ipsr ld8 r28=[r17],16 // load cr.iip ;; ld8 r30=[r16],16 // load cr.ifs ld8 r25=[r17],16 // load ar.unat ;; ld8 r26=[r16],16 // load ar.pfs ld8 r27=[r17],16 // load ar.rsc cmp.eq p9,p0=r0,r0 // set p9 to indicate that we should restore cr.ifs ;; ld8 r24=[r16],16 // load ar.rnat (may be garbage) ld8 r23=[r17],16 // load ar.bspstore (may be garbage) ;; ld8 r31=[r16],16 // load predicates ld8 r21=[r17],16 // load b0 ;; ld8 r19=[r16],16 // load ar.rsc value for "loadrs" ld8.fill r1=[r17],16 // load r1 ;; ld8.fill r12=[r16],16 ld8.fill r13=[r17],16 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE (pUStk) adds r3=TI_AC_LEAVE+IA64_TASK_SIZE,r18 #else (pUStk) adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18 #endif ;; ld8 r20=[r16],16 // ar.fpsr ld8.fill r15=[r17],16 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE (pUStk) adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18 // deferred #endif ;; ld8.fill r14=[r16],16 ld8.fill r2=[r17] (pUStk) mov r17=1 ;; #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE // mmi_ : ld8 st1 shr;; mmi_ : st8 st1 shr;; // mib : mov add br -> mib : ld8 add br // bbb_ : br nop cover;; mbb_ : mov br cover;; // // no one require bsp in r16 if (pKStk) branch is selected. (pUStk) st8 [r3]=r22 // save time at leave (pUStk) st1 [r18]=r17 // restore current->thread.on_ustack shr.u r18=r19,16 // get byte size of existing "dirty" partition ;; ld8.fill r3=[r16] // deferred LOAD_PHYS_STACK_REG_SIZE(r17) (pKStk) br.cond.dpnt skip_rbs_switch mov r16=ar.bsp // get existing backing store pointer #else ld8.fill r3=[r16] (pUStk) st1 [r18]=r17 // restore current->thread.on_ustack shr.u r18=r19,16 // get byte size of existing "dirty" partition ;; mov r16=ar.bsp // get existing backing store pointer LOAD_PHYS_STACK_REG_SIZE(r17) (pKStk) br.cond.dpnt skip_rbs_switch #endif /* * Restore user backing store. * * NOTE: alloc, loadrs, and cover can't be predicated. */ (pNonSys) br.cond.dpnt dont_preserve_current_frame COVER // add current frame into dirty partition and set cr.ifs ;; mov r19=ar.bsp // get new backing store pointer rbs_switch: sub r16=r16,r18 // krbs = old bsp - size of dirty partition cmp.ne p9,p0=r0,r0 // clear p9 to skip restore of cr.ifs ;; sub r19=r19,r16 // calculate total byte size of dirty partition add r18=64,r18 // don't force in0-in7 into memory... ;; shl r19=r19,16 // shift size of dirty partition into loadrs position ;; dont_preserve_current_frame: /* * To prevent leaking bits between the kernel and user-space, * we must clear the stacked registers in the "invalid" partition here. * Not pretty, but at least it's fast (3.34 registers/cycle on Itanium, * 5 registers/cycle on McKinley). 
*/ # define pRecurse p6 # define pReturn p7 #ifdef CONFIG_ITANIUM # define Nregs 10 #else # define Nregs 14 #endif alloc loc0=ar.pfs,2,Nregs-2,2,0 shr.u loc1=r18,9 // RNaTslots <= floor(dirtySize / (64*8)) sub r17=r17,r18 // r17 = (physStackedSize + 8) - dirtySize ;; mov ar.rsc=r19 // load ar.rsc to be used for "loadrs" shladd in0=loc1,3,r17 mov in1=0 ;; TEXT_ALIGN(32) rse_clear_invalid: #ifdef CONFIG_ITANIUM // cycle 0 { .mii alloc loc0=ar.pfs,2,Nregs-2,2,0 cmp.lt pRecurse,p0=Nregs*8,in0 // if more than Nregs regs left to clear, (re)curse add out0=-Nregs*8,in0 }{ .mfb add out1=1,in1 // increment recursion count nop.f 0 nop.b 0 // can't do br.call here because of alloc (WAW on CFM) ;; }{ .mfi // cycle 1 mov loc1=0 nop.f 0 mov loc2=0 }{ .mib mov loc3=0 mov loc4=0 (pRecurse) br.call.sptk.many b0=rse_clear_invalid }{ .mfi // cycle 2 mov loc5=0 nop.f 0 cmp.ne pReturn,p0=r0,in1 // if recursion count != 0, we need to do a br.ret }{ .mib mov loc6=0 mov loc7=0 (pReturn) br.ret.sptk.many b0 } #else /* !CONFIG_ITANIUM */ alloc loc0=ar.pfs,2,Nregs-2,2,0 cmp.lt pRecurse,p0=Nregs*8,in0 // if more than Nregs regs left to clear, (re)curse add out0=-Nregs*8,in0 add out1=1,in1 // increment recursion count mov loc1=0 mov loc2=0 ;; mov loc3=0 mov loc4=0 mov loc5=0 mov loc6=0 mov loc7=0 (pRecurse) br.call.dptk.few b0=rse_clear_invalid ;; mov loc8=0 mov loc9=0 cmp.ne pReturn,p0=r0,in1 // if recursion count != 0, we need to do a br.ret mov loc10=0 mov loc11=0 (pReturn) br.ret.dptk.many b0 #endif /* !CONFIG_ITANIUM */ # undef pRecurse # undef pReturn ;; alloc r17=ar.pfs,0,0,0,0 // drop current register frame ;; loadrs ;; skip_rbs_switch: mov ar.unat=r25 // M2 (pKStk) extr.u r22=r22,21,1 // I0 extract current value of psr.pp from r22 (pLvSys)mov r19=r0 // A clear r19 for leave_syscall, no-op otherwise ;; (pUStk) mov ar.bspstore=r23 // M2 (pKStk) dep r29=r22,r29,21,1 // I0 update ipsr.pp with psr.pp (pLvSys)mov r16=r0 // A clear r16 for leave_syscall, no-op otherwise ;; MOV_TO_IPSR(p0, r29, r25) // M2 mov ar.pfs=r26 // I0 (pLvSys)mov r17=r0 // A clear r17 for leave_syscall, no-op otherwise MOV_TO_IFS(p9, r30, r25)// M2 mov b0=r21 // I0 (pLvSys)mov r18=r0 // A clear r18 for leave_syscall, no-op otherwise mov ar.fpsr=r20 // M2 MOV_TO_IIP(r28, r25) // M2 nop 0 ;; (pUStk) mov ar.rnat=r24 // M2 must happen with RSE in lazy mode nop 0 (pLvSys)mov r2=r0 mov ar.rsc=r27 // M2 mov pr=r31,-1 // I0 RFI // B /* * On entry: * r20 = &current->thread_info->pre_count (if CONFIG_PREEMPT) * r31 = current->thread_info->flags * On exit: * p6 = TRUE if work-pending-check needs to be redone * * Interrupts are disabled on entry, reenabled depend on work, and * disabled on exit. */ .work_pending_syscall: add r2=-8,r2 add r3=-8,r3 ;; st8 [r2]=r8 st8 [r3]=r10 .work_pending: tbit.z p6,p0=r31,TIF_NEED_RESCHED // is resched not needed? 
(p6) br.cond.sptk.few .notify br.call.spnt.many rp=preempt_schedule_irq .ret9: cmp.eq p6,p0=r0,r0 // p6 <- 1 (re-check) (pLvSys)br.cond.sptk.few ia64_work_pending_syscall_end br.cond.sptk.many .work_processed_kernel .notify: (pUStk) br.call.spnt.many rp=notify_resume_user .ret10: cmp.ne p6,p0=r0,r0 // p6 <- 0 (don't re-check) (pLvSys)br.cond.sptk.few ia64_work_pending_syscall_end br.cond.sptk.many .work_processed_kernel .global ia64_work_pending_syscall_end; ia64_work_pending_syscall_end: adds r2=PT(R8)+16,r12 adds r3=PT(R10)+16,r12 ;; ld8 r8=[r2] ld8 r10=[r3] br.cond.sptk.many ia64_work_processed_syscall END(ia64_leave_kernel) ENTRY(handle_syscall_error) /* * Some system calls (e.g., ptrace, mmap) can return arbitrary values which could * lead us to mistake a negative return value as a failed syscall. Those syscall * must deposit a non-zero value in pt_regs.r8 to indicate an error. If * pt_regs.r8 is zero, we assume that the call completed successfully. */ PT_REGS_UNWIND_INFO(0) ld8 r3=[r2] // load pt_regs.r8 ;; cmp.eq p6,p7=r3,r0 // is pt_regs.r8==0? ;; (p7) mov r10=-1 (p7) sub r8=0,r8 // negate return value to get errno br.cond.sptk ia64_leave_syscall END(handle_syscall_error) /* * Invoke schedule_tail(task) while preserving in0-in7, which may be needed * in case a system call gets restarted. */ GLOBAL_ENTRY(ia64_invoke_schedule_tail) .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8) alloc loc1=ar.pfs,8,2,1,0 mov loc0=rp mov out0=r8 // Address of previous task ;; br.call.sptk.many rp=schedule_tail .ret11: mov ar.pfs=loc1 mov rp=loc0 br.ret.sptk.many rp END(ia64_invoke_schedule_tail) /* * Setup stack and call do_notify_resume_user(), keeping interrupts * disabled. * * Note that pSys and pNonSys need to be set up by the caller. * We declare 8 input registers so the system call args get preserved, * in case we need to restart a system call. */ GLOBAL_ENTRY(notify_resume_user) .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8) alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart! mov r9=ar.unat mov loc0=rp // save return address mov out0=0 // there is no "oldset" adds out1=8,sp // out1=&sigscratch->ar_pfs (pSys) mov out2=1 // out2==1 => we're in a syscall ;; (pNonSys) mov out2=0 // out2==0 => not a syscall .fframe 16 .spillsp ar.unat, 16 st8 [sp]=r9,-16 // allocate space for ar.unat and save it st8 [out1]=loc1,-8 // save ar.pfs, out1=&sigscratch .body br.call.sptk.many rp=do_notify_resume_user .ret15: .restore sp adds sp=16,sp // pop scratch stack space ;; ld8 r9=[sp] // load new unat from sigscratch->scratch_unat mov rp=loc0 ;; mov ar.unat=r9 mov ar.pfs=loc1 br.ret.sptk.many rp END(notify_resume_user) ENTRY(sys_rt_sigreturn) PT_REGS_UNWIND_INFO(0) /* * Allocate 8 input registers since ptrace() may clobber them */ alloc r2=ar.pfs,8,0,1,0 .prologue PT_REGS_SAVES(16) adds sp=-16,sp .body cmp.eq pNonSys,pSys=r0,r0 // sigreturn isn't a normal syscall... ;; /* * leave_kernel() restores f6-f11 from pt_regs, but since the streamlined * syscall-entry path does not save them we save them here instead. Note: we * don't need to save any other registers that are not saved by the stream-lined * syscall path, because restore_sigcontext() restores them. 
*/ adds r16=PT(F6)+32,sp adds r17=PT(F7)+32,sp ;; stf.spill [r16]=f6,32 stf.spill [r17]=f7,32 ;; stf.spill [r16]=f8,32 stf.spill [r17]=f9,32 ;; stf.spill [r16]=f10 stf.spill [r17]=f11 adds out0=16,sp // out0 = &sigscratch br.call.sptk.many rp=ia64_rt_sigreturn .ret19: .restore sp,0 adds sp=16,sp ;; ld8 r9=[sp] // load new ar.unat mov.sptk b7=r8,ia64_leave_kernel ;; mov ar.unat=r9 br.many b7 END(sys_rt_sigreturn) GLOBAL_ENTRY(ia64_prepare_handle_unaligned) .prologue /* * r16 = fake ar.pfs, we simply need to make sure privilege is still 0 */ mov r16=r0 DO_SAVE_SWITCH_STACK br.call.sptk.many rp=ia64_handle_unaligned // stack frame setup in ivt .ret21: .body DO_LOAD_SWITCH_STACK br.cond.sptk.many rp // goes to ia64_leave_kernel END(ia64_prepare_handle_unaligned) // // unw_init_running(void (*callback)(info, arg), void *arg) // # define EXTRA_FRAME_SIZE ((UNW_FRAME_INFO_SIZE+15)&~15) GLOBAL_ENTRY(unw_init_running) .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2) alloc loc1=ar.pfs,2,3,3,0 ;; ld8 loc2=[in0],8 mov loc0=rp mov r16=loc1 DO_SAVE_SWITCH_STACK .body .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2) .fframe IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE SWITCH_STACK_SAVES(EXTRA_FRAME_SIZE) adds sp=-EXTRA_FRAME_SIZE,sp .body ;; adds out0=16,sp // &info mov out1=r13 // current adds out2=16+EXTRA_FRAME_SIZE,sp // &switch_stack br.call.sptk.many rp=unw_init_frame_info 1: adds out0=16,sp // &info mov b6=loc2 mov loc2=gp // save gp across indirect function call ;; ld8 gp=[in0] mov out1=in1 // arg br.call.sptk.many rp=b6 // invoke the callback function 1: mov gp=loc2 // restore gp // For now, we don't allow changing registers from within // unw_init_running; if we ever want to allow that, we'd // have to do a load_switch_stack here: .restore sp adds sp=IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE,sp mov ar.pfs=loc1 mov rp=loc0 br.ret.sptk.many rp END(unw_init_running) EXPORT_SYMBOL(unw_init_running) #ifdef CONFIG_FUNCTION_TRACER #ifdef CONFIG_DYNAMIC_FTRACE GLOBAL_ENTRY(_mcount) br ftrace_stub END(_mcount) EXPORT_SYMBOL(_mcount) .here: br.ret.sptk.many b0 GLOBAL_ENTRY(ftrace_caller) alloc out0 = ar.pfs, 8, 0, 4, 0 mov out3 = r0 ;; mov out2 = b0 add r3 = 0x20, r3 mov out1 = r1; br.call.sptk.many b0 = ftrace_patch_gp //this might be called from module, so we must patch gp ftrace_patch_gp: movl gp=__gp mov b0 = r3 ;; .global ftrace_call; ftrace_call: { .mlx nop.m 0x0 movl r3 = .here;; } alloc loc0 = ar.pfs, 4, 4, 2, 0 ;; mov loc1 = b0 mov out0 = b0 mov loc2 = r8 mov loc3 = r15 ;; adds out0 = -MCOUNT_INSN_SIZE, out0 mov out1 = in2 mov b6 = r3 br.call.sptk.many b0 = b6 ;; mov ar.pfs = loc0 mov b0 = loc1 mov r8 = loc2 mov r15 = loc3 br ftrace_stub ;; END(ftrace_caller) #else GLOBAL_ENTRY(_mcount) movl r2 = ftrace_stub movl r3 = ftrace_trace_function;; ld8 r3 = [r3];; ld8 r3 = [r3];; cmp.eq p7,p0 = r2, r3 (p7) br.sptk.many ftrace_stub ;; alloc loc0 = ar.pfs, 4, 4, 2, 0 ;; mov loc1 = b0 mov out0 = b0 mov loc2 = r8 mov loc3 = r15 ;; adds out0 = -MCOUNT_INSN_SIZE, out0 mov out1 = in2 mov b6 = r3 br.call.sptk.many b0 = b6 ;; mov ar.pfs = loc0 mov b0 = loc1 mov r8 = loc2 mov r15 = loc3 br ftrace_stub ;; END(_mcount) #endif GLOBAL_ENTRY(ftrace_stub) mov r3 = b0 movl r2 = _mcount_ret_helper ;; mov b6 = r2 mov b7 = r3 br.ret.sptk.many b6 _mcount_ret_helper: mov b0 = r42 mov r1 = r41 mov ar.pfs = r40 br b7 END(ftrace_stub) #endif /* CONFIG_FUNCTION_TRACER */ .rodata .align 8 .globl sys_call_table sys_call_table: data8 sys_ni_syscall // This must be sys_ni_syscall! See ivt.S. 
data8 sys_exit // 1025 data8 sys_read data8 sys_write data8 sys_open data8 sys_close data8 sys_creat // 1030 data8 sys_link data8 sys_unlink data8 ia64_execve data8 sys_chdir data8 sys_fchdir // 1035 data8 sys_utimes data8 sys_mknod data8 sys_chmod data8 sys_chown data8 sys_lseek // 1040 data8 sys_getpid data8 sys_getppid data8 sys_mount data8 sys_umount data8 sys_setuid // 1045 data8 sys_getuid data8 sys_geteuid data8 sys_ptrace data8 sys_access data8 sys_sync // 1050 data8 sys_fsync data8 sys_fdatasync data8 sys_kill data8 sys_rename data8 sys_mkdir // 1055 data8 sys_rmdir data8 sys_dup data8 sys_ia64_pipe data8 sys_times data8 ia64_brk // 1060 data8 sys_setgid data8 sys_getgid data8 sys_getegid data8 sys_acct data8 sys_ioctl // 1065 data8 sys_fcntl data8 sys_umask data8 sys_chroot data8 sys_ustat data8 sys_dup2 // 1070 data8 sys_setreuid data8 sys_setregid data8 sys_getresuid data8 sys_setresuid data8 sys_getresgid // 1075 data8 sys_setresgid data8 sys_getgroups data8 sys_setgroups data8 sys_getpgid data8 sys_setpgid // 1080 data8 sys_setsid data8 sys_getsid data8 sys_sethostname data8 sys_setrlimit data8 sys_getrlimit // 1085 data8 sys_getrusage data8 sys_gettimeofday data8 sys_settimeofday data8 sys_select data8 sys_poll // 1090 data8 sys_symlink data8 sys_readlink data8 sys_uselib data8 sys_swapon data8 sys_swapoff // 1095 data8 sys_reboot data8 sys_truncate data8 sys_ftruncate data8 sys_fchmod data8 sys_fchown // 1100 data8 ia64_getpriority data8 sys_setpriority data8 sys_statfs data8 sys_fstatfs data8 sys_gettid // 1105 data8 sys_semget data8 sys_semop data8 sys_semctl data8 sys_msgget data8 sys_msgsnd // 1110 data8 sys_msgrcv data8 sys_msgctl data8 sys_shmget data8 sys_shmat data8 sys_shmdt // 1115 data8 sys_shmctl data8 sys_syslog data8 sys_setitimer data8 sys_getitimer data8 sys_ni_syscall // 1120 /* was: ia64_oldstat */ data8 sys_ni_syscall /* was: ia64_oldlstat */ data8 sys_ni_syscall /* was: ia64_oldfstat */ data8 sys_vhangup data8 sys_lchown data8 sys_remap_file_pages // 1125 data8 sys_wait4 data8 sys_sysinfo data8 sys_clone data8 sys_setdomainname data8 sys_newuname // 1130 data8 sys_adjtimex data8 sys_ni_syscall /* was: ia64_create_module */ data8 sys_init_module data8 sys_delete_module data8 sys_ni_syscall // 1135 /* was: sys_get_kernel_syms */ data8 sys_ni_syscall /* was: sys_query_module */ data8 sys_quotactl data8 sys_bdflush data8 sys_sysfs data8 sys_personality // 1140 data8 sys_ni_syscall // sys_afs_syscall data8 sys_setfsuid data8 sys_setfsgid data8 sys_getdents data8 sys_flock // 1145 data8 sys_readv data8 sys_writev data8 sys_pread64 data8 sys_pwrite64 data8 sys_sysctl // 1150 data8 sys_mmap data8 sys_munmap data8 sys_mlock data8 sys_mlockall data8 sys_mprotect // 1155 data8 ia64_mremap data8 sys_msync data8 sys_munlock data8 sys_munlockall data8 sys_sched_getparam // 1160 data8 sys_sched_setparam data8 sys_sched_getscheduler data8 sys_sched_setscheduler data8 sys_sched_yield data8 sys_sched_get_priority_max // 1165 data8 sys_sched_get_priority_min data8 sys_sched_rr_get_interval data8 sys_nanosleep data8 sys_ni_syscall // old nfsservctl data8 sys_prctl // 1170 data8 sys_getpagesize data8 sys_mmap2 data8 sys_pciconfig_read data8 sys_pciconfig_write data8 sys_perfmonctl // 1175 data8 sys_sigaltstack data8 sys_rt_sigaction data8 sys_rt_sigpending data8 sys_rt_sigprocmask data8 sys_rt_sigqueueinfo // 1180 data8 sys_rt_sigreturn data8 sys_rt_sigsuspend data8 sys_rt_sigtimedwait data8 sys_getcwd data8 sys_capget // 1185 data8 sys_capset data8 sys_sendfile64 data8 
sys_ni_syscall // sys_getpmsg (STREAMS) data8 sys_ni_syscall // sys_putpmsg (STREAMS) data8 sys_socket // 1190 data8 sys_bind data8 sys_connect data8 sys_listen data8 sys_accept data8 sys_getsockname // 1195 data8 sys_getpeername data8 sys_socketpair data8 sys_send data8 sys_sendto data8 sys_recv // 1200 data8 sys_recvfrom data8 sys_shutdown data8 sys_setsockopt data8 sys_getsockopt data8 sys_sendmsg // 1205 data8 sys_recvmsg data8 sys_pivot_root data8 sys_mincore data8 sys_madvise data8 sys_newstat // 1210 data8 sys_newlstat data8 sys_newfstat data8 sys_clone2 data8 sys_getdents64 data8 sys_getunwind // 1215 data8 sys_readahead data8 sys_setxattr data8 sys_lsetxattr data8 sys_fsetxattr data8 sys_getxattr // 1220 data8 sys_lgetxattr data8 sys_fgetxattr data8 sys_listxattr data8 sys_llistxattr data8 sys_flistxattr // 1225 data8 sys_removexattr data8 sys_lremovexattr data8 sys_fremovexattr data8 sys_tkill data8 sys_futex // 1230 data8 sys_sched_setaffinity data8 sys_sched_getaffinity data8 sys_set_tid_address data8 sys_fadvise64_64 data8 sys_tgkill // 1235 data8 sys_exit_group data8 sys_lookup_dcookie data8 sys_io_setup data8 sys_io_destroy data8 sys_io_getevents // 1240 data8 sys_io_submit data8 sys_io_cancel data8 sys_epoll_create data8 sys_epoll_ctl data8 sys_epoll_wait // 1245 data8 sys_restart_syscall data8 sys_semtimedop data8 sys_timer_create data8 sys_timer_settime data8 sys_timer_gettime // 1250 data8 sys_timer_getoverrun data8 sys_timer_delete data8 sys_clock_settime data8 sys_clock_gettime data8 sys_clock_getres // 1255 data8 sys_clock_nanosleep data8 sys_fstatfs64 data8 sys_statfs64 data8 sys_mbind data8 sys_get_mempolicy // 1260 data8 sys_set_mempolicy data8 sys_mq_open data8 sys_mq_unlink data8 sys_mq_timedsend data8 sys_mq_timedreceive // 1265 data8 sys_mq_notify data8 sys_mq_getsetattr data8 sys_kexec_load data8 sys_ni_syscall // reserved for vserver data8 sys_waitid // 1270 data8 sys_add_key data8 sys_request_key data8 sys_keyctl data8 sys_ioprio_set data8 sys_ioprio_get // 1275 data8 sys_move_pages data8 sys_inotify_init data8 sys_inotify_add_watch data8 sys_inotify_rm_watch data8 sys_migrate_pages // 1280 data8 sys_openat data8 sys_mkdirat data8 sys_mknodat data8 sys_fchownat data8 sys_futimesat // 1285 data8 sys_newfstatat data8 sys_unlinkat data8 sys_renameat data8 sys_linkat data8 sys_symlinkat // 1290 data8 sys_readlinkat data8 sys_fchmodat data8 sys_faccessat data8 sys_pselect6 data8 sys_ppoll // 1295 data8 sys_unshare data8 sys_splice data8 sys_set_robust_list data8 sys_get_robust_list data8 sys_sync_file_range // 1300 data8 sys_tee data8 sys_vmsplice data8 sys_fallocate data8 sys_getcpu data8 sys_epoll_pwait // 1305 data8 sys_utimensat data8 sys_signalfd data8 sys_ni_syscall data8 sys_eventfd data8 sys_timerfd_create // 1310 data8 sys_timerfd_settime data8 sys_timerfd_gettime data8 sys_signalfd4 data8 sys_eventfd2 data8 sys_epoll_create1 // 1315 data8 sys_dup3 data8 sys_pipe2 data8 sys_inotify_init1 data8 sys_preadv data8 sys_pwritev // 1320 data8 sys_rt_tgsigqueueinfo data8 sys_recvmmsg data8 sys_fanotify_init data8 sys_fanotify_mark data8 sys_prlimit64 // 1325 data8 sys_name_to_handle_at data8 sys_open_by_handle_at data8 sys_clock_adjtime data8 sys_syncfs data8 sys_setns // 1330 data8 sys_sendmmsg data8 sys_process_vm_readv data8 sys_process_vm_writev data8 sys_accept4 data8 sys_finit_module // 1335 data8 sys_sched_setattr data8 sys_sched_getattr data8 sys_renameat2 data8 sys_getrandom data8 sys_memfd_create // 1340 data8 sys_bpf data8 sys_execveat data8 
sys_userfaultfd data8 sys_membarrier data8 sys_kcmp // 1345 data8 sys_mlock2 data8 sys_copy_file_range data8 sys_preadv2 data8 sys_pwritev2 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
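Editor's note (sketch): the table above is indexed by (syscall number - 1024); sys_ni_syscall occupies slot 1024 and sys_exit is 1025, as the "// 1025" comment shows, and the epc/fsys fallback paths later in this dump compute "adds r17=-1024,r15" before the "shladd ...,3,..." lookup. A minimal C rendering of that dispatch; NR_FIRST_SYSCALL, nr_syscalls and lookup_syscall are illustrative names, not kernel identifiers.

#include <stddef.h>

typedef long (*syscall_fn_t)(long, long, long, long, long, long, long, long);

#define NR_FIRST_SYSCALL 1024UL      /* sys_ni_syscall slot; sys_exit is 1025 */

extern syscall_fn_t sys_call_table[];
extern unsigned long nr_syscalls;    /* stand-in for NR_syscalls guarded by the .org above */

static syscall_fn_t lookup_syscall(unsigned long nr)
{
        unsigned long idx = nr - NR_FIRST_SYSCALL;   /* adds r17=-1024,r15 */

        if (idx >= nr_syscalls)                      /* out-of-range numbers end up as ENOSYS */
                return NULL;
        return sys_call_table[idx];                  /* shladd: table + idx*8, then ld8 */
}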
AirFortressIlikara/LS2K0300-linux-4.19
2,740
arch/ia64/kernel/gate.lds.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Linker script for gate DSO. The gate pages are an ELF shared object * prelinked to its virtual address, with only one read-only segment and * one execute-only segment (both fit in one page). This script controls * its layout. */ #include <asm/page.h> SECTIONS { . = GATE_ADDR + SIZEOF_HEADERS; .hash : { *(.hash) } :readable .gnu.hash : { *(.gnu.hash) } .dynsym : { *(.dynsym) } .dynstr : { *(.dynstr) } .gnu.version : { *(.gnu.version) } .gnu.version_d : { *(.gnu.version_d) } .gnu.version_r : { *(.gnu.version_r) } .note : { *(.note*) } :readable :note .dynamic : { *(.dynamic) } :readable :dynamic /* * This linker script is used both with -r and with -shared. For * the layouts to match, we need to skip more than enough space for * the dynamic symbol table et al. If this amount is insufficient, * ld -shared will barf. Just increase it here. */ . = GATE_ADDR + 0x600; .data..patch : { __start_gate_mckinley_e9_patchlist = .; *(.data..patch.mckinley_e9) __end_gate_mckinley_e9_patchlist = .; __start_gate_vtop_patchlist = .; *(.data..patch.vtop) __end_gate_vtop_patchlist = .; __start_gate_fsyscall_patchlist = .; *(.data..patch.fsyscall_table) __end_gate_fsyscall_patchlist = .; __start_gate_brl_fsys_bubble_down_patchlist = .; *(.data..patch.brl_fsys_bubble_down) __end_gate_brl_fsys_bubble_down_patchlist = .; } :readable .IA_64.unwind_info : { *(.IA_64.unwind_info*) } .IA_64.unwind : { *(.IA_64.unwind*) } :readable :unwind #ifdef HAVE_BUGGY_SEGREL .text (GATE_ADDR + PAGE_SIZE) : { *(.text) *(.text.*) } :readable #else . = ALIGN(PERCPU_PAGE_SIZE) + (. & (PERCPU_PAGE_SIZE - 1)); .text : { *(.text) *(.text.*) } :epc #endif /DISCARD/ : { *(.got.plt) *(.got) *(.data .data.* .gnu.linkonce.d.*) *(.dynbss) *(.bss .bss.* .gnu.linkonce.b.*) *(__ex_table) *(__mca_table) } } /* * ld does not recognize this name token; use the constant. */ #define PT_IA_64_UNWIND 0x70000001 /* * We must supply the ELF program headers explicitly to get just one * PT_LOAD segment, and set the flags explicitly to make segments read-only. */ PHDRS { readable PT_LOAD FILEHDR PHDRS FLAGS(4); /* PF_R */ #ifndef HAVE_BUGGY_SEGREL epc PT_LOAD FILEHDR PHDRS FLAGS(1); /* PF_X */ #endif dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ note PT_NOTE FLAGS(4); /* PF_R */ unwind PT_IA_64_UNWIND; } /* * This controls what symbols we export from the DSO. */ VERSION { LINUX_2.5 { global: __kernel_syscall_via_break; __kernel_syscall_via_epc; __kernel_sigtramp; local: *; }; } /* The ELF entry point can be used to set the AT_SYSINFO value. */ ENTRY(__kernel_syscall_via_epc)
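Editor's note (sketch): each .data..patch.* section gathered above is bracketed by __start_/__end_ symbols and filled with 4-byte entries written as "1b-." (see gate.S further down), i.e. self-relative offsets pointing back at the bundle to patch at boot. A rough C sketch of how such a list can be walked; walk_gate_patchlist and apply_patch are illustrative names, not the kernel's patch.c API.

#include <stdint.h>

typedef int32_t gate_patch_off_t;    /* one ".xdata4" entry: target address minus entry address */

static void walk_gate_patchlist(gate_patch_off_t *start, gate_patch_off_t *end,
                                void (*apply_patch)(uint64_t bundle_addr))
{
        for (gate_patch_off_t *p = start; p < end; p++) {
                /* the entry stores "1b - .", so adding it to the entry's own
                 * address recovers the address of the instruction to patch */
                uint64_t bundle_addr = (uint64_t)p + *p;

                apply_patch(bundle_addr);
        }
}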
AirFortressIlikara/LS2K0300-linux-4.19
7,685
arch/ia64/kernel/relocate_kernel.S
/* * arch/ia64/kernel/relocate_kernel.S * * Relocate kexec'able kernel and start it * * Copyright (C) 2005 Hewlett-Packard Development Company, L.P. * Copyright (C) 2005 Khalid Aziz <khalid.aziz@hp.com> * Copyright (C) 2005 Intel Corp, Zou Nan hai <nanhai.zou@intel.com> * * This source code is licensed under the GNU General Public License, * Version 2. See the file COPYING for more details. */ #include <asm/asmmacro.h> #include <asm/kregs.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/mca_asm.h> /* Must be relocatable PIC code callable as a C function */ GLOBAL_ENTRY(relocate_new_kernel) .prologue alloc r31=ar.pfs,4,0,0,0 .body .reloc_entry: { rsm psr.i| psr.ic mov r2=ip } ;; { flushrs // must be first insn in group srlz.i } ;; dep r2=0,r2,61,3 //to physical address ;; //first switch to physical mode add r3=1f-.reloc_entry, r2 movl r16 = IA64_PSR_AC|IA64_PSR_BN|IA64_PSR_IC mov ar.rsc=0 // put RSE in enforced lazy mode ;; add sp=(memory_stack_end - 16 - .reloc_entry),r2 add r8=(register_stack - .reloc_entry),r2 ;; mov r18=ar.rnat mov ar.bspstore=r8 ;; mov cr.ipsr=r16 mov cr.iip=r3 mov cr.ifs=r0 srlz.i ;; mov ar.rnat=r18 rfi // note: this unmask MCA/INIT (psr.mc) ;; 1: //physical mode code begin mov b6=in1 dep r28=0,in2,61,3 //to physical address // purge all TC entries #define O(member) IA64_CPUINFO_##member##_OFFSET GET_THIS_PADDR(r2, ia64_cpu_info) // load phys addr of cpu_info into r2 ;; addl r17=O(PTCE_STRIDE),r2 addl r2=O(PTCE_BASE),r2 ;; ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));; // r18=ptce_base ld4 r19=[r2],4 // r19=ptce_count[0] ld4 r21=[r17],4 // r21=ptce_stride[0] ;; ld4 r20=[r2] // r20=ptce_count[1] ld4 r22=[r17] // r22=ptce_stride[1] mov r24=r0 ;; adds r20=-1,r20 ;; #undef O 2: cmp.ltu p6,p7=r24,r19 (p7) br.cond.dpnt.few 4f mov ar.lc=r20 3: ptc.e r18 ;; add r18=r22,r18 br.cloop.sptk.few 3b ;; add r18=r21,r18 add r24=1,r24 ;; br.sptk.few 2b 4: srlz.i ;; // purge TR entry for kernel text and data movl r16=KERNEL_START mov r18=KERNEL_TR_PAGE_SHIFT<<2 ;; ptr.i r16, r18 ptr.d r16, r18 ;; srlz.i ;; // purge TR entry for pal code mov r16=in3 mov r18=IA64_GRANULE_SHIFT<<2 ;; ptr.i r16,r18 ;; srlz.i ;; // purge TR entry for stack mov r16=IA64_KR(CURRENT_STACK) ;; shl r16=r16,IA64_GRANULE_SHIFT movl r19=PAGE_OFFSET ;; add r16=r19,r16 mov r18=IA64_GRANULE_SHIFT<<2 ;; ptr.d r16,r18 ;; srlz.i ;; //copy segments movl r16=PAGE_MASK mov r30=in0 // in0 is page_list br.sptk.few .dest_page ;; .loop: ld8 r30=[in0], 8;; .dest_page: tbit.z p0, p6=r30, 0;; // 0x1 dest page (p6) and r17=r30, r16 (p6) br.cond.sptk.few .loop;; tbit.z p0, p6=r30, 1;; // 0x2 indirect page (p6) and in0=r30, r16 (p6) br.cond.sptk.few .loop;; tbit.z p0, p6=r30, 2;; // 0x4 end flag (p6) br.cond.sptk.few .end_loop;; tbit.z p6, p0=r30, 3;; // 0x8 source page (p6) br.cond.sptk.few .loop and r18=r30, r16 // simple copy page, may optimize later movl r14=PAGE_SIZE/8 - 1;; mov ar.lc=r14;; 1: ld8 r14=[r18], 8;; st8 [r17]=r14;; fc.i r17 add r17=8, r17 br.ctop.sptk.few 1b br.sptk.few .loop ;; .end_loop: sync.i // for fc.i ;; srlz.i ;; srlz.d ;; br.call.sptk.many b0=b6;; .align 32 memory_stack: .fill 8192, 1, 0 memory_stack_end: register_stack: .fill 8192, 1, 0 register_stack_end: relocate_new_kernel_end: END(relocate_new_kernel) .global relocate_new_kernel_size relocate_new_kernel_size: data8 relocate_new_kernel_end - relocate_new_kernel GLOBAL_ENTRY(ia64_dump_cpu_regs) .prologue alloc loc0=ar.pfs,1,2,0,0 .body mov ar.rsc=0 // put RSE in enforced lazy mode add loc1=4*8, in0 // save r4 and r5 first ;; { flushrs // 
flush dirty regs to backing store srlz.i } st8 [loc1]=r4, 8 ;; st8 [loc1]=r5, 8 ;; add loc1=32*8, in0 mov r4=ar.rnat ;; st8 [in0]=r0, 8 // r0 st8 [loc1]=r4, 8 // rnat mov r5=pr ;; st8 [in0]=r1, 8 // r1 st8 [loc1]=r5, 8 // pr mov r4=b0 ;; st8 [in0]=r2, 8 // r2 st8 [loc1]=r4, 8 // b0 mov r5=b1; ;; st8 [in0]=r3, 24 // r3 st8 [loc1]=r5, 8 // b1 mov r4=b2 ;; st8 [in0]=r6, 8 // r6 st8 [loc1]=r4, 8 // b2 mov r5=b3 ;; st8 [in0]=r7, 8 // r7 st8 [loc1]=r5, 8 // b3 mov r4=b4 ;; st8 [in0]=r8, 8 // r8 st8 [loc1]=r4, 8 // b4 mov r5=b5 ;; st8 [in0]=r9, 8 // r9 st8 [loc1]=r5, 8 // b5 mov r4=b6 ;; st8 [in0]=r10, 8 // r10 st8 [loc1]=r5, 8 // b6 mov r5=b7 ;; st8 [in0]=r11, 8 // r11 st8 [loc1]=r5, 8 // b7 mov r4=b0 ;; st8 [in0]=r12, 8 // r12 st8 [loc1]=r4, 8 // ip mov r5=loc0 ;; st8 [in0]=r13, 8 // r13 extr.u r5=r5, 0, 38 // ar.pfs.pfm mov r4=r0 // user mask ;; st8 [in0]=r14, 8 // r14 st8 [loc1]=r5, 8 // cfm ;; st8 [in0]=r15, 8 // r15 st8 [loc1]=r4, 8 // user mask mov r5=ar.rsc ;; st8 [in0]=r16, 8 // r16 st8 [loc1]=r5, 8 // ar.rsc mov r4=ar.bsp ;; st8 [in0]=r17, 8 // r17 st8 [loc1]=r4, 8 // ar.bsp mov r5=ar.bspstore ;; st8 [in0]=r18, 8 // r18 st8 [loc1]=r5, 8 // ar.bspstore mov r4=ar.rnat ;; st8 [in0]=r19, 8 // r19 st8 [loc1]=r4, 8 // ar.rnat mov r5=ar.ccv ;; st8 [in0]=r20, 8 // r20 st8 [loc1]=r5, 8 // ar.ccv mov r4=ar.unat ;; st8 [in0]=r21, 8 // r21 st8 [loc1]=r4, 8 // ar.unat mov r5 = ar.fpsr ;; st8 [in0]=r22, 8 // r22 st8 [loc1]=r5, 8 // ar.fpsr mov r4 = ar.unat ;; st8 [in0]=r23, 8 // r23 st8 [loc1]=r4, 8 // unat mov r5 = ar.fpsr ;; st8 [in0]=r24, 8 // r24 st8 [loc1]=r5, 8 // fpsr mov r4 = ar.pfs ;; st8 [in0]=r25, 8 // r25 st8 [loc1]=r4, 8 // ar.pfs mov r5 = ar.lc ;; st8 [in0]=r26, 8 // r26 st8 [loc1]=r5, 8 // ar.lc mov r4 = ar.ec ;; st8 [in0]=r27, 8 // r27 st8 [loc1]=r4, 8 // ar.ec mov r5 = ar.csd ;; st8 [in0]=r28, 8 // r28 st8 [loc1]=r5, 8 // ar.csd mov r4 = ar.ssd ;; st8 [in0]=r29, 8 // r29 st8 [loc1]=r4, 8 // ar.ssd ;; st8 [in0]=r30, 8 // r30 ;; st8 [in0]=r31, 8 // r31 mov ar.pfs=loc0 ;; br.ret.sptk.many rp END(ia64_dump_cpu_regs)
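Editor's note (sketch): the .loop/.dest_page walk in relocate_new_kernel above decodes the low bits of each page_list entry: bit 0 marks a destination page, bit 1 an indirect page to continue reading from, bit 2 the end of the list, and bit 3 a source page to copy. A simplified C rendering of the same loop; the PAGE_SIZE value is an example, and memcpy stands in for the ld8/st8 copy loop (which also flushes the icache with fc.i per line).

#include <stdint.h>
#include <string.h>

#define PAGE_SIZE  16384UL                 /* example value; the real one comes from asm/page.h */
#define PAGE_MASK  (~(PAGE_SIZE - 1))

static void process_page_list(const uint64_t *entry)
{
        uint64_t dest = 0;

        for (;;) {
                uint64_t e = *entry++;

                if (e & 0x1) {             /* destination page: set copy target */
                        dest = e & PAGE_MASK;
                } else if (e & 0x2) {      /* indirection: continue reading entries there */
                        entry = (const uint64_t *)(e & PAGE_MASK);
                } else if (e & 0x4) {      /* end of list */
                        break;
                } else if (e & 0x8) {      /* source page: copy it to the current destination */
                        memcpy((void *)dest, (const void *)(e & PAGE_MASK), PAGE_SIZE);
                        dest += PAGE_SIZE; /* r17 advances through the copy in the assembly */
                }
        }
}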
AirFortressIlikara/LS2K0300-linux-4.19
2,961
arch/ia64/kernel/esi_stub.S
/* * ESI call stub. * * Copyright (C) 2005 Hewlett-Packard Co * Alex Williamson <alex.williamson@hp.com> * * Based on EFI call stub by David Mosberger. The stub is virtually * identical to the one for EFI phys-mode calls, except that ESI * calls may have up to 8 arguments, so they get passed to this routine * through memory. * * This stub allows us to make ESI calls in physical mode with interrupts * turned off. ESI calls may not support calling from virtual mode. * * Google for "Extensible SAL specification" for a document describing the * ESI standard. */ /* * PSR settings as per SAL spec (Chapter 8 in the "IA-64 System * Abstraction Layer Specification", revision 2.6e). Note that * psr.dfl and psr.dfh MUST be cleared, despite what this manual says. * Otherwise, SAL dies whenever it's trying to do an IA-32 BIOS call * (the br.ia instruction fails unless psr.dfl and psr.dfh are * cleared). Fortunately, SAL promises not to touch the floating * point regs, so at least we don't have to save f2-f127. */ #define PSR_BITS_TO_CLEAR \ (IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_RT | \ IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | \ IA64_PSR_DFL | IA64_PSR_DFH) #define PSR_BITS_TO_SET \ (IA64_PSR_BN) #include <asm/processor.h> #include <asm/asmmacro.h> #include <asm/export.h> /* * Inputs: * in0 = address of function descriptor of ESI routine to call * in1 = address of array of ESI parameters * * Outputs: * r8 = result returned by called function */ GLOBAL_ENTRY(esi_call_phys) .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2) alloc loc1=ar.pfs,2,7,8,0 ld8 r2=[in0],8 // load ESI function's entry point mov loc0=rp .body ;; ld8 out0=[in1],8 // ESI params loaded from array ;; // passing all as inputs doesn't work ld8 out1=[in1],8 ;; ld8 out2=[in1],8 ;; ld8 out3=[in1],8 ;; ld8 out4=[in1],8 ;; ld8 out5=[in1],8 ;; ld8 out6=[in1],8 ;; ld8 out7=[in1] mov loc2=gp // save global pointer mov loc4=ar.rsc // save RSE configuration mov ar.rsc=0 // put RSE in enforced lazy, LE mode ;; ld8 gp=[in0] // load ESI function's global pointer movl r16=PSR_BITS_TO_CLEAR mov loc3=psr // save processor status word movl r17=PSR_BITS_TO_SET ;; or loc3=loc3,r17 mov b6=r2 ;; andcm r16=loc3,r16 // get psr with IT, DT, and RT bits cleared br.call.sptk.many rp=ia64_switch_mode_phys .ret0: mov loc5=r19 // old ar.bsp mov loc6=r20 // old sp br.call.sptk.many rp=b6 // call the ESI function .ret1: mov ar.rsc=0 // put RSE in enforced lazy, LE mode mov r16=loc3 // save virtual mode psr mov r19=loc5 // save virtual mode bspstore mov r20=loc6 // save virtual mode sp br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode .ret2: mov ar.rsc=loc4 // restore RSE configuration mov ar.pfs=loc1 mov rp=loc0 mov gp=loc2 br.ret.sptk.many rp END(esi_call_phys) EXPORT_SYMBOL_GPL(esi_call_phys)
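Editor's note (sketch): a caller-side view of the stub above. in0 is the ESI routine's function descriptor (the stub loads the entry point first, then gp) and in1 points at an array of up to eight 64-bit arguments that get loaded into out0-out7; the routine's status comes back in r8, i.e. the C return value. struct ia64_fdesc and example_esi_call below are illustrative, not declarations copied from the kernel's esi.c.

#include <stdint.h>

struct ia64_fdesc {                        /* function descriptor: entry point, then gp */
        uint64_t ip;
        uint64_t gp;
};

extern long esi_call_phys(struct ia64_fdesc *fd, uint64_t *args);

static long example_esi_call(struct ia64_fdesc *fd)
{
        uint64_t args[8] = { 0 };          /* always eight slots; unused ones stay zero */

        /* physical-mode call; per the header comment, interrupts are off inside the stub */
        return esi_call_phys(fd, args);
}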
AirFortressIlikara/LS2K0300-linux-4.19
1,135
arch/ia64/kernel/mca_drv_asm.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * File: mca_drv_asm.S * Purpose: Assembly portion of Generic MCA handling * * Copyright (C) 2004 FUJITSU LIMITED * Copyright (C) 2004 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com> */ #include <linux/threads.h> #include <asm/asmmacro.h> #include <asm/processor.h> #include <asm/ptrace.h> GLOBAL_ENTRY(mca_handler_bhhook) invala // clear RSE ? cover ;; clrrrb ;; alloc r16=ar.pfs,0,2,3,0 // make a new frame mov ar.rsc=0 mov r13=IA64_KR(CURRENT) // current task pointer ;; mov r2=r13 ;; addl r22=IA64_RBS_OFFSET,r2 ;; mov ar.bspstore=r22 addl sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2 ;; adds r2=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13 ;; st1 [r2]=r0 // clear current->thread.on_ustack flag mov loc0=r16 movl loc1=mca_handler_bh // recovery C function ;; mov out0=r8 // poisoned address mov out1=r9 // iip mov out2=r10 // psr mov b6=loc1 ;; mov loc1=rp ssm psr.ic ;; srlz.i ;; ssm psr.i br.call.sptk.many rp=b6 // does not return ... ;; mov ar.pfs=loc0 mov rp=loc1 ;; mov r8=r0 br.ret.sptk.many rp END(mca_handler_bhhook)
AirFortressIlikara/LS2K0300-linux-4.19
4,580
arch/ia64/kernel/vmlinux.lds.S
/* SPDX-License-Identifier: GPL-2.0 */ #include <asm/cache.h> #include <asm/ptrace.h> #include <asm/pgtable.h> #include <asm/thread_info.h> #include <asm-generic/vmlinux.lds.h> OUTPUT_FORMAT("elf64-ia64-little") OUTPUT_ARCH(ia64) ENTRY(phys_start) jiffies = jiffies_64; PHDRS { code PT_LOAD; percpu PT_LOAD; data PT_LOAD; note PT_NOTE; unwind 0x70000001; /* PT_IA_64_UNWIND, but ld doesn't match the name */ } SECTIONS { /* * unwind exit sections must be discarded before * the rest of the sections get included. */ /DISCARD/ : { *(.IA_64.unwind.exit.text) *(.IA_64.unwind_info.exit.text) *(.comment) *(.note) } v = PAGE_OFFSET; /* this symbol is here to make debugging easier... */ phys_start = _start - LOAD_OFFSET; code : { } :code . = KERNEL_START; _text = .; _stext = .; .text : AT(ADDR(.text) - LOAD_OFFSET) { __start_ivt_text = .; *(.text..ivt) __end_ivt_text = .; TEXT_TEXT SCHED_TEXT CPUIDLE_TEXT LOCK_TEXT KPROBES_TEXT *(.gnu.linkonce.t*) } .text2 : AT(ADDR(.text2) - LOAD_OFFSET) { *(.text2) } #ifdef CONFIG_SMP .text..lock : AT(ADDR(.text..lock) - LOAD_OFFSET) { *(.text..lock) } #endif _etext = .; /* * Read-only data */ NOTES :code :note /* put .notes in text and mark in PT_NOTE */ code_continues : { } : code /* switch back to regular program... */ EXCEPTION_TABLE(16) /* MCA table */ . = ALIGN(16); __mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET) { __start___mca_table = .; *(__mca_table) __stop___mca_table = .; } .data..patch.phys_stack_reg : AT(ADDR(.data..patch.phys_stack_reg) - LOAD_OFFSET) { __start___phys_stack_reg_patchlist = .; *(.data..patch.phys_stack_reg) __end___phys_stack_reg_patchlist = .; } /* * Global data */ _data = .; /* Unwind info & table: */ . = ALIGN(8); .IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET) { *(.IA_64.unwind_info*) } .IA_64.unwind : AT(ADDR(.IA_64.unwind) - LOAD_OFFSET) { __start_unwind = .; *(.IA_64.unwind*) __end_unwind = .; } :code :unwind code_continues2 : { } : code RODATA .opd : AT(ADDR(.opd) - LOAD_OFFSET) { __start_opd = .; *(.opd) __end_opd = .; } /* * Initialization code and data: */ . = ALIGN(PAGE_SIZE); __init_begin = .; INIT_TEXT_SECTION(PAGE_SIZE) INIT_DATA_SECTION(16) .data..patch.vtop : AT(ADDR(.data..patch.vtop) - LOAD_OFFSET) { __start___vtop_patchlist = .; *(.data..patch.vtop) __end___vtop_patchlist = .; } .data..patch.rse : AT(ADDR(.data..patch.rse) - LOAD_OFFSET) { __start___rse_patchlist = .; *(.data..patch.rse) __end___rse_patchlist = .; } .data..patch.mckinley_e9 : AT(ADDR(.data..patch.mckinley_e9) - LOAD_OFFSET) { __start___mckinley_e9_bundles = .; *(.data..patch.mckinley_e9) __end___mckinley_e9_bundles = .; } #if defined(CONFIG_IA64_GENERIC) /* Machine Vector */ . = ALIGN(16); .machvec : AT(ADDR(.machvec) - LOAD_OFFSET) { machvec_start = .; *(.machvec) machvec_end = .; } #endif #ifdef CONFIG_SMP . = ALIGN(PERCPU_PAGE_SIZE); __cpu0_per_cpu = .; . = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */ #endif . = ALIGN(PAGE_SIZE); __init_end = .; .data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) { PAGE_ALIGNED_DATA(PAGE_SIZE) . = ALIGN(PAGE_SIZE); __start_gate_section = .; *(.data..gate) __stop_gate_section = .; } /* * make sure the gate page doesn't expose * kernel data */ . = ALIGN(PAGE_SIZE); /* Per-cpu data: */ . = ALIGN(PERCPU_PAGE_SIZE); PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu) __phys_per_cpu_start = __per_cpu_load; /* * ensure percpu data fits * into percpu page size */ . 
= __phys_per_cpu_start + PERCPU_PAGE_SIZE; data : { } :data .data : AT(ADDR(.data) - LOAD_OFFSET) { _sdata = .; INIT_TASK_DATA(PAGE_SIZE) CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES) READ_MOSTLY_DATA(SMP_CACHE_BYTES) DATA_DATA *(.data1) *(.gnu.linkonce.d*) CONSTRUCTORS } BUG_TABLE . = ALIGN(16); /* gp must be 16-byte aligned for exc. table */ .got : AT(ADDR(.got) - LOAD_OFFSET) { *(.got.plt) *(.got) } __gp = ADDR(.got) + 0x200000; /* * We want the small data sections together, * so single-instruction offsets can access * them all, and initialized data all before * uninitialized, so we can shorten the * on-disk segment size. */ .sdata : AT(ADDR(.sdata) - LOAD_OFFSET) { *(.sdata) *(.sdata1) *(.srdata) } _edata = .; BSS_SECTION(0, 0, 0) _end = .; code : { } :code STABS_DEBUG DWARF_DEBUG /* Default discards */ DISCARDS }
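Editor's note (sketch): the "__gp = ADDR(.got) + 0x200000" line above biases gp by 2 MiB because gp-relative addressing on ia64 (addl) uses a 22-bit signed immediate, i.e. a window of roughly -2 MiB to +2 MiB; placing gp in the middle of that window lets single-instruction offsets reach the whole .got/.sdata area that follows. A small sketch of the resulting range check; gp_can_reach and the macro names are illustrative.

#include <stdint.h>

#define ADDL_MIN_OFF  (-(1L << 21))        /* -2 MiB: most negative 22-bit signed immediate */
#define ADDL_MAX_OFF  ((1L << 21) - 1)     /* +2 MiB - 1 */

static inline int gp_can_reach(uint64_t gp, uint64_t addr)
{
        int64_t off = (int64_t)(addr - gp);

        return off >= ADDL_MIN_OFF && off <= ADDL_MAX_OFF;
}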
AirFortressIlikara/LS2K0300-linux-4.19
11,606
arch/ia64/kernel/gate.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * This file contains the code that gets mapped at the upper end of each task's text * region. For now, it contains the signal trampoline code only. * * Copyright (C) 1999-2003 Hewlett-Packard Co * David Mosberger-Tang <davidm@hpl.hp.com> */ #include <asm/asmmacro.h> #include <asm/errno.h> #include <asm/asm-offsets.h> #include <asm/sigcontext.h> #include <asm/unistd.h> #include <asm/kregs.h> #include <asm/page.h> #include <asm/native/inst.h> /* * We can't easily refer to symbols inside the kernel. To avoid full runtime relocation, * complications with the linker (which likes to create PLT stubs for branches * to targets outside the shared object) and to avoid multi-phase kernel builds, we * simply create minimalistic "patch lists" in special ELF sections. */ .section ".data..patch.fsyscall_table", "a" .previous #define LOAD_FSYSCALL_TABLE(reg) \ [1:] movl reg=0; \ .xdata4 ".data..patch.fsyscall_table", 1b-. .section ".data..patch.brl_fsys_bubble_down", "a" .previous #define BRL_COND_FSYS_BUBBLE_DOWN(pr) \ [1:](pr)brl.cond.sptk 0; \ ;; \ .xdata4 ".data..patch.brl_fsys_bubble_down", 1b-. GLOBAL_ENTRY(__kernel_syscall_via_break) .prologue .altrp b6 .body /* * Note: for (fast) syscall restart to work, the break instruction must be * the first one in the bundle addressed by syscall_via_break. */ { .mib break 0x100000 nop.i 0 br.ret.sptk.many b6 } END(__kernel_syscall_via_break) # define ARG0_OFF (16 + IA64_SIGFRAME_ARG0_OFFSET) # define ARG1_OFF (16 + IA64_SIGFRAME_ARG1_OFFSET) # define ARG2_OFF (16 + IA64_SIGFRAME_ARG2_OFFSET) # define SIGHANDLER_OFF (16 + IA64_SIGFRAME_HANDLER_OFFSET) # define SIGCONTEXT_OFF (16 + IA64_SIGFRAME_SIGCONTEXT_OFFSET) # define FLAGS_OFF IA64_SIGCONTEXT_FLAGS_OFFSET # define CFM_OFF IA64_SIGCONTEXT_CFM_OFFSET # define FR6_OFF IA64_SIGCONTEXT_FR6_OFFSET # define BSP_OFF IA64_SIGCONTEXT_AR_BSP_OFFSET # define RNAT_OFF IA64_SIGCONTEXT_AR_RNAT_OFFSET # define UNAT_OFF IA64_SIGCONTEXT_AR_UNAT_OFFSET # define FPSR_OFF IA64_SIGCONTEXT_AR_FPSR_OFFSET # define PR_OFF IA64_SIGCONTEXT_PR_OFFSET # define RP_OFF IA64_SIGCONTEXT_IP_OFFSET # define SP_OFF IA64_SIGCONTEXT_R12_OFFSET # define RBS_BASE_OFF IA64_SIGCONTEXT_RBS_BASE_OFFSET # define LOADRS_OFF IA64_SIGCONTEXT_LOADRS_OFFSET # define base0 r2 # define base1 r3 /* * When we get here, the memory stack looks like this: * * +===============================+ * | | * // struct sigframe // * | | * +-------------------------------+ <-- sp+16 * | 16 byte of scratch | * | space | * +-------------------------------+ <-- sp * * The register stack looks _exactly_ the way it looked at the time the signal * occurred. In other words, we're treading on a potential mine-field: each * incoming general register may be a NaT value (including sp, in which case the * process ends up dying with a SIGSEGV). * * The first thing need to do is a cover to get the registers onto the backing * store. Once that is done, we invoke the signal handler which may modify some * of the machine state. After returning from the signal handler, we return * control to the previous context by executing a sigreturn system call. A signal * handler may call the rt_sigreturn() function to directly return to a given * sigcontext. However, the user-level sigreturn() needs to do much more than * calling the rt_sigreturn() system call as it needs to unwind the stack to * restore preserved registers that may have been saved on the signal handler's * call stack. 
*/ #define SIGTRAMP_SAVES \ .unwabi 3, 's'; /* mark this as a sigtramp handler (saves scratch regs) */ \ .unwabi @svr4, 's'; /* backwards compatibility with old unwinders (remove in v2.7) */ \ .savesp ar.unat, UNAT_OFF+SIGCONTEXT_OFF; \ .savesp ar.fpsr, FPSR_OFF+SIGCONTEXT_OFF; \ .savesp pr, PR_OFF+SIGCONTEXT_OFF; \ .savesp rp, RP_OFF+SIGCONTEXT_OFF; \ .savesp ar.pfs, CFM_OFF+SIGCONTEXT_OFF; \ .vframesp SP_OFF+SIGCONTEXT_OFF GLOBAL_ENTRY(__kernel_sigtramp) // describe the state that is active when we get here: .prologue SIGTRAMP_SAVES .body .label_state 1 adds base0=SIGHANDLER_OFF,sp adds base1=RBS_BASE_OFF+SIGCONTEXT_OFF,sp br.call.sptk.many rp=1f 1: ld8 r17=[base0],(ARG0_OFF-SIGHANDLER_OFF) // get pointer to signal handler's plabel ld8 r15=[base1] // get address of new RBS base (or NULL) cover // push args in interrupted frame onto backing store ;; cmp.ne p1,p0=r15,r0 // do we need to switch rbs? (note: pr is saved by kernel) mov.m r9=ar.bsp // fetch ar.bsp .spillsp.p p1, ar.rnat, RNAT_OFF+SIGCONTEXT_OFF (p1) br.cond.spnt setup_rbs // yup -> (clobbers p8, r14-r16, and r18-r20) back_from_setup_rbs: alloc r8=ar.pfs,0,0,3,0 ld8 out0=[base0],16 // load arg0 (signum) adds base1=(ARG1_OFF-(RBS_BASE_OFF+SIGCONTEXT_OFF)),base1 ;; ld8 out1=[base1] // load arg1 (siginfop) ld8 r10=[r17],8 // get signal handler entry point ;; ld8 out2=[base0] // load arg2 (sigcontextp) ld8 gp=[r17] // get signal handler's global pointer adds base0=(BSP_OFF+SIGCONTEXT_OFF),sp ;; .spillsp ar.bsp, BSP_OFF+SIGCONTEXT_OFF st8 [base0]=r9 // save sc_ar_bsp adds base0=(FR6_OFF+SIGCONTEXT_OFF),sp adds base1=(FR6_OFF+16+SIGCONTEXT_OFF),sp ;; stf.spill [base0]=f6,32 stf.spill [base1]=f7,32 ;; stf.spill [base0]=f8,32 stf.spill [base1]=f9,32 mov b6=r10 ;; stf.spill [base0]=f10,32 stf.spill [base1]=f11,32 ;; stf.spill [base0]=f12,32 stf.spill [base1]=f13,32 ;; stf.spill [base0]=f14,32 stf.spill [base1]=f15,32 br.call.sptk.many rp=b6 // call the signal handler .ret0: adds base0=(BSP_OFF+SIGCONTEXT_OFF),sp ;; ld8 r15=[base0] // fetch sc_ar_bsp mov r14=ar.bsp ;; cmp.ne p1,p0=r14,r15 // do we need to restore the rbs? 
(p1) br.cond.spnt restore_rbs // yup -> (clobbers r14-r18, f6 & f7) ;; back_from_restore_rbs: adds base0=(FR6_OFF+SIGCONTEXT_OFF),sp adds base1=(FR6_OFF+16+SIGCONTEXT_OFF),sp ;; ldf.fill f6=[base0],32 ldf.fill f7=[base1],32 ;; ldf.fill f8=[base0],32 ldf.fill f9=[base1],32 ;; ldf.fill f10=[base0],32 ldf.fill f11=[base1],32 ;; ldf.fill f12=[base0],32 ldf.fill f13=[base1],32 ;; ldf.fill f14=[base0],32 ldf.fill f15=[base1],32 mov r15=__NR_rt_sigreturn .restore sp // pop .prologue break __BREAK_SYSCALL .prologue SIGTRAMP_SAVES setup_rbs: mov ar.rsc=0 // put RSE into enforced lazy mode ;; .save ar.rnat, r19 mov r19=ar.rnat // save RNaT before switching backing store area adds r14=(RNAT_OFF+SIGCONTEXT_OFF),sp mov r18=ar.bspstore mov ar.bspstore=r15 // switch over to new register backing store area ;; .spillsp ar.rnat, RNAT_OFF+SIGCONTEXT_OFF st8 [r14]=r19 // save sc_ar_rnat .body mov.m r16=ar.bsp // sc_loadrs <- (new bsp - new bspstore) << 16 adds r14=(LOADRS_OFF+SIGCONTEXT_OFF),sp ;; invala sub r15=r16,r15 extr.u r20=r18,3,6 ;; mov ar.rsc=0xf // set RSE into eager mode, pl 3 cmp.eq p8,p0=63,r20 shl r15=r15,16 ;; st8 [r14]=r15 // save sc_loadrs (p8) st8 [r18]=r19 // if bspstore points at RNaT slot, store RNaT there now .restore sp // pop .prologue br.cond.sptk back_from_setup_rbs .prologue SIGTRAMP_SAVES .spillsp ar.rnat, RNAT_OFF+SIGCONTEXT_OFF .body restore_rbs: // On input: // r14 = bsp1 (bsp at the time of return from signal handler) // r15 = bsp0 (bsp at the time the signal occurred) // // Here, we need to calculate bspstore0, the value that ar.bspstore needs // to be set to, based on bsp0 and the size of the dirty partition on // the alternate stack (sc_loadrs >> 16). This can be done with the // following algorithm: // // bspstore0 = rse_skip_regs(bsp0, -rse_num_regs(bsp1 - (loadrs >> 19), bsp1)); // // This is what the code below does. // alloc r2=ar.pfs,0,0,0,0 // alloc null frame adds r16=(LOADRS_OFF+SIGCONTEXT_OFF),sp adds r18=(RNAT_OFF+SIGCONTEXT_OFF),sp ;; ld8 r17=[r16] ld8 r16=[r18] // get new rnat extr.u r18=r15,3,6 // r18 <- rse_slot_num(bsp0) ;; mov ar.rsc=r17 // put RSE into enforced lazy mode shr.u r17=r17,16 ;; sub r14=r14,r17 // r14 (bspstore1) <- bsp1 - (sc_loadrs >> 16) shr.u r17=r17,3 // r17 <- (sc_loadrs >> 19) ;; loadrs // restore dirty partition extr.u r14=r14,3,6 // r14 <- rse_slot_num(bspstore1) ;; add r14=r14,r17 // r14 <- rse_slot_num(bspstore1) + (sc_loadrs >> 19) ;; shr.u r14=r14,6 // r14 <- (rse_slot_num(bspstore1) + (sc_loadrs >> 19))/0x40 ;; sub r14=r14,r17 // r14 <- -rse_num_regs(bspstore1, bsp1) movl r17=0x8208208208208209 ;; add r18=r18,r14 // r18 (delta) <- rse_slot_num(bsp0) - rse_num_regs(bspstore1,bsp1) setf.sig f7=r17 cmp.lt p7,p0=r14,r0 // p7 <- (r14 < 0)? 
;; (p7) adds r18=-62,r18 // delta -= 62 ;; setf.sig f6=r18 ;; xmpy.h f6=f6,f7 ;; getf.sig r17=f6 ;; add r17=r17,r18 shr r18=r18,63 ;; shr r17=r17,5 ;; sub r17=r17,r18 // r17 = delta/63 ;; add r17=r14,r17 // r17 <- delta/63 - rse_num_regs(bspstore1, bsp1) ;; shladd r15=r17,3,r15 // r15 <- bsp0 + 8*(delta/63 - rse_num_regs(bspstore1, bsp1)) ;; mov ar.bspstore=r15 // switch back to old register backing store area ;; mov ar.rnat=r16 // restore RNaT mov ar.rsc=0xf // (will be restored later on from sc_ar_rsc) // invala not necessary as that will happen when returning to user-mode br.cond.sptk back_from_restore_rbs END(__kernel_sigtramp) /* * On entry: * r11 = saved ar.pfs * r15 = system call # * b0 = saved return address * b6 = return address * On exit: * r11 = saved ar.pfs * r15 = system call # * b0 = saved return address * all other "scratch" registers: undefined * all "preserved" registers: same as on entry */ GLOBAL_ENTRY(__kernel_syscall_via_epc) .prologue .altrp b6 .body { /* * Note: the kernel cannot assume that the first two instructions in this * bundle get executed. The remaining code must be safe even if * they do not get executed. */ adds r17=-1024,r15 // A mov r10=0 // A default to successful syscall execution epc // B causes split-issue } ;; RSM_PSR_BE_I(r20, r22) // M2 (5 cyc to srlz.d) LOAD_FSYSCALL_TABLE(r14) // X ;; mov r16=IA64_KR(CURRENT) // M2 (12 cyc) shladd r18=r17,3,r14 // A mov r19=NR_syscalls-1 // A ;; lfetch [r18] // M0|1 MOV_FROM_PSR(p0, r29, r8) // M2 (12 cyc) // If r17 is a NaT, p6 will be zero cmp.geu p6,p7=r19,r17 // A (sysnr > 0 && sysnr < 1024+NR_syscalls)? ;; mov r21=ar.fpsr // M2 (12 cyc) tnat.nz p10,p9=r15 // I0 mov.i r26=ar.pfs // I0 (would stall anyhow due to srlz.d...) ;; srlz.d // M0 (forces split-issue) ensure PSR.BE==0 (p6) ld8 r18=[r18] // M0|1 nop.i 0 ;; nop.m 0 (p6) tbit.z.unc p8,p0=r18,0 // I0 (dual-issues with "mov b7=r18"!) nop.i 0 ;; SSM_PSR_I(p8, p14, r25) (p6) mov b7=r18 // I0 (p8) br.dptk.many b7 // B mov r27=ar.rsc // M2 (12 cyc) /* * brl.cond doesn't work as intended because the linker would convert this branch * into a branch to a PLT. Perhaps there will be a way to avoid this with some * future version of the linker. In the meantime, we just use an indirect branch * instead. */ #ifdef CONFIG_ITANIUM (p6) add r14=-8,r14 // r14 <- addr of fsys_bubble_down entry ;; (p6) ld8 r14=[r14] // r14 <- fsys_bubble_down ;; (p6) mov b7=r14 (p6) br.sptk.many b7 #else BRL_COND_FSYS_BUBBLE_DOWN(p6) #endif SSM_PSR_I(p0, p14, r10) mov r10=-1 (p10) mov r8=EINVAL (p9) mov r8=ENOSYS FSYS_RETURN END(__kernel_syscall_via_epc)
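Editor's note (sketch): the restore_rbs comment above is written in terms of rse_slot_num(), rse_num_regs() and rse_skip_regs(). Below is a C sketch of those helpers, closely following what asm/rse.h provides but reproduced from memory, so treat the exact expressions as illustrative; the key fact is that every 64th backing-store slot holds an RNaT collection, which is why the counts are not simply (bsp - bspstore)/8, and why the assembly above subtracts 62 and divides by 63 with the multiply trick.

#include <stdint.h>

/* slot index (0..63) of a backing-store address; slot 63 holds the RNaT collection */
static inline unsigned long rse_slot_num(uint64_t *addr)
{
        return ((uint64_t)addr >> 3) & 0x3f;
}

/* number of stacked registers between bspstore and bsp, excluding RNaT slots */
static inline unsigned long rse_num_regs(uint64_t *bspstore, uint64_t *bsp)
{
        unsigned long slots = bsp - bspstore;

        return slots - (rse_slot_num(bspstore) + slots) / 0x40;
}

/* step ADDR forward (or backward, for negative num_regs) by num_regs registers,
 * skipping over the RNaT slots that fall inside that span */
static inline uint64_t *rse_skip_regs(uint64_t *addr, long num_regs)
{
        long delta = rse_slot_num(addr) + num_regs;

        if (num_regs < 0)
                delta -= 0x3e;                       /* the "delta -= 62" step above */
        return addr + num_regs + delta / 0x3f;       /* the divide-by-63 the asm emulates */
}

/* the computation restore_rbs performs, as stated in its own comment:
 *   bspstore0 = rse_skip_regs(bsp0, -rse_num_regs(bsp1 - (loadrs >> 19), bsp1)); */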
AirFortressIlikara/LS2K0300-linux-4.19
24,623
arch/ia64/kernel/fsys.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * This file contains the light-weight system call handlers (fsyscall-handlers). * * Copyright (C) 2003 Hewlett-Packard Co * David Mosberger-Tang <davidm@hpl.hp.com> * * 25-Sep-03 davidm Implement fsys_rt_sigprocmask(). * 18-Feb-03 louisk Implement fsys_gettimeofday(). * 28-Feb-03 davidm Fixed several bugs in fsys_gettimeofday(). Tuned it some more, * probably broke it along the way... ;-) * 13-Jul-04 clameter Implement fsys_clock_gettime and revise fsys_gettimeofday to make * it capable of using memory based clocks without falling back to C code. * 08-Feb-07 Fenghua Yu Implement fsys_getcpu. * */ #include <asm/asmmacro.h> #include <asm/errno.h> #include <asm/asm-offsets.h> #include <asm/percpu.h> #include <asm/thread_info.h> #include <asm/sal.h> #include <asm/signal.h> #include <asm/unistd.h> #include "entry.h" #include <asm/native/inst.h> /* * See Documentation/ia64/fsys.txt for details on fsyscalls. * * On entry to an fsyscall handler: * r10 = 0 (i.e., defaults to "successful syscall return") * r11 = saved ar.pfs (a user-level value) * r15 = system call number * r16 = "current" task pointer (in normal kernel-mode, this is in r13) * r32-r39 = system call arguments * b6 = return address (a user-level value) * ar.pfs = previous frame-state (a user-level value) * PSR.be = cleared to zero (i.e., little-endian byte order is in effect) * all other registers may contain values passed in from user-mode * * On return from an fsyscall handler: * r11 = saved ar.pfs (as passed into the fsyscall handler) * r15 = system call number (as passed into the fsyscall handler) * r32-r39 = system call arguments (as passed into the fsyscall handler) * b6 = return address (as passed into the fsyscall handler) * ar.pfs = previous frame-state (as passed into the fsyscall handler) */ ENTRY(fsys_ni_syscall) .prologue .altrp b6 .body mov r8=ENOSYS mov r10=-1 FSYS_RETURN END(fsys_ni_syscall) ENTRY(fsys_getpid) .prologue .altrp b6 .body add r17=IA64_TASK_SIGNAL_OFFSET,r16 ;; ld8 r17=[r17] // r17 = current->signal add r9=TI_FLAGS+IA64_TASK_SIZE,r16 ;; ld4 r9=[r9] add r17=IA64_SIGNAL_PIDS_TGID_OFFSET,r17 ;; and r9=TIF_ALLWORK_MASK,r9 ld8 r17=[r17] // r17 = current->signal->pids[PIDTYPE_TGID] ;; add r8=IA64_PID_LEVEL_OFFSET,r17 ;; ld4 r8=[r8] // r8 = pid->level add r17=IA64_PID_UPID_OFFSET,r17 // r17 = &pid->numbers[0] ;; shl r8=r8,IA64_UPID_SHIFT ;; add r17=r17,r8 // r17 = &pid->numbers[pid->level] ;; ld4 r8=[r17] // r8 = pid->numbers[pid->level].nr ;; mov r17=0 ;; cmp.ne p8,p0=0,r9 (p8) br.spnt.many fsys_fallback_syscall FSYS_RETURN END(fsys_getpid) ENTRY(fsys_set_tid_address) .prologue .altrp b6 .body add r9=TI_FLAGS+IA64_TASK_SIZE,r16 add r17=IA64_TASK_THREAD_PID_OFFSET,r16 ;; ld4 r9=[r9] tnat.z p6,p7=r32 // check argument register for being NaT ld8 r17=[r17] // r17 = current->thread_pid ;; and r9=TIF_ALLWORK_MASK,r9 add r8=IA64_PID_LEVEL_OFFSET,r17 add r18=IA64_TASK_CLEAR_CHILD_TID_OFFSET,r16 ;; ld4 r8=[r8] // r8 = pid->level add r17=IA64_PID_UPID_OFFSET,r17 // r17 = &pid->numbers[0] ;; shl r8=r8,IA64_UPID_SHIFT ;; add r17=r17,r8 // r17 = &pid->numbers[pid->level] ;; ld4 r8=[r17] // r8 = pid->numbers[pid->level].nr ;; cmp.ne p8,p0=0,r9 mov r17=-1 ;; (p6) st8 [r18]=r32 (p7) st8 [r18]=r17 (p8) br.spnt.many fsys_fallback_syscall ;; mov r17=0 // i must not leak kernel bits... mov r18=0 // i must not leak kernel bits... 
FSYS_RETURN END(fsys_set_tid_address) #if IA64_GTOD_SEQ_OFFSET !=0 #error fsys_gettimeofday incompatible with changes to struct fsyscall_gtod_data_t #endif #if IA64_ITC_JITTER_OFFSET !=0 #error fsys_gettimeofday incompatible with changes to struct itc_jitter_data_t #endif #define CLOCK_REALTIME 0 #define CLOCK_MONOTONIC 1 #define CLOCK_DIVIDE_BY_1000 0x4000 #define CLOCK_ADD_MONOTONIC 0x8000 ENTRY(fsys_gettimeofday) .prologue .altrp b6 .body mov r31 = r32 tnat.nz p6,p0 = r33 // guard against NaT argument (p6) br.cond.spnt.few .fail_einval mov r30 = CLOCK_DIVIDE_BY_1000 ;; .gettime: // Register map // Incoming r31 = pointer to address where to place result // r30 = flags determining how time is processed // r2,r3 = temp r4-r7 preserved // r8 = result nanoseconds // r9 = result seconds // r10 = temporary storage for clock difference // r11 = preserved: saved ar.pfs // r12 = preserved: memory stack // r13 = preserved: thread pointer // r14 = address of mask / mask value // r15 = preserved: system call number // r16 = preserved: current task pointer // r17 = (not used) // r18 = (not used) // r19 = address of itc_lastcycle // r20 = struct fsyscall_gtod_data (= address of gtod_lock.sequence) // r21 = address of mmio_ptr // r22 = address of wall_time or monotonic_time // r23 = address of shift / value // r24 = address mult factor / cycle_last value // r25 = itc_lastcycle value // r26 = address clocksource cycle_last // r27 = (not used) // r28 = sequence number at the beginning of critcal section // r29 = address of itc_jitter // r30 = time processing flags / memory address // r31 = pointer to result // Predicates // p6,p7 short term use // p8 = timesource ar.itc // p9 = timesource mmio64 // p10 = timesource mmio32 - not used // p11 = timesource not to be handled by asm code // p12 = memory time source ( = p9 | p10) - not used // p13 = do cmpxchg with itc_lastcycle // p14 = Divide by 1000 // p15 = Add monotonic // // Note that instructions are optimized for McKinley. McKinley can // process two bundles simultaneously and therefore we continuously // try to feed the CPU two bundles and then a stop. 
add r2 = TI_FLAGS+IA64_TASK_SIZE,r16 tnat.nz p6,p0 = r31 // guard against Nat argument (p6) br.cond.spnt.few .fail_einval movl r20 = fsyscall_gtod_data // load fsyscall gettimeofday data address ;; ld4 r2 = [r2] // process work pending flags movl r29 = itc_jitter_data // itc_jitter add r22 = IA64_GTOD_WALL_TIME_OFFSET,r20 // wall_time add r21 = IA64_CLKSRC_MMIO_OFFSET,r20 mov pr = r30,0xc000 // Set predicates according to function ;; and r2 = TIF_ALLWORK_MASK,r2 add r19 = IA64_ITC_LASTCYCLE_OFFSET,r29 (p15) add r22 = IA64_GTOD_MONO_TIME_OFFSET,r20 // monotonic_time ;; add r26 = IA64_CLKSRC_CYCLE_LAST_OFFSET,r20 // clksrc_cycle_last cmp.ne p6, p0 = 0, r2 // Fallback if work is scheduled (p6) br.cond.spnt.many fsys_fallback_syscall ;; // Begin critical section .time_redo: ld4.acq r28 = [r20] // gtod_lock.sequence, Must take first ;; and r28 = ~1,r28 // And make sequence even to force retry if odd ;; ld8 r30 = [r21] // clocksource->mmio_ptr add r24 = IA64_CLKSRC_MULT_OFFSET,r20 ld4 r2 = [r29] // itc_jitter value add r23 = IA64_CLKSRC_SHIFT_OFFSET,r20 add r14 = IA64_CLKSRC_MASK_OFFSET,r20 ;; ld4 r3 = [r24] // clocksource mult value ld8 r14 = [r14] // clocksource mask value cmp.eq p8,p9 = 0,r30 // use cpu timer if no mmio_ptr ;; setf.sig f7 = r3 // Setup for mult scaling of counter (p8) cmp.ne p13,p0 = r2,r0 // need itc_jitter compensation, set p13 ld4 r23 = [r23] // clocksource shift value ld8 r24 = [r26] // get clksrc_cycle_last value (p9) cmp.eq p13,p0 = 0,r30 // if mmio_ptr, clear p13 jitter control ;; .pred.rel.mutex p8,p9 MOV_FROM_ITC(p8, p6, r2, r10) // CPU_TIMER. 36 clocks latency!!! (p9) ld8 r2 = [r30] // MMIO_TIMER. Could also have latency issues.. (p13) ld8 r25 = [r19] // get itc_lastcycle value ld8 r9 = [r22],IA64_TIME_SN_SPEC_SNSEC_OFFSET // sec ;; ld8 r8 = [r22],-IA64_TIME_SN_SPEC_SNSEC_OFFSET // snsec (p13) sub r3 = r25,r2 // Diff needed before comparison (thanks davidm) ;; (p13) cmp.gt.unc p6,p7 = r3,r0 // check if it is less than last. p6,p7 cleared sub r10 = r2,r24 // current_cycle - last_cycle ;; (p6) sub r10 = r25,r24 // time we got was less than last_cycle (p7) mov ar.ccv = r25 // more than last_cycle. Prep for cmpxchg ;; (p7) cmpxchg8.rel r3 = [r19],r2,ar.ccv ;; (p7) cmp.ne p7,p0 = r25,r3 // if cmpxchg not successful ;; (p7) sub r10 = r3,r24 // then use new last_cycle instead ;; and r10 = r10,r14 // Apply mask ;; setf.sig f8 = r10 nop.i 123 ;; // fault check takes 5 cycles and we have spare time EX(.fail_efault, probe.w.fault r31, 3) xmpy.l f8 = f8,f7 // nsec_per_cyc*(counter-last_counter) ;; getf.sig r2 = f8 mf ;; ld4 r10 = [r20] // gtod_lock.sequence add r8 = r8,r2 // Add xtime.nsecs ;; shr.u r8 = r8,r23 // shift by factor cmp4.ne p7,p0 = r28,r10 (p7) br.cond.dpnt.few .time_redo // sequence number changed, redo // End critical section. // Now r8=tv->tv_nsec and r9=tv->tv_sec mov r10 = r0 movl r2 = 1000000000 add r23 = IA64_TIMESPEC_TV_NSEC_OFFSET, r31 (p14) movl r3 = 2361183241434822607 // Prep for / 1000 hack ;; .time_normalize: mov r21 = r8 cmp.ge p6,p0 = r8,r2 (p14) shr.u r20 = r8, 3 // We can repeat this if necessary just wasting time ;; (p14) setf.sig f8 = r20 (p6) sub r8 = r8,r2 (p6) add r9 = 1,r9 // two nops before the branch. (p14) setf.sig f7 = r3 // Chances for repeats are 1 in 10000 for gettod (p6) br.cond.dpnt.few .time_normalize ;; // Divided by 8 though shift. 
Now divide by 125 // The compiler was able to do that with a multiply // and a shift and we do the same EX(.fail_efault, probe.w.fault r23, 3) // This also costs 5 cycles (p14) xmpy.hu f8 = f8, f7 // xmpy has 5 cycles latency so use it ;; (p14) getf.sig r2 = f8 ;; mov r8 = r0 (p14) shr.u r21 = r2, 4 ;; EX(.fail_efault, st8 [r31] = r9) EX(.fail_efault, st8 [r23] = r21) FSYS_RETURN .fail_einval: mov r8 = EINVAL mov r10 = -1 FSYS_RETURN .fail_efault: mov r8 = EFAULT mov r10 = -1 FSYS_RETURN END(fsys_gettimeofday) ENTRY(fsys_clock_gettime) .prologue .altrp b6 .body cmp4.ltu p6, p0 = CLOCK_MONOTONIC, r32 // Fallback if this is not CLOCK_REALTIME or CLOCK_MONOTONIC (p6) br.spnt.few fsys_fallback_syscall mov r31 = r33 shl r30 = r32,15 br.many .gettime END(fsys_clock_gettime) /* * fsys_getcpu doesn't use the third parameter in this implementation. It reads * current_thread_info()->cpu and corresponding node in cpu_to_node_map. */ ENTRY(fsys_getcpu) .prologue .altrp b6 .body ;; add r2=TI_FLAGS+IA64_TASK_SIZE,r16 tnat.nz p6,p0 = r32 // guard against NaT argument add r3=TI_CPU+IA64_TASK_SIZE,r16 ;; ld4 r3=[r3] // M r3 = thread_info->cpu ld4 r2=[r2] // M r2 = thread_info->flags (p6) br.cond.spnt.few .fail_einval // B ;; tnat.nz p7,p0 = r33 // I guard against NaT argument (p7) br.cond.spnt.few .fail_einval // B ;; cmp.ne p6,p0=r32,r0 cmp.ne p7,p0=r33,r0 ;; #ifdef CONFIG_NUMA movl r17=cpu_to_node_map ;; EX(.fail_efault, (p6) probe.w.fault r32, 3) // M This takes 5 cycles EX(.fail_efault, (p7) probe.w.fault r33, 3) // M This takes 5 cycles shladd r18=r3,1,r17 ;; ld2 r20=[r18] // r20 = cpu_to_node_map[cpu] and r2 = TIF_ALLWORK_MASK,r2 ;; cmp.ne p8,p0=0,r2 (p8) br.spnt.many fsys_fallback_syscall ;; ;; EX(.fail_efault, (p6) st4 [r32] = r3) EX(.fail_efault, (p7) st2 [r33] = r20) mov r8=0 ;; #else EX(.fail_efault, (p6) probe.w.fault r32, 3) // M This takes 5 cycles EX(.fail_efault, (p7) probe.w.fault r33, 3) // M This takes 5 cycles and r2 = TIF_ALLWORK_MASK,r2 ;; cmp.ne p8,p0=0,r2 (p8) br.spnt.many fsys_fallback_syscall ;; EX(.fail_efault, (p6) st4 [r32] = r3) EX(.fail_efault, (p7) st2 [r33] = r0) mov r8=0 ;; #endif FSYS_RETURN END(fsys_getcpu) ENTRY(fsys_fallback_syscall) .prologue .altrp b6 .body /* * We only get here from light-weight syscall handlers. Thus, we already * know that r15 contains a valid syscall number. No need to re-check. */ adds r17=-1024,r15 movl r14=sys_call_table ;; RSM_PSR_I(p0, r26, r27) shladd r18=r17,3,r14 ;; ld8 r18=[r18] // load normal (heavy-weight) syscall entry-point MOV_FROM_PSR(p0, r29, r26) // read psr (12 cyc load latency) mov r27=ar.rsc mov r21=ar.fpsr mov r26=ar.pfs END(fsys_fallback_syscall) /* FALL THROUGH */ GLOBAL_ENTRY(fsys_bubble_down) .prologue .altrp b6 .body /* * We get here for syscalls that don't have a lightweight * handler. For those, we need to bubble down into the kernel * and that requires setting up a minimal pt_regs structure, * and initializing the CPU state more or less as if an * interruption had occurred. To make syscall-restarts work, * we setup pt_regs such that cr_iip points to the second * instruction in syscall_via_break. Decrementing the IP * hence will restart the syscall via break and not * decrementing IP will return us to the caller, as usual. * Note that we preserve the value of psr.pp rather than * initializing it from dcr.pp. This makes it possible to * distinguish fsyscall execution from other privileged * execution. 
* * On entry: * - normal fsyscall handler register usage, except * that we also have: * - r18: address of syscall entry point * - r21: ar.fpsr * - r26: ar.pfs * - r27: ar.rsc * - r29: psr * * We used to clear some PSR bits here but that requires slow * serialization. Fortuntely, that isn't really necessary. * The rationale is as follows: we used to clear bits * ~PSR_PRESERVED_BITS in PSR.L. Since * PSR_PRESERVED_BITS==PSR.{UP,MFL,MFH,PK,DT,PP,SP,RT,IC}, we * ended up clearing PSR.{BE,AC,I,DFL,DFH,DI,DB,SI,TB}. * However, * * PSR.BE : already is turned off in __kernel_syscall_via_epc() * PSR.AC : don't care (kernel normally turns PSR.AC on) * PSR.I : already turned off by the time fsys_bubble_down gets * invoked * PSR.DFL: always 0 (kernel never turns it on) * PSR.DFH: don't care --- kernel never touches f32-f127 on its own * initiative * PSR.DI : always 0 (kernel never turns it on) * PSR.SI : always 0 (kernel never turns it on) * PSR.DB : don't care --- kernel never enables kernel-level * breakpoints * PSR.TB : must be 0 already; if it wasn't zero on entry to * __kernel_syscall_via_epc, the branch to fsys_bubble_down * will trigger a taken branch; the taken-trap-handler then * converts the syscall into a break-based system-call. */ /* * Reading psr.l gives us only bits 0-31, psr.it, and psr.mc. * The rest we have to synthesize. */ # define PSR_ONE_BITS ((3 << IA64_PSR_CPL0_BIT) \ | (0x1 << IA64_PSR_RI_BIT) \ | IA64_PSR_BN | IA64_PSR_I) invala // M0|1 movl r14=ia64_ret_from_syscall // X nop.m 0 movl r28=__kernel_syscall_via_break // X create cr.iip ;; mov r2=r16 // A get task addr to addl-addressable register adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 // A mov r31=pr // I0 save pr (2 cyc) ;; st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag addl r22=IA64_RBS_OFFSET,r2 // A compute base of RBS add r3=TI_FLAGS+IA64_TASK_SIZE,r2 // A ;; ld4 r3=[r3] // M0|1 r3 = current_thread_info()->flags lfetch.fault.excl.nt1 [r22] // M0|1 prefetch register backing-store nop.i 0 ;; mov ar.rsc=0 // M2 set enforced lazy mode, pl 0, LE, loadrs=0 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE MOV_FROM_ITC(p0, p6, r30, r23) // M get cycle for accounting #else nop.m 0 #endif nop.i 0 ;; mov r23=ar.bspstore // M2 (12 cyc) save ar.bspstore mov.m r24=ar.rnat // M2 (5 cyc) read ar.rnat (dual-issues!) 
nop.i 0 ;; mov ar.bspstore=r22 // M2 (6 cyc) switch to kernel RBS movl r8=PSR_ONE_BITS // X ;; mov r25=ar.unat // M2 (5 cyc) save ar.unat mov r19=b6 // I0 save b6 (2 cyc) mov r20=r1 // A save caller's gp in r20 ;; or r29=r8,r29 // A construct cr.ipsr value to save mov b6=r18 // I0 copy syscall entry-point to b6 (7 cyc) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2 // A compute base of memory stack mov r18=ar.bsp // M2 save (kernel) ar.bsp (12 cyc) cmp.ne pKStk,pUStk=r0,r0 // A set pKStk <- 0, pUStk <- 1 br.call.sptk.many b7=ia64_syscall_setup // B ;; #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE // mov.m r30=ar.itc is called in advance add r16=TI_AC_STAMP+IA64_TASK_SIZE,r2 add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r2 ;; ld8 r18=[r16],TI_AC_STIME-TI_AC_STAMP // time at last check in kernel ld8 r19=[r17],TI_AC_UTIME-TI_AC_LEAVE // time at leave kernel ;; ld8 r20=[r16],TI_AC_STAMP-TI_AC_STIME // cumulated stime ld8 r21=[r17] // cumulated utime sub r22=r19,r18 // stime before leave kernel ;; st8 [r16]=r30,TI_AC_STIME-TI_AC_STAMP // update stamp sub r18=r30,r19 // elapsed time in user mode ;; add r20=r20,r22 // sum stime add r21=r21,r18 // sum utime ;; st8 [r16]=r20 // update stime st8 [r17]=r21 // update utime ;; #endif mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0 mov rp=r14 // I0 set the real return addr and r3=_TIF_SYSCALL_TRACEAUDIT,r3 // A ;; SSM_PSR_I(p0, p6, r22) // M2 we're on kernel stacks now, reenable irqs cmp.eq p8,p0=r3,r0 // A (p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT nop.m 0 (p8) br.call.sptk.many b6=b6 // B (ignore return address) br.cond.spnt ia64_trace_syscall // B END(fsys_bubble_down) .rodata .align 8 .globl fsyscall_table data8 fsys_bubble_down fsyscall_table: data8 fsys_ni_syscall data8 0 // exit // 1025 data8 0 // read data8 0 // write data8 0 // open data8 0 // close data8 0 // creat // 1030 data8 0 // link data8 0 // unlink data8 0 // execve data8 0 // chdir data8 0 // fchdir // 1035 data8 0 // utimes data8 0 // mknod data8 0 // chmod data8 0 // chown data8 0 // lseek // 1040 data8 fsys_getpid // getpid data8 0 // getppid data8 0 // mount data8 0 // umount data8 0 // setuid // 1045 data8 0 // getuid data8 0 // geteuid data8 0 // ptrace data8 0 // access data8 0 // sync // 1050 data8 0 // fsync data8 0 // fdatasync data8 0 // kill data8 0 // rename data8 0 // mkdir // 1055 data8 0 // rmdir data8 0 // dup data8 0 // pipe data8 0 // times data8 0 // brk // 1060 data8 0 // setgid data8 0 // getgid data8 0 // getegid data8 0 // acct data8 0 // ioctl // 1065 data8 0 // fcntl data8 0 // umask data8 0 // chroot data8 0 // ustat data8 0 // dup2 // 1070 data8 0 // setreuid data8 0 // setregid data8 0 // getresuid data8 0 // setresuid data8 0 // getresgid // 1075 data8 0 // setresgid data8 0 // getgroups data8 0 // setgroups data8 0 // getpgid data8 0 // setpgid // 1080 data8 0 // setsid data8 0 // getsid data8 0 // sethostname data8 0 // setrlimit data8 0 // getrlimit // 1085 data8 0 // getrusage data8 fsys_gettimeofday // gettimeofday data8 0 // settimeofday data8 0 // select data8 0 // poll // 1090 data8 0 // symlink data8 0 // readlink data8 0 // uselib data8 0 // swapon data8 0 // swapoff // 1095 data8 0 // reboot data8 0 // truncate data8 0 // ftruncate data8 0 // fchmod data8 0 // fchown // 1100 data8 0 // getpriority data8 0 // setpriority data8 0 // statfs data8 0 // fstatfs data8 0 // gettid // 1105 data8 0 // semget data8 0 // semop data8 0 // semctl data8 0 // msgget data8 0 // msgsnd // 1110 data8 0 // msgrcv data8 0 // 
msgctl data8 0 // shmget data8 0 // shmat data8 0 // shmdt // 1115 data8 0 // shmctl data8 0 // syslog data8 0 // setitimer data8 0 // getitimer data8 0 // 1120 data8 0 data8 0 data8 0 // vhangup data8 0 // lchown data8 0 // remap_file_pages // 1125 data8 0 // wait4 data8 0 // sysinfo data8 0 // clone data8 0 // setdomainname data8 0 // newuname // 1130 data8 0 // adjtimex data8 0 data8 0 // init_module data8 0 // delete_module data8 0 // 1135 data8 0 data8 0 // quotactl data8 0 // bdflush data8 0 // sysfs data8 0 // personality // 1140 data8 0 // afs_syscall data8 0 // setfsuid data8 0 // setfsgid data8 0 // getdents data8 0 // flock // 1145 data8 0 // readv data8 0 // writev data8 0 // pread64 data8 0 // pwrite64 data8 0 // sysctl // 1150 data8 0 // mmap data8 0 // munmap data8 0 // mlock data8 0 // mlockall data8 0 // mprotect // 1155 data8 0 // mremap data8 0 // msync data8 0 // munlock data8 0 // munlockall data8 0 // sched_getparam // 1160 data8 0 // sched_setparam data8 0 // sched_getscheduler data8 0 // sched_setscheduler data8 0 // sched_yield data8 0 // sched_get_priority_max // 1165 data8 0 // sched_get_priority_min data8 0 // sched_rr_get_interval data8 0 // nanosleep data8 0 // nfsservctl data8 0 // prctl // 1170 data8 0 // getpagesize data8 0 // mmap2 data8 0 // pciconfig_read data8 0 // pciconfig_write data8 0 // perfmonctl // 1175 data8 0 // sigaltstack data8 0 // rt_sigaction data8 0 // rt_sigpending data8 0 // rt_sigprocmask data8 0 // rt_sigqueueinfo // 1180 data8 0 // rt_sigreturn data8 0 // rt_sigsuspend data8 0 // rt_sigtimedwait data8 0 // getcwd data8 0 // capget // 1185 data8 0 // capset data8 0 // sendfile data8 0 data8 0 data8 0 // socket // 1190 data8 0 // bind data8 0 // connect data8 0 // listen data8 0 // accept data8 0 // getsockname // 1195 data8 0 // getpeername data8 0 // socketpair data8 0 // send data8 0 // sendto data8 0 // recv // 1200 data8 0 // recvfrom data8 0 // shutdown data8 0 // setsockopt data8 0 // getsockopt data8 0 // sendmsg // 1205 data8 0 // recvmsg data8 0 // pivot_root data8 0 // mincore data8 0 // madvise data8 0 // newstat // 1210 data8 0 // newlstat data8 0 // newfstat data8 0 // clone2 data8 0 // getdents64 data8 0 // getunwind // 1215 data8 0 // readahead data8 0 // setxattr data8 0 // lsetxattr data8 0 // fsetxattr data8 0 // getxattr // 1220 data8 0 // lgetxattr data8 0 // fgetxattr data8 0 // listxattr data8 0 // llistxattr data8 0 // flistxattr // 1225 data8 0 // removexattr data8 0 // lremovexattr data8 0 // fremovexattr data8 0 // tkill data8 0 // futex // 1230 data8 0 // sched_setaffinity data8 0 // sched_getaffinity data8 fsys_set_tid_address // set_tid_address data8 0 // fadvise64_64 data8 0 // tgkill // 1235 data8 0 // exit_group data8 0 // lookup_dcookie data8 0 // io_setup data8 0 // io_destroy data8 0 // io_getevents // 1240 data8 0 // io_submit data8 0 // io_cancel data8 0 // epoll_create data8 0 // epoll_ctl data8 0 // epoll_wait // 1245 data8 0 // restart_syscall data8 0 // semtimedop data8 0 // timer_create data8 0 // timer_settime data8 0 // timer_gettime // 1250 data8 0 // timer_getoverrun data8 0 // timer_delete data8 0 // clock_settime data8 fsys_clock_gettime // clock_gettime data8 0 // clock_getres // 1255 data8 0 // clock_nanosleep data8 0 // fstatfs64 data8 0 // statfs64 data8 0 // mbind data8 0 // get_mempolicy // 1260 data8 0 // set_mempolicy data8 0 // mq_open data8 0 // mq_unlink data8 0 // mq_timedsend data8 0 // mq_timedreceive // 1265 data8 0 // mq_notify data8 0 // mq_getsetattr data8 0 // 
kexec_load data8 0 // vserver data8 0 // waitid // 1270 data8 0 // add_key data8 0 // request_key data8 0 // keyctl data8 0 // ioprio_set data8 0 // ioprio_get // 1275 data8 0 // move_pages data8 0 // inotify_init data8 0 // inotify_add_watch data8 0 // inotify_rm_watch data8 0 // migrate_pages // 1280 data8 0 // openat data8 0 // mkdirat data8 0 // mknodat data8 0 // fchownat data8 0 // futimesat // 1285 data8 0 // newfstatat data8 0 // unlinkat data8 0 // renameat data8 0 // linkat data8 0 // symlinkat // 1290 data8 0 // readlinkat data8 0 // fchmodat data8 0 // faccessat data8 0 data8 0 // 1295 data8 0 // unshare data8 0 // splice data8 0 // set_robust_list data8 0 // get_robust_list data8 0 // sync_file_range // 1300 data8 0 // tee data8 0 // vmsplice data8 0 data8 fsys_getcpu // getcpu // 1304 // fill in zeros for the remaining entries .zero: .space fsyscall_table + 8*NR_syscalls - .zero, 0
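The fsys_gettimeofday path above avoids a divide instruction when converting nanoseconds to microseconds: it shifts right by 3 (the "divided by 8 through shift" step), takes the high 64 bits of a multiply by 2361183241434822607 (the xmpy.hu with the "/ 1000 hack" constant), and shifts right by 4, which together behave like a division by 1000. Below is a minimal C sketch of that reciprocal-multiplication trick; the helper name, the use of unsigned __int128, and the small main() are illustrative assumptions, not part of the kernel source.

#include <stdint.h>
#include <stdio.h>

/* Sketch of the "/ 1000 hack": 2361183241434822607 == ceil(2^71 / 1000),
 * so ((nsec >> 3) * M) >> 64 >> 4 == nsec * M / 2^71, which matches
 * nsec / 1000 for the sub-second nanosecond values this path can see.
 */
static uint64_t nsec_to_usec(uint64_t nsec)
{
	const uint64_t M = 2361183241434822607ULL;	/* ceil(2^71 / 1000) */
	uint64_t q  = nsec >> 3;			/* divide by 8 through shift */
	uint64_t hi = (uint64_t)(((unsigned __int128)q * M) >> 64); /* like xmpy.hu */

	return hi >> 4;					/* remaining divide by 16 */
}

int main(void)
{
	/* 999999999 ns -> 999999 us, same result as a plain division */
	printf("%llu\n", (unsigned long long)nsec_to_usec(999999999ULL));
	return 0;
}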
AirFortressIlikara/LS2K0300-linux-4.19
53,032
arch/ia64/kernel/ivt.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * arch/ia64/kernel/ivt.S * * Copyright (C) 1998-2001, 2003, 2005 Hewlett-Packard Co * Stephane Eranian <eranian@hpl.hp.com> * David Mosberger <davidm@hpl.hp.com> * Copyright (C) 2000, 2002-2003 Intel Co * Asit Mallick <asit.k.mallick@intel.com> * Suresh Siddha <suresh.b.siddha@intel.com> * Kenneth Chen <kenneth.w.chen@intel.com> * Fenghua Yu <fenghua.yu@intel.com> * * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT. * * Copyright (C) 2005 Hewlett-Packard Co * Dan Magenheimer <dan.magenheimer@hp.com> * Xen paravirtualization * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> * VA Linux Systems Japan K.K. * pv_ops. * Yaozu (Eddie) Dong <eddie.dong@intel.com> */ /* * This file defines the interruption vector table used by the CPU. * It does not include one entry per possible cause of interruption. * * The first 20 entries of the table contain 64 bundles each while the * remaining 48 entries contain only 16 bundles each. * * The 64 bundles are used to allow inlining the whole handler for critical * interruptions like TLB misses. * * For each entry, the comment is as follows: * * // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51) * entry offset ----/ / / / / * entry number ---------/ / / / * size of the entry -------------/ / / * vector name -------------------------------------/ / * interruptions triggering this vector ----------------------/ * * The table is 32KB in size and must be aligned on 32KB boundary. * (The CPU ignores the 15 lower bits of the address) * * Table is based upon EAS2.6 (Oct 1999) */ #include <asm/asmmacro.h> #include <asm/break.h> #include <asm/kregs.h> #include <asm/asm-offsets.h> #include <asm/pgtable.h> #include <asm/processor.h> #include <asm/ptrace.h> #include <asm/thread_info.h> #include <asm/unistd.h> #include <asm/errno.h> #include <asm/export.h> #if 0 # define PSR_DEFAULT_BITS psr.ac #else # define PSR_DEFAULT_BITS 0 #endif #if 0 /* * This lets you track the last eight faults that occurred on the CPU. Make sure ar.k2 isn't * needed for something else before enabling this... */ # define DBG_FAULT(i) mov r16=ar.k2;; shl r16=r16,8;; add r16=(i),r16;;mov ar.k2=r16 #else # define DBG_FAULT(i) #endif #include "minstate.h" #define FAULT(n) \ mov r31=pr; \ mov r19=n;; /* prepare to save predicates */ \ br.sptk.many dispatch_to_fault_handler .section .text..ivt,"ax" .align 32768 // align on 32KB boundary .global ia64_ivt EXPORT_DATA_SYMBOL(ia64_ivt) ia64_ivt: ///////////////////////////////////////////////////////////////////////////////////////// // 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47) ENTRY(vhpt_miss) DBG_FAULT(0) /* * The VHPT vector is invoked when the TLB entry for the virtual page table * is missing. This happens only as a result of a previous * (the "original") TLB miss, which may either be caused by an instruction * fetch or a data access (or non-access). * * What we do here is normal TLB miss handing for the _original_ miss, * followed by inserting the TLB entry for the virtual page table page * that the VHPT walker was attempting to access. The latter gets * inserted as long as page table entry above pte level have valid * mappings for the faulting address. The TLB entry for the original * miss gets inserted only if the pte entry indicates that the page is * present. 
* * do_page_fault gets invoked in the following cases: * - the faulting virtual address uses unimplemented address bits * - the faulting virtual address has no valid page table mapping */ MOV_FROM_IFA(r16) // get address that caused the TLB miss #ifdef CONFIG_HUGETLB_PAGE movl r18=PAGE_SHIFT MOV_FROM_ITIR(r25) #endif ;; RSM_PSR_DT // use physical addressing for data mov r31=pr // save the predicate registers mov r19=IA64_KR(PT_BASE) // get page table base address shl r21=r16,3 // shift bit 60 into sign bit shr.u r17=r16,61 // get the region number into r17 ;; shr.u r22=r21,3 #ifdef CONFIG_HUGETLB_PAGE extr.u r26=r25,2,6 ;; cmp.ne p8,p0=r18,r26 sub r27=r26,r18 ;; (p8) dep r25=r18,r25,2,6 (p8) shr r22=r22,r27 #endif ;; cmp.eq p6,p7=5,r17 // is IFA pointing into to region 5? shr.u r18=r22,PGDIR_SHIFT // get bottom portion of pgd index bit ;; (p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place srlz.d LOAD_PHYSICAL(p6, r19, swapper_pg_dir) // region 5 is rooted at swapper_pg_dir .pred.rel "mutex", p6, p7 (p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT (p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3 ;; (p6) dep r17=r18,r19,3,(PAGE_SHIFT-3) // r17=pgd_offset for region 5 (p7) dep r17=r18,r17,3,(PAGE_SHIFT-6) // r17=pgd_offset for region[0-4] cmp.eq p7,p6=0,r21 // unused address bits all zeroes? #if CONFIG_PGTABLE_LEVELS == 4 shr.u r28=r22,PUD_SHIFT // shift pud index into position #else shr.u r18=r22,PMD_SHIFT // shift pmd index into position #endif ;; ld8 r17=[r17] // get *pgd (may be 0) ;; (p7) cmp.eq p6,p7=r17,r0 // was pgd_present(*pgd) == NULL? #if CONFIG_PGTABLE_LEVELS == 4 dep r28=r28,r17,3,(PAGE_SHIFT-3) // r28=pud_offset(pgd,addr) ;; shr.u r18=r22,PMD_SHIFT // shift pmd index into position (p7) ld8 r29=[r28] // get *pud (may be 0) ;; (p7) cmp.eq.or.andcm p6,p7=r29,r0 // was pud_present(*pud) == NULL? dep r17=r18,r29,3,(PAGE_SHIFT-3) // r17=pmd_offset(pud,addr) #else dep r17=r18,r17,3,(PAGE_SHIFT-3) // r17=pmd_offset(pgd,addr) #endif ;; (p7) ld8 r20=[r17] // get *pmd (may be 0) shr.u r19=r22,PAGE_SHIFT // shift pte index into position ;; (p7) cmp.eq.or.andcm p6,p7=r20,r0 // was pmd_present(*pmd) == NULL? dep r21=r19,r20,3,(PAGE_SHIFT-3) // r21=pte_offset(pmd,addr) ;; (p7) ld8 r18=[r21] // read *pte MOV_FROM_ISR(r19) // cr.isr bit 32 tells us if this is an insn miss ;; (p7) tbit.z p6,p7=r18,_PAGE_P_BIT // page present bit cleared? MOV_FROM_IHA(r22) // get the VHPT address that caused the TLB miss ;; // avoid RAW on p7 (p7) tbit.nz.unc p10,p11=r19,32 // is it an instruction TLB miss? dep r23=0,r20,0,PAGE_SHIFT // clear low bits to get page address ;; ITC_I_AND_D(p10, p11, r18, r24) // insert the instruction TLB entry and // insert the data TLB entry (p6) br.cond.spnt.many page_fault // handle bad address/page not present (page fault) MOV_TO_IFA(r22, r24) #ifdef CONFIG_HUGETLB_PAGE MOV_TO_ITIR(p8, r25, r24) // change to default page-size for VHPT #endif /* * Now compute and insert the TLB entry for the virtual page table. We never * execute in a page table page so there is no need to set the exception deferral * bit. */ adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23 ;; ITC_D(p7, r24, r25) ;; #ifdef CONFIG_SMP /* * Tell the assemblers dependency-violation checker that the above "itc" instructions * cannot possibly affect the following loads: */ dv_serialize_data /* * Re-check pagetable entry. If they changed, we may have received a ptc.g * between reading the pagetable and the "itc". If so, flush the entry we * inserted and retry. 
At this point, we have: * * r28 = equivalent of pud_offset(pgd, ifa) * r17 = equivalent of pmd_offset(pud, ifa) * r21 = equivalent of pte_offset(pmd, ifa) * * r29 = *pud * r20 = *pmd * r18 = *pte */ ld8 r25=[r21] // read *pte again ld8 r26=[r17] // read *pmd again #if CONFIG_PGTABLE_LEVELS == 4 ld8 r19=[r28] // read *pud again #endif cmp.ne p6,p7=r0,r0 ;; cmp.ne.or.andcm p6,p7=r26,r20 // did *pmd change #if CONFIG_PGTABLE_LEVELS == 4 cmp.ne.or.andcm p6,p7=r19,r29 // did *pud change #endif mov r27=PAGE_SHIFT<<2 ;; (p6) ptc.l r22,r27 // purge PTE page translation (p7) cmp.ne.or.andcm p6,p7=r25,r18 // did *pte change ;; (p6) ptc.l r16,r27 // purge translation #endif mov pr=r31,-1 // restore predicate registers RFI END(vhpt_miss) .org ia64_ivt+0x400 ///////////////////////////////////////////////////////////////////////////////////////// // 0x0400 Entry 1 (size 64 bundles) ITLB (21) ENTRY(itlb_miss) DBG_FAULT(1) /* * The ITLB handler accesses the PTE via the virtually mapped linear * page table. If a nested TLB miss occurs, we switch into physical * mode, walk the page table, and then re-execute the PTE read and * go on normally after that. */ MOV_FROM_IFA(r16) // get virtual address mov r29=b0 // save b0 mov r31=pr // save predicates .itlb_fault: MOV_FROM_IHA(r17) // get virtual address of PTE movl r30=1f // load nested fault continuation point ;; 1: ld8 r18=[r17] // read *pte ;; mov b0=r29 tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared? (p6) br.cond.spnt page_fault ;; ITC_I(p0, r18, r19) ;; #ifdef CONFIG_SMP /* * Tell the assemblers dependency-violation checker that the above "itc" instructions * cannot possibly affect the following loads: */ dv_serialize_data ld8 r19=[r17] // read *pte again and see if same mov r20=PAGE_SHIFT<<2 // setup page size for purge ;; cmp.ne p7,p0=r18,r19 ;; (p7) ptc.l r16,r20 #endif mov pr=r31,-1 RFI END(itlb_miss) .org ia64_ivt+0x0800 ///////////////////////////////////////////////////////////////////////////////////////// // 0x0800 Entry 2 (size 64 bundles) DTLB (9,48) ENTRY(dtlb_miss) DBG_FAULT(2) /* * The DTLB handler accesses the PTE via the virtually mapped linear * page table. If a nested TLB miss occurs, we switch into physical * mode, walk the page table, and then re-execute the PTE read and * go on normally after that. */ MOV_FROM_IFA(r16) // get virtual address mov r29=b0 // save b0 mov r31=pr // save predicates dtlb_fault: MOV_FROM_IHA(r17) // get virtual address of PTE movl r30=1f // load nested fault continuation point ;; 1: ld8 r18=[r17] // read *pte ;; mov b0=r29 tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared? 
(p6) br.cond.spnt page_fault ;; ITC_D(p0, r18, r19) ;; #ifdef CONFIG_SMP /* * Tell the assemblers dependency-violation checker that the above "itc" instructions * cannot possibly affect the following loads: */ dv_serialize_data ld8 r19=[r17] // read *pte again and see if same mov r20=PAGE_SHIFT<<2 // setup page size for purge ;; cmp.ne p7,p0=r18,r19 ;; (p7) ptc.l r16,r20 #endif mov pr=r31,-1 RFI END(dtlb_miss) .org ia64_ivt+0x0c00 ///////////////////////////////////////////////////////////////////////////////////////// // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19) ENTRY(alt_itlb_miss) DBG_FAULT(3) MOV_FROM_IFA(r16) // get address that caused the TLB miss movl r17=PAGE_KERNEL MOV_FROM_IPSR(p0, r21) movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff) mov r31=pr ;; #ifdef CONFIG_DISABLE_VHPT shr.u r22=r16,61 // get the region number into r21 ;; cmp.gt p8,p0=6,r22 // user mode ;; THASH(p8, r17, r16, r23) ;; MOV_TO_IHA(p8, r17, r23) (p8) mov r29=b0 // save b0 (p8) br.cond.dptk .itlb_fault #endif extr.u r23=r21,IA64_PSR_CPL0_BIT,2 // extract psr.cpl and r19=r19,r16 // clear ed, reserved bits, and PTE control bits shr.u r18=r16,57 // move address bit 61 to bit 4 ;; andcm r18=0x10,r18 // bit 4=~address-bit(61) cmp.ne p8,p0=r0,r23 // psr.cpl != 0? or r19=r17,r19 // insert PTE control bits into r19 ;; or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6 (p8) br.cond.spnt page_fault ;; ITC_I(p0, r19, r18) // insert the TLB entry mov pr=r31,-1 RFI END(alt_itlb_miss) .org ia64_ivt+0x1000 ///////////////////////////////////////////////////////////////////////////////////////// // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46) ENTRY(alt_dtlb_miss) DBG_FAULT(4) MOV_FROM_IFA(r16) // get address that caused the TLB miss movl r17=PAGE_KERNEL MOV_FROM_ISR(r20) movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff) MOV_FROM_IPSR(p0, r21) mov r31=pr mov r24=PERCPU_ADDR ;; #ifdef CONFIG_DISABLE_VHPT shr.u r22=r16,61 // get the region number into r21 ;; cmp.gt p8,p0=6,r22 // access to region 0-5 ;; THASH(p8, r17, r16, r25) ;; MOV_TO_IHA(p8, r17, r25) (p8) mov r29=b0 // save b0 (p8) br.cond.dptk dtlb_fault #endif cmp.ge p10,p11=r16,r24 // access to per_cpu_data? tbit.z p12,p0=r16,61 // access to region 6? mov r25=PERCPU_PAGE_SHIFT << 2 mov r26=PERCPU_PAGE_SIZE nop.m 0 nop.b 0 ;; (p10) mov r19=IA64_KR(PER_CPU_DATA) (p11) and r19=r19,r16 // clear non-ppn fields extr.u r23=r21,IA64_PSR_CPL0_BIT,2 // extract psr.cpl and r22=IA64_ISR_CODE_MASK,r20 // get the isr.code field tbit.nz p6,p7=r20,IA64_ISR_SP_BIT // is speculation bit on? tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on? ;; (p10) sub r19=r19,r26 MOV_TO_ITIR(p10, r25, r24) cmp.ne p8,p0=r0,r23 (p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field (p12) dep r17=-1,r17,4,1 // set ma=UC for region 6 addr (p8) br.cond.spnt page_fault dep r21=-1,r21,IA64_PSR_ED_BIT,1 ;; or r19=r19,r17 // insert PTE control bits into r19 MOV_TO_IPSR(p6, r21, r24) ;; ITC_D(p7, r19, r18) // insert the TLB entry mov pr=r31,-1 RFI END(alt_dtlb_miss) .org ia64_ivt+0x1400 ///////////////////////////////////////////////////////////////////////////////////////// // 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45) ENTRY(nested_dtlb_miss) /* * In the absence of kernel bugs, we get here when the virtually mapped linear * page table is accessed non-speculatively (e.g., in the Dirty-bit, Instruction * Access-bit, or Data Access-bit faults). 
If the DTLB entry for the virtual page * table is missing, a nested TLB miss fault is triggered and control is * transferred to this point. When this happens, we lookup the pte for the * faulting address by walking the page table in physical mode and return to the * continuation point passed in register r30 (or call page_fault if the address is * not mapped). * * Input: r16: faulting address * r29: saved b0 * r30: continuation address * r31: saved pr * * Output: r17: physical address of PTE of faulting address * r29: saved b0 * r30: continuation address * r31: saved pr * * Clobbered: b0, r18, r19, r21, r22, psr.dt (cleared) */ RSM_PSR_DT // switch to using physical data addressing mov r19=IA64_KR(PT_BASE) // get the page table base address shl r21=r16,3 // shift bit 60 into sign bit MOV_FROM_ITIR(r18) ;; shr.u r17=r16,61 // get the region number into r17 extr.u r18=r18,2,6 // get the faulting page size ;; cmp.eq p6,p7=5,r17 // is faulting address in region 5? add r22=-PAGE_SHIFT,r18 // adjustment for hugetlb address add r18=PGDIR_SHIFT-PAGE_SHIFT,r18 ;; shr.u r22=r16,r22 shr.u r18=r16,r18 (p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place srlz.d LOAD_PHYSICAL(p6, r19, swapper_pg_dir) // region 5 is rooted at swapper_pg_dir .pred.rel "mutex", p6, p7 (p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT (p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3 ;; (p6) dep r17=r18,r19,3,(PAGE_SHIFT-3) // r17=pgd_offset for region 5 (p7) dep r17=r18,r17,3,(PAGE_SHIFT-6) // r17=pgd_offset for region[0-4] cmp.eq p7,p6=0,r21 // unused address bits all zeroes? #if CONFIG_PGTABLE_LEVELS == 4 shr.u r18=r22,PUD_SHIFT // shift pud index into position #else shr.u r18=r22,PMD_SHIFT // shift pmd index into position #endif ;; ld8 r17=[r17] // get *pgd (may be 0) ;; (p7) cmp.eq p6,p7=r17,r0 // was pgd_present(*pgd) == NULL? dep r17=r18,r17,3,(PAGE_SHIFT-3) // r17=p[u|m]d_offset(pgd,addr) ;; #if CONFIG_PGTABLE_LEVELS == 4 (p7) ld8 r17=[r17] // get *pud (may be 0) shr.u r18=r22,PMD_SHIFT // shift pmd index into position ;; (p7) cmp.eq.or.andcm p6,p7=r17,r0 // was pud_present(*pud) == NULL? dep r17=r18,r17,3,(PAGE_SHIFT-3) // r17=pmd_offset(pud,addr) ;; #endif (p7) ld8 r17=[r17] // get *pmd (may be 0) shr.u r19=r22,PAGE_SHIFT // shift pte index into position ;; (p7) cmp.eq.or.andcm p6,p7=r17,r0 // was pmd_present(*pmd) == NULL? dep r17=r19,r17,3,(PAGE_SHIFT-3) // r17=pte_offset(pmd,addr); (p6) br.cond.spnt page_fault mov b0=r30 br.sptk.many b0 // return to continuation point END(nested_dtlb_miss) .org ia64_ivt+0x1800 ///////////////////////////////////////////////////////////////////////////////////////// // 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24) ENTRY(ikey_miss) DBG_FAULT(6) FAULT(6) END(ikey_miss) .org ia64_ivt+0x1c00 ///////////////////////////////////////////////////////////////////////////////////////// // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51) ENTRY(dkey_miss) DBG_FAULT(7) FAULT(7) END(dkey_miss) .org ia64_ivt+0x2000 ///////////////////////////////////////////////////////////////////////////////////////// // 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54) ENTRY(dirty_bit) DBG_FAULT(8) /* * What we do here is to simply turn on the dirty bit in the PTE. We need to * update both the page-table and the TLB entry. To efficiently access the PTE, * we address it through the virtual page table. Most likely, the TLB entry for * the relevant virtual page table page is still present in the TLB so we can * normally do this without additional TLB misses. 
In case the necessary virtual * page table TLB entry isn't present, we take a nested TLB miss hit where we look * up the physical address of the L3 PTE and then continue at label 1 below. */ MOV_FROM_IFA(r16) // get the address that caused the fault movl r30=1f // load continuation point in case of nested fault ;; THASH(p0, r17, r16, r18) // compute virtual address of L3 PTE mov r29=b0 // save b0 in case of nested fault mov r31=pr // save pr #ifdef CONFIG_SMP mov r28=ar.ccv // save ar.ccv ;; 1: ld8 r18=[r17] ;; // avoid RAW on r18 mov ar.ccv=r18 // set compare value for cmpxchg or r25=_PAGE_D|_PAGE_A,r18 // set the dirty and accessed bits tbit.z p7,p6 = r18,_PAGE_P_BIT // Check present bit ;; (p6) cmpxchg8.acq r26=[r17],r25,ar.ccv // Only update if page is present mov r24=PAGE_SHIFT<<2 ;; (p6) cmp.eq p6,p7=r26,r18 // Only compare if page is present ;; ITC_D(p6, r25, r18) // install updated PTE ;; /* * Tell the assemblers dependency-violation checker that the above "itc" instructions * cannot possibly affect the following loads: */ dv_serialize_data ld8 r18=[r17] // read PTE again ;; cmp.eq p6,p7=r18,r25 // is it same as the newly installed ;; (p7) ptc.l r16,r24 mov b0=r29 // restore b0 mov ar.ccv=r28 #else ;; 1: ld8 r18=[r17] ;; // avoid RAW on r18 or r18=_PAGE_D|_PAGE_A,r18 // set the dirty and accessed bits mov b0=r29 // restore b0 ;; st8 [r17]=r18 // store back updated PTE ITC_D(p0, r18, r16) // install updated PTE #endif mov pr=r31,-1 // restore pr RFI END(dirty_bit) .org ia64_ivt+0x2400 ///////////////////////////////////////////////////////////////////////////////////////// // 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27) ENTRY(iaccess_bit) DBG_FAULT(9) // Like Entry 8, except for instruction access MOV_FROM_IFA(r16) // get the address that caused the fault movl r30=1f // load continuation point in case of nested fault mov r31=pr // save predicates #ifdef CONFIG_ITANIUM /* * Erratum 10 (IFA may contain incorrect address) has "NoFix" status. */ MOV_FROM_IPSR(p0, r17) ;; MOV_FROM_IIP(r18) tbit.z p6,p0=r17,IA64_PSR_IS_BIT // IA64 instruction set? 
;; (p6) mov r16=r18 // if so, use cr.iip instead of cr.ifa #endif /* CONFIG_ITANIUM */ ;; THASH(p0, r17, r16, r18) // compute virtual address of L3 PTE mov r29=b0 // save b0 in case of nested fault) #ifdef CONFIG_SMP mov r28=ar.ccv // save ar.ccv ;; 1: ld8 r18=[r17] ;; mov ar.ccv=r18 // set compare value for cmpxchg or r25=_PAGE_A,r18 // set the accessed bit tbit.z p7,p6 = r18,_PAGE_P_BIT // Check present bit ;; (p6) cmpxchg8.acq r26=[r17],r25,ar.ccv // Only if page present mov r24=PAGE_SHIFT<<2 ;; (p6) cmp.eq p6,p7=r26,r18 // Only if page present ;; ITC_I(p6, r25, r26) // install updated PTE ;; /* * Tell the assemblers dependency-violation checker that the above "itc" instructions * cannot possibly affect the following loads: */ dv_serialize_data ld8 r18=[r17] // read PTE again ;; cmp.eq p6,p7=r18,r25 // is it same as the newly installed ;; (p7) ptc.l r16,r24 mov b0=r29 // restore b0 mov ar.ccv=r28 #else /* !CONFIG_SMP */ ;; 1: ld8 r18=[r17] ;; or r18=_PAGE_A,r18 // set the accessed bit mov b0=r29 // restore b0 ;; st8 [r17]=r18 // store back updated PTE ITC_I(p0, r18, r16) // install updated PTE #endif /* !CONFIG_SMP */ mov pr=r31,-1 RFI END(iaccess_bit) .org ia64_ivt+0x2800 ///////////////////////////////////////////////////////////////////////////////////////// // 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55) ENTRY(daccess_bit) DBG_FAULT(10) // Like Entry 8, except for data access MOV_FROM_IFA(r16) // get the address that caused the fault movl r30=1f // load continuation point in case of nested fault ;; THASH(p0, r17, r16, r18) // compute virtual address of L3 PTE mov r31=pr mov r29=b0 // save b0 in case of nested fault) #ifdef CONFIG_SMP mov r28=ar.ccv // save ar.ccv ;; 1: ld8 r18=[r17] ;; // avoid RAW on r18 mov ar.ccv=r18 // set compare value for cmpxchg or r25=_PAGE_A,r18 // set the dirty bit tbit.z p7,p6 = r18,_PAGE_P_BIT // Check present bit ;; (p6) cmpxchg8.acq r26=[r17],r25,ar.ccv // Only if page is present mov r24=PAGE_SHIFT<<2 ;; (p6) cmp.eq p6,p7=r26,r18 // Only if page is present ;; ITC_D(p6, r25, r26) // install updated PTE /* * Tell the assemblers dependency-violation checker that the above "itc" instructions * cannot possibly affect the following loads: */ dv_serialize_data ;; ld8 r18=[r17] // read PTE again ;; cmp.eq p6,p7=r18,r25 // is it same as the newly installed ;; (p7) ptc.l r16,r24 mov ar.ccv=r28 #else ;; 1: ld8 r18=[r17] ;; // avoid RAW on r18 or r18=_PAGE_A,r18 // set the accessed bit ;; st8 [r17]=r18 // store back updated PTE ITC_D(p0, r18, r16) // install updated PTE #endif mov b0=r29 // restore b0 mov pr=r31,-1 RFI END(daccess_bit) .org ia64_ivt+0x2c00 ///////////////////////////////////////////////////////////////////////////////////////// // 0x2c00 Entry 11 (size 64 bundles) Break instruction (33) ENTRY(break_fault) /* * The streamlined system call entry/exit paths only save/restore the initial part * of pt_regs. This implies that the callers of system-calls must adhere to the * normal procedure calling conventions. * * Registers to be saved & restored: * CR registers: cr.ipsr, cr.iip, cr.ifs * AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr * others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15 * Registers to be restored only: * r8-r11: output value from the system call. * * During system call exit, scratch registers (including r15) are modified/cleared * to prevent leaking bits from kernel to user level. 
*/ DBG_FAULT(11) mov.m r16=IA64_KR(CURRENT) // M2 r16 <- current task (12 cyc) MOV_FROM_IPSR(p0, r29) // M2 (12 cyc) mov r31=pr // I0 (2 cyc) MOV_FROM_IIM(r17) // M2 (2 cyc) mov.m r27=ar.rsc // M2 (12 cyc) mov r18=__IA64_BREAK_SYSCALL // A mov.m ar.rsc=0 // M2 mov.m r21=ar.fpsr // M2 (12 cyc) mov r19=b6 // I0 (2 cyc) ;; mov.m r23=ar.bspstore // M2 (12 cyc) mov.m r24=ar.rnat // M2 (5 cyc) mov.i r26=ar.pfs // I0 (2 cyc) invala // M0|1 nop.m 0 // M mov r20=r1 // A save r1 nop.m 0 movl r30=sys_call_table // X MOV_FROM_IIP(r28) // M2 (2 cyc) cmp.eq p0,p7=r18,r17 // I0 is this a system call? (p7) br.cond.spnt non_syscall // B no -> // // From this point on, we are definitely on the syscall-path // and we can use (non-banked) scratch registers. // /////////////////////////////////////////////////////////////////////// mov r1=r16 // A move task-pointer to "addl"-addressable reg mov r2=r16 // A setup r2 for ia64_syscall_setup add r9=TI_FLAGS+IA64_TASK_SIZE,r16 // A r9 = &current_thread_info()->flags adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 adds r15=-1024,r15 // A subtract 1024 from syscall number mov r3=NR_syscalls - 1 ;; ld1.bias r17=[r16] // M0|1 r17 = current->thread.on_ustack flag ld4 r9=[r9] // M0|1 r9 = current_thread_info()->flags extr.u r8=r29,41,2 // I0 extract ei field from cr.ipsr shladd r30=r15,3,r30 // A r30 = sys_call_table + 8*(syscall-1024) addl r22=IA64_RBS_OFFSET,r1 // A compute base of RBS cmp.leu p6,p7=r15,r3 // A syscall number in range? ;; lfetch.fault.excl.nt1 [r22] // M0|1 prefetch RBS (p6) ld8 r30=[r30] // M0|1 load address of syscall entry point tnat.nz.or p7,p0=r15 // I0 is syscall nr a NaT? mov.m ar.bspstore=r22 // M2 switch to kernel RBS cmp.eq p8,p9=2,r8 // A isr.ei==2? ;; (p8) mov r8=0 // A clear ei to 0 (p7) movl r30=sys_ni_syscall // X (p8) adds r28=16,r28 // A switch cr.iip to next bundle (p9) adds r8=1,r8 // A increment ei to next slot #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE ;; mov b6=r30 // I0 setup syscall handler branch reg early #else nop.i 0 ;; #endif mov.m r25=ar.unat // M2 (5 cyc) dep r29=r8,r29,41,2 // I0 insert new ei into cr.ipsr adds r15=1024,r15 // A restore original syscall number // // If any of the above loads miss in L1D, we'll stall here until // the data arrives. // /////////////////////////////////////////////////////////////////////// st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE MOV_FROM_ITC(p0, p14, r30, r18) // M get cycle for accounting #else mov b6=r30 // I0 setup syscall handler branch reg early #endif cmp.eq pKStk,pUStk=r0,r17 // A were we on kernel stacks already? and r9=_TIF_SYSCALL_TRACEAUDIT,r9 // A mask trace or audit mov r18=ar.bsp // M2 (12 cyc) (pKStk) br.cond.spnt .break_fixup // B we're already in kernel-mode -- fix up RBS ;; .back_from_break_fixup: (pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1 // A compute base of memory stack cmp.eq p14,p0=r9,r0 // A are syscalls being traced/audited? 
br.call.sptk.many b7=ia64_syscall_setup // B 1: #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE // mov.m r30=ar.itc is called in advance, and r13 is current add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13 // A add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13 // A (pKStk) br.cond.spnt .skip_accounting // B unlikely skip ;; ld8 r18=[r16],TI_AC_STIME-TI_AC_STAMP // M get last stamp ld8 r19=[r17],TI_AC_UTIME-TI_AC_LEAVE // M time at leave ;; ld8 r20=[r16],TI_AC_STAMP-TI_AC_STIME // M cumulated stime ld8 r21=[r17] // M cumulated utime sub r22=r19,r18 // A stime before leave ;; st8 [r16]=r30,TI_AC_STIME-TI_AC_STAMP // M update stamp sub r18=r30,r19 // A elapsed time in user ;; add r20=r20,r22 // A sum stime add r21=r21,r18 // A sum utime ;; st8 [r16]=r20 // M update stime st8 [r17]=r21 // M update utime ;; .skip_accounting: #endif mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0 nop 0 BSW_1(r2, r14) // B (6 cyc) regs are saved, switch to bank 1 ;; SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r16) // M2 now it's safe to re-enable intr.-collection // M0 ensure interruption collection is on movl r3=ia64_ret_from_syscall // X ;; mov rp=r3 // I0 set the real return addr (p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT SSM_PSR_I(p15, p15, r16) // M2 restore psr.i (p14) br.call.sptk.many b6=b6 // B invoke syscall-handker (ignore return addr) br.cond.spnt.many ia64_trace_syscall // B do syscall-tracing thingamagic // NOT REACHED /////////////////////////////////////////////////////////////////////// // On entry, we optimistically assumed that we're coming from user-space. // For the rare cases where a system-call is done from within the kernel, // we fix things up at this point: .break_fixup: add r1=-IA64_PT_REGS_SIZE,sp // A allocate space for pt_regs structure mov ar.rnat=r24 // M2 restore kernel's AR.RNAT ;; mov ar.bspstore=r23 // M2 restore kernel's AR.BSPSTORE br.cond.sptk .back_from_break_fixup END(break_fault) .org ia64_ivt+0x3000 ///////////////////////////////////////////////////////////////////////////////////////// // 0x3000 Entry 12 (size 64 bundles) External Interrupt (4) ENTRY(interrupt) /* interrupt handler has become too big to fit this area. */ br.sptk.many __interrupt END(interrupt) .org ia64_ivt+0x3400 ///////////////////////////////////////////////////////////////////////////////////////// // 0x3400 Entry 13 (size 64 bundles) Reserved DBG_FAULT(13) FAULT(13) .org ia64_ivt+0x3800 ///////////////////////////////////////////////////////////////////////////////////////// // 0x3800 Entry 14 (size 64 bundles) Reserved DBG_FAULT(14) FAULT(14) /* * There is no particular reason for this code to be here, other than that * there happens to be space here that would go unused otherwise. If this * fault ever gets "unreserved", simply moved the following code to a more * suitable spot... * * ia64_syscall_setup() is a separate subroutine so that it can * allocate stacked registers so it can safely demine any * potential NaT values from the input registers. 
* * On entry: * - executing on bank 0 or bank 1 register set (doesn't matter) * - r1: stack pointer * - r2: current task pointer * - r3: preserved * - r11: original contents (saved ar.pfs to be saved) * - r12: original contents (sp to be saved) * - r13: original contents (tp to be saved) * - r15: original contents (syscall # to be saved) * - r18: saved bsp (after switching to kernel stack) * - r19: saved b6 * - r20: saved r1 (gp) * - r21: saved ar.fpsr * - r22: kernel's register backing store base (krbs_base) * - r23: saved ar.bspstore * - r24: saved ar.rnat * - r25: saved ar.unat * - r26: saved ar.pfs * - r27: saved ar.rsc * - r28: saved cr.iip * - r29: saved cr.ipsr * - r30: ar.itc for accounting (don't touch) * - r31: saved pr * - b0: original contents (to be saved) * On exit: * - p10: TRUE if syscall is invoked with more than 8 out * registers or r15's Nat is true * - r1: kernel's gp * - r3: preserved (same as on entry) * - r8: -EINVAL if p10 is true * - r12: points to kernel stack * - r13: points to current task * - r14: preserved (same as on entry) * - p13: preserved * - p15: TRUE if interrupts need to be re-enabled * - ar.fpsr: set to kernel settings * - b6: preserved (same as on entry) */ GLOBAL_ENTRY(ia64_syscall_setup) #if PT(B6) != 0 # error This code assumes that b6 is the first field in pt_regs. #endif st8 [r1]=r19 // save b6 add r16=PT(CR_IPSR),r1 // initialize first base pointer add r17=PT(R11),r1 // initialize second base pointer ;; alloc r19=ar.pfs,8,0,0,0 // ensure in0-in7 are writable st8 [r16]=r29,PT(AR_PFS)-PT(CR_IPSR) // save cr.ipsr tnat.nz p8,p0=in0 st8.spill [r17]=r11,PT(CR_IIP)-PT(R11) // save r11 tnat.nz p9,p0=in1 (pKStk) mov r18=r0 // make sure r18 isn't NaT ;; st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS) // save ar.pfs st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP) // save cr.iip mov r28=b0 // save b0 (2 cyc) ;; st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT) // save ar.unat dep r19=0,r19,38,26 // clear all bits but 0..37 [I0] (p8) mov in0=-1 ;; st8 [r16]=r19,PT(AR_RNAT)-PT(CR_IFS) // store ar.pfs.pfm in cr.ifs extr.u r11=r19,7,7 // I0 // get sol of ar.pfs and r8=0x7f,r19 // A // get sof of ar.pfs st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)// save ar.rsc tbit.nz p15,p0=r29,IA64_PSR_I_BIT // I0 (p9) mov in1=-1 ;; (pUStk) sub r18=r18,r22 // r18=RSE.ndirty*8 tnat.nz p10,p0=in2 add r11=8,r11 ;; (pKStk) adds r16=PT(PR)-PT(AR_RNAT),r16 // skip over ar_rnat field (pKStk) adds r17=PT(B0)-PT(AR_BSPSTORE),r17 // skip over ar_bspstore field tnat.nz p11,p0=in3 ;; (p10) mov in2=-1 tnat.nz p12,p0=in4 // [I0] (p11) mov in3=-1 ;; (pUStk) st8 [r16]=r24,PT(PR)-PT(AR_RNAT) // save ar.rnat (pUStk) st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE) // save ar.bspstore shl r18=r18,16 // compute ar.rsc to be used for "loadrs" ;; st8 [r16]=r31,PT(LOADRS)-PT(PR) // save predicates st8 [r17]=r28,PT(R1)-PT(B0) // save b0 tnat.nz p13,p0=in5 // [I0] ;; st8 [r16]=r18,PT(R12)-PT(LOADRS) // save ar.rsc value for "loadrs" st8.spill [r17]=r20,PT(R13)-PT(R1) // save original r1 (p12) mov in4=-1 ;; .mem.offset 0,0; st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12) // save r12 .mem.offset 8,0; st8.spill [r17]=r13,PT(R15)-PT(R13) // save r13 (p13) mov in5=-1 ;; st8 [r16]=r21,PT(R8)-PT(AR_FPSR) // save ar.fpsr tnat.nz p13,p0=in6 cmp.lt p10,p9=r11,r8 // frame size can't be more than local+8 ;; mov r8=1 (p9) tnat.nz p10,p0=r15 adds r12=-16,r1 // switch to kernel memory stack (with 16 bytes of scratch) st8.spill [r17]=r15 // save r15 tnat.nz p8,p0=in7 nop.i 0 mov r13=r2 // establish `current' movl r1=__gp // establish kernel global pointer ;; st8 [r16]=r8 // 
ensure pt_regs.r8 != 0 (see handle_syscall_error) (p13) mov in6=-1 (p8) mov in7=-1 cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0 movl r17=FPSR_DEFAULT ;; mov.m ar.fpsr=r17 // set ar.fpsr to kernel default value (p10) mov r8=-EINVAL br.ret.sptk.many b7 END(ia64_syscall_setup) .org ia64_ivt+0x3c00 ///////////////////////////////////////////////////////////////////////////////////////// // 0x3c00 Entry 15 (size 64 bundles) Reserved DBG_FAULT(15) FAULT(15) .org ia64_ivt+0x4000 ///////////////////////////////////////////////////////////////////////////////////////// // 0x4000 Entry 16 (size 64 bundles) Reserved DBG_FAULT(16) FAULT(16) #if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) /* * There is no particular reason for this code to be here, other than * that there happens to be space here that would go unused otherwise. * If this fault ever gets "unreserved", simply moved the following * code to a more suitable spot... * * account_sys_enter is called from SAVE_MIN* macros if accounting is * enabled and if the macro is entered from user mode. */ GLOBAL_ENTRY(account_sys_enter) // mov.m r20=ar.itc is called in advance, and r13 is current add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13 add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13 ;; ld8 r18=[r16],TI_AC_STIME-TI_AC_STAMP // time at last check in kernel ld8 r19=[r17],TI_AC_UTIME-TI_AC_LEAVE // time at left from kernel ;; ld8 r23=[r16],TI_AC_STAMP-TI_AC_STIME // cumulated stime ld8 r21=[r17] // cumulated utime sub r22=r19,r18 // stime before leave kernel ;; st8 [r16]=r20,TI_AC_STIME-TI_AC_STAMP // update stamp sub r18=r20,r19 // elapsed time in user mode ;; add r23=r23,r22 // sum stime add r21=r21,r18 // sum utime ;; st8 [r16]=r23 // update stime st8 [r17]=r21 // update utime ;; br.ret.sptk.many rp END(account_sys_enter) #endif .org ia64_ivt+0x4400 ///////////////////////////////////////////////////////////////////////////////////////// // 0x4400 Entry 17 (size 64 bundles) Reserved DBG_FAULT(17) FAULT(17) .org ia64_ivt+0x4800 ///////////////////////////////////////////////////////////////////////////////////////// // 0x4800 Entry 18 (size 64 bundles) Reserved DBG_FAULT(18) FAULT(18) .org ia64_ivt+0x4c00 ///////////////////////////////////////////////////////////////////////////////////////// // 0x4c00 Entry 19 (size 64 bundles) Reserved DBG_FAULT(19) FAULT(19) // // --- End of long entries, Beginning of short entries // .org ia64_ivt+0x5000 ///////////////////////////////////////////////////////////////////////////////////////// // 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49) ENTRY(page_not_present) DBG_FAULT(20) MOV_FROM_IFA(r16) RSM_PSR_DT /* * The Linux page fault handler doesn't expect non-present pages to be in * the TLB. Flush the existing entry now, so we meet that expectation. 
*/ mov r17=PAGE_SHIFT<<2 ;; ptc.l r16,r17 ;; mov r31=pr srlz.d br.sptk.many page_fault END(page_not_present) .org ia64_ivt+0x5100 ///////////////////////////////////////////////////////////////////////////////////////// // 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52) ENTRY(key_permission) DBG_FAULT(21) MOV_FROM_IFA(r16) RSM_PSR_DT mov r31=pr ;; srlz.d br.sptk.many page_fault END(key_permission) .org ia64_ivt+0x5200 ///////////////////////////////////////////////////////////////////////////////////////// // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26) ENTRY(iaccess_rights) DBG_FAULT(22) MOV_FROM_IFA(r16) RSM_PSR_DT mov r31=pr ;; srlz.d br.sptk.many page_fault END(iaccess_rights) .org ia64_ivt+0x5300 ///////////////////////////////////////////////////////////////////////////////////////// // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53) ENTRY(daccess_rights) DBG_FAULT(23) MOV_FROM_IFA(r16) RSM_PSR_DT mov r31=pr ;; srlz.d br.sptk.many page_fault END(daccess_rights) .org ia64_ivt+0x5400 ///////////////////////////////////////////////////////////////////////////////////////// // 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39) ENTRY(general_exception) DBG_FAULT(24) MOV_FROM_ISR(r16) mov r31=pr ;; cmp4.eq p6,p0=0,r16 (p6) br.sptk.many dispatch_illegal_op_fault ;; mov r19=24 // fault number br.sptk.many dispatch_to_fault_handler END(general_exception) .org ia64_ivt+0x5500 ///////////////////////////////////////////////////////////////////////////////////////// // 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35) ENTRY(disabled_fp_reg) DBG_FAULT(25) rsm psr.dfh // ensure we can access fph ;; srlz.d mov r31=pr mov r19=25 br.sptk.many dispatch_to_fault_handler END(disabled_fp_reg) .org ia64_ivt+0x5600 ///////////////////////////////////////////////////////////////////////////////////////// // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50) ENTRY(nat_consumption) DBG_FAULT(26) MOV_FROM_IPSR(p0, r16) MOV_FROM_ISR(r17) mov r31=pr // save PR ;; and r18=0xf,r17 // r18 = cr.ipsr.code{3:0} tbit.z p6,p0=r17,IA64_ISR_NA_BIT ;; cmp.ne.or p6,p0=IA64_ISR_CODE_LFETCH,r18 dep r16=-1,r16,IA64_PSR_ED_BIT,1 (p6) br.cond.spnt 1f // branch if (cr.ispr.na == 0 || cr.ipsr.code{3:0} != LFETCH) ;; MOV_TO_IPSR(p0, r16, r18) mov pr=r31,-1 ;; RFI 1: mov pr=r31,-1 ;; FAULT(26) END(nat_consumption) .org ia64_ivt+0x5700 ///////////////////////////////////////////////////////////////////////////////////////// // 0x5700 Entry 27 (size 16 bundles) Speculation (40) ENTRY(speculation_vector) DBG_FAULT(27) /* * A [f]chk.[as] instruction needs to take the branch to the recovery code but * this part of the architecture is not implemented in hardware on some CPUs, such * as Itanium. Thus, in general we need to emulate the behavior. IIM contains * the relative target (not yet sign extended). So after sign extending it we * simply add it to IIP. We also need to reset the EI field of the IPSR to zero, * i.e., the slot to restart into. 
* * cr.imm contains zero_ext(imm21) */ MOV_FROM_IIM(r18) ;; MOV_FROM_IIP(r17) shl r18=r18,43 // put sign bit in position (43=64-21) ;; MOV_FROM_IPSR(p0, r16) shr r18=r18,39 // sign extend (39=43-4) ;; add r17=r17,r18 // now add the offset ;; MOV_TO_IIP(r17, r19) dep r16=0,r16,41,2 // clear EI ;; MOV_TO_IPSR(p0, r16, r19) ;; RFI END(speculation_vector) .org ia64_ivt+0x5800 ///////////////////////////////////////////////////////////////////////////////////////// // 0x5800 Entry 28 (size 16 bundles) Reserved DBG_FAULT(28) FAULT(28) .org ia64_ivt+0x5900 ///////////////////////////////////////////////////////////////////////////////////////// // 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56) ENTRY(debug_vector) DBG_FAULT(29) FAULT(29) END(debug_vector) .org ia64_ivt+0x5a00 ///////////////////////////////////////////////////////////////////////////////////////// // 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57) ENTRY(unaligned_access) DBG_FAULT(30) mov r31=pr // prepare to save predicates ;; br.sptk.many dispatch_unaligned_handler END(unaligned_access) .org ia64_ivt+0x5b00 ///////////////////////////////////////////////////////////////////////////////////////// // 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57) ENTRY(unsupported_data_reference) DBG_FAULT(31) FAULT(31) END(unsupported_data_reference) .org ia64_ivt+0x5c00 ///////////////////////////////////////////////////////////////////////////////////////// // 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64) ENTRY(floating_point_fault) DBG_FAULT(32) FAULT(32) END(floating_point_fault) .org ia64_ivt+0x5d00 ///////////////////////////////////////////////////////////////////////////////////////// // 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66) ENTRY(floating_point_trap) DBG_FAULT(33) FAULT(33) END(floating_point_trap) .org ia64_ivt+0x5e00 ///////////////////////////////////////////////////////////////////////////////////////// // 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66) ENTRY(lower_privilege_trap) DBG_FAULT(34) FAULT(34) END(lower_privilege_trap) .org ia64_ivt+0x5f00 ///////////////////////////////////////////////////////////////////////////////////////// // 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68) ENTRY(taken_branch_trap) DBG_FAULT(35) FAULT(35) END(taken_branch_trap) .org ia64_ivt+0x6000 ///////////////////////////////////////////////////////////////////////////////////////// // 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69) ENTRY(single_step_trap) DBG_FAULT(36) FAULT(36) END(single_step_trap) .org ia64_ivt+0x6100 ///////////////////////////////////////////////////////////////////////////////////////// // 0x6100 Entry 37 (size 16 bundles) Reserved DBG_FAULT(37) FAULT(37) .org ia64_ivt+0x6200 ///////////////////////////////////////////////////////////////////////////////////////// // 0x6200 Entry 38 (size 16 bundles) Reserved DBG_FAULT(38) FAULT(38) .org ia64_ivt+0x6300 ///////////////////////////////////////////////////////////////////////////////////////// // 0x6300 Entry 39 (size 16 bundles) Reserved DBG_FAULT(39) FAULT(39) .org ia64_ivt+0x6400 ///////////////////////////////////////////////////////////////////////////////////////// // 0x6400 Entry 40 (size 16 bundles) Reserved DBG_FAULT(40) FAULT(40) .org ia64_ivt+0x6500 ///////////////////////////////////////////////////////////////////////////////////////// // 0x6500 Entry 41 (size 16 bundles) Reserved DBG_FAULT(41) FAULT(41) .org ia64_ivt+0x6600 
///////////////////////////////////////////////////////////////////////////////////////// // 0x6600 Entry 42 (size 16 bundles) Reserved DBG_FAULT(42) FAULT(42) .org ia64_ivt+0x6700 ///////////////////////////////////////////////////////////////////////////////////////// // 0x6700 Entry 43 (size 16 bundles) Reserved DBG_FAULT(43) FAULT(43) .org ia64_ivt+0x6800 ///////////////////////////////////////////////////////////////////////////////////////// // 0x6800 Entry 44 (size 16 bundles) Reserved DBG_FAULT(44) FAULT(44) .org ia64_ivt+0x6900 ///////////////////////////////////////////////////////////////////////////////////////// // 0x6900 Entry 45 (size 16 bundles) IA-32 Exeception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77) ENTRY(ia32_exception) DBG_FAULT(45) FAULT(45) END(ia32_exception) .org ia64_ivt+0x6a00 ///////////////////////////////////////////////////////////////////////////////////////// // 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71) ENTRY(ia32_intercept) DBG_FAULT(46) FAULT(46) END(ia32_intercept) .org ia64_ivt+0x6b00 ///////////////////////////////////////////////////////////////////////////////////////// // 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74) ENTRY(ia32_interrupt) DBG_FAULT(47) FAULT(47) END(ia32_interrupt) .org ia64_ivt+0x6c00 ///////////////////////////////////////////////////////////////////////////////////////// // 0x6c00 Entry 48 (size 16 bundles) Reserved DBG_FAULT(48) FAULT(48) .org ia64_ivt+0x6d00 ///////////////////////////////////////////////////////////////////////////////////////// // 0x6d00 Entry 49 (size 16 bundles) Reserved DBG_FAULT(49) FAULT(49) .org ia64_ivt+0x6e00 ///////////////////////////////////////////////////////////////////////////////////////// // 0x6e00 Entry 50 (size 16 bundles) Reserved DBG_FAULT(50) FAULT(50) .org ia64_ivt+0x6f00 ///////////////////////////////////////////////////////////////////////////////////////// // 0x6f00 Entry 51 (size 16 bundles) Reserved DBG_FAULT(51) FAULT(51) .org ia64_ivt+0x7000 ///////////////////////////////////////////////////////////////////////////////////////// // 0x7000 Entry 52 (size 16 bundles) Reserved DBG_FAULT(52) FAULT(52) .org ia64_ivt+0x7100 ///////////////////////////////////////////////////////////////////////////////////////// // 0x7100 Entry 53 (size 16 bundles) Reserved DBG_FAULT(53) FAULT(53) .org ia64_ivt+0x7200 ///////////////////////////////////////////////////////////////////////////////////////// // 0x7200 Entry 54 (size 16 bundles) Reserved DBG_FAULT(54) FAULT(54) .org ia64_ivt+0x7300 ///////////////////////////////////////////////////////////////////////////////////////// // 0x7300 Entry 55 (size 16 bundles) Reserved DBG_FAULT(55) FAULT(55) .org ia64_ivt+0x7400 ///////////////////////////////////////////////////////////////////////////////////////// // 0x7400 Entry 56 (size 16 bundles) Reserved DBG_FAULT(56) FAULT(56) .org ia64_ivt+0x7500 ///////////////////////////////////////////////////////////////////////////////////////// // 0x7500 Entry 57 (size 16 bundles) Reserved DBG_FAULT(57) FAULT(57) .org ia64_ivt+0x7600 ///////////////////////////////////////////////////////////////////////////////////////// // 0x7600 Entry 58 (size 16 bundles) Reserved DBG_FAULT(58) FAULT(58) .org ia64_ivt+0x7700 ///////////////////////////////////////////////////////////////////////////////////////// // 0x7700 Entry 59 (size 16 bundles) Reserved DBG_FAULT(59) FAULT(59) .org ia64_ivt+0x7800 
///////////////////////////////////////////////////////////////////////////////////////// // 0x7800 Entry 60 (size 16 bundles) Reserved DBG_FAULT(60) FAULT(60) .org ia64_ivt+0x7900 ///////////////////////////////////////////////////////////////////////////////////////// // 0x7900 Entry 61 (size 16 bundles) Reserved DBG_FAULT(61) FAULT(61) .org ia64_ivt+0x7a00 ///////////////////////////////////////////////////////////////////////////////////////// // 0x7a00 Entry 62 (size 16 bundles) Reserved DBG_FAULT(62) FAULT(62) .org ia64_ivt+0x7b00 ///////////////////////////////////////////////////////////////////////////////////////// // 0x7b00 Entry 63 (size 16 bundles) Reserved DBG_FAULT(63) FAULT(63) .org ia64_ivt+0x7c00 ///////////////////////////////////////////////////////////////////////////////////////// // 0x7c00 Entry 64 (size 16 bundles) Reserved DBG_FAULT(64) FAULT(64) .org ia64_ivt+0x7d00 ///////////////////////////////////////////////////////////////////////////////////////// // 0x7d00 Entry 65 (size 16 bundles) Reserved DBG_FAULT(65) FAULT(65) .org ia64_ivt+0x7e00 ///////////////////////////////////////////////////////////////////////////////////////// // 0x7e00 Entry 66 (size 16 bundles) Reserved DBG_FAULT(66) FAULT(66) .org ia64_ivt+0x7f00 ///////////////////////////////////////////////////////////////////////////////////////// // 0x7f00 Entry 67 (size 16 bundles) Reserved DBG_FAULT(67) FAULT(67) //----------------------------------------------------------------------------------- // call do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address) ENTRY(page_fault) SSM_PSR_DT_AND_SRLZ_I ;; SAVE_MIN_WITH_COVER alloc r15=ar.pfs,0,0,3,0 MOV_FROM_IFA(out0) MOV_FROM_ISR(out1) SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r14, r3) adds r3=8,r2 // set up second base pointer SSM_PSR_I(p15, p15, r14) // restore psr.i movl r14=ia64_leave_kernel ;; SAVE_REST mov rp=r14 ;; adds out2=16,r12 // out2 = pointer to pt_regs br.call.sptk.many b6=ia64_do_page_fault // ignore return address END(page_fault) ENTRY(non_syscall) mov ar.rsc=r27 // restore ar.rsc before SAVE_MIN_WITH_COVER ;; SAVE_MIN_WITH_COVER // There is no particular reason for this code to be here, other than that // there happens to be space here that would go unused otherwise. If this // fault ever gets "unreserved", simply moved the following code to a more // suitable spot... 
alloc r14=ar.pfs,0,0,2,0 MOV_FROM_IIM(out0) add out1=16,sp adds r3=8,r2 // set up second base pointer for SAVE_REST SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r15, r24) // guarantee that interruption collection is on SSM_PSR_I(p15, p15, r15) // restore psr.i movl r15=ia64_leave_kernel ;; SAVE_REST mov rp=r15 ;; br.call.sptk.many b6=ia64_bad_break // avoid WAW on CFM and ignore return addr END(non_syscall) ENTRY(__interrupt) DBG_FAULT(12) mov r31=pr // prepare to save predicates ;; SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3 SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r14) // ensure everybody knows psr.ic is back on adds r3=8,r2 // set up second base pointer for SAVE_REST ;; SAVE_REST ;; MCA_RECOVER_RANGE(interrupt) alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group MOV_FROM_IVR(out0, r8) // pass cr.ivr as first arg add out1=16,sp // pass pointer to pt_regs as second arg ;; srlz.d // make sure we see the effect of cr.ivr movl r14=ia64_leave_kernel ;; mov rp=r14 br.call.sptk.many b6=ia64_handle_irq END(__interrupt) /* * There is no particular reason for this code to be here, other than that * there happens to be space here that would go unused otherwise. If this * fault ever gets "unreserved", simply moved the following code to a more * suitable spot... */ ENTRY(dispatch_unaligned_handler) SAVE_MIN_WITH_COVER ;; alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!) MOV_FROM_IFA(out0) adds out1=16,sp SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24) // guarantee that interruption collection is on SSM_PSR_I(p15, p15, r3) // restore psr.i adds r3=8,r2 // set up second base pointer ;; SAVE_REST movl r14=ia64_leave_kernel ;; mov rp=r14 br.sptk.many ia64_prepare_handle_unaligned END(dispatch_unaligned_handler) /* * There is no particular reason for this code to be here, other than that * there happens to be space here that would go unused otherwise. If this * fault ever gets "unreserved", simply moved the following code to a more * suitable spot... */ ENTRY(dispatch_to_fault_handler) /* * Input: * psr.ic: off * r19: fault vector number (e.g., 24 for General Exception) * r31: contains saved predicates (pr) */ SAVE_MIN_WITH_COVER_R19 alloc r14=ar.pfs,0,0,5,0 MOV_FROM_ISR(out1) MOV_FROM_IFA(out2) MOV_FROM_IIM(out3) MOV_FROM_ITIR(out4) ;; SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, out0) // guarantee that interruption collection is on mov out0=r15 ;; SSM_PSR_I(p15, p15, r3) // restore psr.i adds r3=8,r2 // set up second base pointer for SAVE_REST ;; SAVE_REST movl r14=ia64_leave_kernel ;; mov rp=r14 br.call.sptk.many b6=ia64_fault END(dispatch_to_fault_handler) /* * Squatting in this space ... * * This special case dispatcher for illegal operation faults allows preserved * registers to be modified through a callback function (asm only) that is handed * back from the fault handler in r8. Up to three arguments can be passed to the * callback function by returning an aggregate with the callback as its first * element, followed by the arguments. 
*/ ENTRY(dispatch_illegal_op_fault) .prologue .body SAVE_MIN_WITH_COVER SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24) // guarantee that interruption collection is on ;; SSM_PSR_I(p15, p15, r3) // restore psr.i adds r3=8,r2 // set up second base pointer for SAVE_REST ;; alloc r14=ar.pfs,0,0,1,0 // must be first in insn group mov out0=ar.ec ;; SAVE_REST PT_REGS_UNWIND_INFO(0) ;; br.call.sptk.many rp=ia64_illegal_op_fault .ret0: ;; alloc r14=ar.pfs,0,0,3,0 // must be first in insn group mov out0=r9 mov out1=r10 mov out2=r11 movl r15=ia64_leave_kernel ;; mov rp=r15 mov b6=r8 ;; cmp.ne p6,p0=0,r8 (p6) br.call.dpnt.many b6=b6 // call returns to ia64_leave_kernel br.sptk.many ia64_leave_kernel END(dispatch_illegal_op_fault)
AirFortressIlikara/LS2K0300-linux-4.19
2,681
arch/ia64/kernel/efi_stub.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * EFI call stub. * * Copyright (C) 1999-2001 Hewlett-Packard Co * David Mosberger <davidm@hpl.hp.com> * * This stub allows us to make EFI calls in physical mode with interrupts * turned off. We need this because we can't call SetVirtualMap() until * the kernel has booted far enough to allow allocation of struct vma_struct * entries (which we would need to map stuff with memory attributes other * than uncached or writeback...). Since the GetTime() service gets called * earlier than that, we need to be able to make physical mode EFI calls from * the kernel. */ /* * PSR settings as per SAL spec (Chapter 8 in the "IA-64 System * Abstraction Layer Specification", revision 2.6e). Note that * psr.dfl and psr.dfh MUST be cleared, despite what this manual says. * Otherwise, SAL dies whenever it's trying to do an IA-32 BIOS call * (the br.ia instruction fails unless psr.dfl and psr.dfh are * cleared). Fortunately, SAL promises not to touch the floating * point regs, so at least we don't have to save f2-f127. */ #define PSR_BITS_TO_CLEAR \ (IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_RT | \ IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | \ IA64_PSR_DFL | IA64_PSR_DFH) #define PSR_BITS_TO_SET \ (IA64_PSR_BN) #include <asm/processor.h> #include <asm/asmmacro.h> /* * Inputs: * in0 = address of function descriptor of EFI routine to call * in1..in7 = arguments to routine * * Outputs: * r8 = EFI_STATUS returned by called function */ GLOBAL_ENTRY(efi_call_phys) .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8) alloc loc1=ar.pfs,8,7,7,0 ld8 r2=[in0],8 // load EFI function's entry point mov loc0=rp .body ;; mov loc2=gp // save global pointer mov loc4=ar.rsc // save RSE configuration mov ar.rsc=0 // put RSE in enforced lazy, LE mode ;; ld8 gp=[in0] // load EFI function's global pointer movl r16=PSR_BITS_TO_CLEAR mov loc3=psr // save processor status word movl r17=PSR_BITS_TO_SET ;; or loc3=loc3,r17 mov b6=r2 ;; andcm r16=loc3,r16 // get psr with IT, DT, and RT bits cleared br.call.sptk.many rp=ia64_switch_mode_phys .ret0: mov out4=in5 mov out0=in1 mov out1=in2 mov out2=in3 mov out3=in4 mov out5=in6 mov out6=in7 mov loc5=r19 mov loc6=r20 br.call.sptk.many rp=b6 // call the EFI function .ret1: mov ar.rsc=0 // put RSE in enforced lazy, LE mode mov r16=loc3 mov r19=loc5 mov r20=loc6 br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode .ret2: mov ar.rsc=loc4 // restore RSE configuration mov ar.pfs=loc1 mov rp=loc0 mov gp=loc2 br.ret.sptk.many rp END(efi_call_phys)
AirFortressIlikara/LS2K0300-linux-4.19
27,969
arch/ia64/kernel/mca_asm.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * File: mca_asm.S * Purpose: assembly portion of the IA64 MCA handling * * Mods by cfleck to integrate into kernel build * * 2000-03-15 David Mosberger-Tang <davidm@hpl.hp.com> * Added various stop bits to get a clean compile * * 2000-03-29 Chuck Fleckenstein <cfleck@co.intel.com> * Added code to save INIT handoff state in pt_regs format, * switch to temp kstack, switch modes, jump to C INIT handler * * 2002-01-04 J.Hall <jenna.s.hall@intel.com> * Before entering virtual mode code: * 1. Check for TLB CPU error * 2. Restore current thread pointer to kr6 * 3. Move stack ptr 16 bytes to conform to C calling convention * * 2004-11-12 Russ Anderson <rja@sgi.com> * Added per cpu MCA/INIT stack save areas. * * 2005-12-08 Keith Owens <kaos@sgi.com> * Use per cpu MCA/INIT stacks for all data. */ #include <linux/threads.h> #include <asm/asmmacro.h> #include <asm/pgtable.h> #include <asm/processor.h> #include <asm/mca_asm.h> #include <asm/mca.h> #include "entry.h" #define GET_IA64_MCA_DATA(reg) \ GET_THIS_PADDR(reg, ia64_mca_data) \ ;; \ ld8 reg=[reg] .global ia64_do_tlb_purge .global ia64_os_mca_dispatch .global ia64_os_init_on_kdump .global ia64_os_init_dispatch_monarch .global ia64_os_init_dispatch_slave .text .align 16 //StartMain//////////////////////////////////////////////////////////////////// /* * Just the TLB purge part is moved to a separate function * so we can re-use the code for cpu hotplug code as well * Caller should now setup b1, so we can branch once the * tlb flush is complete. */ ia64_do_tlb_purge: #define O(member) IA64_CPUINFO_##member##_OFFSET GET_THIS_PADDR(r2, ia64_cpu_info) // load phys addr of cpu_info into r2 ;; addl r17=O(PTCE_STRIDE),r2 addl r2=O(PTCE_BASE),r2 ;; ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));; // r18=ptce_base ld4 r19=[r2],4 // r19=ptce_count[0] ld4 r21=[r17],4 // r21=ptce_stride[0] ;; ld4 r20=[r2] // r20=ptce_count[1] ld4 r22=[r17] // r22=ptce_stride[1] mov r24=0 ;; adds r20=-1,r20 ;; #undef O 2: cmp.ltu p6,p7=r24,r19 (p7) br.cond.dpnt.few 4f mov ar.lc=r20 3: ptc.e r18 ;; add r18=r22,r18 br.cloop.sptk.few 3b ;; add r18=r21,r18 add r24=1,r24 ;; br.sptk.few 2b 4: srlz.i // srlz.i implies srlz.d ;; // Now purge addresses formerly mapped by TR registers // 1. Purge ITR&DTR for kernel. movl r16=KERNEL_START mov r18=KERNEL_TR_PAGE_SHIFT<<2 ;; ptr.i r16, r18 ptr.d r16, r18 ;; srlz.i ;; srlz.d ;; // 3. Purge ITR for PAL code. GET_THIS_PADDR(r2, ia64_mca_pal_base) ;; ld8 r16=[r2] mov r18=IA64_GRANULE_SHIFT<<2 ;; ptr.i r16,r18 ;; srlz.i ;; // 4. Purge DTR for stack. mov r16=IA64_KR(CURRENT_STACK) ;; shl r16=r16,IA64_GRANULE_SHIFT movl r19=PAGE_OFFSET ;; add r16=r19,r16 mov r18=IA64_GRANULE_SHIFT<<2 ;; ptr.d r16,r18 ;; srlz.i ;; // Now branch away to caller. br.sptk.many b1 ;; //EndMain////////////////////////////////////////////////////////////////////// //StartMain//////////////////////////////////////////////////////////////////// ia64_os_mca_dispatch: mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack LOAD_PHYSICAL(p0,r2,1f) // return address mov r19=1 // All MCA events are treated as monarch (for now) br.sptk ia64_state_save // save the state that is not in minstate 1: GET_IA64_MCA_DATA(r2) // Using MCA stack, struct ia64_sal_os_state, variable proc_state_param ;; add r3=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET+SOS(PROC_STATE_PARAM), r2 ;; ld8 r18=[r3] // Get processor state parameter on existing PALE_CHECK. 
;; tbit.nz p6,p7=r18,60 (p7) br.spnt done_tlb_purge_and_reload // The following code purges TC and TR entries. Then reload all TC entries. // Purge percpu data TC entries. begin_tlb_purge_and_reload: movl r18=ia64_reload_tr;; LOAD_PHYSICAL(p0,r18,ia64_reload_tr);; mov b1=r18;; br.sptk.many ia64_do_tlb_purge;; ia64_reload_tr: // Finally reload the TR registers. // 1. Reload DTR/ITR registers for kernel. mov r18=KERNEL_TR_PAGE_SHIFT<<2 movl r17=KERNEL_START ;; mov cr.itir=r18 mov cr.ifa=r17 mov r16=IA64_TR_KERNEL mov r19=ip movl r18=PAGE_KERNEL ;; dep r17=0,r19,0, KERNEL_TR_PAGE_SHIFT ;; or r18=r17,r18 ;; itr.i itr[r16]=r18 ;; itr.d dtr[r16]=r18 ;; srlz.i srlz.d ;; // 3. Reload ITR for PAL code. GET_THIS_PADDR(r2, ia64_mca_pal_pte) ;; ld8 r18=[r2] // load PAL PTE ;; GET_THIS_PADDR(r2, ia64_mca_pal_base) ;; ld8 r16=[r2] // load PAL vaddr mov r19=IA64_GRANULE_SHIFT<<2 ;; mov cr.itir=r19 mov cr.ifa=r16 mov r20=IA64_TR_PALCODE ;; itr.i itr[r20]=r18 ;; srlz.i ;; // 4. Reload DTR for stack. mov r16=IA64_KR(CURRENT_STACK) ;; shl r16=r16,IA64_GRANULE_SHIFT movl r19=PAGE_OFFSET ;; add r18=r19,r16 movl r20=PAGE_KERNEL ;; add r16=r20,r16 mov r19=IA64_GRANULE_SHIFT<<2 ;; mov cr.itir=r19 mov cr.ifa=r18 mov r20=IA64_TR_CURRENT_STACK ;; itr.d dtr[r20]=r16 GET_THIS_PADDR(r2, ia64_mca_tr_reload) mov r18 = 1 ;; srlz.d ;; st8 [r2] =r18 ;; done_tlb_purge_and_reload: // switch to per cpu MCA stack mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack LOAD_PHYSICAL(p0,r2,1f) // return address br.sptk ia64_new_stack 1: // everything saved, now we can set the kernel registers mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack LOAD_PHYSICAL(p0,r2,1f) // return address br.sptk ia64_set_kernel_registers 1: // This must be done in physical mode GET_IA64_MCA_DATA(r2) ;; mov r7=r2 // Enter virtual mode from physical mode VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4) // This code returns to SAL via SOS r2, in general SAL has no unwind // data. To get a clean termination when backtracing the C MCA/INIT // handler, set a dummy return address of 0 in this routine. That // requires that ia64_os_mca_virtual_begin be a global function. ENTRY(ia64_os_mca_virtual_begin) .prologue .save rp,r0 .body mov ar.rsc=3 // set eager mode for C handler mov r2=r7 // see GET_IA64_MCA_DATA above ;; // Call virtual mode handler alloc r14=ar.pfs,0,0,3,0 ;; DATA_PA_TO_VA(r2,r7) ;; add out0=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_PT_REGS_OFFSET, r2 add out1=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SWITCH_STACK_OFFSET, r2 add out2=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET, r2 br.call.sptk.many b0=ia64_mca_handler // Revert back to physical mode before going back to SAL PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4) ia64_os_mca_virtual_end: END(ia64_os_mca_virtual_begin) // switch back to previous stack alloc r14=ar.pfs,0,0,0,0 // remove the MCA handler frame mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack LOAD_PHYSICAL(p0,r2,1f) // return address br.sptk ia64_old_stack 1: mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack LOAD_PHYSICAL(p0,r2,1f) // return address br.sptk ia64_state_restore // restore the SAL state 1: mov b0=r12 // SAL_CHECK return address br b0 //EndMain////////////////////////////////////////////////////////////////////// //StartMain//////////////////////////////////////////////////////////////////// // // NOP init handler for kdump. In panic situation, we may receive INIT // while kernel transition. 
Since we initialize registers on leave from // current kernel, no longer monarch/slave handlers of current kernel in // virtual mode are called safely. // We can unregister these init handlers from SAL, however then the INIT // will result in warmboot by SAL and we cannot retrieve the crashdump. // Therefore register this NOP function to SAL, to prevent entering virtual // mode and resulting warmboot by SAL. // ia64_os_init_on_kdump: mov r8=r0 // IA64_INIT_RESUME mov r9=r10 // SAL_GP mov r22=r17 // *minstate ;; mov r10=r0 // return to same context mov b0=r12 // SAL_CHECK return address br b0 // // SAL to OS entry point for INIT on all processors. This has been defined for // registration purposes with SAL as a part of ia64_mca_init. Monarch and // slave INIT have identical processing, except for the value of the // sos->monarch flag in r19. // ia64_os_init_dispatch_monarch: mov r19=1 // Bow, bow, ye lower middle classes! br.sptk ia64_os_init_dispatch ia64_os_init_dispatch_slave: mov r19=0 // <igor>yeth, mathter</igor> ia64_os_init_dispatch: mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack LOAD_PHYSICAL(p0,r2,1f) // return address br.sptk ia64_state_save // save the state that is not in minstate 1: // switch to per cpu INIT stack mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack LOAD_PHYSICAL(p0,r2,1f) // return address br.sptk ia64_new_stack 1: // everything saved, now we can set the kernel registers mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack LOAD_PHYSICAL(p0,r2,1f) // return address br.sptk ia64_set_kernel_registers 1: // This must be done in physical mode GET_IA64_MCA_DATA(r2) ;; mov r7=r2 // Enter virtual mode from physical mode VIRTUAL_MODE_ENTER(r2, r3, ia64_os_init_virtual_begin, r4) // This code returns to SAL via SOS r2, in general SAL has no unwind // data. To get a clean termination when backtracing the C MCA/INIT // handler, set a dummy return address of 0 in this routine. That // requires that ia64_os_init_virtual_begin be a global function. ENTRY(ia64_os_init_virtual_begin) .prologue .save rp,r0 .body mov ar.rsc=3 // set eager mode for C handler mov r2=r7 // see GET_IA64_MCA_DATA above ;; // Call virtual mode handler alloc r14=ar.pfs,0,0,3,0 ;; DATA_PA_TO_VA(r2,r7) ;; add out0=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_PT_REGS_OFFSET, r2 add out1=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_SWITCH_STACK_OFFSET, r2 add out2=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_SOS_OFFSET, r2 br.call.sptk.many b0=ia64_init_handler // Revert back to physical mode before going back to SAL PHYSICAL_MODE_ENTER(r2, r3, ia64_os_init_virtual_end, r4) ia64_os_init_virtual_end: END(ia64_os_init_virtual_begin) mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack LOAD_PHYSICAL(p0,r2,1f) // return address br.sptk ia64_state_restore // restore the SAL state 1: // switch back to previous stack alloc r14=ar.pfs,0,0,0,0 // remove the INIT handler frame mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack LOAD_PHYSICAL(p0,r2,1f) // return address br.sptk ia64_old_stack 1: mov b0=r12 // SAL_CHECK return address br b0 //EndMain////////////////////////////////////////////////////////////////////// // common defines for the stubs #define ms r4 #define regs r5 #define temp1 r2 /* careful, it overlaps with input registers */ #define temp2 r3 /* careful, it overlaps with input registers */ #define temp3 r7 #define temp4 r14 //++ // Name: // ia64_state_save() // // Stub Description: // // Save the state that is not in minstate. 
This is sensitive to the layout of // struct ia64_sal_os_state in mca.h. // // r2 contains the return address, r3 contains either // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET. // // The OS to SAL section of struct ia64_sal_os_state is set to a default // value of cold boot (MCA) or warm boot (INIT) and return to the same // context. ia64_sal_os_state is also used to hold some registers that // need to be saved and restored across the stack switches. // // Most input registers to this stub come from PAL/SAL // r1 os gp, physical // r8 pal_proc entry point // r9 sal_proc entry point // r10 sal gp // r11 MCA - rendevzous state, INIT - reason code // r12 sal return address // r17 pal min_state // r18 processor state parameter // r19 monarch flag, set by the caller of this routine // // In addition to the SAL to OS state, this routine saves all the // registers that appear in struct pt_regs and struct switch_stack, // excluding those that are already in the PAL minstate area. This // results in a partial pt_regs and switch_stack, the C code copies the // remaining registers from PAL minstate to pt_regs and switch_stack. The // resulting structures contain all the state of the original process when // MCA/INIT occurred. // //-- ia64_state_save: add regs=MCA_SOS_OFFSET, r3 add ms=MCA_SOS_OFFSET+8, r3 mov b0=r2 // save return address cmp.eq p1,p2=IA64_MCA_CPU_MCA_STACK_OFFSET, r3 ;; GET_IA64_MCA_DATA(temp2) ;; add temp1=temp2, regs // struct ia64_sal_os_state on MCA or INIT stack add temp2=temp2, ms // struct ia64_sal_os_state+8 on MCA or INIT stack ;; mov regs=temp1 // save the start of sos st8 [temp1]=r1,16 // os_gp st8 [temp2]=r8,16 // pal_proc ;; st8 [temp1]=r9,16 // sal_proc st8 [temp2]=r11,16 // rv_rc mov r11=cr.iipa ;; st8 [temp1]=r18 // proc_state_param st8 [temp2]=r19 // monarch mov r6=IA64_KR(CURRENT) add temp1=SOS(SAL_RA), regs add temp2=SOS(SAL_GP), regs ;; st8 [temp1]=r12,16 // sal_ra st8 [temp2]=r10,16 // sal_gp mov r12=cr.isr ;; st8 [temp1]=r17,16 // pal_min_state st8 [temp2]=r6,16 // prev_IA64_KR_CURRENT mov r6=IA64_KR(CURRENT_STACK) ;; st8 [temp1]=r6,16 // prev_IA64_KR_CURRENT_STACK st8 [temp2]=r0,16 // prev_task, starts off as NULL mov r6=cr.ifa ;; st8 [temp1]=r12,16 // cr.isr st8 [temp2]=r6,16 // cr.ifa mov r12=cr.itir ;; st8 [temp1]=r12,16 // cr.itir st8 [temp2]=r11,16 // cr.iipa mov r12=cr.iim ;; st8 [temp1]=r12 // cr.iim (p1) mov r12=IA64_MCA_COLD_BOOT (p2) mov r12=IA64_INIT_WARM_BOOT mov r6=cr.iha add temp1=SOS(OS_STATUS), regs ;; st8 [temp2]=r6 // cr.iha add temp2=SOS(CONTEXT), regs st8 [temp1]=r12 // os_status, default is cold boot mov r6=IA64_MCA_SAME_CONTEXT ;; st8 [temp2]=r6 // context, default is same context // Save the pt_regs data that is not in minstate. The previous code // left regs at sos. 
add regs=MCA_PT_REGS_OFFSET-MCA_SOS_OFFSET, regs ;; add temp1=PT(B6), regs mov temp3=b6 mov temp4=b7 add temp2=PT(B7), regs ;; st8 [temp1]=temp3,PT(AR_CSD)-PT(B6) // save b6 st8 [temp2]=temp4,PT(AR_SSD)-PT(B7) // save b7 mov temp3=ar.csd mov temp4=ar.ssd cover // must be last in group ;; st8 [temp1]=temp3,PT(AR_UNAT)-PT(AR_CSD) // save ar.csd st8 [temp2]=temp4,PT(AR_PFS)-PT(AR_SSD) // save ar.ssd mov temp3=ar.unat mov temp4=ar.pfs ;; st8 [temp1]=temp3,PT(AR_RNAT)-PT(AR_UNAT) // save ar.unat st8 [temp2]=temp4,PT(AR_BSPSTORE)-PT(AR_PFS) // save ar.pfs mov temp3=ar.rnat mov temp4=ar.bspstore ;; st8 [temp1]=temp3,PT(LOADRS)-PT(AR_RNAT) // save ar.rnat st8 [temp2]=temp4,PT(AR_FPSR)-PT(AR_BSPSTORE) // save ar.bspstore mov temp3=ar.bsp ;; sub temp3=temp3, temp4 // ar.bsp - ar.bspstore mov temp4=ar.fpsr ;; shl temp3=temp3,16 // compute ar.rsc to be used for "loadrs" ;; st8 [temp1]=temp3,PT(AR_CCV)-PT(LOADRS) // save loadrs st8 [temp2]=temp4,PT(F6)-PT(AR_FPSR) // save ar.fpsr mov temp3=ar.ccv ;; st8 [temp1]=temp3,PT(F7)-PT(AR_CCV) // save ar.ccv stf.spill [temp2]=f6,PT(F8)-PT(F6) ;; stf.spill [temp1]=f7,PT(F9)-PT(F7) stf.spill [temp2]=f8,PT(F10)-PT(F8) ;; stf.spill [temp1]=f9,PT(F11)-PT(F9) stf.spill [temp2]=f10 ;; stf.spill [temp1]=f11 // Save the switch_stack data that is not in minstate nor pt_regs. The // previous code left regs at pt_regs. add regs=MCA_SWITCH_STACK_OFFSET-MCA_PT_REGS_OFFSET, regs ;; add temp1=SW(F2), regs add temp2=SW(F3), regs ;; stf.spill [temp1]=f2,32 stf.spill [temp2]=f3,32 ;; stf.spill [temp1]=f4,32 stf.spill [temp2]=f5,32 ;; stf.spill [temp1]=f12,32 stf.spill [temp2]=f13,32 ;; stf.spill [temp1]=f14,32 stf.spill [temp2]=f15,32 ;; stf.spill [temp1]=f16,32 stf.spill [temp2]=f17,32 ;; stf.spill [temp1]=f18,32 stf.spill [temp2]=f19,32 ;; stf.spill [temp1]=f20,32 stf.spill [temp2]=f21,32 ;; stf.spill [temp1]=f22,32 stf.spill [temp2]=f23,32 ;; stf.spill [temp1]=f24,32 stf.spill [temp2]=f25,32 ;; stf.spill [temp1]=f26,32 stf.spill [temp2]=f27,32 ;; stf.spill [temp1]=f28,32 stf.spill [temp2]=f29,32 ;; stf.spill [temp1]=f30,SW(B2)-SW(F30) stf.spill [temp2]=f31,SW(B3)-SW(F31) mov temp3=b2 mov temp4=b3 ;; st8 [temp1]=temp3,16 // save b2 st8 [temp2]=temp4,16 // save b3 mov temp3=b4 mov temp4=b5 ;; st8 [temp1]=temp3,SW(AR_LC)-SW(B4) // save b4 st8 [temp2]=temp4 // save b5 mov temp3=ar.lc ;; st8 [temp1]=temp3 // save ar.lc // FIXME: Some proms are incorrectly accessing the minstate area as // cached data. The C code uses region 6, uncached virtual. Ensure // that there is no cache data lying around for the first 1K of the // minstate area. // Remove this code in September 2006, that gives platforms a year to // fix their proms and get their customers updated. 
add r1=32*1,r17 add r2=32*2,r17 add r3=32*3,r17 add r4=32*4,r17 add r5=32*5,r17 add r6=32*6,r17 add r7=32*7,r17 ;; fc r17 fc r1 fc r2 fc r3 fc r4 fc r5 fc r6 fc r7 add r17=32*8,r17 add r1=32*8,r1 add r2=32*8,r2 add r3=32*8,r3 add r4=32*8,r4 add r5=32*8,r5 add r6=32*8,r6 add r7=32*8,r7 ;; fc r17 fc r1 fc r2 fc r3 fc r4 fc r5 fc r6 fc r7 add r17=32*8,r17 add r1=32*8,r1 add r2=32*8,r2 add r3=32*8,r3 add r4=32*8,r4 add r5=32*8,r5 add r6=32*8,r6 add r7=32*8,r7 ;; fc r17 fc r1 fc r2 fc r3 fc r4 fc r5 fc r6 fc r7 add r17=32*8,r17 add r1=32*8,r1 add r2=32*8,r2 add r3=32*8,r3 add r4=32*8,r4 add r5=32*8,r5 add r6=32*8,r6 add r7=32*8,r7 ;; fc r17 fc r1 fc r2 fc r3 fc r4 fc r5 fc r6 fc r7 br.sptk b0 //EndStub////////////////////////////////////////////////////////////////////// //++ // Name: // ia64_state_restore() // // Stub Description: // // Restore the SAL/OS state. This is sensitive to the layout of struct // ia64_sal_os_state in mca.h. // // r2 contains the return address, r3 contains either // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET. // // In addition to the SAL to OS state, this routine restores all the // registers that appear in struct pt_regs and struct switch_stack, // excluding those in the PAL minstate area. // //-- ia64_state_restore: // Restore the switch_stack data that is not in minstate nor pt_regs. add regs=MCA_SWITCH_STACK_OFFSET, r3 mov b0=r2 // save return address ;; GET_IA64_MCA_DATA(temp2) ;; add regs=temp2, regs ;; add temp1=SW(F2), regs add temp2=SW(F3), regs ;; ldf.fill f2=[temp1],32 ldf.fill f3=[temp2],32 ;; ldf.fill f4=[temp1],32 ldf.fill f5=[temp2],32 ;; ldf.fill f12=[temp1],32 ldf.fill f13=[temp2],32 ;; ldf.fill f14=[temp1],32 ldf.fill f15=[temp2],32 ;; ldf.fill f16=[temp1],32 ldf.fill f17=[temp2],32 ;; ldf.fill f18=[temp1],32 ldf.fill f19=[temp2],32 ;; ldf.fill f20=[temp1],32 ldf.fill f21=[temp2],32 ;; ldf.fill f22=[temp1],32 ldf.fill f23=[temp2],32 ;; ldf.fill f24=[temp1],32 ldf.fill f25=[temp2],32 ;; ldf.fill f26=[temp1],32 ldf.fill f27=[temp2],32 ;; ldf.fill f28=[temp1],32 ldf.fill f29=[temp2],32 ;; ldf.fill f30=[temp1],SW(B2)-SW(F30) ldf.fill f31=[temp2],SW(B3)-SW(F31) ;; ld8 temp3=[temp1],16 // restore b2 ld8 temp4=[temp2],16 // restore b3 ;; mov b2=temp3 mov b3=temp4 ld8 temp3=[temp1],SW(AR_LC)-SW(B4) // restore b4 ld8 temp4=[temp2] // restore b5 ;; mov b4=temp3 mov b5=temp4 ld8 temp3=[temp1] // restore ar.lc ;; mov ar.lc=temp3 // Restore the pt_regs data that is not in minstate. The previous code // left regs at switch_stack. add regs=MCA_PT_REGS_OFFSET-MCA_SWITCH_STACK_OFFSET, regs ;; add temp1=PT(B6), regs add temp2=PT(B7), regs ;; ld8 temp3=[temp1],PT(AR_CSD)-PT(B6) // restore b6 ld8 temp4=[temp2],PT(AR_SSD)-PT(B7) // restore b7 ;; mov b6=temp3 mov b7=temp4 ld8 temp3=[temp1],PT(AR_UNAT)-PT(AR_CSD) // restore ar.csd ld8 temp4=[temp2],PT(AR_PFS)-PT(AR_SSD) // restore ar.ssd ;; mov ar.csd=temp3 mov ar.ssd=temp4 ld8 temp3=[temp1] // restore ar.unat add temp1=PT(AR_CCV)-PT(AR_UNAT), temp1 ld8 temp4=[temp2],PT(AR_FPSR)-PT(AR_PFS) // restore ar.pfs ;; mov ar.unat=temp3 mov ar.pfs=temp4 // ar.rnat, ar.bspstore, loadrs are restore in ia64_old_stack. ld8 temp3=[temp1],PT(F6)-PT(AR_CCV) // restore ar.ccv ld8 temp4=[temp2],PT(F7)-PT(AR_FPSR) // restore ar.fpsr ;; mov ar.ccv=temp3 mov ar.fpsr=temp4 ldf.fill f6=[temp1],PT(F8)-PT(F6) ldf.fill f7=[temp2],PT(F9)-PT(F7) ;; ldf.fill f8=[temp1],PT(F10)-PT(F8) ldf.fill f9=[temp2],PT(F11)-PT(F9) ;; ldf.fill f10=[temp1] ldf.fill f11=[temp2] // Restore the SAL to OS state. 
The previous code left regs at pt_regs. add regs=MCA_SOS_OFFSET-MCA_PT_REGS_OFFSET, regs ;; add temp1=SOS(SAL_RA), regs add temp2=SOS(SAL_GP), regs ;; ld8 r12=[temp1],16 // sal_ra ld8 r9=[temp2],16 // sal_gp ;; ld8 r22=[temp1],16 // pal_min_state, virtual ld8 r13=[temp2],16 // prev_IA64_KR_CURRENT ;; ld8 r16=[temp1],16 // prev_IA64_KR_CURRENT_STACK ld8 r20=[temp2],16 // prev_task ;; ld8 temp3=[temp1],16 // cr.isr ld8 temp4=[temp2],16 // cr.ifa ;; mov cr.isr=temp3 mov cr.ifa=temp4 ld8 temp3=[temp1],16 // cr.itir ld8 temp4=[temp2],16 // cr.iipa ;; mov cr.itir=temp3 mov cr.iipa=temp4 ld8 temp3=[temp1] // cr.iim ld8 temp4=[temp2] // cr.iha add temp1=SOS(OS_STATUS), regs add temp2=SOS(CONTEXT), regs ;; mov cr.iim=temp3 mov cr.iha=temp4 dep r22=0,r22,62,1 // pal_min_state, physical, uncached mov IA64_KR(CURRENT)=r13 ld8 r8=[temp1] // os_status ld8 r10=[temp2] // context /* Wire IA64_TR_CURRENT_STACK to the stack that we are resuming to. To * avoid any dependencies on the algorithm in ia64_switch_to(), just * purge any existing CURRENT_STACK mapping and insert the new one. * * r16 contains prev_IA64_KR_CURRENT_STACK, r13 contains * prev_IA64_KR_CURRENT, these values may have been changed by the C * code. Do not use r8, r9, r10, r22, they contain values ready for * the return to SAL. */ mov r15=IA64_KR(CURRENT_STACK) // physical granule mapped by IA64_TR_CURRENT_STACK ;; shl r15=r15,IA64_GRANULE_SHIFT ;; dep r15=-1,r15,61,3 // virtual granule mov r18=IA64_GRANULE_SHIFT<<2 // for cr.itir.ps ;; ptr.d r15,r18 ;; srlz.d extr.u r19=r13,61,3 // r13 = prev_IA64_KR_CURRENT shl r20=r16,IA64_GRANULE_SHIFT // r16 = prev_IA64_KR_CURRENT_STACK movl r21=PAGE_KERNEL // page properties ;; mov IA64_KR(CURRENT_STACK)=r16 cmp.ne p6,p0=RGN_KERNEL,r19 // new stack is in the kernel region? or r21=r20,r21 // construct PA | page properties (p6) br.spnt 1f // the dreaded cpu 0 idle task in region 5:( ;; mov cr.itir=r18 mov cr.ifa=r13 mov r20=IA64_TR_CURRENT_STACK ;; itr.d dtr[r20]=r21 ;; srlz.d 1: br.sptk b0 //EndStub////////////////////////////////////////////////////////////////////// //++ // Name: // ia64_new_stack() // // Stub Description: // // Switch to the MCA/INIT stack. // // r2 contains the return address, r3 contains either // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET. // // On entry RBS is still on the original stack, this routine switches RBS // to use the MCA/INIT stack. // // On entry, sos->pal_min_state is physical, on exit it is virtual. // //-- ia64_new_stack: add regs=MCA_PT_REGS_OFFSET, r3 add temp2=MCA_SOS_OFFSET+SOS(PAL_MIN_STATE), r3 mov b0=r2 // save return address GET_IA64_MCA_DATA(temp1) invala ;; add temp2=temp2, temp1 // struct ia64_sal_os_state.pal_min_state on MCA or INIT stack add regs=regs, temp1 // struct pt_regs on MCA or INIT stack ;; // Address of minstate area provided by PAL is physical, uncacheable. // Convert to Linux virtual address in region 6 for C code. ld8 ms=[temp2] // pal_min_state, physical ;; dep temp1=-1,ms,62,2 // set region 6 mov temp3=IA64_RBS_OFFSET-MCA_PT_REGS_OFFSET ;; st8 [temp2]=temp1 // pal_min_state, virtual add temp4=temp3, regs // start of bspstore on new stack ;; mov ar.bspstore=temp4 // switch RBS to MCA/INIT stack ;; flushrs // must be first in group br.sptk b0 //EndStub////////////////////////////////////////////////////////////////////// //++ // Name: // ia64_old_stack() // // Stub Description: // // Switch to the old stack. 
// // r2 contains the return address, r3 contains either // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET. // // On entry, pal_min_state is virtual, on exit it is physical. // // On entry RBS is on the MCA/INIT stack, this routine switches RBS // back to the previous stack. // // The psr is set to all zeroes. SAL return requires either all zeroes or // just psr.mc set. Leaving psr.mc off allows INIT to be issued if this // code does not perform correctly. // // The dirty registers at the time of the event were flushed to the // MCA/INIT stack in ia64_pt_regs_save(). Restore the dirty registers // before reverting to the previous bspstore. //-- ia64_old_stack: add regs=MCA_PT_REGS_OFFSET, r3 mov b0=r2 // save return address GET_IA64_MCA_DATA(temp2) LOAD_PHYSICAL(p0,temp1,1f) ;; mov cr.ipsr=r0 mov cr.ifs=r0 mov cr.iip=temp1 ;; invala rfi 1: add regs=regs, temp2 // struct pt_regs on MCA or INIT stack ;; add temp1=PT(LOADRS), regs ;; ld8 temp2=[temp1],PT(AR_BSPSTORE)-PT(LOADRS) // restore loadrs ;; ld8 temp3=[temp1],PT(AR_RNAT)-PT(AR_BSPSTORE) // restore ar.bspstore mov ar.rsc=temp2 ;; loadrs ld8 temp4=[temp1] // restore ar.rnat ;; mov ar.bspstore=temp3 // back to old stack ;; mov ar.rnat=temp4 ;; br.sptk b0 //EndStub////////////////////////////////////////////////////////////////////// //++ // Name: // ia64_set_kernel_registers() // // Stub Description: // // Set the registers that are required by the C code in order to run on an // MCA/INIT stack. // // r2 contains the return address, r3 contains either // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET. // //-- ia64_set_kernel_registers: add temp3=MCA_SP_OFFSET, r3 mov b0=r2 // save return address GET_IA64_MCA_DATA(temp1) ;; add r12=temp1, temp3 // kernel stack pointer on MCA/INIT stack add r13=temp1, r3 // set current to start of MCA/INIT stack add r20=temp1, r3 // physical start of MCA/INIT stack ;; DATA_PA_TO_VA(r12,temp2) DATA_PA_TO_VA(r13,temp3) ;; mov IA64_KR(CURRENT)=r13 /* Wire IA64_TR_CURRENT_STACK to the MCA/INIT handler stack. To avoid * any dependencies on the algorithm in ia64_switch_to(), just purge * any existing CURRENT_STACK mapping and insert the new one. */ mov r16=IA64_KR(CURRENT_STACK) // physical granule mapped by IA64_TR_CURRENT_STACK ;; shl r16=r16,IA64_GRANULE_SHIFT ;; dep r16=-1,r16,61,3 // virtual granule mov r18=IA64_GRANULE_SHIFT<<2 // for cr.itir.ps ;; ptr.d r16,r18 ;; srlz.d shr.u r16=r20,IA64_GRANULE_SHIFT // r20 = physical start of MCA/INIT stack movl r21=PAGE_KERNEL // page properties ;; mov IA64_KR(CURRENT_STACK)=r16 or r21=r20,r21 // construct PA | page properties ;; mov cr.itir=r18 mov cr.ifa=r13 mov r20=IA64_TR_CURRENT_STACK movl r17=FPSR_DEFAULT ;; mov.m ar.fpsr=r17 // set ar.fpsr to kernel default value ;; itr.d dtr[r20]=r21 ;; srlz.d br.sptk b0 //EndStub////////////////////////////////////////////////////////////////////// #undef ms #undef regs #undef temp1 #undef temp2 #undef temp3 #undef temp4 // Support function for mca.c, it is here to avoid using inline asm. Given the // address of an rnat slot, if that address is below the current ar.bspstore // then return the contents of that slot, otherwise return the contents of // ar.rnat. GLOBAL_ENTRY(ia64_get_rnat) alloc r14=ar.pfs,1,0,0,0 mov ar.rsc=0 ;; mov r14=ar.bspstore ;; cmp.lt p6,p7=in0,r14 ;; (p6) ld8 r8=[in0] (p7) mov r8=ar.rnat mov ar.rsc=3 br.ret.sptk.many rp END(ia64_get_rnat) // void ia64_set_psr_mc(void) // // Set psr.mc bit to mask MCA/INIT. 
GLOBAL_ENTRY(ia64_set_psr_mc) rsm psr.i | psr.ic // disable interrupts ;; srlz.d ;; mov r14 = psr // get psr{36:35,31:0} movl r15 = 1f ;; dep r14 = -1, r14, PSR_MC, 1 // set psr.mc ;; dep r14 = -1, r14, PSR_IC, 1 // set psr.ic ;; dep r14 = -1, r14, PSR_BN, 1 // keep bank1 in use ;; mov cr.ipsr = r14 mov cr.ifs = r0 mov cr.iip = r15 ;; rfi 1: br.ret.sptk.many rp END(ia64_set_psr_mc)
AirFortressIlikara/LS2K0300-linux-4.19
6,200
arch/ia64/lib/clear_user.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * This routine clears to zero a linear memory buffer in user space. * * Inputs: * in0: address of buffer * in1: length of buffer in bytes * Outputs: * r8: number of bytes that didn't get cleared due to a fault * * Copyright (C) 1998, 1999, 2001 Hewlett-Packard Co * Stephane Eranian <eranian@hpl.hp.com> */ #include <asm/asmmacro.h> #include <asm/export.h> // // arguments // #define buf r32 #define len r33 // // local registers // #define cnt r16 #define buf2 r17 #define saved_lc r18 #define saved_pfs r19 #define tmp r20 #define len2 r21 #define len3 r22 // // Theory of operations: // - we check whether or not the buffer is small, i.e., less than 17 // in which case we do the byte by byte loop. // // - Otherwise we go progressively from 1 byte store to 8byte store in // the head part, the body is a 16byte store loop and we finish we the // tail for the last 15 bytes. // The good point about this breakdown is that the long buffer handling // contains only 2 branches. // // The reason for not using shifting & masking for both the head and the // tail is to stay semantically correct. This routine is not supposed // to write bytes outside of the buffer. While most of the time this would // be ok, we can't tolerate a mistake. A classical example is the case // of multithreaded code were to the extra bytes touched is actually owned // by another thread which runs concurrently to ours. Another, less likely, // example is with device drivers where reading an I/O mapped location may // have side effects (same thing for writing). // GLOBAL_ENTRY(__do_clear_user) .prologue .save ar.pfs, saved_pfs alloc saved_pfs=ar.pfs,2,0,0,0 cmp.eq p6,p0=r0,len // check for zero length .save ar.lc, saved_lc mov saved_lc=ar.lc // preserve ar.lc (slow) .body ;; // avoid WAW on CFM adds tmp=-1,len // br.ctop is repeat/until mov ret0=len // return value is length at this point (p6) br.ret.spnt.many rp ;; cmp.lt p6,p0=16,len // if len > 16 then long memset mov ar.lc=tmp // initialize lc for small count (p6) br.cond.dptk .long_do_clear ;; // WAR on ar.lc // // worst case 16 iterations, avg 8 iterations // // We could have played with the predicates to use the extra // M slot for 2 stores/iteration but the cost the initialization // the various counters compared to how long the loop is supposed // to last on average does not make this solution viable. // 1: EX( .Lexit1, st1 [buf]=r0,1 ) adds len=-1,len // countdown length using len br.cloop.dptk 1b ;; // avoid RAW on ar.lc // // .Lexit4: comes from byte by byte loop // len contains bytes left .Lexit1: mov ret0=len // faster than using ar.lc mov ar.lc=saved_lc br.ret.sptk.many rp // end of short clear_user // // At this point we know we have more than 16 bytes to copy // so we focus on alignment (no branches required) // // The use of len/len2 for countdown of the number of bytes left // instead of ret0 is due to the fact that the exception code // changes the values of r8. 
// .long_do_clear: tbit.nz p6,p0=buf,0 // odd alignment (for long_do_clear) ;; EX( .Lexit3, (p6) st1 [buf]=r0,1 ) // 1-byte aligned (p6) adds len=-1,len;; // sync because buf is modified tbit.nz p6,p0=buf,1 ;; EX( .Lexit3, (p6) st2 [buf]=r0,2 ) // 2-byte aligned (p6) adds len=-2,len;; tbit.nz p6,p0=buf,2 ;; EX( .Lexit3, (p6) st4 [buf]=r0,4 ) // 4-byte aligned (p6) adds len=-4,len;; tbit.nz p6,p0=buf,3 ;; EX( .Lexit3, (p6) st8 [buf]=r0,8 ) // 8-byte aligned (p6) adds len=-8,len;; shr.u cnt=len,4 // number of 128-bit (2x64bit) words ;; cmp.eq p6,p0=r0,cnt adds tmp=-1,cnt (p6) br.cond.dpnt .dotail // we have less than 16 bytes left ;; adds buf2=8,buf // setup second base pointer mov ar.lc=tmp ;; // // 16bytes/iteration core loop // // The second store can never generate a fault because // we come into the loop only when we are 16-byte aligned. // This means that if we cross a page then it will always be // in the first store and never in the second. // // // We need to keep track of the remaining length. A possible (optimistic) // way would be to use ar.lc and derive how many byte were left by // doing : left= 16*ar.lc + 16. this would avoid the addition at // every iteration. // However we need to keep the synchronization point. A template // M;;MB does not exist and thus we can keep the addition at no // extra cycle cost (use a nop slot anyway). It also simplifies the // (unlikely) error recovery code // 2: EX(.Lexit3, st8 [buf]=r0,16 ) ;; // needed to get len correct when error st8 [buf2]=r0,16 adds len=-16,len br.cloop.dptk 2b ;; mov ar.lc=saved_lc // // tail correction based on len only // // We alternate the use of len3,len2 to allow parallelism and correct // error handling. We also reuse p6/p7 to return correct value. // The addition of len2/len3 does not cost anything more compared to // the regular memset as we had empty slots. // .dotail: mov len2=len // for parallelization of error handling mov len3=len tbit.nz p6,p0=len,3 ;; EX( .Lexit2, (p6) st8 [buf]=r0,8 ) // at least 8 bytes (p6) adds len3=-8,len2 tbit.nz p7,p6=len,2 ;; EX( .Lexit2, (p7) st4 [buf]=r0,4 ) // at least 4 bytes (p7) adds len2=-4,len3 tbit.nz p6,p7=len,1 ;; EX( .Lexit2, (p6) st2 [buf]=r0,2 ) // at least 2 bytes (p6) adds len3=-2,len2 tbit.nz p7,p6=len,0 ;; EX( .Lexit2, (p7) st1 [buf]=r0 ) // only 1 byte left mov ret0=r0 // success br.ret.sptk.many rp // end of most likely path // // Outlined error handling code // // // .Lexit3: comes from core loop, need restore pr/lc // len contains bytes left // // // .Lexit2: // if p6 -> coming from st8 or st2 : len2 contains what's left // if p7 -> coming from st4 or st1 : len3 contains what's left // We must restore lc/pr even though might not have been used. .Lexit2: .pred.rel "mutex", p6, p7 (p6) mov len=len2 (p7) mov len=len3 ;; // // .Lexit4: comes from head, need not restore pr/lc // len contains bytes left // .Lexit3: mov ret0=len mov ar.lc=saved_lc br.ret.sptk.many rp END(__do_clear_user) EXPORT_SYMBOL(__do_clear_user)
AirFortressIlikara/LS2K0300-linux-4.19
2,761
arch/ia64/lib/flush.S
/* * Cache flushing routines. * * Copyright (C) 1999-2001, 2005 Hewlett-Packard Co * David Mosberger-Tang <davidm@hpl.hp.com> * * 05/28/05 Zoltan Menyhart Dynamic stride size */ #include <asm/asmmacro.h> #include <asm/export.h> /* * flush_icache_range(start,end) * * Make i-cache(s) coherent with d-caches. * * Must deal with range from start to end-1 but nothing else (need to * be careful not to touch addresses that may be unmapped). * * Note: "in0" and "in1" are preserved for debugging purposes. */ .section .kprobes.text,"ax" GLOBAL_ENTRY(flush_icache_range) .prologue alloc r2=ar.pfs,2,0,0,0 movl r3=ia64_i_cache_stride_shift mov r21=1 ;; ld8 r20=[r3] // r20: stride shift sub r22=in1,r0,1 // last byte address ;; shr.u r23=in0,r20 // start / (stride size) shr.u r22=r22,r20 // (last byte address) / (stride size) shl r21=r21,r20 // r21: stride size of the i-cache(s) ;; sub r8=r22,r23 // number of strides - 1 shl r24=r23,r20 // r24: addresses for "fc.i" = // "start" rounded down to stride boundary .save ar.lc,r3 mov r3=ar.lc // save ar.lc ;; .body mov ar.lc=r8 ;; /* * 32 byte aligned loop, even number of (actually 2) bundles */ .Loop: fc.i r24 // issuable on M0 only add r24=r21,r24 // we flush "stride size" bytes per iteration nop.i 0 br.cloop.sptk.few .Loop ;; sync.i ;; srlz.i ;; mov ar.lc=r3 // restore ar.lc br.ret.sptk.many rp END(flush_icache_range) EXPORT_SYMBOL_GPL(flush_icache_range) /* * clflush_cache_range(start,size) * * Flush cache lines from start to start+size-1. * * Must deal with range from start to start+size-1 but nothing else * (need to be careful not to touch addresses that may be * unmapped). * * Note: "in0" and "in1" are preserved for debugging purposes. */ .section .kprobes.text,"ax" GLOBAL_ENTRY(clflush_cache_range) .prologue alloc r2=ar.pfs,2,0,0,0 movl r3=ia64_cache_stride_shift mov r21=1 add r22=in1,in0 ;; ld8 r20=[r3] // r20: stride shift sub r22=r22,r0,1 // last byte address ;; shr.u r23=in0,r20 // start / (stride size) shr.u r22=r22,r20 // (last byte address) / (stride size) shl r21=r21,r20 // r21: stride size of the i-cache(s) ;; sub r8=r22,r23 // number of strides - 1 shl r24=r23,r20 // r24: addresses for "fc" = // "start" rounded down to stride // boundary .save ar.lc,r3 mov r3=ar.lc // save ar.lc ;; .body mov ar.lc=r8 ;; /* * 32 byte aligned loop, even number of (actually 2) bundles */ .Loop_fc: fc r24 // issuable on M0 only add r24=r21,r24 // we flush "stride size" bytes per iteration nop.i 0 br.cloop.sptk.few .Loop_fc ;; sync.i ;; srlz.i ;; mov ar.lc=r3 // restore ar.lc br.ret.sptk.many rp END(clflush_cache_range)
AirFortressIlikara/LS2K0300-linux-4.19
6,974
arch/ia64/lib/memcpy.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * * Optimized version of the standard memcpy() function * * Inputs: * in0: destination address * in1: source address * in2: number of bytes to copy * Output: * no return value * * Copyright (C) 2000-2001 Hewlett-Packard Co * Stephane Eranian <eranian@hpl.hp.com> * David Mosberger-Tang <davidm@hpl.hp.com> */ #include <asm/asmmacro.h> #include <asm/export.h> GLOBAL_ENTRY(memcpy) # define MEM_LAT 21 /* latency to memory */ # define dst r2 # define src r3 # define retval r8 # define saved_pfs r9 # define saved_lc r10 # define saved_pr r11 # define cnt r16 # define src2 r17 # define t0 r18 # define t1 r19 # define t2 r20 # define t3 r21 # define t4 r22 # define src_end r23 # define N (MEM_LAT + 4) # define Nrot ((N + 7) & ~7) /* * First, check if everything (src, dst, len) is a multiple of eight. If * so, we handle everything with no taken branches (other than the loop * itself) and a small icache footprint. Otherwise, we jump off to * the more general copy routine handling arbitrary * sizes/alignment etc. */ .prologue .save ar.pfs, saved_pfs alloc saved_pfs=ar.pfs,3,Nrot,0,Nrot .save ar.lc, saved_lc mov saved_lc=ar.lc or t0=in0,in1 ;; or t0=t0,in2 .save pr, saved_pr mov saved_pr=pr .body cmp.eq p6,p0=in2,r0 // zero length? mov retval=in0 // return dst (p6) br.ret.spnt.many rp // zero length, return immediately ;; mov dst=in0 // copy because of rotation shr.u cnt=in2,3 // number of 8-byte words to copy mov pr.rot=1<<16 ;; adds cnt=-1,cnt // br.ctop is repeat/until cmp.gtu p7,p0=16,in2 // copying less than 16 bytes? mov ar.ec=N ;; and t0=0x7,t0 mov ar.lc=cnt ;; cmp.ne p6,p0=t0,r0 mov src=in1 // copy because of rotation (p7) br.cond.spnt.few .memcpy_short (p6) br.cond.spnt.few .memcpy_long ;; nop.m 0 ;; nop.m 0 nop.i 0 ;; nop.m 0 ;; .rotr val[N] .rotp p[N] .align 32 1: { .mib (p[0]) ld8 val[0]=[src],8 nop.i 0 brp.loop.imp 1b, 2f } 2: { .mfb (p[N-1])st8 [dst]=val[N-1],8 nop.f 0 br.ctop.dptk.few 1b } ;; mov ar.lc=saved_lc mov pr=saved_pr,-1 mov ar.pfs=saved_pfs br.ret.sptk.many rp /* * Small (<16 bytes) unaligned copying is done via a simple byte-at-the-time * copy loop. This performs relatively poorly on Itanium, but it doesn't * get used very often (gcc inlines small copies) and due to atomicity * issues, we want to avoid read-modify-write of entire words. */ .align 32 .memcpy_short: adds cnt=-1,in2 // br.ctop is repeat/until mov ar.ec=MEM_LAT brp.loop.imp 1f, 2f ;; mov ar.lc=cnt ;; nop.m 0 ;; nop.m 0 nop.i 0 ;; nop.m 0 ;; nop.m 0 ;; /* * It is faster to put a stop bit in the loop here because it makes * the pipeline shorter (and latency is what matters on short copies). */ .align 32 1: { .mib (p[0]) ld1 val[0]=[src],1 nop.i 0 brp.loop.imp 1b, 2f } ;; 2: { .mfb (p[MEM_LAT-1])st1 [dst]=val[MEM_LAT-1],1 nop.f 0 br.ctop.dptk.few 1b } ;; mov ar.lc=saved_lc mov pr=saved_pr,-1 mov ar.pfs=saved_pfs br.ret.sptk.many rp /* * Large (>= 16 bytes) copying is done in a fancy way. Latency isn't * an overriding concern here, but throughput is. We first do * sub-word copying until the destination is aligned, then we check * if the source is also aligned. If so, we do a simple load/store-loop * until there are less than 8 bytes left over and then we do the tail, * by storing the last few bytes using sub-word copying. If the source * is not aligned, we branch off to the non-congruent loop. * * stage: op: * 0 ld * : * MEM_LAT+3 shrp * MEM_LAT+4 st * * On Itanium, the pipeline itself runs without stalls. 
However, br.ctop * seems to introduce an unavoidable bubble in the pipeline so the overall * latency is 2 cycles/iteration. This gives us a _copy_ throughput * of 4 byte/cycle. Still not bad. */ # undef N # undef Nrot # define N (MEM_LAT + 5) /* number of stages */ # define Nrot ((N+1 + 2 + 7) & ~7) /* number of rotating regs */ #define LOG_LOOP_SIZE 6 .memcpy_long: alloc t3=ar.pfs,3,Nrot,0,Nrot // resize register frame and t0=-8,src // t0 = src & ~7 and t2=7,src // t2 = src & 7 ;; ld8 t0=[t0] // t0 = 1st source word adds src2=7,src // src2 = (src + 7) sub t4=r0,dst // t4 = -dst ;; and src2=-8,src2 // src2 = (src + 7) & ~7 shl t2=t2,3 // t2 = 8*(src & 7) shl t4=t4,3 // t4 = 8*(dst & 7) ;; ld8 t1=[src2] // t1 = 1st source word if src is 8-byte aligned, 2nd otherwise sub t3=64,t2 // t3 = 64-8*(src & 7) shr.u t0=t0,t2 ;; add src_end=src,in2 shl t1=t1,t3 mov pr=t4,0x38 // (p5,p4,p3)=(dst & 7) ;; or t0=t0,t1 mov cnt=r0 adds src_end=-1,src_end ;; (p3) st1 [dst]=t0,1 (p3) shr.u t0=t0,8 (p3) adds cnt=1,cnt ;; (p4) st2 [dst]=t0,2 (p4) shr.u t0=t0,16 (p4) adds cnt=2,cnt ;; (p5) st4 [dst]=t0,4 (p5) adds cnt=4,cnt and src_end=-8,src_end // src_end = last word of source buffer ;; // At this point, dst is aligned to 8 bytes and there at least 16-7=9 bytes left to copy: 1:{ add src=cnt,src // make src point to remainder of source buffer sub cnt=in2,cnt // cnt = number of bytes left to copy mov t4=ip } ;; and src2=-8,src // align source pointer adds t4=.memcpy_loops-1b,t4 mov ar.ec=N and t0=7,src // t0 = src & 7 shr.u t2=cnt,3 // t2 = number of 8-byte words left to copy shl cnt=cnt,3 // move bits 0-2 to 3-5 ;; .rotr val[N+1], w[2] .rotp p[N] cmp.ne p6,p0=t0,r0 // is src aligned, too? shl t0=t0,LOG_LOOP_SIZE // t0 = 8*(src & 7) adds t2=-1,t2 // br.ctop is repeat/until ;; add t4=t0,t4 mov pr=cnt,0x38 // set (p5,p4,p3) to # of bytes last-word bytes to copy mov ar.lc=t2 ;; nop.m 0 ;; nop.m 0 nop.i 0 ;; nop.m 0 ;; (p6) ld8 val[1]=[src2],8 // prime the pump... mov b6=t4 br.sptk.few b6 ;; .memcpy_tail: // At this point, (p5,p4,p3) are set to the number of bytes left to copy (which is // less than 8) and t0 contains the last few bytes of the src buffer: (p5) st4 [dst]=t0,4 (p5) shr.u t0=t0,32 mov ar.lc=saved_lc ;; (p4) st2 [dst]=t0,2 (p4) shr.u t0=t0,16 mov ar.pfs=saved_pfs ;; (p3) st1 [dst]=t0 mov pr=saved_pr,-1 br.ret.sptk.many rp /////////////////////////////////////////////////////// .align 64 #define COPY(shift,index) \ 1: { .mib \ (p[0]) ld8 val[0]=[src2],8; \ (p[MEM_LAT+3]) shrp w[0]=val[MEM_LAT+3],val[MEM_LAT+4-index],shift; \ brp.loop.imp 1b, 2f \ }; \ 2: { .mfb \ (p[MEM_LAT+4]) st8 [dst]=w[1],8; \ nop.f 0; \ br.ctop.dptk.few 1b; \ }; \ ;; \ ld8 val[N-1]=[src_end]; /* load last word (may be same as val[N]) */ \ ;; \ shrp t0=val[N-1],val[N-index],shift; \ br .memcpy_tail .memcpy_loops: COPY(0, 1) /* no point special casing this---it doesn't go any faster without shrp */ COPY(8, 0) COPY(16, 0) COPY(24, 0) COPY(32, 0) COPY(40, 0) COPY(48, 0) COPY(56, 0) END(memcpy) EXPORT_SYMBOL(memcpy)
AirFortressIlikara/LS2K0300-linux-4.19
1,990
arch/ia64/lib/clear_page.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 1999-2002 Hewlett-Packard Co * Stephane Eranian <eranian@hpl.hp.com> * David Mosberger-Tang <davidm@hpl.hp.com> * Copyright (C) 2002 Ken Chen <kenneth.w.chen@intel.com> * * 1/06/01 davidm Tuned for Itanium. * 2/12/02 kchen Tuned for both Itanium and McKinley * 3/08/02 davidm Some more tweaking */ #include <asm/asmmacro.h> #include <asm/page.h> #include <asm/export.h> #ifdef CONFIG_ITANIUM # define L3_LINE_SIZE 64 // Itanium L3 line size # define PREFETCH_LINES 9 // magic number #else # define L3_LINE_SIZE 128 // McKinley L3 line size # define PREFETCH_LINES 12 // magic number #endif #define saved_lc r2 #define dst_fetch r3 #define dst1 r8 #define dst2 r9 #define dst3 r10 #define dst4 r11 #define dst_last r31 GLOBAL_ENTRY(clear_page) .prologue .regstk 1,0,0,0 mov r16 = PAGE_SIZE/L3_LINE_SIZE-1 // main loop count, -1=repeat/until .save ar.lc, saved_lc mov saved_lc = ar.lc .body mov ar.lc = (PREFETCH_LINES - 1) mov dst_fetch = in0 adds dst1 = 16, in0 adds dst2 = 32, in0 ;; .fetch: stf.spill.nta [dst_fetch] = f0, L3_LINE_SIZE adds dst3 = 48, in0 // executing this multiple times is harmless br.cloop.sptk.few .fetch ;; addl dst_last = (PAGE_SIZE - PREFETCH_LINES*L3_LINE_SIZE), dst_fetch mov ar.lc = r16 // one L3 line per iteration adds dst4 = 64, in0 ;; #ifdef CONFIG_ITANIUM // Optimized for Itanium 1: stf.spill.nta [dst1] = f0, 64 stf.spill.nta [dst2] = f0, 64 cmp.lt p8,p0=dst_fetch, dst_last ;; #else // Optimized for McKinley 1: stf.spill.nta [dst1] = f0, 64 stf.spill.nta [dst2] = f0, 64 stf.spill.nta [dst3] = f0, 64 stf.spill.nta [dst4] = f0, 128 cmp.lt p8,p0=dst_fetch, dst_last ;; stf.spill.nta [dst1] = f0, 64 stf.spill.nta [dst2] = f0, 64 #endif stf.spill.nta [dst3] = f0, 64 (p8) stf.spill.nta [dst_fetch] = f0, L3_LINE_SIZE br.cloop.sptk.few 1b ;; mov ar.lc = saved_lc // restore lc br.ret.sptk.many rp END(clear_page) EXPORT_SYMBOL(clear_page)
AirFortressIlikara/LS2K0300-linux-4.19
5,959
arch/ia64/lib/copy_page_mck.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * McKinley-optimized version of copy_page(). * * Copyright (C) 2002 Hewlett-Packard Co * David Mosberger <davidm@hpl.hp.com> * * Inputs: * in0: address of target page * in1: address of source page * Output: * no return value * * General idea: * - use regular loads and stores to prefetch data to avoid consuming M-slot just for * lfetches => good for in-cache performance * - avoid l2 bank-conflicts by not storing into the same 16-byte bank within a single * cycle * * Principle of operation: * First, note that L1 has a line-size of 64 bytes and L2 a line-size of 128 bytes. * To avoid secondary misses in L2, we prefetch both source and destination with a line-size * of 128 bytes. When both of these lines are in the L2 and the first half of the * source line is in L1, we start copying the remaining words. The second half of the * source line is prefetched in an earlier iteration, so that by the time we start * accessing it, it's also present in the L1. * * We use a software-pipelined loop to control the overall operation. The pipeline * has 2*PREFETCH_DIST+K stages. The first PREFETCH_DIST stages are used for prefetching * source cache-lines. The second PREFETCH_DIST stages are used for prefetching destination * cache-lines, the last K stages are used to copy the cache-line words not copied by * the prefetches. The four relevant points in the pipelined are called A, B, C, D: * p[A] is TRUE if a source-line should be prefetched, p[B] is TRUE if a destination-line * should be prefetched, p[C] is TRUE if the second half of an L2 line should be brought * into L1D and p[D] is TRUE if a cacheline needs to be copied. * * This all sounds very complicated, but thanks to the modulo-scheduled loop support, * the resulting code is very regular and quite easy to follow (once you get the idea). * * As a secondary optimization, the first 2*PREFETCH_DIST iterations are implemented * as the separate .prefetch_loop. Logically, this loop performs exactly like the * main-loop (.line_copy), but has all known-to-be-predicated-off instructions removed, * so that each loop iteration is faster (again, good for cached case). * * When reading the code, it helps to keep the following picture in mind: * * word 0 word 1 * +------+------+--- * | v[x] | t1 | ^ * | t2 | t3 | | * | t4 | t5 | | * | t6 | t7 | | 128 bytes * | n[y] | t9 | | (L2 cache line) * | t10 | t11 | | * | t12 | t13 | | * | t14 | t15 | v * +------+------+--- * * Here, v[x] is copied by the (memory) prefetch. n[y] is loaded at p[C] * to fetch the second-half of the L2 cache line into L1, and the tX words are copied in * an order that avoids bank conflicts. */ #include <asm/asmmacro.h> #include <asm/page.h> #include <asm/export.h> #define PREFETCH_DIST 8 // McKinley sustains 16 outstanding L2 misses (8 ld, 8 st) #define src0 r2 #define src1 r3 #define dst0 r9 #define dst1 r10 #define src_pre_mem r11 #define dst_pre_mem r14 #define src_pre_l2 r15 #define dst_pre_l2 r16 #define t1 r17 #define t2 r18 #define t3 r19 #define t4 r20 #define t5 t1 // alias! #define t6 t2 // alias! #define t7 t3 // alias! #define t9 t5 // alias! #define t10 t4 // alias! #define t11 t7 // alias! #define t12 t6 // alias! #define t14 t10 // alias! 
#define t13 r21 #define t15 r22 #define saved_lc r23 #define saved_pr r24 #define A 0 #define B (PREFETCH_DIST) #define C (B + PREFETCH_DIST) #define D (C + 3) #define N (D + 1) #define Nrot ((N + 7) & ~7) GLOBAL_ENTRY(copy_page) .prologue alloc r8 = ar.pfs, 2, Nrot-2, 0, Nrot .rotr v[2*PREFETCH_DIST], n[D-C+1] .rotp p[N] .save ar.lc, saved_lc mov saved_lc = ar.lc .save pr, saved_pr mov saved_pr = pr .body mov src_pre_mem = in1 mov pr.rot = 0x10000 mov ar.ec = 1 // special unrolled loop mov dst_pre_mem = in0 mov ar.lc = 2*PREFETCH_DIST - 1 add src_pre_l2 = 8*8, in1 add dst_pre_l2 = 8*8, in0 add src0 = 8, in1 // first t1 src add src1 = 3*8, in1 // first t3 src add dst0 = 8, in0 // first t1 dst add dst1 = 3*8, in0 // first t3 dst mov t1 = (PAGE_SIZE/128) - (2*PREFETCH_DIST) - 1 nop.m 0 nop.i 0 ;; // same as .line_copy loop, but with all predicated-off instructions removed: .prefetch_loop: (p[A]) ld8 v[A] = [src_pre_mem], 128 // M0 (p[B]) st8 [dst_pre_mem] = v[B], 128 // M2 br.ctop.sptk .prefetch_loop ;; cmp.eq p16, p0 = r0, r0 // reset p16 to 1 (br.ctop cleared it to zero) mov ar.lc = t1 // with 64KB pages, t1 is too big to fit in 8 bits! mov ar.ec = N // # of stages in pipeline ;; .line_copy: (p[D]) ld8 t2 = [src0], 3*8 // M0 (p[D]) ld8 t4 = [src1], 3*8 // M1 (p[B]) st8 [dst_pre_mem] = v[B], 128 // M2 prefetch dst from memory (p[D]) st8 [dst_pre_l2] = n[D-C], 128 // M3 prefetch dst from L2 ;; (p[A]) ld8 v[A] = [src_pre_mem], 128 // M0 prefetch src from memory (p[C]) ld8 n[0] = [src_pre_l2], 128 // M1 prefetch src from L2 (p[D]) st8 [dst0] = t1, 8 // M2 (p[D]) st8 [dst1] = t3, 8 // M3 ;; (p[D]) ld8 t5 = [src0], 8 (p[D]) ld8 t7 = [src1], 3*8 (p[D]) st8 [dst0] = t2, 3*8 (p[D]) st8 [dst1] = t4, 3*8 ;; (p[D]) ld8 t6 = [src0], 3*8 (p[D]) ld8 t10 = [src1], 8 (p[D]) st8 [dst0] = t5, 8 (p[D]) st8 [dst1] = t7, 3*8 ;; (p[D]) ld8 t9 = [src0], 3*8 (p[D]) ld8 t11 = [src1], 3*8 (p[D]) st8 [dst0] = t6, 3*8 (p[D]) st8 [dst1] = t10, 8 ;; (p[D]) ld8 t12 = [src0], 8 (p[D]) ld8 t14 = [src1], 8 (p[D]) st8 [dst0] = t9, 3*8 (p[D]) st8 [dst1] = t11, 3*8 ;; (p[D]) ld8 t13 = [src0], 4*8 (p[D]) ld8 t15 = [src1], 4*8 (p[D]) st8 [dst0] = t12, 8 (p[D]) st8 [dst1] = t14, 8 ;; (p[D-1])ld8 t1 = [src0], 8 (p[D-1])ld8 t3 = [src1], 8 (p[D]) st8 [dst0] = t13, 4*8 (p[D]) st8 [dst1] = t15, 4*8 br.ctop.sptk .line_copy ;; mov ar.lc = saved_lc mov pr = saved_pr, -1 br.ret.sptk.many rp END(copy_page) EXPORT_SYMBOL(copy_page)
AirFortressIlikara/LS2K0300-linux-4.19
2,204
arch/ia64/lib/idiv64.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 1999-2000 Hewlett-Packard Co * Copyright (C) 1999-2000 David Mosberger-Tang <davidm@hpl.hp.com> * * 64-bit integer division. * * This code is based on the application note entitled "Divide, Square Root * and Remainder Algorithms for the IA-64 Architecture". This document * is available as Intel document number 248725-002 or via the web at * http://developer.intel.com/software/opensource/numerics/ * * For more details on the theory behind these algorithms, see "IA-64 * and Elementary Functions" by Peter Markstein; HP Professional Books * (http://www.goodreads.com/book/show/2019887.Ia_64_and_Elementary_Functions) */ #include <asm/asmmacro.h> #include <asm/export.h> #ifdef MODULO # define OP mod #else # define OP div #endif #ifdef UNSIGNED # define SGN u # define INT_TO_FP(a,b) fcvt.xuf.s1 a=b # define FP_TO_INT(a,b) fcvt.fxu.trunc.s1 a=b #else # define SGN # define INT_TO_FP(a,b) fcvt.xf a=b # define FP_TO_INT(a,b) fcvt.fx.trunc.s1 a=b #endif #define PASTE1(a,b) a##b #define PASTE(a,b) PASTE1(a,b) #define NAME PASTE(PASTE(__,SGN),PASTE(OP,di3)) GLOBAL_ENTRY(NAME) .regstk 2,0,0,0 // Transfer inputs to FP registers. setf.sig f8 = in0 setf.sig f9 = in1 ;; // Convert the inputs to FP, to avoid FP software-assist faults. INT_TO_FP(f8, f8) INT_TO_FP(f9, f9) ;; frcpa.s1 f11, p6 = f8, f9 // y0 = frcpa(b) ;; (p6) fmpy.s1 f7 = f8, f11 // q0 = a*y0 (p6) fnma.s1 f6 = f9, f11, f1 // e0 = -b*y0 + 1 ;; (p6) fma.s1 f10 = f7, f6, f7 // q1 = q0*e0 + q0 (p6) fmpy.s1 f7 = f6, f6 // e1 = e0*e0 ;; #ifdef MODULO sub in1 = r0, in1 // in1 = -b #endif (p6) fma.s1 f10 = f10, f7, f10 // q2 = q1*e1 + q1 (p6) fma.s1 f6 = f11, f6, f11 // y1 = y0*e0 + y0 ;; (p6) fma.s1 f6 = f6, f7, f6 // y2 = y1*e1 + y1 (p6) fnma.s1 f7 = f9, f10, f8 // r = -b*q2 + a ;; #ifdef MODULO setf.sig f8 = in0 // f8 = a setf.sig f9 = in1 // f9 = -b #endif (p6) fma.s1 f11 = f7, f6, f10 // q3 = r*y2 + q2 ;; FP_TO_INT(f11, f11) // q = trunc(q3) ;; #ifdef MODULO xma.l f11 = f11, f9, f8 // r = q*(-b) + a ;; #endif getf.sig r8 = f11 // transfer result to result register br.ret.sptk.many rp END(NAME) EXPORT_SYMBOL(NAME)
AirFortressIlikara/LS2K0300-linux-4.19
17,849
arch/ia64/lib/memcpy_mck.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Itanium 2-optimized version of memcpy and copy_user function * * Inputs: * in0: destination address * in1: source address * in2: number of bytes to copy * Output: * for memcpy: return dest * for copy_user: return 0 if success, * or number of byte NOT copied if error occurred. * * Copyright (C) 2002 Intel Corp. * Copyright (C) 2002 Ken Chen <kenneth.w.chen@intel.com> */ #include <asm/asmmacro.h> #include <asm/page.h> #include <asm/export.h> #define EK(y...) EX(y) /* McKinley specific optimization */ #define retval r8 #define saved_pfs r31 #define saved_lc r10 #define saved_pr r11 #define saved_in0 r14 #define saved_in1 r15 #define saved_in2 r16 #define src0 r2 #define src1 r3 #define dst0 r17 #define dst1 r18 #define cnt r9 /* r19-r30 are temp for each code section */ #define PREFETCH_DIST 8 #define src_pre_mem r19 #define dst_pre_mem r20 #define src_pre_l2 r21 #define dst_pre_l2 r22 #define t1 r23 #define t2 r24 #define t3 r25 #define t4 r26 #define t5 t1 // alias! #define t6 t2 // alias! #define t7 t3 // alias! #define n8 r27 #define t9 t5 // alias! #define t10 t4 // alias! #define t11 t7 // alias! #define t12 t6 // alias! #define t14 t10 // alias! #define t13 r28 #define t15 r29 #define tmp r30 /* defines for long_copy block */ #define A 0 #define B (PREFETCH_DIST) #define C (B + PREFETCH_DIST) #define D (C + 1) #define N (D + 1) #define Nrot ((N + 7) & ~7) /* alias */ #define in0 r32 #define in1 r33 #define in2 r34 GLOBAL_ENTRY(memcpy) and r28=0x7,in0 and r29=0x7,in1 mov f6=f0 mov retval=in0 br.cond.sptk .common_code ;; END(memcpy) EXPORT_SYMBOL(memcpy) GLOBAL_ENTRY(__copy_user) .prologue // check dest alignment and r28=0x7,in0 and r29=0x7,in1 mov f6=f1 mov saved_in0=in0 // save dest pointer mov saved_in1=in1 // save src pointer mov retval=r0 // initialize return value ;; .common_code: cmp.gt p15,p0=8,in2 // check for small size cmp.ne p13,p0=0,r28 // check dest alignment cmp.ne p14,p0=0,r29 // check src alignment add src0=0,in1 sub r30=8,r28 // for .align_dest mov saved_in2=in2 // save len ;; add dst0=0,in0 add dst1=1,in0 // dest odd index cmp.le p6,p0 = 1,r30 // for .align_dest (p15) br.cond.dpnt .memcpy_short (p13) br.cond.dpnt .align_dest (p14) br.cond.dpnt .unaligned_src ;; // both dest and src are aligned on 8-byte boundary .aligned_src: .save ar.pfs, saved_pfs alloc saved_pfs=ar.pfs,3,Nrot-3,0,Nrot .save pr, saved_pr mov saved_pr=pr shr.u cnt=in2,7 // this much cache line ;; cmp.lt p6,p0=2*PREFETCH_DIST,cnt cmp.lt p7,p8=1,cnt .save ar.lc, saved_lc mov saved_lc=ar.lc .body add cnt=-1,cnt add src_pre_mem=0,in1 // prefetch src pointer add dst_pre_mem=0,in0 // prefetch dest pointer ;; (p7) mov ar.lc=cnt // prefetch count (p8) mov ar.lc=r0 (p6) br.cond.dpnt .long_copy ;; .prefetch: lfetch.fault [src_pre_mem], 128 lfetch.fault.excl [dst_pre_mem], 128 br.cloop.dptk.few .prefetch ;; .medium_copy: and tmp=31,in2 // copy length after iteration shr.u r29=in2,5 // number of 32-byte iteration add dst1=8,dst0 // 2nd dest pointer ;; add cnt=-1,r29 // ctop iteration adjustment cmp.eq p10,p0=r29,r0 // do we really need to loop? 
add src1=8,src0 // 2nd src pointer cmp.le p6,p0=8,tmp ;; cmp.le p7,p0=16,tmp mov ar.lc=cnt // loop setup cmp.eq p16,p17 = r0,r0 mov ar.ec=2 (p10) br.dpnt.few .aligned_src_tail ;; TEXT_ALIGN(32) 1: EX(.ex_handler, (p16) ld8 r34=[src0],16) EK(.ex_handler, (p16) ld8 r38=[src1],16) EX(.ex_handler, (p17) st8 [dst0]=r33,16) EK(.ex_handler, (p17) st8 [dst1]=r37,16) ;; EX(.ex_handler, (p16) ld8 r32=[src0],16) EK(.ex_handler, (p16) ld8 r36=[src1],16) EX(.ex_handler, (p16) st8 [dst0]=r34,16) EK(.ex_handler, (p16) st8 [dst1]=r38,16) br.ctop.dptk.few 1b ;; .aligned_src_tail: EX(.ex_handler, (p6) ld8 t1=[src0]) mov ar.lc=saved_lc mov ar.pfs=saved_pfs EX(.ex_hndlr_s, (p7) ld8 t2=[src1],8) cmp.le p8,p0=24,tmp and r21=-8,tmp ;; EX(.ex_hndlr_s, (p8) ld8 t3=[src1]) EX(.ex_handler, (p6) st8 [dst0]=t1) // store byte 1 and in2=7,tmp // remaining length EX(.ex_hndlr_d, (p7) st8 [dst1]=t2,8) // store byte 2 add src0=src0,r21 // setting up src pointer add dst0=dst0,r21 // setting up dest pointer ;; EX(.ex_handler, (p8) st8 [dst1]=t3) // store byte 3 mov pr=saved_pr,-1 br.dptk.many .memcpy_short ;; /* code taken from copy_page_mck */ .long_copy: .rotr v[2*PREFETCH_DIST] .rotp p[N] mov src_pre_mem = src0 mov pr.rot = 0x10000 mov ar.ec = 1 // special unrolled loop mov dst_pre_mem = dst0 add src_pre_l2 = 8*8, src0 add dst_pre_l2 = 8*8, dst0 ;; add src0 = 8, src_pre_mem // first t1 src mov ar.lc = 2*PREFETCH_DIST - 1 shr.u cnt=in2,7 // number of lines add src1 = 3*8, src_pre_mem // first t3 src add dst0 = 8, dst_pre_mem // first t1 dst add dst1 = 3*8, dst_pre_mem // first t3 dst ;; and tmp=127,in2 // remaining bytes after this block add cnt = -(2*PREFETCH_DIST) - 1, cnt // same as .line_copy loop, but with all predicated-off instructions removed: .prefetch_loop: EX(.ex_hndlr_lcpy_1, (p[A]) ld8 v[A] = [src_pre_mem], 128) // M0 EK(.ex_hndlr_lcpy_1, (p[B]) st8 [dst_pre_mem] = v[B], 128) // M2 br.ctop.sptk .prefetch_loop ;; cmp.eq p16, p0 = r0, r0 // reset p16 to 1 mov ar.lc = cnt mov ar.ec = N // # of stages in pipeline ;; .line_copy: EX(.ex_handler, (p[D]) ld8 t2 = [src0], 3*8) // M0 EK(.ex_handler, (p[D]) ld8 t4 = [src1], 3*8) // M1 EX(.ex_handler_lcpy, (p[B]) st8 [dst_pre_mem] = v[B], 128) // M2 prefetch dst from memory EK(.ex_handler_lcpy, (p[D]) st8 [dst_pre_l2] = n8, 128) // M3 prefetch dst from L2 ;; EX(.ex_handler_lcpy, (p[A]) ld8 v[A] = [src_pre_mem], 128) // M0 prefetch src from memory EK(.ex_handler_lcpy, (p[C]) ld8 n8 = [src_pre_l2], 128) // M1 prefetch src from L2 EX(.ex_handler, (p[D]) st8 [dst0] = t1, 8) // M2 EK(.ex_handler, (p[D]) st8 [dst1] = t3, 8) // M3 ;; EX(.ex_handler, (p[D]) ld8 t5 = [src0], 8) EK(.ex_handler, (p[D]) ld8 t7 = [src1], 3*8) EX(.ex_handler, (p[D]) st8 [dst0] = t2, 3*8) EK(.ex_handler, (p[D]) st8 [dst1] = t4, 3*8) ;; EX(.ex_handler, (p[D]) ld8 t6 = [src0], 3*8) EK(.ex_handler, (p[D]) ld8 t10 = [src1], 8) EX(.ex_handler, (p[D]) st8 [dst0] = t5, 8) EK(.ex_handler, (p[D]) st8 [dst1] = t7, 3*8) ;; EX(.ex_handler, (p[D]) ld8 t9 = [src0], 3*8) EK(.ex_handler, (p[D]) ld8 t11 = [src1], 3*8) EX(.ex_handler, (p[D]) st8 [dst0] = t6, 3*8) EK(.ex_handler, (p[D]) st8 [dst1] = t10, 8) ;; EX(.ex_handler, (p[D]) ld8 t12 = [src0], 8) EK(.ex_handler, (p[D]) ld8 t14 = [src1], 8) EX(.ex_handler, (p[D]) st8 [dst0] = t9, 3*8) EK(.ex_handler, (p[D]) st8 [dst1] = t11, 3*8) ;; EX(.ex_handler, (p[D]) ld8 t13 = [src0], 4*8) EK(.ex_handler, (p[D]) ld8 t15 = [src1], 4*8) EX(.ex_handler, (p[D]) st8 [dst0] = t12, 8) EK(.ex_handler, (p[D]) st8 [dst1] = t14, 8) ;; EX(.ex_handler, (p[C]) ld8 t1 = [src0], 8) 
EK(.ex_handler, (p[C]) ld8 t3 = [src1], 8) EX(.ex_handler, (p[D]) st8 [dst0] = t13, 4*8) EK(.ex_handler, (p[D]) st8 [dst1] = t15, 4*8) br.ctop.sptk .line_copy ;; add dst0=-8,dst0 add src0=-8,src0 mov in2=tmp .restore sp br.sptk.many .medium_copy ;; #define BLOCK_SIZE 128*32 #define blocksize r23 #define curlen r24 // dest is on 8-byte boundary, src is not. We need to do // ld8-ld8, shrp, then st8. Max 8 byte copy per cycle. .unaligned_src: .prologue .save ar.pfs, saved_pfs alloc saved_pfs=ar.pfs,3,5,0,8 .save ar.lc, saved_lc mov saved_lc=ar.lc .save pr, saved_pr mov saved_pr=pr .body .4k_block: mov saved_in0=dst0 // need to save all input arguments mov saved_in2=in2 mov blocksize=BLOCK_SIZE ;; cmp.lt p6,p7=blocksize,in2 mov saved_in1=src0 ;; (p6) mov in2=blocksize ;; shr.u r21=in2,7 // this much cache line shr.u r22=in2,4 // number of 16-byte iteration and curlen=15,in2 // copy length after iteration and r30=7,src0 // source alignment ;; cmp.lt p7,p8=1,r21 add cnt=-1,r21 ;; add src_pre_mem=0,src0 // prefetch src pointer add dst_pre_mem=0,dst0 // prefetch dest pointer and src0=-8,src0 // 1st src pointer (p7) mov ar.lc = cnt (p8) mov ar.lc = r0 ;; TEXT_ALIGN(32) 1: lfetch.fault [src_pre_mem], 128 lfetch.fault.excl [dst_pre_mem], 128 br.cloop.dptk.few 1b ;; shladd dst1=r22,3,dst0 // 2nd dest pointer shladd src1=r22,3,src0 // 2nd src pointer cmp.eq p8,p9=r22,r0 // do we really need to loop? cmp.le p6,p7=8,curlen; // have at least 8 byte remaining? add cnt=-1,r22 // ctop iteration adjustment ;; EX(.ex_handler, (p9) ld8 r33=[src0],8) // loop primer EK(.ex_handler, (p9) ld8 r37=[src1],8) (p8) br.dpnt.few .noloop ;; // The jump address is calculated based on src alignment. The COPYU // macro below need to confine its size to power of two, so an entry // can be caulated using shl instead of an expensive multiply. The // size is then hard coded by the following #define to match the // actual size. This make it somewhat tedious when COPYU macro gets // changed and this need to be adjusted to match. #define LOOP_SIZE 6 1: mov r29=ip // jmp_table thread mov ar.lc=cnt ;; add r29=.jump_table - 1b - (.jmp1-.jump_table), r29 shl r28=r30, LOOP_SIZE // jmp_table thread mov ar.ec=2 // loop setup ;; add r29=r29,r28 // jmp_table thread cmp.eq p16,p17=r0,r0 ;; mov b6=r29 // jmp_table thread ;; br.cond.sptk.few b6 // for 8-15 byte case // We will skip the loop, but need to replicate the side effect // that the loop produces. .noloop: EX(.ex_handler, (p6) ld8 r37=[src1],8) add src0=8,src0 (p6) shl r25=r30,3 ;; EX(.ex_handler, (p6) ld8 r27=[src1]) (p6) shr.u r28=r37,r25 (p6) sub r26=64,r25 ;; (p6) shl r27=r27,r26 ;; (p6) or r21=r28,r27 .unaligned_src_tail: /* check if we have more than blocksize to copy, if so go back */ cmp.gt p8,p0=saved_in2,blocksize ;; (p8) add dst0=saved_in0,blocksize (p8) add src0=saved_in1,blocksize (p8) sub in2=saved_in2,blocksize (p8) br.dpnt .4k_block ;; /* we have up to 15 byte to copy in the tail. * part of work is already done in the jump table code * we are at the following state. * src side: * * xxxxxx xx <----- r21 has xxxxxxxx already * -------- -------- -------- * 0 8 16 * ^ * | * src1 * * dst * -------- -------- -------- * ^ * | * dst1 */ EX(.ex_handler, (p6) st8 [dst1]=r21,8) // more than 8 byte to copy (p6) add curlen=-8,curlen // update length mov ar.pfs=saved_pfs ;; mov ar.lc=saved_lc mov pr=saved_pr,-1 mov in2=curlen // remaining length mov dst0=dst1 // dest pointer add src0=src1,r30 // forward by src alignment ;; // 7 byte or smaller. 
.memcpy_short: cmp.le p8,p9 = 1,in2 cmp.le p10,p11 = 2,in2 cmp.le p12,p13 = 3,in2 cmp.le p14,p15 = 4,in2 add src1=1,src0 // second src pointer add dst1=1,dst0 // second dest pointer ;; EX(.ex_handler_short, (p8) ld1 t1=[src0],2) EK(.ex_handler_short, (p10) ld1 t2=[src1],2) (p9) br.ret.dpnt rp // 0 byte copy ;; EX(.ex_handler_short, (p8) st1 [dst0]=t1,2) EK(.ex_handler_short, (p10) st1 [dst1]=t2,2) (p11) br.ret.dpnt rp // 1 byte copy EX(.ex_handler_short, (p12) ld1 t3=[src0],2) EK(.ex_handler_short, (p14) ld1 t4=[src1],2) (p13) br.ret.dpnt rp // 2 byte copy ;; cmp.le p6,p7 = 5,in2 cmp.le p8,p9 = 6,in2 cmp.le p10,p11 = 7,in2 EX(.ex_handler_short, (p12) st1 [dst0]=t3,2) EK(.ex_handler_short, (p14) st1 [dst1]=t4,2) (p15) br.ret.dpnt rp // 3 byte copy ;; EX(.ex_handler_short, (p6) ld1 t5=[src0],2) EK(.ex_handler_short, (p8) ld1 t6=[src1],2) (p7) br.ret.dpnt rp // 4 byte copy ;; EX(.ex_handler_short, (p6) st1 [dst0]=t5,2) EK(.ex_handler_short, (p8) st1 [dst1]=t6,2) (p9) br.ret.dptk rp // 5 byte copy EX(.ex_handler_short, (p10) ld1 t7=[src0],2) (p11) br.ret.dptk rp // 6 byte copy ;; EX(.ex_handler_short, (p10) st1 [dst0]=t7,2) br.ret.dptk rp // done all cases /* Align dest to nearest 8-byte boundary. We know we have at * least 7 bytes to copy, enough to crawl to 8-byte boundary. * Actual number of byte to crawl depend on the dest alignment. * 7 byte or less is taken care at .memcpy_short * src0 - source even index * src1 - source odd index * dst0 - dest even index * dst1 - dest odd index * r30 - distance to 8-byte boundary */ .align_dest: add src1=1,in1 // source odd index cmp.le p7,p0 = 2,r30 // for .align_dest cmp.le p8,p0 = 3,r30 // for .align_dest EX(.ex_handler_short, (p6) ld1 t1=[src0],2) cmp.le p9,p0 = 4,r30 // for .align_dest cmp.le p10,p0 = 5,r30 ;; EX(.ex_handler_short, (p7) ld1 t2=[src1],2) EK(.ex_handler_short, (p8) ld1 t3=[src0],2) cmp.le p11,p0 = 6,r30 EX(.ex_handler_short, (p6) st1 [dst0] = t1,2) cmp.le p12,p0 = 7,r30 ;; EX(.ex_handler_short, (p9) ld1 t4=[src1],2) EK(.ex_handler_short, (p10) ld1 t5=[src0],2) EX(.ex_handler_short, (p7) st1 [dst1] = t2,2) EK(.ex_handler_short, (p8) st1 [dst0] = t3,2) ;; EX(.ex_handler_short, (p11) ld1 t6=[src1],2) EK(.ex_handler_short, (p12) ld1 t7=[src0],2) cmp.eq p6,p7=r28,r29 EX(.ex_handler_short, (p9) st1 [dst1] = t4,2) EK(.ex_handler_short, (p10) st1 [dst0] = t5,2) sub in2=in2,r30 ;; EX(.ex_handler_short, (p11) st1 [dst1] = t6,2) EK(.ex_handler_short, (p12) st1 [dst0] = t7) add dst0=in0,r30 // setup arguments add src0=in1,r30 (p6) br.cond.dptk .aligned_src (p7) br.cond.dpnt .unaligned_src ;; /* main loop body in jump table format */ #define COPYU(shift) \ 1: \ EX(.ex_handler, (p16) ld8 r32=[src0],8); /* 1 */ \ EK(.ex_handler, (p16) ld8 r36=[src1],8); \ (p17) shrp r35=r33,r34,shift;; /* 1 */ \ EX(.ex_handler, (p6) ld8 r22=[src1]); /* common, prime for tail section */ \ nop.m 0; \ (p16) shrp r38=r36,r37,shift; \ EX(.ex_handler, (p17) st8 [dst0]=r35,8); /* 1 */ \ EK(.ex_handler, (p17) st8 [dst1]=r39,8); \ br.ctop.dptk.few 1b;; \ (p7) add src1=-8,src1; /* back out for <8 byte case */ \ shrp r21=r22,r38,shift; /* speculative work */ \ br.sptk.few .unaligned_src_tail /* branch out of jump table */ \ ;; TEXT_ALIGN(32) .jump_table: COPYU(8) // unaligned cases .jmp1: COPYU(16) COPYU(24) COPYU(32) COPYU(40) COPYU(48) COPYU(56) #undef A #undef B #undef C #undef D /* * Due to lack of local tag support in gcc 2.x assembler, it is not clear which * instruction failed in the bundle. 
The exception algorithm is that we * first figure out the faulting address, then detect if there is any * progress made on the copy, if so, redo the copy from last known copied * location up to the faulting address (exclusive). In the copy_from_user * case, remaining byte in kernel buffer will be zeroed. * * Take copy_from_user as an example, in the code there are multiple loads * in a bundle and those multiple loads could span over two pages, the * faulting address is calculated as page_round_down(max(src0, src1)). * This is based on knowledge that if we can access one byte in a page, we * can access any byte in that page. * * predicate used in the exception handler: * p6-p7: direction * p10-p11: src faulting addr calculation * p12-p13: dst faulting addr calculation */ #define A r19 #define B r20 #define C r21 #define D r22 #define F r28 #define saved_retval loc0 #define saved_rtlink loc1 #define saved_pfs_stack loc2 .ex_hndlr_s: add src0=8,src0 br.sptk .ex_handler ;; .ex_hndlr_d: add dst0=8,dst0 br.sptk .ex_handler ;; .ex_hndlr_lcpy_1: mov src1=src_pre_mem mov dst1=dst_pre_mem cmp.gtu p10,p11=src_pre_mem,saved_in1 cmp.gtu p12,p13=dst_pre_mem,saved_in0 ;; (p10) add src0=8,saved_in1 (p11) mov src0=saved_in1 (p12) add dst0=8,saved_in0 (p13) mov dst0=saved_in0 br.sptk .ex_handler .ex_handler_lcpy: // in line_copy block, the preload addresses should always ahead // of the other two src/dst pointers. Furthermore, src1/dst1 should // always ahead of src0/dst0. mov src1=src_pre_mem mov dst1=dst_pre_mem .ex_handler: mov pr=saved_pr,-1 // first restore pr, lc, and pfs mov ar.lc=saved_lc mov ar.pfs=saved_pfs ;; .ex_handler_short: // fault occurred in these sections didn't change pr, lc, pfs cmp.ltu p6,p7=saved_in0, saved_in1 // get the copy direction cmp.ltu p10,p11=src0,src1 cmp.ltu p12,p13=dst0,dst1 fcmp.eq p8,p0=f6,f0 // is it memcpy? mov tmp = dst0 ;; (p11) mov src1 = src0 // pick the larger of the two (p13) mov dst0 = dst1 // make dst0 the smaller one (p13) mov dst1 = tmp // and dst1 the larger one ;; (p6) dep F = r0,dst1,0,PAGE_SHIFT // usr dst round down to page boundary (p7) dep F = r0,src1,0,PAGE_SHIFT // usr src round down to page boundary ;; (p6) cmp.le p14,p0=dst0,saved_in0 // no progress has been made on store (p7) cmp.le p14,p0=src0,saved_in1 // no progress has been made on load mov retval=saved_in2 (p8) ld1 tmp=[src1] // force an oops for memcpy call (p8) st1 [dst1]=r0 // force an oops for memcpy call (p14) br.ret.sptk.many rp /* * The remaining byte to copy is calculated as: * * A = (faulting_addr - orig_src) -> len to faulting ld address * or * (faulting_addr - orig_dst) -> len to faulting st address * B = (cur_dst - orig_dst) -> len copied so far * C = A - B -> len need to be copied * D = orig_len - A -> len need to be left along */ (p6) sub A = F, saved_in0 (p7) sub A = F, saved_in1 clrrrb ;; alloc saved_pfs_stack=ar.pfs,3,3,3,0 cmp.lt p8,p0=A,r0 sub B = dst0, saved_in0 // how many byte copied so far ;; (p8) mov A = 0; // A shouldn't be negative, cap it ;; sub C = A, B sub D = saved_in2, A ;; cmp.gt p8,p0=C,r0 // more than 1 byte? mov r8=0 mov saved_retval = D mov saved_rtlink = b0 add out0=saved_in0, B add out1=saved_in1, B mov out2=C (p8) br.call.sptk.few b0=__copy_user // recursive call ;; add saved_retval=saved_retval,r8 // above might return non-zero value ;; mov retval=saved_retval mov ar.pfs=saved_pfs_stack mov b0=saved_rtlink br.ret.sptk.many rp /* end of McKinley specific optimization */ END(__copy_user) EXPORT_SYMBOL(__copy_user)
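The recovery path above boils down to a little arithmetic on four lengths. Written as plain C it is easier to follow (the struct and function names are made up for illustration, and fault_page stands for the page-rounded faulting address F):

#include <stddef.h>
#include <stdint.h>

struct fixup_lengths {
	size_t copied;		/* B: progress made before the fault           */
	size_t redo;		/* C: bytes to re-copy up to the faulting page */
	size_t not_copied;	/* D: bytes reported back as not copied        */
};

static struct fixup_lengths compute_fixup(uintptr_t fault_page, uintptr_t orig,
					  uintptr_t cur, size_t orig_len)
{
	struct fixup_lengths f;
	size_t A = fault_page > orig ? fault_page - orig : 0;	/* A: len to faulting address */

	f.copied = cur - orig;					/* B = cur_dst - orig_dst */
	f.redo = A > f.copied ? A - f.copied : 0;		/* C = A - B              */
	f.not_copied = orig_len - A;				/* D = orig_len - A       */
	return f;
}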
AirFortressIlikara/LS2K0300-linux-4.19
2,807
arch/ia64/lib/ip_fast_csum.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Optmized version of the ip_fast_csum() function * Used for calculating IP header checksum * * Return: 16bit checksum, complemented * * Inputs: * in0: address of buffer to checksum (char *) * in1: length of the buffer (int) * * Copyright (C) 2002, 2006 Intel Corp. * Copyright (C) 2002, 2006 Ken Chen <kenneth.w.chen@intel.com> */ #include <asm/asmmacro.h> #include <asm/export.h> /* * Since we know that most likely this function is called with buf aligned * on 4-byte boundary and 20 bytes in length, we can execution rather quickly * versus calling generic version of do_csum, which has lots of overhead in * handling various alignments and sizes. However, due to lack of constrains * put on the function input argument, cases with alignment not on 4-byte or * size not equal to 20 bytes will be handled by the generic do_csum function. */ #define in0 r32 #define in1 r33 #define in2 r34 #define in3 r35 #define in4 r36 #define ret0 r8 GLOBAL_ENTRY(ip_fast_csum) .prologue .body cmp.ne p6,p7=5,in1 // size other than 20 byte? and r14=3,in0 // is it aligned on 4-byte? add r15=4,in0 // second source pointer ;; cmp.ne.or.andcm p6,p7=r14,r0 ;; (p7) ld4 r20=[in0],8 (p7) ld4 r21=[r15],8 (p6) br.spnt .generic ;; ld4 r22=[in0],8 ld4 r23=[r15],8 ;; ld4 r24=[in0] add r20=r20,r21 add r22=r22,r23 ;; add r20=r20,r22 ;; add r20=r20,r24 ;; shr.u ret0=r20,16 // now need to add the carry zxt2 r20=r20 ;; add r20=ret0,r20 ;; shr.u ret0=r20,16 // add carry again zxt2 r20=r20 ;; add r20=ret0,r20 ;; shr.u ret0=r20,16 zxt2 r20=r20 ;; add r20=ret0,r20 mov r9=0xffff ;; andcm ret0=r9,r20 .restore sp // reset frame state br.ret.sptk.many b0 ;; .generic: .prologue .save ar.pfs, r35 alloc r35=ar.pfs,2,2,2,0 .save rp, r34 mov r34=b0 .body dep.z out1=in1,2,30 mov out0=in0 ;; br.call.sptk.many b0=do_csum ;; andcm ret0=-1,ret0 mov ar.pfs=r35 mov b0=r34 br.ret.sptk.many b0 END(ip_fast_csum) EXPORT_SYMBOL(ip_fast_csum) GLOBAL_ENTRY(csum_ipv6_magic) ld4 r20=[in0],4 ld4 r21=[in1],4 zxt4 in2=in2 ;; ld4 r22=[in0],4 ld4 r23=[in1],4 dep r15=in3,in2,32,16 ;; ld4 r24=[in0],4 ld4 r25=[in1],4 mux1 r15=r15,@rev add r16=r20,r21 add r17=r22,r23 zxt4 in4=in4 ;; ld4 r26=[in0],4 ld4 r27=[in1],4 shr.u r15=r15,16 add r18=r24,r25 add r8=r16,r17 ;; add r19=r26,r27 add r8=r8,r18 ;; add r8=r8,r19 add r15=r15,in4 ;; add r8=r8,r15 ;; shr.u r10=r8,32 // now fold sum into short zxt4 r11=r8 ;; add r8=r10,r11 ;; shr.u r10=r8,16 // yeah, keep it rolling zxt2 r11=r8 ;; add r8=r10,r11 ;; shr.u r10=r8,16 // three times lucky zxt2 r11=r8 ;; add r8=r10,r11 mov r9=0xffff ;; andcm r8=r9,r8 br.ret.sptk.many b0 END(csum_ipv6_magic) EXPORT_SYMBOL(csum_ipv6_magic)
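For the common case the routine simply adds the five 32-bit words of a 20-byte IPv4 header and folds the carries back into 16 bits before complementing. An illustrative C version of that fast path (a sketch assuming a 4-byte-aligned header, as the asm does; the function name is made up):

#include <stdint.h>

static uint16_t ip_fast_csum_model(const void *iph)
{
	const uint32_t *w = iph;
	uint64_t sum = 0;

	for (int i = 0; i < 5; i++)		/* 20-byte header = 5 words */
		sum += w[i];

	sum = (sum & 0xffff) + (sum >> 16);	/* fold the carries ...     */
	sum = (sum & 0xffff) + (sum >> 16);	/* ... down to 16 bits ...  */
	sum = (sum & 0xffff) + (sum >> 16);	/* ... as the asm does      */

	return (uint16_t)~sum;			/* complemented checksum    */
}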
AirFortressIlikara/LS2K0300-linux-4.19
2,201
arch/ia64/lib/copy_page.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * * Optimized version of the standard copy_page() function * * Inputs: * in0: address of target page * in1: address of source page * Output: * no return value * * Copyright (C) 1999, 2001 Hewlett-Packard Co * Stephane Eranian <eranian@hpl.hp.com> * David Mosberger <davidm@hpl.hp.com> * * 4/06/01 davidm Tuned to make it perform well both for cached and uncached copies. */ #include <asm/asmmacro.h> #include <asm/page.h> #include <asm/export.h> #define PIPE_DEPTH 3 #define EPI p[PIPE_DEPTH-1] #define lcount r16 #define saved_pr r17 #define saved_lc r18 #define saved_pfs r19 #define src1 r20 #define src2 r21 #define tgt1 r22 #define tgt2 r23 #define srcf r24 #define tgtf r25 #define tgt_last r26 #define Nrot ((8*PIPE_DEPTH+7)&~7) GLOBAL_ENTRY(copy_page) .prologue .save ar.pfs, saved_pfs alloc saved_pfs=ar.pfs,3,Nrot-3,0,Nrot .rotr t1[PIPE_DEPTH], t2[PIPE_DEPTH], t3[PIPE_DEPTH], t4[PIPE_DEPTH], \ t5[PIPE_DEPTH], t6[PIPE_DEPTH], t7[PIPE_DEPTH], t8[PIPE_DEPTH] .rotp p[PIPE_DEPTH] .save ar.lc, saved_lc mov saved_lc=ar.lc mov ar.ec=PIPE_DEPTH mov lcount=PAGE_SIZE/64-1 .save pr, saved_pr mov saved_pr=pr mov pr.rot=1<<16 .body mov src1=in1 adds src2=8,in1 mov tgt_last = PAGE_SIZE ;; adds tgt2=8,in0 add srcf=512,in1 mov ar.lc=lcount mov tgt1=in0 add tgtf=512,in0 add tgt_last = tgt_last, in0 ;; 1: (p[0]) ld8 t1[0]=[src1],16 (EPI) st8 [tgt1]=t1[PIPE_DEPTH-1],16 (p[0]) ld8 t2[0]=[src2],16 (EPI) st8 [tgt2]=t2[PIPE_DEPTH-1],16 cmp.ltu p6,p0 = tgtf, tgt_last ;; (p[0]) ld8 t3[0]=[src1],16 (EPI) st8 [tgt1]=t3[PIPE_DEPTH-1],16 (p[0]) ld8 t4[0]=[src2],16 (EPI) st8 [tgt2]=t4[PIPE_DEPTH-1],16 ;; (p[0]) ld8 t5[0]=[src1],16 (EPI) st8 [tgt1]=t5[PIPE_DEPTH-1],16 (p[0]) ld8 t6[0]=[src2],16 (EPI) st8 [tgt2]=t6[PIPE_DEPTH-1],16 ;; (p[0]) ld8 t7[0]=[src1],16 (EPI) st8 [tgt1]=t7[PIPE_DEPTH-1],16 (p[0]) ld8 t8[0]=[src2],16 (EPI) st8 [tgt2]=t8[PIPE_DEPTH-1],16 (p6) lfetch [srcf], 64 (p6) lfetch [tgtf], 64 br.ctop.sptk.few 1b ;; mov pr=saved_pr,0xffffffffffff0000 // restore predicates mov ar.pfs=saved_pfs mov ar.lc=saved_lc br.ret.sptk.many rp END(copy_page) EXPORT_SYMBOL(copy_page)
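The loop above is modulo-scheduled with PIPE_DEPTH=3: each ld8 issued in stage 0 is stored by the matching EPI-predicated st8 two iterations later through a rotating register. A minimal C sketch of that pattern, with a small circular buffer standing in for the rotating registers (pipelined_copy is hypothetical, for intuition only):

#include <stddef.h>
#include <stdint.h>

#define DEPTH 3

static void pipelined_copy(uint64_t *dst, const uint64_t *src, size_t n)
{
	uint64_t buf[DEPTH];

	/* n + (DEPTH - 1) trips: the last DEPTH-1 trips only drain stores */
	for (size_t i = 0; i < n + DEPTH - 1; i++) {
		if (i < n)
			buf[i % DEPTH] = src[i];	/* stage 0: load   */
		if (i >= DEPTH - 1)			/* stage 2: store  */
			dst[i - (DEPTH - 1)] = buf[(i - (DEPTH - 1)) % DEPTH];
	}
}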
AirFortressIlikara/LS2K0300-linux-4.19
2,205
arch/ia64/lib/idiv32.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2000 Hewlett-Packard Co * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com> * * 32-bit integer division. * * This code is based on the application note entitled "Divide, Square Root * and Remainder Algorithms for the IA-64 Architecture". This document * is available as Intel document number 248725-002 or via the web at * http://developer.intel.com/software/opensource/numerics/ * * For more details on the theory behind these algorithms, see "IA-64 * and Elementary Functions" by Peter Markstein; HP Professional Books * (http://www.goodreads.com/book/show/2019887.Ia_64_and_Elementary_Functions) */ #include <asm/asmmacro.h> #include <asm/export.h> #ifdef MODULO # define OP mod #else # define OP div #endif #ifdef UNSIGNED # define SGN u # define EXTEND zxt4 # define INT_TO_FP(a,b) fcvt.xuf.s1 a=b # define FP_TO_INT(a,b) fcvt.fxu.trunc.s1 a=b #else # define SGN # define EXTEND sxt4 # define INT_TO_FP(a,b) fcvt.xf a=b # define FP_TO_INT(a,b) fcvt.fx.trunc.s1 a=b #endif #define PASTE1(a,b) a##b #define PASTE(a,b) PASTE1(a,b) #define NAME PASTE(PASTE(__,SGN),PASTE(OP,si3)) GLOBAL_ENTRY(NAME) .regstk 2,0,0,0 // Transfer inputs to FP registers. mov r2 = 0xffdd // r2 = -34 + 65535 (fp reg format bias) EXTEND in0 = in0 // in0 = a EXTEND in1 = in1 // in1 = b ;; setf.sig f8 = in0 setf.sig f9 = in1 #ifdef MODULO sub in1 = r0, in1 // in1 = -b #endif ;; // Convert the inputs to FP, to avoid FP software-assist faults. INT_TO_FP(f8, f8) INT_TO_FP(f9, f9) ;; setf.exp f7 = r2 // f7 = 2^-34 frcpa.s1 f6, p6 = f8, f9 // y0 = frcpa(b) ;; (p6) fmpy.s1 f8 = f8, f6 // q0 = a*y0 (p6) fnma.s1 f6 = f9, f6, f1 // e0 = -b*y0 + 1 ;; #ifdef MODULO setf.sig f9 = in1 // f9 = -b #endif (p6) fma.s1 f8 = f6, f8, f8 // q1 = e0*q0 + q0 (p6) fma.s1 f6 = f6, f6, f7 // e1 = e0*e0 + 2^-34 ;; #ifdef MODULO setf.sig f7 = in0 #endif (p6) fma.s1 f6 = f6, f8, f8 // q2 = e1*q1 + q1 ;; FP_TO_INT(f6, f6) // q = trunc(q2) ;; #ifdef MODULO xma.l f6 = f6, f9, f7 // r = q*(-b) + a ;; #endif getf.sig r8 = f6 // transfer result to result register br.ret.sptk.many rp END(NAME) EXPORT_SYMBOL(NAME)
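When built with MODULO defined, the routine above does not return the truncated quotient directly; it recovers the remainder with one final fixed-point multiply-add, r = q*(-b) + a (the xma.l near the end). The same last step in C, with illustrative names:

#include <stdint.h>

static int32_t mod_from_quotient(int32_t a, int32_t b, int32_t q)
{
	/* xma.l f6 = f6, f9, f7  ->  r = q*(-b) + a */
	return a + q * (-b);
}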
AirFortressIlikara/LS2K0300-linux-4.19
1,071
arch/ia64/lib/carta_random.S
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Fast, simple, yet decent quality random number generator based on
 * a paper by David G. Carta ("Two Fast Implementations of the
 * `Minimal Standard' Random Number Generator," Communications of the
 * ACM, January, 1990).
 *
 * Copyright (C) 2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <asm/asmmacro.h>

#define a	r2
#define m	r3
#define lo	r8
#define hi	r9
#define t0	r16
#define t1	r17
#define seed	r32

GLOBAL_ENTRY(carta_random32)
	movl	a = (16807 << 16) | 16807
	;;
	pmpyshr2.u t0 = a, seed, 0
	pmpyshr2.u t1 = a, seed, 16
	;;
	unpack2.l t0 = t1, t0
	dep	m = -1, r0, 0, 31
	;;
	zxt4	lo = t0
	shr.u	hi = t0, 32
	;;
	dep	t0 = 0, hi, 15, 49	// t0 = (hi & 0x7fff)
	;;
	shl	t0 = t0, 16		// t0 = (hi & 0x7fff) << 16
	shr	t1 = hi, 15		// t1 = (hi >> 15)
	;;
	add	lo = lo, t0
	;;
	cmp.gtu	p6, p0 = lo, m
	;;
(p6)	and	lo = lo, m
	;;
(p6)	add	lo = 1, lo
	;;
	add	lo = lo, t1
	;;
	cmp.gtu	p6, p0 = lo, m
	;;
(p6)	and	lo = lo, m
	;;
(p6)	add	lo = 1, lo
	br.ret.sptk.many rp
END(carta_random32)
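Carta's trick avoids a 64-bit divide: because 2^31 is congruent to 1 mod (2^31 - 1), the wide product 16807*seed can be split at bit 31 and the two pieces simply added, with at most one extra fold. A C model of the recurrence (a sketch; the asm builds the product out of 16-bit pmpyshr2 pieces and folds twice, but it computes the same Park-Miller step):

#include <stdint.h>

static uint32_t carta_random32_model(uint32_t seed)
{
	uint64_t p = 16807ULL * seed;
	uint32_t lo = (uint32_t)(p & 0x7fffffff);	/* low 31 bits of the product */
	uint32_t hi = (uint32_t)(p >> 31);		/* everything above bit 30    */
	uint32_t x = lo + hi;				/* Carta's addition           */

	if (x > 0x7fffffff)				/* at most one overflow       */
		x -= 0x7fffffff;
	return x;					/* next seed, mod 2^31 - 1    */
}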
AirFortressIlikara/LS2K0300-linux-4.19
10,298
arch/ia64/lib/do_csum.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * * Optmized version of the standard do_csum() function * * Return: a 64bit quantity containing the 16bit Internet checksum * * Inputs: * in0: address of buffer to checksum (char *) * in1: length of the buffer (int) * * Copyright (C) 1999, 2001-2002 Hewlett-Packard Co * Stephane Eranian <eranian@hpl.hp.com> * * 02/04/22 Ken Chen <kenneth.w.chen@intel.com> * Data locality study on the checksum buffer. * More optimization cleanup - remove excessive stop bits. * 02/04/08 David Mosberger <davidm@hpl.hp.com> * More cleanup and tuning. * 01/04/18 Jun Nakajima <jun.nakajima@intel.com> * Clean up and optimize and the software pipeline, loading two * back-to-back 8-byte words per loop. Clean up the initialization * for the loop. Support the cases where load latency = 1 or 2. * Set CONFIG_IA64_LOAD_LATENCY to 1 or 2 (default). */ #include <asm/asmmacro.h> // // Theory of operations: // The goal is to go as quickly as possible to the point where // we can checksum 16 bytes/loop. Before reaching that point we must // take care of incorrect alignment of first byte. // // The code hereafter also takes care of the "tail" part of the buffer // before entering the core loop, if any. The checksum is a sum so it // allows us to commute operations. So we do the "head" and "tail" // first to finish at full speed in the body. Once we get the head and // tail values, we feed them into the pipeline, very handy initialization. // // Of course we deal with the special case where the whole buffer fits // into one 8 byte word. In this case we have only one entry in the pipeline. // // We use a (LOAD_LATENCY+2)-stage pipeline in the loop to account for // possible load latency and also to accommodate for head and tail. // // The end of the function deals with folding the checksum from 64bits // down to 16bits taking care of the carry. // // This version avoids synchronization in the core loop by also using a // pipeline for the accumulation of the checksum in resultx[] (x=1,2). // // wordx[] (x=1,2) // |---| // | | 0 : new value loaded in pipeline // |---| // | | - : in transit data // |---| // | | LOAD_LATENCY : current value to add to checksum // |---| // | | LOAD_LATENCY+1 : previous value added to checksum // |---| (previous iteration) // // resultx[] (x=1,2) // |---| // | | 0 : initial value // |---| // | | LOAD_LATENCY-1 : new checksum // |---| // | | LOAD_LATENCY : previous value of checksum // |---| // | | LOAD_LATENCY+1 : final checksum when out of the loop // |---| // // // See RFC1071 "Computing the Internet Checksum" for various techniques for // calculating the Internet checksum. // // NOT YET DONE: // - Maybe another algorithm which would take care of the folding at the // end in a different manner // - Work with people more knowledgeable than me on the network stack // to figure out if we could not split the function depending on the // type of packet or alignment we get. Like the ip_fast_csum() routine // where we know we have at least 20bytes worth of data to checksum. // - Do a better job of handling small packets. // - Note on prefetching: it was found that under various load, i.e. ftp read/write, // nfs read/write, the L1 cache hit rate is at 60% and L2 cache hit rate is at 99.8% // on the data that buffer points to (partly because the checksum is often preceded by // a copy_from_user()). This finding indiate that lfetch will not be beneficial since // the data is already in the cache. 
// #define saved_pfs r11 #define hmask r16 #define tmask r17 #define first1 r18 #define firstval r19 #define firstoff r20 #define last r21 #define lastval r22 #define lastoff r23 #define saved_lc r24 #define saved_pr r25 #define tmp1 r26 #define tmp2 r27 #define tmp3 r28 #define carry1 r29 #define carry2 r30 #define first2 r31 #define buf in0 #define len in1 #define LOAD_LATENCY 2 // XXX fix me #if (LOAD_LATENCY != 1) && (LOAD_LATENCY != 2) # error "Only 1 or 2 is supported/tested for LOAD_LATENCY." #endif #define PIPE_DEPTH (LOAD_LATENCY+2) #define ELD p[LOAD_LATENCY] // end of load #define ELD_1 p[LOAD_LATENCY+1] // and next stage // unsigned long do_csum(unsigned char *buf,long len) GLOBAL_ENTRY(do_csum) .prologue .save ar.pfs, saved_pfs alloc saved_pfs=ar.pfs,2,16,0,16 .rotr word1[4], word2[4],result1[LOAD_LATENCY+2],result2[LOAD_LATENCY+2] .rotp p[PIPE_DEPTH], pC1[2], pC2[2] mov ret0=r0 // in case we have zero length cmp.lt p0,p6=r0,len // check for zero length or negative (32bit len) ;; add tmp1=buf,len // last byte's address .save pr, saved_pr mov saved_pr=pr // preserve predicates (rotation) (p6) br.ret.spnt.many rp // return if zero or negative length mov hmask=-1 // initialize head mask tbit.nz p15,p0=buf,0 // is buf an odd address? and first1=-8,buf // 8-byte align down address of first1 element and firstoff=7,buf // how many bytes off for first1 element mov tmask=-1 // initialize tail mask ;; adds tmp2=-1,tmp1 // last-1 and lastoff=7,tmp1 // how many bytes off for last element ;; sub tmp1=8,lastoff // complement to lastoff and last=-8,tmp2 // address of word containing last byte ;; sub tmp3=last,first1 // tmp3=distance from first1 to last .save ar.lc, saved_lc mov saved_lc=ar.lc // save lc cmp.eq p8,p9=last,first1 // everything fits in one word ? ld8 firstval=[first1],8 // load, ahead of time, "first1" word and tmp1=7, tmp1 // make sure that if tmp1==8 -> tmp1=0 shl tmp2=firstoff,3 // number of bits ;; (p9) ld8 lastval=[last] // load, ahead of time, "last" word, if needed shl tmp1=tmp1,3 // number of bits (p9) adds tmp3=-8,tmp3 // effectively loaded ;; (p8) mov lastval=r0 // we don't need lastval if first1==last shl hmask=hmask,tmp2 // build head mask, mask off [0,first1off[ shr.u tmask=tmask,tmp1 // build tail mask, mask off ]8,lastoff] ;; .body #define count tmp3 (p8) and hmask=hmask,tmask // apply tail mask to head mask if 1 word only (p9) and word2[0]=lastval,tmask // mask last it as appropriate shr.u count=count,3 // how many 8-byte? ;; // If count is odd, finish this 8-byte word so that we can // load two back-to-back 8-byte words per loop thereafter. and word1[0]=firstval,hmask // and mask it as appropriate tbit.nz p10,p11=count,0 // if (count is odd) ;; (p8) mov result1[0]=word1[0] (p9) add result1[0]=word1[0],word2[0] ;; cmp.ltu p6,p0=result1[0],word1[0] // check the carry cmp.eq.or.andcm p8,p0=0,count // exit if zero 8-byte ;; (p6) adds result1[0]=1,result1[0] (p8) br.cond.dptk .do_csum_exit // if (within an 8-byte word) (p11) br.cond.dptk .do_csum16 // if (count is even) // Here count is odd. ld8 word1[1]=[first1],8 // load an 8-byte word cmp.eq p9,p10=1,count // if (count == 1) adds count=-1,count // loaded an 8-byte word ;; add result1[0]=result1[0],word1[1] ;; cmp.ltu p6,p0=result1[0],word1[1] ;; (p6) adds result1[0]=1,result1[0] (p9) br.cond.sptk .do_csum_exit // if (count == 1) exit // Fall through to calculate the checksum, feeding result1[0] as // the initial value in result1[0]. // // Calculate the checksum loading two 8-byte words per loop. 
// .do_csum16: add first2=8,first1 shr.u count=count,1 // we do 16 bytes per loop ;; adds count=-1,count mov carry1=r0 mov carry2=r0 brp.loop.imp 1f,2f ;; mov ar.ec=PIPE_DEPTH mov ar.lc=count // set lc mov pr.rot=1<<16 // result1[0] must be initialized in advance. mov result2[0]=r0 ;; .align 32 1: (ELD_1) cmp.ltu pC1[0],p0=result1[LOAD_LATENCY],word1[LOAD_LATENCY+1] (pC1[1])adds carry1=1,carry1 (ELD_1) cmp.ltu pC2[0],p0=result2[LOAD_LATENCY],word2[LOAD_LATENCY+1] (pC2[1])adds carry2=1,carry2 (ELD) add result1[LOAD_LATENCY-1]=result1[LOAD_LATENCY],word1[LOAD_LATENCY] (ELD) add result2[LOAD_LATENCY-1]=result2[LOAD_LATENCY],word2[LOAD_LATENCY] 2: (p[0]) ld8 word1[0]=[first1],16 (p[0]) ld8 word2[0]=[first2],16 br.ctop.sptk 1b ;; // Since len is a 32-bit value, carry cannot be larger than a 64-bit value. (pC1[1])adds carry1=1,carry1 // since we miss the last one (pC2[1])adds carry2=1,carry2 ;; add result1[LOAD_LATENCY+1]=result1[LOAD_LATENCY+1],carry1 add result2[LOAD_LATENCY+1]=result2[LOAD_LATENCY+1],carry2 ;; cmp.ltu p6,p0=result1[LOAD_LATENCY+1],carry1 cmp.ltu p7,p0=result2[LOAD_LATENCY+1],carry2 ;; (p6) adds result1[LOAD_LATENCY+1]=1,result1[LOAD_LATENCY+1] (p7) adds result2[LOAD_LATENCY+1]=1,result2[LOAD_LATENCY+1] ;; add result1[0]=result1[LOAD_LATENCY+1],result2[LOAD_LATENCY+1] ;; cmp.ltu p6,p0=result1[0],result2[LOAD_LATENCY+1] ;; (p6) adds result1[0]=1,result1[0] ;; .do_csum_exit: // // now fold 64 into 16 bits taking care of carry // that's not very good because it has lots of sequentiality // mov tmp3=0xffff zxt4 tmp1=result1[0] shr.u tmp2=result1[0],32 ;; add result1[0]=tmp1,tmp2 ;; and tmp1=result1[0],tmp3 shr.u tmp2=result1[0],16 ;; add result1[0]=tmp1,tmp2 ;; and tmp1=result1[0],tmp3 shr.u tmp2=result1[0],16 ;; add result1[0]=tmp1,tmp2 ;; and tmp1=result1[0],tmp3 shr.u tmp2=result1[0],16 ;; add ret0=tmp1,tmp2 mov pr=saved_pr,0xffffffffffff0000 ;; // if buf was odd then swap bytes mov ar.pfs=saved_pfs // restore ar.ec (p15) mux1 ret0=ret0,@rev // reverse word ;; mov ar.lc=saved_lc (p15) shr.u ret0=ret0,64-16 // + shift back to position = swap bytes br.ret.sptk.many rp // I (Jun Nakajima) wrote an equivalent code (see below), but it was // not much better than the original. So keep the original there so that // someone else can challenge. // // shr.u word1[0]=result1[0],32 // zxt4 result1[0]=result1[0] // ;; // add result1[0]=result1[0],word1[0] // ;; // zxt2 result2[0]=result1[0] // extr.u word1[0]=result1[0],16,16 // shr.u carry1=result1[0],32 // ;; // add result2[0]=result2[0],word1[0] // ;; // add result2[0]=result2[0],carry1 // ;; // extr.u ret0=result2[0],16,16 // ;; // add ret0=ret0,result2[0] // ;; // zxt2 ret0=ret0 // mov ar.pfs=saved_pfs // restore ar.ec // mov pr=saved_pr,0xffffffffffff0000 // ;; // // if buf was odd then swap bytes // mov ar.lc=saved_lc //(p15) mux1 ret0=ret0,@rev // reverse word // ;; //(p15) shr.u ret0=ret0,64-16 // + shift back to position = swap bytes // br.ret.sptk.many rp END(do_csum)
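The exit path above folds the 64-bit accumulator down to the 16-bit Internet checksum with one 32-bit fold followed by three 16-bit folds (plus a byte swap when the buffer started on an odd address). The folding alone, written as plain C (sketch, illustrative name):

#include <stdint.h>

static uint16_t fold_csum64(uint64_t sum)
{
	sum = (sum & 0xffffffff) + (sum >> 32);	/* 64 -> 33 bits        */
	sum = (sum & 0xffff) + (sum >> 16);	/* first 16-bit fold    */
	sum = (sum & 0xffff) + (sum >> 16);	/* absorb the carry     */
	sum = (sum & 0xffff) + (sum >> 16);	/* final fold           */
	return (uint16_t)sum;
}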
AirFortressIlikara/LS2K0300-linux-4.19
9,339
arch/ia64/lib/memset.S
/* SPDX-License-Identifier: GPL-2.0 */ /* Optimized version of the standard memset() function. Copyright (c) 2002 Hewlett-Packard Co/CERN Sverre Jarp <Sverre.Jarp@cern.ch> Return: dest Inputs: in0: dest in1: value in2: count The algorithm is fairly straightforward: set byte by byte until we we get to a 16B-aligned address, then loop on 128 B chunks using an early store as prefetching, then loop on 32B chucks, then clear remaining words, finally clear remaining bytes. Since a stf.spill f0 can store 16B in one go, we use this instruction to get peak speed when value = 0. */ #include <asm/asmmacro.h> #include <asm/export.h> #undef ret #define dest in0 #define value in1 #define cnt in2 #define tmp r31 #define save_lc r30 #define ptr0 r29 #define ptr1 r28 #define ptr2 r27 #define ptr3 r26 #define ptr9 r24 #define loopcnt r23 #define linecnt r22 #define bytecnt r21 #define fvalue f6 // This routine uses only scratch predicate registers (p6 - p15) #define p_scr p6 // default register for same-cycle branches #define p_nz p7 #define p_zr p8 #define p_unalgn p9 #define p_y p11 #define p_n p12 #define p_yy p13 #define p_nn p14 #define MIN1 15 #define MIN1P1HALF 8 #define LINE_SIZE 128 #define LSIZE_SH 7 // shift amount #define PREF_AHEAD 8 GLOBAL_ENTRY(memset) { .mmi .prologue alloc tmp = ar.pfs, 3, 0, 0, 0 lfetch.nt1 [dest] // .save ar.lc, save_lc mov.i save_lc = ar.lc .body } { .mmi mov ret0 = dest // return value cmp.ne p_nz, p_zr = value, r0 // use stf.spill if value is zero cmp.eq p_scr, p0 = cnt, r0 ;; } { .mmi and ptr2 = -(MIN1+1), dest // aligned address and tmp = MIN1, dest // prepare to check for correct alignment tbit.nz p_y, p_n = dest, 0 // Do we have an odd address? (M_B_U) } { .mib mov ptr1 = dest mux1 value = value, @brcst // create 8 identical bytes in word (p_scr) br.ret.dpnt.many rp // return immediately if count = 0 ;; } { .mib cmp.ne p_unalgn, p0 = tmp, r0 // } { .mib sub bytecnt = (MIN1+1), tmp // NB: # of bytes to move is 1 higher than loopcnt cmp.gt p_scr, p0 = 16, cnt // is it a minimalistic task? (p_scr) br.cond.dptk.many .move_bytes_unaligned // go move just a few (M_B_U) ;; } { .mmi (p_unalgn) add ptr1 = (MIN1+1), ptr2 // after alignment (p_unalgn) add ptr2 = MIN1P1HALF, ptr2 // after alignment (p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 3 // should we do a st8 ? ;; } { .mib (p_y) add cnt = -8, cnt // (p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 2 // should we do a st4 ? } { .mib (p_y) st8 [ptr2] = value,-4 // (p_n) add ptr2 = 4, ptr2 // ;; } { .mib (p_yy) add cnt = -4, cnt // (p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 1 // should we do a st2 ? } { .mib (p_yy) st4 [ptr2] = value,-2 // (p_nn) add ptr2 = 2, ptr2 // ;; } { .mmi mov tmp = LINE_SIZE+1 // for compare (p_y) add cnt = -2, cnt // (p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 0 // should we do a st1 ? } { .mmi setf.sig fvalue=value // transfer value to FLP side (p_y) st2 [ptr2] = value,-1 // (p_n) add ptr2 = 1, ptr2 // ;; } { .mmi (p_yy) st1 [ptr2] = value // cmp.gt p_scr, p0 = tmp, cnt // is it a minimalistic task? 
} { .mbb (p_yy) add cnt = -1, cnt // (p_scr) br.cond.dpnt.many .fraction_of_line // go move just a few ;; } { .mib nop.m 0 shr.u linecnt = cnt, LSIZE_SH (p_zr) br.cond.dptk.many .l1b // Jump to use stf.spill ;; } TEXT_ALIGN(32) // --------------------- // L1A: store ahead into cache lines; fill later { .mmi and tmp = -(LINE_SIZE), cnt // compute end of range mov ptr9 = ptr1 // used for prefetching and cnt = (LINE_SIZE-1), cnt // remainder } { .mmi mov loopcnt = PREF_AHEAD-1 // default prefetch loop cmp.gt p_scr, p0 = PREF_AHEAD, linecnt // check against actual value ;; } { .mmi (p_scr) add loopcnt = -1, linecnt // add ptr2 = 8, ptr1 // start of stores (beyond prefetch stores) add ptr1 = tmp, ptr1 // first address beyond total range ;; } { .mmi add tmp = -1, linecnt // next loop count mov.i ar.lc = loopcnt // ;; } .pref_l1a: { .mib stf8 [ptr9] = fvalue, 128 // Do stores one cache line apart nop.i 0 br.cloop.dptk.few .pref_l1a ;; } { .mmi add ptr0 = 16, ptr2 // Two stores in parallel mov.i ar.lc = tmp // ;; } .l1ax: { .mmi stf8 [ptr2] = fvalue, 8 stf8 [ptr0] = fvalue, 8 ;; } { .mmi stf8 [ptr2] = fvalue, 24 stf8 [ptr0] = fvalue, 24 ;; } { .mmi stf8 [ptr2] = fvalue, 8 stf8 [ptr0] = fvalue, 8 ;; } { .mmi stf8 [ptr2] = fvalue, 24 stf8 [ptr0] = fvalue, 24 ;; } { .mmi stf8 [ptr2] = fvalue, 8 stf8 [ptr0] = fvalue, 8 ;; } { .mmi stf8 [ptr2] = fvalue, 24 stf8 [ptr0] = fvalue, 24 ;; } { .mmi stf8 [ptr2] = fvalue, 8 stf8 [ptr0] = fvalue, 32 cmp.lt p_scr, p0 = ptr9, ptr1 // do we need more prefetching? ;; } { .mmb stf8 [ptr2] = fvalue, 24 (p_scr) stf8 [ptr9] = fvalue, 128 br.cloop.dptk.few .l1ax ;; } { .mbb cmp.le p_scr, p0 = 8, cnt // just a few bytes left ? (p_scr) br.cond.dpnt.many .fraction_of_line // Branch no. 2 br.cond.dpnt.many .move_bytes_from_alignment // Branch no. 3 ;; } TEXT_ALIGN(32) .l1b: // ------------------------------------ // L1B: store ahead into cache lines; fill later { .mmi and tmp = -(LINE_SIZE), cnt // compute end of range mov ptr9 = ptr1 // used for prefetching and cnt = (LINE_SIZE-1), cnt // remainder } { .mmi mov loopcnt = PREF_AHEAD-1 // default prefetch loop cmp.gt p_scr, p0 = PREF_AHEAD, linecnt // check against actual value ;; } { .mmi (p_scr) add loopcnt = -1, linecnt add ptr2 = 16, ptr1 // start of stores (beyond prefetch stores) add ptr1 = tmp, ptr1 // first address beyond total range ;; } { .mmi add tmp = -1, linecnt // next loop count mov.i ar.lc = loopcnt ;; } .pref_l1b: { .mib stf.spill [ptr9] = f0, 128 // Do stores one cache line apart nop.i 0 br.cloop.dptk.few .pref_l1b ;; } { .mmi add ptr0 = 16, ptr2 // Two stores in parallel mov.i ar.lc = tmp ;; } .l1bx: { .mmi stf.spill [ptr2] = f0, 32 stf.spill [ptr0] = f0, 32 ;; } { .mmi stf.spill [ptr2] = f0, 32 stf.spill [ptr0] = f0, 32 ;; } { .mmi stf.spill [ptr2] = f0, 32 stf.spill [ptr0] = f0, 64 cmp.lt p_scr, p0 = ptr9, ptr1 // do we need more prefetching? ;; } { .mmb stf.spill [ptr2] = f0, 32 (p_scr) stf.spill [ptr9] = f0, 128 br.cloop.dptk.few .l1bx ;; } { .mib cmp.gt p_scr, p0 = 8, cnt // just a few bytes left ? 
(p_scr) br.cond.dpnt.many .move_bytes_from_alignment // ;; } .fraction_of_line: { .mib add ptr2 = 16, ptr1 shr.u loopcnt = cnt, 5 // loopcnt = cnt / 32 ;; } { .mib cmp.eq p_scr, p0 = loopcnt, r0 add loopcnt = -1, loopcnt (p_scr) br.cond.dpnt.many .store_words ;; } { .mib and cnt = 0x1f, cnt // compute the remaining cnt mov.i ar.lc = loopcnt ;; } TEXT_ALIGN(32) .l2: // ------------------------------------ // L2A: store 32B in 2 cycles { .mmb stf8 [ptr1] = fvalue, 8 stf8 [ptr2] = fvalue, 8 ;; } { .mmb stf8 [ptr1] = fvalue, 24 stf8 [ptr2] = fvalue, 24 br.cloop.dptk.many .l2 ;; } .store_words: { .mib cmp.gt p_scr, p0 = 8, cnt // just a few bytes left ? (p_scr) br.cond.dpnt.many .move_bytes_from_alignment // Branch ;; } { .mmi stf8 [ptr1] = fvalue, 8 // store cmp.le p_y, p_n = 16, cnt add cnt = -8, cnt // subtract ;; } { .mmi (p_y) stf8 [ptr1] = fvalue, 8 // store (p_y) cmp.le.unc p_yy, p_nn = 16, cnt (p_y) add cnt = -8, cnt // subtract ;; } { .mmi // store (p_yy) stf8 [ptr1] = fvalue, 8 (p_yy) add cnt = -8, cnt // subtract ;; } .move_bytes_from_alignment: { .mib cmp.eq p_scr, p0 = cnt, r0 tbit.nz.unc p_y, p0 = cnt, 2 // should we terminate with a st4 ? (p_scr) br.cond.dpnt.few .restore_and_exit ;; } { .mib (p_y) st4 [ptr1] = value,4 tbit.nz.unc p_yy, p0 = cnt, 1 // should we terminate with a st2 ? ;; } { .mib (p_yy) st2 [ptr1] = value,2 tbit.nz.unc p_y, p0 = cnt, 0 // should we terminate with a st1 ? ;; } { .mib (p_y) st1 [ptr1] = value ;; } .restore_and_exit: { .mib nop.m 0 mov.i ar.lc = save_lc br.ret.sptk.many rp ;; } .move_bytes_unaligned: { .mmi .pred.rel "mutex",p_y, p_n .pred.rel "mutex",p_yy, p_nn (p_n) cmp.le p_yy, p_nn = 4, cnt (p_y) cmp.le p_yy, p_nn = 5, cnt (p_n) add ptr2 = 2, ptr1 } { .mmi (p_y) add ptr2 = 3, ptr1 (p_y) st1 [ptr1] = value, 1 // fill 1 (odd-aligned) byte [15, 14 (or less) left] (p_y) add cnt = -1, cnt ;; } { .mmi (p_yy) cmp.le.unc p_y, p0 = 8, cnt add ptr3 = ptr1, cnt // prepare last store mov.i ar.lc = save_lc } { .mmi (p_yy) st2 [ptr1] = value, 4 // fill 2 (aligned) bytes (p_yy) st2 [ptr2] = value, 4 // fill 2 (aligned) bytes [11, 10 (o less) left] (p_yy) add cnt = -4, cnt ;; } { .mmi (p_y) cmp.le.unc p_yy, p0 = 8, cnt add ptr3 = -1, ptr3 // last store tbit.nz p_scr, p0 = cnt, 1 // will there be a st2 at the end ? } { .mmi (p_y) st2 [ptr1] = value, 4 // fill 2 (aligned) bytes (p_y) st2 [ptr2] = value, 4 // fill 2 (aligned) bytes [7, 6 (or less) left] (p_y) add cnt = -4, cnt ;; } { .mmi (p_yy) st2 [ptr1] = value, 4 // fill 2 (aligned) bytes (p_yy) st2 [ptr2] = value, 4 // fill 2 (aligned) bytes [3, 2 (or less) left] tbit.nz p_y, p0 = cnt, 0 // will there be a st1 at the end ? } { .mmi (p_yy) add cnt = -4, cnt ;; } { .mmb (p_scr) st2 [ptr1] = value // fill 2 (aligned) bytes (p_y) st1 [ptr3] = value // fill last byte (using ptr3) br.ret.sptk.many rp } END(memset) EXPORT_SYMBOL(memset)
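The "mux1 value = value, @brcst" near the top replicates the fill byte into all eight byte lanes so the body can issue 8-byte (stf8) and 16-byte (stf.spill) stores. The equivalent replication in C (illustrative only):

#include <stdint.h>

static uint64_t broadcast_byte(uint8_t c)
{
	return (uint64_t)c * 0x0101010101010101ULL;	/* 8 copies of c */
}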
AirFortressIlikara/LS2K0300-linux-4.19
17,117
arch/ia64/lib/copy_user.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * * Optimized version of the copy_user() routine. * It is used to copy date across the kernel/user boundary. * * The source and destination are always on opposite side of * the boundary. When reading from user space we must catch * faults on loads. When writing to user space we must catch * errors on stores. Note that because of the nature of the copy * we don't need to worry about overlapping regions. * * * Inputs: * in0 address of source buffer * in1 address of destination buffer * in2 number of bytes to copy * * Outputs: * ret0 0 in case of success. The number of bytes NOT copied in * case of error. * * Copyright (C) 2000-2001 Hewlett-Packard Co * Stephane Eranian <eranian@hpl.hp.com> * * Fixme: * - handle the case where we have more than 16 bytes and the alignment * are different. * - more benchmarking * - fix extraneous stop bit introduced by the EX() macro. */ #include <asm/asmmacro.h> #include <asm/export.h> // // Tuneable parameters // #define COPY_BREAK 16 // we do byte copy below (must be >=16) #define PIPE_DEPTH 21 // pipe depth #define EPI p[PIPE_DEPTH-1] // // arguments // #define dst in0 #define src in1 #define len in2 // // local registers // #define t1 r2 // rshift in bytes #define t2 r3 // lshift in bytes #define rshift r14 // right shift in bits #define lshift r15 // left shift in bits #define word1 r16 #define word2 r17 #define cnt r18 #define len2 r19 #define saved_lc r20 #define saved_pr r21 #define tmp r22 #define val r23 #define src1 r24 #define dst1 r25 #define src2 r26 #define dst2 r27 #define len1 r28 #define enddst r29 #define endsrc r30 #define saved_pfs r31 GLOBAL_ENTRY(__copy_user) .prologue .save ar.pfs, saved_pfs alloc saved_pfs=ar.pfs,3,((2*PIPE_DEPTH+7)&~7),0,((2*PIPE_DEPTH+7)&~7) .rotr val1[PIPE_DEPTH],val2[PIPE_DEPTH] .rotp p[PIPE_DEPTH] adds len2=-1,len // br.ctop is repeat/until mov ret0=r0 ;; // RAW of cfm when len=0 cmp.eq p8,p0=r0,len // check for zero length .save ar.lc, saved_lc mov saved_lc=ar.lc // preserve ar.lc (slow) (p8) br.ret.spnt.many rp // empty mempcy() ;; add enddst=dst,len // first byte after end of source add endsrc=src,len // first byte after end of destination .save pr, saved_pr mov saved_pr=pr // preserve predicates .body mov dst1=dst // copy because of rotation mov ar.ec=PIPE_DEPTH mov pr.rot=1<<16 // p16=true all others are false mov src1=src // copy because of rotation mov ar.lc=len2 // initialize lc for small count cmp.lt p10,p7=COPY_BREAK,len // if len > COPY_BREAK then long copy xor tmp=src,dst // same alignment test prepare (p10) br.cond.dptk .long_copy_user ;; // RAW pr.rot/p16 ? // // Now we do the byte by byte loop with software pipeline // // p7 is necessarily false by now 1: EX(.failure_in_pipe1,(p16) ld1 val1[0]=[src1],1) EX(.failure_out,(EPI) st1 [dst1]=val1[PIPE_DEPTH-1],1) br.ctop.dptk.few 1b ;; mov ar.lc=saved_lc mov pr=saved_pr,0xffffffffffff0000 mov ar.pfs=saved_pfs // restore ar.ec br.ret.sptk.many rp // end of short memcpy // // Not 8-byte aligned // .diff_align_copy_user: // At this point we know we have more than 16 bytes to copy // and also that src and dest do _not_ have the same alignment. and src2=0x7,src1 // src offset and dst2=0x7,dst1 // dst offset ;; // The basic idea is that we copy byte-by-byte at the head so // that we can reach 8-byte alignment for both src1 and dst1. // Then copy the body using software pipelined 8-byte copy, // shifting the two back-to-back words right and left, then copy // the tail by copying byte-by-byte. // // Fault handling. 
If the byte-by-byte at the head fails on the // load, then restart and finish the pipleline by copying zeros // to the dst1. Then copy zeros for the rest of dst1. // If 8-byte software pipeline fails on the load, do the same as // failure_in3 does. If the byte-by-byte at the tail fails, it is // handled simply by failure_in_pipe1. // // The case p14 represents the source has more bytes in the // the first word (by the shifted part), whereas the p15 needs to // copy some bytes from the 2nd word of the source that has the // tail of the 1st of the destination. // // // Optimization. If dst1 is 8-byte aligned (quite common), we don't need // to copy the head to dst1, to start 8-byte copy software pipeline. // We know src1 is not 8-byte aligned in this case. // cmp.eq p14,p15=r0,dst2 (p15) br.cond.spnt 1f ;; sub t1=8,src2 mov t2=src2 ;; shl rshift=t2,3 sub len1=len,t1 // set len1 ;; sub lshift=64,rshift ;; br.cond.spnt .word_copy_user ;; 1: cmp.leu p14,p15=src2,dst2 sub t1=dst2,src2 ;; .pred.rel "mutex", p14, p15 (p14) sub word1=8,src2 // (8 - src offset) (p15) sub t1=r0,t1 // absolute value (p15) sub word1=8,dst2 // (8 - dst offset) ;; // For the case p14, we don't need to copy the shifted part to // the 1st word of destination. sub t2=8,t1 (p14) sub word1=word1,t1 ;; sub len1=len,word1 // resulting len (p15) shl rshift=t1,3 // in bits (p14) shl rshift=t2,3 ;; (p14) sub len1=len1,t1 adds cnt=-1,word1 ;; sub lshift=64,rshift mov ar.ec=PIPE_DEPTH mov pr.rot=1<<16 // p16=true all others are false mov ar.lc=cnt ;; 2: EX(.failure_in_pipe2,(p16) ld1 val1[0]=[src1],1) EX(.failure_out,(EPI) st1 [dst1]=val1[PIPE_DEPTH-1],1) br.ctop.dptk.few 2b ;; clrrrb ;; .word_copy_user: cmp.gtu p9,p0=16,len1 (p9) br.cond.spnt 4f // if (16 > len1) skip 8-byte copy ;; shr.u cnt=len1,3 // number of 64-bit words ;; adds cnt=-1,cnt ;; .pred.rel "mutex", p14, p15 (p14) sub src1=src1,t2 (p15) sub src1=src1,t1 // // Now both src1 and dst1 point to an 8-byte aligned address. And // we have more than 8 bytes to copy. // mov ar.lc=cnt mov ar.ec=PIPE_DEPTH mov pr.rot=1<<16 // p16=true all others are false ;; 3: // // The pipleline consists of 3 stages: // 1 (p16): Load a word from src1 // 2 (EPI_1): Shift right pair, saving to tmp // 3 (EPI): Store tmp to dst1 // // To make it simple, use at least 2 (p16) loops to set up val1[n] // because we need 2 back-to-back val1[] to get tmp. // Note that this implies EPI_2 must be p18 or greater. // #define EPI_1 p[PIPE_DEPTH-2] #define SWITCH(pred, shift) cmp.eq pred,p0=shift,rshift #define CASE(pred, shift) \ (pred) br.cond.spnt .copy_user_bit##shift #define BODY(rshift) \ .copy_user_bit##rshift: \ 1: \ EX(.failure_out,(EPI) st8 [dst1]=tmp,8); \ (EPI_1) shrp tmp=val1[PIPE_DEPTH-2],val1[PIPE_DEPTH-1],rshift; \ EX(3f,(p16) ld8 val1[1]=[src1],8); \ (p16) mov val1[0]=r0; \ br.ctop.dptk 1b; \ ;; \ br.cond.sptk.many .diff_align_do_tail; \ 2: \ (EPI) st8 [dst1]=tmp,8; \ (EPI_1) shrp tmp=val1[PIPE_DEPTH-2],val1[PIPE_DEPTH-1],rshift; \ 3: \ (p16) mov val1[1]=r0; \ (p16) mov val1[0]=r0; \ br.ctop.dptk 2b; \ ;; \ br.cond.sptk.many .failure_in2 // // Since the instruction 'shrp' requires a fixed 128-bit value // specifying the bits to shift, we need to provide 7 cases // below. 
// SWITCH(p6, 8) SWITCH(p7, 16) SWITCH(p8, 24) SWITCH(p9, 32) SWITCH(p10, 40) SWITCH(p11, 48) SWITCH(p12, 56) ;; CASE(p6, 8) CASE(p7, 16) CASE(p8, 24) CASE(p9, 32) CASE(p10, 40) CASE(p11, 48) CASE(p12, 56) ;; BODY(8) BODY(16) BODY(24) BODY(32) BODY(40) BODY(48) BODY(56) ;; .diff_align_do_tail: .pred.rel "mutex", p14, p15 (p14) sub src1=src1,t1 (p14) adds dst1=-8,dst1 (p15) sub dst1=dst1,t1 ;; 4: // Tail correction. // // The problem with this piplelined loop is that the last word is not // loaded and thus parf of the last word written is not correct. // To fix that, we simply copy the tail byte by byte. sub len1=endsrc,src1,1 clrrrb ;; mov ar.ec=PIPE_DEPTH mov pr.rot=1<<16 // p16=true all others are false mov ar.lc=len1 ;; 5: EX(.failure_in_pipe1,(p16) ld1 val1[0]=[src1],1) EX(.failure_out,(EPI) st1 [dst1]=val1[PIPE_DEPTH-1],1) br.ctop.dptk.few 5b ;; mov ar.lc=saved_lc mov pr=saved_pr,0xffffffffffff0000 mov ar.pfs=saved_pfs br.ret.sptk.many rp // // Beginning of long mempcy (i.e. > 16 bytes) // .long_copy_user: tbit.nz p6,p7=src1,0 // odd alignment and tmp=7,tmp ;; cmp.eq p10,p8=r0,tmp mov len1=len // copy because of rotation (p8) br.cond.dpnt .diff_align_copy_user ;; // At this point we know we have more than 16 bytes to copy // and also that both src and dest have the same alignment // which may not be the one we want. So for now we must move // forward slowly until we reach 16byte alignment: no need to // worry about reaching the end of buffer. // EX(.failure_in1,(p6) ld1 val1[0]=[src1],1) // 1-byte aligned (p6) adds len1=-1,len1;; tbit.nz p7,p0=src1,1 ;; EX(.failure_in1,(p7) ld2 val1[1]=[src1],2) // 2-byte aligned (p7) adds len1=-2,len1;; tbit.nz p8,p0=src1,2 ;; // // Stop bit not required after ld4 because if we fail on ld4 // we have never executed the ld1, therefore st1 is not executed. // EX(.failure_in1,(p8) ld4 val2[0]=[src1],4) // 4-byte aligned ;; EX(.failure_out,(p6) st1 [dst1]=val1[0],1) tbit.nz p9,p0=src1,3 ;; // // Stop bit not required after ld8 because if we fail on ld8 // we have never executed the ld2, therefore st2 is not executed. // EX(.failure_in1,(p9) ld8 val2[1]=[src1],8) // 8-byte aligned EX(.failure_out,(p7) st2 [dst1]=val1[1],2) (p8) adds len1=-4,len1 ;; EX(.failure_out, (p8) st4 [dst1]=val2[0],4) (p9) adds len1=-8,len1;; shr.u cnt=len1,4 // number of 128-bit (2x64bit) words ;; EX(.failure_out, (p9) st8 [dst1]=val2[1],8) tbit.nz p6,p0=len1,3 cmp.eq p7,p0=r0,cnt adds tmp=-1,cnt // br.ctop is repeat/until (p7) br.cond.dpnt .dotail // we have less than 16 bytes left ;; adds src2=8,src1 adds dst2=8,dst1 mov ar.lc=tmp ;; // // 16bytes/iteration // 2: EX(.failure_in3,(p16) ld8 val1[0]=[src1],16) (p16) ld8 val2[0]=[src2],16 EX(.failure_out, (EPI) st8 [dst1]=val1[PIPE_DEPTH-1],16) (EPI) st8 [dst2]=val2[PIPE_DEPTH-1],16 br.ctop.dptk 2b ;; // RAW on src1 when fall through from loop // // Tail correction based on len only // // No matter where we come from (loop or test) the src1 pointer // is 16 byte aligned AND we have less than 16 bytes to copy. 
// .dotail: EX(.failure_in1,(p6) ld8 val1[0]=[src1],8) // at least 8 bytes tbit.nz p7,p0=len1,2 ;; EX(.failure_in1,(p7) ld4 val1[1]=[src1],4) // at least 4 bytes tbit.nz p8,p0=len1,1 ;; EX(.failure_in1,(p8) ld2 val2[0]=[src1],2) // at least 2 bytes tbit.nz p9,p0=len1,0 ;; EX(.failure_out, (p6) st8 [dst1]=val1[0],8) ;; EX(.failure_in1,(p9) ld1 val2[1]=[src1]) // only 1 byte left mov ar.lc=saved_lc ;; EX(.failure_out,(p7) st4 [dst1]=val1[1],4) mov pr=saved_pr,0xffffffffffff0000 ;; EX(.failure_out, (p8) st2 [dst1]=val2[0],2) mov ar.pfs=saved_pfs ;; EX(.failure_out, (p9) st1 [dst1]=val2[1]) br.ret.sptk.many rp // // Here we handle the case where the byte by byte copy fails // on the load. // Several factors make the zeroing of the rest of the buffer kind of // tricky: // - the pipeline: loads/stores are not in sync (pipeline) // // In the same loop iteration, the dst1 pointer does not directly // reflect where the faulty load was. // // - pipeline effect // When you get a fault on load, you may have valid data from // previous loads not yet store in transit. Such data must be // store normally before moving onto zeroing the rest. // // - single/multi dispersal independence. // // solution: // - we don't disrupt the pipeline, i.e. data in transit in // the software pipeline will be eventually move to memory. // We simply replace the load with a simple mov and keep the // pipeline going. We can't really do this inline because // p16 is always reset to 1 when lc > 0. // .failure_in_pipe1: sub ret0=endsrc,src1 // number of bytes to zero, i.e. not copied 1: (p16) mov val1[0]=r0 (EPI) st1 [dst1]=val1[PIPE_DEPTH-1],1 br.ctop.dptk 1b ;; mov pr=saved_pr,0xffffffffffff0000 mov ar.lc=saved_lc mov ar.pfs=saved_pfs br.ret.sptk.many rp // // This is the case where the byte by byte copy fails on the load // when we copy the head. We need to finish the pipeline and copy // zeros for the rest of the destination. Since this happens // at the top we still need to fill the body and tail. .failure_in_pipe2: sub ret0=endsrc,src1 // number of bytes to zero, i.e. not copied 2: (p16) mov val1[0]=r0 (EPI) st1 [dst1]=val1[PIPE_DEPTH-1],1 br.ctop.dptk 2b ;; sub len=enddst,dst1,1 // precompute len br.cond.dptk.many .failure_in1bis ;; // // Here we handle the head & tail part when we check for alignment. // The following code handles only the load failures. The // main diffculty comes from the fact that loads/stores are // scheduled. So when you fail on a load, the stores corresponding // to previous successful loads must be executed. // // However some simplifications are possible given the way // things work. // // 1) HEAD // Theory of operation: // // Page A | Page B // ---------|----- // 1|8 x // 1 2|8 x // 4|8 x // 1 4|8 x // 2 4|8 x // 1 2 4|8 x // |1 // |2 x // |4 x // // page_size >= 4k (2^12). (x means 4, 2, 1) // Here we suppose Page A exists and Page B does not. // // As we move towards eight byte alignment we may encounter faults. // The numbers on each page show the size of the load (current alignment). // // Key point: // - if you fail on 1, 2, 4 then you have never executed any smaller // size loads, e.g. failing ld4 means no ld1 nor ld2 executed // before. // // This allows us to simplify the cleanup code, because basically you // only have to worry about "pending" stores in the case of a failing // ld8(). Given the way the code is written today, this means only // worry about st2, st4. There we can use the information encapsulated // into the predicates. 
// // Other key point: // - if you fail on the ld8 in the head, it means you went straight // to it, i.e. 8byte alignment within an unexisting page. // Again this comes from the fact that if you crossed just for the ld8 then // you are 8byte aligned but also 16byte align, therefore you would // either go for the 16byte copy loop OR the ld8 in the tail part. // The combination ld1, ld2, ld4, ld8 where you fail on ld8 is impossible // because it would mean you had 15bytes to copy in which case you // would have defaulted to the byte by byte copy. // // // 2) TAIL // Here we now we have less than 16 bytes AND we are either 8 or 16 byte // aligned. // // Key point: // This means that we either: // - are right on a page boundary // OR // - are at more than 16 bytes from a page boundary with // at most 15 bytes to copy: no chance of crossing. // // This allows us to assume that if we fail on a load we haven't possibly // executed any of the previous (tail) ones, so we don't need to do // any stores. For instance, if we fail on ld2, this means we had // 2 or 3 bytes left to copy and we did not execute the ld8 nor ld4. // // This means that we are in a situation similar the a fault in the // head part. That's nice! // .failure_in1: sub ret0=endsrc,src1 // number of bytes to zero, i.e. not copied sub len=endsrc,src1,1 // // we know that ret0 can never be zero at this point // because we failed why trying to do a load, i.e. there is still // some work to do. // The failure_in1bis and length problem is taken care of at the // calling side. // ;; .failure_in1bis: // from (.failure_in3) mov ar.lc=len // Continue with a stupid byte store. ;; 5: st1 [dst1]=r0,1 br.cloop.dptk 5b ;; mov pr=saved_pr,0xffffffffffff0000 mov ar.lc=saved_lc mov ar.pfs=saved_pfs br.ret.sptk.many rp // // Here we simply restart the loop but instead // of doing loads we fill the pipeline with zeroes // We can't simply store r0 because we may have valid // data in transit in the pipeline. // ar.lc and ar.ec are setup correctly at this point // // we MUST use src1/endsrc here and not dst1/enddst because // of the pipeline effect. // .failure_in3: sub ret0=endsrc,src1 // number of bytes to zero, i.e. not copied ;; 2: (p16) mov val1[0]=r0 (p16) mov val2[0]=r0 (EPI) st8 [dst1]=val1[PIPE_DEPTH-1],16 (EPI) st8 [dst2]=val2[PIPE_DEPTH-1],16 br.ctop.dptk 2b ;; cmp.ne p6,p0=dst1,enddst // Do we need to finish the tail ? sub len=enddst,dst1,1 // precompute len (p6) br.cond.dptk .failure_in1bis ;; mov pr=saved_pr,0xffffffffffff0000 mov ar.lc=saved_lc mov ar.pfs=saved_pfs br.ret.sptk.many rp .failure_in2: sub ret0=endsrc,src1 cmp.ne p6,p0=dst1,enddst // Do we need to finish the tail ? sub len=enddst,dst1,1 // precompute len (p6) br.cond.dptk .failure_in1bis ;; mov pr=saved_pr,0xffffffffffff0000 mov ar.lc=saved_lc mov ar.pfs=saved_pfs br.ret.sptk.many rp // // handling of failures on stores: that's the easy part // .failure_out: sub ret0=enddst,dst1 mov pr=saved_pr,0xffffffffffff0000 mov ar.lc=saved_lc mov ar.pfs=saved_pfs br.ret.sptk.many rp END(__copy_user) EXPORT_SYMBOL(__copy_user)
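The fixup paths above all implement the same user-copy contract: when a source load faults, any data already in flight is stored, the rest of the destination is zero-filled, and the count of bytes not copied is returned. A minimal C sketch of that contract follows; get_byte() is a hypothetical per-byte accessor that fails like a faulting user load, not a real kernel API.

#include <stddef.h>
#include <string.h>

extern int get_byte(const unsigned char *src, unsigned char *val); /* 0 on success, non-zero on fault */

size_t copy_with_zero_tail(unsigned char *dst, const unsigned char *src, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		unsigned char c;

		if (get_byte(src + i, &c)) {
			/* Fault: zero the untouched tail of the destination. */
			memset(dst + i, 0, len - i);
			return len - i;		/* bytes NOT copied */
		}
		dst[i] = c;
	}
	return 0;				/* everything copied */
}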
AirFortressIlikara/LS2K0300-linux-4.19
1,220
arch/ia64/lib/strncpy_from_user.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Just like strncpy() except that if a fault occurs during copying, * -EFAULT is returned. * * Inputs: * in0: address of destination buffer * in1: address of string to be copied * in2: length of buffer in bytes * Outputs: * r8: -EFAULT in case of fault or number of bytes copied if no fault * * Copyright (C) 1998-2001 Hewlett-Packard Co * Copyright (C) 1998-2001 David Mosberger-Tang <davidm@hpl.hp.com> * * 00/03/06 D. Mosberger Fixed to return proper return value (bug found by * by Andreas Schwab <schwab@suse.de>). */ #include <asm/asmmacro.h> #include <asm/export.h> GLOBAL_ENTRY(__strncpy_from_user) alloc r2=ar.pfs,3,0,0,0 mov r8=0 mov r9=in1 ;; add r10=in1,in2 cmp.eq p6,p0=r0,in2 (p6) br.ret.spnt.many rp // XXX braindead copy loop---this needs to be optimized .Loop1: EX(.Lexit, ld1 r8=[in1],1) ;; EX(.Lexit, st1 [in0]=r8,1) cmp.ne p6,p7=r8,r0 ;; (p6) cmp.ne.unc p8,p0=in1,r10 (p8) br.cond.dpnt.few .Loop1 ;; (p6) mov r8=in2 // buffer filled up---return buffer length (p7) sub r8=in1,r9,1 // return string length (excluding NUL character) [.Lexit:] br.ret.sptk.many rp END(__strncpy_from_user) EXPORT_SYMBOL(__strncpy_from_user)
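The return-value rules of the loop above are easy to lose in the predication, so here is a hedged C sketch of the same semantics: -EFAULT on a fault, the string length excluding the NUL if the terminator is found, or the buffer length if the buffer fills first. copy_byte_from_user() is a hypothetical per-byte accessor used only to show the control flow.

#define EFAULT 14	/* conventional Linux errno value */

extern int copy_byte_from_user(char *dst, const char *usrc);	/* non-zero on fault */

long strncpy_from_user_sketch(char *dst, const char *src, long count)
{
	long i;

	if (count == 0)
		return 0;
	for (i = 0; i < count; i++) {
		if (copy_byte_from_user(&dst[i], &src[i]))
			return -EFAULT;		/* fault while copying */
		if (dst[i] == '\0')
			return i;		/* length excluding the NUL */
	}
	return count;				/* buffer filled, no NUL seen */
}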
AirFortressIlikara/LS2K0300-linux-4.19
6,489
arch/ia64/lib/strlen.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * * Optimized version of the standard strlen() function * * * Inputs: * in0 address of string * * Outputs: * ret0 the number of characters in the string (0 if empty string) * does not count the \0 * * Copyright (C) 1999, 2001 Hewlett-Packard Co * Stephane Eranian <eranian@hpl.hp.com> * * 09/24/99 S.Eranian add speculation recovery code */ #include <asm/asmmacro.h> #include <asm/export.h> // // // This is an enhanced version of the basic strlen. it includes a combination // of compute zero index (czx), parallel comparisons, speculative loads and // loop unroll using rotating registers. // // General Ideas about the algorithm: // The goal is to look at the string in chunks of 8 bytes. // so we need to do a few extra checks at the beginning because the // string may not be 8-byte aligned. In this case we load the 8byte // quantity which includes the start of the string and mask the unused // bytes with 0xff to avoid confusing czx. // We use speculative loads and software pipelining to hide memory // latency and do read ahead safely. This way we defer any exception. // // Because we don't want the kernel to be relying on particular // settings of the DCR register, we provide recovery code in case // speculation fails. The recovery code is going to "redo" the work using // only normal loads. If we still get a fault then we generate a // kernel panic. Otherwise we return the strlen as usual. // // The fact that speculation may fail can be caused, for instance, by // the DCR.dm bit being set. In this case TLB misses are deferred, i.e., // a NaT bit will be set if the translation is not present. The normal // load, on the other hand, will cause the translation to be inserted // if the mapping exists. // // It should be noted that we execute recovery code only when we need // to use the data that has been speculatively loaded: we don't execute // recovery code on pure read ahead data. // // Remarks: // - the cmp r0,r0 is used as a fast way to initialize a predicate // register to 1. This is required to make sure that we get the parallel // compare correct. // // - we don't use the epilogue counter to exit the loop but we need to set // it to zero beforehand. // // - after the loop we must test for Nat values because neither the // czx nor cmp instruction raise a NaT consumption fault. We must be // careful not to look too far for a Nat for which we don't care. // For instance we don't need to look at a NaT in val2 if the zero byte // was in val1. // // - Clearly performance tuning is required. 
// // // #define saved_pfs r11 #define tmp r10 #define base r16 #define orig r17 #define saved_pr r18 #define src r19 #define mask r20 #define val r21 #define val1 r22 #define val2 r23 GLOBAL_ENTRY(strlen) .prologue .save ar.pfs, saved_pfs alloc saved_pfs=ar.pfs,11,0,0,8 // rotating must be multiple of 8 .rotr v[2], w[2] // declares our 4 aliases extr.u tmp=in0,0,3 // tmp=least significant 3 bits mov orig=in0 // keep trackof initial byte address dep src=0,in0,0,3 // src=8byte-aligned in0 address .save pr, saved_pr mov saved_pr=pr // preserve predicates (rotation) ;; .body ld8 v[1]=[src],8 // must not speculate: can fail here shl tmp=tmp,3 // multiply by 8bits/byte mov mask=-1 // our mask ;; ld8.s w[1]=[src],8 // speculatively load next cmp.eq p6,p0=r0,r0 // sets p6 to true for cmp.and sub tmp=64,tmp // how many bits to shift our mask on the right ;; shr.u mask=mask,tmp // zero enough bits to hold v[1] valuable part mov ar.ec=r0 // clear epilogue counter (saved in ar.pfs) ;; add base=-16,src // keep track of aligned base or v[1]=v[1],mask // now we have a safe initial byte pattern ;; 1: ld8.s v[0]=[src],8 // speculatively load next czx1.r val1=v[1] // search 0 byte from right czx1.r val2=w[1] // search 0 byte from right following 8bytes ;; ld8.s w[0]=[src],8 // speculatively load next to next cmp.eq.and p6,p0=8,val1 // p6 = p6 and val1==8 cmp.eq.and p6,p0=8,val2 // p6 = p6 and mask==8 (p6) br.wtop.dptk 1b // loop until p6 == 0 ;; // // We must return try the recovery code iff // val1_is_nat || (val1==8 && val2_is_nat) // // XXX Fixme // - there must be a better way of doing the test // cmp.eq p8,p9=8,val1 // p6 = val1 had zero (disambiguate) tnat.nz p6,p7=val1 // test NaT on val1 (p6) br.cond.spnt .recover // jump to recovery if val1 is NaT ;; // // if we come here p7 is true, i.e., initialized for // cmp // cmp.eq.and p7,p0=8,val1// val1==8? tnat.nz.and p7,p0=val2 // test NaT if val2 (p7) br.cond.spnt .recover // jump to recovery if val2 is NaT ;; (p8) mov val1=val2 // the other test got us out of the loop (p8) adds src=-16,src // correct position when 3 ahead (p9) adds src=-24,src // correct position when 4 ahead ;; sub ret0=src,orig // distance from base sub tmp=8,val1 // which byte in word mov pr=saved_pr,0xffffffffffff0000 ;; sub ret0=ret0,tmp // adjust mov ar.pfs=saved_pfs // because of ar.ec, restore no matter what br.ret.sptk.many rp // end of normal execution // // Outlined recovery code when speculation failed // // This time we don't use speculation and rely on the normal exception // mechanism. that's why the loop is not as good as the previous one // because read ahead is not possible // // IMPORTANT: // Please note that in the case of strlen() as opposed to strlen_user() // we don't use the exception mechanism, as this function is not // supposed to fail. If that happens it means we have a bug and the // code will cause of kernel fault. // // XXX Fixme // - today we restart from the beginning of the string instead // of trying to continue where we left off. // .recover: ld8 val=[base],8 // will fail if unrecoverable fault ;; or val=val,mask // remask first bytes cmp.eq p0,p6=r0,r0 // nullify first ld8 in loop ;; // // ar.ec is still zero here // 2: (p6) ld8 val=[base],8 // will fail if unrecoverable fault ;; czx1.r val1=val // search 0 byte from right ;; cmp.eq p6,p0=8,val1 // val1==8 ? 
(p6) br.wtop.dptk 2b // loop until p6 == 0 ;; // (avoid WAW on p63) sub ret0=base,orig // distance from base sub tmp=8,val1 mov pr=saved_pr,0xffffffffffff0000 ;; sub ret0=ret0,tmp // length=now - back -1 mov ar.pfs=saved_pfs // because of ar.ec, restore no matter what br.ret.sptk.many rp // end of successful recovery code END(strlen) EXPORT_SYMBOL(strlen)
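The core idea described in the comments above can be restated in portable terms: align down to an 8-byte boundary, force the bytes that precede the string to a non-zero value so they cannot look like a terminator, then scan word by word for the first zero byte. The sketch below assumes a little-endian layout like IA-64 and replaces czx1.r with a simple helper; the over-read past the terminator is only an illustration, not the kernel implementation.

#include <stddef.h>
#include <stdint.h>

static int first_zero_byte(uint64_t v)	/* 0..7, or 8 if no zero byte (czx1.r analogue) */
{
	int i;

	for (i = 0; i < 8; i++)
		if (((v >> (8 * i)) & 0xff) == 0)
			return i;
	return 8;
}

size_t strlen_words(const char *s)
{
	const uint64_t *p = (const uint64_t *)((uintptr_t)s & ~7UL);
	unsigned int head = (uintptr_t)s & 7;	/* bytes before the string start */
	uint64_t v = *p++;
	int idx;

	if (head)
		v |= (1ULL << (8 * head)) - 1;	/* mask leading bytes with non-zero */
	while ((idx = first_zero_byte(v)) == 8)
		v = *p++;
	return (size_t)(((const char *)(p - 1) - s) + idx);
}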
AirFortressIlikara/LS2K0300-linux-4.19
3,627
arch/ia64/lib/xor.S
/* * arch/ia64/lib/xor.S * * Optimized RAID-5 checksumming functions for IA-64. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * You should have received a copy of the GNU General Public License * (for example /usr/src/linux/COPYING); if not, write to the Free * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <asm/asmmacro.h> #include <asm/export.h> GLOBAL_ENTRY(xor_ia64_2) .prologue .fframe 0 .save ar.pfs, r31 alloc r31 = ar.pfs, 3, 0, 13, 16 .save ar.lc, r30 mov r30 = ar.lc .save pr, r29 mov r29 = pr ;; .body mov r8 = in1 mov ar.ec = 6 + 2 shr in0 = in0, 3 ;; adds in0 = -1, in0 mov r16 = in1 mov r17 = in2 ;; mov ar.lc = in0 mov pr.rot = 1 << 16 ;; .rotr s1[6+1], s2[6+1], d[2] .rotp p[6+2] 0: (p[0]) ld8.nta s1[0] = [r16], 8 (p[0]) ld8.nta s2[0] = [r17], 8 (p[6]) xor d[0] = s1[6], s2[6] (p[6+1])st8.nta [r8] = d[1], 8 nop.f 0 br.ctop.dptk.few 0b ;; mov ar.lc = r30 mov pr = r29, -1 br.ret.sptk.few rp END(xor_ia64_2) EXPORT_SYMBOL(xor_ia64_2) GLOBAL_ENTRY(xor_ia64_3) .prologue .fframe 0 .save ar.pfs, r31 alloc r31 = ar.pfs, 4, 0, 20, 24 .save ar.lc, r30 mov r30 = ar.lc .save pr, r29 mov r29 = pr ;; .body mov r8 = in1 mov ar.ec = 6 + 2 shr in0 = in0, 3 ;; adds in0 = -1, in0 mov r16 = in1 mov r17 = in2 ;; mov r18 = in3 mov ar.lc = in0 mov pr.rot = 1 << 16 ;; .rotr s1[6+1], s2[6+1], s3[6+1], d[2] .rotp p[6+2] 0: (p[0]) ld8.nta s1[0] = [r16], 8 (p[0]) ld8.nta s2[0] = [r17], 8 (p[6]) xor d[0] = s1[6], s2[6] ;; (p[0]) ld8.nta s3[0] = [r18], 8 (p[6+1])st8.nta [r8] = d[1], 8 (p[6]) xor d[0] = d[0], s3[6] br.ctop.dptk.few 0b ;; mov ar.lc = r30 mov pr = r29, -1 br.ret.sptk.few rp END(xor_ia64_3) EXPORT_SYMBOL(xor_ia64_3) GLOBAL_ENTRY(xor_ia64_4) .prologue .fframe 0 .save ar.pfs, r31 alloc r31 = ar.pfs, 5, 0, 27, 32 .save ar.lc, r30 mov r30 = ar.lc .save pr, r29 mov r29 = pr ;; .body mov r8 = in1 mov ar.ec = 6 + 2 shr in0 = in0, 3 ;; adds in0 = -1, in0 mov r16 = in1 mov r17 = in2 ;; mov r18 = in3 mov ar.lc = in0 mov pr.rot = 1 << 16 mov r19 = in4 ;; .rotr s1[6+1], s2[6+1], s3[6+1], s4[6+1], d[2] .rotp p[6+2] 0: (p[0]) ld8.nta s1[0] = [r16], 8 (p[0]) ld8.nta s2[0] = [r17], 8 (p[6]) xor d[0] = s1[6], s2[6] (p[0]) ld8.nta s3[0] = [r18], 8 (p[0]) ld8.nta s4[0] = [r19], 8 (p[6]) xor r20 = s3[6], s4[6] ;; (p[6+1])st8.nta [r8] = d[1], 8 (p[6]) xor d[0] = d[0], r20 br.ctop.dptk.few 0b ;; mov ar.lc = r30 mov pr = r29, -1 br.ret.sptk.few rp END(xor_ia64_4) EXPORT_SYMBOL(xor_ia64_4) GLOBAL_ENTRY(xor_ia64_5) .prologue .fframe 0 .save ar.pfs, r31 alloc r31 = ar.pfs, 6, 0, 34, 40 .save ar.lc, r30 mov r30 = ar.lc .save pr, r29 mov r29 = pr ;; .body mov r8 = in1 mov ar.ec = 6 + 2 shr in0 = in0, 3 ;; adds in0 = -1, in0 mov r16 = in1 mov r17 = in2 ;; mov r18 = in3 mov ar.lc = in0 mov pr.rot = 1 << 16 mov r19 = in4 mov r20 = in5 ;; .rotr s1[6+1], s2[6+1], s3[6+1], s4[6+1], s5[6+1], d[2] .rotp p[6+2] 0: (p[0]) ld8.nta s1[0] = [r16], 8 (p[0]) ld8.nta s2[0] = [r17], 8 (p[6]) xor d[0] = s1[6], s2[6] (p[0]) ld8.nta s3[0] = [r18], 8 (p[0]) ld8.nta s4[0] = [r19], 8 (p[6]) xor r21 = s3[6], s4[6] ;; (p[0]) ld8.nta s5[0] = [r20], 8 (p[6+1])st8.nta [r8] = d[1], 8 (p[6]) xor d[0] = d[0], r21 ;; (p[6]) xor d[0] = d[0], s5[6] nop.f 0 br.ctop.dptk.few 0b ;; mov ar.lc = r30 mov pr = r29, -1 br.ret.sptk.few rp END(xor_ia64_5) EXPORT_SYMBOL(xor_ia64_5)
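Stripped of the software pipelining and rotating registers, xor_ia64_2 reduces to XOR-ing the second buffer into the first, one 64-bit word per step. A plain C rendering of that computation is below; it assumes, as the RAID-5 callers do, that the byte count is a multiple of 8.

#include <stddef.h>
#include <stdint.h>

void xor_2_sketch(size_t bytes, uint64_t *p1, const uint64_t *p2)
{
	size_t words = bytes >> 3;	/* matches "shr in0 = in0, 3" above */
	size_t i;

	for (i = 0; i < words; i++)
		p1[i] ^= p2[i];		/* result lands in the first buffer */
}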
AirFortressIlikara/LS2K0300-linux-4.19
1,411
arch/ia64/sn/kernel/pio_phys.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved. * * This file contains macros used to access MMR registers via * uncached physical addresses. * pio_phys_read_mmr - read an MMR * pio_phys_write_mmr - write an MMR * pio_atomic_phys_write_mmrs - atomically write 1 or 2 MMRs with psr.ic=0 * Second MMR will be skipped if address is NULL * * Addresses passed to these routines should be uncached physical addresses * ie., 0x80000.... */ #include <asm/asmmacro.h> #include <asm/page.h> GLOBAL_ENTRY(pio_phys_read_mmr) .prologue .regstk 1,0,0,0 .body mov r2=psr rsm psr.i | psr.dt ;; srlz.d ld8.acq r8=[r32] ;; mov psr.l=r2;; srlz.d br.ret.sptk.many rp END(pio_phys_read_mmr) GLOBAL_ENTRY(pio_phys_write_mmr) .prologue .regstk 2,0,0,0 .body mov r2=psr rsm psr.i | psr.dt ;; srlz.d st8.rel [r32]=r33 ;; mov psr.l=r2;; srlz.d br.ret.sptk.many rp END(pio_phys_write_mmr) GLOBAL_ENTRY(pio_atomic_phys_write_mmrs) .prologue .regstk 4,0,0,0 .body mov r2=psr cmp.ne p9,p0=r34,r0; rsm psr.i | psr.dt | psr.ic ;; srlz.d st8.rel [r32]=r33 (p9) st8.rel [r34]=r35 ;; mov psr.l=r2;; srlz.d br.ret.sptk.many rp END(pio_atomic_phys_write_mmrs)
AirFortressIlikara/LS2K0300-linux-4.19
2,374
arch/ia64/sn/kernel/sn2/ptc_deadlock.S
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved. */ #include <asm/types.h> #include <asm/sn/shub_mmr.h> #define DEADLOCKBIT SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_SHFT #define WRITECOUNTMASK SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK #define ALIAS_OFFSET 8 .global sn2_ptc_deadlock_recovery_core .proc sn2_ptc_deadlock_recovery_core sn2_ptc_deadlock_recovery_core: .regstk 6,0,0,0 ptc0 = in0 data0 = in1 ptc1 = in2 data1 = in3 piowc = in4 zeroval = in5 piowcphy = r30 psrsave = r2 scr1 = r16 scr2 = r17 mask = r18 extr.u piowcphy=piowc,0,61;; // Convert piowc to uncached physical address dep piowcphy=-1,piowcphy,63,1 movl mask=WRITECOUNTMASK mov r8=r0 1: cmp.ne p8,p9=r0,ptc1 // Test for shub type (ptc1 non-null on shub1) // p8 = 1 if shub1, p9 = 1 if shub2 add scr2=ALIAS_OFFSET,piowc // Address of WRITE_STATUS alias register mov scr1=7;; // Clear DEADLOCK, WRITE_ERROR, MULTI_WRITE_ERROR (p8) st8.rel [scr2]=scr1;; (p9) ld8.acq scr1=[scr2];; 5: ld8.acq scr1=[piowc];; // Wait for PIOs to complete. hint @pause and scr2=scr1,mask;; // mask of writecount bits cmp.ne p6,p0=zeroval,scr2 (p6) br.cond.sptk 5b ////////////// BEGIN PHYSICAL MODE //////////////////// mov psrsave=psr // Disable IC (no PMIs) rsm psr.i | psr.dt | psr.ic;; srlz.i;; st8.rel [ptc0]=data0 // Write PTC0 & wait for completion. 5: ld8.acq scr1=[piowcphy];; // Wait for PIOs to complete. hint @pause and scr2=scr1,mask;; // mask of writecount bits cmp.ne p6,p0=zeroval,scr2 (p6) br.cond.sptk 5b;; tbit.nz p8,p7=scr1,DEADLOCKBIT;;// Test for DEADLOCK (p7) cmp.ne p7,p0=r0,ptc1;; // Test for non-null ptc1 (p7) st8.rel [ptc1]=data1;; // Now write PTC1. 5: ld8.acq scr1=[piowcphy];; // Wait for PIOs to complete. hint @pause and scr2=scr1,mask;; // mask of writecount bits cmp.ne p6,p0=zeroval,scr2 (p6) br.cond.sptk 5b tbit.nz p8,p0=scr1,DEADLOCKBIT;;// Test for DEADLOCK mov psr.l=psrsave;; // Reenable IC srlz.i;; ////////////// END PHYSICAL MODE //////////////////// (p8) add r8=1,r8 (p8) br.cond.spnt 1b;; // Repeat if DEADLOCK occurred. br.ret.sptk rp .endp sn2_ptc_deadlock_recovery_core
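The recovery loop above follows a simple pattern: wait for pending PIO writes to drain by masking the pending-write-count field of the write-status register, issue the PTC write, wait again, and retry the whole sequence if the deadlock bit is set. The hedged C sketch below shows that pattern only; mmr_read()/mmr_write() are hypothetical stand-ins for the uncached physical-mode accesses, and the psr/interrupt manipulation is omitted.

#include <stdint.h>

extern uint64_t mmr_read(volatile uint64_t *addr);		/* hypothetical uncached read */
extern void mmr_write(volatile uint64_t *addr, uint64_t val);	/* hypothetical uncached write */

static void wait_pio_idle(volatile uint64_t *piowc, uint64_t mask, uint64_t zeroval)
{
	/* Spin until the pending-write-count field drops to its idle value. */
	while ((mmr_read(piowc) & mask) != zeroval)
		;
}

int ptc_write_with_recovery(volatile uint64_t *ptc0, uint64_t data0,
			    volatile uint64_t *piowc, uint64_t mask,
			    uint64_t zeroval, uint64_t deadlock_bit)
{
	int deadlocks = -1;

	do {
		deadlocks++;
		wait_pio_idle(piowc, mask, zeroval);	/* drain before writing */
		mmr_write(ptc0, data0);			/* issue the PTC write */
		wait_pio_idle(piowc, mask, zeroval);	/* wait for it to land */
	} while (mmr_read(piowc) & deadlock_bit);	/* repeat if DEADLOCK occurred */

	return deadlocks;				/* count kept in r8 above */
}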
AirFortressIlikara/LS2K0300-linux-4.19
4,062
arch/ia64/hp/sim/boot/boot_head.S
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 1998-2003 Hewlett-Packard Co * David Mosberger-Tang <davidm@hpl.hp.com> */ #include <asm/asmmacro.h> #include <asm/pal.h> .bss .align 16 stack_mem: .skip 16834 .text /* This needs to be defined because lib/string.c:strlcat() calls it in case of error... */ GLOBAL_ENTRY(printk) break 0 END(printk) GLOBAL_ENTRY(_start) .prologue .save rp, r0 .body movl gp = __gp movl sp = stack_mem+16384-16 bsw.1 br.call.sptk.many rp=start_bootloader 0: nop 0 /* dummy nop to make unwinding work */ END(_start) /* * Set a break point on this function so that symbols are available to set breakpoints in * the kernel being debugged. */ GLOBAL_ENTRY(debug_break) br.ret.sptk.many b0 END(debug_break) GLOBAL_ENTRY(ssc) .regstk 5,0,0,0 mov r15=in4 break 0x80001 br.ret.sptk.many b0 END(ssc) GLOBAL_ENTRY(jmp_to_kernel) .regstk 2,0,0,0 mov r28=in0 mov b7=in1 br.sptk.few b7 END(jmp_to_kernel) /* * r28 contains the index of the PAL function * r29--31 the args * Return values in ret0--3 (r8--11) */ GLOBAL_ENTRY(pal_emulator_static) mov r8=-1 mov r9=256 ;; cmp.gtu p6,p7=r9,r28 /* r28 <= 255? */ (p6) br.cond.sptk.few static ;; mov r9=512 ;; cmp.gtu p6,p7=r9,r28 (p6) br.cond.sptk.few stacked ;; static: cmp.eq p6,p7=PAL_PTCE_INFO,r28 (p7) br.cond.sptk.few 1f ;; mov r8=0 /* status = 0 */ movl r9=0x100000000 /* tc.base */ movl r10=0x0000000200000003 /* count[0], count[1] */ movl r11=0x1000000000002000 /* stride[0], stride[1] */ br.cond.sptk.few rp 1: cmp.eq p6,p7=PAL_FREQ_RATIOS,r28 (p7) br.cond.sptk.few 1f mov r8=0 /* status = 0 */ movl r9 =0x100000064 /* proc_ratio (1/100) */ movl r10=0x100000100 /* bus_ratio<<32 (1/256) */ movl r11=0x100000064 /* itc_ratio<<32 (1/100) */ ;; 1: cmp.eq p6,p7=PAL_RSE_INFO,r28 (p7) br.cond.sptk.few 1f mov r8=0 /* status = 0 */ mov r9=96 /* num phys stacked */ mov r10=0 /* hints */ mov r11=0 br.cond.sptk.few rp 1: cmp.eq p6,p7=PAL_CACHE_FLUSH,r28 /* PAL_CACHE_FLUSH */ (p7) br.cond.sptk.few 1f mov r9=ar.lc movl r8=524288 /* flush 512k million cache lines (16MB) */ ;; mov ar.lc=r8 movl r8=0xe000000000000000 ;; .loop: fc r8 add r8=32,r8 br.cloop.sptk.few .loop sync.i ;; srlz.i ;; mov ar.lc=r9 mov r8=r0 ;; 1: cmp.eq p6,p7=PAL_PERF_MON_INFO,r28 (p7) br.cond.sptk.few 1f mov r8=0 /* status = 0 */ movl r9 =0x08122f04 /* generic=4 width=47 retired=8 cycles=18 */ mov r10=0 /* reserved */ mov r11=0 /* reserved */ mov r16=0xffff /* implemented PMC */ mov r17=0x3ffff /* implemented PMD */ add r18=8,r29 /* second index */ ;; st8 [r29]=r16,16 /* store implemented PMC */ st8 [r18]=r0,16 /* clear remaining bits */ ;; st8 [r29]=r0,16 /* clear remaining bits */ st8 [r18]=r0,16 /* clear remaining bits */ ;; st8 [r29]=r17,16 /* store implemented PMD */ st8 [r18]=r0,16 /* clear remaining bits */ mov r16=0xf0 /* cycles count capable PMC */ ;; st8 [r29]=r0,16 /* clear remaining bits */ st8 [r18]=r0,16 /* clear remaining bits */ mov r17=0xf0 /* retired bundles capable PMC */ ;; st8 [r29]=r16,16 /* store cycles capable */ st8 [r18]=r0,16 /* clear remaining bits */ ;; st8 [r29]=r0,16 /* clear remaining bits */ st8 [r18]=r0,16 /* clear remaining bits */ ;; st8 [r29]=r17,16 /* store retired bundle capable */ st8 [r18]=r0,16 /* clear remaining bits */ ;; st8 [r29]=r0,16 /* clear remaining bits */ st8 [r18]=r0,16 /* clear remaining bits */ ;; 1: cmp.eq p6,p7=PAL_VM_SUMMARY,r28 (p7) br.cond.sptk.few 1f mov r8=0 /* status = 0 */ movl r9=0x2044040020F1865 /* num_tc_levels=2, num_unique_tcs=4 */ /* max_itr_entry=64, max_dtr_entry=64 */ /* hash_tag_id=2, max_pkr=15 */ /* 
key_size=24, phys_add_size=50, vw=1 */ movl r10=0x183C /* rid_size=24, impl_va_msb=60 */ ;; 1: cmp.eq p6,p7=PAL_MEM_ATTRIB,r28 (p7) br.cond.sptk.few 1f mov r8=0 /* status = 0 */ mov r9=0x80|0x01 /* NatPage|WB */ ;; 1: br.cond.sptk.few rp stacked: br.ret.sptk.few rp END(pal_emulator_static)
AirFortressIlikara/LS2K0300-linux-4.19
6,433
arch/unicore32/kernel/head.S
/* * linux/arch/unicore32/kernel/head.S * * Code specific to PKUnity SoC and UniCore ISA * * Copyright (C) 2001-2010 GUAN Xue-tao * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> #include <asm/ptrace.h> #include <generated/asm-offsets.h> #include <asm/memory.h> #include <asm/thread_info.h> #include <asm/hwdef-copro.h> #include <asm/pgtable-hwdef.h> #if (PHYS_OFFSET & 0x003fffff) #error "PHYS_OFFSET must be at an even 4MiB boundary!" #endif #define KERNEL_RAM_VADDR (PAGE_OFFSET + KERNEL_IMAGE_START) #define KERNEL_RAM_PADDR (PHYS_OFFSET + KERNEL_IMAGE_START) #define KERNEL_PGD_PADDR (KERNEL_RAM_PADDR - 0x1000) #define KERNEL_PGD_VADDR (KERNEL_RAM_VADDR - 0x1000) #define KERNEL_START KERNEL_RAM_VADDR #define KERNEL_END _end /* * swapper_pg_dir is the virtual address of the initial page table. * We place the page tables 4K below KERNEL_RAM_VADDR. Therefore, we must * make sure that KERNEL_RAM_VADDR is correctly set. Currently, we expect * the least significant 16 bits to be 0x8000, but we could probably * relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x1000. */ #if (KERNEL_RAM_VADDR & 0xffff) != 0x8000 #error KERNEL_RAM_VADDR must start at 0xXXXX8000 #endif .globl swapper_pg_dir .equ swapper_pg_dir, KERNEL_RAM_VADDR - 0x1000 /* * Kernel startup entry point. * --------------------------- * * This is normally called from the decompressor code. The requirements * are: MMU = off, D-cache = off, I-cache = dont care * * This code is mostly position independent, so if you link the kernel at * 0xc0008000, you call this at __pa(0xc0008000). */ __HEAD ENTRY(stext) @ set asr mov r0, #PRIV_MODE @ ensure priv mode or r0, #PSR_R_BIT | PSR_I_BIT @ disable irqs mov.a asr, r0 @ process identify movc r0, p0.c0, #0 @ cpuid movl r1, 0xff00ffff @ mask movl r2, 0x4d000863 @ value and r0, r1, r0 cxor.a r0, r2 bne __error_p @ invalid processor id /* * Clear the 4K level 1 swapper page table */ movl r0, #KERNEL_PGD_PADDR @ page table address mov r1, #0 add r2, r0, #0x1000 101: stw.w r1, [r0]+, #4 stw.w r1, [r0]+, #4 stw.w r1, [r0]+, #4 stw.w r1, [r0]+, #4 cxor.a r0, r2 bne 101b movl r4, #KERNEL_PGD_PADDR @ page table address mov r7, #PMD_TYPE_SECT | PMD_PRESENT @ page size: section or r7, r7, #PMD_SECT_CACHEABLE @ cacheable or r7, r7, #PMD_SECT_READ | PMD_SECT_WRITE | PMD_SECT_EXEC /* * Create identity mapping for first 4MB of kernel to * cater for the MMU enable. This identity mapping * will be removed by paging_init(). We use our current program * counter to determine corresponding section base address. */ mov r6, pc mov r6, r6 >> #22 @ start of kernel section or r1, r7, r6 << #22 @ flags + kernel base stw r1, [r4+], r6 << #2 @ identity mapping /* * Now setup the pagetables for our kernel direct * mapped region. */ add r0, r4, #(KERNEL_START & 0xff000000) >> 20 stw.w r1, [r0+], #(KERNEL_START & 0x00c00000) >> 20 movl r6, #(KERNEL_END - 1) add r0, r0, #4 add r6, r4, r6 >> #20 102: csub.a r0, r6 add r1, r1, #1 << 22 bua 103f stw.w r1, [r0]+, #4 b 102b 103: /* * Then map first 4MB of ram in case it contains our boot params. */ add r0, r4, #PAGE_OFFSET >> 20 or r6, r7, #(PHYS_OFFSET & 0xffc00000) stw r6, [r0] ldw r15, __switch_data @ address to jump to after /* * Initialise TLB, Caches, and MMU state ready to switch the MMU * on. 
*/ mov r0, #0 movc p0.c5, r0, #28 @ cache invalidate all nop8 movc p0.c6, r0, #6 @ TLB invalidate all nop8 /* * ..V. .... ..TB IDAM * ..1. .... ..01 1111 */ movl r0, #0x201f @ control register setting /* * Setup common bits before finally enabling the MMU. Essentially * this is just loading the page table pointer and domain access * registers. */ #ifndef CONFIG_ALIGNMENT_TRAP andn r0, r0, #CR_A #endif #ifdef CONFIG_CPU_DCACHE_DISABLE andn r0, r0, #CR_D #endif #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH andn r0, r0, #CR_B #endif #ifdef CONFIG_CPU_ICACHE_DISABLE andn r0, r0, #CR_I #endif movc p0.c2, r4, #0 @ set pgd b __turn_mmu_on ENDPROC(stext) /* * Enable the MMU. This completely changes the structure of the visible * memory space. You will not be able to trace execution through this. * * r0 = cp#0 control register * r15 = *virtual* address to jump to upon completion */ .align 5 __turn_mmu_on: mov r0, r0 movc p0.c1, r0, #0 @ write control reg nop @ fetch inst by phys addr mov pc, r15 nop8 @ fetch inst by phys addr ENDPROC(__turn_mmu_on) /* * Setup the initial page tables. We only setup the barest * amount which are required to get the kernel running, which * generally means mapping in the kernel code. * * r9 = cpuid * r10 = procinfo * * Returns: * r0, r3, r6, r7 corrupted * r4 = physical page table address */ .ltorg .align 2 .type __switch_data, %object __switch_data: .long __mmap_switched .long __bss_start @ r6 .long _end @ r7 .long cr_alignment @ r8 .long init_thread_union + THREAD_START_SP @ sp /* * The following fragment of code is executed with the MMU on in MMU mode, * and uses absolute addresses; this is not position independent. * * r0 = cp#0 control register */ __mmap_switched: adr r3, __switch_data + 4 ldm.w (r6, r7, r8), [r3]+ ldw sp, [r3] mov fp, #0 @ Clear BSS (and zero fp) 203: csub.a r6, r7 bea 204f stw.w fp, [r6]+,#4 b 203b 204: andn r1, r0, #CR_A @ Clear 'A' bit stm (r0, r1), [r8]+ @ Save control register values b start_kernel ENDPROC(__mmap_switched) /* * Exception handling. Something went wrong and we can't proceed. We * ought to tell the user, but since we don't have any guarantee that * we're even running on the right architecture, we do virtually nothing. * * If CONFIG_DEBUG_LL is set we try to print out something about the error * and hope for the best (useful if bootloader fails to pass a proper * machine ID for example). */ __error_p: #ifdef CONFIG_DEBUG_LL adr r0, str_p1 b.l printascii mov r0, r9 b.l printhex8 adr r0, str_p2 b.l printascii 901: nop8 b 901b str_p1: .asciz "\nError: unrecognized processor variant (0x" str_p2: .asciz ").\n" .align #endif ENDPROC(__error_p)
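The page-table setup in stext builds one level-1 table of 4 MiB section entries: an identity entry covering the code that turns the MMU on, plus a run of entries mapping the kernel's direct-mapped region. The following C sketch shows that shape under simplifying assumptions; the flag value is a placeholder, not the real PMD_* bits, and the boot-params mapping is left out.

#include <stdint.h>
#include <string.h>

#define SECTION_SHIFT	22			/* 4 MiB sections */
#define SECTION_SIZE	(1U << SECTION_SHIFT)
#define SECT_FLAGS	0x0c0eU			/* placeholder for PMD_* bits */

void build_initial_pgd(uint32_t *pgd,		/* 4 KiB level-1 table, 1024 entries */
		       uint32_t phys_kernel,	/* physical address we run from */
		       uint32_t virt_start,	/* KERNEL_START */
		       uint32_t virt_end)	/* KERNEL_END */
{
	uint32_t va = virt_start & ~(SECTION_SIZE - 1);
	uint32_t pa = phys_kernel & ~(SECTION_SIZE - 1);

	memset(pgd, 0, 1024 * sizeof(*pgd));	/* clear the table first */

	/* Identity entry so execution survives the instant the MMU goes on. */
	pgd[pa >> SECTION_SHIFT] = pa | SECT_FLAGS;

	/* Kernel direct mapping: one section entry per 4 MiB of image. */
	for (; va < virt_end; va += SECTION_SIZE, pa += SECTION_SIZE)
		pgd[va >> SECTION_SHIFT] = pa | SECT_FLAGS;
}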
AirFortressIlikara/LS2K0300-linux-4.19
17,654
arch/unicore32/kernel/entry.S
/* * linux/arch/unicore32/kernel/entry.S * * Code specific to PKUnity SoC and UniCore ISA * * Copyright (C) 2001-2010 GUAN Xue-tao * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Low-level vector interface routines */ #include <linux/init.h> #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/errno.h> #include <asm/thread_info.h> #include <asm/memory.h> #include <asm/unistd.h> #include <generated/asm-offsets.h> #include "debug-macro.S" @ @ Most of the stack format comes from struct pt_regs, but with @ the addition of 8 bytes for storing syscall args 5 and 6. @ #define S_OFF 8 /* * The SWI code relies on the fact that R0 is at the bottom of the stack * (due to slow/fast restore user regs). */ #if S_R0 != 0 #error "Please fix" #endif .macro zero_fp #ifdef CONFIG_FRAME_POINTER mov fp, #0 #endif .endm .macro alignment_trap, rtemp #ifdef CONFIG_ALIGNMENT_TRAP ldw \rtemp, .LCcralign ldw \rtemp, [\rtemp] movc p0.c1, \rtemp, #0 #endif .endm .macro load_user_sp_lr, rd, rtemp, offset = 0 mov \rtemp, asr xor \rtemp, \rtemp, #(PRIV_MODE ^ SUSR_MODE) mov.a asr, \rtemp @ switch to the SUSR mode ldw sp, [\rd+], #\offset @ load sp_user ldw lr, [\rd+], #\offset + 4 @ load lr_user xor \rtemp, \rtemp, #(PRIV_MODE ^ SUSR_MODE) mov.a asr, \rtemp @ switch back to the PRIV mode .endm .macro priv_exit, rpsr mov.a bsr, \rpsr ldm.w (r0 - r15), [sp]+ ldm.b (r16 - pc), [sp]+ @ load r0 - pc, asr .endm .macro restore_user_regs, fast = 0, offset = 0 ldw r1, [sp+], #\offset + S_PSR @ get calling asr ldw lr, [sp+], #\offset + S_PC @ get pc mov.a bsr, r1 @ save in bsr_priv .if \fast add sp, sp, #\offset + S_R1 @ r0 is syscall return value ldm.w (r1 - r15), [sp]+ @ get calling r1 - r15 ldur (r16 - lr), [sp]+ @ get calling r16 - lr .else ldm.w (r0 - r15), [sp]+ @ get calling r0 - r15 ldur (r16 - lr), [sp]+ @ get calling r16 - lr .endif nop add sp, sp, #S_FRAME_SIZE - S_R16 mov.a pc, lr @ return @ and move bsr_priv into asr .endm .macro get_thread_info, rd mov \rd, sp >> #13 mov \rd, \rd << #13 .endm .macro get_irqnr_and_base, irqnr, irqstat, base, tmp ldw \base, =(PKUNITY_INTC_BASE) ldw \irqstat, [\base+], #0xC @ INTC_ICIP ldw \tmp, [\base+], #0x4 @ INTC_ICMR and.a \irqstat, \irqstat, \tmp beq 1001f cntlz \irqnr, \irqstat rsub \irqnr, \irqnr, #31 1001: /* EQ will be set if no irqs pending */ .endm #ifdef CONFIG_DEBUG_LL .macro printreg, reg, temp adr \temp, 901f stm (r0-r3), [\temp]+ stw lr, [\temp+], #0x10 mov r0, \reg b.l printhex8 mov r0, #':' b.l printch mov r0, pc b.l printhex8 adr r0, 902f b.l printascii adr \temp, 901f ldm (r0-r3), [\temp]+ ldw lr, [\temp+], #0x10 b 903f 901: .word 0, 0, 0, 0, 0 @ r0-r3, lr 902: .asciz ": epip4d\n" .align 903: .endm #endif /* * These are the registers used in the syscall handler, and allow us to * have in theory up to 7 arguments to a function - r0 to r6. * * Note that tbl == why is intentional. * * We must set at least "tsk" and "why" when calling ret_with_reschedule. */ scno .req r21 @ syscall number tbl .req r22 @ syscall table pointer why .req r22 @ Linux syscall (!= 0) tsk .req r23 @ current thread_info /* * Interrupt handling. 
Preserves r17, r18, r19 */ .macro intr_handler 1: get_irqnr_and_base r0, r6, r5, lr beq 2f mov r1, sp @ @ routine called with r0 = irq number, r1 = struct pt_regs * @ adr lr, 1b b asm_do_IRQ 2: .endm /* * PRIV mode handlers */ .macro priv_entry sub sp, sp, #(S_FRAME_SIZE - 4) stm (r1 - r15), [sp]+ add r5, sp, #S_R15 stm (r16 - r28), [r5]+ ldm (r1 - r3), [r0]+ add r5, sp, #S_SP - 4 @ here for interlock avoidance mov r4, #-1 @ "" "" "" "" add r0, sp, #(S_FRAME_SIZE - 4) stw.w r1, [sp+], #-4 @ save the "real" r0 copied @ from the exception stack mov r1, lr @ @ We are now ready to fill in the remaining blanks on the stack: @ @ r0 - sp_priv @ r1 - lr_priv @ r2 - lr_<exception>, already fixed up for correct return/restart @ r3 - bsr_<exception> @ r4 - orig_r0 (see pt_regs definition in ptrace.h) @ stm (r0 - r4), [r5]+ .endm /* * User mode handlers * */ .macro user_entry sub sp, sp, #S_FRAME_SIZE stm (r1 - r15), [sp+] add r4, sp, #S_R16 stm (r16 - r28), [r4]+ ldm (r1 - r3), [r0]+ add r0, sp, #S_PC @ here for interlock avoidance mov r4, #-1 @ "" "" "" "" stw r1, [sp] @ save the "real" r0 copied @ from the exception stack @ @ We are now ready to fill in the remaining blanks on the stack: @ @ r2 - lr_<exception>, already fixed up for correct return/restart @ r3 - bsr_<exception> @ r4 - orig_r0 (see pt_regs definition in ptrace.h) @ @ Also, separately save sp_user and lr_user @ stm (r2 - r4), [r0]+ stur (sp, lr), [r0-] @ @ Enable the alignment trap while in kernel mode @ alignment_trap r0 @ @ Clear FP to mark the first stack frame @ zero_fp .endm .text @ @ __invalid - generic code for failed exception @ (re-entrant version of handlers) @ __invalid: sub sp, sp, #S_FRAME_SIZE stm (r1 - r15), [sp+] add r1, sp, #S_R16 stm (r16 - r28, sp, lr), [r1]+ zero_fp ldm (r4 - r6), [r0]+ add r0, sp, #S_PC @ here for interlock avoidance mov r7, #-1 @ "" "" "" "" stw r4, [sp] @ save preserved r0 stm (r5 - r7), [r0]+ @ lr_<exception>, @ asr_<exception>, "old_r0" mov r0, sp mov r1, asr b bad_mode ENDPROC(__invalid) .align 5 __dabt_priv: priv_entry @ @ get ready to re-enable interrupts if appropriate @ mov r17, asr cand.a r3, #PSR_I_BIT bne 1f andn r17, r17, #PSR_I_BIT 1: @ @ Call the processor-specific abort handler: @ @ r2 - aborted context pc @ r3 - aborted context asr @ @ The abort handler must return the aborted address in r0, and @ the fault status register in r1. 
@ movc r1, p0.c3, #0 @ get FSR movc r0, p0.c4, #0 @ get FAR @ @ set desired INTR state, then call main handler @ mov.a asr, r17 mov r2, sp b.l do_DataAbort @ @ INTRs off again before pulling preserved data off the stack @ disable_irq r0 @ @ restore BSR and restart the instruction @ ldw r2, [sp+], #S_PSR priv_exit r2 @ return from exception ENDPROC(__dabt_priv) .align 5 __intr_priv: priv_entry intr_handler mov r0, #0 @ epip4d movc p0.c5, r0, #14 nop; nop; nop; nop; nop; nop; nop; nop ldw r4, [sp+], #S_PSR @ irqs are already disabled priv_exit r4 @ return from exception ENDPROC(__intr_priv) .ltorg .align 5 __extn_priv: priv_entry mov r0, sp @ struct pt_regs *regs mov r1, asr b bad_mode @ not supported ENDPROC(__extn_priv) .align 5 __pabt_priv: priv_entry @ @ re-enable interrupts if appropriate @ mov r17, asr cand.a r3, #PSR_I_BIT bne 1f andn r17, r17, #PSR_I_BIT 1: @ @ set args, then call main handler @ @ r0 - address of faulting instruction @ r1 - pointer to registers on stack @ mov r0, r2 @ pass address of aborted instruction mov r1, #5 mov.a asr, r17 mov r2, sp @ regs b.l do_PrefetchAbort @ call abort handler @ @ INTRs off again before pulling preserved data off the stack @ disable_irq r0 @ @ restore BSR and restart the instruction @ ldw r2, [sp+], #S_PSR priv_exit r2 @ return from exception ENDPROC(__pabt_priv) .align 5 .LCcralign: .word cr_alignment .align 5 __dabt_user: user_entry #ifdef CONFIG_UNICORE_FPU_F64 cff ip, s31 cand.a ip, #0x08000000 @ FPU execption traps? beq 209f ldw ip, [sp+], #S_PC add ip, ip, #4 stw ip, [sp+], #S_PC @ @ fall through to the emulation code, which returns using r19 if @ it has emulated the instruction, or the more conventional lr @ if we are to treat this as a real extended instruction @ @ r0 - instruction @ 1: ldw.u r0, [r2] adr r19, ret_from_exception adr lr, 209f @ @ fallthrough to call do_uc_f64 @ /* * Check whether the instruction is a co-processor instruction. * If yes, we need to call the relevant co-processor handler. * * Note that we don't do a full check here for the co-processor * instructions; all instructions with bit 27 set are well * defined. The only instructions that should fault are the * co-processor instructions. * * Emulators may wish to make use of the following registers: * r0 = instruction opcode. * r2 = PC * r19 = normal "successful" return address * r20 = this threads thread_info structure. * lr = unrecognised instruction return address */ get_thread_info r20 @ get current thread and r8, r0, #0x00003c00 @ mask out CP number mov r7, #1 stb r7, [r20+], #TI_USED_CP + 2 @ set appropriate used_cp[] @ F64 hardware support entry point. @ r0 = faulted instruction @ r19 = return address @ r20 = fp_state enable_irq r4 add r20, r20, #TI_FPSTATE @ r20 = workspace cff r1, s31 @ get fpu FPSCR andn r2, r1, #0x08000000 ctf r2, s31 @ clear 27 bit mov r2, sp @ nothing stacked - regdump is at TOS mov lr, r19 @ setup for a return to the user code @ Now call the C code to package up the bounce to the support code @ r0 holds the trigger instruction @ r1 holds the FPSCR value @ r2 pointer to register dump b ucf64_exchandler 209: #endif @ @ Call the processor-specific abort handler: @ @ r2 - aborted context pc @ r3 - aborted context asr @ @ The abort handler must return the aborted address in r0, and @ the fault status register in r1. 
@ movc r1, p0.c3, #0 @ get FSR movc r0, p0.c4, #0 @ get FAR @ @ INTRs on, then call the main handler @ enable_irq r2 mov r2, sp adr lr, ret_from_exception b do_DataAbort ENDPROC(__dabt_user) .align 5 __intr_user: user_entry get_thread_info tsk intr_handler mov why, #0 b ret_to_user ENDPROC(__intr_user) .ltorg .align 5 __extn_user: user_entry mov r0, sp mov r1, asr b bad_mode ENDPROC(__extn_user) .align 5 __pabt_user: user_entry mov r0, r2 @ pass address of aborted instruction. mov r1, #5 enable_irq r1 @ Enable interrupts mov r2, sp @ regs b.l do_PrefetchAbort @ call abort handler /* fall through */ /* * This is the return code to user mode for abort handlers */ ENTRY(ret_from_exception) get_thread_info tsk mov why, #0 b ret_to_user ENDPROC(__pabt_user) ENDPROC(ret_from_exception) /* * Register switch for UniCore V2 processors * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info * previous and next are guaranteed not to be the same. */ ENTRY(__switch_to) add ip, r1, #TI_CPU_SAVE stm.w (r4 - r15), [ip]+ stm.w (r16 - r27, sp, lr), [ip]+ #ifdef CONFIG_UNICORE_FPU_F64 add ip, r1, #TI_FPSTATE sfm.w (f0 - f7 ), [ip]+ sfm.w (f8 - f15), [ip]+ sfm.w (f16 - f23), [ip]+ sfm.w (f24 - f31), [ip]+ cff r4, s31 stw r4, [ip] add ip, r2, #TI_FPSTATE lfm.w (f0 - f7 ), [ip]+ lfm.w (f8 - f15), [ip]+ lfm.w (f16 - f23), [ip]+ lfm.w (f24 - f31), [ip]+ ldw r4, [ip] ctf r4, s31 #endif add ip, r2, #TI_CPU_SAVE ldm.w (r4 - r15), [ip]+ ldm (r16 - r27, sp, pc), [ip]+ @ Load all regs saved previously ENDPROC(__switch_to) .align 5 /* * This is the fast syscall return path. We do as little as * possible here, and this includes saving r0 back into the PRIV * stack. */ ret_fast_syscall: disable_irq r1 @ disable interrupts ldw r1, [tsk+], #TI_FLAGS cand.a r1, #_TIF_WORK_MASK bne fast_work_pending @ fast_restore_user_regs restore_user_regs fast = 1, offset = S_OFF /* * Ok, we need to do extra processing, enter the slow path. */ fast_work_pending: stw.w r0, [sp+], #S_R0+S_OFF @ returned r0 work_pending: cand.a r1, #_TIF_NEED_RESCHED bne work_resched mov r0, sp @ 'regs' mov r2, why @ 'syscall' cand.a r1, #_TIF_SIGPENDING @ delivering a signal? cmovne why, #0 @ prevent further restarts b.l do_notify_resume b ret_slow_syscall @ Check work again work_resched: b.l schedule /* * "slow" syscall return path. "why" tells us if this was a real syscall. */ ENTRY(ret_to_user) ret_slow_syscall: disable_irq r1 @ disable interrupts get_thread_info tsk @ epip4d, one path error?! ldw r1, [tsk+], #TI_FLAGS cand.a r1, #_TIF_WORK_MASK bne work_pending no_work_pending: @ slow_restore_user_regs restore_user_regs fast = 0, offset = 0 ENDPROC(ret_to_user) /* * This is how we return from a fork. */ ENTRY(ret_from_fork) b.l schedule_tail b ret_slow_syscall ENDPROC(ret_from_fork) ENTRY(ret_from_kernel_thread) b.l schedule_tail mov r0, r5 adr lr, ret_slow_syscall mov pc, r4 ENDPROC(ret_from_kernel_thread) /*============================================================================= * SWI handler *----------------------------------------------------------------------------- */ .align 5 ENTRY(vector_swi) sub sp, sp, #S_FRAME_SIZE stm (r0 - r15), [sp]+ @ Calling r0 - r15 add r8, sp, #S_R16 stm (r16 - r28), [r8]+ @ Calling r16 - r28 add r8, sp, #S_PC stur (sp, lr), [r8-] @ Calling sp, lr mov r8, bsr @ called from non-REAL mode stw lr, [sp+], #S_PC @ Save calling PC stw r8, [sp+], #S_PSR @ Save ASR stw r0, [sp+], #S_OLD_R0 @ Save OLD_R0 zero_fp /* * Get the system call number. 
*/ sub ip, lr, #4 ldw.u scno, [ip] @ get SWI instruction #ifdef CONFIG_ALIGNMENT_TRAP ldw ip, __cr_alignment ldw ip, [ip] movc p0.c1, ip, #0 @ update control register #endif enable_irq ip get_thread_info tsk ldw tbl, =sys_call_table @ load syscall table pointer andn scno, scno, #0xff000000 @ mask off SWI op-code andn scno, scno, #0x00ff0000 @ mask off SWI op-code stm.w (r4, r5), [sp-] @ push fifth and sixth args ldw ip, [tsk+], #TI_FLAGS @ check for syscall tracing cand.a ip, #_TIF_SYSCALL_TRACE @ are we tracing syscalls? bne __sys_trace csub.a scno, #__NR_syscalls @ check upper syscall limit adr lr, ret_fast_syscall @ return address bea 1f ldw pc, [tbl+], scno << #2 @ call sys_* routine 1: add r1, sp, #S_OFF 2: mov why, #0 @ no longer a real syscall b sys_ni_syscall @ not private func /* * This is the really slow path. We're going to be doing * context switches, and waiting for our parent to respond. */ __sys_trace: mov r2, scno add r1, sp, #S_OFF mov r0, #0 @ trace entry [IP = 0] b.l syscall_trace adr lr, __sys_trace_return @ return address mov scno, r0 @ syscall number (possibly new) add r1, sp, #S_R0 + S_OFF @ pointer to regs csub.a scno, #__NR_syscalls @ check upper syscall limit bea 2b ldm (r0 - r3), [r1]+ @ have to reload r0 - r3 ldw pc, [tbl+], scno << #2 @ call sys_* routine __sys_trace_return: stw.w r0, [sp+], #S_R0 + S_OFF @ save returned r0 mov r2, scno mov r1, sp mov r0, #1 @ trace exit [IP = 1] b.l syscall_trace b ret_slow_syscall .align 5 #ifdef CONFIG_ALIGNMENT_TRAP .type __cr_alignment, #object __cr_alignment: .word cr_alignment #endif .ltorg ENTRY(sys_rt_sigreturn) add r0, sp, #S_OFF mov why, #0 @ prevent syscall restart handling b __sys_rt_sigreturn ENDPROC(sys_rt_sigreturn) __INIT /* * Vector stubs. * * This code is copied to 0xffff0200 so we can use branches in the * vectors, rather than ldr's. Note that this code must not * exceed 0x300 bytes. * * Common stub entry macro: * Enter in INTR mode, bsr = PRIV/USER ASR, lr = PRIV/USER PC * * SP points to a minimal amount of processor-private memory, the address * of which is copied into r0 for the mode specific abort handler. */ .macro vector_stub, name, mode .align 5 vector_\name: @ @ Save r0, lr_<exception> (parent PC) and bsr_<exception> @ (parent ASR) @ stw r0, [sp] stw lr, [sp+], #4 @ save r0, lr mov lr, bsr stw lr, [sp+], #8 @ save bsr @ @ Prepare for PRIV mode. INTRs remain disabled. 
@ mov r0, asr xor r0, r0, #(\mode ^ PRIV_MODE) mov.a bsr, r0 @ @ the branch table must immediately follow this code @ and lr, lr, #0x03 add lr, lr, #1 mov r0, sp ldw lr, [pc+], lr << #2 mov.a pc, lr @ branch to handler in PRIV mode ENDPROC(vector_\name) .align 2 @ handler addresses follow this label .endm .globl __stubs_start __stubs_start: /* * Interrupt dispatcher */ vector_stub intr, INTR_MODE .long __intr_user @ 0 (USER) .long __invalid @ 1 .long __invalid @ 2 .long __intr_priv @ 3 (PRIV) /* * Data abort dispatcher * Enter in ABT mode, bsr = USER ASR, lr = USER PC */ vector_stub dabt, ABRT_MODE .long __dabt_user @ 0 (USER) .long __invalid @ 1 .long __invalid @ 2 (INTR) .long __dabt_priv @ 3 (PRIV) /* * Prefetch abort dispatcher * Enter in ABT mode, bsr = USER ASR, lr = USER PC */ vector_stub pabt, ABRT_MODE .long __pabt_user @ 0 (USER) .long __invalid @ 1 .long __invalid @ 2 (INTR) .long __pabt_priv @ 3 (PRIV) /* * Undef instr entry dispatcher * Enter in EXTN mode, bsr = PRIV/USER ASR, lr = PRIV/USER PC */ vector_stub extn, EXTN_MODE .long __extn_user @ 0 (USER) .long __invalid @ 1 .long __invalid @ 2 (INTR) .long __extn_priv @ 3 (PRIV) /* * We group all the following data together to optimise * for CPUs with separate I & D caches. */ .align 5 .LCvswi: .word vector_swi .globl __stubs_end __stubs_end: .equ stubs_offset, __vectors_start + 0x200 - __stubs_start .globl __vectors_start __vectors_start: jepriv SYS_ERROR0 b vector_extn + stubs_offset ldw pc, .LCvswi + stubs_offset b vector_pabt + stubs_offset b vector_dabt + stubs_offset jepriv SYS_ERROR0 b vector_intr + stubs_offset jepriv SYS_ERROR0 .globl __vectors_end __vectors_end: .data .globl cr_alignment .globl cr_no_alignment cr_alignment: .space 4 cr_no_alignment: .space 4
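The SWI path above extracts the syscall number from the low bits of the trapped instruction, bounds-checks it against the table size, and either indexes sys_call_table or falls back to sys_ni_syscall. A hedged C rendering of that dispatch follows; the types, the argument passing and the NR_SYSCALLS placeholder are illustrative only.

typedef long (*syscall_fn_t)(long, long, long, long, long, long);

extern syscall_fn_t sys_call_table[];
extern long sys_ni_syscall(void);

#define NR_SYSCALLS	400	/* placeholder for __NR_syscalls */

long dispatch_swi(unsigned int swi_insn, const long *args)
{
	unsigned int scno = swi_insn & 0x0000ffff;	/* mask off the SWI op-code */

	if (scno >= NR_SYSCALLS)
		return sys_ni_syscall();		/* out of range */

	return sys_call_table[scno](args[0], args[1], args[2],
				    args[3], args[4], args[5]);
}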
AirFortressIlikara/LS2K0300-linux-4.19
2,044
arch/unicore32/kernel/debug-macro.S
/* * linux/arch/unicore32/kernel/debug-macro.S * * Code specific to PKUnity SoC and UniCore ISA * * Copyright (C) 2001-2010 GUAN Xue-tao * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Debugging macro include header */ #include <generated/asm-offsets.h> #include <mach/hardware.h> .macro put_word_ocd, rd, rx=r16 1001: movc \rx, p1.c0, #0 cand.a \rx, #2 bne 1001b movc p1.c1, \rd, #1 .endm #ifdef CONFIG_DEBUG_OCD /* debug using UniCore On-Chip-Debugger */ .macro addruart, rx .endm .macro senduart, rd, rx put_word_ocd \rd, \rx .endm .macro busyuart, rd, rx .endm .macro waituart, rd, rx .endm #else #define UART_CLK_DEFAULT 3686400 * 20 /* Uartclk = MCLK/ 2, The MCLK on my board is 3686400 * 40 */ #define BAUD_RATE_DEFAULT 115200 /* The baud rate of the serial port */ #define UART_DIVISOR_DEFAULT (UART_CLK_DEFAULT \ / (16 * BAUD_RATE_DEFAULT) - 1) .macro addruart,rx mrc p0, #0, \rx, c1, c0 tst \rx, #1 @ MMU enabled? moveq \rx, #0xee000000 @ physical base address movne \rx, #0x6e000000 @ virtual address @ We probe for the active serial port here @ However, now we assume UART0 is active: epip4d @ We assume r1 and r2 can be clobbered. movl r2, #UART_DIVISOR_DEFAULT mov r1, #0x80 str r1, [\rx, #UART_LCR_OFFSET] and r1, r2, #0xff00 mov r1, r1, lsr #8 str r1, [\rx, #UART_DLH_OFFSET] and r1, r2, #0xff str r1, [\rx, #UART_DLL_OFFSET] mov r1, #0x7 str r1, [\rx, #UART_FCR_OFFSET] mov r1, #0x3 str r1, [\rx, #UART_LCR_OFFSET] mov r1, #0x0 str r1, [\rx, #UART_IER_OFFSET] .endm .macro senduart,rd,rx str \rd, [\rx, #UART_THR_OFFSET] .endm .macro waituart,rd,rx 1001: ldr \rd, [\rx, #UART_LSR_OFFSET] tst \rd, #UART_LSR_THRE beq 1001b .endm .macro busyuart,rd,rx 1001: ldr \rd, [\rx, #UART_LSR_OFFSET] tst \rd, #UART_LSR_TEMT bne 1001b .endm #endif
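The waituart/senduart/busyuart macros implement classic polled UART output: wait until the transmit holding register is empty, write the byte, then wait for the transmitter to drain. The C sketch below shows the same loop; uart_read()/uart_write() are hypothetical MMIO helpers and the register offsets are placeholders rather than the real UART_*_OFFSET values.

#include <stdint.h>

#define UART_LSR	0x14		/* placeholder offsets */
#define UART_THR	0x00
#define LSR_THRE	0x20		/* transmit holding register empty */
#define LSR_TEMT	0x40		/* transmitter empty */

extern uint32_t uart_read(uintptr_t base, uint32_t off);
extern void uart_write(uintptr_t base, uint32_t off, uint32_t val);

void uart_putc(uintptr_t base, char c)
{
	while (!(uart_read(base, UART_LSR) & LSR_THRE))
		;			/* waituart: THR not ready yet */
	uart_write(base, UART_THR, (uint32_t)c);
	while (!(uart_read(base, UART_LSR) & LSR_TEMT))
		;			/* busyuart: let the character drain */
}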
AirFortressIlikara/LS2K0300-linux-4.19
2,339
arch/unicore32/kernel/hibernate_asm.S
/* * linux/arch/unicore32/kernel/hibernate_asm.S * * Code specific to PKUnity SoC and UniCore ISA * * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn> * Copyright (C) 2001-2010 Guan Xuetao * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/sys.h> #include <linux/errno.h> #include <linux/linkage.h> #include <generated/asm-offsets.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/assembler.h> @ restore_image(pgd_t *resume_pg_dir, struct pbe *restore_pblist) @ r0: resume_pg_dir @ r1: restore_pblist @ copy restore_pblist pages @ restore registers from swsusp_arch_regs_cpu0 @ ENTRY(restore_image) sub r0, r0, #PAGE_OFFSET mov r5, #0 movc p0.c6, r5, #6 @invalidate ITLB & DTLB movc p0.c2, r0, #0 nop nop nop nop nop nop nop .p2align 4,,7 101: csub.a r1, #0 beq 109f ldw r6, [r1+], #PBE_ADDRESS ldw r7, [r1+], #PBE_ORIN_ADDRESS movl ip, #128 102: ldm.w (r8 - r15), [r6]+ stm.w (r8 - r15), [r7]+ sub.a ip, ip, #1 bne 102b ldw r1, [r1+], #PBE_NEXT b 101b .p2align 4,,7 109: /* go back to the original page tables */ ldw r0, =swapper_pg_dir sub r0, r0, #PAGE_OFFSET mov r5, #0 movc p0.c6, r5, #6 movc p0.c2, r0, #0 nop nop nop nop nop nop nop #ifdef CONFIG_UNICORE_FPU_F64 ldw ip, 1f add ip, ip, #SWSUSP_FPSTATE lfm.w (f0 - f7 ), [ip]+ lfm.w (f8 - f15), [ip]+ lfm.w (f16 - f23), [ip]+ lfm.w (f24 - f31), [ip]+ ldw r4, [ip] ctf r4, s31 #endif mov r0, #0x0 ldw ip, 1f add ip, ip, #SWSUSP_CPU ldm.w (r4 - r15), [ip]+ ldm (r16 - r27, sp, pc), [ip]+ @ Load all regs saved previously .align 2 1: .long swsusp_arch_regs_cpu0 @ swsusp_arch_suspend() @ - prepare pc for resume, return from function without swsusp_save on resume @ - save registers in swsusp_arch_regs_cpu0 @ - call swsusp_save write suspend image ENTRY(swsusp_arch_suspend) ldw ip, 1f add ip, ip, #SWSUSP_CPU stm.w (r4 - r15), [ip]+ stm.w (r16 - r27, sp, lr), [ip]+ #ifdef CONFIG_UNICORE_FPU_F64 ldw ip, 1f add ip, ip, #SWSUSP_FPSTATE sfm.w (f0 - f7 ), [ip]+ sfm.w (f8 - f15), [ip]+ sfm.w (f16 - f23), [ip]+ sfm.w (f24 - f31), [ip]+ cff r4, s31 stw r4, [ip] #endif b swsusp_save @ no return 1: .long swsusp_arch_regs_cpu0
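restore_image walks restore_pblist node by node, copying each saved page back to the address it originally occupied (the 128-iteration inner loop moves 32 bytes per step, i.e. one 4 KiB page per node). The C sketch below shows that walk; the struct mirrors the PBE_* offsets only loosely and is not the kernel's struct pbe definition.

#include <string.h>

#define PAGE_SIZE	4096

struct pbe_sketch {
	void *address;			/* where the saved copy lives now */
	void *orig_address;		/* where the page must go back */
	struct pbe_sketch *next;
};

void restore_pages(struct pbe_sketch *restore_pblist)
{
	struct pbe_sketch *p;

	/* Follow the list until the NULL terminator, copying one page each. */
	for (p = restore_pblist; p; p = p->next)
		memcpy(p->orig_address, p->address, PAGE_SIZE);
}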
AirFortressIlikara/LS2K0300-linux-4.19
1,138
arch/unicore32/kernel/vmlinux.lds.S
/* * linux/arch/unicore32/kernel/vmlinux.lds.S * * Code specific to PKUnity SoC and UniCore ISA * * Copyright (C) 2001-2010 GUAN Xue-tao * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <asm-generic/vmlinux.lds.h> #include <asm/thread_info.h> #include <asm/memory.h> #include <asm/page.h> #include <asm/cache.h> OUTPUT_ARCH(unicore32) ENTRY(stext) jiffies = jiffies_64; SECTIONS { . = PAGE_OFFSET + KERNEL_IMAGE_START; _text = .; __init_begin = .; HEAD_TEXT_SECTION INIT_TEXT_SECTION(PAGE_SIZE) INIT_DATA_SECTION(16) PERCPU_SECTION(L1_CACHE_BYTES) __init_end = .; _stext = .; .text : { /* Real text segment */ TEXT_TEXT SCHED_TEXT CPUIDLE_TEXT LOCK_TEXT *(.fixup) *(.gnu.warning) } _etext = .; _sdata = .; RO_DATA_SECTION(PAGE_SIZE) RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) _edata = .; EXCEPTION_TABLE(L1_CACHE_BYTES) NOTES BSS_SECTION(0, 0, 0) _end = .; STABS_DEBUG DWARF_DEBUG DISCARDS /* Exit code and data */ }
AirFortressIlikara/LS2K0300-linux-4.19
3,896
arch/unicore32/kernel/sleep.S
/* * linux/arch/unicore32/kernel/sleep.S * * Code specific to PKUnity SoC and UniCore ISA * * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn> * Copyright (C) 2001-2010 Guan Xuetao * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/linkage.h> #include <asm/assembler.h> #include <mach/hardware.h> .text pkunity_cpu_save_cp: @ get coprocessor registers movc r3, p0.c7, #0 @ PID movc r4, p0.c2, #0 @ translation table base addr movc r5, p0.c1, #0 @ control reg @ store them plus current virtual stack ptr on stack mov r6, sp stm.w (r3 - r6), [sp-] mov pc, lr pkunity_cpu_save_sp: @ preserve phys address of stack mov r0, sp stw.w lr, [sp+], #-4 b.l sleep_phys_sp ldw r1, =sleep_save_sp stw r0, [r1] ldw.w pc, [sp]+, #4 /* * puv3_cpu_suspend() * * Forces CPU into sleep state. * * r0 = value for PWRMODE M field for desired sleep state */ ENTRY(puv3_cpu_suspend) stm.w (r16 - r27, lr), [sp-] @ save registers on stack stm.w (r4 - r15), [sp-] @ save registers on stack #ifdef CONFIG_UNICORE_FPU_F64 sfm.w (f0 - f7 ), [sp-] sfm.w (f8 - f15), [sp-] sfm.w (f16 - f23), [sp-] sfm.w (f24 - f31), [sp-] cff r4, s31 stm.w (r4), [sp-] #endif b.l pkunity_cpu_save_cp b.l pkunity_cpu_save_sp @ clean data cache mov r1, #0 movc p0.c5, r1, #14 nop nop nop nop @ DDR2 BaseAddr ldw r0, =(PKUNITY_DDR2CTRL_BASE) @ PM BaseAddr ldw r1, =(PKUNITY_PM_BASE) @ set PLL_SYS_CFG reg, 275 movl r6, #0x00002401 stw r6, [r1+], #0x18 @ set PLL_DDR_CFG reg, 66MHz movl r6, #0x00100c00 stw r6, [r1+], #0x1c @ set wake up source movl r8, #0x800001ff @ epip4d stw r8, [r1+], #0xc @ set PGSR movl r5, #0x40000 stw r5, [r1+], #0x10 @ prepare DDR2 refresh settings ldw r5, [r0+], #0x24 or r5, r5, #0x00000001 @ prepare PMCR for PLL changing movl r6, #0xc @ prepare for closing PLL movl r7, #0x1 @ prepare sleep mode mov r8, #0x1 @ movl r0, 0x11111111 @ put_word_ocd r0 b pkunity_cpu_do_suspend .ltorg .align 5 pkunity_cpu_do_suspend: b 101f @ put DDR2 into self-refresh 100: stw r5, [r0+], #0x24 @ change PLL stw r6, [r1] b 1f .ltorg .align 5 101: b 102f @ wait for PLL changing complete 1: ldw r6, [r1+], #0x44 csub.a r6, #0x1 bne 1b b 2f .ltorg .align 5 102: b 100b @ close PLL 2: stw r7, [r1+], #0x4 @ enter sleep mode stw r8, [r1] 3: b 3b /* * puv3_cpu_resume() * * entry point from bootloader into kernel during resume * * Note: Yes, part of the following code is located into the .data section. * This is to allow sleep_save_sp to be accessed with a relative load * while we can't rely on any MMU translation. We could have put * sleep_save_sp in the .text section as well, but some setups might * insist on it to be truly read-only. 
*/ .data .align 5 ENTRY(puv3_cpu_resume) @ movl r0, 0x20202020 @ put_word_ocd r0 ldw r0, sleep_save_sp @ stack phys addr ldw r2, =resume_after_mmu @ its absolute virtual address ldm (r3 - r6), [r0]+ @ CP regs + virt stack ptr mov sp, r6 @ CP regs + virt stack ptr mov r1, #0 movc p0.c6, r1, #6 @ invalidate I & D TLBs movc p0.c5, r1, #28 @ invalidate I & D caches, BTB movc p0.c7, r3, #0 @ PID movc p0.c2, r4, #0 @ translation table base addr movc p0.c1, r5, #0 @ control reg, turn on mmu nop jump r2 nop nop nop nop nop sleep_save_sp: .word 0 @ preserve stack phys ptr here .text resume_after_mmu: @ movl r0, 0x30303030 @ put_word_ocd r0 #ifdef CONFIG_UNICORE_FPU_F64 lfm.w (f0 - f7 ), [sp]+ lfm.w (f8 - f15), [sp]+ lfm.w (f16 - f23), [sp]+ lfm.w (f24 - f31), [sp]+ ldm.w (r4), [sp]+ ctf r4, s31 #endif ldm.w (r4 - r15), [sp]+ @ restore registers from stack ldm.w (r16 - r27, pc), [sp]+ @ return to caller
AirFortressIlikara/LS2K0300-linux-4.19
1,472
arch/unicore32/kernel/debug.S
/* * linux/arch/unicore32/kernel/debug.S * * Code specific to PKUnity SoC and UniCore ISA * * Copyright (C) 2001-2010 GUAN Xue-tao * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * 32-bit debugging code */ #include <linux/linkage.h> #include <asm/assembler.h> .text /* * Some debugging routines (useful if you've got MM problems and * printk isn't working). For DEBUGGING ONLY!!! Do not leave * references to these in a production kernel! */ #include "debug-macro.S" /* * Useful debugging routines */ ENTRY(printhex8) mov r1, #8 b printhex ENDPROC(printhex8) ENTRY(printhex4) mov r1, #4 b printhex ENDPROC(printhex4) ENTRY(printhex2) mov r1, #2 printhex: adr r2, hexbuf add r3, r2, r1 mov r1, #0 stb r1, [r3] 1: and r1, r0, #15 mov r0, r0 >> #4 csub.a r1, #10 beg 2f add r1, r1, #'0' - 'a' + 10 2: add r1, r1, #'a' - 10 stb.w r1, [r3+], #-1 cxor.a r3, r2 bne 1b mov r0, r2 b printascii ENDPROC(printhex2) .ltorg ENTRY(printascii) addruart r3 b 2f 1: waituart r2, r3 senduart r1, r3 busyuart r2, r3 cxor.a r1, #'\n' cmoveq r1, #'\r' beq 1b 2: cxor.a r0, #0 beq 3f ldb.w r1, [r0]+, #1 cxor.a r1, #0 bne 1b 3: mov pc, lr ENDPROC(printascii) ENTRY(printch) addruart r3 mov r1, r0 mov r0, #0 b 1b ENDPROC(printch) hexbuf: .space 16
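printhex above peels the value apart one nibble at a time, converting each to '0'-'9'/'a'-'f' and filling a small buffer from the end backwards before handing it to the string printer. A C sketch of the same conversion follows; putstr() is a hypothetical stand-in for printascii.

extern void putstr(const char *s);	/* hypothetical, plays the role of printascii */

void printhex_sketch(unsigned int val, int digits)	/* digits: 2, 4 or 8 */
{
	char buf[9];
	char *p = buf + digits;

	*p = '\0';				/* terminate, then fill backwards */
	while (p-- > buf) {
		unsigned int nibble = val & 0xf;

		*p = nibble < 10 ? '0' + nibble : 'a' + nibble - 10;
		val >>= 4;
	}
	putstr(buf);
}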
AirFortressIlikara/LS2K0300-linux-4.19
1,821
arch/unicore32/lib/copy_to_user.S
/* * linux/arch/unicore32/lib/copy_to_user.S * * Code specific to PKUnity SoC and UniCore ISA * * Copyright (C) 2001-2010 GUAN Xue-tao * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/linkage.h> #include <asm/assembler.h> /* * Prototype: * * size_t raw_copy_to_user(void *to, const void *from, size_t n) * * Purpose: * * copy a block to user memory from kernel memory * * Params: * * to = user memory * from = kernel memory * n = number of bytes to copy * * Return value: * * Number of bytes NOT copied. */ .macro ldr1w ptr reg abort ldw.w \reg, [\ptr]+, #4 .endm .macro ldr4w ptr reg1 reg2 reg3 reg4 abort ldm.w (\reg1, \reg2, \reg3, \reg4), [\ptr]+ .endm .macro ldr8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort ldm.w (\reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8), [\ptr]+ .endm .macro ldr1b ptr reg cond=al abort notcond \cond, .+8 ldb.w \reg, [\ptr]+, #1 .endm .macro str1w ptr reg abort strusr \reg, \ptr, 4, abort=\abort .endm .macro str8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort 100: stm.w (\reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8), [\ptr]+ .pushsection __ex_table, "a" .long 100b, \abort .popsection .endm .macro str1b ptr reg cond=al abort strusr \reg, \ptr, 1, \cond, abort=\abort .endm .macro enter mov r3, #0 stm.w (r0, r2, r3), [sp-] .endm .macro exit add sp, sp, #8 ldm.w (r0), [sp]+ mov pc, lr .endm .text WEAK(raw_copy_to_user) #include "copy_template.S" ENDPROC(raw_copy_to_user) .pushsection .fixup,"ax" .align 0 copy_abort_preamble ldm.w (r1, r2, r3), [sp]+ sub r0, r0, r1 rsub r0, r0, r2 copy_abort_end .popsection
AirFortressIlikara/LS2K0300-linux-4.19
1,368
arch/unicore32/lib/clear_user.S
/* * linux/arch/unicore32/lib/clear_user.S * * Code specific to PKUnity SoC and UniCore ISA * * Copyright (C) 2001-2010 GUAN Xue-tao * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/linkage.h> #include <asm/assembler.h> .text /* Prototype: int __clear_user(void *addr, size_t sz) * Purpose : clear some user memory * Params : addr - user memory address to clear * : sz - number of bytes to clear * Returns : number of bytes NOT cleared */ WEAK(__clear_user) stm.w (lr), [sp-] stm.w (r1), [sp-] mov r2, #0 csub.a r1, #4 bsl 2f and.a ip, r0, #3 beq 1f csub.a ip, #2 strusr r2, r0, 1 strusr r2, r0, 1, el strusr r2, r0, 1, sl rsub ip, ip, #4 sub r1, r1, ip @ 7 6 5 4 3 2 1 1: sub.a r1, r1, #8 @ -1 -2 -3 -4 -5 -6 -7 strusr r2, r0, 4, ns, rept=2 bns 1b add.a r1, r1, #4 @ 3 2 1 0 -1 -2 -3 strusr r2, r0, 4, ns 2: cand.a r1, #2 @ 1x 1x 0x 0x 1x 1x 0x strusr r2, r0, 1, ne, rept=2 cand.a r1, #1 @ x1 x0 x1 x0 x1 x0 x1 beq 3f USER( stb.u r2, [r0]) 3: mov r0, #0 ldm.w (r1), [sp]+ ldm.w (pc), [sp]+ ENDPROC(__clear_user) .pushsection .fixup,"ax" .align 0 9001: ldm.w (r0), [sp]+ ldm.w (pc), [sp]+ .popsection
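__clear_user above zeroes bytes until the destination is word aligned, then uses 4-byte stores, then finishes the tail, returning the number of bytes NOT cleared (zero on success). A hedged C sketch of the same strategy, without the strusr/.fixup fault handling that produces the non-zero return:

#include <stddef.h>
#include <stdint.h>

/* Illustrative: same alignment strategy and return contract as __clear_user,
 * minus the user-access fault handling. */
static size_t clear_user_sketch(void *addr, size_t sz)
{
	unsigned char *p = addr;

	while (sz && ((uintptr_t)p & 3)) {	/* leading bytes up to word alignment */
		*p++ = 0;
		sz--;
	}
	while (sz >= 4) {			/* aligned 4-byte stores ("strusr r2, r0, 4") */
		*(uint32_t *)p = 0;
		p += 4;
		sz -= 4;
	}
	while (sz--)				/* trailing bytes */
		*p++ = 0;

	return 0;				/* a fault would instead return the bytes left */
}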
AirFortressIlikara/LS2K0300-linux-4.19
3,773
arch/unicore32/lib/backtrace.S
/* * linux/arch/unicore32/lib/backtrace.S * * Code specific to PKUnity SoC and UniCore ISA * * Copyright (C) 2001-2010 GUAN Xue-tao * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/linkage.h> #include <asm/assembler.h> .text @ fp is 0 or stack frame #define frame v4 #define sv_fp v5 #define sv_pc v6 #define offset v8 ENTRY(__backtrace) mov r0, fp ENTRY(c_backtrace) #if !defined(CONFIG_FRAME_POINTER) || !defined(CONFIG_PRINTK) mov pc, lr ENDPROC(__backtrace) ENDPROC(c_backtrace) #else stm.w (v4 - v8, lr), [sp-] @ Save an extra register @ so we have a location... mov.a frame, r0 @ if frame pointer is zero beq no_frame @ we have no stack frames 1: stm.w (pc), [sp-] @ calculate offset of PC stored ldw.w r0, [sp]+, #4 @ by stmfd for this CPU adr r1, 1b sub offset, r0, r1 /* * Stack frame layout: * optionally saved caller registers (r4 - r10) * saved fp * saved sp * saved lr * frame => saved pc * optionally saved arguments (r0 - r3) * saved sp => <next word> * * Functions start with the following code sequence: * mov ip, sp * stm.w (r0 - r3), [sp-] (optional) * corrected pc => stm.w sp, (..., fp, ip, lr, pc) */ for_each_frame: 1001: ldw sv_pc, [frame+], #0 @ get saved pc 1002: ldw sv_fp, [frame+], #-12 @ get saved fp sub sv_pc, sv_pc, offset @ Correct PC for prefetching 1003: ldw r2, [sv_pc+], #-4 @ if stmfd sp, {args} exists, ldw r3, .Ldsi+4 @ adjust saved 'pc' back one cxor.a r3, r2 >> #14 @ instruction beq 201f sub r0, sv_pc, #4 @ allow for mov b 202f 201: sub r0, sv_pc, #8 @ allow for mov + stmia 202: ldw r1, [frame+], #-4 @ get saved lr mov r2, frame b.l dump_backtrace_entry ldw r1, [sv_pc+], #-4 @ if stmfd sp, {args} exists, ldw r3, .Ldsi+4 cxor.a r3, r1 >> #14 bne 1004f ldw r0, [frame+], #-8 @ get sp sub r0, r0, #4 @ point at the last arg b.l .Ldumpstm @ dump saved registers 1004: ldw r1, [sv_pc+], #0 @ if stmfd {, fp, ip, lr, pc} ldw r3, .Ldsi @ instruction exists, cxor.a r3, r1 >> #14 bne 201f sub r0, frame, #16 b.l .Ldumpstm @ dump saved registers 201: cxor.a sv_fp, #0 @ zero saved fp means beq no_frame @ no further frames csub.a sv_fp, frame @ next frame must be mov frame, sv_fp @ above the current frame bua for_each_frame 1006: adr r0, .Lbad mov r1, frame b.l printk no_frame: ldm.w (v4 - v8, pc), [sp]+ ENDPROC(__backtrace) ENDPROC(c_backtrace) .pushsection __ex_table,"a" .align 3 .long 1001b, 1006b .long 1002b, 1006b .long 1003b, 1006b .long 1004b, 1006b .popsection #define instr v4 #define reg v5 #define stack v6 .Ldumpstm: stm.w (instr, reg, stack, v7, lr), [sp-] mov stack, r0 mov instr, r1 mov reg, #14 mov v7, #0 1: mov r3, #1 csub.a reg, #8 bne 201f sub reg, reg, #3 201: cand.a instr, r3 << reg beq 2f add v7, v7, #1 cxor.a v7, #6 cmoveq v7, #1 cmoveq r1, #'\n' cmovne r1, #' ' ldw.w r3, [stack]+, #-4 mov r2, reg csub.a r2, #8 bsl 201f sub r2, r2, #3 201: cand.a instr, #0x40 @ if H is 1, high 16 regs beq 201f add r2, r2, #0x10 @ so r2 need add 16 201: adr r0, .Lfp b.l printk 2: sub.a reg, reg, #1 bns 1b cxor.a v7, #0 beq 201f adr r0, .Lcr b.l printk 201: ldm.w (instr, reg, stack, v7, pc), [sp]+ .Lfp: .asciz "%cr%d:%08x" .Lcr: .asciz "\n" .Lbad: .asciz "Backtrace aborted due to bad frame pointer <%p>\n" .align .Ldsi: .word 0x92eec000 >> 14 @ stm.w sp, (... fp, ip, lr, pc) .word 0x92e10000 >> 14 @ stm.w sp, () #endif
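The walker above relies on the frame layout documented in its comments: the frame pointer addresses the saved pc slot, the caller's fp, sp and lr sit immediately below it, and each saved fp must point strictly above the current frame. A hedged C sketch of that walk (struct and function names are mine; the real code also undoes the stm/prefetch offset on pc and dumps the optionally saved registers):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative frame record: 'frame' points at the saved pc; fp/sp/lr sit just below it. */
struct frame_rec_sketch {
	uintptr_t fp;		/* caller's frame pointer */
	uintptr_t sp;
	uintptr_t lr;
	uintptr_t pc;
};

static void backtrace_sketch(uintptr_t frame)
{
	while (frame) {
		const struct frame_rec_sketch *rec = (const struct frame_rec_sketch *)
			(frame - offsetof(struct frame_rec_sketch, pc));

		printf("pc=%#lx lr=%#lx\n",
		       (unsigned long)rec->pc, (unsigned long)rec->lr);

		if (rec->fp <= frame)	/* "next frame must be above the current frame" */
			break;
		frame = rec->fp;
	}
}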
AirFortressIlikara/LS2K0300-linux-4.19
4,676
arch/unicore32/lib/copy_template.S
/* * linux/arch/unicore32/lib/copy_template.S * * Code specific to PKUnity SoC and UniCore ISA * * Copyright (C) 2001-2010 GUAN Xue-tao * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ /* * Theory of operation * ------------------- * * This file provides the core code for a forward memory copy used in * the implementation of memcopy(), copy_to_user() and copy_from_user(). * * The including file must define the following accessor macros * according to the need of the given function: * * ldr1w ptr reg abort * * This loads one word from 'ptr', stores it in 'reg' and increments * 'ptr' to the next word. The 'abort' argument is used for fixup tables. * * ldr4w ptr reg1 reg2 reg3 reg4 abort * ldr8w ptr, reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort * * This loads four or eight words starting from 'ptr', stores them * in provided registers and increments 'ptr' past those words. * The'abort' argument is used for fixup tables. * * ldr1b ptr reg cond abort * * Similar to ldr1w, but it loads a byte and increments 'ptr' one byte. * It also must apply the condition code if provided, otherwise the * "al" condition is assumed by default. * * str1w ptr reg abort * str8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort * str1b ptr reg cond abort * * Same as their ldr* counterparts, but data is stored to 'ptr' location * rather than being loaded. * * enter * * Preserve the provided registers on the stack plus any additional * data as needed by the implementation including this code. Called * upon code entry. * * exit * * Restore registers with the values previously saved with the * 'preserv' macro. Called upon code termination. 
*/ enter sub.a r2, r2, #4 bsl 8f and.a ip, r0, #3 bne 9f and.a ip, r1, #3 bne 10f 1: sub.a r2, r2, #(28) stm.w (r5 - r8), [sp-] bsl 5f 3: 4: ldr8w r1, r3, r4, r5, r6, r7, r8, r10, r11, abort=20f sub.a r2, r2, #32 str8w r0, r3, r4, r5, r6, r7, r8, r10, r11, abort=20f beg 3b 5: and.a ip, r2, #28 rsub ip, ip, #32 beq 7f add pc, pc, ip @ C is always clear here nop ldr1w r1, r3, abort=20f ldr1w r1, r4, abort=20f ldr1w r1, r5, abort=20f ldr1w r1, r6, abort=20f ldr1w r1, r7, abort=20f ldr1w r1, r8, abort=20f ldr1w r1, r11, abort=20f add pc, pc, ip nop str1w r0, r3, abort=20f str1w r0, r4, abort=20f str1w r0, r5, abort=20f str1w r0, r6, abort=20f str1w r0, r7, abort=20f str1w r0, r8, abort=20f str1w r0, r11, abort=20f 7: ldm.w (r5 - r8), [sp]+ 8: mov.a r2, r2 << #31 ldr1b r1, r3, ne, abort=21f ldr1b r1, r4, ea, abort=21f ldr1b r1, r10, ea, abort=21f str1b r0, r3, ne, abort=21f str1b r0, r4, ea, abort=21f str1b r0, r10, ea, abort=21f exit 9: rsub ip, ip, #4 csub.a ip, #2 ldr1b r1, r3, sg, abort=21f ldr1b r1, r4, eg, abort=21f ldr1b r1, r11, abort=21f str1b r0, r3, sg, abort=21f str1b r0, r4, eg, abort=21f sub.a r2, r2, ip str1b r0, r11, abort=21f bsl 8b and.a ip, r1, #3 beq 1b 10: andn r1, r1, #3 csub.a ip, #2 ldr1w r1, r11, abort=21f beq 17f bsg 18f .macro forward_copy_shift a b sub.a r2, r2, #28 bsl 14f 11: stm.w (r5 - r9), [sp-] 12: ldr4w r1, r4, r5, r6, r7, abort=19f mov r3, r11 pull #\a sub.a r2, r2, #32 ldr4w r1, r8, r9, r10, r11, abort=19f or r3, r3, r4 push #\b mov r4, r4 pull #\a or r4, r4, r5 push #\b mov r5, r5 pull #\a or r5, r5, r6 push #\b mov r6, r6 pull #\a or r6, r6, r7 push #\b mov r7, r7 pull #\a or r7, r7, r8 push #\b mov r8, r8 pull #\a or r8, r8, r9 push #\b mov r9, r9 pull #\a or r9, r9, r10 push #\b mov r10, r10 pull #\a or r10, r10, r11 push #\b str8w r0, r3, r4, r5, r6, r7, r8, r9, r10, , abort=19f beg 12b ldm.w (r5 - r9), [sp]+ 14: and.a ip, r2, #28 beq 16f 15: mov r3, r11 pull #\a ldr1w r1, r11, abort=21f sub.a ip, ip, #4 or r3, r3, r11 push #\b str1w r0, r3, abort=21f bsg 15b 16: sub r1, r1, #(\b / 8) b 8b .endm forward_copy_shift a=8 b=24 17: forward_copy_shift a=16 b=16 18: forward_copy_shift a=24 b=8 /* * Abort preamble and completion macros. * If a fixup handler is required then those macros must surround it. * It is assumed that the fixup code will handle the private part of * the exit macro. */ .macro copy_abort_preamble 19: ldm.w (r5 - r9), [sp]+ b 21f 299: .word 0 @ store lr @ to avoid function call in fixup 20: ldm.w (r5 - r8), [sp]+ 21: adr r1, 299b stw lr, [r1] .endm .macro copy_abort_end adr lr, 299b ldw pc, [lr] .endm
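Stripped of the macro indirection and of the forward_copy_shift path for mutually misaligned pointers, the aligned fast path of this template is a 32-byte unrolled word copy followed by a word tail and a byte tail. A minimal C sketch of that shape (illustrative only; the real code returns the uncopied residue through the enter/exit/fixup machinery):

#include <stddef.h>
#include <string.h>

/* Illustrative: the aligned fast path only - 32 bytes at a time, then words, then bytes. */
static void copy_template_sketch(void *dst, const void *src, size_t n)
{
	unsigned char *d = dst;
	const unsigned char *s = src;

	while (n >= 32) {		/* the ldr8w/str8w loop */
		memcpy(d, s, 32);
		d += 32; s += 32; n -= 32;
	}
	while (n >= 4) {		/* the ldr1w/str1w computed-jump tail */
		memcpy(d, s, 4);
		d += 4; s += 4; n -= 4;
	}
	while (n--)			/* the ldr1b/str1b byte tail */
		*d++ = *s++;
}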
AirFortressIlikara/LS2K0300-linux-4.19
1,086
arch/unicore32/lib/delay.S
/* * linux/arch/unicore32/lib/delay.S * * Code specific to PKUnity SoC and UniCore ISA * * Copyright (C) 2001-2010 GUAN Xue-tao * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/param.h> .text .LC0: .word loops_per_jiffy .LC1: .word (2199023*HZ)>>11 /* * r0 <= 2000 * lpj <= 0x01ffffff (max. 3355 bogomips) * HZ <= 1000 */ ENTRY(__udelay) ldw r2, .LC1 mul r0, r2, r0 ENTRY(__const_udelay) @ 0 <= r0 <= 0x7fffff06 ldw r2, .LC0 ldw r2, [r2] @ max = 0x01ffffff mov r0, r0 >> #14 @ max = 0x0001ffff mov r2, r2 >> #10 @ max = 0x00007fff mul r0, r2, r0 @ max = 2^32-1 mov.a r0, r0 >> #6 cmoveq pc, lr /* * loops = r0 * HZ * loops_per_jiffy / 1000000 * * Oh, if only we had a cycle counter... */ @ Delay routine ENTRY(__delay) sub.a r0, r0, #2 bua __delay mov pc, lr ENDPROC(__udelay) ENDPROC(__const_udelay) ENDPROC(__delay)
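The constant above works because 2^41 = 2,199,023,255,552, so 2199023 is very nearly 2^41 / 10^6; the shift chain (>>11 folded into .LC1, then >>14, >>10 and >>6 at run time) discards exactly 41 bits, which implements the "/ 1000000" in "loops = usecs * HZ * loops_per_jiffy / 1000000". A small, illustrative C check of that identity using made-up HZ and loops_per_jiffy values:

#include <stdint.h>
#include <stdio.h>

/* Illustrative check of the fixed-point scaling in __udelay/__const_udelay. */
int main(void)
{
	const uint64_t HZ = 100, lpj = 500000, usecs = 2000;	/* example values only */

	uint64_t lc1   = (2199023ull * HZ) >> 11;		/* .LC1 */
	uint64_t loops = ((((usecs * lc1) >> 14) * (lpj >> 10)) >> 6);
	uint64_t exact = usecs * HZ * lpj / 1000000;

	printf("approx=%llu exact=%llu\n",
	       (unsigned long long)loops, (unsigned long long)exact);
	return 0;
}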
AirFortressIlikara/LS2K0300-linux-4.19
2,425
arch/unicore32/lib/findbit.S
/* * linux/arch/unicore32/lib/findbit.S * * Code specific to PKUnity SoC and UniCore ISA * * Copyright (C) 2001-2010 GUAN Xue-tao * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/linkage.h> #include <asm/assembler.h> .text /* * Purpose : Find a 'zero' bit * Prototype: int find_first_zero_bit(void *addr, unsigned int maxbit); */ ENTRY(find_first_zero_bit) cxor.a r1, #0 beq 3f mov r2, #0 1: ldb r3, [r0+], r2 >> #3 xor.a r3, r3, #0xff @ invert bits bne .L_found @ any now set - found zero bit add r2, r2, #8 @ next bit pointer 2: csub.a r2, r1 @ any more? bub 1b 3: mov r0, r1 @ no free bits mov pc, lr ENDPROC(find_first_zero_bit) /* * Purpose : Find next 'zero' bit * Prototype: int find_next_zero_bit * (void *addr, unsigned int maxbit, int offset) */ ENTRY(find_next_zero_bit) cxor.a r1, #0 beq 3b and.a ip, r2, #7 beq 1b @ If new byte, goto old routine ldb r3, [r0+], r2 >> #3 xor r3, r3, #0xff @ now looking for a 1 bit mov.a r3, r3 >> ip @ shift off unused bits bne .L_found or r2, r2, #7 @ if zero, then no bits here add r2, r2, #1 @ align bit pointer b 2b @ loop for next bit ENDPROC(find_next_zero_bit) /* * Purpose : Find a 'one' bit * Prototype: int find_first_bit * (const unsigned long *addr, unsigned int maxbit); */ ENTRY(find_first_bit) cxor.a r1, #0 beq 3f mov r2, #0 1: ldb r3, [r0+], r2 >> #3 mov.a r3, r3 bne .L_found @ any now set - found zero bit add r2, r2, #8 @ next bit pointer 2: csub.a r2, r1 @ any more? bub 1b 3: mov r0, r1 @ no free bits mov pc, lr ENDPROC(find_first_bit) /* * Purpose : Find next 'one' bit * Prototype: int find_next_zero_bit * (void *addr, unsigned int maxbit, int offset) */ ENTRY(find_next_bit) cxor.a r1, #0 beq 3b and.a ip, r2, #7 beq 1b @ If new byte, goto old routine ldb r3, [r0+], r2 >> #3 mov.a r3, r3 >> ip @ shift off unused bits bne .L_found or r2, r2, #7 @ if zero, then no bits here add r2, r2, #1 @ align bit pointer b 2b @ loop for next bit ENDPROC(find_next_bit) /* * One or more bits in the LSB of r3 are assumed to be set. */ .L_found: rsub r1, r3, #0 and r3, r3, r1 cntlz r3, r3 rsub r3, r3, #31 add r0, r2, r3 mov pc, lr
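find_first_zero_bit above scans a byte at a time, inverting each byte so the search becomes "find the first set bit", and the shared .L_found tail isolates the lowest set bit with x & -x before converting it to a bit index. A hedged C rendering of that logic (like the assembly, it does not clamp the result inside the final byte; callers compare against maxbit):

/* Illustrative C equivalent of find_first_zero_bit: returns maxbit when no zero bit is found. */
static unsigned int find_first_zero_bit_sketch(const unsigned char *addr,
					       unsigned int maxbit)
{
	for (unsigned int bit = 0; bit < maxbit; bit += 8) {
		unsigned int byte = addr[bit >> 3] ^ 0xff;	/* invert: now look for a 1 */

		if (byte) {
			unsigned int low = byte & (0u - byte);	/* lowest set bit, as in .L_found */
			unsigned int pos = 0;

			while (low >>= 1)
				pos++;
			return bit + pos;
		}
	}
	return maxbit;		/* "no free bits" */
}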
AirFortressIlikara/LS2K0300-linux-4.19
1,042
arch/unicore32/lib/strncpy_from_user.S
/* * linux/arch/unicore32/lib/strncpy_from_user.S * * Code specific to PKUnity SoC and UniCore ISA * * Copyright (C) 2001-2010 GUAN Xue-tao * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/errno.h> .text .align 5 /* * Copy a string from user space to kernel space. * r0 = dst, r1 = src, r2 = byte length * returns the number of characters copied (strlen of copied string), * -EFAULT on exception, or "len" if we fill the whole buffer */ ENTRY(__strncpy_from_user) mov ip, r1 1: sub.a r2, r2, #1 ldrusr r3, r1, 1, ns bfs 2f stb.w r3, [r0]+, #1 cxor.a r3, #0 bne 1b sub r1, r1, #1 @ take NUL character out of count 2: sub r0, r1, ip mov pc, lr ENDPROC(__strncpy_from_user) .pushsection .fixup,"ax" .align 0 9001: mov r3, #0 stb r3, [r0+], #0 @ null terminate mov r0, #-EFAULT mov pc, lr .popsection
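The header above fixes the return contract: the length of the copied string (the NUL is stored but not counted), "len" if the buffer fills before a NUL is seen, or -EFAULT from the fixup path. A hedged C sketch of the non-faulting behaviour only:

/* Illustrative: return convention of __strncpy_from_user, without the fault path. */
static long strncpy_from_user_sketch(char *dst, const char *src, long count)
{
	long copied = 0;

	while (copied < count) {
		char c = src[copied];	/* the real code loads this with ldrusr + fixup */

		dst[copied] = c;
		if (c == '\0')
			return copied;	/* NUL stored but not counted */
		copied++;
	}
	return count;			/* buffer filled without hitting a NUL */
}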
AirFortressIlikara/LS2K0300-linux-4.19
1,951
arch/unicore32/lib/copy_from_user.S
/* * linux/arch/unicore32/lib/copy_from_user.S * * Code specific to PKUnity SoC and UniCore ISA * * Copyright (C) 2001-2010 GUAN Xue-tao * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/linkage.h> #include <asm/assembler.h> /* * Prototype: * * size_t raw_copy_from_user(void *to, const void *from, size_t n) * * Purpose: * * copy a block to kernel memory from user memory * * Params: * * to = kernel memory * from = user memory * n = number of bytes to copy * * Return value: * * Number of bytes NOT copied. */ .macro ldr1w ptr reg abort ldrusr \reg, \ptr, 4, abort=\abort .endm .macro ldr4w ptr reg1 reg2 reg3 reg4 abort 100: ldm.w (\reg1, \reg2, \reg3, \reg4), [\ptr]+ .pushsection __ex_table, "a" .align 3 .long 100b, \abort .popsection .endm .macro ldr8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort 100: ldm.w (\reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8), [\ptr]+ .pushsection __ex_table, "a" .align 3 .long 100b, \abort .popsection .endm .macro ldr1b ptr reg cond=al abort ldrusr \reg, \ptr, 1, \cond, abort=\abort .endm .macro str1w ptr reg abort stw.w \reg, [\ptr]+, #4 .endm .macro str8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort stm.w (\reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8), [\ptr]+ .endm .macro str1b ptr reg cond=al abort .ifnc \cond, al b\cond 201f b 202f .endif 201: stb.w \reg, [\ptr]+, #1 202: .endm .macro enter mov r3, #0 stm.w (r0, r2, r3), [sp-] .endm .macro exit add sp, sp, #8 ldm.w (r0), [sp]+ mov pc, lr .endm .text ENTRY(raw_copy_from_user) #include "copy_template.S" ENDPROC(raw_copy_from_user) .pushsection .fixup,"ax" .align 0 copy_abort_preamble ldm.w (r1, r2, r3), [sp]+ sub r0, r0, r1 rsub r0, r0, r2 copy_abort_end .popsection
AirFortressIlikara/LS2K0300-linux-4.19
4,492
arch/unicore32/mm/cache-ucv2.S
/* * linux/arch/unicore32/mm/cache-ucv2.S * * Code specific to PKUnity SoC and UniCore ISA * * Copyright (C) 2001-2010 GUAN Xue-tao * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This is the "shell" of the UniCore-v2 processor support. */ #include <linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> #include <asm/page.h> #include "proc-macros.S" /* * __cpuc_flush_icache_all() * __cpuc_flush_kern_all() * __cpuc_flush_user_all() * * Flush the entire cache. */ ENTRY(__cpuc_flush_icache_all) /*FALLTHROUGH*/ ENTRY(__cpuc_flush_kern_all) /*FALLTHROUGH*/ ENTRY(__cpuc_flush_user_all) mov r0, #0 movc p0.c5, r0, #14 @ Dcache flush all nop8 mov r0, #0 movc p0.c5, r0, #20 @ Icache invalidate all nop8 mov pc, lr /* * __cpuc_flush_user_range(start, end, flags) * * Flush a range of TLB entries in the specified address space. * * - start - start address (may not be aligned) * - end - end address (exclusive, may not be aligned) * - flags - vm_area_struct flags describing address space */ ENTRY(__cpuc_flush_user_range) cxor.a r2, #0 beq __cpuc_dma_flush_range #ifndef CONFIG_CPU_DCACHE_LINE_DISABLE andn r0, r0, #CACHE_LINESIZE - 1 @ Safety check sub r1, r1, r0 csub.a r1, #MAX_AREA_SIZE bsg 2f andn r1, r1, #CACHE_LINESIZE - 1 add r1, r1, #CACHE_LINESIZE 101: dcacheline_flush r0, r11, r12 add r0, r0, #CACHE_LINESIZE sub.a r1, r1, #CACHE_LINESIZE bns 101b b 3f #endif 2: mov ip, #0 movc p0.c5, ip, #14 @ Dcache flush all nop8 3: mov ip, #0 movc p0.c5, ip, #20 @ Icache invalidate all nop8 mov pc, lr /* * __cpuc_coherent_kern_range(start,end) * __cpuc_coherent_user_range(start,end) * * Ensure that the I and D caches are coherent within specified * region. This is typically used when code has been written to * a memory region, and will be executed. 
* * - start - virtual start address of region * - end - virtual end address of region */ ENTRY(__cpuc_coherent_kern_range) /* FALLTHROUGH */ ENTRY(__cpuc_coherent_user_range) #ifndef CONFIG_CPU_DCACHE_LINE_DISABLE andn r0, r0, #CACHE_LINESIZE - 1 @ Safety check sub r1, r1, r0 csub.a r1, #MAX_AREA_SIZE bsg 2f andn r1, r1, #CACHE_LINESIZE - 1 add r1, r1, #CACHE_LINESIZE @ r0 va2pa r10 mov r9, #PAGE_SZ sub r9, r9, #1 @ PAGE_MASK 101: va2pa r0, r10, r11, r12, r13, 2f @ r10 is PA b 103f 102: cand.a r0, r9 beq 101b 103: movc p0.c5, r10, #11 @ Dcache clean line of R10 nop8 add r0, r0, #CACHE_LINESIZE add r10, r10, #CACHE_LINESIZE sub.a r1, r1, #CACHE_LINESIZE bns 102b b 3f #endif 2: mov ip, #0 movc p0.c5, ip, #10 @ Dcache clean all nop8 3: mov ip, #0 movc p0.c5, ip, #20 @ Icache invalidate all nop8 mov pc, lr /* * __cpuc_flush_kern_dcache_area(void *addr, size_t size) * * - addr - kernel address * - size - region size */ ENTRY(__cpuc_flush_kern_dcache_area) mov ip, #0 movc p0.c5, ip, #14 @ Dcache flush all nop8 mov pc, lr /* * __cpuc_dma_clean_range(start,end) * - start - virtual start address of region * - end - virtual end address of region */ ENTRY(__cpuc_dma_clean_range) #ifndef CONFIG_CPU_DCACHE_LINE_DISABLE andn r0, r0, #CACHE_LINESIZE - 1 sub r1, r1, r0 andn r1, r1, #CACHE_LINESIZE - 1 add r1, r1, #CACHE_LINESIZE csub.a r1, #MAX_AREA_SIZE bsg 2f @ r0 va2pa r10 mov r9, #PAGE_SZ sub r9, r9, #1 @ PAGE_MASK 101: va2pa r0, r10, r11, r12, r13, 2f @ r10 is PA b 1f 102: cand.a r0, r9 beq 101b 1: movc p0.c5, r10, #11 @ Dcache clean line of R10 nop8 add r0, r0, #CACHE_LINESIZE add r10, r10, #CACHE_LINESIZE sub.a r1, r1, #CACHE_LINESIZE bns 102b mov pc, lr #endif 2: mov ip, #0 movc p0.c5, ip, #10 @ Dcache clean all nop8 mov pc, lr /* * __cpuc_dma_inv_range(start,end) * __cpuc_dma_flush_range(start,end) * - start - virtual start address of region * - end - virtual end address of region */ __cpuc_dma_inv_range: /* FALLTHROUGH */ ENTRY(__cpuc_dma_flush_range) #ifndef CONFIG_CPU_DCACHE_LINE_DISABLE andn r0, r0, #CACHE_LINESIZE - 1 sub r1, r1, r0 andn r1, r1, #CACHE_LINESIZE - 1 add r1, r1, #CACHE_LINESIZE csub.a r1, #MAX_AREA_SIZE bsg 2f @ r0 va2pa r10 101: dcacheline_flush r0, r11, r12 add r0, r0, #CACHE_LINESIZE sub.a r1, r1, #CACHE_LINESIZE bns 101b mov pc, lr #endif 2: mov ip, #0 movc p0.c5, ip, #14 @ Dcache flush all nop8 mov pc, lr
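Every ranged operation in the cache code above applies the same heuristic taken from proc-macros.S: if the span exceeds MAX_AREA_SIZE (64 cache lines) it is cheaper to operate on the whole D-cache than to walk it line by line; otherwise the range is rounded to cache lines and handled one line at a time, re-translating VA to PA at each page boundary (which this sketch glosses over). A hedged C outline with illustrative helper names:

#include <stdint.h>

#define CACHE_LINESIZE_SKETCH	32
#define MAX_AREA_SIZE_SKETCH	0x800	/* 64 cache lines, per proc-macros.S */

/* Assumed helpers, for illustration only. */
void dcache_flush_line(uintptr_t va);
void dcache_flush_all(void);

static void flush_range_sketch(uintptr_t start, uintptr_t end)
{
	start &= ~(uintptr_t)(CACHE_LINESIZE_SKETCH - 1);

	if (end - start > MAX_AREA_SIZE_SKETCH) {
		dcache_flush_all();		/* "Dcache flush all" */
		return;
	}
	for (uintptr_t va = start; va < end; va += CACHE_LINESIZE_SKETCH)
		dcache_flush_line(va);		/* one dcacheline_flush per line */
}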
AirFortressIlikara/LS2K0300-linux-4.19
2,730
arch/unicore32/mm/proc-ucv2.S
/* * linux/arch/unicore32/mm/proc-ucv2.S * * Code specific to PKUnity SoC and UniCore ISA * * Copyright (C) 2001-2010 GUAN Xue-tao * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/hwcap.h> #include <asm/pgtable-hwdef.h> #include <asm/pgtable.h> #include "proc-macros.S" ENTRY(cpu_proc_fin) stm.w (lr), [sp-] mov ip, #PSR_R_BIT | PSR_I_BIT | PRIV_MODE mov.a asr, ip b.l __cpuc_flush_kern_all ldm.w (pc), [sp]+ /* * cpu_reset(loc) * * Perform a soft reset of the system. Put the CPU into the * same state as it would be if it had been reset, and branch * to what would be the reset vector. * * - loc - location to jump to for soft reset */ .align 5 ENTRY(cpu_reset) mov ip, #0 movc p0.c5, ip, #28 @ Cache invalidate all nop8 movc p0.c6, ip, #6 @ TLB invalidate all nop8 movc ip, p0.c1, #0 @ ctrl register or ip, ip, #0x2000 @ vector base address andn ip, ip, #0x000f @ ............idam movc p0.c1, ip, #0 @ disable caches and mmu nop mov pc, r0 @ jump to loc nop8 /* * cpu_do_idle() * * Idle the processor (eg, wait for interrupt). * * IRQs are already disabled. */ ENTRY(cpu_do_idle) mov r0, #0 @ PCI address .rept 8 ldw r1, [r0] .endr mov pc, lr ENTRY(cpu_dcache_clean_area) #ifndef CONFIG_CPU_DCACHE_LINE_DISABLE csub.a r1, #MAX_AREA_SIZE bsg 101f mov r9, #PAGE_SZ sub r9, r9, #1 @ PAGE_MASK 1: va2pa r0, r10, r11, r12, r13 @ r10 is PA b 3f 2: cand.a r0, r9 beq 1b 3: movc p0.c5, r10, #11 @ clean D entry nop8 add r0, r0, #CACHE_LINESIZE add r10, r10, #CACHE_LINESIZE sub.a r1, r1, #CACHE_LINESIZE bua 2b mov pc, lr #endif 101: mov ip, #0 movc p0.c5, ip, #10 @ Dcache clean all nop8 mov pc, lr /* * cpu_do_switch_mm(pgd_phys) * * Set the translation table base pointer to be pgd_phys * * - pgd_phys - physical address of new pgd * * It is assumed that: * - we are not using split page tables */ .align 5 ENTRY(cpu_do_switch_mm) movc p0.c2, r0, #0 @ update page table ptr nop8 movc p0.c6, ip, #6 @ TLB invalidate all nop8 mov pc, lr /* * cpu_set_pte(ptep, pte) * * Set a level 2 translation table entry. * * - ptep - pointer to level 2 translation table entry * - pte - PTE value to store */ .align 5 ENTRY(cpu_set_pte) stw r1, [r0] #ifndef CONFIG_CPU_DCACHE_LINE_DISABLE sub r2, r0, #PAGE_OFFSET movc p0.c5, r2, #11 @ Dcache clean line nop8 #else mov ip, #0 movc p0.c5, ip, #10 @ Dcache clean all nop8 @dcacheline_flush r0, r2, ip #endif mov pc, lr
AirFortressIlikara/LS2K0300-linux-4.19
1,779
arch/unicore32/mm/tlb-ucv2.S
/* * linux/arch/unicore32/mm/tlb-ucv2.S * * Code specific to PKUnity SoC and UniCore ISA * * Copyright (C) 2001-2010 GUAN Xue-tao * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/page.h> #include <asm/tlbflush.h> #include "proc-macros.S" /* * __cpu_flush_user_tlb_range(start, end, vma) * * Invalidate a range of TLB entries in the specified address space. * * - start - start address (may not be aligned) * - end - end address (exclusive, may not be aligned) * - vma - vma_struct describing address range */ ENTRY(__cpu_flush_user_tlb_range) #ifndef CONFIG_CPU_TLB_SINGLE_ENTRY_DISABLE mov r0, r0 >> #PAGE_SHIFT @ align address mov r0, r0 << #PAGE_SHIFT vma_vm_flags r2, r2 @ get vma->vm_flags 1: movc p0.c6, r0, #3 nop8 cand.a r2, #VM_EXEC @ Executable area ? beq 2f movc p0.c6, r0, #5 nop8 2: add r0, r0, #PAGE_SZ csub.a r0, r1 beb 1b #else movc p0.c6, r0, #2 nop8 cand.a r2, #VM_EXEC @ Executable area ? beq 2f movc p0.c6, r0, #4 nop8 2: #endif mov pc, lr /* * __cpu_flush_kern_tlb_range(start,end) * * Invalidate a range of kernel TLB entries * * - start - start address (may not be aligned) * - end - end address (exclusive, may not be aligned) */ ENTRY(__cpu_flush_kern_tlb_range) #ifndef CONFIG_CPU_TLB_SINGLE_ENTRY_DISABLE mov r0, r0 >> #PAGE_SHIFT @ align address mov r0, r0 << #PAGE_SHIFT 1: movc p0.c6, r0, #3 nop8 movc p0.c6, r0, #5 nop8 add r0, r0, #PAGE_SZ csub.a r0, r1 beb 1b #else movc p0.c6, r0, #2 nop8 movc p0.c6, r0, #4 nop8 #endif mov pc, lr
AirFortressIlikara/LS2K0300-linux-4.19
3,688
arch/unicore32/mm/proc-macros.S
/* * linux/arch/unicore32/mm/proc-macros.S * * Code specific to PKUnity SoC and UniCore ISA * * Copyright (C) 2001-2010 GUAN Xue-tao * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * We need constants.h for: * VMA_VM_MM * VMA_VM_FLAGS * VM_EXEC */ #include <generated/asm-offsets.h> #include <asm/thread_info.h> #include <asm/memory.h> /* * the cache line sizes of the I and D cache are the same */ #define CACHE_LINESIZE 32 /* * This is the maximum size of an area which will be invalidated * using the single invalidate entry instructions. Anything larger * than this, and we go for the whole cache. * * This value should be chosen such that we choose the cheapest * alternative. */ #ifdef CONFIG_CPU_UCV2 #define MAX_AREA_SIZE 0x800 /* 64 cache line */ #endif /* * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm) */ .macro vma_vm_mm, rd, rn ldw \rd, [\rn+], #VMA_VM_MM .endm /* * vma_vm_flags - get vma->vm_flags */ .macro vma_vm_flags, rd, rn ldw \rd, [\rn+], #VMA_VM_FLAGS .endm .macro tsk_mm, rd, rn ldw \rd, [\rn+], #TI_TASK ldw \rd, [\rd+], #TSK_ACTIVE_MM .endm /* * act_mm - get current->active_mm */ .macro act_mm, rd andn \rd, sp, #8128 andn \rd, \rd, #63 ldw \rd, [\rd+], #TI_TASK ldw \rd, [\rd+], #TSK_ACTIVE_MM .endm /* * mmid - get context id from mm pointer (mm->context.id) */ .macro mmid, rd, rn ldw \rd, [\rn+], #MM_CONTEXT_ID .endm /* * mask_asid - mask the ASID from the context ID */ .macro asid, rd, rn and \rd, \rn, #255 .endm .macro crval, clear, mmuset, ucset .word \clear .word \mmuset .endm #ifndef CONFIG_CPU_DCACHE_LINE_DISABLE /* * va2pa va, pa, tbl, msk, off, err * This macro is used to translate virtual address to its physical address. * * va: virtual address * pa: physical address, result is stored in this register * tbl, msk, off: temp registers, will be destroyed * err: jump to error label if the physical address not exist * NOTE: all regs must be different */ .macro va2pa, va, pa, tbl, msk, off, err=990f movc \pa, p0.c2, #0 mov \off, \va >> #22 @ off <- index of 1st page table adr \tbl, 910f @ tbl <- table of 1st page table 900: @ ---- handle 1, 2 page table add \pa, \pa, #PAGE_OFFSET @ pa <- virt addr of page table ldw \pa, [\pa+], \off << #2 @ pa <- the content of pt cand.a \pa, #4 @ test exist bit beq \err @ if not exist and \off, \pa, #3 @ off <- the last 2 bits add \tbl, \tbl, \off << #3 @ cmove table pointer ldw \msk, [\tbl+], #0 @ get the mask ldw pc, [\tbl+], #4 930: @ ---- handle 2nd page table and \pa, \pa, \msk @ pa <- phys addr of 2nd pt mov \off, \va << #10 cntlo \tbl, \msk @ use tbl as temp reg mov \off, \off >> \tbl mov \off, \off >> #2 @ off <- index of 2nd pt adr \tbl, 920f @ tbl <- table of 2nd pt b 900b 910: @ 1st level page table .word 0xfffff000, 930b @ second level page table .word 0xfffffc00, 930b @ second level large page table .word 0x00000000, \err @ invalid .word 0xffc00000, 980f @ super page 920: @ 2nd level page table .word 0xfffff000, 980f @ page .word 0xffffc000, 980f @ middle page .word 0xffff0000, 980f @ large page .word 0x00000000, \err @ invalid 980: andn \tbl, \va, \msk and \pa, \pa, \msk or \pa, \pa, \tbl 990: .endm #endif .macro dcacheline_flush, addr, t1, t2 mov \t1, \addr << #20 ldw \t2, =_stext @ _stext must ALIGN(4096) add \t2, \t2, \t1 >> #20 ldw \t1, [\t2+], #0x0000 ldw \t1, [\t2+], #0x1000 ldw \t1, [\t2+], #0x2000 ldw \t1, [\t2+], #0x3000 .endm
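va2pa above is a software walk of the two-level page table: the top 10 VA bits index the first-level table, the low bits of each descriptor select a row of the 910:/920: mask tables (a mask plus a continuation address), and the walk either terminates on a super-page mapping or masks out the second-level table base and repeats. A heavily simplified C sketch of that shape; the exist-bit test and the 0xfffff000 / 0xffc00000 masks come from the macro, while the PAGE_OFFSET placeholder, the descriptor-type encoding and the 4 KiB-only second level are illustrative assumptions:

#include <stdint.h>

#define PAGE_OFFSET_SKETCH	0xc0000000u	/* placeholder for PAGE_OFFSET */

/* Illustrative two-level VA->PA walk in the spirit of va2pa; not the exact encodings. */
static int va2pa_sketch(uintptr_t ttb_phys, uintptr_t va, uintptr_t *pa)
{
	const uint32_t *l1 = (const uint32_t *)(ttb_phys + PAGE_OFFSET_SKETCH);
	uint32_t desc = l1[va >> 22];			/* "mov \off, \va >> #22" */

	if (!(desc & 4))				/* "cand.a \pa, #4": exist bit */
		return -1;

	if ((desc & 3) == 3) {				/* the 0xffc00000 "super page" row */
		*pa = (desc & 0xffc00000) | (va & 0x003fffff);
		return 0;
	}

	/* treat the remaining cases as a 4 KiB second-level table (the 0xfffff000 row) */
	const uint32_t *l2 = (const uint32_t *)((desc & 0xfffff000) + PAGE_OFFSET_SKETCH);

	desc = l2[(va >> 12) & 0x3ff];			/* conventional 4 KiB page index */
	if (!(desc & 4))
		return -1;

	*pa = (desc & 0xfffff000) | (va & 0xfff);
	return 0;
}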
AirFortressIlikara/LS2K0300-linux-4.19
4,184
arch/unicore32/boot/compressed/head.S
/* * linux/arch/unicore32/boot/compressed/head.S * * Code specific to PKUnity SoC and UniCore ISA * * Copyright (C) 2001-2010 GUAN Xue-tao * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/linkage.h> #include <mach/memory.h> #define csub cmpsub #define cand cmpand #define nop8 nop; nop; nop; nop; nop; nop; nop; nop .section ".start", #alloc, #execinstr .text start: .type start,#function /* Initialize ASR, PRIV mode and INTR off */ mov r0, #0xD3 mov.a asr, r0 adr r0, LC0 ldm (r1, r2, r3, r5, r6, r7, r8), [r0]+ ldw sp, [r0+], #28 sub.a r0, r0, r1 @ calculate the delta offset /* * if delta is zero, we are running at the address * we were linked at. */ beq not_relocated /* * We're running at a different address. We need to fix * up various pointers: * r5 - zImage base address (_start) * r7 - GOT start * r8 - GOT end */ add r5, r5, r0 add r7, r7, r0 add r8, r8, r0 /* * we need to fix up pointers into the BSS region. * r2 - BSS start * r3 - BSS end * sp - stack pointer */ add r2, r2, r0 add r3, r3, r0 add sp, sp, r0 /* * Relocate all entries in the GOT table. * This fixes up the C references. * r7 - GOT start * r8 - GOT end */ 1001: ldw r1, [r7+], #0 add r1, r1, r0 stw.w r1, [r7]+, #4 csub.a r7, r8 bub 1001b not_relocated: /* * Clear BSS region. * r2 - BSS start * r3 - BSS end */ mov r0, #0 1002: stw.w r0, [r2]+, #4 csub.a r2, r3 bub 1002b /* * Turn on the cache. */ mov r0, #0 movc p0.c5, r0, #28 @ cache invalidate all nop8 movc p0.c6, r0, #6 @ tlb invalidate all nop8 mov r0, #0x1c @ en icache and wb dcache movc p0.c1, r0, #0 nop8 /* * Set up some pointers, for starting decompressing. */ mov r1, sp @ malloc space above stack add r2, sp, #0x10000 @ 64k max /* * Check to see if we will overwrite ourselves. * r4 = final kernel address * r5 = start of this image * r6 = size of decompressed image * r2 = end of malloc space (and therefore this image) * We basically want: * r4 >= r2 -> OK * r4 + image length <= r5 -> OK */ ldw r4, =KERNEL_IMAGE_START csub.a r4, r2 bea wont_overwrite add r0, r4, r6 csub.a r0, r5 beb wont_overwrite /* * If overwrite, just print error message */ b __error_overwrite /* * We're not in danger of overwriting ourselves. * Do this the simple way. */ wont_overwrite: /* * decompress_kernel: * r0: output_start * r1: free_mem_ptr_p * r2: free_mem_ptr_end_p */ mov r0, r4 b.l decompress_kernel @ C functions /* * Clean and flush the cache to maintain consistency. */ mov r0, #0 movc p0.c5, r0, #14 @ flush dcache nop8 movc p0.c5, r0, #20 @ icache invalidate all nop8 /* * Turn off the Cache and MMU. */ mov r0, #0 @ disable i/d cache and MMU movc p0.c1, r0, #0 nop8 mov r0, #0 @ must be zero ldw r4, =KERNEL_IMAGE_START mov pc, r4 @ call kernel .align 2 .type LC0, #object LC0: .word LC0 @ r1 .word __bss_start @ r2 .word _end @ r3 .word _start @ r5 .word _image_size @ r6 .word _got_start @ r7 .word _got_end @ r8 .word decompress_stack_end @ sp .size LC0, . - LC0 print_string: #ifdef CONFIG_DEBUG_OCD 2001: ldb.w r1, [r0]+, #1 csub.a r1, #0 bne 2002f mov pc, lr 2002: movc r2, p1.c0, #0 cand.a r2, #2 bne 2002b movc p1.c1, r1, #1 csub.a r1, #'\n' cmoveq r1, #'\r' beq 2002b b 2001b #else mov pc, lr #endif __error_overwrite: adr r0, str_error b.l print_string 2001: nop8 b 2001b str_error: .asciz "\nError: Kernel address OVERWRITE\n" .align .ltorg .align 4 .section ".stack", "aw", %nobits decompress_stack: .space 4096 decompress_stack_end:
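The overwrite test in the comment above reduces to two unsigned comparisons: the decompression target must start at or above the end of this image's malloc area, or the decompressed kernel must end at or below the image's own start. Restated as an illustrative C predicate (the names describe the registers listed in the comment, they are not from the file):

#include <stdbool.h>
#include <stdint.h>

/* r4 = kernel_dst, r2 = malloc_end, r5 = image_start, r6 = decompressed_len */
static bool wont_overwrite_sketch(uintptr_t kernel_dst, uintptr_t malloc_end,
				  uintptr_t image_start, uintptr_t decompressed_len)
{
	return kernel_dst >= malloc_end ||
	       kernel_dst + decompressed_len <= image_start;
}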
AirFortressIlikara/LS2K0300-linux-4.19
3,839
arch/arc/kernel/head.S
/* * ARC CPU startup Code * * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Vineetg: Dec 2007 * -Check if we are running on Simulator or on real hardware * to skip certain things during boot on simulator */ #include <linux/linkage.h> #include <asm/asm-offsets.h> #include <asm/entry.h> #include <asm/arcregs.h> #include <asm/cache.h> #include <asm/irqflags.h> .macro CPU_EARLY_SETUP ; Setting up Vectror Table (in case exception happens in early boot sr @_int_vec_base_lds, [AUX_INTR_VEC_BASE] ; Disable I-cache/D-cache if kernel so configured lr r5, [ARC_REG_IC_BCR] breq r5, 0, 1f ; I$ doesn't exist lr r5, [ARC_REG_IC_CTRL] #ifdef CONFIG_ARC_HAS_ICACHE bclr r5, r5, 0 ; 0 - Enable, 1 is Disable #else bset r5, r5, 0 ; I$ exists, but is not used #endif sr r5, [ARC_REG_IC_CTRL] 1: lr r5, [ARC_REG_DC_BCR] breq r5, 0, 1f ; D$ doesn't exist lr r5, [ARC_REG_DC_CTRL] bclr r5, r5, 6 ; Invalidate (discard w/o wback) #ifdef CONFIG_ARC_HAS_DCACHE bclr r5, r5, 0 ; Enable (+Inv) #else bset r5, r5, 0 ; Disable (+Inv) #endif sr r5, [ARC_REG_DC_CTRL] 1: #ifdef CONFIG_ISA_ARCV2 ; Unaligned access is disabled at reset, so re-enable early as ; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access ; by default lr r5, [status32] bset r5, r5, STATUS_AD_BIT kflag r5 #endif .endm .section .init.text, "ax",@progbits ;---------------------------------------------------------------- ; Default Reset Handler (jumped into from Reset vector) ; - Don't clobber r0,r1,r2 as they might have u-boot provided args ; - Platforms can override this weak version if needed ;---------------------------------------------------------------- WEAK(res_service) j stext END(res_service) ;---------------------------------------------------------------- ; Kernel Entry point ;---------------------------------------------------------------- ENTRY(stext) CPU_EARLY_SETUP #ifdef CONFIG_SMP GET_CPU_ID r5 cmp r5, 0 mov.nz r0, r5 bz .Lmaster_proceed ; Non-Masters wait for Master to boot enough and bring them up ; when they resume, tail-call to entry point mov blink, @first_lines_of_secondary j arc_platform_smp_wait_to_boot .Lmaster_proceed: #endif ; Clear BSS before updating any globals ; XXX: use ZOL here mov r5, __bss_start sub r6, __bss_stop, r5 lsr.f lp_count, r6, 2 lpnz 1f st.ab 0, [r5, 4] 1: ; Uboot - kernel ABI ; r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2 ; r1 = magic number (always zero as of now) ; r2 = pointer to uboot provided cmdline or external DTB in mem ; These are handled later in handle_uboot_args() st r0, [@uboot_tag] st r1, [@uboot_magic] st r2, [@uboot_arg] ; setup "current" tsk and optionally cache it in dedicated r25 mov r9, @init_task SET_CURR_TASK_ON_CPU r9, r0 ; r9 = tsk, r0 = scratch ; setup stack (fp, sp) mov fp, 0 ; tsk->thread_info is really a PAGE, whose bottom hoists stack GET_TSK_STACK_BASE r9, sp ; r9 = tsk, sp = stack base(output) j start_kernel ; "C" entry point END(stext) #ifdef CONFIG_SMP ;---------------------------------------------------------------- ; First lines of code run by secondary before jumping to 'C' ;---------------------------------------------------------------- .section .text, "ax",@progbits ENTRY(first_lines_of_secondary) ; setup per-cpu idle task as "current" on this CPU ld r0, [@secondary_idle_tsk] SET_CURR_TASK_ON_CPU r0, r1 ; setup stack (fp, sp) mov 
fp, 0 ; set it's stack base to tsk->thread_info bottom GET_TSK_STACK_BASE r0, sp j start_kernel_secondary END(first_lines_of_secondary) #endif
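The ARC head.S comments pin down the U-boot handoff ABI: r0 tells handle_uboot_args() how to interpret r2 (nothing, a command line, or an external DTB) and r1 is a magic value that is currently always zero. A small illustrative decode of that contract (enum and function names are mine, not the kernel's):

/* Illustrative decode of the r0/r1/r2 handoff described above. */
enum uboot_tag_sketch {
	UBOOT_NONE	= 0,	/* r0 == 0: no u-boot interaction */
	UBOOT_CMDLINE	= 1,	/* r0 == 1: r2 points at a command line */
	UBOOT_DTB	= 2,	/* r0 == 2: r2 points at an external DTB */
};

static void handle_uboot_args_sketch(unsigned int tag, unsigned int magic, void *arg)
{
	if (magic != 0)		/* "magic number (always zero as of now)" */
		return;
	if (tag == UBOOT_CMDLINE) {
		/* treat 'arg' as the boot command line */
	} else if (tag == UBOOT_DTB) {
		/* treat 'arg' as the device tree blob */
	}
	(void)arg;
}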
AirFortressIlikara/LS2K0300-linux-4.19
12,349
arch/arc/kernel/entry-compact.S
/* * Low Level Interrupts/Traps/Exceptions(non-TLB) Handling for ARCompact ISA * * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com) * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * vineetg: May 2011 * -Userspace unaligned access emulation * * vineetg: Feb 2011 (ptrace low level code fixes) * -traced syscall return code (r0) was not saved into pt_regs for restoring * into user reg-file when traded task rets to user space. * -syscalls needing arch-wrappers (mainly for passing sp as pt_regs) * were not invoking post-syscall trace hook (jumping directly into * ret_from_system_call) * * vineetg: Nov 2010: * -Vector table jumps (@8 bytes) converted into branches (@4 bytes) * -To maintain the slot size of 8 bytes/vector, added nop, which is * not executed at runtime. * * vineetg: Nov 2009 (Everything needed for TIF_RESTORE_SIGMASK) * -do_signal()invoked upon TIF_RESTORE_SIGMASK as well * -Wrappers for sys_{,rt_}sigsuspend() no longer needed as they don't * need ptregs anymore * * Vineetg: Oct 2009 * -In a rare scenario, Process gets a Priv-V exception and gets scheduled * out. Since we don't do FAKE RTIE for Priv-V, CPU exception state remains * active (AE bit enabled). This causes a double fault for a subseq valid * exception. Thus FAKE RTIE needed in low level Priv-Violation handler. * Instr Error could also cause similar scenario, so same there as well. * * Vineetg: March 2009 (Supporting 2 levels of Interrupts) * * Vineetg: Aug 28th 2008: Bug #94984 * -Zero Overhead Loop Context shd be cleared when entering IRQ/EXcp/Trap * Normally CPU does this automatically, however when doing FAKE rtie, * we need to explicitly do this. The problem in macros * FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit * was being "CLEARED" rather then "SET". Since it is Loop INHIBIT Bit, * setting it and not clearing it clears ZOL context * * Vineetg: May 16th, 2008 * - r25 now contains the Current Task when in kernel * * Vineetg: Dec 22, 2007 * Minor Surgery of Low Level ISR to make it SMP safe * - MMU_SCRATCH0 Reg used for freeing up r9 in Level 1 ISR * - _current_task is made an array of NR_CPUS * - Access of _current_task wrapped inside a macro so that if hardware * team agrees for a dedicated reg, no other code is touched * * Amit Bhor, Rahul Trivedi, Kanika Nema, Sameer Dhavale : Codito Tech 2004 */ #include <linux/errno.h> #include <linux/linkage.h> /* {ENTRY,EXIT} */ #include <asm/entry.h> #include <asm/irqflags.h> .cpu A7 ;############################ Vector Table ################################# .macro VECTOR lbl #if 1 /* Just in case, build breaks */ j \lbl #else b \lbl nop #endif .endm .section .vector, "ax",@progbits .align 4 /* Each entry in the vector table must occupy 2 words. Since it is a jump * across sections (.vector to .text) we are guaranteed that 'j somewhere' * will use the 'j limm' form of the instruction as long as somewhere is in * a section other than .vector. 
*/ ; ********* Critical System Events ********************** VECTOR res_service ; 0x0, Reset Vector (0x0) VECTOR mem_service ; 0x8, Mem exception (0x1) VECTOR instr_service ; 0x10, Instrn Error (0x2) ; ******************** Device ISRs ********************** #ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS VECTOR handle_interrupt_level2 #else VECTOR handle_interrupt_level1 #endif .rept 28 VECTOR handle_interrupt_level1 ; Other devices .endr /* FOR ARC600: timer = 0x3, uart = 0x8, emac = 0x10 */ ; ******************** Exceptions ********************** VECTOR EV_MachineCheck ; 0x100, Fatal Machine check (0x20) VECTOR EV_TLBMissI ; 0x108, Instruction TLB miss (0x21) VECTOR EV_TLBMissD ; 0x110, Data TLB miss (0x22) VECTOR EV_TLBProtV ; 0x118, Protection Violation (0x23) ; or Misaligned Access VECTOR EV_PrivilegeV ; 0x120, Privilege Violation (0x24) VECTOR EV_Trap ; 0x128, Trap exception (0x25) VECTOR EV_Extension ; 0x130, Extn Instruction Excp (0x26) .rept 24 VECTOR reserved ; Reserved Exceptions .endr ;##################### Scratch Mem for IRQ stack switching ############# ARCFP_DATA int1_saved_reg .align 32 .type int1_saved_reg, @object .size int1_saved_reg, 4 int1_saved_reg: .zero 4 /* Each Interrupt level needs its own scratch */ ARCFP_DATA int2_saved_reg .type int2_saved_reg, @object .size int2_saved_reg, 4 int2_saved_reg: .zero 4 ; --------------------------------------------- .section .text, "ax",@progbits reserved: flag 1 ; Unexpected event, halt ;##################### Interrupt Handling ############################## #ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS ; --------------------------------------------- ; Level 2 ISR: Can interrupt a Level 1 ISR ; --------------------------------------------- ENTRY(handle_interrupt_level2) INTERRUPT_PROLOGUE 2 ;------------------------------------------------------ ; if L2 IRQ interrupted a L1 ISR, disable preemption ; ; This is to avoid a potential L1-L2-L1 scenario ; -L1 IRQ taken ; -L2 interrupts L1 (before L1 ISR could run) ; -preemption off IRQ, user task in syscall picked to run ; -RTIE to userspace ; Returns from L2 context fine ; But both L1 and L2 re-enabled, so another L1 can be taken ; while prev L1 is still unserviced ; ;------------------------------------------------------ ; L2 interrupting L1 implies both L2 and L1 active ; However both A2 and A1 are NOT set in STATUS32, thus ; need to check STATUS32_L2 to determine if L1 was active ld r9, [sp, PT_status32] ; get statu32_l2 (saved in pt_regs) bbit0 r9, STATUS_A1_BIT, 1f ; L1 not active when L2 IRQ, so normal ; bump thread_info->preempt_count (Disable preemption) GET_CURR_THR_INFO_FROM_SP r10 ld r9, [r10, THREAD_INFO_PREEMPT_COUNT] add r9, r9, 1 st r9, [r10, THREAD_INFO_PREEMPT_COUNT] 1: ;------------------------------------------------------ ; setup params for Linux common ISR and invoke it ;------------------------------------------------------ lr r0, [icause2] and r0, r0, 0x1f bl.d @arch_do_IRQ mov r1, sp mov r8,0x2 sr r8, [AUX_IRQ_LV12] ; clear bit in Sticky Status Reg b ret_from_exception END(handle_interrupt_level2) #endif ; --------------------------------------------- ; User Mode Memory Bus Error Interrupt Handler ; (Kernel mode memory errors handled via separate exception vectors) ; --------------------------------------------- ENTRY(mem_service) INTERRUPT_PROLOGUE 2 mov r0, ilink2 mov r1, sp ; User process needs to be killed with SIGBUS, but first need to get ; out of the L2 interrupt context (drop to pure kernel mode) and jump ; off to "C" code where SIGBUS in enqueued lr r3, [status32] bclr 
r3, r3, STATUS_A2_BIT or r3, r3, (STATUS_E1_MASK|STATUS_E2_MASK) sr r3, [status32_l2] mov ilink2, 1f rtie 1: bl do_memory_error b ret_from_exception END(mem_service) ; --------------------------------------------- ; Level 1 ISR ; --------------------------------------------- ENTRY(handle_interrupt_level1) INTERRUPT_PROLOGUE 1 lr r0, [icause1] and r0, r0, 0x1f #ifdef CONFIG_TRACE_IRQFLAGS ; icause1 needs to be read early, before calling tracing, which ; can clobber scratch regs, hence use of stack to stash it push r0 TRACE_ASM_IRQ_DISABLE pop r0 #endif bl.d @arch_do_IRQ mov r1, sp mov r8,0x1 sr r8, [AUX_IRQ_LV12] ; clear bit in Sticky Status Reg b ret_from_exception END(handle_interrupt_level1) ;################### Non TLB Exception Handling ############################# ; --------------------------------------------- ; Protection Violation Exception Handler ; --------------------------------------------- ENTRY(EV_TLBProtV) EXCEPTION_PROLOGUE mov r2, r9 ; ECR set into r9 already lr r0, [efa] ; Faulting Data address (not part of pt_regs saved above) ; Exception auto-disables further Intr/exceptions. ; Re-enable them by pretending to return from exception ; (so rest of handler executes in pure K mode) FAKE_RET_FROM_EXCPN mov r1, sp ; Handle to pt_regs ;------ (5) Type of Protection Violation? ---------- ; ; ProtV Hardware Exception is triggered for Access Faults of 2 types ; -Access Violation : 00_23_(00|01|02|03)_00 ; x r w r+w ; -Unaligned Access : 00_23_04_00 ; bbit1 r2, ECR_C_BIT_PROTV_MISALIG_DATA, 4f ;========= (6a) Access Violation Processing ======== bl do_page_fault b ret_from_exception ;========== (6b) Non aligned access ============ 4: SAVE_CALLEE_SAVED_USER mov r2, sp ; callee_regs bl do_misaligned_access ; TBD: optimize - do this only if a callee reg was involved ; either a dst of emulated LD/ST or src with address-writeback RESTORE_CALLEE_SAVED_USER b ret_from_exception END(EV_TLBProtV) ; Wrapper for Linux page fault handler called from EV_TLBMiss* ; Very similar to ProtV handler case (6a) above, but avoids the extra checks ; for Misaligned access ; ENTRY(call_do_page_fault) EXCEPTION_PROLOGUE lr r0, [efa] ; Faulting Data address mov r1, sp FAKE_RET_FROM_EXCPN mov blink, ret_from_exception b do_page_fault END(call_do_page_fault) ;############# Common Handlers for ARCompact and ARCv2 ############## #include "entry.S" ;############# Return from Intr/Excp/Trap (ARC Specifics) ############## ; ; Restore the saved sys context (common exit-path for EXCPN/IRQ/Trap) ; IRQ shd definitely not happen between now and rtie ; All 2 entry points to here already disable interrupts .Lrestore_regs: # Interrupts are actually disabled from this point on, but will get # reenabled after we return from interrupt/exception. # But irq tracer needs to be told now... TRACE_ASM_IRQ_ENABLE lr r10, [status32] ; Restore REG File. In case multiple Events outstanding, ; use the same priority as rtie: EXCPN, L2 IRQ, L1 IRQ, None ; Note that we use realtime STATUS32 (not pt_regs->status32) to ; decide that. and.f 0, r10, (STATUS_A1_MASK|STATUS_A2_MASK) bz .Lexcep_or_pure_K_ret ; Returning from Interrupts (Level 1 or 2) #ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS ; Level 2 interrupt return Path - from hardware standpoint bbit0 r10, STATUS_A2_BIT, not_level2_interrupt ;------------------------------------------------------------------ ; However the context returning might not have taken L2 intr itself ; e.g. 
Task'A' user-code -> L2 intr -> schedule -> 'B' user-code ret ; Special considerations needed for the context which took L2 intr ld r9, [sp, PT_event] ; Ensure this is L2 intr context brne r9, event_IRQ2, 149f ;------------------------------------------------------------------ ; if L2 IRQ interrupted an L1 ISR, we'd disabled preemption earlier ; so that sched doesn't move to new task, causing L1 to be delayed ; undeterministically. Now that we've achieved that, let's reset ; things to what they were, before returning from L2 context ;---------------------------------------------------------------- ld r9, [sp, PT_status32] ; get statu32_l2 (saved in pt_regs) bbit0 r9, STATUS_A1_BIT, 149f ; L1 not active when L2 IRQ, so normal ; decrement thread_info->preempt_count (re-enable preemption) GET_CURR_THR_INFO_FROM_SP r10 ld r9, [r10, THREAD_INFO_PREEMPT_COUNT] ; paranoid check, given A1 was active when A2 happened, preempt count ; must not be 0 because we would have incremented it. ; If this does happen we simply HALT as it means a BUG !!! cmp r9, 0 bnz 2f flag 1 2: sub r9, r9, 1 st r9, [r10, THREAD_INFO_PREEMPT_COUNT] 149: INTERRUPT_EPILOGUE 2 ; return from level 2 interrupt debug_marker_l2: rtie not_level2_interrupt: #endif INTERRUPT_EPILOGUE 1 ; return from level 1 interrupt debug_marker_l1: rtie .Lexcep_or_pure_K_ret: ;this case is for syscalls or Exceptions or pure kernel mode EXCEPTION_EPILOGUE debug_marker_syscall: rtie END(ret_from_exception)
AirFortressIlikara/LS2K0300-linux-4.19
8,216
arch/arc/kernel/entry-arcv2.S
/* * ARCv2 ISA based core Low Level Intr/Traps/Exceptions(non-TLB) Handling * * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/linkage.h> /* ARC_{EXTRY,EXIT} */ #include <asm/entry.h> /* SAVE_ALL_{INT1,INT2,TRAP...} */ #include <asm/errno.h> #include <asm/arcregs.h> #include <asm/irqflags.h> ; A maximum number of supported interrupts in the core interrupt controller. ; This number is not equal to the maximum interrupt number (256) because ; first 16 lines are reserved for exceptions and are not configurable. #define NR_CPU_IRQS 240 .cpu HS #define VECTOR .word ;############################ Vector Table ################################# .section .vector,"a",@progbits .align 4 # Initial 16 slots are Exception Vectors VECTOR res_service ; Reset Vector VECTOR mem_service ; Mem exception VECTOR instr_service ; Instrn Error VECTOR EV_MachineCheck ; Fatal Machine check VECTOR EV_TLBMissI ; Intruction TLB miss VECTOR EV_TLBMissD ; Data TLB miss VECTOR EV_TLBProtV ; Protection Violation VECTOR EV_PrivilegeV ; Privilege Violation VECTOR EV_SWI ; Software Breakpoint VECTOR EV_Trap ; Trap exception VECTOR EV_Extension ; Extn Instruction Exception VECTOR EV_DivZero ; Divide by Zero VECTOR EV_DCError ; Data Cache Error VECTOR EV_Misaligned ; Misaligned Data Access VECTOR reserved ; Reserved slots VECTOR reserved ; Reserved slots # Begin Interrupt Vectors VECTOR handle_interrupt ; (16) Timer0 VECTOR handle_interrupt ; unused (Timer1) VECTOR handle_interrupt ; unused (WDT) VECTOR handle_interrupt ; (19) Inter core Interrupt (IPI) VECTOR handle_interrupt ; (20) perf Interrupt VECTOR handle_interrupt ; (21) Software Triggered Intr (Self IPI) VECTOR handle_interrupt ; unused VECTOR handle_interrupt ; (23) unused # End of fixed IRQs .rept NR_CPU_IRQS - 8 VECTOR handle_interrupt .endr .section .text, "ax",@progbits reserved: flag 1 ; Unexpected event, halt ;##################### Interrupt Handling ############################## ENTRY(handle_interrupt) INTERRUPT_PROLOGUE irq # irq control APIs local_irq_save/restore/disable/enable fiddle with # global interrupt enable bits in STATUS32 (.IE for 1 prio, .E[] for 2 prio) # However a taken interrupt doesn't clear these bits. Thus irqs_disabled() # query in hard ISR path would return false (since .IE is set) which would # trips genirq interrupt handling asserts. # # So do a "soft" disable of interrutps here. # # Note this disable is only for consistent book-keeping as further interrupts # will be disabled anyways even w/o this. 
Hardware tracks active interrupts # seperately in AUX_IRQ_ACTIVE.active and will not take new interrupts # unless this one returns (or higher prio becomes pending in 2-prio scheme) IRQ_DISABLE ; icause is banked: one per priority level ; so a higher prio interrupt taken here won't clobber prev prio icause lr r0, [ICAUSE] mov blink, ret_from_exception b.d arch_do_IRQ mov r1, sp END(handle_interrupt) ;################### Non TLB Exception Handling ############################# ENTRY(EV_SWI) ; TODO: implement this EXCEPTION_PROLOGUE b ret_from_exception END(EV_SWI) ENTRY(EV_DivZero) ; TODO: implement this EXCEPTION_PROLOGUE b ret_from_exception END(EV_DivZero) ENTRY(EV_DCError) ; TODO: implement this EXCEPTION_PROLOGUE b ret_from_exception END(EV_DCError) ; --------------------------------------------- ; Memory Error Exception Handler ; - Unlike ARCompact, handles Bus errors for both User/Kernel mode, ; Instruction fetch or Data access, under a single Exception Vector ; --------------------------------------------- ENTRY(mem_service) EXCEPTION_PROLOGUE lr r0, [efa] mov r1, sp FAKE_RET_FROM_EXCPN bl do_memory_error b ret_from_exception END(mem_service) ENTRY(EV_Misaligned) EXCEPTION_PROLOGUE lr r0, [efa] ; Faulting Data address mov r1, sp FAKE_RET_FROM_EXCPN SAVE_CALLEE_SAVED_USER mov r2, sp ; callee_regs bl do_misaligned_access ; TBD: optimize - do this only if a callee reg was involved ; either a dst of emulated LD/ST or src with address-writeback RESTORE_CALLEE_SAVED_USER b ret_from_exception END(EV_Misaligned) ; --------------------------------------------- ; Protection Violation Exception Handler ; --------------------------------------------- ENTRY(EV_TLBProtV) EXCEPTION_PROLOGUE lr r0, [efa] ; Faulting Data address mov r1, sp ; pt_regs FAKE_RET_FROM_EXCPN mov blink, ret_from_exception b do_page_fault END(EV_TLBProtV) ; From Linux standpoint Slow Path I/D TLB Miss is same a ProtV as they ; need to call do_page_fault(). ; ECR in pt_regs provides whether access was R/W/X .global call_do_page_fault .set call_do_page_fault, EV_TLBProtV ;############# Common Handlers for ARCompact and ARCv2 ############## #include "entry.S" ;############# Return from Intr/Excp/Trap (ARCv2 ISA Specifics) ############## ; ; Restore the saved sys context (common exit-path for EXCPN/IRQ/Trap) ; IRQ shd definitely not happen between now and rtie ; All 2 entry points to here already disable interrupts .Lrestore_regs: restore_regs: # Interrpts are actually disabled from this point on, but will get # reenabled after we return from interrupt/exception. # But irq tracer needs to be told now... 
TRACE_ASM_IRQ_ENABLE ld r0, [sp, PT_status32] ; U/K mode at time of entry lr r10, [AUX_IRQ_ACT] bmsk r11, r10, 15 ; AUX_IRQ_ACT.ACTIVE breq r11, 0, .Lexcept_ret ; No intr active, ret from Exception ;####### Return from Intr ####### debug_marker_l1: ; bbit1.nt r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot btst r0, STATUS_DE_BIT ; Z flag set if bit clear bnz .Lintr_ret_to_delay_slot ; branch if STATUS_DE_BIT set .Lisr_ret_fast_path: ; Handle special case #1: (Entry via Exception, Return via IRQ) ; ; Exception in U mode, preempted in kernel, Intr taken (K mode), orig ; task now returning to U mode (riding the Intr) ; AUX_IRQ_ACTIVE won't have U bit set (since intr in K mode), hence SP ; won't be switched to correct U mode value (from AUX_SP) ; So force AUX_IRQ_ACT.U for such a case btst r0, STATUS_U_BIT ; Z flag set if K (Z clear for U) bset.nz r11, r11, AUX_IRQ_ACT_BIT_U ; NZ means U sr r11, [AUX_IRQ_ACT] INTERRUPT_EPILOGUE irq rtie ;####### Return from Exception / pure kernel mode ####### .Lexcept_ret: ; Expects r0 has PT_status32 debug_marker_syscall: EXCEPTION_EPILOGUE rtie ;####### Return from Intr to insn in delay slot ####### ; Handle special case #2: (Entry via Exception in Delay Slot, Return via IRQ) ; ; Intr returning to a Delay Slot (DS) insn ; (since IRQ NOT allowed in DS in ARCv2, this can only happen if orig ; entry was via Exception in DS which got preempted in kernel). ; ; IRQ RTIE won't reliably restore DE bit and/or BTA, needs workaround ; ; Solution is return from Intr w/o any delay slot quirks into a kernel trampoline ; and from pure kernel mode return to delay slot which handles DS bit/BTA correctly .Lintr_ret_to_delay_slot: debug_marker_ds: ld r2, [@intr_to_DE_cnt] add r2, r2, 1 st r2, [@intr_to_DE_cnt] ld r2, [sp, PT_ret] ld r3, [sp, PT_status32] ; STAT32 for Int return created from scratch ; (No delay dlot, disable Further intr in trampoline) bic r0, r3, STATUS_U_MASK|STATUS_DE_MASK|STATUS_IE_MASK|STATUS_L_MASK st r0, [sp, PT_status32] mov r1, .Lintr_ret_to_delay_slot_2 st r1, [sp, PT_ret] ; Orig exception PC/STAT32 safekept @orig_r0 and @event stack slots st r2, [sp, 0] st r3, [sp, 4] b .Lisr_ret_fast_path .Lintr_ret_to_delay_slot_2: ; Trampoline to restore orig exception PC/STAT32/BTA/AUX_USER_SP sub sp, sp, SZ_PT_REGS st r9, [sp, -4] ld r9, [sp, 0] sr r9, [eret] ld r9, [sp, 4] sr r9, [erstatus] ; restore AUX_USER_SP if returning to U mode bbit0 r9, STATUS_U_BIT, 1f ld r9, [sp, PT_sp] sr r9, [AUX_USER_SP] 1: ld r9, [sp, 8] sr r9, [erbta] ld r9, [sp, -4] add sp, sp, SZ_PT_REGS ; return from pure kernel mode to delay slot rtie END(ret_from_exception)
AirFortressIlikara/LS2K0300-linux-4.19
10,170
arch/arc/kernel/entry.S
/*
 * Common Low Level Interrupts/Traps/Exceptions(non-TLB) Handling for ARC
 * (included from entry-<isa>.S)
 *
 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*------------------------------------------------------------------
 * Function ABI
 *------------------------------------------------------------------
 *
 *  Arguments                           r0 - r7
 *  Caller Saved Registers              r0 - r12
 *  Callee Saved Registers              r13 - r25
 *  Global Pointer (gp)                 r26
 *  Frame Pointer (fp)                  r27
 *  Stack Pointer (sp)                  r28
 *  Branch link register (blink)        r31
 *------------------------------------------------------------------
 */

;################### Special Sys Call Wrappers ##########################

ENTRY(sys_clone_wrapper)
	SAVE_CALLEE_SAVED_USER
	bl  @sys_clone
	DISCARD_CALLEE_SAVED_USER

	GET_CURR_THR_INFO_FLAGS   r10
	btst r10, TIF_SYSCALL_TRACE
	bnz  tracesys_exit

	b .Lret_from_system_call
END(sys_clone_wrapper)

ENTRY(ret_from_fork)
	; when the forked child comes here from the __switch_to function
	; r0 has the last task pointer.
	; put last task in scheduler queue
	jl   @schedule_tail

	ld   r9, [sp, PT_status32]
	brne r9, 0, 1f

	jl.d [r14]		; kernel thread entry point
	mov  r0, r13		; (see PF_KTHREAD block in copy_thread)

1:
	; Return to user space
	; 1. Any forked task (Reach here via BRne above)
	; 2. First ever init task (Reach here via return from JL above)
	;    This is the historic "kernel_execve" use-case, to return to init
	;    user mode, in a round about way since that is always done from
	;    a kernel thread which is executed via JL above but always returns
	;    out whenever kernel_execve (now inline do_fork()) is involved
	b    ret_from_exception
END(ret_from_fork)

;################### Non TLB Exception Handling #############################

; ---------------------------------------------
; Instruction Error Exception Handler
; ---------------------------------------------

ENTRY(instr_service)

	EXCEPTION_PROLOGUE

	lr  r0, [efa]
	mov r1, sp

	FAKE_RET_FROM_EXCPN

	bl  do_insterror_or_kprobe
	b   ret_from_exception
END(instr_service)

; ---------------------------------------------
; Machine Check Exception Handler
; ---------------------------------------------

ENTRY(EV_MachineCheck)

	EXCEPTION_PROLOGUE

	lr  r2, [ecr]
	lr  r0, [efa]
	mov r1, sp

	; hardware auto-disables MMU, re-enable it to allow kernel vaddr
	; access for say stack unwinding of modules for crash dumps
	lr  r3, [ARC_REG_PID]
	or  r3, r3, MMU_ENABLE
	sr  r3, [ARC_REG_PID]

	lsr  r3, r2, 8
	bmsk r3, r3, 7
	brne r3, ECR_C_MCHK_DUP_TLB, 1f

	bl   do_tlb_overlap_fault
	b    ret_from_exception

1:
	; DEAD END: can't do much, display Regs and HALT
	SAVE_CALLEE_SAVED_USER

	GET_CURR_TASK_FIELD_PTR   TASK_THREAD, r10
	st  sp, [r10, THREAD_CALLEE_REG]

	j  do_machine_check_fault

END(EV_MachineCheck)

; ---------------------------------------------
; Privilege Violation Exception Handler
; ---------------------------------------------

ENTRY(EV_PrivilegeV)

	EXCEPTION_PROLOGUE

	lr  r0, [efa]
	mov r1, sp

	FAKE_RET_FROM_EXCPN

	bl  do_privilege_fault
	b   ret_from_exception
END(EV_PrivilegeV)

; ---------------------------------------------
; Extension Instruction Exception Handler
; ---------------------------------------------

ENTRY(EV_Extension)

	EXCEPTION_PROLOGUE

	lr  r0, [efa]
	mov r1, sp

	FAKE_RET_FROM_EXCPN

	bl  do_extension_fault
	b   ret_from_exception
END(EV_Extension)

;################ Trap Handling (Syscall, Breakpoint) ##################

; ---------------------------------------------
; syscall Tracing
; ---------------------------------------------

tracesys:
	; save EFA in case tracer wants the PC of traced task
	; using ERET won't work since next-PC has already committed
	lr  r12, [efa]
	GET_CURR_TASK_FIELD_PTR   TASK_THREAD, r11
	st  r12, [r11, THREAD_FAULT_ADDR]	; thread.fault_address

	; PRE Sys Call Ptrace hook
	mov r0, sp			; pt_regs needed
	bl  @syscall_trace_entry

	; Tracing code now returns the syscall num (orig or modif)
	mov r8, r0

	; Do the Sys Call as we normally would.
	; Validate the Sys Call number
	cmp     r8,  NR_syscalls
	mov.hi  r0, -ENOSYS
	bhi     tracesys_exit

	; Restore the sys-call args. Mere invocation of the hook above could
	; have clobbered them (since they are in scratch regs). The tracer
	; could also have deliberately changed the syscall args: r0-r7
	ld  r0, [sp, PT_r0]
	ld  r1, [sp, PT_r1]
	ld  r2, [sp, PT_r2]
	ld  r3, [sp, PT_r3]
	ld  r4, [sp, PT_r4]
	ld  r5, [sp, PT_r5]
	ld  r6, [sp, PT_r6]
	ld  r7, [sp, PT_r7]
	ld.as   r9, [sys_call_table, r8]
	jl      [r9]		; Entry into Sys Call Handler

tracesys_exit:
	st  r0, [sp, PT_r0]	; sys call return value in pt_regs

	; POST Sys Call Ptrace Hook
	bl  @syscall_trace_exit
	b   ret_from_exception	; NOT ret_from_system_call, as it saves r0,
				; which we'd already done before calling the
				; post hook above

; ---------------------------------------------
; Breakpoint TRAP
; ---------------------------------------------

trap_with_param:
	; stop_pc info needed by gdb
	lr  r0, [efa]
	mov r1, sp

	; Now that we have read EFA, it is safe to do "fake" rtie
	; and get out of CPU exception mode
	FAKE_RET_FROM_EXCPN

	; Save callee regs in case gdb wants to have a look
	; SP will grow up by size of CALLEE Reg-File
	; NOTE: clobbers r12
	SAVE_CALLEE_SAVED_USER

	; save location of saved Callee Regs @ thread_struct->pc
	GET_CURR_TASK_FIELD_PTR   TASK_THREAD, r10
	st  sp, [r10, THREAD_CALLEE_REG]

	; Call the trap handler
	bl  do_non_swi_trap

	; unwind stack to discard Callee saved Regs
	DISCARD_CALLEE_SAVED_USER

	b   ret_from_exception

; ---------------------------------------------
; syscall TRAP
; ABI: (r0-r7) up to 8 args, (r8) syscall number
; ---------------------------------------------

ENTRY(EV_Trap)

	EXCEPTION_PROLOGUE

	;============ TRAP 1 : breakpoints
	; Check ECR for trap with arg (PROLOGUE ensures r9 has ECR)
	bmsk.f 0, r9, 7
	bnz    trap_with_param

	;============ TRAP (no param): syscall top level

	; First return from Exception to pure K mode (Exception/IRQs re-enabled)
	FAKE_RET_FROM_EXCPN

	; If syscall tracing ongoing, invoke pre-post-hooks
	GET_CURR_THR_INFO_FLAGS   r10
	btst r10, TIF_SYSCALL_TRACE
	bnz  tracesys		; this never comes back

	;============ Normal syscall case

	; syscall num should not exceed the total system calls available
	cmp     r8,  NR_syscalls
	mov.hi  r0, -ENOSYS
	bhi     .Lret_from_system_call

	; Offset into the syscall_table and call handler
	ld.as   r9, [sys_call_table, r8]
	jl      [r9]		; Entry into Sys Call Handler

.Lret_from_system_call:

	st  r0, [sp, PT_r0]	; sys call return value in pt_regs

	; fall through to ret_from_exception
END(EV_Trap)

;############# Return from Intr/Excp/Trap (Linux Specifics) ##############
;
; If returning to user mode, we need to handle signals, schedule() et al.

ENTRY(ret_from_exception)

	; Pre-{IRQ,Trap,Exception} K/U mode from pt_regs->status32
	ld  r8, [sp, PT_status32]	; returning to User/Kernel Mode

	bbit0  r8, STATUS_U_BIT, resume_kernel_mode

	; Before returning to User mode check-for-and-complete any pending work
	; such as rescheduling/signal-delivery etc.

resume_user_mode_begin:

	; Disable IRQs to ensure that the check for pending work is itself
	; atomic (so we don't end up missing a NEED_RESCHED/SIGPENDING due to
	; an interim IRQ).
	IRQ_DISABLE	r10

	; Fast Path return to user mode if no pending work
	GET_CURR_THR_INFO_FLAGS   r9
	and.f  0,  r9, _TIF_WORK_MASK
	bz     .Lrestore_regs

	; --- (Slow Path #1) task preemption ---
	bbit0  r9, TIF_NEED_RESCHED, .Lchk_pend_signals
	mov    blink, resume_user_mode_begin	; tail-call to U mode ret chks
	j      @schedule	; BTST+Bnz causes relo error in link

.Lchk_pend_signals:
	IRQ_ENABLE	r10

	; --- (Slow Path #2) pending signal ---
	mov r0, sp	; pt_regs for arg to do_signal()/do_notify_resume()

	GET_CURR_THR_INFO_FLAGS   r9
	bbit0  r9, TIF_SIGPENDING, .Lchk_notify_resume

	; Normal Trap/IRQ entry only saves Scratch (caller-saved) regs
	; in pt_reg since the "C" ABI (kernel code) will automatically
	; save/restore callee-saved regs.
	;
	; However, here we need to explicitly save callee regs because
	;   (i)  If this signal causes coredump - full regfile needed
	;   (ii) If signal is SIGTRAP/SIGSTOP, task is being traced thus
	;        tracer might call PEEKUSR(CALLEE reg)
	;
	; NOTE: SP will grow up by size of CALLEE Reg-File
	SAVE_CALLEE_SAVED_USER		; clobbers r12

	; save location of saved Callee Regs @ thread_struct->callee
	GET_CURR_TASK_FIELD_PTR   TASK_THREAD, r10
	st  sp, [r10, THREAD_CALLEE_REG]

	bl  @do_signal

	; Ideally we want to discard the Callee reg above, however if this was
	; a tracing signal, tracer could have done a POKEUSR(CALLEE reg)
	RESTORE_CALLEE_SAVED_USER

	b      resume_user_mode_begin	; loop back to start of U mode ret

	; --- (Slow Path #3) notify_resume ---
.Lchk_notify_resume:
	btst   r9, TIF_NOTIFY_RESUME
	blnz   @do_notify_resume
	b      resume_user_mode_begin	; unconditionally back to U mode ret chks
					; for single exit point from this block

resume_kernel_mode:

	; Disable Interrupts from this point on
	; CONFIG_PREEMPT: This is a must for preempt_schedule_irq()
	; !CONFIG_PREEMPT: To ensure restore_regs is intr safe
	IRQ_DISABLE	r9

#ifdef CONFIG_PREEMPT

	; Can't preempt if preemption disabled
	GET_CURR_THR_INFO_FROM_SP   r10
	ld  r8, [r10, THREAD_INFO_PREEMPT_COUNT]
	brne  r8, 0, .Lrestore_regs

	; check if this task's NEED_RESCHED flag set
	ld  r9, [r10, THREAD_INFO_FLAGS]
	bbit0  r9, TIF_NEED_RESCHED, .Lrestore_regs

	; Invoke PREEMPTION
	jl      preempt_schedule_irq

	; preempt_schedule_irq() always returns with IRQ disabled
#endif

	b	.Lrestore_regs

##### DONT ADD CODE HERE - .Lrestore_regs actually follows in entry-<isa>.S
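The syscall path in EV_Trap above reduces to a bounds check on the syscall number followed by an indexed call through sys_call_table (cmp/mov.hi/bhi, then ld.as/jl). Below is a minimal, self-contained C sketch of that dispatch logic, purely illustrative: the demo_* names, table contents, and two-argument handler type are invented for the example and are not kernel API.

#include <stdio.h>

/* Illustrative syscall dispatch, mirroring EV_Trap's bounds check + indexed call.
 * The table, handler type and numbers below are made up for this sketch. */
typedef long (*syscall_fn)(long, long);

static long sys_hello(long a, long b) { return a + b; }

static syscall_fn demo_call_table[] = { sys_hello };
#define DEMO_NR_SYSCALLS  (sizeof(demo_call_table) / sizeof(demo_call_table[0]))
#define DEMO_ENOSYS       38

static long demo_dispatch(unsigned long nr, long a0, long a1)
{
	if (nr >= DEMO_NR_SYSCALLS)          /* cmp r8, NR_syscalls; mov.hi r0, -ENOSYS; bhi ... */
		return -DEMO_ENOSYS;
	return demo_call_table[nr](a0, a1);  /* ld.as r9, [sys_call_table, r8]; jl [r9] */
}

int main(void)
{
	printf("%ld\n", demo_dispatch(0, 2, 3));   /* valid syscall: prints 5 */
	printf("%ld\n", demo_dispatch(7, 0, 0));   /* out of range: prints -38 */
	return 0;
}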
AirFortressIlikara/LS2K0300-linux-4.19
1,715
arch/arc/kernel/ctx_sw_asm.S
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Vineetg: Aug 2009
 *  -Moved core context switch macro out of entry.S into this file.
 *  -This is the more "natural" hand written assembler
 */

#include <linux/linkage.h>
#include <asm/entry.h>       /* For the SAVE_* macros */
#include <asm/asm-offsets.h>

#define KSP_WORD_OFF	((TASK_THREAD + THREAD_KSP) / 4)

;################### Low Level Context Switch ##########################

	.section .sched.text,"ax",@progbits
	.align 4
	.global __switch_to
	.type   __switch_to, @function
__switch_to:
	CFI_STARTPROC

	/* Save regs on kernel mode stack of task */
	st.a    blink, [sp, -4]
	st.a    fp, [sp, -4]
	SAVE_CALLEE_SAVED_KERNEL

	/* Save the now KSP in task->thread.ksp */
#if KSP_WORD_OFF <= 255
	st.as  sp, [r0, KSP_WORD_OFF]
#else
	/* Workaround for NR_CPUS=4k as ST.as can only take s9 offset */
	add2	r24, r0, KSP_WORD_OFF
	st	sp, [r24]
#endif
	/*
	 * Return last task in r0 (return reg)
	 * On ARC, Return reg = First Arg reg = r0.
	 * Since we already have last task in r0,
	 * don't need to do anything special to return it
	 */

	/*
	 * switch to new task, contained in r1
	 * Temp reg r3 is required to get the ptr to store val
	 */
	SET_CURR_TASK_ON_CPU  r1, r3

	/* reload SP with kernel mode stack pointer in task->thread.ksp */
	ld.as  sp, [r1, (TASK_THREAD + THREAD_KSP)/4]

	/* restore the registers */
	RESTORE_CALLEE_SAVED_KERNEL
	ld.ab   fp, [sp, 4]
	ld.ab   blink, [sp, 4]
	j       [blink]

END_CFI(__switch_to)
AirFortressIlikara/LS2K0300-linux-4.19
3,167
arch/arc/kernel/vmlinux.lds.S
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/thread_info.h>

OUTPUT_ARCH(arc)
ENTRY(res_service)

#ifdef CONFIG_CPU_BIG_ENDIAN
jiffies = jiffies_64 + 4;
#else
jiffies = jiffies_64;
#endif

SECTIONS
{
	/*
	 * ICCM starts at 0x8000_0000. So if kernel is relocated to some other
	 * address, make sure peripheral at 0x8z doesn't clash with ICCM
	 * Essentially vector is also in ICCM.
	 */

	. = CONFIG_LINUX_LINK_BASE;

	_int_vec_base_lds = .;
	.vector : {
		*(.vector)
		. = ALIGN(PAGE_SIZE);
	}

#ifdef CONFIG_ARC_HAS_ICCM
	.text.arcfp : {
		*(.text.arcfp)
		. = ALIGN(CONFIG_ARC_ICCM_SZ * 1024);
	}
#endif

	/*
	 * The reason for having a separate subsection .init.ramfs is to
	 * prevent objdump from including it in kernel dumps
	 *
	 * Reason for having .init.ramfs above .init is to make sure that the
	 * binary blob is tucked away to one side, reducing the displacement
	 * between .init.text and .text, avoiding any possible relocation
	 * errors because of calls from .init.text to .text
	 * Yes such calls do exist. e.g.
	 *	decompress_inflate.c:gunzip( ) -> zlib_inflate_workspace( )
	 */

	__init_begin = .;

	.init.ramfs : { INIT_RAM_FS }

	. = ALIGN(PAGE_SIZE);
	_stext = .;

	HEAD_TEXT_SECTION
	INIT_TEXT_SECTION(L1_CACHE_BYTES)

	/* INIT_DATA_SECTION open-coded: special INIT_RAM_FS handling */
	.init.data : {
		INIT_DATA
		INIT_SETUP(L1_CACHE_BYTES)
		INIT_CALLS
		CON_INITCALL
		SECURITY_INITCALL
	}

	.init.arch.info : {
		__arch_info_begin = .;
		*(.arch.info.init)
		__arch_info_end = .;
	}

	PERCPU_SECTION(L1_CACHE_BYTES)

	. = ALIGN(PAGE_SIZE);
	__init_end = .;

	.text : {
		_text = .;
		TEXT_TEXT
		SCHED_TEXT
		CPUIDLE_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		*(.fixup)
		*(.gnu.warning)
	}
	EXCEPTION_TABLE(L1_CACHE_BYTES)
	_etext = .;

	_sdata = .;
	RO_DATA_SECTION(PAGE_SIZE)

	/*
	 * 1. this is .data essentially
	 * 2. THREAD_SIZE for init.task, must be kernel-stk sz aligned
	 */
	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)

	_edata = .;

	BSS_SECTION(4, 4, 4)

#ifdef CONFIG_ARC_DW2_UNWIND
	. = ALIGN(PAGE_SIZE);
	.eh_frame : {
		__start_unwind = .;
		*(.eh_frame)
		__end_unwind = .;
	}
#else
	/DISCARD/ : { *(.eh_frame) }
#endif

	NOTES

	. = ALIGN(PAGE_SIZE);
	_end = . ;

	STABS_DEBUG
	DISCARDS

	.arcextmap 0 : {
		*(.gnu.linkonce.arcextmap.*)
		*(.arcextmap.*)
	}

#ifndef CONFIG_DEBUG_INFO
	/DISCARD/ : { *(.debug_frame) }
	/DISCARD/ : { *(.debug_aranges) }
	/DISCARD/ : { *(.debug_pubnames) }
	/DISCARD/ : { *(.debug_info) }
	/DISCARD/ : { *(.debug_abbrev) }
	/DISCARD/ : { *(.debug_line) }
	/DISCARD/ : { *(.debug_str) }
	/DISCARD/ : { *(.debug_loc) }
	/DISCARD/ : { *(.debug_macinfo) }
	/DISCARD/ : { *(.debug_ranges) }
#endif

#ifdef CONFIG_ARC_HAS_DCCM
	. = CONFIG_ARC_DCCM_BASE;
	__arc_dccm_base = .;
	.data.arcfp : {
		*(.data.arcfp)
	}
	. = ALIGN(CONFIG_ARC_DCCM_SZ * 1024);
#endif
}
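The linker script above aliases the 32-bit jiffies symbol to jiffies_64 on little-endian builds and to jiffies_64 + 4 on big-endian builds, so that the 32-bit view always overlays the low-order word of the 64-bit counter. A small, self-contained C sketch of why the offset differs by endianness (the program and its variable names are purely illustrative, not kernel code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Stand-in for the 64-bit counter; the value is arbitrary. */
	uint64_t jiffies_64 = 0x1122334455667788ull;
	uint32_t low;

	/* Low word sits at byte offset 0 on little endian, offset 4 on big endian
	 * - exactly the two aliases the linker script chooses between. */
	size_t off = (*(const unsigned char *)&jiffies_64 == 0x88) ? 0 : 4;

	memcpy(&low, (const unsigned char *)&jiffies_64 + off, sizeof(low));
	printf("low 32 bits: 0x%08x (byte offset %zu)\n", low, off);
	return 0;
}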
AirFortressIlikara/LS2K0300-linux-4.19
1,555
arch/arc/lib/strcpy-700.S
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/* If dst and src are 4 byte aligned, copy 8 bytes at a time.
   If the src is 4, but not 8 byte aligned, we first read 4 bytes to get
   it 8 byte aligned.  Thus, we can do a little read-ahead, without
   dereferencing a cache line that we should not touch.
   Note that short and long instructions have been scheduled to avoid
   branch stalls.
   The beq_s to r3z could be made unaligned & long to avoid a stall
   there, but it is not likely to be taken often, and it would also be
   likely to cost an unaligned mispredict at the next call.  */

#include <linux/linkage.h>

ENTRY_CFI(strcpy)
	or	r2,r0,r1
	bmsk_s	r2,r2,1
	brne.d	r2,0,charloop
	mov_s	r10,r0
	ld_s	r3,[r1,0]
	mov	r8,0x01010101
	bbit0.d	r1,2,loop_start
	ror	r12,r8
	sub	r2,r3,r8
	bic_s	r2,r2,r3
	tst_s	r2,r12
	bne	r3z
	mov_s	r4,r3
	.balign 4
loop:
	ld.a	r3,[r1,4]
	st.ab	r4,[r10,4]
loop_start:
	ld.a	r4,[r1,4]
	sub	r2,r3,r8
	bic_s	r2,r2,r3
	tst_s	r2,r12
	bne_s	r3z
	st.ab	r3,[r10,4]
	sub	r2,r4,r8
	bic	r2,r2,r4
	tst	r2,r12
	beq	loop
	mov_s	r3,r4
#ifdef __LITTLE_ENDIAN__
r3z:	bmsk.f	r1,r3,7
	lsr_s	r3,r3,8
#else
r3z:	lsr.f	r1,r3,24
	asl_s	r3,r3,8
#endif
	bne.d	r3z
	stb.ab	r1,[r10,1]
	j_s	[blink]

	.balign 4
charloop:
	ldb.ab	r3,[r1,1]
	brne.d	r3,0,charloop
	stb.ab	r3,[r10,1]
	j	[blink]
END_CFI(strcpy)
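The word-at-a-time loop above relies on a classic branch-free zero-byte test: with r8 = 0x01010101 and r12 = ror(r8) = 0x80808080, the sequence sub/bic/tst computes (word - 0x01010101) & ~word and checks it against 0x80808080, which is non-zero exactly when some byte of the word is zero. A minimal, self-contained C sketch of that test (the function name and test values are invented for illustration):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: the zero-byte detection used by the 4-bytes-at-a-time loop.
 * Mirrors: sub r2,r3,r8; bic r2,r2,r3; tst r2,r12  with r8=0x01010101, r12=0x80808080. */
static int word_has_zero_byte(uint32_t w)
{
	uint32_t ones  = 0x01010101u;
	uint32_t highs = 0x80808080u;

	/* A byte of w is zero iff subtracting 1 from it borrows into its high bit
	 * while the byte itself had the high bit clear. */
	return ((w - ones) & ~w & highs) != 0;
}

int main(void)
{
	printf("%d\n", word_has_zero_byte(0x61626364u)); /* "abcd": no NUL -> 0 */
	printf("%d\n", word_has_zero_byte(0x61620064u)); /* embedded NUL -> 1 */
	return 0;
}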
AirFortressIlikara/LS2K0300-linux-4.19
2,722
arch/arc/lib/strchr-700.S
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/* ARC700 has a relatively long pipeline and branch prediction, so we want
   to avoid branches that are hard to predict.  On the other hand, the
   presence of the norm instruction makes it easier to operate on whole
   words branch-free.  */

#include <linux/linkage.h>

ENTRY_CFI(strchr)
	extb_s	r1,r1
	asl	r5,r1,8
	bmsk	r2,r0,1
	or	r5,r5,r1
	mov_s	r3,0x01010101
	breq.d	r2,r0,.Laligned
	asl	r4,r5,16
	sub_s	r0,r0,r2
	asl	r7,r2,3
	ld_s	r2,[r0]
#ifdef __LITTLE_ENDIAN__
	asl	r7,r3,r7
#else
	lsr	r7,r3,r7
#endif
	or	r5,r5,r4
	ror	r4,r3
	sub	r12,r2,r7
	bic_s	r12,r12,r2
	and	r12,r12,r4
	brne.d	r12,0,.Lfound0_ua
	xor	r6,r2,r5
	ld.a	r2,[r0,4]
	sub	r12,r6,r7
	bic	r12,r12,r6
#ifdef __LITTLE_ENDIAN__
	and	r7,r12,r4
	breq	r7,0,.Loop	; For speed, we want this branch to be unaligned.
	b	.Lfound_char	; Likewise this one.
#else
	and	r12,r12,r4
	breq	r12,0,.Loop	; For speed, we want this branch to be unaligned.
	lsr_s	r12,r12,7
	bic	r2,r7,r6
	b.d	.Lfound_char_b
	and_s	r2,r2,r12
#endif
; /* We require this code address to be unaligned for speed...  */
.Laligned:
	ld_s	r2,[r0]
	or	r5,r5,r4
	ror	r4,r3
; /* ... so that this code address is aligned, for itself and ...  */
.Loop:
	sub	r12,r2,r3
	bic_s	r12,r12,r2
	and	r12,r12,r4
	brne.d	r12,0,.Lfound0
	xor	r6,r2,r5
	ld.a	r2,[r0,4]
	sub	r12,r6,r3
	bic	r12,r12,r6
	and	r7,r12,r4
	breq	r7,0,.Loop	/* ... so that this branch is unaligned.  */
	; Found searched-for character.  r0 has already advanced to next word.
#ifdef __LITTLE_ENDIAN__
/* We only need the information about the first matching byte
   (i.e. the least significant matching byte) to be exact,
   hence there is no problem with carry effects.  */
.Lfound_char:
	sub	r3,r7,1
	bic	r3,r3,r7
	norm	r2,r3
	sub_s	r0,r0,1
	asr_s	r2,r2,3
	j.d	[blink]
	sub_s	r0,r0,r2

	.balign	4
.Lfound0_ua:
	mov	r3,r7
.Lfound0:
	sub	r3,r6,r3
	bic	r3,r3,r6
	and	r2,r3,r4
	or_s	r12,r12,r2
	sub_s	r3,r12,1
	bic_s	r3,r3,r12
	norm	r3,r3
	add_s	r0,r0,3
	asr_s	r12,r3,3
	asl.f	0,r2,r3
	sub_s	r0,r0,r12
	j_s.d	[blink]
	mov.pl	r0,0
#else /* BIG ENDIAN */
.Lfound_char:
	lsr	r7,r7,7

	bic	r2,r7,r6
.Lfound_char_b:
	norm	r2,r2
	sub_s	r0,r0,4
	asr_s	r2,r2,3
	j.d	[blink]
	add_s	r0,r0,r2

.Lfound0_ua:
	mov_s	r3,r7
.Lfound0:
	asl_s	r2,r2,7
	or	r7,r6,r4
	bic_s	r12,r12,r2
	sub	r2,r7,r3
	or	r2,r2,r6
	bic	r12,r2,r12
	bic.f	r3,r4,r12
	norm	r3,r3

	add.pl	r3,r3,1
	asr_s	r12,r3,3
	asl.f	0,r2,r3
	add_s	r0,r0,r12
	j_s.d	[blink]
	mov.mi	r0,0
#endif /* ENDIAN */
END_CFI(strchr)
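strchr-700.S extends the zero-byte test shown after strcpy-700.S: the target character is replicated into every byte of a word (the extb_s/asl/or sequence building r5), each loaded word is XORed with that splat so a matching byte becomes zero, and the same (w - 0x01010101) & ~w & 0x80808080 test then flags the match; norm is later used to locate the first matching byte. A minimal, self-contained C sketch of the per-word match test (the function name and test values are invented for illustration):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: the word-at-a-time character match behind strchr-700.S. */
static int word_has_byte(uint32_t w, unsigned char c)
{
	uint32_t splat = 0x01010101u * c;	/* replicate c into all 4 bytes (r5) */
	uint32_t x = w ^ splat;			/* a matching byte becomes zero */

	/* Standard zero-byte test on the XORed word. */
	return ((x - 0x01010101u) & ~x & 0x80808080u) != 0;
}

int main(void)
{
	printf("%d\n", word_has_byte(0x61626364u, 'c'));	/* 0x63 present -> 1 */
	printf("%d\n", word_has_byte(0x61626364u, 'z'));	/* not present  -> 0 */
	return 0;
}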